ZTE's code, first commit

Change-Id: I9a04da59e459a9bc0d67f101f700d9d7dc8d681b
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/Kconfig b/ap/os/linux/linux-3.4.x/drivers/net/wan/Kconfig
new file mode 100644
index 0000000..d70ede7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/Kconfig
@@ -0,0 +1,504 @@
+#
+# wan devices configuration
+#
+
+menuconfig WAN
+	bool "Wan interfaces support"
+	---help---
+	  Wide Area Networks (WANs), such as X.25, Frame Relay and leased
+	  lines, are used to interconnect Local Area Networks (LANs) over vast
+	  distances with data transfer rates significantly higher than those
+	  achievable with commonly used asynchronous modem connections.
+
+	  Usually, a quite expensive external device called a `WAN router' is
+	  needed to connect to a WAN. As an alternative, a relatively
+	  inexpensive WAN interface card can allow your Linux box to directly
+	  connect to a WAN.
+
+	  If you have one of those cards and wish to use it under Linux,
+	  say Y here and also to the WAN driver for your card.
+
+	  If unsure, say N.
+
+if WAN
+
+# There is no way to detect a comtrol sv11 - force it modular for now.
+config HOSTESS_SV11
+	tristate "Comtrol Hostess SV-11 support"
+	depends on ISA && m && ISA_DMA_API && INET && HDLC
+	help
+	  Driver for the Comtrol Hostess SV-11 network card, which
+	  operates on low-speed synchronous serial links at up to
+	  256 Kbps, supporting PPP and Cisco HDLC.
+
+	  The driver will be compiled as a module: the
+	  module will be called hostess_sv11.
+
+# The COSA/SRP driver has not been tested as non-modular yet.
+config COSA
+	tristate "COSA/SRP sync serial boards support"
+	depends on ISA && m && ISA_DMA_API && HDLC
+	---help---
+	  Driver for COSA and SRP synchronous serial boards.
+
+	  These boards allow you to connect synchronous serial devices (for
+	  example base-band modems, or any other device with the X.21, V.24,
+	  V.35 or V.36 interface) to your Linux box. The cards can work as a
+	  character device, a synchronous PPP network device, or a Cisco HDLC
+	  network device.
+
+	  You will need user-space utilities for the COSA or SRP boards to
+	  download the firmware to the cards and to set them up. Look at
+	  <http://www.fi.muni.cz/~kas/cosa/> for more information. You can also
+	  read the comment at the top of <file:drivers/net/wan/cosa.c> for
+	  details about the cards and the driver itself.
+
+	  The driver will be compiled as a module: the
+	  module will be called cosa.
+
+#
+# Lan Media's board. Currently 1000, 1200, 5200, 5245
+#
+config LANMEDIA
+	tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards"
+	depends on PCI && VIRT_TO_BUS && HDLC
+	---help---
+	  Driver for the following Lan Media family of serial boards:
+
+	  - LMC 1000 board allows you to connect synchronous serial devices
+	  (for example base-band modems, or any other device with the X.21,
+	  V.24, V.35 or V.36 interface) to your Linux box.
+
+	  - LMC 1200 board with on-board DSU allows you to connect your Linux
+	  box directly to a T1 or E1 circuit.
+
+	  - LMC 5200 board provides an HSSI interface capable of running up to
+	  52 Mbits per second.
+
+	  - LMC 5245 board connects directly to a T3 circuit, saving the
+	  additional external hardware.
+
+	  To change settings such as the clock source you will need lmcctl.
+	  It is available at <ftp://ftp.lanmedia.com/> (broken link).
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called lmc.
+
+# There is no way to detect a Sealevel board. Force it modular
+config SEALEVEL_4021
+	tristate "Sealevel Systems 4021 support"
+	depends on ISA && m && ISA_DMA_API && INET && HDLC
+	help
+	  This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
+
+	  The driver will be compiled as a module: the
+	  module will be called sealevel.
+
+# Generic HDLC
+config HDLC
+	tristate "Generic HDLC layer"
+	help
+	  Say Y to this option if your Linux box contains a WAN (Wide Area
+	  Network) card supported by this driver and you are planning to
+	  connect the box to a WAN.
+
+	  You will need supporting software from
+	  <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
+	  Generic HDLC driver currently supports raw HDLC, Cisco HDLC, Frame
+	  Relay, synchronous Point-to-Point Protocol (PPP) and X.25.
+
+ 	  To compile this driver as a module, choose M here: the
+	  module will be called hdlc.
+
+	  If unsure, say N.
+
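+# Example (illustrative only): building the generic layer and one protocol as
+# modules, e.g. CONFIG_HDLC=m and CONFIG_HDLC_PPP=m, produces hdlc.ko and
+# hdlc_ppp.ko; the protocol options below all depend on HDLC.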
+config HDLC_RAW
+	tristate "Raw HDLC support"
+	depends on HDLC
+	help
+	  Generic HDLC driver supporting raw HDLC over WAN connections.
+
+	  If unsure, say N.
+
+config HDLC_RAW_ETH
+	tristate "Raw HDLC Ethernet device support"
+	depends on HDLC
+	help
+	  Generic HDLC driver supporting raw HDLC Ethernet device emulation
+	  over WAN connections.
+
+	  You will need it for Ethernet over HDLC bridges.
+
+	  If unsure, say N.
+
+config HDLC_CISCO
+	tristate "Cisco HDLC support"
+	depends on HDLC
+	help
+	  Generic HDLC driver supporting Cisco HDLC over WAN connections.
+
+	  If unsure, say N.
+
+config HDLC_FR
+	tristate "Frame Relay support"
+	depends on HDLC
+	help
+	  Generic HDLC driver supporting Frame Relay over WAN connections.
+
+	  If unsure, say N.
+
+config HDLC_PPP
+	tristate "Synchronous Point-to-Point Protocol (PPP) support"
+	depends on HDLC
+	help
+	  Generic HDLC driver supporting PPP over WAN connections.
+
+	  If unsure, say N.
+
+config HDLC_X25
+	tristate "X.25 protocol support"
+	depends on HDLC && (LAPB=m && HDLC=m || LAPB=y)
+	help
+	  Generic HDLC driver supporting X.25 over WAN connections.
+
+	  If unsure, say N.
+
+comment "X.25/LAPB support is disabled"
+	depends on HDLC && (LAPB!=m || HDLC!=m) && LAPB!=y
+
+config PCI200SYN
+	tristate "Goramo PCI200SYN support"
+	depends on HDLC && PCI
+	help
+	  Driver for PCI200SYN cards by Goramo sp. j.
+
+	  If you have such a card, say Y here and see
+	  <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
+
+	  To compile this as a module, choose M here: the
+	  module will be called pci200syn.
+
+	  If unsure, say N.
+
+config WANXL
+	tristate "SBE Inc. wanXL support"
+	depends on HDLC && PCI
+	help
+	  Driver for wanXL PCI cards by SBE Inc.
+
+	  If you have such a card, say Y here and see
+	  <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
+
+	  To compile this as a module, choose M here: the
+	  module will be called wanxl.
+
+	  If unsure, say N.
+
+config WANXL_BUILD_FIRMWARE
+	bool "rebuild wanXL firmware"
+	depends on WANXL && !PREVENT_FIRMWARE_BUILD
+	help
+	  Allows you to rebuild the firmware run by the QUICC processor.
+	  It requires the as68k, ld68k and hexdump programs.
+
+	  You should never need this option; say N.
+
+config PC300
+	tristate "Cyclades-PC300 support (RS-232/V.35, X.21, T1/E1 boards)"
+	depends on HDLC && PCI && BROKEN
+	---help---
+	  This driver is broken because of struct tty_driver change.
+
+	  Driver for the Cyclades-PC300 synchronous communication boards.
+
+	  These boards provide synchronous serial interfaces to your
+	  Linux box (interfaces currently available are RS-232/V.35, X.21 and
+	  T1/E1). If you wish to support Multilink PPP, please select the
+	  option later and read the file README.mlppp provided by the PC300
+	  package.
+
+	  To compile this as a module, choose M here: the module
+	  will be called pc300.
+
+	  If unsure, say N.
+
+config PC300_MLPPP
+	bool "Cyclades-PC300 MLPPP support"
+	depends on PC300 && PPP_MULTILINK && PPP_SYNC_TTY && HDLC_PPP
+	help
+	  Multilink PPP over the PC300 synchronous communication boards.
+
+comment "Cyclades-PC300 MLPPP support is disabled."
+	depends on HDLC && PC300 && (PPP=n || !PPP_MULTILINK || PPP_SYNC_TTY=n || !HDLC_PPP)
+
+comment "Refer to the file README.mlppp, provided by the PC300 package."
+	depends on HDLC && PC300 && (PPP=n || !PPP_MULTILINK || PPP_SYNC_TTY=n || !HDLC_PPP)
+
+config PC300TOO
+	tristate "Cyclades PC300 RSV/X21 alternative support"
+	depends on HDLC && PCI
+	help
+	  Alternative driver for PC300 RSV/X21 PCI cards made by
+	  Cyclades, Inc. If you have such a card, say Y here and see
+	  <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
+
+	  To compile this as a module, choose M here: the module
+	  will be called pc300too.
+
+	  If unsure, say N here.
+
+config N2
+	tristate "SDL RISCom/N2 support"
+	depends on HDLC && ISA
+	help
+	  Driver for RISCom/N2 single or dual channel ISA cards by
+	  SDL Communications Inc.
+
+	  If you have such a card, say Y here and see
+	  <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
+
+	  Note that N2csu and N2dds cards are not supported by this driver.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called n2.
+
+	  If unsure, say N.
+
+config C101
+	tristate "Moxa C101 support"
+	depends on HDLC && ISA
+	help
+	  Driver for C101 SuperSync ISA cards by Moxa Technologies Co., Ltd.
+
+	  If you have such a card, say Y here and see
+	  <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called c101.
+
+	  If unsure, say N.
+
+config FARSYNC
+	tristate "FarSync T-Series support"
+	depends on HDLC && PCI
+	---help---
+	  Support for the FarSync T-Series X.21 (and V.35/V.24) cards by
+	  FarSite Communications Ltd.
+
+	  Synchronous communication is supported on all ports at speeds up to
+	  8Mb/s (128K on V.24) using synchronous PPP, Cisco HDLC, raw HDLC,
+	  Frame Relay or X.25/LAPB.
+
+	  If you want the module to be automatically loaded when the interface
+	  is referenced then you should add "alias hdlcX farsync" to a file
+	  in /etc/modprobe.d/ for each interface, where X is 0, 1, 2, ..., or
+	  simply use "alias hdlc* farsync" to indicate all of them.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called farsync.
+
+config DSCC4
+	tristate "Etinc PCISYNC serial board support"
+	depends on HDLC && PCI && m
+	help
+	  Driver for Etinc PCISYNC boards based on the Infineon (ex. Siemens)
+	  DSCC4 chipset.
+
+	  This is supposed to work with the four port card. Take a look at
+	  <http://www.cogenit.fr/dscc4/> for further information about the
+	  driver.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called dscc4.
+
+config DSCC4_PCISYNC
+	bool "Etinc PCISYNC features"
+	depends on DSCC4
+	help
+	  Due to Etinc's design choice for its PCISYNC cards, some operations
+	  are only allowed on specific ports of the DSCC4. This option is the
+	  only way for the driver to know that it shouldn't return a success
+	  code for these operations.
+
+	  Please say Y if your card is an Etinc PCISYNC.
+
+config DSCC4_PCI_RST
+	bool "Hard reset support"
+	depends on DSCC4
+	help
+	  Various DSCC4 bugs forbid any reliable software reset of the ASIC.
+	  As a replacement, some vendors provide a way to assert the PCI #RST
+	  pin of DSCC4 through the GPIO port of the card. If you choose Y,
+	  the driver will make use of this feature before module removal
+	  (i.e. rmmod). The feature is known to be available on Commtech's
+	  cards. Contact your manufacturer for details.
+
+	  Say Y if your card supports this feature.
+
+config IXP4XX_HSS
+	tristate "Intel IXP4xx HSS (synchronous serial port) support"
+	depends on HDLC && ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
+	help
+	  Say Y here if you want to use the built-in HSS ports
+	  on the IXP4xx processor.
+
+config DLCI
+	tristate "Frame Relay DLCI support"
+	---help---
+	  Support for the Frame Relay protocol.
+
+	  Frame Relay is a fast low-cost way to connect to a remote Internet
+	  access provider or to form a private wide area network. The one
+	  physical line from your box to the local "switch" (i.e. the entry
+	  point to the Frame Relay network, usually at the phone company) can
+	  carry several logical point-to-point connections to other computers
+	  connected to the Frame Relay network. For a general explanation of
+	  the protocol, check out <http://www.mplsforum.org/>.
+
+	  To use frame relay, you need supporting hardware (called FRAD) and
+	  certain programs from the net-tools package as explained in
+	  <file:Documentation/networking/framerelay.txt>.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called dlci.
+
+config DLCI_MAX
+	int "Max DLCI per device"
+	depends on DLCI
+	default "8"
+	help
+	  How many logical point-to-point frame relay connections (the
+	  identifiers of which are called DLCIs) should be handled by each
+	  of your hardware frame relay access devices.
+
+	  Go with the default.
+
+config SDLA
+	tristate "SDLA (Sangoma S502/S508) support"
+	depends on DLCI && ISA
+	help
+	  Driver for the Sangoma S502A, S502E, and S508 Frame Relay Access
+	  Devices.
+
+	  These are multi-protocol cards, but only Frame Relay is supported
+	  by the driver at this time. Please read
+	  <file:Documentation/networking/framerelay.txt>.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sdla.
+
+# Wan router core.
+config WAN_ROUTER_DRIVERS
+	tristate "WAN router drivers"
+	depends on WAN_ROUTER
+	---help---
+	  Connect LAN to WAN via Linux box.
+
+	  Select the driver for your card and remember to say Y to "Wan Router."
+	  You will need the wan-tools package which is available from
+	  <ftp://ftp.sangoma.com/>.
+
+	  Note that the answer to this question won't directly affect the
+	  kernel except for how subordinate drivers may be built:
+	  saying N will just cause the configurator to skip all
+	  the questions about WAN router drivers.
+
+	  If unsure, say N.
+
+config CYCLADES_SYNC
+	tristate "Cyclom 2X(tm) cards (EXPERIMENTAL)"
+	depends on WAN_ROUTER_DRIVERS && (PCI || ISA)
+	---help---
+	  Cyclom 2X from Cyclades Corporation <http://www.avocent.com/> is an
+	  intelligent multiprotocol WAN adapter with data transfer rates up to
+	  512 Kbps. These cards support the X.25 and SNA related protocols.
+
+	  While no documentation is available at this time, please grab the
+	  wanconfig tarball from
+	  <http://www.conectiva.com.br/~acme/cycsyn-devel/> (with minor changes
+	  to make it compile with the current wanrouter include files; efforts
+	  are being made to use the original package available at
+	  <ftp://ftp.sangoma.com/>).
+
+	  Feel free to contact me or the cycsyn-devel mailing list at
+	  <acme@conectiva.com.br> and <cycsyn-devel@bazar.conectiva.com.br> for
+	  additional details; I hope to have documentation available as soon as
+	  possible (Cyclades Brazil is writing the documentation).
+
+	  The next questions will ask you about the protocols you want the
+	  driver to support (for now only X.25 is supported).
+
+	  If you have one or more of these cards, say Y to this option.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cyclomx.
+
+config CYCLOMX_X25
+	bool "Cyclom 2X X.25 support (EXPERIMENTAL)"
+	depends on CYCLADES_SYNC
+	help
+	  Connect a Cyclom 2X card to an X.25 network.
+
+	  Enabling X.25 support will enlarge your kernel by about 11 kB.
+
+# X.25 network drivers
+config LAPBETHER
+	tristate "LAPB over Ethernet driver (EXPERIMENTAL)"
+	depends on LAPB && X25
+	---help---
+	  Driver for a pseudo device (typically called /dev/lapb0) which allows
+	  you to open an LAPB point-to-point connection to some other computer
+	  on your Ethernet network.
+
+	  In order to do this, you need to say Y or M to the driver for your
+	  Ethernet card as well as to "LAPB Data Link Driver".
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called lapbether.
+
+	  If unsure, say N.
+
+config X25_ASY
+	tristate "X.25 async driver (EXPERIMENTAL)"
+	depends on LAPB && X25
+	---help---
+	  Send and receive X.25 frames over regular asynchronous serial
+	  lines such as telephone lines equipped with ordinary modems.
+
+	  Experts should note that this driver doesn't currently comply with
+	  the asynchronous HDLC framing protocols in CCITT recommendation X.25.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called x25_asy.
+
+	  If unsure, say N.
+
+config SBNI
+	tristate "Granch SBNI12 Leased Line adapter support"
+	depends on X86
+	---help---
+	  Driver for ISA SBNI12-xx cards, which are low-cost alternatives to
+	  leased-line modems.
+
+	  You can find more information and the latest versions of the drivers
+	  and utilities at <http://www.granch.ru/>. If you have any questions,
+	  you can send email to <sbni@granch.ru>.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sbni.
+
+	  If unsure, say N.
+
+config SBNI_MULTILINE
+	bool "Multiple line feature support"
+	depends on SBNI
+	help
+	  Schedule traffic across several parallel lines via SBNI12 adapters.
+
+	  If you have two computers connected with two parallel lines, it is
+	  possible to nearly double the transfer rate. You will need a program
+	  named 'sbniconfig' to configure the adapters.
+
+	  If unsure, say N.
+
+endif # WAN
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/Makefile b/ap/os/linux/linux-3.4.x/drivers/net/wan/Makefile
new file mode 100644
index 0000000..19d14bc
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/Makefile
@@ -0,0 +1,68 @@
+#
+# Makefile for the Linux network (wan) device drivers.
+#
+# 3 Aug 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+cyclomx-y                       := cycx_main.o
+cyclomx-$(CONFIG_CYCLOMX_X25)	+= cycx_x25.o
+cyclomx-objs			:= $(cyclomx-y)  
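+# Note: cyclomx is a composite module - cycx_main.o is always included, and
+# cycx_x25.o is added only when CONFIG_CYCLOMX_X25 is enabled (the same
+# pattern is used for pc300 below).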
+
+obj-$(CONFIG_HDLC)		+= hdlc.o
+obj-$(CONFIG_HDLC_RAW)		+= hdlc_raw.o
+obj-$(CONFIG_HDLC_RAW_ETH)	+= hdlc_raw_eth.o
+obj-$(CONFIG_HDLC_CISCO)	+= hdlc_cisco.o
+obj-$(CONFIG_HDLC_FR)		+= hdlc_fr.o
+obj-$(CONFIG_HDLC_PPP)		+= hdlc_ppp.o
+obj-$(CONFIG_HDLC_X25)		+= hdlc_x25.o
+
+pc300-y				:= pc300_drv.o
+pc300-$(CONFIG_PC300_MLPPP)	+= pc300_tty.o
+pc300-objs			:= $(pc300-y)
+
+obj-$(CONFIG_HOSTESS_SV11)	+= z85230.o	hostess_sv11.o
+obj-$(CONFIG_SEALEVEL_4021)	+= z85230.o	sealevel.o
+obj-$(CONFIG_COSA)		+= cosa.o
+obj-$(CONFIG_FARSYNC)		+= farsync.o
+obj-$(CONFIG_DSCC4)             += dscc4.o
+obj-$(CONFIG_X25_ASY)		+= x25_asy.o
+
+obj-$(CONFIG_LANMEDIA)		+= lmc/
+
+obj-$(CONFIG_DLCI)		+= dlci.o 
+obj-$(CONFIG_SDLA)		+= sdla.o
+obj-$(CONFIG_CYCLADES_SYNC)	+= cycx_drv.o cyclomx.o
+obj-$(CONFIG_LAPBETHER)		+= lapbether.o
+obj-$(CONFIG_SBNI)		+= sbni.o
+obj-$(CONFIG_PC300)		+= pc300.o
+obj-$(CONFIG_N2)		+= n2.o
+obj-$(CONFIG_C101)		+= c101.o
+obj-$(CONFIG_WANXL)		+= wanxl.o
+obj-$(CONFIG_PCI200SYN)		+= pci200syn.o
+obj-$(CONFIG_PC300TOO)		+= pc300too.o
+obj-$(CONFIG_IXP4XX_HSS)	+= ixp4xx_hss.o
+
+clean-files := wanxlfw.inc
+$(obj)/wanxl.o:	$(obj)/wanxlfw.inc
+
+ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y)
+ifeq ($(ARCH),m68k)
+  AS68K = $(AS)
+  LD68K = $(LD)
+else
+  AS68K = as68k
+  LD68K = ld68k
+endif
+
+quiet_cmd_build_wanxlfw = BLD FW  $@
+      cmd_build_wanxlfw = \
+	$(CPP) -Wp,-MD,$(depfile) -I$(srctree)/include $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \
+	$(LD68K) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \
+	hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x  ,//g;1s/^/static u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \
+	rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o
+
+$(obj)/wanxlfw.inc:	$(src)/wanxlfw.S
+	$(call if_changed_dep,build_wanxlfw)
+targets += wanxlfw.inc
+endif
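+# Note: with CONFIG_WANXL_BUILD_FIRMWARE enabled, wanxlfw.S is preprocessed and
+# assembled for the MC68360, linked to a flat binary at 0x1000, and converted
+# by hexdump/sed into the "static u8 firmware[]" array in wanxlfw.inc, which
+# wanxl.o depends on.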
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/c101.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/c101.c
new file mode 100644
index 0000000..09a5075
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/c101.c
@@ -0,0 +1,454 @@
+/*
+ * Moxa C101 synchronous serial card driver for Linux
+ *
+ * Copyright (C) 2000-2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>
+ *
+ * Sources of information:
+ *    Hitachi HD64570 SCA User's Manual
+ *    Moxa C101 User's Manual
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/capability.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/hdlc.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+#include "hd64570.h"
+
+
+static const char* version = "Moxa C101 driver version: 1.15";
+static const char* devname = "C101";
+
+#undef DEBUG_PKT
+#define DEBUG_RINGS
+
+#define C101_PAGE 0x1D00
+#define C101_DTR 0x1E00
+#define C101_SCA 0x1F00
+#define C101_WINDOW_SIZE 0x2000
+#define C101_MAPPED_RAM_SIZE 0x4000
+
+#define RAM_SIZE (256 * 1024)
+#define TX_RING_BUFFERS 10
+#define RX_RING_BUFFERS ((RAM_SIZE - C101_WINDOW_SIZE) /		\
+			 (sizeof(pkt_desc) + HDLC_MAX_MRU) - TX_RING_BUFFERS)
+
+#define CLOCK_BASE 9830400	/* 9.8304 MHz */
+#define PAGE0_ALWAYS_MAPPED
+
+static char *hw;		/* pointer to hw=xxx command line string */
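+/*
+ * Illustrative example (not in the original source): the "hw" string is
+ * parsed by c101_init() as irq,ram pairs separated by ':'.  Loading the
+ * module with a hypothetical "hw=3,0xC8000" would request IRQ 3 and a
+ * 16 KB ISA window at 0xC8000, which passes the checks in c101_run()
+ * (IRQ 3-15 except 6, window within 0xC0000-0xDFFFF, 0x4000-aligned).
+ */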
+
+
+typedef struct card_s {
+	struct net_device *dev;
+	spinlock_t lock;	/* TX lock */
+	u8 __iomem *win0base;	/* ISA window base address */
+	u32 phy_winbase;	/* ISA physical base address */
+	sync_serial_settings settings;
+	int rxpart;		/* partial frame received, next frame invalid*/
+	unsigned short encoding;
+	unsigned short parity;
+	u16 rx_ring_buffers;	/* number of buffers in a ring */
+	u16 tx_ring_buffers;
+	u16 buff_offset;	/* offset of first buffer of first channel */
+	u16 rxin;		/* rx ring buffer 'in' pointer */
+	u16 txin;		/* tx ring buffer 'in' and 'last' pointers */
+	u16 txlast;
+	u8 rxs, txs, tmc;	/* SCA registers */
+	u8 irq;			/* IRQ (3-15) */
+	u8 page;
+
+	struct card_s *next_card;
+} card_t;
+
+typedef card_t port_t;
+
+static card_t *first_card;
+static card_t **new_card = &first_card;
+
+
+#define sca_in(reg, card)	   readb((card)->win0base + C101_SCA + (reg))
+#define sca_out(value, reg, card)  writeb(value, (card)->win0base + C101_SCA + (reg))
+#define sca_inw(reg, card)	   readw((card)->win0base + C101_SCA + (reg))
+
+/* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */
+#define sca_outw(value, reg, card) do { \
+	writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \
+	writeb((value >> 8 ) & 0xFF, (card)->win0base + C101_SCA + (reg + 1));\
+} while(0)
+
+#define port_to_card(port)	   (port)
+#define log_node(port)		   (0)
+#define phy_node(port)		   (0)
+#define winsize(card)		   (C101_WINDOW_SIZE)
+#define win0base(card)		   ((card)->win0base)
+#define winbase(card)      	   ((card)->win0base + 0x2000)
+#define get_port(card, port)	   (card)
+static void sca_msci_intr(port_t *port);
+
+
+static inline u8 sca_get_page(card_t *card)
+{
+	return card->page;
+}
+
+static inline void openwin(card_t *card, u8 page)
+{
+	card->page = page;
+	writeb(page, card->win0base + C101_PAGE);
+}
+
+
+#include "hd64570.c"
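+/*
+ * The generic Hitachi HD64570 SCA code is included here directly so that it
+ * uses the card-specific register accessors and window helpers (sca_in,
+ * sca_out, openwin, winbase, ...) defined above.
+ */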
+
+
+static inline void set_carrier(port_t *port)
+{
+	if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD))
+		netif_carrier_on(port_to_dev(port));
+	else
+		netif_carrier_off(port_to_dev(port));
+}
+
+
+static void sca_msci_intr(port_t *port)
+{
+	u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */
+
+	/* Reset MSCI TX underrun and CDCD (ignored) status bit */
+	sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port);
+
+	if (stat & ST1_UDRN) {
+		/* TX Underrun error detected */
+		port_to_dev(port)->stats.tx_errors++;
+		port_to_dev(port)->stats.tx_fifo_errors++;
+	}
+
+	stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */
+	/* Reset MSCI CDCD status bit - uses ch#2 DCD input */
+	sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port);
+
+	if (stat & ST1_CDCD)
+		set_carrier(port);
+}
+
+
+static void c101_set_iface(port_t *port)
+{
+	u8 rxs = port->rxs & CLK_BRG_MASK;
+	u8 txs = port->txs & CLK_BRG_MASK;
+
+	switch(port->settings.clock_type) {
+	case CLOCK_INT:
+		rxs |= CLK_BRG_RX; /* BRG output */
+		txs |= CLK_RXCLK_TX; /* RX clock */
+		break;
+
+	case CLOCK_TXINT:
+		rxs |= CLK_LINE_RX; /* RXC input */
+		txs |= CLK_BRG_TX; /* BRG output */
+		break;
+
+	case CLOCK_TXFROMRX:
+		rxs |= CLK_LINE_RX; /* RXC input */
+		txs |= CLK_RXCLK_TX; /* RX clock */
+		break;
+
+	default:	/* EXTernal clock */
+		rxs |= CLK_LINE_RX; /* RXC input */
+		txs |= CLK_LINE_TX; /* TXC input */
+	}
+
+	port->rxs = rxs;
+	port->txs = txs;
+	sca_out(rxs, MSCI1_OFFSET + RXS, port);
+	sca_out(txs, MSCI1_OFFSET + TXS, port);
+	sca_set_port(port);
+}
+
+
+static int c101_open(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+	int result;
+
+	result = hdlc_open(dev);
+	if (result)
+		return result;
+
+	writeb(1, port->win0base + C101_DTR);
+	sca_out(0, MSCI1_OFFSET + CTL, port); /* RTS uses ch#2 output */
+	sca_open(dev);
+	/* DCD is connected to port 2 !@#$%^& - disable MSCI0 CDCD interrupt */
+	sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port);
+	sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port);
+
+	set_carrier(port);
+
+	/* enable MSCI1 CDCD interrupt */
+	sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port);
+	sca_out(IE0_RXINTA, MSCI1_OFFSET + IE0, port);
+	sca_out(0x48, IER0, port); /* TXINT #0 and RXINT #1 */
+	c101_set_iface(port);
+	return 0;
+}
+
+
+static int c101_close(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+
+	sca_close(dev);
+	writeb(0, port->win0base + C101_DTR);
+	sca_out(CTL_NORTS, MSCI1_OFFSET + CTL, port);
+	hdlc_close(dev);
+	return 0;
+}
+
+
+static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	const size_t size = sizeof(sync_serial_settings);
+	sync_serial_settings new_line;
+	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+	port_t *port = dev_to_port(dev);
+
+#ifdef DEBUG_RINGS
+	if (cmd == SIOCDEVPRIVATE) {
+		sca_dump_rings(dev);
+		printk(KERN_DEBUG "MSCI1: ST: %02x %02x %02x %02x\n",
+		       sca_in(MSCI1_OFFSET + ST0, port),
+		       sca_in(MSCI1_OFFSET + ST1, port),
+		       sca_in(MSCI1_OFFSET + ST2, port),
+		       sca_in(MSCI1_OFFSET + ST3, port));
+		return 0;
+	}
+#endif
+	if (cmd != SIOCWANDEV)
+		return hdlc_ioctl(dev, ifr, cmd);
+
+	switch(ifr->ifr_settings.type) {
+	case IF_GET_IFACE:
+		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
+		if (ifr->ifr_settings.size < size) {
+			ifr->ifr_settings.size = size; /* data size wanted */
+			return -ENOBUFS;
+		}
+		if (copy_to_user(line, &port->settings, size))
+			return -EFAULT;
+		return 0;
+
+	case IF_IFACE_SYNC_SERIAL:
+		if(!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&new_line, line, size))
+			return -EFAULT;
+
+		if (new_line.clock_type != CLOCK_EXT &&
+		    new_line.clock_type != CLOCK_TXFROMRX &&
+		    new_line.clock_type != CLOCK_INT &&
+		    new_line.clock_type != CLOCK_TXINT)
+			return -EINVAL;	/* No such clock setting */
+
+		if (new_line.loopback != 0 && new_line.loopback != 1)
+			return -EINVAL;
+
+		memcpy(&port->settings, &new_line, size); /* Update settings */
+		c101_set_iface(port);
+		return 0;
+
+	default:
+		return hdlc_ioctl(dev, ifr, cmd);
+	}
+}
+
+
+
+static void c101_destroy_card(card_t *card)
+{
+	readb(card->win0base + C101_PAGE); /* Resets SCA? */
+
+	if (card->irq)
+		free_irq(card->irq, card);
+
+	if (card->win0base) {
+		iounmap(card->win0base);
+		release_mem_region(card->phy_winbase, C101_MAPPED_RAM_SIZE);
+	}
+
+	free_netdev(card->dev);
+
+	kfree(card);
+}
+
+static const struct net_device_ops c101_ops = {
+	.ndo_open       = c101_open,
+	.ndo_stop       = c101_close,
+	.ndo_change_mtu = hdlc_change_mtu,
+	.ndo_start_xmit = hdlc_start_xmit,
+	.ndo_do_ioctl   = c101_ioctl,
+};
+
+static int __init c101_run(unsigned long irq, unsigned long winbase)
+{
+	struct net_device *dev;
+	hdlc_device *hdlc;
+	card_t *card;
+	int result;
+
+	if (irq<3 || irq>15 || irq == 6) /* FIXME */ {
+		pr_err("invalid IRQ value\n");
+		return -ENODEV;
+	}
+
+	if (winbase < 0xC0000 || winbase > 0xDFFFF || (winbase & 0x3FFF) !=0) {
+		pr_err("invalid RAM value\n");
+		return -ENODEV;
+	}
+
+	card = kzalloc(sizeof(card_t), GFP_KERNEL);
+	if (card == NULL)
+		return -ENOBUFS;
+
+	card->dev = alloc_hdlcdev(card);
+	if (!card->dev) {
+		pr_err("unable to allocate memory\n");
+		kfree(card);
+		return -ENOBUFS;
+	}
+
+	if (request_irq(irq, sca_intr, 0, devname, card)) {
+		pr_err("could not allocate IRQ\n");
+		c101_destroy_card(card);
+		return -EBUSY;
+	}
+	card->irq = irq;
+
+	if (!request_mem_region(winbase, C101_MAPPED_RAM_SIZE, devname)) {
+		pr_err("could not request RAM window\n");
+		c101_destroy_card(card);
+		return -EBUSY;
+	}
+	card->phy_winbase = winbase;
+	card->win0base = ioremap(winbase, C101_MAPPED_RAM_SIZE);
+	if (!card->win0base) {
+		pr_err("could not map I/O address\n");
+		c101_destroy_card(card);
+		return -EFAULT;
+	}
+
+	card->tx_ring_buffers = TX_RING_BUFFERS;
+	card->rx_ring_buffers = RX_RING_BUFFERS;
+	card->buff_offset = C101_WINDOW_SIZE; /* Bytes 1D00-1FFF reserved */
+
+	readb(card->win0base + C101_PAGE); /* Resets SCA? */
+	udelay(100);
+	writeb(0, card->win0base + C101_PAGE);
+	writeb(0, card->win0base + C101_DTR); /* Power-up for RAM? */
+
+	sca_init(card, 0);
+
+	dev = port_to_dev(card);
+	hdlc = dev_to_hdlc(dev);
+
+	spin_lock_init(&card->lock);
+	dev->irq = irq;
+	dev->mem_start = winbase;
+	dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1;
+	dev->tx_queue_len = 50;
+	dev->netdev_ops = &c101_ops;
+	hdlc->attach = sca_attach;
+	hdlc->xmit = sca_xmit;
+	card->settings.clock_type = CLOCK_EXT;
+
+	result = register_hdlc_device(dev);
+	if (result) {
+		pr_warn("unable to register hdlc device\n");
+		c101_destroy_card(card);
+		return result;
+	}
+
+	sca_init_port(card); /* Set up C101 memory */
+	set_carrier(card);
+
+	netdev_info(dev, "Moxa C101 on IRQ%u, using %u TX + %u RX packet rings\n",
+		    card->irq, card->tx_ring_buffers, card->rx_ring_buffers);
+
+	*new_card = card;
+	new_card = &card->next_card;
+	return 0;
+}
+
+
+
+static int __init c101_init(void)
+{
+	if (hw == NULL) {
+#ifdef MODULE
+		pr_info("no card initialized\n");
+#endif
+		return -EINVAL;	/* no parameters specified, abort */
+	}
+
+	pr_info("%s\n", version);
+
+	do {
+		unsigned long irq, ram;
+
+		irq = simple_strtoul(hw, &hw, 0);
+
+		if (*hw++ != ',')
+			break;
+		ram = simple_strtoul(hw, &hw, 0);
+
+		if (*hw == ':' || *hw == '\x0')
+			c101_run(irq, ram);
+
+		if (*hw == '\x0')
+			return first_card ? 0 : -EINVAL;
+	}while(*hw++ == ':');
+
+	pr_err("invalid hardware parameters\n");
+	return first_card ? 0 : -EINVAL;
+}
+
+
+static void __exit c101_cleanup(void)
+{
+	card_t *card = first_card;
+
+	while (card) {
+		card_t *ptr = card;
+		card = card->next_card;
+		unregister_hdlc_device(port_to_dev(ptr));
+		c101_destroy_card(ptr);
+	}
+}
+
+
+module_init(c101_init);
+module_exit(c101_cleanup);
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("Moxa C101 serial port driver");
+MODULE_LICENSE("GPL v2");
+module_param(hw, charp, 0444);
+MODULE_PARM_DESC(hw, "irq,ram:irq,...");
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/cosa.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/cosa.c
new file mode 100644
index 0000000..6aed238
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/cosa.c
@@ -0,0 +1,2054 @@
+/* $Id: cosa.c,v 1.31 2000/03/08 17:47:16 kas Exp $ */
+
+/*
+ *  Copyright (C) 1995-1997  Jan "Yenya" Kasprzak <kas@fi.muni.cz>
+ *  Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * The driver for the SRP and COSA synchronous serial cards.
+ *
+ * HARDWARE INFO
+ *
+ * Both cards are developed at the Institute of Computer Science,
+ * Masaryk University (http://www.ics.muni.cz/). The hardware is
+ * developed by Jiri Novotny <novotny@ics.muni.cz>. More information
+ * and the photo of both cards is available at
+ * http://www.pavoucek.cz/cosa.html. The card documentation, firmwares
+ * and other goods can be downloaded from ftp://ftp.ics.muni.cz/pub/cosa/.
+ * For Linux-specific utilities, see below in the "Software info" section.
+ * If you want to order the card, contact Jiri Novotny.
+ *
+ * The SRP (serial port?, the Czech word "srp" means "sickle") card
+ * is a 2-port intelligent (with its own 8-bit CPU) synchronous serial card
+ * with V.24 interfaces up to 80kb/s each.
+ *
+ * The COSA (communication serial adapter?, the Czech word "kosa" means
+ * "scythe") is a next-generation sync/async board with two interfaces
+ * - currently any of V.24, X.21, V.35 and V.36 can be selected.
+ * It has a 16-bit SAB80166 CPU and can do up to 10 Mb/s per channel.
+ * The 8-channel version is in development.
+ *
+ * Both types have downloadable firmware and communicate via ISA DMA.
+ * COSA can also be a bus-mastering device.
+ *
+ * SOFTWARE INFO
+ *
+ * The homepage of the Linux driver is at http://www.fi.muni.cz/~kas/cosa/.
+ * The CVS tree of Linux driver can be viewed there, as well as the
+ * firmware binaries and user-space utilities for downloading the firmware
+ * into the card and setting up the card.
+ *
+ * The Linux driver (unlike the present *BSD drivers :-) can work even
+ * with a COSA and an SRP in one computer and allows each channel to work
+ * in one of the two modes (character or network device).
+ *
+ * AUTHOR
+ *
+ * The Linux driver was written by Jan "Yenya" Kasprzak <kas@fi.muni.cz>.
+ *
+ * You can mail me bugfixes and even success reports. I am especially
+ * interested in the SMP and/or multi-channel success/failure reports
+ * (I wonder if I did the locking properly :-).
+ *
+ * THE AUTHOR USED THE FOLLOWING SOURCES WHEN PROGRAMMING THE DRIVER
+ *
+ * The COSA/SRP NetBSD driver by Zdenek Salvet and Ivos Cernohlavek
+ * The skeleton.c by Donald Becker
+ * The SDL Riscom/N2 driver by Mike Natale
+ * The Comtrol Hostess SV11 driver by Alan Cox
+ * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/hdlc.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#undef COSA_SLOW_IO	/* for testing purposes only */
+
+#include "cosa.h"
+
+/* Maximum length of the identification string. */
+#define COSA_MAX_ID_STRING	128
+
+/* Maximum length of the channel name */
+#define COSA_MAX_NAME		(sizeof("cosaXXXcXXX")+1)
+
+/* Per-channel data structure */
+
+struct channel_data {
+	int usage;	/* Usage count; >0 for chrdev, -1 for netdev */
+	int num;	/* Number of the channel */
+	struct cosa_data *cosa;	/* Pointer to the per-card structure */
+	int txsize;	/* Size of transmitted data */
+	char *txbuf;	/* Transmit buffer */
+	char name[COSA_MAX_NAME];	/* channel name */
+
+	/* The HW layer interface */
+	/* routine called from the RX interrupt */
+	char *(*setup_rx)(struct channel_data *channel, int size);
+	/* routine called when the RX is done (from the EOT interrupt) */
+	int (*rx_done)(struct channel_data *channel);
+	/* routine called when the TX is done (from the EOT interrupt) */
+	int (*tx_done)(struct channel_data *channel, int size);
+
+	/* Character device parts */
+	struct mutex rlock;
+	struct semaphore wsem;
+	char *rxdata;
+	int rxsize;
+	wait_queue_head_t txwaitq, rxwaitq;
+	int tx_status, rx_status;
+
+	/* generic HDLC device parts */
+	struct net_device *netdev;
+	struct sk_buff *rx_skb, *tx_skb;
+};
+
+/* cosa->firmware_status bits */
+#define COSA_FW_RESET		(1<<0)	/* Is the ROM monitor active? */
+#define COSA_FW_DOWNLOAD	(1<<1)	/* Is the microcode downloaded? */
+#define COSA_FW_START		(1<<2)	/* Is the microcode running? */
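+/*
+ * These bits track the firmware bring-up sequence performed from user space
+ * via the ioctl helpers declared below (cosa_reset(), cosa_download(),
+ * cosa_start()); both the network and character-device paths refuse to run
+ * until COSA_FW_START is set ("start the firmware first").
+ */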
+
+struct cosa_data {
+	int num;			/* Card number */
+	char name[COSA_MAX_NAME];	/* Card name - e.g "cosa0" */
+	unsigned int datareg, statusreg;	/* I/O ports */
+	unsigned short irq, dma;	/* IRQ and DMA number */
+	unsigned short startaddr;	/* Firmware start address */
+	unsigned short busmaster;	/* Use busmastering? */
+	int nchannels;			/* # of channels on this card */
+	int driver_status;		/* For communicating with firmware */
+	int firmware_status;		/* Downloaded, reset, etc. */
+	unsigned long rxbitmap, txbitmap;/* Bitmap of channels that are willing to send/receive data */
+	unsigned long rxtx;		/* RX or TX in progress? */
+	int enabled;
+	int usage;				/* usage count */
+	int txchan, txsize, rxsize;
+	struct channel_data *rxchan;
+	char *bouncebuf;
+	char *txbuf, *rxbuf;
+	struct channel_data *chan;
+	spinlock_t lock;	/* For exclusive operations on this structure */
+	char id_string[COSA_MAX_ID_STRING];	/* ROM monitor ID string */
+	char *type;				/* card type */
+};
+
+/*
+ * Define this if you want all the possible ports to be autoprobed.
+ * It is here but it probably is not a good idea to use this.
+ */
+/* #define COSA_ISA_AUTOPROBE	1 */
+
+/*
+ * Character device major number. 117 was allocated for us.
+ * The value of 0 means to allocate a first free one.
+ */
+static DEFINE_MUTEX(cosa_chardev_mutex);
+static int cosa_major = 117;
+
+/*
+ * Encoding of the minor numbers:
+ * The lowest CARD_MINOR_BITS bits give the channel on a single card,
+ * the highest bits give the card number.
+ */
+#define CARD_MINOR_BITS	4	/* How many bits in minor number are reserved
+				 * for the single card */
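+/*
+ * Example (derived from the comment above): with CARD_MINOR_BITS == 4,
+ * minor number 0x23 refers to card 2 (0x23 >> CARD_MINOR_BITS) and
+ * channel 3 (0x23 & ((1 << CARD_MINOR_BITS) - 1)).
+ */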
+/*
+ * The following depends on CARD_MINOR_BITS. Unfortunately, the "MODULE_STRING"
+ * macro doesn't like anything other than the raw number as an argument :-(
+ */
+#define MAX_CARDS	16
+/* #define MAX_CARDS	(1 << (8-CARD_MINOR_BITS)) */
+
+#define DRIVER_RX_READY		0x0001
+#define DRIVER_TX_READY		0x0002
+#define DRIVER_TXMAP_SHIFT	2
+#define DRIVER_TXMAP_MASK	0x0c	/* FIXME: 0xfc for 8-channel version */
+
+/*
+ * for cosa->rxtx - indicates whether either transmit or receive is
+ * in progress. These values are the numbers of the respective bits.
+ */
+#define TXBIT 0
+#define RXBIT 1
+#define IRQBIT 2
+
+#define COSA_MTU 2000	/* FIXME: I don't know this exactly */
+
+#undef DEBUG_DATA //1	/* Dump the data read or written to the channel */
+#undef DEBUG_IRQS //1	/* Print the message when the IRQ is received */
+#undef DEBUG_IO   //1	/* Dump the I/O traffic */
+
+#define TX_TIMEOUT	(5*HZ)
+
+/* Maybe the following should be allocated dynamically */
+static struct cosa_data cosa_cards[MAX_CARDS];
+static int nr_cards;
+
+#ifdef COSA_ISA_AUTOPROBE
+static int io[MAX_CARDS+1]  = { 0x220, 0x228, 0x210, 0x218, 0, };
+/* NOTE: DMA is not autoprobed!!! */
+static int dma[MAX_CARDS+1] = { 1, 7, 1, 7, 1, 7, 1, 7, 0, };
+#else
+static int io[MAX_CARDS+1];
+static int dma[MAX_CARDS+1];
+#endif
+/* IRQ can be safely autoprobed */
+static int irq[MAX_CARDS+1] = { -1, -1, -1, -1, -1, -1, 0, };
+
+/* for class stuff*/
+static struct class *cosa_class;
+
+#ifdef MODULE
+module_param_array(io, int, NULL, 0);
+MODULE_PARM_DESC(io, "The I/O bases of the COSA or SRP cards");
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(irq, "The IRQ lines of the COSA or SRP cards");
+module_param_array(dma, int, NULL, 0);
+MODULE_PARM_DESC(dma, "The DMA channels of the COSA or SRP cards");
+
+MODULE_AUTHOR("Jan \"Yenya\" Kasprzak, <kas@fi.muni.cz>");
+MODULE_DESCRIPTION("Modular driver for the COSA or SRP synchronous card");
+MODULE_LICENSE("GPL");
+#endif
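+/*
+ * Illustrative example (hypothetical): a modular setup matching the autoprobe
+ * defaults above would be "modprobe cosa io=0x220 dma=1 irq=-1"; a negative
+ * IRQ requests IRQ autoprobing in cosa_probe(), while DMA is never autoprobed.
+ */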
+
+/* I use this mainly for testing purposes */
+#ifdef COSA_SLOW_IO
+#define cosa_outb outb_p
+#define cosa_outw outw_p
+#define cosa_inb  inb_p
+#define cosa_inw  inw_p
+#else
+#define cosa_outb outb
+#define cosa_outw outw
+#define cosa_inb  inb
+#define cosa_inw  inw
+#endif
+
+#define is_8bit(cosa)		(!(cosa->datareg & 0x08))
+
+#define cosa_getstatus(cosa)	(cosa_inb(cosa->statusreg))
+#define cosa_putstatus(cosa, stat)	(cosa_outb(stat, cosa->statusreg))
+#define cosa_getdata16(cosa)	(cosa_inw(cosa->datareg))
+#define cosa_getdata8(cosa)	(cosa_inb(cosa->datareg))
+#define cosa_putdata16(cosa, dt)	(cosa_outw(dt, cosa->datareg))
+#define cosa_putdata8(cosa, dt)	(cosa_outb(dt, cosa->datareg))
+
+/* Initialization stuff */
+static int cosa_probe(int ioaddr, int irq, int dma);
+
+/* HW interface */
+static void cosa_enable_rx(struct channel_data *chan);
+static void cosa_disable_rx(struct channel_data *chan);
+static int cosa_start_tx(struct channel_data *channel, char *buf, int size);
+static void cosa_kick(struct cosa_data *cosa);
+static int cosa_dma_able(struct channel_data *chan, char *buf, int data);
+
+/* Network device stuff */
+static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
+			   unsigned short parity);
+static int cosa_net_open(struct net_device *d);
+static int cosa_net_close(struct net_device *d);
+static void cosa_net_timeout(struct net_device *d);
+static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d);
+static char *cosa_net_setup_rx(struct channel_data *channel, int size);
+static int cosa_net_rx_done(struct channel_data *channel);
+static int cosa_net_tx_done(struct channel_data *channel, int size);
+static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+/* Character device */
+static char *chrdev_setup_rx(struct channel_data *channel, int size);
+static int chrdev_rx_done(struct channel_data *channel);
+static int chrdev_tx_done(struct channel_data *channel, int size);
+static ssize_t cosa_read(struct file *file,
+	char __user *buf, size_t count, loff_t *ppos);
+static ssize_t cosa_write(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos);
+static unsigned int cosa_poll(struct file *file, poll_table *poll);
+static int cosa_open(struct inode *inode, struct file *file);
+static int cosa_release(struct inode *inode, struct file *file);
+static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg);
+#ifdef COSA_FASYNC_WORKING
+static int cosa_fasync(struct inode *inode, struct file *file, int on);
+#endif
+
+static const struct file_operations cosa_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.read		= cosa_read,
+	.write		= cosa_write,
+	.poll		= cosa_poll,
+	.unlocked_ioctl	= cosa_chardev_ioctl,
+	.open		= cosa_open,
+	.release	= cosa_release,
+#ifdef COSA_FASYNC_WORKING
+	.fasync		= cosa_fasync,
+#endif
+};
+
+/* Ioctls */
+static int cosa_start(struct cosa_data *cosa, int address);
+static int cosa_reset(struct cosa_data *cosa);
+static int cosa_download(struct cosa_data *cosa, void __user *a);
+static int cosa_readmem(struct cosa_data *cosa, void __user *a);
+
+/* COSA/SRP ROM monitor */
+static int download(struct cosa_data *cosa, const char __user *data, int addr, int len);
+static int startmicrocode(struct cosa_data *cosa, int address);
+static int readmem(struct cosa_data *cosa, char __user *data, int addr, int len);
+static int cosa_reset_and_read_id(struct cosa_data *cosa, char *id);
+
+/* Auxiliary functions */
+static int get_wait_data(struct cosa_data *cosa);
+static int put_wait_data(struct cosa_data *cosa, int data);
+static int puthexnumber(struct cosa_data *cosa, int number);
+static void put_driver_status(struct cosa_data *cosa);
+static void put_driver_status_nolock(struct cosa_data *cosa);
+
+/* Interrupt handling */
+static irqreturn_t cosa_interrupt(int irq, void *cosa);
+
+/* I/O ops debugging */
+#ifdef DEBUG_IO
+static void debug_data_in(struct cosa_data *cosa, int data);
+static void debug_data_out(struct cosa_data *cosa, int data);
+static void debug_data_cmd(struct cosa_data *cosa, int data);
+static void debug_status_in(struct cosa_data *cosa, int status);
+static void debug_status_out(struct cosa_data *cosa, int status);
+#endif
+
+static inline struct channel_data* dev_to_chan(struct net_device *dev)
+{
+	return (struct channel_data *)dev_to_hdlc(dev)->priv;
+}
+
+/* ---------- Initialization stuff ---------- */
+
+static int __init cosa_init(void)
+{
+	int i, err = 0;
+
+	if (cosa_major > 0) {
+		if (register_chrdev(cosa_major, "cosa", &cosa_fops)) {
+			pr_warn("unable to get major %d\n", cosa_major);
+			err = -EIO;
+			goto out;
+		}
+	} else {
+		if (!(cosa_major=register_chrdev(0, "cosa", &cosa_fops))) {
+			pr_warn("unable to register chardev\n");
+			err = -EIO;
+			goto out;
+		}
+	}
+	for (i=0; i<MAX_CARDS; i++)
+		cosa_cards[i].num = -1;
+	for (i=0; io[i] != 0 && i < MAX_CARDS; i++)
+		cosa_probe(io[i], irq[i], dma[i]);
+	if (!nr_cards) {
+		pr_warn("no devices found\n");
+		unregister_chrdev(cosa_major, "cosa");
+		err = -ENODEV;
+		goto out;
+	}
+	cosa_class = class_create(THIS_MODULE, "cosa");
+	if (IS_ERR(cosa_class)) {
+		err = PTR_ERR(cosa_class);
+		goto out_chrdev;
+	}
+	for (i = 0; i < nr_cards; i++)
+		device_create(cosa_class, NULL, MKDEV(cosa_major, i), NULL,
+			      "cosa%d", i);
+	err = 0;
+	goto out;
+
+out_chrdev:
+	unregister_chrdev(cosa_major, "cosa");
+out:
+	return err;
+}
+module_init(cosa_init);
+
+static void __exit cosa_exit(void)
+{
+	struct cosa_data *cosa;
+	int i;
+
+	for (i = 0; i < nr_cards; i++)
+		device_destroy(cosa_class, MKDEV(cosa_major, i));
+	class_destroy(cosa_class);
+
+	for (cosa = cosa_cards; nr_cards--; cosa++) {
+		/* Clean up the per-channel data */
+		for (i = 0; i < cosa->nchannels; i++) {
+			/* Chardev driver has no alloc'd per-channel data */
+			unregister_hdlc_device(cosa->chan[i].netdev);
+			free_netdev(cosa->chan[i].netdev);
+		}
+		/* Clean up the per-card data */
+		kfree(cosa->chan);
+		kfree(cosa->bouncebuf);
+		free_irq(cosa->irq, cosa);
+		free_dma(cosa->dma);
+		release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4);
+	}
+	unregister_chrdev(cosa_major, "cosa");
+}
+module_exit(cosa_exit);
+
+static const struct net_device_ops cosa_ops = {
+	.ndo_open       = cosa_net_open,
+	.ndo_stop       = cosa_net_close,
+	.ndo_change_mtu = hdlc_change_mtu,
+	.ndo_start_xmit = hdlc_start_xmit,
+	.ndo_do_ioctl   = cosa_net_ioctl,
+	.ndo_tx_timeout = cosa_net_timeout,
+};
+
+static int cosa_probe(int base, int irq, int dma)
+{
+	struct cosa_data *cosa = cosa_cards+nr_cards;
+	int i, err = 0;
+
+	memset(cosa, 0, sizeof(struct cosa_data));
+
+	/* Checking validity of parameters: */
+	/* IRQ should be 2-7 or 10-15; negative IRQ means autoprobe */
+	if ((irq >= 0  && irq < 2) || irq > 15 || (irq < 10 && irq > 7)) {
+		pr_info("invalid IRQ %d\n", irq);
+		return -1;
+	}
+	/* I/O address should be between 0x100 and 0x3ff and should be
+	 * multiple of 8. */
+	if (base < 0x100 || base > 0x3ff || base & 0x7) {
+		pr_info("invalid I/O address 0x%x\n", base);
+		return -1;
+	}
+	/* DMA should be 0,1 or 3-7 */
+	if (dma < 0 || dma == 4 || dma > 7) {
+		pr_info("invalid DMA %d\n", dma);
+		return -1;
+	}
+	/* and finally, on a 16-bit COSA the DMA should be 4-7 and the
+	 * I/O base should not be a multiple of 0x10 */
+	if (((base & 0x8) && dma < 4) || (!(base & 0x8) && dma > 3)) {
+		pr_info("8/16 bit base and DMA mismatch (base=0x%x, dma=%d)\n",
+			base, dma);
+		return -1;
+	}
+
+	cosa->dma = dma;
+	cosa->datareg = base;
+	cosa->statusreg = is_8bit(cosa)?base+1:base+2;
+	spin_lock_init(&cosa->lock);
+
+	if (!request_region(base, is_8bit(cosa)?2:4,"cosa"))
+		return -1;
+	
+	if (cosa_reset_and_read_id(cosa, cosa->id_string) < 0) {
+		printk(KERN_DEBUG "probe at 0x%x failed.\n", base);
+		err = -1;
+		goto err_out;
+	}
+
+	/* Test the validity of identification string */
+	if (!strncmp(cosa->id_string, "SRP", 3))
+		cosa->type = "srp";
+	else if (!strncmp(cosa->id_string, "COSA", 4))
+		cosa->type = is_8bit(cosa)? "cosa8": "cosa16";
+	else {
+/* Print a warning only if we are not autoprobing */
+#ifndef COSA_ISA_AUTOPROBE
+		pr_info("valid signature not found at 0x%x\n", base);
+#endif
+		err = -1;
+		goto err_out;
+	}
+	/* Update the name of the region now we know the type of card */ 
+	release_region(base, is_8bit(cosa)?2:4);
+	if (!request_region(base, is_8bit(cosa)?2:4, cosa->type)) {
+		printk(KERN_DEBUG "changing name at 0x%x failed.\n", base);
+		return -1;
+	}
+
+	/* Now do IRQ autoprobe */
+	if (irq < 0) {
+		unsigned long irqs;
+/*		pr_info("IRQ autoprobe\n"); */
+		irqs = probe_irq_on();
+		/* 
+		 * Enable interrupt on tx buffer empty (it sure is) 
+		 * really sure ?
+		 * FIXME: When this code is not used as module, we should
+		 * probably call udelay() instead of the interruptible sleep.
+		 */
+		set_current_state(TASK_INTERRUPTIBLE);
+		cosa_putstatus(cosa, SR_TX_INT_ENA);
+		schedule_timeout(30);
+		irq = probe_irq_off(irqs);
+		/* Disable all IRQs from the card */
+		cosa_putstatus(cosa, 0);
+		/* Empty the received data register */
+		cosa_getdata8(cosa);
+
+		if (irq < 0) {
+			pr_info("multiple interrupts obtained (%d, board at 0x%x)\n",
+				irq, cosa->datareg);
+			err = -1;
+			goto err_out;
+		}
+		if (irq == 0) {
+			pr_info("no interrupt obtained (board at 0x%x)\n",
+				cosa->datareg);
+		/*	return -1; */
+		}
+	}
+
+	cosa->irq = irq;
+	cosa->num = nr_cards;
+	cosa->usage = 0;
+	cosa->nchannels = 2;	/* FIXME: how to determine this? */
+
+	if (request_irq(cosa->irq, cosa_interrupt, 0, cosa->type, cosa)) {
+		err = -1;
+		goto err_out;
+	}
+	if (request_dma(cosa->dma, cosa->type)) {
+		err = -1;
+		goto err_out1;
+	}
+	
+	cosa->bouncebuf = kmalloc(COSA_MTU, GFP_KERNEL|GFP_DMA);
+	if (!cosa->bouncebuf) {
+		err = -ENOMEM;
+		goto err_out2;
+	}
+	sprintf(cosa->name, "cosa%d", cosa->num);
+
+	/* Initialize the per-channel data */
+	cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL);
+	if (!cosa->chan) {
+		err = -ENOMEM;
+		goto err_out3;
+	}
+
+	for (i = 0; i < cosa->nchannels; i++) {
+		struct channel_data *chan = &cosa->chan[i];
+
+		chan->cosa = cosa;
+		chan->num = i;
+		sprintf(chan->name, "cosa%dc%d", chan->cosa->num, i);
+
+		/* Initialize the chardev data structures */
+		mutex_init(&chan->rlock);
+		sema_init(&chan->wsem, 1);
+
+		/* Register the network interface */
+		if (!(chan->netdev = alloc_hdlcdev(chan))) {
+			pr_warn("%s: alloc_hdlcdev failed\n", chan->name);
+			goto err_hdlcdev;
+		}
+		dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
+		dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx;
+		chan->netdev->netdev_ops = &cosa_ops;
+		chan->netdev->watchdog_timeo = TX_TIMEOUT;
+		chan->netdev->base_addr = chan->cosa->datareg;
+		chan->netdev->irq = chan->cosa->irq;
+		chan->netdev->dma = chan->cosa->dma;
+		if (register_hdlc_device(chan->netdev)) {
+			netdev_warn(chan->netdev,
+				    "register_hdlc_device() failed\n");
+			free_netdev(chan->netdev);
+			goto err_hdlcdev;
+		}
+	}
+
+	pr_info("cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n",
+		cosa->num, cosa->id_string, cosa->type,
+		cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels);
+
+	return nr_cards++;
+
+err_hdlcdev:
+	while (i-- > 0) {
+		unregister_hdlc_device(cosa->chan[i].netdev);
+		free_netdev(cosa->chan[i].netdev);
+	}
+	kfree(cosa->chan);
+err_out3:
+	kfree(cosa->bouncebuf);
+err_out2:
+	free_dma(cosa->dma);
+err_out1:
+	free_irq(cosa->irq, cosa);
+err_out:
+	release_region(cosa->datareg,is_8bit(cosa)?2:4);
+	pr_notice("cosa%d: allocating resources failed\n", cosa->num);
+	return err;
+}
+
+
+/*---------- network device ---------- */
+
+static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
+			   unsigned short parity)
+{
+	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
+		return 0;
+	return -EINVAL;
+}
+
+static int cosa_net_open(struct net_device *dev)
+{
+	struct channel_data *chan = dev_to_chan(dev);
+	int err;
+	unsigned long flags;
+
+	if (!(chan->cosa->firmware_status & COSA_FW_START)) {
+		pr_notice("%s: start the firmware first (status %d)\n",
+			  chan->cosa->name, chan->cosa->firmware_status);
+		return -EPERM;
+	}
+	spin_lock_irqsave(&chan->cosa->lock, flags);
+	if (chan->usage != 0) {
+		pr_warn("%s: cosa_net_open called with usage count %d\n",
+			chan->name, chan->usage);
+		spin_unlock_irqrestore(&chan->cosa->lock, flags);
+		return -EBUSY;
+	}
+	chan->setup_rx = cosa_net_setup_rx;
+	chan->tx_done = cosa_net_tx_done;
+	chan->rx_done = cosa_net_rx_done;
+	chan->usage = -1;
+	chan->cosa->usage++;
+	spin_unlock_irqrestore(&chan->cosa->lock, flags);
+
+	err = hdlc_open(dev);
+	if (err) {
+		spin_lock_irqsave(&chan->cosa->lock, flags);
+		chan->usage = 0;
+		chan->cosa->usage--;
+		spin_unlock_irqrestore(&chan->cosa->lock, flags);
+		return err;
+	}
+
+	netif_start_queue(dev);
+	cosa_enable_rx(chan);
+	return 0;
+}
+
+static netdev_tx_t cosa_net_tx(struct sk_buff *skb,
+				     struct net_device *dev)
+{
+	struct channel_data *chan = dev_to_chan(dev);
+
+	netif_stop_queue(dev);
+
+	chan->tx_skb = skb;
+	cosa_start_tx(chan, skb->data, skb->len);
+	return NETDEV_TX_OK;
+}
+
+static void cosa_net_timeout(struct net_device *dev)
+{
+	struct channel_data *chan = dev_to_chan(dev);
+
+	if (test_bit(RXBIT, &chan->cosa->rxtx)) {
+		chan->netdev->stats.rx_errors++;
+		chan->netdev->stats.rx_missed_errors++;
+	} else {
+		chan->netdev->stats.tx_errors++;
+		chan->netdev->stats.tx_aborted_errors++;
+	}
+	cosa_kick(chan->cosa);
+	if (chan->tx_skb) {
+		dev_kfree_skb(chan->tx_skb);
+		chan->tx_skb = NULL;
+	}
+	netif_wake_queue(dev);
+}
+
+static int cosa_net_close(struct net_device *dev)
+{
+	struct channel_data *chan = dev_to_chan(dev);
+	unsigned long flags;
+
+	netif_stop_queue(dev);
+	hdlc_close(dev);
+	cosa_disable_rx(chan);
+	spin_lock_irqsave(&chan->cosa->lock, flags);
+	if (chan->rx_skb) {
+		kfree_skb(chan->rx_skb);
+		chan->rx_skb = NULL;
+	}
+	if (chan->tx_skb) {
+		kfree_skb(chan->tx_skb);
+		chan->tx_skb = NULL;
+	}
+	chan->usage = 0;
+	chan->cosa->usage--;
+	spin_unlock_irqrestore(&chan->cosa->lock, flags);
+	return 0;
+}
+
+static char *cosa_net_setup_rx(struct channel_data *chan, int size)
+{
+	/*
+	 * We can safely fall back to non-dma-able memory, because we have
+	 * the cosa->bouncebuf pre-allocated.
+	 */
+	kfree_skb(chan->rx_skb);
+	chan->rx_skb = dev_alloc_skb(size);
+	if (chan->rx_skb == NULL) {
+		pr_notice("%s: Memory squeeze, dropping packet\n", chan->name);
+		chan->netdev->stats.rx_dropped++;
+		return NULL;
+	}
+	chan->netdev->trans_start = jiffies;
+	return skb_put(chan->rx_skb, size);
+}
+
+static int cosa_net_rx_done(struct channel_data *chan)
+{
+	if (!chan->rx_skb) {
+		pr_warn("%s: rx_done with empty skb!\n", chan->name);
+		chan->netdev->stats.rx_errors++;
+		chan->netdev->stats.rx_frame_errors++;
+		return 0;
+	}
+	chan->rx_skb->protocol = hdlc_type_trans(chan->rx_skb, chan->netdev);
+	chan->rx_skb->dev = chan->netdev;
+	skb_reset_mac_header(chan->rx_skb);
+	chan->netdev->stats.rx_packets++;
+	chan->netdev->stats.rx_bytes += chan->cosa->rxsize;
+	netif_rx(chan->rx_skb);
+	chan->rx_skb = NULL;
+	return 0;
+}
+
+/* ARGSUSED */
+static int cosa_net_tx_done(struct channel_data *chan, int size)
+{
+	if (!chan->tx_skb) {
+		pr_warn("%s: tx_done with empty skb!\n", chan->name);
+		chan->netdev->stats.tx_errors++;
+		chan->netdev->stats.tx_aborted_errors++;
+		return 1;
+	}
+	dev_kfree_skb_irq(chan->tx_skb);
+	chan->tx_skb = NULL;
+	chan->netdev->stats.tx_packets++;
+	chan->netdev->stats.tx_bytes += size;
+	netif_wake_queue(chan->netdev);
+	return 1;
+}
+
+/*---------- Character device ---------- */
+
+static ssize_t cosa_read(struct file *file,
+	char __user *buf, size_t count, loff_t *ppos)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long flags;
+	struct channel_data *chan = file->private_data;
+	struct cosa_data *cosa = chan->cosa;
+	char *kbuf;
+
+	if (!(cosa->firmware_status & COSA_FW_START)) {
+		pr_notice("%s: start the firmware first (status %d)\n",
+			  cosa->name, cosa->firmware_status);
+		return -EPERM;
+	}
+	if (mutex_lock_interruptible(&chan->rlock))
+		return -ERESTARTSYS;
+	
+	if ((chan->rxdata = kmalloc(COSA_MTU, GFP_DMA|GFP_KERNEL)) == NULL) {
+		pr_info("%s: cosa_read() - OOM\n", cosa->name);
+		mutex_unlock(&chan->rlock);
+		return -ENOMEM;
+	}
+
+	chan->rx_status = 0;
+	cosa_enable_rx(chan);
+	spin_lock_irqsave(&cosa->lock, flags);
+	add_wait_queue(&chan->rxwaitq, &wait);
+	while (!chan->rx_status) {
+		current->state = TASK_INTERRUPTIBLE;
+		spin_unlock_irqrestore(&cosa->lock, flags);
+		schedule();
+		spin_lock_irqsave(&cosa->lock, flags);
+		if (signal_pending(current) && chan->rx_status == 0) {
+			chan->rx_status = 1;
+			remove_wait_queue(&chan->rxwaitq, &wait);
+			current->state = TASK_RUNNING;
+			spin_unlock_irqrestore(&cosa->lock, flags);
+			mutex_unlock(&chan->rlock);
+			return -ERESTARTSYS;
+		}
+	}
+	remove_wait_queue(&chan->rxwaitq, &wait);
+	current->state = TASK_RUNNING;
+	kbuf = chan->rxdata;
+	count = chan->rxsize;
+	spin_unlock_irqrestore(&cosa->lock, flags);
+	mutex_unlock(&chan->rlock);
+
+	if (copy_to_user(buf, kbuf, count)) {
+		kfree(kbuf);
+		return -EFAULT;
+	}
+	kfree(kbuf);
+	return count;
+}
+
+static char *chrdev_setup_rx(struct channel_data *chan, int size)
+{
+	/* Expect size <= COSA_MTU */
+	chan->rxsize = size;
+	return chan->rxdata;
+}
+
+static int chrdev_rx_done(struct channel_data *chan)
+{
+	if (chan->rx_status) { /* Reader has died */
+		kfree(chan->rxdata);
+		up(&chan->wsem);
+	}
+	chan->rx_status = 1;
+	wake_up_interruptible(&chan->rxwaitq);
+	return 1;
+}
+
+
+static ssize_t cosa_write(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	struct channel_data *chan = file->private_data;
+	struct cosa_data *cosa = chan->cosa;
+	unsigned long flags;
+	char *kbuf;
+
+	if (!(cosa->firmware_status & COSA_FW_START)) {
+		pr_notice("%s: start the firmware first (status %d)\n",
+			  cosa->name, cosa->firmware_status);
+		return -EPERM;
+	}
+	if (down_interruptible(&chan->wsem))
+		return -ERESTARTSYS;
+
+	if (count > COSA_MTU)
+		count = COSA_MTU;
+	
+	/* Allocate the buffer */
+	if ((kbuf = kmalloc(count, GFP_KERNEL|GFP_DMA)) == NULL) {
+		pr_notice("%s: cosa_write() OOM - dropping packet\n",
+			  cosa->name);
+		up(&chan->wsem);
+		return -ENOMEM;
+	}
+	if (copy_from_user(kbuf, buf, count)) {
+		up(&chan->wsem);
+		kfree(kbuf);
+		return -EFAULT;
+	}
+	chan->tx_status=0;
+	cosa_start_tx(chan, kbuf, count);
+
+	spin_lock_irqsave(&cosa->lock, flags);
+	add_wait_queue(&chan->txwaitq, &wait);
+	while (!chan->tx_status) {
+		current->state = TASK_INTERRUPTIBLE;
+		spin_unlock_irqrestore(&cosa->lock, flags);
+		schedule();
+		spin_lock_irqsave(&cosa->lock, flags);
+		if (signal_pending(current) && chan->tx_status == 0) {
+			chan->tx_status = 1;
+			remove_wait_queue(&chan->txwaitq, &wait);
+			current->state = TASK_RUNNING;
+			chan->tx_status = 1;
+			spin_unlock_irqrestore(&cosa->lock, flags);
+			up(&chan->wsem);
+			return -ERESTARTSYS;
+		}
+	}
+	remove_wait_queue(&chan->txwaitq, &wait);
+	current->state = TASK_RUNNING;
+	up(&chan->wsem);
+	spin_unlock_irqrestore(&cosa->lock, flags);
+	kfree(kbuf);
+	return count;
+}
+
+static int chrdev_tx_done(struct channel_data *chan, int size)
+{
+	if (chan->tx_status) { /* Writer was interrupted */
+		kfree(chan->txbuf);
+		up(&chan->wsem);
+	}
+	chan->tx_status = 1;
+	wake_up_interruptible(&chan->txwaitq);
+	return 1;
+}
+
+static unsigned int cosa_poll(struct file *file, poll_table *poll)
+{
+	pr_info("cosa_poll is here\n");
+	return 0;
+}
+
+static int cosa_open(struct inode *inode, struct file *file)
+{
+	struct cosa_data *cosa;
+	struct channel_data *chan;
+	unsigned long flags;
+	int n;
+	int ret = 0;
+
+	mutex_lock(&cosa_chardev_mutex);
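+	/*
+	 * The character device minor number encodes both the card and the
+	 * channel: the bits above CARD_MINOR_BITS select the card, and the
+	 * low CARD_MINOR_BITS bits select the channel on that card.
+	 */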
+	if ((n=iminor(file->f_path.dentry->d_inode)>>CARD_MINOR_BITS)
+		>= nr_cards) {
+		ret = -ENODEV;
+		goto out;
+	}
+	cosa = cosa_cards+n;
+
+	if ((n=iminor(file->f_path.dentry->d_inode)
+		& ((1<<CARD_MINOR_BITS)-1)) >= cosa->nchannels) {
+		ret = -ENODEV;
+		goto out;
+	}
+	chan = cosa->chan + n;
+	
+	file->private_data = chan;
+
+	spin_lock_irqsave(&cosa->lock, flags);
+
+	if (chan->usage < 0) { /* in netdev mode */
+		spin_unlock_irqrestore(&cosa->lock, flags);
+		ret = -EBUSY;
+		goto out;
+	}
+	cosa->usage++;
+	chan->usage++;
+
+	chan->tx_done = chrdev_tx_done;
+	chan->setup_rx = chrdev_setup_rx;
+	chan->rx_done = chrdev_rx_done;
+	spin_unlock_irqrestore(&cosa->lock, flags);
+out:
+	mutex_unlock(&cosa_chardev_mutex);
+	return ret;
+}
+
+static int cosa_release(struct inode *inode, struct file *file)
+{
+	struct channel_data *channel = file->private_data;
+	struct cosa_data *cosa;
+	unsigned long flags;
+
+	cosa = channel->cosa;
+	spin_lock_irqsave(&cosa->lock, flags);
+	cosa->usage--;
+	channel->usage--;
+	spin_unlock_irqrestore(&cosa->lock, flags);
+	return 0;
+}
+
+#ifdef COSA_FASYNC_WORKING
+static struct fasync_struct *fasync[256] = { NULL, };
+
+/* To be done ... */
+static int cosa_fasync(struct inode *inode, struct file *file, int on)
+{
+        int port = iminor(inode);
+
+	return fasync_helper(inode, file, on, &fasync[port]);
+}
+#endif
+
+
+/* ---------- Ioctls ---------- */
+
+/*
+ * Ioctl subroutines can safely be made inline, because they are called
+ * only from cosa_ioctl().
+ */
+static inline int cosa_reset(struct cosa_data *cosa)
+{
+	char idstring[COSA_MAX_ID_STRING];
+	if (cosa->usage > 1)
+		pr_info("cosa%d: WARNING: reset requested with cosa->usage > 1 (%d). Odd things may happen.\n",
+			cosa->num, cosa->usage);
+	cosa->firmware_status &= ~(COSA_FW_RESET|COSA_FW_START);
+	if (cosa_reset_and_read_id(cosa, idstring) < 0) {
+		pr_notice("cosa%d: reset failed\n", cosa->num);
+		return -EIO;
+	}
+	pr_info("cosa%d: resetting device: %s\n", cosa->num, idstring);
+	cosa->firmware_status |= COSA_FW_RESET;
+	return 0;
+}
+
+/* High-level function to download data into COSA memory. Calls download() */
+static inline int cosa_download(struct cosa_data *cosa, void __user *arg)
+{
+	struct cosa_download d;
+	int i;
+
+	if (cosa->usage > 1)
+		pr_info("%s: WARNING: download of microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
+			cosa->name, cosa->usage);
+	if (!(cosa->firmware_status & COSA_FW_RESET)) {
+		pr_notice("%s: reset the card first (status %d)\n",
+			  cosa->name, cosa->firmware_status);
+		return -EPERM;
+	}
+	
+	if (copy_from_user(&d, arg, sizeof(d)))
+		return -EFAULT;
+
+	if (d.addr < 0 || d.addr > COSA_MAX_FIRMWARE_SIZE)
+		return -EINVAL;
+	if (d.len < 0 || d.len > COSA_MAX_FIRMWARE_SIZE)
+		return -EINVAL;
+
+
+	/* If something fails, force the user to reset the card */
+	cosa->firmware_status &= ~(COSA_FW_RESET|COSA_FW_DOWNLOAD);
+
+	i = download(cosa, d.code, d.len, d.addr);
+	if (i < 0) {
+		pr_notice("cosa%d: microcode download failed: %d\n",
+			  cosa->num, i);
+		return -EIO;
+	}
+	pr_info("cosa%d: downloading microcode - 0x%04x bytes at 0x%04x\n",
+		cosa->num, d.len, d.addr);
+	cosa->firmware_status |= COSA_FW_RESET|COSA_FW_DOWNLOAD;
+	return 0;
+}
+
+/* High-level function to read COSA memory. Calls readmem() */
+static inline int cosa_readmem(struct cosa_data *cosa, void __user *arg)
+{
+	struct cosa_download d;
+	int i;
+
+	if (cosa->usage > 1)
+		pr_info("cosa%d: WARNING: readmem requested with cosa->usage > 1 (%d). Odd things may happen.\n",
+			cosa->num, cosa->usage);
+	if (!(cosa->firmware_status & COSA_FW_RESET)) {
+		pr_notice("%s: reset the card first (status %d)\n",
+			  cosa->name, cosa->firmware_status);
+		return -EPERM;
+	}
+
+	if (copy_from_user(&d, arg, sizeof(d)))
+		return -EFAULT;
+
+	/* If something fails, force the user to reset the card */
+	cosa->firmware_status &= ~COSA_FW_RESET;
+
+	i = readmem(cosa, d.code, d.len, d.addr);
+	if (i < 0) {
+		pr_notice("cosa%d: reading memory failed: %d\n", cosa->num, i);
+		return -EIO;
+	}
+	pr_info("cosa%d: reading card memory - 0x%04x bytes at 0x%04x\n",
+		cosa->num, d.len, d.addr);
+	cosa->firmware_status |= COSA_FW_RESET;
+	return 0;
+}
+
+/* High-level function to start microcode. Calls startmicrocode(). */
+static inline int cosa_start(struct cosa_data *cosa, int address)
+{
+	int i;
+
+	if (cosa->usage > 1)
+		pr_info("cosa%d: WARNING: start microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
+			cosa->num, cosa->usage);
+
+	if ((cosa->firmware_status & (COSA_FW_RESET|COSA_FW_DOWNLOAD))
+		!= (COSA_FW_RESET|COSA_FW_DOWNLOAD)) {
+		pr_notice("%s: download the microcode and/or reset the card first (status %d)\n",
+			  cosa->name, cosa->firmware_status);
+		return -EPERM;
+	}
+	cosa->firmware_status &= ~COSA_FW_RESET;
+	if ((i=startmicrocode(cosa, address)) < 0) {
+		pr_notice("cosa%d: start microcode at 0x%04x failed: %d\n",
+			  cosa->num, address, i);
+		return -EIO;
+	}
+	pr_info("cosa%d: starting microcode at 0x%04x\n", cosa->num, address);
+	cosa->startaddr = address;
+	cosa->firmware_status |= COSA_FW_START;
+	return 0;
+}
+		
+/* Buffer of size at least COSA_MAX_ID_STRING is expected */
+static inline int cosa_getidstr(struct cosa_data *cosa, char __user *string)
+{
+	int l = strlen(cosa->id_string)+1;
+	if (copy_to_user(string, cosa->id_string, l))
+		return -EFAULT;
+	return l;
+}
+
+/* Buffer of size at least COSA_MAX_ID_STRING is expected */
+static inline int cosa_gettype(struct cosa_data *cosa, char __user *string)
+{
+	int l = strlen(cosa->type)+1;
+	if (copy_to_user(string, cosa->type, l))
+		return -EFAULT;
+	return l;
+}
+
+static int cosa_ioctl_common(struct cosa_data *cosa,
+	struct channel_data *channel, unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	switch (cmd) {
+	case COSAIORSET:	/* Reset the device */
+		if (!capable(CAP_NET_ADMIN))
+			return -EACCES;
+		return cosa_reset(cosa);
+	case COSAIOSTRT:	/* Start the firmware */
+		if (!capable(CAP_SYS_RAWIO))
+			return -EACCES;
+		return cosa_start(cosa, arg);
+	case COSAIODOWNLD:	/* Download the firmware */
+		if (!capable(CAP_SYS_RAWIO))
+			return -EACCES;
+		
+		return cosa_download(cosa, argp);
+	case COSAIORMEM:
+		if (!capable(CAP_SYS_RAWIO))
+			return -EACCES;
+		return cosa_readmem(cosa, argp);
+	case COSAIORTYPE:
+		return cosa_gettype(cosa, argp);
+	case COSAIORIDSTR:
+		return cosa_getidstr(cosa, argp);
+	case COSAIONRCARDS:
+		return nr_cards;
+	case COSAIONRCHANS:
+		return cosa->nchannels;
+	case COSAIOBMSET:
+		if (!capable(CAP_SYS_RAWIO))
+			return -EACCES;
+		if (is_8bit(cosa))
+			return -EINVAL;
+		if (arg != COSA_BM_OFF && arg != COSA_BM_ON)
+			return -EINVAL;
+		cosa->busmaster = arg;
+		return 0;
+	case COSAIOBMGET:
+		return cosa->busmaster;
+	}
+	return -ENOIOCTLCMD;
+}
+
+static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	int rv;
+	struct channel_data *chan = dev_to_chan(dev);
+	rv = cosa_ioctl_common(chan->cosa, chan, cmd,
+			       (unsigned long)ifr->ifr_data);
+	if (rv != -ENOIOCTLCMD)
+		return rv;
+	return hdlc_ioctl(dev, ifr, cmd);
+}
+
+static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
+							unsigned long arg)
+{
+	struct channel_data *channel = file->private_data;
+	struct cosa_data *cosa;
+	long ret;
+
+	mutex_lock(&cosa_chardev_mutex);
+	cosa = channel->cosa;
+	ret = cosa_ioctl_common(cosa, channel, cmd, arg);
+	mutex_unlock(&cosa_chardev_mutex);
+	return ret;
+}
+
+
+/*---------- HW layer interface ---------- */
+
+/*
+ * The higher layer can bind itself to the HW layer by setting the callbacks
+ * in the channel_data structure and by using these routines.
+ */
+static void cosa_enable_rx(struct channel_data *chan)
+{
+	struct cosa_data *cosa = chan->cosa;
+
+	if (!test_and_set_bit(chan->num, &cosa->rxbitmap))
+		put_driver_status(cosa);
+}
+
+static void cosa_disable_rx(struct channel_data *chan)
+{
+	struct cosa_data *cosa = chan->cosa;
+
+	if (test_and_clear_bit(chan->num, &cosa->rxbitmap))
+		put_driver_status(cosa);
+}
+
+/*
+ * FIXME: This routine should probably check for cosa_start_tx() being called
+ * while the previous transmit is still unfinished. In that case a non-zero
+ * return value should indicate to the caller that queueing up the transmit
+ * has failed.
+ */
+static int cosa_start_tx(struct channel_data *chan, char *buf, int len)
+{
+	struct cosa_data *cosa = chan->cosa;
+	unsigned long flags;
+#ifdef DEBUG_DATA
+	int i;
+
+	pr_info("cosa%dc%d: starting tx(0x%x)",
+		chan->cosa->num, chan->num, len);
+	for (i=0; i<len; i++)
+		pr_cont(" %02x", buf[i]&0xff);
+	pr_cont("\n");
+#endif
+	spin_lock_irqsave(&cosa->lock, flags);
+	chan->txbuf = buf;
+	chan->txsize = len;
+	if (len > COSA_MTU)
+		chan->txsize = COSA_MTU;
+	spin_unlock_irqrestore(&cosa->lock, flags);
+
+	/* Tell the firmware we are ready */
+	set_bit(chan->num, &cosa->txbitmap);
+	put_driver_status(cosa);
+
+	return 0;
+}
+
+static void put_driver_status(struct cosa_data *cosa)
+{
+	unsigned long flags;
+	int status;
+
+	spin_lock_irqsave(&cosa->lock, flags);
+
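+	/*
+	 * Build the driver status byte: DRIVER_RX_READY/DRIVER_TX_READY are
+	 * set when any channel is ready to receive/transmit, and the TXMAP
+	 * field carries the complemented per-channel TX bitmap, shifted by
+	 * DRIVER_TXMAP_SHIFT and masked with DRIVER_TXMAP_MASK.
+	 */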
+	status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
+		| (cosa->txbitmap ? DRIVER_TX_READY : 0)
+		| (cosa->txbitmap? ~(cosa->txbitmap<<DRIVER_TXMAP_SHIFT)
+			&DRIVER_TXMAP_MASK : 0);
+	if (!cosa->rxtx) {
+		if (cosa->rxbitmap|cosa->txbitmap) {
+			if (!cosa->enabled) {
+				cosa_putstatus(cosa, SR_RX_INT_ENA);
+#ifdef DEBUG_IO
+				debug_status_out(cosa, SR_RX_INT_ENA);
+#endif
+				cosa->enabled = 1;
+			}
+		} else if (cosa->enabled) {
+			cosa->enabled = 0;
+			cosa_putstatus(cosa, 0);
+#ifdef DEBUG_IO
+			debug_status_out(cosa, 0);
+#endif
+		}
+		cosa_putdata8(cosa, status);
+#ifdef DEBUG_IO
+		debug_data_cmd(cosa, status);
+#endif
+	}
+	spin_unlock_irqrestore(&cosa->lock, flags);
+}
+
+static void put_driver_status_nolock(struct cosa_data *cosa)
+{
+	int status;
+
+	status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
+		| (cosa->txbitmap ? DRIVER_TX_READY : 0)
+		| (cosa->txbitmap? ~(cosa->txbitmap<<DRIVER_TXMAP_SHIFT)
+			&DRIVER_TXMAP_MASK : 0);
+
+	if (cosa->rxbitmap|cosa->txbitmap) {
+		cosa_putstatus(cosa, SR_RX_INT_ENA);
+#ifdef DEBUG_IO
+		debug_status_out(cosa, SR_RX_INT_ENA);
+#endif
+		cosa->enabled = 1;
+	} else {
+		cosa_putstatus(cosa, 0);
+#ifdef DEBUG_IO
+		debug_status_out(cosa, 0);
+#endif
+		cosa->enabled = 0;
+	}
+	cosa_putdata8(cosa, status);
+#ifdef DEBUG_IO
+	debug_data_cmd(cosa, status);
+#endif
+}
+
+/*
+ * The "kickme" function: When the DMA times out, this is called to
+ * clean up the driver status.
+ * FIXME: Preliminary support, the interface is probably wrong.
+ */
+static void cosa_kick(struct cosa_data *cosa)
+{
+	unsigned long flags, flags1;
+	char *s = "(probably) IRQ";
+
+	if (test_bit(RXBIT, &cosa->rxtx))
+		s = "RX DMA";
+	if (test_bit(TXBIT, &cosa->rxtx))
+		s = "TX DMA";
+
+	pr_info("%s: %s timeout - restarting\n", cosa->name, s);
+	spin_lock_irqsave(&cosa->lock, flags);
+	cosa->rxtx = 0;
+
+	flags1 = claim_dma_lock();
+	disable_dma(cosa->dma);
+	clear_dma_ff(cosa->dma);
+	release_dma_lock(flags1);
+
+	/* FIXME: Anything else? */
+	udelay(100);
+	cosa_putstatus(cosa, 0);
+	udelay(100);
+	(void) cosa_getdata8(cosa);
+	udelay(100);
+	cosa_putdata8(cosa, 0);
+	udelay(100);
+	put_driver_status_nolock(cosa);
+	spin_unlock_irqrestore(&cosa->lock, flags);
+}
+
+/*
+ * Check whether the whole buffer is DMA-able, i.e. it lies below 16M of
+ * physical memory and does not cross a 64k boundary. For now it seems
+ * skbs never violate this, but we check anyway.
+ */
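+/*
+ * For example, a 0x20-byte buffer starting at 0x0fff0 crosses into the next
+ * 64k segment: b = 0x0fff0 and b + len = 0x10010 differ in bit 16, so the
+ * (b ^ (b + len)) & 0x10000 test below rejects it (the test is sufficient
+ * as long as len is below 64k).
+ */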
+static int cosa_dma_able(struct channel_data *chan, char *buf, int len)
+{
+	static int count;
+	unsigned long b = (unsigned long)buf;
+	if (b+len >= MAX_DMA_ADDRESS)
+		return 0;
+	if ((b^ (b+len)) & 0x10000) {
+		if (count++ < 5)
+			pr_info("%s: packet spanning a 64k boundary\n",
+				chan->name);
+		return 0;
+	}
+	return 1;
+}
+
+
+/* ---------- The SRP/COSA ROM monitor functions ---------- */
+
+/*
+ * Downloading SRP microcode: say "w" to the SRP monitor, which answers with
+ * "w=". The driver then sends the 4-digit hex start address of the microcode
+ * followed by a single space; the monitor replies with " =". The driver now
+ * writes the 4-digit hex address of the last byte, again followed by a single
+ * space, and the monitor replies with a single space. The download then
+ * begins; when it is done, the monitor replies with "\r\n." (CR LF dot).
+ */
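+/*
+ * An illustrative exchange (the addresses are examples only), downloading
+ * 0x0100 bytes to address 0x0400:
+ *
+ *   driver: 'w'        monitor: "w="
+ *   driver: "0400 "    monitor: " ="
+ *   driver: "04FF "    monitor: " "
+ *   driver: 0x0100 raw data bytes
+ *                      monitor: "\r\n."
+ */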
+static int download(struct cosa_data *cosa, const char __user *microcode, int length, int address)
+{
+	int i;
+
+	if (put_wait_data(cosa, 'w') == -1) return -1;
+	if ((i=get_wait_data(cosa)) != 'w') { printk("dnld: 0x%04x\n",i); return -2;}
+	if (get_wait_data(cosa) != '=') return -3;
+
+	if (puthexnumber(cosa, address) < 0) return -4;
+	if (put_wait_data(cosa, ' ') == -1) return -10;
+	if (get_wait_data(cosa) != ' ') return -11;
+	if (get_wait_data(cosa) != '=') return -12;
+
+	if (puthexnumber(cosa, address+length-1) < 0) return -13;
+	if (put_wait_data(cosa, ' ') == -1) return -18;
+	if (get_wait_data(cosa) != ' ') return -19;
+
+	while (length--) {
+		char c;
+#ifndef SRP_DOWNLOAD_AT_BOOT
+		if (get_user(c, microcode))
+			return -23; /* ??? */
+#else
+		c = *microcode;
+#endif
+		if (put_wait_data(cosa, c) == -1)
+			return -20;
+		microcode++;
+	}
+
+	if (get_wait_data(cosa) != '\r') return -21;
+	if (get_wait_data(cosa) != '\n') return -22;
+	if (get_wait_data(cosa) != '.') return -23;
+#if 0
+	printk(KERN_DEBUG "cosa%d: download completed.\n", cosa->num);
+#endif
+	return 0;
+}
+
+
+/*
+ * Starting microcode is done via the "g" command of the SRP monitor.
+ * The chat should be the following: "g" "g=" "<addr><CR>"
+ * "<CR><CR><LF><CR><LF>".
+ */
+static int startmicrocode(struct cosa_data *cosa, int address)
+{
+	if (put_wait_data(cosa, 'g') == -1) return -1;
+	if (get_wait_data(cosa) != 'g') return -2;
+	if (get_wait_data(cosa) != '=') return -3;
+
+	if (puthexnumber(cosa, address) < 0) return -4;
+	if (put_wait_data(cosa, '\r') == -1) return -5;
+	
+	if (get_wait_data(cosa) != '\r') return -6;
+	if (get_wait_data(cosa) != '\r') return -7;
+	if (get_wait_data(cosa) != '\n') return -8;
+	if (get_wait_data(cosa) != '\r') return -9;
+	if (get_wait_data(cosa) != '\n') return -10;
+#if 0
+	printk(KERN_DEBUG "cosa%d: microcode started\n", cosa->num);
+#endif
+	return 0;
+}
+
+/*
+ * Reading memory is done via the "r" command of the SRP monitor.
+ * The chat is as follows: "r" "r=" "<addr> " " =" "<last_byte> " " ".
+ * The driver can then read the data, and the conversation is finished
+ * by the SRP monitor sending "<CR><LF>." (dot at the end).
+ *
+ * This routine is not needed during normal operation and is used
+ * for debugging purposes only.
+ */
+static int readmem(struct cosa_data *cosa, char __user *microcode, int length, int address)
+{
+	if (put_wait_data(cosa, 'r') == -1) return -1;
+	if ((get_wait_data(cosa)) != 'r') return -2;
+	if ((get_wait_data(cosa)) != '=') return -3;
+
+	if (puthexnumber(cosa, address) < 0) return -4;
+	if (put_wait_data(cosa, ' ') == -1) return -5;
+	if (get_wait_data(cosa) != ' ') return -6;
+	if (get_wait_data(cosa) != '=') return -7;
+
+	if (puthexnumber(cosa, address+length-1) < 0) return -8;
+	if (put_wait_data(cosa, ' ') == -1) return -9;
+	if (get_wait_data(cosa) != ' ') return -10;
+
+	while (length--) {
+		char c;
+		int i;
+		if ((i=get_wait_data(cosa)) == -1) {
+			pr_info("0x%04x bytes remaining\n", length);
+			return -11;
+		}
+		c=i;
+#if 1
+		if (put_user(c, microcode))
+			return -23; /* ??? */
+#else
+		*microcode = c;
+#endif
+		microcode++;
+	}
+
+	if (get_wait_data(cosa) != '\r') return -21;
+	if (get_wait_data(cosa) != '\n') return -22;
+	if (get_wait_data(cosa) != '.') return -23;
+#if 0
+	printk(KERN_DEBUG "cosa%d: readmem completed.\n", cosa->num);
+#endif
+	return 0;
+}
+
+/*
+ * This function resets the device and reads the initial prompt
+ * of the device's ROM monitor.
+ */
+static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
+{
+	int i=0, id=0, prev=0, curr=0;
+
+	/* Reset the card ... */
+	cosa_putstatus(cosa, 0);
+	cosa_getdata8(cosa);
+	cosa_putstatus(cosa, SR_RST);
+#ifdef MODULE
+	msleep(500);
+#else
+	udelay(5*100000);
+#endif
+	/* Disable all IRQs from the card */
+	cosa_putstatus(cosa, 0);
+
+	/*
+	 * Try to read the ID string. The card prints out its
+	 * identification string terminated by "\n\x2e".
+	 *
+	 * The following loop is indexed through i (instead of id)
+	 * to avoid looping forever if, for any reason,
+	 * the port keeps returning '\r', '\n' or '\x2e'.
+	 */
+	for (i=0; i<COSA_MAX_ID_STRING-1; i++, prev=curr) {
+		if ((curr = get_wait_data(cosa)) == -1) {
+			return -1;
+		}
+		curr &= 0xff;
+		if (curr != '\r' && curr != '\n' && curr != 0x2e)
+			idstring[id++] = curr;
+		if (curr == 0x2e && prev == '\n')
+			break;
+	}
+	/* Perhaps we should fail when i==COSA_MAX_ID_STRING-1 ? */
+	idstring[id] = '\0';
+	return id;
+}
+
+
+/* ---------- Auxiliary routines for COSA/SRP monitor ---------- */
+
+/*
+ * This routine gets a data byte from the card, looping until the SR_RX_RDY
+ * bit is set. It should be used in exceptional cases only (for example
+ * when resetting the card or downloading the firmware).
+ */
+static int get_wait_data(struct cosa_data *cosa)
+{
+	int retries = 1000;
+
+	while (--retries) {
+		/* read data and return them */
+		if (cosa_getstatus(cosa) & SR_RX_RDY) {
+			short r;
+			r = cosa_getdata8(cosa);
+#if 0
+			pr_info("get_wait_data returning after %d retries\n",
+				999-retries);
+#endif
+			return r;
+		}
+		/* sleep if not ready to read */
+		schedule_timeout_interruptible(1);
+	}
+	pr_info("timeout in get_wait_data (status 0x%x)\n",
+		cosa_getstatus(cosa));
+	return -1;
+}
+
+/*
+ * This routine writes a data byte to the card, looping until the SR_TX_RDY
+ * bit is set. It should be used in exceptional cases only (for example
+ * when resetting the card or downloading the firmware).
+ */
+static int put_wait_data(struct cosa_data *cosa, int data)
+{
+	int retries = 1000;
+	while (--retries) {
+		/* read data and return them */
+		if (cosa_getstatus(cosa) & SR_TX_RDY) {
+			cosa_putdata8(cosa, data);
+#if 0
+			pr_info("Putdata: %d retries\n", 999-retries);
+#endif
+			return 0;
+		}
+#if 0
+		/* sleep if not ready to read */
+		schedule_timeout_interruptible(1);
+#endif
+	}
+	pr_info("cosa%d: timeout in put_wait_data (status 0x%x)\n",
+		cosa->num, cosa_getstatus(cosa));
+	return -1;
+}
+	
+/* 
+ * The following routine writes a hexadecimal number to the SRP monitor
+ * and verifies the echo of the sent bytes. It returns 0 on success; a
+ * negative return value of -1, -3, -5 or -7 means that put_wait_data()
+ * failed, and -2, -4, -6 or -8 means that reading the echo failed.
+ */
+static int puthexnumber(struct cosa_data *cosa, int number)
+{
+	char temp[5];
+	int i;
+
+	/* Well, I should probably replace this by something faster. */
+	sprintf(temp, "%04X", number);
+	for (i=0; i<4; i++) {
+		if (put_wait_data(cosa, temp[i]) == -1) {
+			pr_notice("cosa%d: puthexnumber failed to write byte %d\n",
+				  cosa->num, i);
+			return -1-2*i;
+		}
+		if (get_wait_data(cosa) != temp[i]) {
+			pr_notice("cosa%d: puthexhumber failed to read echo of byte %d\n",
+				  cosa->num, i);
+			return -2-2*i;
+		}
+	}
+	return 0;
+}
+
+
+/* ---------- Interrupt routines ---------- */
+
+/*
+ * There are three types of interrupt:
+ * At the beginning of transmit - this is handled in tx_interrupt(),
+ * at the beginning of receive - it is in rx_interrupt(), and
+ * at the end of transmit/receive - it is the eot_interrupt() function.
+ * These functions are multiplexed by cosa_interrupt() according to the
+ * COSA status byte. I have moved the rx/tx/eot interrupt handling into
+ * separate functions to make it more readable. These functions are inline,
+ * so there should be no function-call overhead.
+ * 
+ * In the COSA bus-master mode, we need to tell the card the address of a
+ * buffer. Unfortunately, COSA may be too slow for us, so we must busy-wait.
+ * It's time to use the bottom half :-(
+ */
+
+/*
+ * Transmit interrupt routine - called when COSA is willing to obtain
+ * data from the OS. The trickiest part of the routine is selecting the
+ * channel we (the OS) want to send a packet for. For SRP we should probably
+ * use a round-robin approach. The newer COSA firmwares have simple
+ * flow control - bits 2 and 3 set to 1 in the status word mean that
+ * channel 0 or 1, respectively, does not want to receive data.
+ *
+ * It seems there is a bug in COSA firmware (need to trace it further):
+ * When the driver status says that the kernel has no more data for transmit
+ * (e.g. at the end of TX DMA) and then the kernel changes its mind
+ * (e.g. new packet is queued to hard_start_xmit()), the card issues
+ * the TX interrupt but does not mark the channel as ready-to-transmit.
+ * The fix seems to be to push the packet to COSA despite its request.
+ * We first try to obey the card's opinion, and then fall back to forced TX.
+ */
+static inline void tx_interrupt(struct cosa_data *cosa, int status)
+{
+	unsigned long flags, flags1;
+#ifdef DEBUG_IRQS
+	pr_info("cosa%d: SR_DOWN_REQUEST status=0x%04x\n", cosa->num, status);
+#endif
+	spin_lock_irqsave(&cosa->lock, flags);
+	set_bit(TXBIT, &cosa->rxtx);
+	if (!test_bit(IRQBIT, &cosa->rxtx)) {
+		/* flow control, see the comment above */
+		int i=0;
+		if (!cosa->txbitmap) {
+			pr_warn("%s: No channel wants data in TX IRQ. Expect DMA timeout.\n",
+				cosa->name);
+			put_driver_status_nolock(cosa);
+			clear_bit(TXBIT, &cosa->rxtx);
+			spin_unlock_irqrestore(&cosa->lock, flags);
+			return;
+		}
+		while (1) {
+			cosa->txchan++;
+			i++;
+			if (cosa->txchan >= cosa->nchannels)
+				cosa->txchan = 0;
+			if (!(cosa->txbitmap & (1<<cosa->txchan)))
+				continue;
+			if (~status & (1 << (cosa->txchan+DRIVER_TXMAP_SHIFT)))
+				break;
+			/* in second pass, accept first ready-to-TX channel */
+			if (i > cosa->nchannels) {
+				/* Can be safely ignored */
+#ifdef DEBUG_IRQS
+				printk(KERN_DEBUG "%s: Forcing TX "
+					"to not-ready channel %d\n",
+					cosa->name, cosa->txchan);
+#endif
+				break;
+			}
+		}
+
+		cosa->txsize = cosa->chan[cosa->txchan].txsize;
+		if (cosa_dma_able(cosa->chan+cosa->txchan,
+			cosa->chan[cosa->txchan].txbuf, cosa->txsize)) {
+			cosa->txbuf = cosa->chan[cosa->txchan].txbuf;
+		} else {
+			memcpy(cosa->bouncebuf, cosa->chan[cosa->txchan].txbuf,
+				cosa->txsize);
+			cosa->txbuf = cosa->bouncebuf;
+		}
+	}
+
+	if (is_8bit(cosa)) {
+		if (!test_bit(IRQBIT, &cosa->rxtx)) {
+			cosa_putstatus(cosa, SR_TX_INT_ENA);
+			cosa_putdata8(cosa, ((cosa->txchan << 5) & 0xe0)|
+				((cosa->txsize >> 8) & 0x1f));
+#ifdef DEBUG_IO
+			debug_status_out(cosa, SR_TX_INT_ENA);
+			debug_data_out(cosa, ((cosa->txchan << 5) & 0xe0)|
+                                ((cosa->txsize >> 8) & 0x1f));
+			debug_data_in(cosa, cosa_getdata8(cosa));
+#else
+			cosa_getdata8(cosa);
+#endif
+			set_bit(IRQBIT, &cosa->rxtx);
+			spin_unlock_irqrestore(&cosa->lock, flags);
+			return;
+		} else {
+			clear_bit(IRQBIT, &cosa->rxtx);
+			cosa_putstatus(cosa, 0);
+			cosa_putdata8(cosa, cosa->txsize&0xff);
+#ifdef DEBUG_IO
+			debug_status_out(cosa, 0);
+			debug_data_out(cosa, cosa->txsize&0xff);
+#endif
+		}
+	} else {
+		cosa_putstatus(cosa, SR_TX_INT_ENA);
+		cosa_putdata16(cosa, ((cosa->txchan<<13) & 0xe000)
+			| (cosa->txsize & 0x1fff));
+#ifdef DEBUG_IO
+		debug_status_out(cosa, SR_TX_INT_ENA);
+		debug_data_out(cosa, ((cosa->txchan<<13) & 0xe000)
+                        | (cosa->txsize & 0x1fff));
+		debug_data_in(cosa, cosa_getdata8(cosa));
+		debug_status_out(cosa, 0);
+#else
+		cosa_getdata8(cosa);
+#endif
+		cosa_putstatus(cosa, 0);
+	}
+
+	if (cosa->busmaster) {
+		unsigned long addr = virt_to_bus(cosa->txbuf);
+		int count=0;
+		pr_info("busmaster IRQ\n");
+		while (!(cosa_getstatus(cosa)&SR_TX_RDY)) {
+			count++;
+			udelay(10);
+			if (count > 1000) break;
+		}
+		pr_info("status %x\n", cosa_getstatus(cosa));
+		pr_info("ready after %d loops\n", count);
+		cosa_putdata16(cosa, (addr >> 16)&0xffff);
+
+		count = 0;
+		while (!(cosa_getstatus(cosa)&SR_TX_RDY)) {
+			count++;
+			if (count > 1000) break;
+			udelay(10);
+		}
+		pr_info("ready after %d loops\n", count);
+		cosa_putdata16(cosa, addr &0xffff);
+		flags1 = claim_dma_lock();
+		set_dma_mode(cosa->dma, DMA_MODE_CASCADE);
+		enable_dma(cosa->dma);
+		release_dma_lock(flags1);
+	} else {
+		/* start the DMA */
+		flags1 = claim_dma_lock();
+		disable_dma(cosa->dma);
+		clear_dma_ff(cosa->dma);
+		set_dma_mode(cosa->dma, DMA_MODE_WRITE);
+		set_dma_addr(cosa->dma, virt_to_bus(cosa->txbuf));
+		set_dma_count(cosa->dma, cosa->txsize);
+		enable_dma(cosa->dma);
+		release_dma_lock(flags1);
+	}
+	cosa_putstatus(cosa, SR_TX_DMA_ENA|SR_USR_INT_ENA);
+#ifdef DEBUG_IO
+	debug_status_out(cosa, SR_TX_DMA_ENA|SR_USR_INT_ENA);
+#endif
+	spin_unlock_irqrestore(&cosa->lock, flags);
+}
+
+static inline void rx_interrupt(struct cosa_data *cosa, int status)
+{
+	unsigned long flags;
+#ifdef DEBUG_IRQS
+	pr_info("cosa%d: SR_UP_REQUEST\n", cosa->num);
+#endif
+
+	spin_lock_irqsave(&cosa->lock, flags);
+	set_bit(RXBIT, &cosa->rxtx);
+
+	if (is_8bit(cosa)) {
+		if (!test_bit(IRQBIT, &cosa->rxtx)) {
+			set_bit(IRQBIT, &cosa->rxtx);
+			put_driver_status_nolock(cosa);
+			cosa->rxsize = cosa_getdata8(cosa) <<8;
+#ifdef DEBUG_IO
+			debug_data_in(cosa, cosa->rxsize >> 8);
+#endif
+			spin_unlock_irqrestore(&cosa->lock, flags);
+			return;
+		} else {
+			clear_bit(IRQBIT, &cosa->rxtx);
+			cosa->rxsize |= cosa_getdata8(cosa) & 0xff;
+#ifdef DEBUG_IO
+			debug_data_in(cosa, cosa->rxsize & 0xff);
+#endif
+#if 0
+			pr_info("cosa%d: receive rxsize = (0x%04x)\n",
+				cosa->num, cosa->rxsize);
+#endif
+		}
+	} else {
+		cosa->rxsize = cosa_getdata16(cosa);
+#ifdef DEBUG_IO
+		debug_data_in(cosa, cosa->rxsize);
+#endif
+#if 0
+		pr_info("cosa%d: receive rxsize = (0x%04x)\n",
+			cosa->num, cosa->rxsize);
+#endif
+	}
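+	/*
+	 * The 16-bit length word from the card encodes the channel number
+	 * in its top three bits and the packet length in the low 13 bits.
+	 */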
+	if (((cosa->rxsize & 0xe000) >> 13) >= cosa->nchannels) {
+		pr_warn("%s: rx for unknown channel (0x%04x)\n",
+			cosa->name, cosa->rxsize);
+		spin_unlock_irqrestore(&cosa->lock, flags);
+		goto reject;
+	}
+	cosa->rxchan = cosa->chan + ((cosa->rxsize & 0xe000) >> 13);
+	cosa->rxsize &= 0x1fff;
+	spin_unlock_irqrestore(&cosa->lock, flags);
+
+	cosa->rxbuf = NULL;
+	if (cosa->rxchan->setup_rx)
+		cosa->rxbuf = cosa->rxchan->setup_rx(cosa->rxchan, cosa->rxsize);
+
+	if (!cosa->rxbuf) {
+reject:		/* Reject the packet */
+		pr_info("cosa%d: rejecting packet on channel %d\n",
+			cosa->num, cosa->rxchan->num);
+		cosa->rxbuf = cosa->bouncebuf;
+	}
+
+	/* start the DMA */
+	flags = claim_dma_lock();
+	disable_dma(cosa->dma);
+	clear_dma_ff(cosa->dma);
+	set_dma_mode(cosa->dma, DMA_MODE_READ);
+	if (cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize & 0x1fff)) {
+		set_dma_addr(cosa->dma, virt_to_bus(cosa->rxbuf));
+	} else {
+		set_dma_addr(cosa->dma, virt_to_bus(cosa->bouncebuf));
+	}
+	set_dma_count(cosa->dma, (cosa->rxsize&0x1fff));
+	enable_dma(cosa->dma);
+	release_dma_lock(flags);
+	spin_lock_irqsave(&cosa->lock, flags);
+	cosa_putstatus(cosa, SR_RX_DMA_ENA|SR_USR_INT_ENA);
+	if (!is_8bit(cosa) && (status & SR_TX_RDY))
+		cosa_putdata8(cosa, DRIVER_RX_READY);
+#ifdef DEBUG_IO
+	debug_status_out(cosa, SR_RX_DMA_ENA|SR_USR_INT_ENA);
+	if (!is_8bit(cosa) && (status & SR_TX_RDY))
+		debug_data_cmd(cosa, DRIVER_RX_READY);
+#endif
+	spin_unlock_irqrestore(&cosa->lock, flags);
+}
+
+static inline void eot_interrupt(struct cosa_data *cosa, int status)
+{
+	unsigned long flags, flags1;
+	spin_lock_irqsave(&cosa->lock, flags);
+	flags1 = claim_dma_lock();
+	disable_dma(cosa->dma);
+	clear_dma_ff(cosa->dma);
+	release_dma_lock(flags1);
+	if (test_bit(TXBIT, &cosa->rxtx)) {
+		struct channel_data *chan = cosa->chan+cosa->txchan;
+		if (chan->tx_done)
+			if (chan->tx_done(chan, cosa->txsize))
+				clear_bit(chan->num, &cosa->txbitmap);
+	} else if (test_bit(RXBIT, &cosa->rxtx)) {
+#ifdef DEBUG_DATA
+	{
+		int i;
+		pr_info("cosa%dc%d: done rx(0x%x)",
+			cosa->num, cosa->rxchan->num, cosa->rxsize);
+		for (i=0; i<cosa->rxsize; i++)
+			pr_cont(" %02x", cosa->rxbuf[i]&0xff);
+		pr_cont("\n");
+	}
+#endif
+		/* Packet for unknown channel? */
+		if (cosa->rxbuf == cosa->bouncebuf)
+			goto out;
+		if (!cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize))
+			memcpy(cosa->rxbuf, cosa->bouncebuf, cosa->rxsize);
+		if (cosa->rxchan->rx_done)
+			if (cosa->rxchan->rx_done(cosa->rxchan))
+				clear_bit(cosa->rxchan->num, &cosa->rxbitmap);
+	} else {
+		pr_notice("cosa%d: unexpected EOT interrupt\n", cosa->num);
+	}
+	/*
+	 * Clear the RXBIT, TXBIT and IRQBIT (the last one should be
+	 * cleared anyway). We should do it as soon as possible
+	 * so that we can tell the COSA we are done and give it time
+	 * to recover.
+	 */
+out:
+	cosa->rxtx = 0;
+	put_driver_status_nolock(cosa);
+	spin_unlock_irqrestore(&cosa->lock, flags);
+}
+
+static irqreturn_t cosa_interrupt(int irq, void *cosa_)
+{
+	unsigned status;
+	int count = 0;
+	struct cosa_data *cosa = cosa_;
+again:
+	status = cosa_getstatus(cosa);
+#ifdef DEBUG_IRQS
+	pr_info("cosa%d: got IRQ, status 0x%02x\n", cosa->num, status & 0xff);
+#endif
+#ifdef DEBUG_IO
+	debug_status_in(cosa, status);
+#endif
+	switch (status & SR_CMD_FROM_SRP_MASK) {
+	case SR_DOWN_REQUEST:
+		tx_interrupt(cosa, status);
+		break;
+	case SR_UP_REQUEST:
+		rx_interrupt(cosa, status);
+		break;
+	case SR_END_OF_TRANSFER:
+		eot_interrupt(cosa, status);
+		break;
+	default:
+		/* We may be too fast for SRP. Try to wait a bit more. */
+		if (count++ < 100) {
+			udelay(100);
+			goto again;
+		}
+		pr_info("cosa%d: unknown status 0x%02x in IRQ after %d retries\n",
+			cosa->num, status & 0xff, count);
+	}
+#ifdef DEBUG_IRQS
+	if (count)
+		pr_info("%s: %d-times got unknown status in IRQ\n",
+			cosa->name, count);
+	else
+		pr_info("%s: returning from IRQ\n", cosa->name);
+#endif
+	return IRQ_HANDLED;
+}
+
+
+/* ---------- I/O debugging routines ---------- */
+/*
+ * These routines can be used to monitor COSA/SRP I/O and to printk()
+ * the data being transferred on the data and status I/O port in a
+ * readable way.
+ */
+
+#ifdef DEBUG_IO
+static void debug_status_in(struct cosa_data *cosa, int status)
+{
+	char *s;
+	switch (status & SR_CMD_FROM_SRP_MASK) {
+	case SR_UP_REQUEST:
+		s = "RX_REQ";
+		break;
+	case SR_DOWN_REQUEST:
+		s = "TX_REQ";
+		break;
+	case SR_END_OF_TRANSFER:
+		s = "ET_REQ";
+		break;
+	default:
+		s = "NO_REQ";
+		break;
+	}
+	pr_info("%s: IO: status -> 0x%02x (%s%s%s%s)\n",
+		cosa->name,
+		status,
+		status & SR_USR_RQ ? "USR_RQ|" : "",
+		status & SR_TX_RDY ? "TX_RDY|" : "",
+		status & SR_RX_RDY ? "RX_RDY|" : "",
+		s);
+}
+
+static void debug_status_out(struct cosa_data *cosa, int status)
+{
+	pr_info("%s: IO: status <- 0x%02x (%s%s%s%s%s%s)\n",
+		cosa->name,
+		status,
+		status & SR_RX_DMA_ENA  ? "RXDMA|"  : "!rxdma|",
+		status & SR_TX_DMA_ENA  ? "TXDMA|"  : "!txdma|",
+		status & SR_RST         ? "RESET|"  : "",
+		status & SR_USR_INT_ENA ? "USRINT|" : "!usrint|",
+		status & SR_TX_INT_ENA  ? "TXINT|"  : "!txint|",
+		status & SR_RX_INT_ENA  ? "RXINT"   : "!rxint");
+}
+
+static void debug_data_in(struct cosa_data *cosa, int data)
+{
+	pr_info("%s: IO: data -> 0x%04x\n", cosa->name, data);
+}
+
+static void debug_data_out(struct cosa_data *cosa, int data)
+{
+	pr_info("%s: IO: data <- 0x%04x\n", cosa->name, data);
+}
+
+static void debug_data_cmd(struct cosa_data *cosa, int data)
+{
+	pr_info("%s: IO: data <- 0x%04x (%s|%s)\n",
+		cosa->name, data,
+		data & SR_RDY_RCV ? "RX_RDY" : "!rx_rdy",
+		data & SR_RDY_SND ? "TX_RDY" : "!tx_rdy");
+}
+#endif
+
+/* EOF -- this file has not been truncated */
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/cosa.h b/ap/os/linux/linux-3.4.x/drivers/net/wan/cosa.h
new file mode 100644
index 0000000..028f3d9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/cosa.h
@@ -0,0 +1,117 @@
+/* $Id: cosa.h,v 1.6 1999/01/06 14:02:44 kas Exp $ */
+
+/*
+ *  Copyright (C) 1995-1997  Jan "Yenya" Kasprzak <kas@fi.muni.cz>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef COSA_H__
+#define COSA_H__
+
+#include <linux/ioctl.h>
+
+#ifdef __KERNEL__
+/* status register - output bits */
+#define SR_RX_DMA_ENA   0x04    /* receiver DMA enable bit */
+#define SR_TX_DMA_ENA   0x08    /* transmitter DMA enable bit */
+#define SR_RST          0x10    /* SRP reset */
+#define SR_USR_INT_ENA  0x20    /* user interrupt enable bit */
+#define SR_TX_INT_ENA   0x40    /* transmitter interrupt enable bit */
+#define SR_RX_INT_ENA   0x80    /* receiver interrupt enable bit */
+
+/* status register - input bits */
+#define SR_USR_RQ       0x20    /* user interrupt request pending */
+#define SR_TX_RDY       0x40    /* transmitter empty (ready) */
+#define SR_RX_RDY       0x80    /* receiver data ready */
+
+#define SR_UP_REQUEST   0x02    /* request from SRP to transfer data
+                                   up to PC */
+#define SR_DOWN_REQUEST 0x01    /* SRP is able to transfer data down
+                                   from PC to SRP */
+#define SR_END_OF_TRANSFER      0x03    /* SRP signalize end of
+                                           transfer (up or down) */
+
+#define SR_CMD_FROM_SRP_MASK    0x03    /* mask to get SRP command */
+
+/* bits in driver status byte definitions : */
+#define SR_RDY_RCV      0x01    /* ready to receive packet */
+#define SR_RDY_SND      0x02    /* ready to send packet */
+#define SR_CMD_PND      0x04    /* command pending */ /* not currently used */
+
+/* ???? */
+#define SR_PKT_UP       0x01    /* transfer of packet up in progress */
+#define SR_PKT_DOWN     0x02    /* transfer of packet down in progress */
+
+#endif /* __KERNEL__ */
+
+#define SR_LOAD_ADDR    0x4400  /* SRP microcode load address */
+#define SR_START_ADDR   0x4400  /* SRP microcode start address */
+
+#define COSA_LOAD_ADDR    0x400  /* SRP microcode load address */
+#define COSA_MAX_FIRMWARE_SIZE	0x10000
+
+/* ioctls */
+struct cosa_download {
+	int addr, len;
+	char __user *code;
+};
+
+/* Reset the device */
+#define COSAIORSET	_IO('C',0xf0)
+
+/* Start microcode at given address */
+#define COSAIOSTRT	_IOW('C',0xf1, int)
+
+/* Read the block from the device memory */
+#define COSAIORMEM	_IOWR('C',0xf2, struct cosa_download *)
+	/* actually the struct cosa_download itself; this is to keep
+	 * the ioctl number same as in 2.4 in order to keep the user-space
+	 * utils compatible. */
+
+/* Write the block to the device memory (i.e. download the microcode) */
+#define COSAIODOWNLD	_IOW('C',0xf2, struct cosa_download *)
+	/* actually the struct cosa_download itself; this is to keep
+	 * the ioctl number same as in 2.4 in order to keep the user-space
+	 * utils compatible. */
+
+/* Read the device type (one of "srp", "cosa", and "cosa8" for now) */
+#define COSAIORTYPE	_IOR('C',0xf3, char *)
+
+/* Read the device identification string */
+#define COSAIORIDSTR	_IOR('C',0xf4, char *)
+/* Maximum length of the identification string. */
+#define COSA_MAX_ID_STRING 128
+
+/* Increment/decrement the module usage count :-) */
+/* #define COSAIOMINC	_IO('C',0xf5) */
+/* #define COSAIOMDEC	_IO('C',0xf6) */
+
+/* Get the total number of cards installed */
+#define COSAIONRCARDS	_IO('C',0xf7)
+
+/* Get the number of channels on this card */
+#define COSAIONRCHANS	_IO('C',0xf8)
+
+/* Set the driver for the bus-master operations */
+#define COSAIOBMSET	_IOW('C', 0xf9, unsigned short)
+
+#define COSA_BM_OFF	0	/* Bus-mastering off - use ISA DMA (default) */
+#define COSA_BM_ON	1	/* Bus-mastering on - faster but untested */
+
+/* Gets the busmaster status */
+#define COSAIOBMGET	_IO('C', 0xfa)
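+
+/*
+ * A rough sketch of the firmware bring-up sequence as seen from user space
+ * (the device node name, fd handling and start address are illustrative
+ * assumptions, not defined by this header):
+ *
+ *	struct cosa_download d = { .addr = COSA_LOAD_ADDR, .len = fw_len,
+ *				   .code = fw_buf };
+ *	int fd = open("/dev/cosa...", O_RDWR);
+ *	ioctl(fd, COSAIORSET);			// reset the card
+ *	ioctl(fd, COSAIODOWNLD, &d);		// download the microcode
+ *	ioctl(fd, COSAIOSTRT, start_addr);	// start it
+ */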
+
+#endif /* !COSA_H__ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/cycx_drv.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/cycx_drv.c
new file mode 100644
index 0000000..2a3ecae
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/cycx_drv.c
@@ -0,0 +1,569 @@
+/*
+* cycx_drv.c	Cyclom 2X Support Module.
+*
+*		This module is a library of common hardware specific
+*		functions used by the Cyclades Cyclom 2X sync card.
+*
+* Author:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+*
+* Copyright:	(c) 1998-2003 Arnaldo Carvalho de Melo
+*
+* Based on sdladrv.c by Gene Kozin <genek@compuserve.com>
+*
+*		This program is free software; you can redistribute it and/or
+*		modify it under the terms of the GNU General Public License
+*		as published by the Free Software Foundation; either version
+*		2 of the License, or (at your option) any later version.
+* ============================================================================
+* 1999/11/11	acme		set_current_state(TASK_INTERRUPTIBLE), code
+*				cleanup
+* 1999/11/08	acme		init_cyc2x deleted, doing nothing
+* 1999/11/06	acme		back to read[bw], write[bw] and memcpy_to and
+*				fromio to use dpmbase ioremaped
+* 1999/10/26	acme		use isa_read[bw], isa_write[bw] & isa_memcpy_to
+*				& fromio
+* 1999/10/23	acme		cleanup to only supports cyclom2x: all the other
+*				boards are no longer manufactured by cyclades,
+*				if someone wants to support them... be my guest!
+* 1999/05/28    acme		cycx_intack & cycx_intde gone for good
+* 1999/05/18	acme		lots of unlogged work, submitting to Linus...
+* 1999/01/03	acme		more judicious use of data types
+* 1999/01/03	acme		judicious use of data types :>
+*				cycx_inten trying to reset pending interrupts
+*				from cyclom 2x - I think this isn't the way to
+*				go, but for now...
+* 1999/01/02	acme		cycx_intack ok, I think there's nothing to do
+*				to ack an int in cycx_drv.c, only handle it in
+*				cyx_isr (or in the other protocols: cyp_isr,
+*				cyf_isr, when they get implemented.
+* Dec 31, 1998	acme		cycx_data_boot & cycx_code_boot fixed, crossing
+*				fingers to see x25_configure in cycx_x25.c
+*				work... :)
+* Dec 26, 1998	acme		load implementation fixed, seems to work! :)
+*				cycx_2x_dpmbase_options with all the possible
+*				DPM addresses (20).
+*				cycx_intr implemented (test this!)
+*				general code cleanup
+* Dec  8, 1998	Ivan Passos	Cyclom-2X firmware load implementation.
+* Aug  8, 1998	acme		Initial version.
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>		/* __init */
+#include <linux/module.h>
+#include <linux/kernel.h>	/* printk(), and other useful stuff */
+#include <linux/stddef.h>	/* offsetof(), etc. */
+#include <linux/errno.h>	/* return codes */
+#include <linux/cycx_drv.h>	/* API definitions */
+#include <linux/cycx_cfm.h>	/* CYCX firmware module definitions */
+#include <linux/delay.h>	/* udelay, msleep_interruptible */
+#include <asm/io.h>		/* read[wl], write[wl], ioremap, iounmap */
+
+#define	MOD_VERSION	0
+#define	MOD_RELEASE	6
+
+MODULE_AUTHOR("Arnaldo Carvalho de Melo");
+MODULE_DESCRIPTION("Cyclom 2x Sync Card Driver");
+MODULE_LICENSE("GPL");
+
+/* Hardware-specific functions */
+static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len);
+static void cycx_bootcfg(struct cycx_hw *hw);
+
+static int reset_cyc2x(void __iomem *addr);
+static int detect_cyc2x(void __iomem *addr);
+
+/* Miscellaneous functions */
+static int get_option_index(const long *optlist, long optval);
+static u16 checksum(u8 *buf, u32 len);
+
+#define wait_cyc(addr) cycx_exec(addr + CMD_OFFSET)
+
+/* Global Data */
+
+/* private data */
+static const char fullname[] = "Cyclom 2X Support Module";
+static const char copyright[] =
+	"(c) 1998-2003 Arnaldo Carvalho de Melo <acme@conectiva.com.br>";
+
+/* Hardware configuration options.
+ * These are arrays of configuration options used by verification routines.
+ * The first element of each array is its size (i.e. number of options).
+ */
+static const long cyc2x_dpmbase_options[] = {
+	20,
+	0xA0000, 0xA4000, 0xA8000, 0xAC000, 0xB0000, 0xB4000, 0xB8000,
+	0xBC000, 0xC0000, 0xC4000, 0xC8000, 0xCC000, 0xD0000, 0xD4000,
+	0xD8000, 0xDC000, 0xE0000, 0xE4000, 0xE8000, 0xEC000
+};
+
+static const long cycx_2x_irq_options[]  = { 7, 3, 5, 9, 10, 11, 12, 15 };
+
+/* Kernel Loadable Module Entry Points */
+/* Module 'insert' entry point.
+ * o print announcement
+ * o initialize static data
+ *
+ * Return:	0	Ok
+ *		< 0	error.
+ * Context:	process */
+
+static int __init cycx_drv_init(void)
+{
+	pr_info("%s v%u.%u %s\n",
+		fullname, MOD_VERSION, MOD_RELEASE, copyright);
+
+	return 0;
+}
+
+/* Module 'remove' entry point.
+ * o release all remaining system resources */
+static void cycx_drv_cleanup(void)
+{
+}
+
+/* Kernel APIs */
+/* Set up adapter.
+ * o detect adapter type
+ * o verify hardware configuration options
+ * o check for hardware conflicts
+ * o set up adapter shared memory
+ * o test adapter memory
+ * o load firmware
+ * Return:	0	ok.
+ *		< 0	error */
+EXPORT_SYMBOL(cycx_setup);
+int cycx_setup(struct cycx_hw *hw, void *cfm, u32 len, unsigned long dpmbase)
+{
+	int err;
+
+	/* Verify IRQ configuration options */
+	if (!get_option_index(cycx_2x_irq_options, hw->irq)) {
+		pr_err("IRQ %d is invalid!\n", hw->irq);
+		return -EINVAL;
+	}
+
+	/* Setup adapter dual-port memory window and test memory */
+	if (!dpmbase) {
+		pr_err("you must specify the dpm address!\n");
+ 		return -EINVAL;
+	} else if (!get_option_index(cyc2x_dpmbase_options, dpmbase)) {
+		pr_err("memory address 0x%lX is invalid!\n", dpmbase);
+		return -EINVAL;
+	}
+
+	hw->dpmbase = ioremap(dpmbase, CYCX_WINDOWSIZE);
+	hw->dpmsize = CYCX_WINDOWSIZE;
+
+	if (!detect_cyc2x(hw->dpmbase)) {
+		pr_err("adapter Cyclom 2X not found at address 0x%lX!\n",
+		       dpmbase);
+		return -EINVAL;
+	}
+
+	pr_info("found Cyclom 2X card at address 0x%lX\n", dpmbase);
+
+	/* Load firmware. If loader fails then shut down adapter */
+	err = load_cyc2x(hw, cfm, len);
+
+	if (err)
+		cycx_down(hw);         /* shutdown adapter */
+
+	return err;
+}
+
+EXPORT_SYMBOL(cycx_down);
+int cycx_down(struct cycx_hw *hw)
+{
+	iounmap(hw->dpmbase);
+	return 0;
+}
+
+/* Enable interrupt generation.  */
+static void cycx_inten(struct cycx_hw *hw)
+{
+	writeb(0, hw->dpmbase);
+}
+
+/* Generate an interrupt to adapter's CPU. */
+EXPORT_SYMBOL(cycx_intr);
+void cycx_intr(struct cycx_hw *hw)
+{
+	writew(0, hw->dpmbase + GEN_CYCX_INTR);
+}
+
+/* Execute Adapter Command.
+ * o Set exec flag.
+ * o Busy-wait until flag is reset. */
+EXPORT_SYMBOL(cycx_exec);
+int cycx_exec(void __iomem *addr)
+{
+	u16 i = 0;
+	/* wait till addr content is zeroed */
+
+	while (readw(addr)) {
+		udelay(1000);
+
+		if (++i > 50)
+			return -1;
+	}
+
+	return 0;
+}
+
+/* Read absolute adapter memory.
+ * Transfer data from adapter's memory to data buffer. */
+EXPORT_SYMBOL(cycx_peek);
+int cycx_peek(struct cycx_hw *hw, u32 addr, void *buf, u32 len)
+{
+	if (len == 1)
+		*(u8*)buf = readb(hw->dpmbase + addr);
+	else
+		memcpy_fromio(buf, hw->dpmbase + addr, len);
+
+	return 0;
+}
+
+/* Write Absolute Adapter Memory.
+ * Transfer data from data buffer to adapter's memory. */
+EXPORT_SYMBOL(cycx_poke);
+int cycx_poke(struct cycx_hw *hw, u32 addr, void *buf, u32 len)
+{
+	if (len == 1)
+		writeb(*(u8*)buf, hw->dpmbase + addr);
+	else
+		memcpy_toio(hw->dpmbase + addr, buf, len);
+
+	return 0;
+}
+
+/* Hardware-Specific Functions */
+
+/* Load Aux Routines */
+/* Reset board hardware.
+   return 1 if memory exists at addr and 0 if not. */
+static int memory_exists(void __iomem *addr)
+{
+	int tries = 0;
+
+	for (; tries < 3 ; tries++) {
+		writew(TEST_PATTERN, addr + 0x10);
+
+		if (readw(addr + 0x10) == TEST_PATTERN)
+			if (readw(addr + 0x10) == TEST_PATTERN)
+				return 1;
+
+		msleep_interruptible(1 * 1000);
+	}
+
+	return 0;
+}
+
+/* Load reset code. */
+static void reset_load(void __iomem *addr, u8 *buffer, u32 cnt)
+{
+	void __iomem *pt_code = addr + RESET_OFFSET;
+	u16 i; /*, j; */
+
+	for (i = 0 ; i < cnt ; i++) {
+/*		for (j = 0 ; j < 50 ; j++); Delay - FIXME busy waiting... */
+		writeb(*buffer++, pt_code++);
+	}
+}
+
+/* Load buffer using boot interface.
+ * o copy data from buffer to Cyclom-X memory
+ * o wait for reset code to copy it to right portion of memory */
+static int buffer_load(void __iomem *addr, u8 *buffer, u32 cnt)
+{
+	memcpy_toio(addr + DATA_OFFSET, buffer, cnt);
+	writew(GEN_BOOT_DAT, addr + CMD_OFFSET);
+
+	return wait_cyc(addr);
+}
+
+/* Set up entry point and kick start Cyclom-X CPU. */
+static void cycx_start(void __iomem *addr)
+{
+	/* put in 0x30 offset the jump instruction to the code entry point */
+	writeb(0xea, addr + 0x30);
+	writeb(0x00, addr + 0x31);
+	writeb(0xc4, addr + 0x32);
+	writeb(0x00, addr + 0x33);
+	writeb(0x00, addr + 0x34);
+
+	/* cmd to start executing code */
+	writew(GEN_START, addr + CMD_OFFSET);
+}
+
+/* Load and boot reset code. */
+static void cycx_reset_boot(void __iomem *addr, u8 *code, u32 len)
+{
+	void __iomem *pt_start = addr + START_OFFSET;
+
+	writeb(0xea, pt_start++); /* jmp to f000:3f00 */
+	writeb(0x00, pt_start++);
+	writeb(0xfc, pt_start++);
+	writeb(0x00, pt_start++);
+	writeb(0xf0, pt_start);
+	reset_load(addr, code, len);
+
+	/* 80186 was in hold, go */
+	writeb(0, addr + START_CPU);
+	msleep_interruptible(1 * 1000);
+}
+
+/* Load data.bin file through boot (reset) interface. */
+static int cycx_data_boot(void __iomem *addr, u8 *code, u32 len)
+{
+	void __iomem *pt_boot_cmd = addr + CMD_OFFSET;
+	u32 i;
+
+	/* boot buffer length */
+	writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16));
+	writew(GEN_DEFPAR, pt_boot_cmd);
+
+	if (wait_cyc(addr) < 0)
+		return -1;
+
+	writew(0, pt_boot_cmd + sizeof(u16));
+	writew(0x4000, pt_boot_cmd + 2 * sizeof(u16));
+	writew(GEN_SET_SEG, pt_boot_cmd);
+
+	if (wait_cyc(addr) < 0)
+		return -1;
+
+	for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ)
+		if (buffer_load(addr, code + i,
+				min_t(u32, CFM_LOAD_BUFSZ, (len - i))) < 0) {
+			pr_err("Error !!\n");
+			return -1;
+		}
+
+	return 0;
+}
+
+
+/* Load code.bin file through boot (reset) interface. */
+static int cycx_code_boot(void __iomem *addr, u8 *code, u32 len)
+{
+	void __iomem *pt_boot_cmd = addr + CMD_OFFSET;
+	u32 i;
+
+	/* boot buffer length */
+	writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16));
+	writew(GEN_DEFPAR, pt_boot_cmd);
+
+	if (wait_cyc(addr) < 0)
+		return -1;
+
+	writew(0x0000, pt_boot_cmd + sizeof(u16));
+	writew(0xc400, pt_boot_cmd + 2 * sizeof(u16));
+	writew(GEN_SET_SEG, pt_boot_cmd);
+
+	if (wait_cyc(addr) < 0)
+		return -1;
+
+	for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ)
+		if (buffer_load(addr, code + i,
+				min_t(u32, CFM_LOAD_BUFSZ, (len - i)))) {
+			pr_err("Error !!\n");
+			return -1;
+		}
+
+	return 0;
+}
+
+/* Load adapter from the memory image of the CYCX firmware module.
+ * o verify firmware integrity and compatibility
+ * o start adapter up */
+static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
+{
+	int i, j;
+	struct cycx_fw_header *img_hdr;
+	u8 *reset_image,
+	   *data_image,
+	   *code_image;
+	void __iomem *pt_cycld = hw->dpmbase + 0x400;
+	u16 cksum;
+
+	/* Announce */
+	pr_info("firmware signature=\"%s\"\n", cfm->signature);
+
+	/* Verify firmware signature */
+	if (strcmp(cfm->signature, CFM_SIGNATURE)) {
+		pr_err("load_cyc2x: not Cyclom-2X firmware!\n");
+		return -EINVAL;
+	}
+
+	pr_info("firmware version=%u\n", cfm->version);
+
+	/* Verify firmware module format version */
+	if (cfm->version != CFM_VERSION) {
+		pr_err("%s: firmware format %u rejected! Expecting %u.\n",
+		       __func__, cfm->version, CFM_VERSION);
+		return -EINVAL;
+	}
+
+	/* Verify firmware module length and checksum */
+	cksum = checksum((u8*)&cfm->info, sizeof(struct cycx_fw_info) +
+					  cfm->info.codesize);
+/*
+	FIXME cfm->info.codesize is off by 2
+	if (((len - sizeof(struct cycx_firmware) - 1) != cfm->info.codesize) ||
+*/
+	if (cksum != cfm->checksum) {
+		pr_err("%s: firmware corrupted!\n", __func__);
+		pr_err(" cdsize = 0x%x (expected 0x%lx)\n",
+		       len - (int)sizeof(struct cycx_firmware) - 1,
+		       cfm->info.codesize);
+		pr_err(" chksum = 0x%x (expected 0x%x)\n",
+		       cksum, cfm->checksum);
+		return -EINVAL;
+	}
+
+	/* If everything is ok, set reset, data and code pointers */
+	img_hdr = (struct cycx_fw_header *)&cfm->image;
+#ifdef FIRMWARE_DEBUG
+	pr_info("%s: image sizes\n", __func__);
+	pr_info(" reset=%lu\n", img_hdr->reset_size);
+	pr_info("  data=%lu\n", img_hdr->data_size);
+	pr_info("  code=%lu\n", img_hdr->code_size);
+#endif
+	reset_image = ((u8 *)img_hdr) + sizeof(struct cycx_fw_header);
+	data_image = reset_image + img_hdr->reset_size;
+	code_image = data_image + img_hdr->data_size;
+
+	/*---- Start load ----*/
+	/* Announce */
+	pr_info("loading firmware %s (ID=%u)...\n",
+		cfm->descr[0] ? cfm->descr : "unknown firmware",
+		cfm->info.codeid);
+
+	for (i = 0 ; i < 5 ; i++) {
+		/* Reset Cyclom hardware */
+		if (!reset_cyc2x(hw->dpmbase)) {
+			pr_err("dpm problem or board not found\n");
+			return -EINVAL;
+		}
+
+		/* Load reset.bin */
+		cycx_reset_boot(hw->dpmbase, reset_image, img_hdr->reset_size);
+		/* reset is waiting for boot */
+		writew(GEN_POWER_ON, pt_cycld);
+		msleep_interruptible(1 * 1000);
+
+		for (j = 0 ; j < 3 ; j++)
+			if (!readw(pt_cycld))
+				goto reset_loaded;
+			else
+				msleep_interruptible(1 * 1000);
+	}
+
+	pr_err("reset not started\n");
+	return -EINVAL;
+
+reset_loaded:
+	/* Load data.bin */
+	if (cycx_data_boot(hw->dpmbase, data_image, img_hdr->data_size)) {
+		pr_err("cannot load data file\n");
+		return -EINVAL;
+	}
+
+	/* Load code.bin */
+	if (cycx_code_boot(hw->dpmbase, code_image, img_hdr->code_size)) {
+		pr_err("cannot load code file\n");
+		return -EINVAL;
+	}
+
+	/* Prepare boot-time configuration data */
+	cycx_bootcfg(hw);
+
+	/* kick-off CPU */
+	cycx_start(hw->dpmbase);
+
+	/* Arthur Ganzert's tip: wait a while after the firmware loading...
+	   seg abr 26 17:17:12 EST 1999 - acme */
+	msleep_interruptible(7 * 1000);
+	pr_info("firmware loaded!\n");
+
+	/* enable interrupts */
+	cycx_inten(hw);
+
+	return 0;
+}
+
+/* Prepare boot-time firmware configuration data.
+ * o initialize configuration data area
+   From async.doc - V_3.4.0 - 07/18/1994
+   - As of now, only static buffers are available to the user.
+     So, the bit VD_RXDIRC must be set in 'valid'. That means that user
+     wants to use the static transmission and reception buffers. */
+static void cycx_bootcfg(struct cycx_hw *hw)
+{
+	/* use fixed buffers */
+	writeb(FIXED_BUFFERS, hw->dpmbase + CONF_OFFSET);
+}
+
+/* Detect Cyclom 2x adapter.
+ *	The adapter is detected by resetting it and then probing its
+ *	dual-port memory (see memory_exists()).
+ *	Return 1 if detected o.k. or 0 if failed.
+ *	Note:	This test is destructive! Adapter will be left in shutdown
+ *		state after the test. */
+static int detect_cyc2x(void __iomem *addr)
+{
+	reset_cyc2x(addr);
+
+	return memory_exists(addr);
+}
+
+/* Miscellaneous */
+/* Get option's index into the options list.
+ *	Return option's index (1 .. N) or zero if option is invalid. */
+static int get_option_index(const long *optlist, long optval)
+{
+	int i = 1;
+
+	for (; i <= optlist[0]; ++i)
+		if (optlist[i] == optval)
+			return i;
+
+	return 0;
+}
+
+/* Reset adapter's CPU. */
+static int reset_cyc2x(void __iomem *addr)
+{
+	writeb(0, addr + RST_ENABLE);
+	msleep_interruptible(2 * 1000);
+	writeb(0, addr + RST_DISABLE);
+	msleep_interruptible(2 * 1000);
+
+	return memory_exists(addr);
+}
+
+/* Calculate 16-bit CRC using CCITT polynomial. */
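+/* Bit-at-a-time, MSB-first implementation with a zero initial value;
+ * 0x1021 is the CRC-16/CCITT generator polynomial x^16 + x^12 + x^5 + 1. */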
+static u16 checksum(u8 *buf, u32 len)
+{
+	u16 crc = 0;
+	u16 mask, flag;
+
+	for (; len; --len, ++buf)
+		for (mask = 0x80; mask; mask >>= 1) {
+			flag = (crc & 0x8000);
+			crc <<= 1;
+			crc |= ((*buf & mask) ? 1 : 0);
+
+			if (flag)
+				crc ^= 0x1021;
+		}
+
+	return crc;
+}
+
+module_init(cycx_drv_init);
+module_exit(cycx_drv_cleanup);
+
+/* End */
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/cycx_main.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/cycx_main.c
new file mode 100644
index 0000000..81fbbad
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/cycx_main.c
@@ -0,0 +1,346 @@
+/*
+* cycx_main.c	Cyclades Cyclom 2X WAN Link Driver. Main module.
+*
+* Author:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+*
+* Copyright:	(c) 1998-2003 Arnaldo Carvalho de Melo
+*
+* Based on sdlamain.c by Gene Kozin <genek@compuserve.com> &
+*			 Jaspreet Singh	<jaspreet@sangoma.com>
+*
+*		This program is free software; you can redistribute it and/or
+*		modify it under the terms of the GNU General Public License
+*		as published by the Free Software Foundation; either version
+*		2 of the License, or (at your option) any later version.
+* ============================================================================
+* Please look at the bitkeeper changelog (or any other scm tool that ends up
+* importing bitkeeper changelog or that replaces bitkeeper in the future as
+* main tool for linux development).
+* 
+* 2001/05/09	acme		Fix MODULE_DESC for debug, .bss nitpicks,
+* 				some cleanups
+* 2000/07/13	acme		remove useless #ifdef MODULE and crap
+*				#if KERNEL_VERSION > blah
+* 2000/07/06	acme		__exit at cyclomx_cleanup
+* 2000/04/02	acme		dprintk and cycx_debug
+* 				module_init/module_exit
+* 2000/01/21	acme		rename cyclomx_open to cyclomx_mod_inc_use_count
+*				and cyclomx_close to cyclomx_mod_dec_use_count
+* 2000/01/08	acme		cleanup
+* 1999/11/06	acme		cycx_down back to life (it needs to be
+*				called to iounmap the dpmbase)
+* 1999/08/09	acme		removed references to enable_tx_int
+*				use spinlocks instead of cli/sti in
+*				cyclomx_set_state
+* 1999/05/19	acme		works directly linked into the kernel
+*				init_waitqueue_head for 2.3.* kernel
+* 1999/05/18	acme		major cleanup (polling not needed), etc
+* 1998/08/28	acme		minor cleanup (ioctls for firmware deleted)
+*				queue_task activated
+* 1998/08/08	acme		Initial version.
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/stddef.h>	/* offsetof(), etc. */
+#include <linux/errno.h>	/* return codes */
+#include <linux/string.h>	/* inline memset(), etc. */
+#include <linux/slab.h>		/* kmalloc(), kfree() */
+#include <linux/kernel.h>	/* printk(), and other useful stuff */
+#include <linux/module.h>	/* support for loadable modules */
+#include <linux/ioport.h>	/* request_region(), release_region() */
+#include <linux/wanrouter.h>	/* WAN router definitions */
+#include <linux/cyclomx.h>	/* cyclomx common user API definitions */
+#include <linux/init.h>         /* __init (when not using as a module) */
+#include <linux/interrupt.h>
+
+unsigned int cycx_debug;
+
+MODULE_AUTHOR("Arnaldo Carvalho de Melo");
+MODULE_DESCRIPTION("Cyclom 2X Sync Card Driver.");
+MODULE_LICENSE("GPL");
+module_param(cycx_debug, int, 0);
+MODULE_PARM_DESC(cycx_debug, "cyclomx debug level");
+
+/* Defines & Macros */
+
+#define	CYCX_DRV_VERSION	0	/* version number */
+#define	CYCX_DRV_RELEASE	11	/* release (minor version) number */
+#define	CYCX_MAX_CARDS		1	/* max number of adapters */
+
+#define	CONFIG_CYCX_CARDS 1
+
+/* Function Prototypes */
+
+/* WAN link driver entry points */
+static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf);
+static int cycx_wan_shutdown(struct wan_device *wandev);
+
+/* Miscellaneous functions */
+static irqreturn_t cycx_isr(int irq, void *dev_id);
+
+/* Global Data
+ * Note: All data must be explicitly initialized!!!
+ */
+
+/* private data */
+static const char cycx_drvname[] = "cyclomx";
+static const char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver";
+static const char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
+			  "<acme@conectiva.com.br>";
+static int cycx_ncards = CONFIG_CYCX_CARDS;
+static struct cycx_device *cycx_card_array;	/* adapter data space */
+
+/* Kernel Loadable Module Entry Points */
+
+/*
+ * Module 'insert' entry point.
+ * o print announcement
+ * o allocate adapter data space
+ * o initialize static data
+ * o register all cards with WAN router
+ * o calibrate Cyclom 2X shared memory access delay.
+ *
+ * Return:	0	Ok
+ *		< 0	error.
+ * Context:	process
+ */
+static int __init cycx_init(void)
+{
+	int cnt, err = -ENOMEM;
+
+	pr_info("%s v%u.%u %s\n",
+		cycx_fullname, CYCX_DRV_VERSION, CYCX_DRV_RELEASE,
+		cycx_copyright);
+
+	/* Verify number of cards and allocate adapter data space */
+	cycx_ncards = min_t(int, cycx_ncards, CYCX_MAX_CARDS);
+	cycx_ncards = max_t(int, cycx_ncards, 1);
+	cycx_card_array = kcalloc(cycx_ncards, sizeof(struct cycx_device), GFP_KERNEL);
+	if (!cycx_card_array)
+		goto out;
+
+
+	/* Register adapters with WAN router */
+	for (cnt = 0; cnt < cycx_ncards; ++cnt) {
+		struct cycx_device *card = &cycx_card_array[cnt];
+		struct wan_device *wandev = &card->wandev;
+
+		sprintf(card->devname, "%s%d", cycx_drvname, cnt + 1);
+		wandev->magic    = ROUTER_MAGIC;
+		wandev->name     = card->devname;
+		wandev->private  = card;
+		wandev->setup    = cycx_wan_setup;
+		wandev->shutdown = cycx_wan_shutdown;
+		err = register_wan_device(wandev);
+
+		if (err) {
+			pr_err("%s registration failed with error %d!\n",
+			       card->devname, err);
+			break;
+		}
+	}
+
+	err = -ENODEV;
+	if (!cnt) {
+		kfree(cycx_card_array);
+		goto out;
+	}
+	err = 0;
+	cycx_ncards = cnt;	/* adjust actual number of cards */
+out:	return err;
+}
+
+/*
+ * Module 'remove' entry point.
+ * o unregister all adapters from the WAN router
+ * o release all remaining system resources
+ */
+static void __exit cycx_exit(void)
+{
+	int i = 0;
+
+	for (; i < cycx_ncards; ++i) {
+		struct cycx_device *card = &cycx_card_array[i];
+		unregister_wan_device(card->devname);
+	}
+
+	kfree(cycx_card_array);
+}
+
+/* WAN Device Driver Entry Points */
+/*
+ * Setup/configure WAN link driver.
+ * o check adapter state
+ * o make sure firmware is present in configuration
+ * o allocate interrupt vector
+ * o setup Cyclom 2X hardware
+ * o call appropriate routine to perform protocol-specific initialization
+ *
+ * This function is called when router handles ROUTER_SETUP IOCTL. The
+ * configuration structure is in kernel memory (including extended data, if
+ * any).
+ */
+static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf)
+{
+	int rc = -EFAULT;
+	struct cycx_device *card;
+	int irq;
+
+	/* Sanity checks */
+
+	if (!wandev || !wandev->private || !conf)
+		goto out;
+
+	card = wandev->private;
+	rc = -EBUSY;
+	if (wandev->state != WAN_UNCONFIGURED)
+		goto out;
+
+	rc = -EINVAL;
+	if (!conf->data_size || !conf->data) {
+		pr_err("%s: firmware not found in configuration data!\n",
+		       wandev->name);
+		goto out;
+	}
+
+	if (conf->irq <= 0) {
+		pr_err("%s: can't configure without IRQ!\n", wandev->name);
+		goto out;
+	}
+
+	/* Allocate IRQ */
+	irq = conf->irq == 2 ? 9 : conf->irq;	/* IRQ2 -> IRQ9 */
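+	/* On AT-style interrupt controllers the slave 8259 is cascaded
+	 * through IRQ2, so a request for IRQ2 is actually delivered on
+	 * IRQ9. */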
+
+	if (request_irq(irq, cycx_isr, 0, wandev->name, card)) {
+		pr_err("%s: can't reserve IRQ %d!\n", wandev->name, irq);
+		goto out;
+	}
+
+	/* Configure hardware, load firmware, etc. */
+	memset(&card->hw, 0, sizeof(card->hw));
+	card->hw.irq	 = irq;
+	card->hw.dpmsize = CYCX_WINDOWSIZE;
+	card->hw.fwid	 = CFID_X25_2X;
+	spin_lock_init(&card->lock);
+	init_waitqueue_head(&card->wait_stats);
+
+	rc = cycx_setup(&card->hw, conf->data, conf->data_size, conf->maddr);
+	if (rc)
+		goto out_irq;
+
+	/* Initialize WAN device data space */
+	wandev->irq       = irq;
+	wandev->dma       = wandev->ioport = 0;
+	wandev->maddr     = (unsigned long)card->hw.dpmbase;
+	wandev->msize     = card->hw.dpmsize;
+	wandev->hw_opt[2] = 0;
+	wandev->hw_opt[3] = card->hw.fwid;
+
+	/* Protocol-specific initialization */
+	switch (card->hw.fwid) {
+#ifdef CONFIG_CYCLOMX_X25
+	case CFID_X25_2X:
+		rc = cycx_x25_wan_init(card, conf);
+		break;
+#endif
+	default:
+		pr_err("%s: this firmware is not supported!\n", wandev->name);
+		rc = -EINVAL;
+	}
+
+	if (rc) {
+		cycx_down(&card->hw);
+		goto out_irq;
+	}
+
+	rc = 0;
+out:
+	return rc;
+out_irq:
+	free_irq(irq, card);
+	goto out;
+}
+
+/*
+ * Shut down WAN link driver.
+ * o shut down adapter hardware
+ * o release system resources.
+ *
+ * This function is called by the router when device is being unregistered or
+ * when it handles ROUTER_DOWN IOCTL.
+ */
+static int cycx_wan_shutdown(struct wan_device *wandev)
+{
+	int ret = -EFAULT;
+	struct cycx_device *card;
+
+	/* sanity checks */
+	if (!wandev || !wandev->private)
+		goto out;
+
+	ret = 0;
+	if (wandev->state == WAN_UNCONFIGURED)
+		goto out;
+
+	card = wandev->private;
+	wandev->state = WAN_UNCONFIGURED;
+	cycx_down(&card->hw);
+	pr_info("%s: irq %d being freed!\n", wandev->name, wandev->irq);
+	free_irq(wandev->irq, card);
+out:	return ret;
+}
+
+/* Miscellaneous */
+/*
+ * Cyclom 2X Interrupt Service Routine.
+ * o acknowledge Cyclom 2X hardware interrupt.
+ * o call protocol-specific interrupt service routine, if any.
+ */
+static irqreturn_t cycx_isr(int irq, void *dev_id)
+{
+	struct cycx_device *card = dev_id;
+
+	if (card->wandev.state == WAN_UNCONFIGURED)
+		goto out;
+
+	if (card->in_isr) {
+		pr_warn("%s: interrupt re-entrancy on IRQ %d!\n",
+			card->devname, card->wandev.irq);
+		goto out;
+	}
+
+	if (card->isr)
+		card->isr(card);
+	return IRQ_HANDLED;
+out:
+	return IRQ_NONE;
+}
+
+/* Set WAN device state.  */
+void cycx_set_state(struct cycx_device *card, int state)
+{
+	unsigned long flags;
+	char *string_state = NULL;
+
+	spin_lock_irqsave(&card->lock, flags);
+
+	if (card->wandev.state != state) {
+		switch (state) {
+		case WAN_CONNECTED:
+			string_state = "connected!";
+			break;
+		case WAN_DISCONNECTED:
+			string_state = "disconnected!";
+			break;
+		}
+		pr_info("%s: link %s\n", card->devname, string_state);
+		card->wandev.state = state;
+	}
+
+	card->state_tick = jiffies;
+	spin_unlock_irqrestore(&card->lock, flags);
+}
+
+module_init(cycx_init);
+module_exit(cycx_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/cycx_x25.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/cycx_x25.c
new file mode 100644
index 0000000..06f3f63
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/cycx_x25.c
@@ -0,0 +1,1602 @@
+/*
+* cycx_x25.c	Cyclom 2X WAN Link Driver.  X.25 module.
+*
+* Author:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+*
+* Copyright:	(c) 1998-2003 Arnaldo Carvalho de Melo
+*
+* Based on sdla_x25.c by Gene Kozin <genek@compuserve.com>
+*
+*		This program is free software; you can redistribute it and/or
+*		modify it under the terms of the GNU General Public License
+*		as published by the Free Software Foundation; either version
+*		2 of the License, or (at your option) any later version.
+* ============================================================================
+* 2001/01/12	acme		use dev_kfree_skb_irq on interrupt context
+* 2000/04/02	acme		dprintk, cycx_debug
+* 				fixed the bug introduced in get_dev_by_lcn and
+* 				get_dev_by_dte_addr by the anonymous hacker
+* 				that converted this driver to softnet
+* 2000/01/08	acme		cleanup
+* 1999/10/27	acme		use ARPHRD_HWX25 so that the X.25 stack knows
+*				that we have an X.25 stack implemented in
+*				firmware onboard
+* 1999/10/18	acme		support for X.25 sockets in if_send,
+*				beware: socket(AF_X25...) IS WORK IN PROGRESS,
+*				TCP/IP over X.25 via wanrouter not affected,
+*				working.
+* 1999/10/09	acme		chan_disc renamed to chan_disconnect,
+* 				began adding support for X.25 sockets:
+* 				conf->protocol in new_if
+* 1999/10/05	acme		fixed return E... to return -E...
+* 1999/08/10	acme		serialized access to the card thru a spinlock
+*				in x25_exec
+* 1999/08/09	acme		removed per channel spinlocks
+*				removed references to enable_tx_int
+* 1999/05/28	acme		fixed nibble_to_byte, ackvc now properly treated
+*				if_send simplified
+* 1999/05/25	acme		fixed t1, t2, t21 & t23 configuration
+*				use spinlocks instead of cli/sti in some points
+* 1999/05/24	acme		finished the x25_get_stat function
+* 1999/05/23	acme		dev->type = ARPHRD_X25 (tcpdump only works,
+*				AFAIT, with ARPHRD_ETHER). This seems to be
+*				needed to use socket(AF_X25)...
+*				Now the config file must specify a peer media
+*				address for svc channels over a crossover cable.
+*				Removed hold_timeout from x25_channel_t,
+*				not used.
+*				A little enhancement in the DEBUG processing
+* 1999/05/22	acme		go to DISCONNECTED in disconnect_confirm_intr,
+*				instead of chan_disc.
+* 1999/05/16	marcelo		fixed timer initialization in SVCs
+* 1999/01/05	acme		x25_configure now get (most of) all
+*				parameters...
+* 1999/01/05	acme		pktlen now (correctly) uses log2 (value
+*				configured)
+* 1999/01/03	acme		judicious use of data types (u8, u16, u32, etc)
+* 1999/01/03	acme		cyx_isr: reset dpmbase to acknowledge
+*				indication (interrupt from cyclom 2x)
+* 1999/01/02	acme		cyx_isr: first hackings...
+* 1999/01/0203  acme 		when initializing an array don't give fewer
+*				elements than declared...
+* 				example: char send_cmd[6] = "?\xFF\x10";
+*          			you're gonna lose a couple of hours, 'cause your
+*				brain won't admit that there's an error in the
+*				above declaration...  the side effect is that
+*				memset is put into the unresolved symbols
+*				instead of using the inline memset functions...
+* 1999/01/02    acme 		began chan_connect, chan_send, x25_send
+* 1998/12/31	acme		x25_configure
+*				this code can be compiled as non module
+* 1998/12/27	acme		code cleanup
+*				IPX code wiped out! let's decrease code
+*				complexity for now, remember: I'm learning! :)
+*                               bps_to_speed_code OK
+* 1998/12/26	acme		Minimal debug code cleanup
+* 1998/08/08	acme		Initial version.
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define CYCLOMX_X25_DEBUG 1
+
+#include <linux/ctype.h>	/* isdigit() */
+#include <linux/errno.h>	/* return codes */
+#include <linux/if_arp.h>       /* ARPHRD_HWX25 */
+#include <linux/kernel.h>	/* printk(), and other useful stuff */
+#include <linux/module.h>
+#include <linux/string.h>	/* inline memset(), etc. */
+#include <linux/sched.h>
+#include <linux/slab.h>		/* kmalloc(), kfree() */
+#include <linux/stddef.h>	/* offsetof(), etc. */
+#include <linux/wanrouter.h>	/* WAN router definitions */
+
+#include <asm/byteorder.h>	/* htons(), etc. */
+
+#include <linux/cyclomx.h>	/* Cyclom 2X common user API definitions */
+#include <linux/cycx_x25.h>	/* X.25 firmware API definitions */
+
+#include <net/x25device.h>
+
+/* Defines & Macros */
+#define CYCX_X25_MAX_CMD_RETRY 5
+#define CYCX_X25_CHAN_MTU 2048	/* unfragmented logical channel MTU */
+
+/* Data Structures */
+/* This is an extension of the 'struct net_device' we create for each network
+   interface to keep the rest of X.25 channel-specific data. */
+struct cycx_x25_channel {
+	/* This member must be first. */
+	struct net_device *slave;	/* WAN slave */
+
+	char name[WAN_IFNAME_SZ+1];	/* interface name, ASCIIZ */
+	char addr[WAN_ADDRESS_SZ+1];	/* media address, ASCIIZ */
+	char *local_addr;		/* local media address, ASCIIZ -
+					   svc thru crossover cable */
+	s16 lcn;			/* logical channel number/conn.req.key*/
+	u8 link;
+	struct timer_list timer;	/* timer used for svc channel disc. */
+	u16 protocol;			/* ethertype, 0 - multiplexed */
+	u8 svc;				/* 0 - permanent, 1 - switched */
+	u8 state;			/* channel state */
+	u8 drop_sequence;		/* mark sequence for dropping */
+	u32 idle_tmout;			/* sec, before disconnecting */
+	struct sk_buff *rx_skb;		/* receive socket buffer */
+	struct cycx_device *card;	/* -> owner */
+	struct net_device_stats ifstats;/* interface statistics */
+};
+
+/* Function Prototypes */
+/* WAN link driver entry points. These are called by the WAN router module. */
+static int cycx_wan_update(struct wan_device *wandev),
+	   cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
+			   wanif_conf_t *conf),
+	   cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev);
+
+/* Network device interface */
+static int cycx_netdevice_init(struct net_device *dev);
+static int cycx_netdevice_open(struct net_device *dev);
+static int cycx_netdevice_stop(struct net_device *dev);
+static int cycx_netdevice_hard_header(struct sk_buff *skb,
+				      struct net_device *dev, u16 type,
+				      const void *daddr, const void *saddr,
+				      unsigned len);
+static int cycx_netdevice_rebuild_header(struct sk_buff *skb);
+static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
+							struct net_device *dev);
+
+static struct net_device_stats *
+			cycx_netdevice_get_stats(struct net_device *dev);
+
+/* Interrupt handlers */
+static void cycx_x25_irq_handler(struct cycx_device *card),
+	    cycx_x25_irq_tx(struct cycx_device *card, struct cycx_x25_cmd *cmd),
+	    cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd),
+	    cycx_x25_irq_log(struct cycx_device *card,
+			     struct cycx_x25_cmd *cmd),
+	    cycx_x25_irq_stat(struct cycx_device *card,
+			      struct cycx_x25_cmd *cmd),
+	    cycx_x25_irq_connect_confirm(struct cycx_device *card,
+					 struct cycx_x25_cmd *cmd),
+	    cycx_x25_irq_disconnect_confirm(struct cycx_device *card,
+					    struct cycx_x25_cmd *cmd),
+	    cycx_x25_irq_connect(struct cycx_device *card,
+				 struct cycx_x25_cmd *cmd),
+	    cycx_x25_irq_disconnect(struct cycx_device *card,
+				    struct cycx_x25_cmd *cmd),
+	    cycx_x25_irq_spurious(struct cycx_device *card,
+				  struct cycx_x25_cmd *cmd);
+
+/* X.25 firmware interface functions */
+static int cycx_x25_configure(struct cycx_device *card,
+			      struct cycx_x25_config *conf),
+	   cycx_x25_get_stats(struct cycx_device *card),
+	   cycx_x25_send(struct cycx_device *card, u8 link, u8 lcn, u8 bitm,
+			 int len, void *buf),
+	   cycx_x25_connect_response(struct cycx_device *card,
+				struct cycx_x25_channel *chan),
+	   cycx_x25_disconnect_response(struct cycx_device *card, u8 link,
+			   		u8 lcn);
+
+/* channel functions */
+static int cycx_x25_chan_connect(struct net_device *dev),
+	   cycx_x25_chan_send(struct net_device *dev, struct sk_buff *skb);
+
+static void cycx_x25_chan_disconnect(struct net_device *dev),
+	    cycx_x25_chan_send_event(struct net_device *dev, u8 event);
+
+/* Miscellaneous functions */
+static void cycx_x25_set_chan_state(struct net_device *dev, u8 state),
+	    cycx_x25_chan_timer(unsigned long d);
+
+static void nibble_to_byte(u8 *s, u8 *d, u8 len, u8 nibble),
+	    reset_timer(struct net_device *dev);
+
+static u8 bps_to_speed_code(u32 bps);
+static u8 cycx_log2(u32 n);
+
+static unsigned dec_to_uint(u8 *str, int len);
+
+static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev,
+						  s16 lcn);
+static struct net_device *
+	cycx_x25_get_dev_by_dte_addr(struct wan_device *wandev, char *dte);
+
+static void cycx_x25_chan_setup(struct net_device *dev);
+
+#ifdef CYCLOMX_X25_DEBUG
+static void hex_dump(char *msg, unsigned char *p, int len);
+static void cycx_x25_dump_config(struct cycx_x25_config *conf);
+static void cycx_x25_dump_stats(struct cycx_x25_stats *stats);
+static void cycx_x25_dump_devs(struct wan_device *wandev);
+#else
+#define hex_dump(msg, p, len)
+#define cycx_x25_dump_config(conf)
+#define cycx_x25_dump_stats(stats)
+#define cycx_x25_dump_devs(wandev)
+#endif
+/* Public Functions */
+
+/* X.25 Protocol Initialization routine.
+ *
+ * This routine is called by the main Cyclom 2X module during setup.  At this
+ * point adapter is completely initialized and X.25 firmware is running.
+ *  o configure adapter
+ *  o initialize protocol-specific fields of the adapter data space.
+ *
+ * Return:	0	o.k.
+ *		< 0	failure.  */
+int cycx_x25_wan_init(struct cycx_device *card, wandev_conf_t *conf)
+{
+	struct cycx_x25_config cfg;
+
+	/* Verify configuration ID */
+	if (conf->config_id != WANCONFIG_X25) {
+		pr_info("%s: invalid configuration ID %u!\n",
+			card->devname, conf->config_id);
+		return -EINVAL;
+	}
+
+	/* Initialize protocol-specific fields */
+	card->mbox  = card->hw.dpmbase + X25_MBOX_OFFS;
+	card->u.x.connection_keys = 0;
+	spin_lock_init(&card->u.x.lock);
+
+	/* Configure adapter. Here we set reasonable defaults, then parse
+	 * device configuration structure and set configuration options.
+	 * Most configuration options are verified and corrected (if
+	 * necessary) since we can't rely on the adapter to do so and don't
+	 * want it to fail either. */
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.link = 0;
+	cfg.clock = conf->clocking == WANOPT_EXTERNAL ? 8 : 55;
+	cfg.speed = bps_to_speed_code(conf->bps);
+	cfg.n3win = 7;
+	cfg.n2win = 2;
+	cfg.n2 = 5;
+	cfg.nvc = 1;
+	cfg.npvc = 1;
+	cfg.flags = 0x02; /* default = V35 */
+	cfg.t1 = 10;   /* line carrier timeout */
+	cfg.t2 = 29;   /* tx timeout */
+	cfg.t21 = 180; /* CALL timeout */
+	cfg.t23 = 180; /* CLEAR timeout */
+
+	/* adjust MTU */
+	if (!conf->mtu || conf->mtu >= 512)
+		card->wandev.mtu = 512;
+	else if (conf->mtu >= 256)
+		card->wandev.mtu = 256;
+	else if (conf->mtu >= 128)
+		card->wandev.mtu = 128;
+	else
+		card->wandev.mtu = 64;
+
+	cfg.pktlen = cycx_log2(card->wandev.mtu);
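+	/* e.g. an MTU of 64, 128, 256 or 512 becomes a packet-length code
+	 * of 6, 7, 8 or 9 respectively. */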
+
+	if (conf->station == WANOPT_DTE) {
+		cfg.locaddr = 3; /* DTE */
+		cfg.remaddr = 1; /* DCE */
+	} else {
+		cfg.locaddr = 1; /* DCE */
+		cfg.remaddr = 3; /* DTE */
+	}
+
+	if (conf->interface == WANOPT_RS232)
+	        cfg.flags = 0;      /* FIXME just reset the 2nd bit */
+
+	if (conf->u.x25.hi_pvc) {
+		card->u.x.hi_pvc = min_t(unsigned int, conf->u.x25.hi_pvc, 4095);
+		card->u.x.lo_pvc = min_t(unsigned int, conf->u.x25.lo_pvc, card->u.x.hi_pvc);
+	}
+
+	if (conf->u.x25.hi_svc) {
+		card->u.x.hi_svc = min_t(unsigned int, conf->u.x25.hi_svc, 4095);
+		card->u.x.lo_svc = min_t(unsigned int, conf->u.x25.lo_svc, card->u.x.hi_svc);
+	}
+
+	if (card->u.x.lo_pvc == 255)
+		cfg.npvc = 0;
+	else
+		cfg.npvc = card->u.x.hi_pvc - card->u.x.lo_pvc + 1;
+
+	cfg.nvc = card->u.x.hi_svc - card->u.x.lo_svc + 1 + cfg.npvc;
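+	/* For instance, lo_pvc = 1, hi_pvc = 2, lo_svc = 3 and hi_svc = 6
+	 * give npvc = 2 and nvc = 6 (four SVCs plus the two PVCs). */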
+
+	if (conf->u.x25.hdlc_window)
+		cfg.n2win = min_t(unsigned int, conf->u.x25.hdlc_window, 7);
+
+	if (conf->u.x25.pkt_window)
+		cfg.n3win = min_t(unsigned int, conf->u.x25.pkt_window, 7);
+
+	if (conf->u.x25.t1)
+		cfg.t1 = min_t(unsigned int, conf->u.x25.t1, 30);
+
+	if (conf->u.x25.t2)
+		cfg.t2 = min_t(unsigned int, conf->u.x25.t2, 30);
+
+	if (conf->u.x25.t11_t21)
+		cfg.t21 = min_t(unsigned int, conf->u.x25.t11_t21, 30);
+
+	if (conf->u.x25.t13_t23)
+		cfg.t23 = min_t(unsigned int, conf->u.x25.t13_t23, 30);
+
+	if (conf->u.x25.n2)
+		cfg.n2 = min_t(unsigned int, conf->u.x25.n2, 30);
+
+	/* initialize adapter */
+	if (cycx_x25_configure(card, &cfg))
+		return -EIO;
+
+	/* Initialize protocol-specific fields of adapter data space */
+	card->wandev.bps	= conf->bps;
+	card->wandev.interface	= conf->interface;
+	card->wandev.clocking	= conf->clocking;
+	card->wandev.station	= conf->station;
+	card->isr		= cycx_x25_irq_handler;
+	card->exec		= NULL;
+	card->wandev.update	= cycx_wan_update;
+	card->wandev.new_if	= cycx_wan_new_if;
+	card->wandev.del_if	= cycx_wan_del_if;
+	card->wandev.state	= WAN_DISCONNECTED;
+
+	return 0;
+}
+
+/* WAN Device Driver Entry Points */
+/* Update device status & statistics. */
+static int cycx_wan_update(struct wan_device *wandev)
+{
+	/* sanity checks */
+	if (!wandev || !wandev->private)
+		return -EFAULT;
+
+	if (wandev->state == WAN_UNCONFIGURED)
+		return -ENODEV;
+
+	cycx_x25_get_stats(wandev->private);
+
+	return 0;
+}
+
+/* Create new logical channel.
+ * This routine is called by the router when ROUTER_IFNEW IOCTL is being
+ * handled.
+ * o parse media- and hardware-specific configuration
+ * o make sure that a new channel can be created
+ * o allocate resources, if necessary
+ * o prepare network device structure for registration.
+ *
+ * Return:	0	o.k.
+ *		< 0	failure (channel will not be created) */
+static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
+			   wanif_conf_t *conf)
+{
+	struct cycx_device *card = wandev->private;
+	struct cycx_x25_channel *chan;
+	int err = 0;
+
+	if (!conf->name[0] || strlen(conf->name) > WAN_IFNAME_SZ) {
+		pr_info("%s: invalid interface name!\n", card->devname);
+		return -EINVAL;
+	}
+
+	dev = alloc_netdev(sizeof(struct cycx_x25_channel), conf->name,
+			     cycx_x25_chan_setup);
+	if (!dev)
+		return -ENOMEM;
+
+	chan = netdev_priv(dev);
+	strcpy(chan->name, conf->name);
+	chan->card = card;
+	chan->link = conf->port;
+	chan->protocol = conf->protocol ? ETH_P_X25 : ETH_P_IP;
+	chan->rx_skb = NULL;
+	/* only used in svc connected thru crossover cable */
+	chan->local_addr = NULL;
+
+	if (conf->addr[0] == '@') {	/* SVC */
+		int len = strlen(conf->local_addr);
+
+		if (len) {
+			if (len > WAN_ADDRESS_SZ) {
+				pr_err("%s: %s local addr too long!\n",
+				       wandev->name, chan->name);
+				err = -EINVAL;
+				goto error;
+			} else {
+				chan->local_addr = kmalloc(len + 1, GFP_KERNEL);
+
+				if (!chan->local_addr) {
+					err = -ENOMEM;
+					goto error;
+				}
+			}
+
+			strncpy(chan->local_addr, conf->local_addr,
+				WAN_ADDRESS_SZ);
+		}
+
+		chan->svc = 1;
+		strncpy(chan->addr, &conf->addr[1], WAN_ADDRESS_SZ);
+		init_timer(&chan->timer);
+		chan->timer.function	= cycx_x25_chan_timer;
+		chan->timer.data	= (unsigned long)dev;
+
+		/* Set channel timeouts (default if not specified) */
+		chan->idle_tmout = conf->idle_timeout ? conf->idle_timeout : 90;
+	} else if (isdigit(conf->addr[0])) {	/* PVC */
+		s16 lcn = dec_to_uint(conf->addr, 0);
+
+		if (lcn >= card->u.x.lo_pvc && lcn <= card->u.x.hi_pvc)
+			chan->lcn = lcn;
+		else {
+			pr_err("%s: PVC %u is out of range on interface %s!\n",
+			       wandev->name, lcn, chan->name);
+			err = -EINVAL;
+			goto error;
+		}
+	} else {
+		pr_err("%s: invalid media address on interface %s!\n",
+		       wandev->name, chan->name);
+		err = -EINVAL;
+		goto error;
+	}
+
+	return 0;
+
+error:
+	free_netdev(dev);
+	return err;
+}
+
+/* Delete logical channel. */
+static int cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev)
+{
+	struct cycx_x25_channel *chan = netdev_priv(dev);
+
+	if (chan->svc) {
+		kfree(chan->local_addr);
+		if (chan->state == WAN_CONNECTED)
+			del_timer(&chan->timer);
+	}
+
+	return 0;
+}
+
+
+/* Network Device Interface */
+
+static const struct header_ops cycx_header_ops = {
+	.create = cycx_netdevice_hard_header,
+	.rebuild = cycx_netdevice_rebuild_header,
+};
+
+static const struct net_device_ops cycx_netdev_ops = {
+	.ndo_init	= cycx_netdevice_init,
+	.ndo_open	= cycx_netdevice_open,
+	.ndo_stop	= cycx_netdevice_stop,
+	.ndo_start_xmit	= cycx_netdevice_hard_start_xmit,
+	.ndo_get_stats	= cycx_netdevice_get_stats,
+};
+
+static void cycx_x25_chan_setup(struct net_device *dev)
+{
+	/* Initialize device driver entry points */
+	dev->netdev_ops		= &cycx_netdev_ops;
+	dev->header_ops		= &cycx_header_ops;
+
+	/* Initialize media-specific parameters */
+	dev->mtu		= CYCX_X25_CHAN_MTU;
+	dev->type		= ARPHRD_HWX25;	/* ARP h/w type */
+	dev->hard_header_len	= 0;		/* media header length */
+	dev->addr_len		= 0;		/* hardware address length */
+}
+
+/* Initialize Linux network interface.
+ *
+ * This routine is called only once for each interface, during Linux network
+ * interface registration.  Returning anything but zero will fail interface
+ * registration. */
+static int cycx_netdevice_init(struct net_device *dev)
+{
+	struct cycx_x25_channel *chan = netdev_priv(dev);
+	struct cycx_device *card = chan->card;
+	struct wan_device *wandev = &card->wandev;
+
+	if (!chan->svc)
+		*(__be16*)dev->dev_addr = htons(chan->lcn);
+
+	/* Initialize hardware parameters (just for reference) */
+	dev->irq		= wandev->irq;
+	dev->dma		= wandev->dma;
+	dev->base_addr		= wandev->ioport;
+	dev->mem_start		= (unsigned long)wandev->maddr;
+	dev->mem_end		= (unsigned long)(wandev->maddr +
+						  wandev->msize - 1);
+	dev->flags		|= IFF_NOARP;
+
+	/* Set transmit buffer queue length */
+	dev->tx_queue_len	= 10;
+
+	/* Initialize socket buffers */
+	cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
+
+	return 0;
+}
+
+/* Open network interface.
+ * o prevent module from unloading by incrementing use count
+ * o if link is disconnected then initiate connection
+ *
+ * Return 0 if O.k. or errno.  */
+static int cycx_netdevice_open(struct net_device *dev)
+{
+	if (netif_running(dev))
+		return -EBUSY; /* only one open is allowed */
+
+	netif_start_queue(dev);
+	return 0;
+}
+
+/* Close network interface.
+ * o reset flags.
+ * o if there's no more open channels then disconnect physical link. */
+static int cycx_netdevice_stop(struct net_device *dev)
+{
+	struct cycx_x25_channel *chan = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+
+	if (chan->state == WAN_CONNECTED || chan->state == WAN_CONNECTING)
+		cycx_x25_chan_disconnect(dev);
+
+	return 0;
+}
+
+/* Build media header.
+ * o encapsulate packet according to encapsulation type.
+ *
+ * The trick here is to put packet type (Ethertype) into 'protocol' field of
+ * the socket buffer, so that we don't forget it.  If encapsulation fails,
+ * set skb->protocol to 0 and discard packet later.
+ *
+ * Return:	media header length. */
+static int cycx_netdevice_hard_header(struct sk_buff *skb,
+				      struct net_device *dev, u16 type,
+				      const void *daddr, const void *saddr,
+				      unsigned len)
+{
+	skb->protocol = htons(type);
+
+	return dev->hard_header_len;
+}
+
+/* Re-build media header.
+ * Return:	1	physical address resolved.
+ *		0	physical address not resolved */
+static int cycx_netdevice_rebuild_header(struct sk_buff *skb)
+{
+	return 1;
+}
+
+/* Send a packet on a network interface.
+ * o set busy flag (marks start of the transmission).
+ * o check link state. If link is not up, then drop the packet.
+ * o check channel status. If it's down then initiate a call.
+ * o pass a packet to corresponding WAN device.
+ * o free socket buffer
+ *
+ * Return:	0	complete (socket buffer must be freed)
+ *		non-0	packet may be re-transmitted (tbusy must be set)
+ *
+ * Notes:
+ * 1. This routine is called either by the protocol stack or by the "net
+ *    bottom half" (with interrupts enabled).
+ * 2. Setting tbusy flag will inhibit further transmit requests from the
+ *    protocol stack and can be used for flow control with protocol layer. */
+static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
+							struct net_device *dev)
+{
+	struct cycx_x25_channel *chan = netdev_priv(dev);
+	struct cycx_device *card = chan->card;
+
+	if (!chan->svc)
+		chan->protocol = ntohs(skb->protocol);
+
+	if (card->wandev.state != WAN_CONNECTED)
+		++chan->ifstats.tx_dropped;
+	else if (chan->svc && chan->protocol &&
+		 chan->protocol != ntohs(skb->protocol)) {
+		pr_info("%s: unsupported Ethertype 0x%04X on interface %s!\n",
+			card->devname, ntohs(skb->protocol), dev->name);
+		++chan->ifstats.tx_errors;
+	} else if (chan->protocol == ETH_P_IP) {
+		switch (chan->state) {
+		case WAN_DISCONNECTED:
+			if (cycx_x25_chan_connect(dev)) {
+				netif_stop_queue(dev);
+				return NETDEV_TX_BUSY;
+			}
+			/* fall thru */
+		case WAN_CONNECTED:
+			reset_timer(dev);
+			dev->trans_start = jiffies;
+			netif_stop_queue(dev);
+
+			if (cycx_x25_chan_send(dev, skb))
+				return NETDEV_TX_BUSY;
+
+			break;
+		default:
+			++chan->ifstats.tx_dropped;
+			++card->wandev.stats.tx_dropped;
+		}
+	} else { /* chan->protocol == ETH_P_X25 */
+		switch (skb->data[0]) {
+		case X25_IFACE_DATA:
+			break;
+		case X25_IFACE_CONNECT:
+			cycx_x25_chan_connect(dev);
+			goto free_packet;
+		case X25_IFACE_DISCONNECT:
+			cycx_x25_chan_disconnect(dev);
+			goto free_packet;
+	        default:
+			pr_info("%s: unknown %d x25-iface request on %s!\n",
+				card->devname, skb->data[0], dev->name);
+			++chan->ifstats.tx_errors;
+			goto free_packet;
+		}
+
+		skb_pull(skb, 1); /* Remove control byte */
+		reset_timer(dev);
+		dev->trans_start = jiffies;
+		netif_stop_queue(dev);
+
+		if (cycx_x25_chan_send(dev, skb)) {
+			/* prepare for future retransmissions */
+			skb_push(skb, 1);
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+free_packet:
+	dev_kfree_skb(skb);
+
+	return NETDEV_TX_OK;
+}
+
+/* Get Ethernet-style interface statistics.
+ * Return a pointer to struct net_device_stats */
+static struct net_device_stats *cycx_netdevice_get_stats(struct net_device *dev)
+{
+	struct cycx_x25_channel *chan = netdev_priv(dev);
+
+	return chan ? &chan->ifstats : NULL;
+}
+
+/* Interrupt Handlers */
+/* X.25 Interrupt Service Routine. */
+static void cycx_x25_irq_handler(struct cycx_device *card)
+{
+	struct cycx_x25_cmd cmd;
+	u16 z = 0;
+
+	card->in_isr = 1;
+	card->buff_int_mode_unbusy = 0;
+	cycx_peek(&card->hw, X25_RXMBOX_OFFS, &cmd, sizeof(cmd));
+
+	switch (cmd.command) {
+	case X25_DATA_INDICATION:
+		cycx_x25_irq_rx(card, &cmd);
+		break;
+	case X25_ACK_FROM_VC:
+		cycx_x25_irq_tx(card, &cmd);
+		break;
+	case X25_LOG:
+		cycx_x25_irq_log(card, &cmd);
+		break;
+	case X25_STATISTIC:
+		cycx_x25_irq_stat(card, &cmd);
+		break;
+	case X25_CONNECT_CONFIRM:
+		cycx_x25_irq_connect_confirm(card, &cmd);
+		break;
+	case X25_CONNECT_INDICATION:
+		cycx_x25_irq_connect(card, &cmd);
+		break;
+	case X25_DISCONNECT_INDICATION:
+		cycx_x25_irq_disconnect(card, &cmd);
+		break;
+	case X25_DISCONNECT_CONFIRM:
+		cycx_x25_irq_disconnect_confirm(card, &cmd);
+		break;
+	case X25_LINE_ON:
+		cycx_set_state(card, WAN_CONNECTED);
+		break;
+	case X25_LINE_OFF:
+		cycx_set_state(card, WAN_DISCONNECTED);
+		break;
+	default:
+		cycx_x25_irq_spurious(card, &cmd);
+		break;
+	}
+
+	cycx_poke(&card->hw, 0, &z, sizeof(z));
+	cycx_poke(&card->hw, X25_RXMBOX_OFFS, &z, sizeof(z));
+	card->in_isr = 0;
+}
+
+/* Transmit interrupt handler.
+ *	o Release socket buffer
+ *	o Clear 'tbusy' flag */
+static void cycx_x25_irq_tx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
+{
+	struct net_device *dev;
+	struct wan_device *wandev = &card->wandev;
+	u8 lcn;
+
+	cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+
+	/* unbusy device and then dev_tint(); */
+	dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
+	if (dev) {
+		card->buff_int_mode_unbusy = 1;
+		netif_wake_queue(dev);
+	} else
+		pr_err("%s: ackvc for nonexistent lcn %d\n", card->devname, lcn);
+}
+
+/* Receive interrupt handler.
+ * This routine handles fragmented IP packets using M-bit according to the
+ * RFC1356.
+ * o map logical channel number to network interface.
+ * o allocate socket buffer or append received packet to the existing one.
+ * o if M-bit is reset (i.e. it's the last packet in a sequence) then
+ *   decapsulate packet and pass socket buffer to the protocol stack.
+ *
+ * Notes:
+ * 1. When allocating a socket buffer, if M-bit is set then more data is
+ *    coming and we have to allocate buffer for the maximum IP packet size
+ *    expected on this channel.
+ * 2. If something goes wrong and X.25 packet has to be dropped (e.g. no
+ *    socket buffers available) the whole packet sequence must be discarded. */
+static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
+{
+	struct wan_device *wandev = &card->wandev;
+	struct net_device *dev;
+	struct cycx_x25_channel *chan;
+	struct sk_buff *skb;
+	u8 bitm, lcn;
+	int pktlen = cmd->len - 5;
+
+	cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+	cycx_peek(&card->hw, cmd->buf + 4, &bitm, sizeof(bitm));
+	bitm &= 0x10;
+
+	dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
+	if (!dev) {
+		/* Invalid channel, discard packet */
+		pr_info("%s: receiving on orphaned LCN %d!\n",
+			card->devname, lcn);
+		return;
+	}
+
+	chan = netdev_priv(dev);
+	reset_timer(dev);
+
+	if (chan->drop_sequence) {
+		if (!bitm)
+			chan->drop_sequence = 0;
+		else
+			return;
+	}
+
+	if ((skb = chan->rx_skb) == NULL) {
+		/* Allocate new socket buffer */
+		int bufsize = bitm ? dev->mtu : pktlen;
+
+		if ((skb = dev_alloc_skb((chan->protocol == ETH_P_X25 ? 1 : 0) +
+					 bufsize +
+					 dev->hard_header_len)) == NULL) {
+			pr_info("%s: no socket buffers available!\n",
+				card->devname);
+			chan->drop_sequence = 1;
+			++chan->ifstats.rx_dropped;
+			return;
+		}
+
+		if (chan->protocol == ETH_P_X25) /* X.25 socket layer control */
+			/* 0 = data packet (dev_alloc_skb zeroed skb->data) */
+			skb_put(skb, 1);
+
+		skb->dev = dev;
+		skb->protocol = htons(chan->protocol);
+		chan->rx_skb = skb;
+	}
+
+	if (skb_tailroom(skb) < pktlen) {
+		/* No room for the packet. Call off the whole thing! */
+		dev_kfree_skb_irq(skb);
+		chan->rx_skb = NULL;
+
+		if (bitm)
+			chan->drop_sequence = 1;
+
+		pr_info("%s: unexpectedly long packet sequence on interface %s!\n",
+			card->devname, dev->name);
+		++chan->ifstats.rx_length_errors;
+		return;
+	}
+
+	/* Append packet to the socket buffer  */
+	cycx_peek(&card->hw, cmd->buf + 5, skb_put(skb, pktlen), pktlen);
+
+	if (bitm)
+		return; /* more data is coming */
+
+	chan->rx_skb = NULL;		/* dequeue packet */
+
+	++chan->ifstats.rx_packets;
+	chan->ifstats.rx_bytes += pktlen;
+
+	skb_reset_mac_header(skb);
+	netif_rx(skb);
+}
+
+/* Connect interrupt handler. */
+static void cycx_x25_irq_connect(struct cycx_device *card,
+				 struct cycx_x25_cmd *cmd)
+{
+	struct wan_device *wandev = &card->wandev;
+	struct net_device *dev = NULL;
+	struct cycx_x25_channel *chan;
+	u8 d[32],
+	   loc[24],
+	   rem[24];
+	u8 lcn, sizeloc, sizerem;
+
+	cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+	cycx_peek(&card->hw, cmd->buf + 5, &sizeloc, sizeof(sizeloc));
+	cycx_peek(&card->hw, cmd->buf + 6, d, cmd->len - 6);
+
+	sizerem = sizeloc >> 4;
+	sizeloc &= 0x0F;
+
+	loc[0] = rem[0] = '\0';
+
+	if (sizeloc)
+		nibble_to_byte(d, loc, sizeloc, 0);
+
+	if (sizerem)
+		nibble_to_byte(d + (sizeloc >> 1), rem, sizerem, sizeloc & 1);
+
+	dprintk(1, KERN_INFO "%s:lcn=%d, local=%s, remote=%s\n",
+			  __func__, lcn, loc, rem);
+
+	dev = cycx_x25_get_dev_by_dte_addr(wandev, rem);
+	if (!dev) {
+		/* Invalid channel, discard packet */
+		pr_info("%s: connect not expected: remote %s!\n",
+			card->devname, rem);
+		return;
+	}
+
+	chan = netdev_priv(dev);
+	chan->lcn = lcn;
+	cycx_x25_connect_response(card, chan);
+	cycx_x25_set_chan_state(dev, WAN_CONNECTED);
+}
+
+/* Connect confirm interrupt handler. */
+static void cycx_x25_irq_connect_confirm(struct cycx_device *card,
+					 struct cycx_x25_cmd *cmd)
+{
+	struct wan_device *wandev = &card->wandev;
+	struct net_device *dev;
+	struct cycx_x25_channel *chan;
+	u8 lcn, key;
+
+	cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+	cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key));
+	dprintk(1, KERN_INFO "%s: %s:lcn=%d, key=%d\n",
+			  card->devname, __func__, lcn, key);
+
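+	/* While the call was pending, x25_place_call() parked the negated
+	 * connection key in chan->lcn, so the channel is looked up by -key
+	 * and only now learns its real logical channel number. */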
+	dev = cycx_x25_get_dev_by_lcn(wandev, -key);
+	if (!dev) {
+		/* Invalid channel, discard packet */
+		clear_bit(--key, (void*)&card->u.x.connection_keys);
+		pr_info("%s: connect confirm not expected: lcn %d, key=%d!\n",
+			card->devname, lcn, key);
+		return;
+	}
+
+	clear_bit(--key, (void*)&card->u.x.connection_keys);
+	chan = netdev_priv(dev);
+	chan->lcn = lcn;
+	cycx_x25_set_chan_state(dev, WAN_CONNECTED);
+}
+
+/* Disconnect confirm interrupt handler. */
+static void cycx_x25_irq_disconnect_confirm(struct cycx_device *card,
+					    struct cycx_x25_cmd *cmd)
+{
+	struct wan_device *wandev = &card->wandev;
+	struct net_device *dev;
+	u8 lcn;
+
+	cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+	dprintk(1, KERN_INFO "%s: %s:lcn=%d\n",
+			  card->devname, __func__, lcn);
+	dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
+	if (!dev) {
+		/* Invalid channel, discard packet */
+		pr_info("%s:disconnect confirm not expected!:lcn %d\n",
+			card->devname, lcn);
+		return;
+	}
+
+	cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
+}
+
+/* disconnect interrupt handler. */
+static void cycx_x25_irq_disconnect(struct cycx_device *card,
+				    struct cycx_x25_cmd *cmd)
+{
+	struct wan_device *wandev = &card->wandev;
+	struct net_device *dev;
+	u8 lcn;
+
+	cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+	dprintk(1, KERN_INFO "%s:lcn=%d\n", __func__, lcn);
+
+	dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
+	if (dev) {
+		struct cycx_x25_channel *chan = netdev_priv(dev);
+
+		cycx_x25_disconnect_response(card, chan->link, lcn);
+		cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
+	} else
+		cycx_x25_disconnect_response(card, 0, lcn);
+}
+
+/* LOG interrupt handler. */
+static void cycx_x25_irq_log(struct cycx_device *card, struct cycx_x25_cmd *cmd)
+{
+#if CYCLOMX_X25_DEBUG
+	char bf[20];
+	u16 size, toread, link, msg_code;
+	u8 code, routine;
+
+	cycx_peek(&card->hw, cmd->buf, &msg_code, sizeof(msg_code));
+	cycx_peek(&card->hw, cmd->buf + 2, &link, sizeof(link));
+	cycx_peek(&card->hw, cmd->buf + 4, &size, sizeof(size));
+	/* at most 20 bytes are available... thanks to Daniela :) */
+	toread = size < 20 ? size : 20;
+	cycx_peek(&card->hw, cmd->buf + 10, &bf, toread);
+	cycx_peek(&card->hw, cmd->buf + 10 + toread, &code, 1);
+	cycx_peek(&card->hw, cmd->buf + 10 + toread + 1, &routine, 1);
+
+	pr_info("cycx_x25_irq_handler: X25_LOG (0x4500) indic.:\n");
+	pr_info("cmd->buf=0x%X\n", cmd->buf);
+	pr_info("Log message code=0x%X\n", msg_code);
+	pr_info("Link=%d\n", link);
+	pr_info("log code=0x%X\n", code);
+	pr_info("log routine=0x%X\n", routine);
+	pr_info("Message size=%d\n", size);
+	hex_dump("Message", bf, toread);
+#endif
+}
+
+/* STATISTIC interrupt handler. */
+static void cycx_x25_irq_stat(struct cycx_device *card,
+			      struct cycx_x25_cmd *cmd)
+{
+	cycx_peek(&card->hw, cmd->buf, &card->u.x.stats,
+		  sizeof(card->u.x.stats));
+	hex_dump("cycx_x25_irq_stat", (unsigned char*)&card->u.x.stats,
+		 sizeof(card->u.x.stats));
+	cycx_x25_dump_stats(&card->u.x.stats);
+	wake_up_interruptible(&card->wait_stats);
+}
+
+/* Spurious interrupt handler.
+ * o print a warning
+ * If number of spurious interrupts exceeded some limit, then ??? */
+static void cycx_x25_irq_spurious(struct cycx_device *card,
+				  struct cycx_x25_cmd *cmd)
+{
+	pr_info("%s: spurious interrupt (0x%X)!\n",
+		card->devname, cmd->command);
+}
+#ifdef CYCLOMX_X25_DEBUG
+static void hex_dump(char *msg, unsigned char *p, int len)
+{
+	print_hex_dump(KERN_INFO, msg, DUMP_PREFIX_OFFSET, 16, 1,
+		       p, len, true);
+}
+#endif
+
+/* Cyclom 2X Firmware-Specific Functions */
+/* Exec X.25 command. */
+static int x25_exec(struct cycx_device *card, int command, int link,
+		    void *d1, int len1, void *d2, int len2)
+{
+	struct cycx_x25_cmd c;
+	unsigned long flags;
+	u32 addr = 0x1200 + 0x2E0 * link + 0x1E2;
+	u8 retry = CYCX_X25_MAX_CMD_RETRY;
+	int err = 0;
+
+	c.command = command;
+	c.link = link;
+	c.len = len1 + len2;
+
+	spin_lock_irqsave(&card->u.x.lock, flags);
+
+	/* write command */
+	cycx_poke(&card->hw, X25_MBOX_OFFS, &c, sizeof(c) - sizeof(c.buf));
+
+	/* write X.25 data */
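+	/* d1/d2 for link n land in a per-link window at
+	 * 0x1200 + n * 0x2E0 + 0x1E2; if the second chunk exceeds 254 bytes,
+	 * everything past its first 249 bytes spills into a second window at
+	 * 0xA00 + n * 0x400, presumably matching the firmware's buffer
+	 * layout. */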
+	if (d1) {
+		cycx_poke(&card->hw, addr, d1, len1);
+
+		if (d2) {
+			if (len2 > 254) {
+				u32 addr1 = 0xA00 + 0x400 * link;
+
+				cycx_poke(&card->hw, addr + len1, d2, 249);
+				cycx_poke(&card->hw, addr1, ((u8*)d2) + 249,
+					  len2 - 249);
+			} else
+				cycx_poke(&card->hw, addr + len1, d2, len2);
+		}
+	}
+
+	/* generate interruption, executing command */
+	cycx_intr(&card->hw);
+
+	/* wait till card->mbox == 0 */
+	do {
+		err = cycx_exec(card->mbox);
+	} while (retry-- && err);
+
+	spin_unlock_irqrestore(&card->u.x.lock, flags);
+
+	return err;
+}
+
+/* Configure adapter. */
+static int cycx_x25_configure(struct cycx_device *card,
+			      struct cycx_x25_config *conf)
+{
+	struct {
+		u16 nlinks;
+		struct cycx_x25_config conf[2];
+	} x25_cmd_conf;
+
+	memset(&x25_cmd_conf, 0, sizeof(x25_cmd_conf));
+	x25_cmd_conf.nlinks = 2;
+	x25_cmd_conf.conf[0] = *conf;
+	/* FIXME: we need to find a way in the wanrouter framework
+		  to configure the second link, for now lets use it
+		  with the same config from the first link, fixing
+		  the interface type to RS232, the speed in 38400 and
+		  the clock to external */
+	x25_cmd_conf.conf[1] = *conf;
+	x25_cmd_conf.conf[1].link = 1;
+	x25_cmd_conf.conf[1].speed = 5; /* 38400 */
+	x25_cmd_conf.conf[1].clock = 8;
+	x25_cmd_conf.conf[1].flags = 0; /* default = RS232 */
+
+	cycx_x25_dump_config(&x25_cmd_conf.conf[0]);
+	cycx_x25_dump_config(&x25_cmd_conf.conf[1]);
+
+	return x25_exec(card, X25_CONFIG, 0,
+			&x25_cmd_conf, sizeof(x25_cmd_conf), NULL, 0);
+}
+
+/* Get protocol statistics. */
+static int cycx_x25_get_stats(struct cycx_device *card)
+{
+	/* the firmware expects 20 in the size field!!!
+	   thanks to Daniela */
+	int err = x25_exec(card, X25_STATISTIC, 0, NULL, 20, NULL, 0);
+
+	if (err)
+		return err;
+
+	interruptible_sleep_on(&card->wait_stats);
+
+	if (signal_pending(current))
+		return -EINTR;
+
+	card->wandev.stats.rx_packets = card->u.x.stats.n2_rx_frames;
+	card->wandev.stats.rx_over_errors = card->u.x.stats.rx_over_errors;
+	card->wandev.stats.rx_crc_errors = card->u.x.stats.rx_crc_errors;
+	card->wandev.stats.rx_length_errors = 0; /* not available from fw */
+	card->wandev.stats.rx_frame_errors = 0; /* not available from fw */
+	card->wandev.stats.rx_missed_errors = card->u.x.stats.rx_aborts;
+	card->wandev.stats.rx_dropped = 0; /* not available from fw */
+	card->wandev.stats.rx_errors = 0; /* not available from fw */
+	card->wandev.stats.tx_packets = card->u.x.stats.n2_tx_frames;
+	card->wandev.stats.tx_aborted_errors = card->u.x.stats.tx_aborts;
+	card->wandev.stats.tx_dropped = 0; /* not available from fw */
+	card->wandev.stats.collisions = 0; /* not available from fw */
+	card->wandev.stats.tx_errors = 0; /* not available from fw */
+
+	cycx_x25_dump_devs(&card->wandev);
+
+	return 0;
+}
+
+/* Pack an ASCII decimal string two digits per byte; return the number of
+   fully packed bytes (*nibble is set when an odd digit is left over). */
+static int byte_to_nibble(u8 *s, u8 *d, char *nibble)
+{
+	int i = 0;
+
+	if (*nibble && *s) {
+		d[i] |= *s++ - '0';
+		*nibble = 0;
+		++i;
+	}
+
+	while (*s) {
+		d[i] = (*s - '0') << 4;
+		if (*(s + 1))
+			d[i] |= *(s + 1) - '0';
+		else {
+			*nibble = 1;
+			break;
+		}
+		++i;
+		s += 2;
+	}
+
+	return i;
+}
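+/* Example: "1234" packs to { 0x12, 0x34 }, returns 2 and leaves *nibble
+ * clear; "123" packs 0x12, parks the trailing '3' in the high half of the
+ * next byte, sets *nibble and returns 1. */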
+
+static void nibble_to_byte(u8 *s, u8 *d, u8 len, u8 nibble)
+{
+	if (nibble) {
+		*d++ = '0' + (*s++ & 0x0F);
+		--len;
+	}
+
+	while (len) {
+		*d++ = '0' + (*s >> 4);
+
+		if (--len) {
+			*d++ = '0' + (*s & 0x0F);
+			--len;
+		} else break;
+
+		++s;
+	}
+
+	*d = '\0';
+}
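+/* Example: nibble_to_byte of { 0x12, 0x34 } with len 4 and nibble 0 yields
+ * the string "1234"; with nibble set, the low half of the first byte
+ * supplies the first digit. */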
+
+/* Place X.25 call. */
+static int x25_place_call(struct cycx_device *card,
+			  struct cycx_x25_channel *chan)
+{
+	int err = 0,
+	    len;
+	char d[64],
+	     nibble = 0,
+	     mylen = chan->local_addr ? strlen(chan->local_addr) : 0,
+	     remotelen = strlen(chan->addr);
+	u8 key;
+
+	if (card->u.x.connection_keys == ~0U) {
+		pr_info("%s: too many simultaneous connection requests!\n",
+			card->devname);
+		return -EAGAIN;
+	}
+
+	key = ffz(card->u.x.connection_keys);
+	set_bit(key, (void*)&card->u.x.connection_keys);
+	++key;
+	dprintk(1, KERN_INFO "%s:x25_place_call:key=%d\n", card->devname, key);
+	memset(d, 0, sizeof(d));
+	d[1] = key; /* user key */
+	d[2] = 0x10;
+	d[4] = 0x0B;
+
+	len = byte_to_nibble(chan->addr, d + 6, &nibble);
+
+	if (chan->local_addr)
+		len += byte_to_nibble(chan->local_addr, d + 6 + len, &nibble);
+
+	if (nibble)
+		++len;
+
+	d[5] = mylen << 4 | remotelen;
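+	/* e.g. a 4-digit local address and a 5-digit remote address give
+	 * d[5] = 0x45 */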
+	d[6 + len + 1] = 0xCC; /* TCP/IP over X.25, thanks to Daniela :) */
+
+	if ((err = x25_exec(card, X25_CONNECT_REQUEST, chan->link,
+			    &d, 7 + len + 1, NULL, 0)) != 0)
+		clear_bit(--key, (void*)&card->u.x.connection_keys);
+	else
+		chan->lcn = -key;
+
+	return err;
+}
+
+/* Place X.25 CONNECT RESPONSE. */
+static int cycx_x25_connect_response(struct cycx_device *card,
+				     struct cycx_x25_channel *chan)
+{
+	u8 d[8];
+
+	memset(d, 0, sizeof(d));
+	d[0] = d[3] = chan->lcn;
+	d[2] = 0x10;
+	d[4] = 0x0F;
+	d[7] = 0xCC; /* TCP/IP over X.25, thanks Daniela */
+
+	return x25_exec(card, X25_CONNECT_RESPONSE, chan->link, &d, 8, NULL, 0);
+}
+
+/* Place X.25 DISCONNECT RESPONSE.  */
+static int cycx_x25_disconnect_response(struct cycx_device *card, u8 link,
+					u8 lcn)
+{
+	char d[5];
+
+	memset(d, 0, sizeof(d));
+	d[0] = d[3] = lcn;
+	d[2] = 0x10;
+	d[4] = 0x17;
+
+	return x25_exec(card, X25_DISCONNECT_RESPONSE, link, &d, 5, NULL, 0);
+}
+
+/* Clear X.25 call.  */
+static int x25_clear_call(struct cycx_device *card, u8 link, u8 lcn, u8 cause,
+			  u8 diagn)
+{
+	u8 d[7];
+
+	memset(d, 0, sizeof(d));
+	d[0] = d[3] = lcn;
+	d[2] = 0x10;
+	d[4] = 0x13;
+	d[5] = cause;
+	d[6] = diagn;
+
+	return x25_exec(card, X25_DISCONNECT_REQUEST, link, d, 7, NULL, 0);
+}
+
+/* Send X.25 data packet. */
+static int cycx_x25_send(struct cycx_device *card, u8 link, u8 lcn, u8 bitm,
+			 int len, void *buf)
+{
+	u8 d[] = "?\xFF\x10??";
+
+	d[0] = d[3] = lcn;
+	d[4] = bitm;
+
+	return x25_exec(card, X25_DATA_REQUEST, link, &d, 5, buf, len);
+}
+
+/* Miscellaneous */
+/* Find network device by its channel number.  */
+static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev,
+						  s16 lcn)
+{
+	struct net_device *dev = wandev->dev;
+	struct cycx_x25_channel *chan;
+
+	while (dev) {
+		chan = netdev_priv(dev);
+
+		if (chan->lcn == lcn)
+			break;
+		dev = chan->slave;
+	}
+	return dev;
+}
+
+/* Find network device by its remote dte address. */
+static struct net_device *
+	cycx_x25_get_dev_by_dte_addr(struct wan_device *wandev, char *dte)
+{
+	struct net_device *dev = wandev->dev;
+	struct cycx_x25_channel *chan;
+
+	while (dev) {
+		chan = netdev_priv(dev);
+
+		if (!strcmp(chan->addr, dte))
+			break;
+		dev = chan->slave;
+	}
+	return dev;
+}
+
+/* Initiate connection on the logical channel.
+ * o for PVC we just get channel configuration
+ * o for SVCs place an X.25 call
+ *
+ * Return:	0	connected
+ *		>0	connection in progress
+ *		<0	failure */
+static int cycx_x25_chan_connect(struct net_device *dev)
+{
+	struct cycx_x25_channel *chan = netdev_priv(dev);
+	struct cycx_device *card = chan->card;
+
+	if (chan->svc) {
+		if (!chan->addr[0])
+			return -EINVAL; /* no destination address */
+
+		dprintk(1, KERN_INFO "%s: placing X.25 call to %s...\n",
+				  card->devname, chan->addr);
+
+		if (x25_place_call(card, chan))
+			return -EIO;
+
+		cycx_x25_set_chan_state(dev, WAN_CONNECTING);
+		return 1;
+	} else
+		cycx_x25_set_chan_state(dev, WAN_CONNECTED);
+
+	return 0;
+}
+
+/* Disconnect logical channel.
+ * o if SVC then clear X.25 call */
+static void cycx_x25_chan_disconnect(struct net_device *dev)
+{
+	struct cycx_x25_channel *chan = netdev_priv(dev);
+
+	if (chan->svc) {
+		x25_clear_call(chan->card, chan->link, chan->lcn, 0, 0);
+		cycx_x25_set_chan_state(dev, WAN_DISCONNECTING);
+	} else
+		cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
+}
+
+/* Called by kernel timer */
+static void cycx_x25_chan_timer(unsigned long d)
+{
+	struct net_device *dev = (struct net_device *)d;
+	struct cycx_x25_channel *chan = netdev_priv(dev);
+
+	if (chan->state == WAN_CONNECTED)
+		cycx_x25_chan_disconnect(dev);
+	else
+		pr_err("%s: %s for svc (%s) not connected!\n",
+		       chan->card->devname, __func__, dev->name);
+}
+
+/* Set logical channel state. */
+static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
+{
+	struct cycx_x25_channel *chan = netdev_priv(dev);
+	struct cycx_device *card = chan->card;
+	unsigned long flags;
+	char *string_state = NULL;
+
+	spin_lock_irqsave(&card->lock, flags);
+
+	if (chan->state != state) {
+		if (chan->svc && chan->state == WAN_CONNECTED)
+			del_timer(&chan->timer);
+
+		switch (state) {
+		case WAN_CONNECTED:
+			string_state = "connected!";
+			*(__be16*)dev->dev_addr = htons(chan->lcn);
+			netif_wake_queue(dev);
+			reset_timer(dev);
+
+			if (chan->protocol == ETH_P_X25)
+				cycx_x25_chan_send_event(dev,
+					X25_IFACE_CONNECT);
+
+			break;
+		case WAN_CONNECTING:
+			string_state = "connecting...";
+			break;
+		case WAN_DISCONNECTING:
+			string_state = "disconnecting...";
+			break;
+		case WAN_DISCONNECTED:
+			string_state = "disconnected!";
+
+			if (chan->svc) {
+				*(unsigned short*)dev->dev_addr = 0;
+				chan->lcn = 0;
+			}
+
+			if (chan->protocol == ETH_P_X25)
+				cycx_x25_chan_send_event(dev,
+					X25_IFACE_DISCONNECT);
+
+			netif_wake_queue(dev);
+			break;
+		}
+
+		pr_info("%s: interface %s %s\n",
+			card->devname, dev->name, string_state);
+		chan->state = state;
+	}
+
+	spin_unlock_irqrestore(&card->lock, flags);
+}
+
+/* Send packet on a logical channel.
+ *	When this function is called, the socket buffer to transmit is passed
+ *	in as 'skb'.  When transmission is complete, the caller frees the
+ *	socket buffer and the transmit queue is re-enabled from the transmit
+ *	acknowledge interrupt.
+ *
+ * Return:	0	- transmission complete
+ *		1	- busy
+ *
+ * Notes:
+ * 1. If packet length is greater than MTU for this channel, we'll fragment
+ *    the packet into 'complete sequence' using M-bit.
+ * 2. When transmission is complete, an event notification should be issued
+ *    to the router.  */
+static int cycx_x25_chan_send(struct net_device *dev, struct sk_buff *skb)
+{
+	struct cycx_x25_channel *chan = netdev_priv(dev);
+	struct cycx_device *card = chan->card;
+	int bitm = 0;		/* final packet */
+	unsigned len = skb->len;
+
+	if (skb->len > card->wandev.mtu) {
+		len = card->wandev.mtu;
+		bitm = 0x10;		/* set M-bit (more data) */
+	}
+
+	if (cycx_x25_send(card, chan->link, chan->lcn, bitm, len, skb->data))
+		return 1;
+
+	if (bitm) {
+		skb_pull(skb, len);
+		return 1;
+	}
+
+	++chan->ifstats.tx_packets;
+	chan->ifstats.tx_bytes += len;
+
+	return 0;
+}
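+/* When the M-bit is set, only the first MTU-sized chunk is handed to the
+ * firmware and the skb is pulled past it; the caller then returns
+ * NETDEV_TX_BUSY, so the stack requeues the shortened skb and the remaining
+ * fragments go out on the retries. */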
+
+/* Send event (connection, disconnection, etc) to X.25 socket layer */
+
+static void cycx_x25_chan_send_event(struct net_device *dev, u8 event)
+{
+	struct sk_buff *skb;
+	unsigned char *ptr;
+
+	if ((skb = dev_alloc_skb(1)) == NULL) {
+		pr_err("%s: out of memory\n", __func__);
+		return;
+	}
+
+	ptr  = skb_put(skb, 1);
+	*ptr = event;
+
+	skb->protocol = x25_type_trans(skb, dev);
+	netif_rx(skb);
+}
+
+/* Convert line speed in bps to a number used by cyclom 2x code. */
+static u8 bps_to_speed_code(u32 bps)
+{
+	u8 number = 0; /* defaults to the lowest (1200) speed ;> */
+
+	     if (bps >= 512000) number = 8;
+	else if (bps >= 256000) number = 7;
+	else if (bps >= 64000)  number = 6;
+	else if (bps >= 38400)  number = 5;
+	else if (bps >= 19200)  number = 4;
+	else if (bps >= 9600)   number = 3;
+	else if (bps >= 4800)   number = 2;
+	else if (bps >= 2400)   number = 1;
+
+	return number;
+}
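+/* e.g. 9600 bps maps to code 3 and 64000 bps to 6; anything below 2400 bps
+ * falls back to 0 (1200 bps). */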
+
+/* log base 2 */
+static u8 cycx_log2(u32 n)
+{
+	u8 log = 0;
+
+	if (!n)
+		return 0;
+
+	while (n > 1) {
+		n >>= 1;
+		++log;
+	}
+
+	return log;
+}
+
+/* Convert decimal string to unsigned integer.
+ * If len != 0 then only 'len' characters of the string are converted. */
+static unsigned dec_to_uint(u8 *str, int len)
+{
+	unsigned val = 0;
+
+	if (!len)
+		len = strlen(str);
+
+	for (; len && isdigit(*str); ++str, --len)
+		val = (val * 10) + (*str - (unsigned) '0');
+
+	return val;
+}
+
+static void reset_timer(struct net_device *dev)
+{
+	struct cycx_x25_channel *chan = netdev_priv(dev);
+
+	if (chan->svc)
+		mod_timer(&chan->timer, jiffies+chan->idle_tmout*HZ);
+}
+#ifdef CYCLOMX_X25_DEBUG
+static void cycx_x25_dump_config(struct cycx_x25_config *conf)
+{
+	pr_info("X.25 configuration\n");
+	pr_info("-----------------\n");
+	pr_info("link number=%d\n", conf->link);
+	pr_info("line speed=%d\n", conf->speed);
+	pr_info("clock=%sternal\n", conf->clock == 8 ? "Ex" : "In");
+	pr_info("# level 2 retransm.=%d\n", conf->n2);
+	pr_info("level 2 window=%d\n", conf->n2win);
+	pr_info("level 3 window=%d\n", conf->n3win);
+	pr_info("# logical channels=%d\n", conf->nvc);
+	pr_info("level 3 pkt len=%d\n", conf->pktlen);
+	pr_info("my address=%d\n", conf->locaddr);
+	pr_info("remote address=%d\n", conf->remaddr);
+	pr_info("t1=%d seconds\n", conf->t1);
+	pr_info("t2=%d seconds\n", conf->t2);
+	pr_info("t21=%d seconds\n", conf->t21);
+	pr_info("# PVCs=%d\n", conf->npvc);
+	pr_info("t23=%d seconds\n", conf->t23);
+	pr_info("flags=0x%x\n", conf->flags);
+}
+
+static void cycx_x25_dump_stats(struct cycx_x25_stats *stats)
+{
+	pr_info("X.25 statistics\n");
+	pr_info("--------------\n");
+	pr_info("rx_crc_errors=%d\n", stats->rx_crc_errors);
+	pr_info("rx_over_errors=%d\n", stats->rx_over_errors);
+	pr_info("n2_tx_frames=%d\n", stats->n2_tx_frames);
+	pr_info("n2_rx_frames=%d\n", stats->n2_rx_frames);
+	pr_info("tx_timeouts=%d\n", stats->tx_timeouts);
+	pr_info("rx_timeouts=%d\n", stats->rx_timeouts);
+	pr_info("n3_tx_packets=%d\n", stats->n3_tx_packets);
+	pr_info("n3_rx_packets=%d\n", stats->n3_rx_packets);
+	pr_info("tx_aborts=%d\n", stats->tx_aborts);
+	pr_info("rx_aborts=%d\n", stats->rx_aborts);
+}
+
+static void cycx_x25_dump_devs(struct wan_device *wandev)
+{
+	struct net_device *dev = wandev->dev;
+
+	pr_info("X.25 dev states\n");
+	pr_info("name: addr:           txoff:  protocol:\n");
+	pr_info("---------------------------------------\n");
+
+	while(dev) {
+		struct cycx_x25_channel *chan = netdev_priv(dev);
+
+		pr_info("%-5.5s %-15.15s   %d     ETH_P_%s\n",
+			chan->name, chan->addr, netif_queue_stopped(dev),
+			chan->protocol == ETH_P_IP ? "IP" : "X25");
+		dev = chan->slave;
+	}
+}
+
+#endif /* CYCLOMX_X25_DEBUG */
+/* End */
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/dlci.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/dlci.c
new file mode 100644
index 0000000..6a8a382
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/dlci.c
@@ -0,0 +1,550 @@
+/*
+ * DLCI		Implementation of Frame Relay protocol for Linux, according to
+ *		RFC 1490.  This generic device provides en/decapsulation for an
+ *		underlying hardware driver.  Routes & IPs are assigned to these
+ *		interfaces.  Requires 'dlcicfg' program to create usable 
+ *		interfaces; the initial one, 'dlci', is for IOCTL use only.
+ *
+ * Version:	@(#)dlci.c	0.35	4 Jan 1997
+ *
+ * Author:	Mike McLagan <mike.mclagan@linux.org>
+ *
+ * Changes:
+ *
+ *		0.15	Mike McLagan	Packet freeing, bug in kmalloc call
+ *					DLCI_RET handling
+ *		0.20	Mike McLagan	More conservative on which packets
+ *					are returned for retry and which
+ *					are dropped.  If DLCI_RET_DROP is
+ *					returned from the FRAD, the packet is
+ *				 	sent back to Linux for re-transmission
+ *		0.25	Mike McLagan	Converted to use SIOC IOCTL calls
+ *		0.30	Jim Freeman	Fixed to allow IPX traffic
+ *		0.35	Michael Elizabeth	Fixed incorrect memcpy_fromfs
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/if_frad.h>
+#include <linux/bitops.h>
+
+#include <net/sock.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+
+static const char version[] = "DLCI driver v0.35, 4 Jan 1997, mike.mclagan@linux.org";
+
+static LIST_HEAD(dlci_devs);
+
+static void dlci_setup(struct net_device *);
+
+/* 
+ * these encapsulate the RFC 1490 requirements as well as 
+ * deal with packet transmission and reception, working with
+ * the upper network layers 
+ */
+
+static int dlci_header(struct sk_buff *skb, struct net_device *dev, 
+		       unsigned short type, const void *daddr,
+		       const void *saddr, unsigned len)
+{
+	struct frhdr		hdr;
+	struct dlci_local	*dlp;
+	unsigned int		hlen;
+	char			*dest;
+
+	dlp = netdev_priv(dev);
+
+	hdr.control = FRAD_I_UI;
+	switch (type)
+	{
+		case ETH_P_IP:
+			hdr.IP_NLPID = FRAD_P_IP;
+			hlen = sizeof(hdr.control) + sizeof(hdr.IP_NLPID);
+			break;
+
+		/* feel free to add other types, if necessary */
+
+		default:
+			hdr.pad = FRAD_P_PADDING;
+			hdr.NLPID = FRAD_P_SNAP;
+			memset(hdr.OUI, 0, sizeof(hdr.OUI));
+			hdr.PID = htons(type);
+			hlen = sizeof(hdr);
+			break;
+	}
+
+	dest = skb_push(skb, hlen);
+	if (!dest)
+		return 0;
+
+	memcpy(dest, &hdr, hlen);
+
+	return hlen;
+}
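+
+/*
+ * Rough sketch of the two encapsulations built above (field names are those
+ * of struct frhdr, sizes depend on its packing):
+ *
+ *   ETH_P_IP:       | control = UI | IP_NLPID = FRAD_P_IP |
+ *   anything else:  | control = UI | pad | NLPID = SNAP | OUI[3] | PID |
+ *
+ * Only the first 'hlen' bytes of 'hdr' are pushed in front of the payload.
+ */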
+
+static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
+{
+	struct dlci_local *dlp;
+	struct frhdr		*hdr;
+	int					process, header;
+
+	dlp = netdev_priv(dev);
+	if (!pskb_may_pull(skb, sizeof(*hdr))) {
+		netdev_notice(dev, "invalid data no header\n");
+		dev->stats.rx_errors++;
+		kfree_skb(skb);
+		return;
+	}
+
+	hdr = (struct frhdr *) skb->data;
+	process = 0;
+	header = 0;
+	skb->dev = dev;
+
+	if (hdr->control != FRAD_I_UI)
+	{
+		netdev_notice(dev, "Invalid header flag 0x%02X\n",
+			      hdr->control);
+		dev->stats.rx_errors++;
+	}
+	else
+		switch (hdr->IP_NLPID)
+		{
+			case FRAD_P_PADDING:
+				if (hdr->NLPID != FRAD_P_SNAP)
+				{
+					netdev_notice(dev, "Unsupported NLPID 0x%02X\n",
+						      hdr->NLPID);
+					dev->stats.rx_errors++;
+					break;
+				}
+	 
+				if (hdr->OUI[0] + hdr->OUI[1] + hdr->OUI[2] != 0)
+				{
+					netdev_notice(dev, "Unsupported organizationally unique identifier 0x%02X-%02X-%02X\n",
+						      hdr->OUI[0],
+						      hdr->OUI[1],
+						      hdr->OUI[2]);
+					dev->stats.rx_errors++;
+					break;
+				}
+
+				/* at this point, it's an EtherType frame */
+				header = sizeof(struct frhdr);
+				/* Already in network order ! */
+				skb->protocol = hdr->PID;
+				process = 1;
+				break;
+
+			case FRAD_P_IP:
+				header = sizeof(hdr->control) + sizeof(hdr->IP_NLPID);
+				skb->protocol = htons(ETH_P_IP);
+				process = 1;
+				break;
+
+			case FRAD_P_SNAP:
+			case FRAD_P_Q933:
+			case FRAD_P_CLNP:
+				netdev_notice(dev, "Unsupported NLPID 0x%02X\n",
+					      hdr->pad);
+				dev->stats.rx_errors++;
+				break;
+
+			default:
+				netdev_notice(dev, "Invalid pad byte 0x%02X\n",
+					      hdr->pad);
+				dev->stats.rx_errors++;
+				break;				
+		}
+
+	if (process)
+	{
+		/* we've set up the protocol, so discard the header */
+		skb_reset_mac_header(skb);
+		skb_pull(skb, header);
+		dev->stats.rx_bytes += skb->len;
+		netif_rx(skb);
+		dev->stats.rx_packets++;
+	}
+	else
+		dev_kfree_skb(skb);
+}
+
+static netdev_tx_t dlci_transmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct dlci_local *dlp = netdev_priv(dev);
+
+	if (skb)
+		dlp->slave->netdev_ops->ndo_start_xmit(skb, dlp->slave);
+	return NETDEV_TX_OK;
+}
+
+static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, int get)
+{
+	struct dlci_conf	config;
+	struct dlci_local	*dlp;
+	struct frad_local	*flp;
+	int			err;
+
+	dlp = netdev_priv(dev);
+
+	flp = netdev_priv(dlp->slave);
+
+	if (!get)
+	{
+		if (copy_from_user(&config, conf, sizeof(struct dlci_conf)))
+			return -EFAULT;
+		if (config.flags & ~DLCI_VALID_FLAGS)
+			return -EINVAL;
+		memcpy(&dlp->config, &config, sizeof(struct dlci_conf));
+		dlp->configured = 1;
+	}
+
+	err = (*flp->dlci_conf)(dlp->slave, dev, get);
+	if (err)
+		return err;
+
+	if (get)
+	{
+		if (copy_to_user(conf, &dlp->config, sizeof(struct dlci_conf)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct dlci_local *dlp;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	dlp = netdev_priv(dev);
+
+	switch (cmd)
+	{
+		case DLCI_GET_SLAVE:
+			if (!*(short *)(dev->dev_addr))
+				return -EINVAL;
+
+			strncpy(ifr->ifr_slave, dlp->slave->name, sizeof(ifr->ifr_slave));
+			break;
+
+		case DLCI_GET_CONF:
+		case DLCI_SET_CONF:
+			if (!*(short *)(dev->dev_addr))
+				return -EINVAL;
+
+			return dlci_config(dev, ifr->ifr_data, cmd == DLCI_GET_CONF);
+			break;
+
+		default: 
+			return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static int dlci_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct dlci_local *dlp = netdev_priv(dev);
+
+	return dev_set_mtu(dlp->slave, new_mtu);
+}
+
+static int dlci_open(struct net_device *dev)
+{
+	struct dlci_local	*dlp;
+	struct frad_local	*flp;
+	int			err;
+
+	dlp = netdev_priv(dev);
+
+	if (!*(short *)(dev->dev_addr))
+		return -EINVAL;
+
+	if (!netif_running(dlp->slave))
+		return -ENOTCONN;
+
+	flp = netdev_priv(dlp->slave);
+	err = (*flp->activate)(dlp->slave, dev);
+	if (err)
+		return err;
+
+	netif_start_queue(dev);
+
+	return 0;
+}
+
+static int dlci_close(struct net_device *dev)
+{
+	struct dlci_local	*dlp;
+	struct frad_local	*flp;
+	int			err;
+
+	netif_stop_queue(dev);
+
+	dlp = netdev_priv(dev);
+
+	flp = netdev_priv(dlp->slave);
+	err = (*flp->deactivate)(dlp->slave, dev);
+
+	return 0;
+}
+
+static int dlci_add(struct dlci_add *dlci)
+{
+	struct net_device	*master, *slave;
+	struct dlci_local	*dlp;
+	struct frad_local	*flp;
+	int			err = -EINVAL;
+
+
+	/* validate slave device */
+	slave = dev_get_by_name(&init_net, dlci->devname);
+	if (!slave)
+		return -ENODEV;
+
+	if (slave->type != ARPHRD_FRAD || netdev_priv(slave) == NULL)
+		goto err1;
+
+	/* create device name */
+	master = alloc_netdev(sizeof(struct dlci_local), "dlci%d",
+			      dlci_setup);
+	if (!master) {
+		err = -ENOMEM;
+		goto err1;
+	}
+
+	/* make sure same slave not already registered */
+	rtnl_lock();
+	list_for_each_entry(dlp, &dlci_devs, list) {
+		if (dlp->slave == slave) {
+			err = -EBUSY;
+			goto err2;
+		}
+	}
+
+	*(short *)(master->dev_addr) = dlci->dlci;
+
+	dlp = netdev_priv(master);
+	dlp->slave = slave;
+	dlp->master = master;
+
+	flp = netdev_priv(slave);
+	err = (*flp->assoc)(slave, master);
+	if (err < 0)
+		goto err2;
+
+	err = register_netdevice(master);
+	if (err < 0) 
+		goto err2;
+
+	strcpy(dlci->devname, master->name);
+
+	list_add(&dlp->list, &dlci_devs);
+	rtnl_unlock();
+
+	return 0;
+
+ err2:
+	rtnl_unlock();
+	free_netdev(master);
+ err1:
+	dev_put(slave);
+	return err;
+}
+
+static int dlci_del(struct dlci_add *dlci)
+{
+	struct dlci_local	*dlp;
+	struct frad_local	*flp;
+	struct net_device	*master, *slave;
+	int			err;
+	bool			found = false;
+
+	rtnl_lock();
+
+	/* validate slave device */
+	master = __dev_get_by_name(&init_net, dlci->devname);
+	if (!master) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	list_for_each_entry(dlp, &dlci_devs, list) {
+		if (dlp->master == master) {
+			found = true;
+			break;
+		}
+	}
+	if (!found) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	if (netif_running(master)) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	dlp = netdev_priv(master);
+	slave = dlp->slave;
+	flp = netdev_priv(slave);
+
+	err = (*flp->deassoc)(slave, master);
+	if (!err) {
+		list_del(&dlp->list);
+
+		unregister_netdevice(master);
+
+		dev_put(slave);
+	}
+out:
+	rtnl_unlock();
+	return err;
+}
+
+static int dlci_ioctl(unsigned int cmd, void __user *arg)
+{
+	struct dlci_add add;
+	int err;
+	
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (copy_from_user(&add, arg, sizeof(struct dlci_add)))
+		return -EFAULT;
+
+	switch (cmd)
+	{
+		case SIOCADDDLCI:
+			err = dlci_add(&add);
+
+			if (!err)
+				if (copy_to_user(arg, &add, sizeof(struct dlci_add)))
+					return -EFAULT;
+			break;
+
+		case SIOCDELDLCI:
+			err = dlci_del(&add);
+			break;
+
+		default:
+			err = -EINVAL;
+	}
+
+	return err;
+}
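+
+/*
+ * Rough userspace sketch (not driver code) of how a dlcicfg-style tool would
+ * drive the ioctl above; the FRAD name "sdla0" and DLCI 16 are made up, and
+ * 'fd' is any socket descriptor:
+ *
+ *	struct dlci_add add;
+ *
+ *	memset(&add, 0, sizeof(add));
+ *	strncpy(add.devname, "sdla0", sizeof(add.devname) - 1);
+ *	add.dlci = 16;
+ *	if (ioctl(fd, SIOCADDDLCI, &add) == 0)
+ *		printf("created %s\n", add.devname); // kernel copies back "dlciN"
+ *
+ * SIOCDELDLCI takes the same structure, with devname naming the dlci device
+ * to remove.
+ */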
+
+static const struct header_ops dlci_header_ops = {
+	.create	= dlci_header,
+};
+
+static const struct net_device_ops dlci_netdev_ops = {
+	.ndo_open	= dlci_open,
+	.ndo_stop	= dlci_close,
+	.ndo_do_ioctl	= dlci_dev_ioctl,
+	.ndo_start_xmit	= dlci_transmit,
+	.ndo_change_mtu	= dlci_change_mtu,
+};
+
+static void dlci_setup(struct net_device *dev)
+{
+	struct dlci_local *dlp = netdev_priv(dev);
+
+	dev->flags		= 0;
+	dev->header_ops		= &dlci_header_ops;
+	dev->netdev_ops		= &dlci_netdev_ops;
+	dev->destructor		= free_netdev;
+
+	dlp->receive		= dlci_receive;
+
+	dev->type		= ARPHRD_DLCI;
+	dev->hard_header_len	= sizeof(struct frhdr);
+	dev->addr_len		= sizeof(short);
+
+}
+
+/* if slave is unregistering, then cleanup master */
+static int dlci_dev_event(struct notifier_block *unused,
+			  unsigned long event, void *ptr)
+{
+	struct net_device *dev = (struct net_device *) ptr;
+
+	if (dev_net(dev) != &init_net)
+		return NOTIFY_DONE;
+
+	if (event == NETDEV_UNREGISTER) {
+		struct dlci_local *dlp;
+
+		list_for_each_entry(dlp, &dlci_devs, list) {
+			if (dlp->slave == dev) {
+				list_del(&dlp->list);
+				unregister_netdevice(dlp->master);
+				dev_put(dlp->slave);
+				break;
+			}
+		}
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block dlci_notifier = {
+	.notifier_call = dlci_dev_event,
+};
+
+static int __init init_dlci(void)
+{
+	dlci_ioctl_set(dlci_ioctl);
+	register_netdevice_notifier(&dlci_notifier);
+
+	printk("%s.\n", version);
+
+	return 0;
+}
+
+static void __exit dlci_exit(void)
+{
+	struct dlci_local	*dlp, *nxt;
+	
+	dlci_ioctl_set(NULL);
+	unregister_netdevice_notifier(&dlci_notifier);
+
+	rtnl_lock();
+	list_for_each_entry_safe(dlp, nxt, &dlci_devs, list) {
+		unregister_netdevice(dlp->master);
+		dev_put(dlp->slave);
+	}
+	rtnl_unlock();
+}
+
+module_init(init_dlci);
+module_exit(dlci_exit);
+
+MODULE_AUTHOR("Mike McLagan");
+MODULE_DESCRIPTION("Frame Relay DLCI layer");
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/dscc4.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/dscc4.c
new file mode 100644
index 0000000..c676de7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/dscc4.c
@@ -0,0 +1,2069 @@
+/*
+ * drivers/net/wan/dscc4/dscc4.c: a DSCC4 HDLC driver for Linux
+ *
+ * This software may be used and distributed according to the terms of the
+ * GNU General Public License.
+ *
+ * The author may be reached as romieu@cogenit.fr.
+ * Specific bug reports/asian food will be welcome.
+ *
+ * Special thanks to the nice people at CS-Telecom for the hardware and the
+ * access to the test/measure tools.
+ *
+ *
+ *                             Theory of Operation
+ *
+ * I. Board Compatibility
+ *
+ * This device driver is designed for the Siemens PEB20534 4 ports serial
+ * controller as found on Etinc PCISYNC cards. The documentation for the
+ * chipset is available at http://www.infineon.com:
+ * - Data Sheet "DSCC4, DMA Supported Serial Communication Controller with
+ * 4 Channels, PEB 20534 Version 2.1, PEF 20534 Version 2.1";
+ * - Application Hint "Management of DSCC4 on-chip FIFO resources".
+ * - Errata sheet DS5 (courtesy of Michael Skerritt).
+ * Jens David has built an adapter based on the same chipset. Take a look
+ * at http://www.afthd.tu-darmstadt.de/~dg1kjd/pciscc4 for a specific
+ * driver.
+ * Sample code (2 revisions) is available at Infineon.
+ *
+ * II. Board-specific settings
+ *
+ * Pcisync can transmit some clock signal to the outside world on the
+ * *first two* ports provided you put a quartz and a line driver on it and
+ * remove the jumpers. The operation is described on Etinc web site. If you
+ * go DCE on these ports, don't forget to use an adequate cable.
+ *
+ * Sharing of the PCI interrupt line for this board is possible.
+ *
+ * III. Driver operation
+ *
+ * The rx/tx operations are based on a linked list of descriptors. The driver
+ * doesn't use HOLD mode any more. HOLD mode is definitely buggy and the more
+ * I tried to fix it, the more it started to look like (convoluted) software
+ * mutation of the LxDA method. Errata sheet DS5 suggests using LxDA: consider
+ * this a rfc2119 MUST.
+ *
+ * Tx direction
+ * When the tx ring is full, the xmit routine issues a call to netdev_stop.
+ * The device is supposed to be enabled again during an ALLS irq (we could
+ * use HI but as it's easy to lose events, it's fscked).
+ *
+ * Rx direction
+ * The received frames aren't supposed to span over multiple receiving areas.
+ * I may implement it some day but it isn't the highest ranked item.
+ *
+ * IV. Notes
+ * The current error (XDU, RFO) recovery code is untested.
+ * So far, RDO takes its RX channel down and the right sequence to enable it
+ * again is still a mystery. If RDO happens, plan a reboot. More details
+ * in the code (NB: as this happens, TX still works).
+ * Don't mess with the cables during operation, especially on DTE ports. I don't
+ * suggest it for DCE either but at least one can get some messages instead
+ * of a complete instant freeze.
+ * Tests are done on Rev. 20 of the silicon. The RDO handling changes with
+ * the documentation/chipset releases.
+ *
+ * TODO:
+ * - test X25.
+ * - use polling at high irq/s,
+ * - performance analysis,
+ * - endianness.
+ *
+ * 2001/12/10	Daniela Squassoni  <daniela@cyclades.com>
+ * - Contribution to support the new generic HDLC layer.
+ *
+ * 2002/01	Ueimor
+ * - old style interface removal
+ * - dscc4_release_ring fix (related to DMA mapping)
+ * - hard_start_xmit fix (hint: TxSizeMax)
+ * - misc crapectomy.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/cache.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/hdlc.h>
+#include <linux/mutex.h>
+
+/* Version */
+static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n";
+static int debug;
+static int quartz;
+
+#ifdef CONFIG_DSCC4_PCI_RST
+static DEFINE_MUTEX(dscc4_mutex);
+static u32 dscc4_pci_config_store[16];
+#endif
+
+#define	DRV_NAME	"dscc4"
+
+#undef DSCC4_POLLING
+
+/* Module parameters */
+
+MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
+MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
+MODULE_LICENSE("GPL");
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug,"Enable/disable extra messages");
+module_param(quartz, int, 0);
+MODULE_PARM_DESC(quartz,"If present, on-board quartz frequency (Hz)");
+
+/* Structures */
+
+struct thingie {
+	int define;
+	u32 bits;
+};
+
+struct TxFD {
+	__le32 state;
+	__le32 next;
+	__le32 data;
+	__le32 complete;
+	u32 jiffies; /* Allows sizeof(TxFD) == sizeof(RxFD) + extra hack */
+		     /* FWIW, datasheet calls that "dummy" and says that card
+		      * never looks at it; neither does the driver */
+};
+
+struct RxFD {
+	__le32 state1;
+	__le32 next;
+	__le32 data;
+	__le32 state2;
+	__le32 end;
+};
+
+#define DUMMY_SKB_SIZE		64
+#define TX_LOW			8
+#define TX_RING_SIZE		32
+#define RX_RING_SIZE		32
+#define TX_TOTAL_SIZE		TX_RING_SIZE*sizeof(struct TxFD)
+#define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct RxFD)
+#define IRQ_RING_SIZE		64		/* Keep it a multiple of 32 */
+#define TX_TIMEOUT		(HZ/10)
+#define DSCC4_HZ_MAX		33000000
+#define BRR_DIVIDER_MAX		64*0x00004000	/* Cf errata DS5 p.10 */
+#define dev_per_card		4
+#define SCC_REGISTERS_MAX	23		/* Cf errata DS5 p.4 */
+
+#define SOURCE_ID(flags)	(((flags) >> 28) & 0x03)
+#define TO_SIZE(state)		(((state) >> 16) & 0x1fff)
+
+/*
+ * Given the operating range of Linux HDLC, the 2 defines below could be
+ * made simpler. However they are a fine reminder for the limitations of
+ * the driver: it's better to stay < TxSizeMax and < RxSizeMax.
+ */
+#define TO_STATE_TX(len)	cpu_to_le32(((len) & TxSizeMax) << 16)
+#define TO_STATE_RX(len)	cpu_to_le32((RX_MAX(len) % RxSizeMax) << 16)
+#define RX_MAX(len)		((((len) >> 5) + 1) << 5)	/* Cf RLCR */
+#define SCC_REG_START(dpriv)	(SCC_START+(dpriv->dev_id)*SCC_OFFSET)
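+
+/*
+ * Worked example for the helpers above: RX_MAX() rounds a length up to the
+ * next multiple of 32 (it always adds at least one 32 byte step, cf RLCR),
+ * so RX_MAX(1600) = 1632 and RX_MAX(1632) = 1664. TO_STATE_TX() masks the
+ * length with TxSizeMax (0x1fff) before shifting it into bits 16..28.
+ */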
+
+struct dscc4_pci_priv {
+        __le32 *iqcfg;
+        int cfg_cur;
+        spinlock_t lock;
+        struct pci_dev *pdev;
+
+        struct dscc4_dev_priv *root;
+        dma_addr_t iqcfg_dma;
+	u32 xtal_hz;
+};
+
+struct dscc4_dev_priv {
+        struct sk_buff *rx_skbuff[RX_RING_SIZE];
+        struct sk_buff *tx_skbuff[TX_RING_SIZE];
+
+        struct RxFD *rx_fd;
+        struct TxFD *tx_fd;
+        __le32 *iqrx;
+        __le32 *iqtx;
+
+	/* FIXME: check all the volatile are required */
+        volatile u32 tx_current;
+        u32 rx_current;
+        u32 iqtx_current;
+        u32 iqrx_current;
+
+        volatile u32 tx_dirty;
+        volatile u32 ltda;
+        u32 rx_dirty;
+        u32 lrda;
+
+        dma_addr_t tx_fd_dma;
+        dma_addr_t rx_fd_dma;
+        dma_addr_t iqtx_dma;
+        dma_addr_t iqrx_dma;
+
+	u32 scc_regs[SCC_REGISTERS_MAX]; /* Cf errata DS5 p.4 */
+
+	struct timer_list timer;
+
+        struct dscc4_pci_priv *pci_priv;
+        spinlock_t lock;
+
+        int dev_id;
+	volatile u32 flags;
+	u32 timer_help;
+
+	unsigned short encoding;
+	unsigned short parity;
+	struct net_device *dev;
+	sync_serial_settings settings;
+	void __iomem *base_addr;
+	u32 __pad __attribute__ ((aligned (4)));
+};
+
+/* GLOBAL registers definitions */
+#define GCMDR   0x00
+#define GSTAR   0x04
+#define GMODE   0x08
+#define IQLENR0 0x0C
+#define IQLENR1 0x10
+#define IQRX0   0x14
+#define IQTX0   0x24
+#define IQCFG   0x3c
+#define FIFOCR1 0x44
+#define FIFOCR2 0x48
+#define FIFOCR3 0x4c
+#define FIFOCR4 0x34
+#define CH0CFG  0x50
+#define CH0BRDA 0x54
+#define CH0BTDA 0x58
+#define CH0FRDA 0x98
+#define CH0FTDA 0xb0
+#define CH0LRDA 0xc8
+#define CH0LTDA 0xe0
+
+/* SCC registers definitions */
+#define SCC_START	0x0100
+#define SCC_OFFSET      0x80
+#define CMDR    0x00
+#define STAR    0x04
+#define CCR0    0x08
+#define CCR1    0x0c
+#define CCR2    0x10
+#define BRR     0x2C
+#define RLCR    0x40
+#define IMR     0x54
+#define ISR     0x58
+
+#define GPDIR	0x0400
+#define GPDATA	0x0404
+#define GPIM	0x0408
+
+/* Bit masks */
+#define EncodingMask	0x00700000
+#define CrcMask		0x00000003
+
+#define IntRxScc0	0x10000000
+#define IntTxScc0	0x01000000
+
+#define TxPollCmd	0x00000400
+#define RxActivate	0x08000000
+#define MTFi		0x04000000
+#define Rdr		0x00400000
+#define Rdt		0x00200000
+#define Idr		0x00100000
+#define Idt		0x00080000
+#define TxSccRes	0x01000000
+#define RxSccRes	0x00010000
+#define TxSizeMax	0x1fff		/* Datasheet DS1 - 11.1.1.1 */
+#define RxSizeMax	0x1ffc		/* Datasheet DS1 - 11.1.2.1 */
+
+#define Ccr0ClockMask	0x0000003f
+#define Ccr1LoopMask	0x00000200
+#define IsrMask		0x000fffff
+#define BrrExpMask	0x00000f00
+#define BrrMultMask	0x0000003f
+#define EncodingMask	0x00700000
+#define Hold		cpu_to_le32(0x40000000)
+#define SccBusy		0x10000000
+#define PowerUp		0x80000000
+#define Vis		0x00001000
+#define FrameOk		(FrameVfr | FrameCrc)
+#define FrameVfr	0x80
+#define FrameRdo	0x40
+#define FrameCrc	0x20
+#define FrameRab	0x10
+#define FrameAborted	cpu_to_le32(0x00000200)
+#define FrameEnd	cpu_to_le32(0x80000000)
+#define DataComplete	cpu_to_le32(0x40000000)
+#define LengthCheck	0x00008000
+#define SccEvt		0x02000000
+#define NoAck		0x00000200
+#define Action		0x00000001
+#define HiDesc		cpu_to_le32(0x20000000)
+
+/* SCC events */
+#define RxEvt		0xf0000000
+#define TxEvt		0x0f000000
+#define Alls		0x00040000
+#define Xdu		0x00010000
+#define Cts		0x00004000
+#define Xmr		0x00002000
+#define Xpr		0x00001000
+#define Rdo		0x00000080
+#define Rfs		0x00000040
+#define Cd		0x00000004
+#define Rfo		0x00000002
+#define Flex		0x00000001
+
+/* DMA core events */
+#define Cfg		0x00200000
+#define Hi		0x00040000
+#define Fi		0x00020000
+#define Err		0x00010000
+#define Arf		0x00000002
+#define ArAck		0x00000001
+
+/* State flags */
+#define Ready		0x00000000
+#define NeedIDR		0x00000001
+#define NeedIDT		0x00000002
+#define RdoSet		0x00000004
+#define FakeReset	0x00000008
+
+/* Don't mask RDO. Ever. */
+#ifdef DSCC4_POLLING
+#define EventsMask	0xfffeef7f
+#else
+#define EventsMask	0xfffa8f7a
+#endif
+
+/* Functions prototypes */
+static void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
+static void dscc4_tx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
+static int dscc4_found1(struct pci_dev *, void __iomem *ioaddr);
+static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
+static int dscc4_open(struct net_device *);
+static netdev_tx_t dscc4_start_xmit(struct sk_buff *,
+					  struct net_device *);
+static int dscc4_close(struct net_device *);
+static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int dscc4_init_ring(struct net_device *);
+static void dscc4_release_ring(struct dscc4_dev_priv *);
+static void dscc4_timer(unsigned long);
+static void dscc4_tx_timeout(struct net_device *);
+static irqreturn_t dscc4_irq(int irq, void *dev_id);
+static int dscc4_hdlc_attach(struct net_device *, unsigned short, unsigned short);
+static int dscc4_set_iface(struct dscc4_dev_priv *, struct net_device *);
+#ifdef DSCC4_POLLING
+static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *);
+#endif
+
+static inline struct dscc4_dev_priv *dscc4_priv(struct net_device *dev)
+{
+	return dev_to_hdlc(dev)->priv;
+}
+
+static inline struct net_device *dscc4_to_dev(struct dscc4_dev_priv *p)
+{
+	return p->dev;
+}
+
+static void scc_patchl(u32 mask, u32 value, struct dscc4_dev_priv *dpriv,
+			struct net_device *dev, int offset)
+{
+	u32 state;
+
+	/* Cf scc_writel for concern regarding thread-safety */
+	state = dpriv->scc_regs[offset >> 2];
+	state &= ~mask;
+	state |= value;
+	dpriv->scc_regs[offset >> 2] = state;
+	writel(state, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
+}
+
+static void scc_writel(u32 bits, struct dscc4_dev_priv *dpriv,
+		       struct net_device *dev, int offset)
+{
+	/*
+	 * Thread-UNsafe.
+	 * As of 2002/02/16, there are no thread racing for access.
+	 */
+	dpriv->scc_regs[offset >> 2] = bits;
+	writel(bits, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
+}
+
+static inline u32 scc_readl(struct dscc4_dev_priv *dpriv, int offset)
+{
+	return dpriv->scc_regs[offset >> 2];
+}
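+
+/*
+ * Note: scc_regs[] acts as a software shadow of the SCC registers.
+ * scc_patchl() and scc_writel() update the shadow together with the chip, so
+ * scc_readl() can return the last value written without an MMIO read;
+ * only STAR is read back from the chip (twice, cf errata DS5), see
+ * scc_readl_star() below.
+ */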
+
+static u32 scc_readl_star(struct dscc4_dev_priv *dpriv, struct net_device *dev)
+{
+	/* Cf errata DS5 p.4 */
+	readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
+	return readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
+}
+
+static inline void dscc4_do_tx(struct dscc4_dev_priv *dpriv,
+			       struct net_device *dev)
+{
+	dpriv->ltda = dpriv->tx_fd_dma +
+                      ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD);
+	writel(dpriv->ltda, dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
+	/* Flush posted writes *NOW* */
+	readl(dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
+}
+
+static inline void dscc4_rx_update(struct dscc4_dev_priv *dpriv,
+				   struct net_device *dev)
+{
+	dpriv->lrda = dpriv->rx_fd_dma +
+		      ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD);
+	writel(dpriv->lrda, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
+}
+
+static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv)
+{
+	return dpriv->tx_current == dpriv->tx_dirty;
+}
+
+static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv,
+					      struct net_device *dev)
+{
+	return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda;
+}
+
+static int state_check(u32 state, struct dscc4_dev_priv *dpriv,
+		       struct net_device *dev, const char *msg)
+{
+	int ret = 0;
+
+	if (debug > 1) {
+		if (SOURCE_ID(state) != dpriv->dev_id) {
+			printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
+			       dev->name, msg, SOURCE_ID(state), state);
+			ret = -1;
+		}
+		if (state & 0x0df80c00) {
+			printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
+			       dev->name, msg, state);
+			ret = -1;
+		}
+	}
+	return ret;
+}
+
+static void dscc4_tx_print(struct net_device *dev,
+			   struct dscc4_dev_priv *dpriv,
+			   char *msg)
+{
+	printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n",
+	       dev->name, dpriv->tx_current, dpriv->tx_dirty, msg);
+}
+
+static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
+{
+	struct pci_dev *pdev = dpriv->pci_priv->pdev;
+	struct TxFD *tx_fd = dpriv->tx_fd;
+	struct RxFD *rx_fd = dpriv->rx_fd;
+	struct sk_buff **skbuff;
+	int i;
+
+	pci_free_consistent(pdev, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma);
+	pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);
+
+	skbuff = dpriv->tx_skbuff;
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		if (*skbuff) {
+			pci_unmap_single(pdev, le32_to_cpu(tx_fd->data),
+				(*skbuff)->len, PCI_DMA_TODEVICE);
+			dev_kfree_skb(*skbuff);
+		}
+		skbuff++;
+		tx_fd++;
+	}
+
+	skbuff = dpriv->rx_skbuff;
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		if (*skbuff) {
+			pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
+				RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(*skbuff);
+		}
+		skbuff++;
+		rx_fd++;
+	}
+}
+
+static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv,
+				 struct net_device *dev)
+{
+	unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE;
+	struct RxFD *rx_fd = dpriv->rx_fd + dirty;
+	const int len = RX_MAX(HDLC_MAX_MRU);
+	struct sk_buff *skb;
+	int ret = 0;
+
+	skb = dev_alloc_skb(len);
+	dpriv->rx_skbuff[dirty] = skb;
+	if (skb) {
+		skb->protocol = hdlc_type_trans(skb, dev);
+		rx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
+					  skb->data, len, PCI_DMA_FROMDEVICE));
+	} else {
+		rx_fd->data = 0;
+		ret = -1;
+	}
+	return ret;
+}
+
+/*
+ * IRQ/thread/whatever safe
+ */
+static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
+			      struct net_device *dev, char *msg)
+{
+	s8 i = 0;
+
+	do {
+		if (!(scc_readl_star(dpriv, dev) & SccBusy)) {
+			printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name,
+			       msg, i);
+			goto done;
+		}
+		schedule_timeout_uninterruptible(10);
+		rmb();
+	} while (++i > 0);
+	netdev_err(dev, "%s timeout\n", msg);
+done:
+	return (i >= 0) ? i : -EAGAIN;
+}
+
+static int dscc4_do_action(struct net_device *dev, char *msg)
+{
+	void __iomem *ioaddr = dscc4_priv(dev)->base_addr;
+	s16 i = 0;
+
+	writel(Action, ioaddr + GCMDR);
+	ioaddr += GSTAR;
+	do {
+		u32 state = readl(ioaddr);
+
+		if (state & ArAck) {
+			netdev_dbg(dev, "%s ack\n", msg);
+			writel(ArAck, ioaddr);
+			goto done;
+		} else if (state & Arf) {
+			netdev_err(dev, "%s failed\n", msg);
+			writel(Arf, ioaddr);
+			i = -1;
+			goto done;
+		}
+		rmb();
+	} while (++i > 0);
+	netdev_err(dev, "%s timeout\n", msg);
+done:
+	return i;
+}
+
+static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
+{
+	int cur = dpriv->iqtx_current%IRQ_RING_SIZE;
+	s8 i = 0;
+
+	do {
+		if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
+		    (dpriv->iqtx[cur] & cpu_to_le32(Xpr)))
+			break;
+		smp_rmb();
+		schedule_timeout_uninterruptible(10);
+	} while (++i > 0);
+
+	return (i >= 0) ? i : -EAGAIN;
+}
+
+#if 0 /* dscc4_{rx/tx}_reset are both unreliable - more tweak needed */
+static void dscc4_rx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dpriv->pci_priv->lock, flags);
+	/* Cf errata DS5 p.6 */
+	writel(0x00000000, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
+	scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
+	readl(dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
+	writel(MTFi|Rdr, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
+	writel(Action, dpriv->base_addr + GCMDR);
+	spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags);
+}
+
+#endif
+
+#if 0
+static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
+{
+	u16 i = 0;
+
+	/* Cf errata DS5 p.7 */
+	scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
+	scc_writel(0x00050000, dpriv, dev, CCR2);
+	/*
+	 * Must be longer than the time required to fill the fifo.
+	 */
+	while (!dscc4_tx_quiescent(dpriv, dev) && ++i) {
+		udelay(1);
+		wmb();
+	}
+
+	writel(MTFi|Rdt, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
+	if (dscc4_do_action(dev, "Rdt") < 0)
+		netdev_err(dev, "Tx reset failed\n");
+}
+#endif
+
+/* TODO: (ab)use this function to refill a completely depleted RX ring. */
+static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
+				struct net_device *dev)
+{
+	struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE;
+	struct pci_dev *pdev = dpriv->pci_priv->pdev;
+	struct sk_buff *skb;
+	int pkt_len;
+
+	skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE];
+	if (!skb) {
+		printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __func__);
+		goto refill;
+	}
+	pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2));
+	pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
+			 RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
+	if ((skb->data[--pkt_len] & FrameOk) == FrameOk) {
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += pkt_len;
+		skb_put(skb, pkt_len);
+		if (netif_running(dev))
+			skb->protocol = hdlc_type_trans(skb, dev);
+		netif_rx(skb);
+	} else {
+		if (skb->data[pkt_len] & FrameRdo)
+			dev->stats.rx_fifo_errors++;
+		else if (!(skb->data[pkt_len] & FrameCrc))
+			dev->stats.rx_crc_errors++;
+		else if ((skb->data[pkt_len] & (FrameVfr | FrameRab)) !=
+			 (FrameVfr | FrameRab))
+			dev->stats.rx_length_errors++;
+		dev->stats.rx_errors++;
+		dev_kfree_skb_irq(skb);
+	}
+refill:
+	while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) {
+		if (try_get_rx_skb(dpriv, dev) < 0)
+			break;
+		dpriv->rx_dirty++;
+	}
+	dscc4_rx_update(dpriv, dev);
+	rx_fd->state2 = 0x00000000;
+	rx_fd->end = cpu_to_le32(0xbabeface);
+}
+
+static void dscc4_free1(struct pci_dev *pdev)
+{
+	struct dscc4_pci_priv *ppriv;
+	struct dscc4_dev_priv *root;
+	int i;
+
+	ppriv = pci_get_drvdata(pdev);
+	root = ppriv->root;
+
+	for (i = 0; i < dev_per_card; i++)
+		unregister_hdlc_device(dscc4_to_dev(root + i));
+
+	pci_set_drvdata(pdev, NULL);
+
+	for (i = 0; i < dev_per_card; i++)
+		free_netdev(root[i].dev);
+	kfree(root);
+	kfree(ppriv);
+}
+
+static int __devinit dscc4_init_one(struct pci_dev *pdev,
+				  const struct pci_device_id *ent)
+{
+	struct dscc4_pci_priv *priv;
+	struct dscc4_dev_priv *dpriv;
+	void __iomem *ioaddr;
+	int i, rc;
+
+	printk(KERN_DEBUG "%s", version);
+
+	rc = pci_enable_device(pdev);
+	if (rc < 0)
+		goto out;
+
+	rc = pci_request_region(pdev, 0, "registers");
+	if (rc < 0) {
+		pr_err("can't reserve MMIO region (regs)\n");
+	        goto err_disable_0;
+	}
+	rc = pci_request_region(pdev, 1, "LBI interface");
+	if (rc < 0) {
+		pr_err("can't reserve MMIO region (lbi)\n");
+	        goto err_free_mmio_region_1;
+	}
+
+	ioaddr = pci_ioremap_bar(pdev, 0);
+	if (!ioaddr) {
+		pr_err("cannot remap MMIO region %llx @ %llx\n",
+		       (unsigned long long)pci_resource_len(pdev, 0),
+		       (unsigned long long)pci_resource_start(pdev, 0));
+		rc = -EIO;
+		goto err_free_mmio_regions_2;
+	}
+	printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#llx (regs), %#llx (lbi), IRQ %d\n",
+	        (unsigned long long)pci_resource_start(pdev, 0),
+	        (unsigned long long)pci_resource_start(pdev, 1), pdev->irq);
+
+	/* Cf errata DS5 p.2 */
+	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xf8);
+	pci_set_master(pdev);
+
+	rc = dscc4_found1(pdev, ioaddr);
+	if (rc < 0)
+	        goto err_iounmap_3;
+
+	priv = pci_get_drvdata(pdev);
+
+	rc = request_irq(pdev->irq, dscc4_irq, IRQF_SHARED, DRV_NAME, priv->root);
+	if (rc < 0) {
+		pr_warn("IRQ %d busy\n", pdev->irq);
+		goto err_release_4;
+	}
+
+	/* power up/little endian/dma core controlled via lrda/ltda */
+	writel(0x00000001, ioaddr + GMODE);
+	/* Shared interrupt queue */
+	{
+		u32 bits;
+
+		bits = (IRQ_RING_SIZE >> 5) - 1;
+		bits |= bits << 4;
+		bits |= bits << 8;
+		bits |= bits << 16;
+		writel(bits, ioaddr + IQLENR0);
+	}
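+	/*
+	 * With the default IRQ_RING_SIZE of 64, 'bits' above works out to
+	 * (64 >> 5) - 1 = 1 replicated into every nibble, i.e. 0x11111111,
+	 * presumably one length field per per-channel rx/tx interrupt queue.
+	 */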
+	/* Global interrupt queue */
+	writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
+	priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev,
+		IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma);
+	if (!priv->iqcfg)
+		goto err_free_irq_5;
+	writel(priv->iqcfg_dma, ioaddr + IQCFG);
+
+	rc = -ENOMEM;
+
+	/*
+	 * SCC 0-3 private rx/tx irq structures
+	 * IQRX/TXi needs to be set soon. Learned it the hard way...
+	 */
+	for (i = 0; i < dev_per_card; i++) {
+		dpriv = priv->root + i;
+		dpriv->iqtx = (__le32 *) pci_alloc_consistent(pdev,
+			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
+		if (!dpriv->iqtx)
+			goto err_free_iqtx_6;
+		writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
+	}
+	for (i = 0; i < dev_per_card; i++) {
+		dpriv = priv->root + i;
+		dpriv->iqrx = (__le32 *) pci_alloc_consistent(pdev,
+			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
+		if (!dpriv->iqrx)
+			goto err_free_iqrx_7;
+		writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
+	}
+
+	/* Cf application hint. Beware of hard-lock condition on threshold. */
+	writel(0x42104000, ioaddr + FIFOCR1);
+	//writel(0x9ce69800, ioaddr + FIFOCR2);
+	writel(0xdef6d800, ioaddr + FIFOCR2);
+	//writel(0x11111111, ioaddr + FIFOCR4);
+	writel(0x18181818, ioaddr + FIFOCR4);
+	// FIXME: should depend on the chipset revision
+	writel(0x0000000e, ioaddr + FIFOCR3);
+
+	writel(0xff200001, ioaddr + GCMDR);
+
+	rc = 0;
+out:
+	return rc;
+
+err_free_iqrx_7:
+	while (--i >= 0) {
+		dpriv = priv->root + i;
+		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
+				    dpriv->iqrx, dpriv->iqrx_dma);
+	}
+	i = dev_per_card;
+err_free_iqtx_6:
+	while (--i >= 0) {
+		dpriv = priv->root + i;
+		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
+				    dpriv->iqtx, dpriv->iqtx_dma);
+	}
+	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg,
+			    priv->iqcfg_dma);
+err_free_irq_5:
+	free_irq(pdev->irq, priv->root);
+err_release_4:
+	dscc4_free1(pdev);
+err_iounmap_3:
+	iounmap (ioaddr);
+err_free_mmio_regions_2:
+	pci_release_region(pdev, 1);
+err_free_mmio_region_1:
+	pci_release_region(pdev, 0);
+err_disable_0:
+	pci_disable_device(pdev);
+	goto out;
+};
+
+/*
+ * Let's hope the default values are decent enough to protect my
+ * feet from the user's gun - Ueimor
+ */
+static void dscc4_init_registers(struct dscc4_dev_priv *dpriv,
+				 struct net_device *dev)
+{
+	/* No interrupts, SCC core disabled. Let's relax */
+	scc_writel(0x00000000, dpriv, dev, CCR0);
+
+	scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR);
+
+	/*
+	 * No address recognition/crc-CCITT/cts enabled
+	 * Shared flags transmission disabled - cf errata DS5 p.11
+	 * Carrier detect disabled - cf errata p.14
+	 * FIXME: carrier detection/polarity may be handled more gracefully.
+	 */
+	scc_writel(0x02408000, dpriv, dev, CCR1);
+
+	/* crc not forwarded - Cf errata DS5 p.11 */
+	scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2);
+	// crc forwarded
+	//scc_writel(0x00250008 & ~RxActivate, dpriv, dev, CCR2);
+}
+
+static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
+{
+	int ret = 0;
+
+	if ((hz < 0) || (hz > DSCC4_HZ_MAX))
+		ret = -EOPNOTSUPP;
+	else
+		dpriv->pci_priv->xtal_hz = hz;
+
+	return ret;
+}
+
+static const struct net_device_ops dscc4_ops = {
+	.ndo_open       = dscc4_open,
+	.ndo_stop       = dscc4_close,
+	.ndo_change_mtu = hdlc_change_mtu,
+	.ndo_start_xmit = hdlc_start_xmit,
+	.ndo_do_ioctl   = dscc4_ioctl,
+	.ndo_tx_timeout = dscc4_tx_timeout,
+};
+
+static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
+{
+	struct dscc4_pci_priv *ppriv;
+	struct dscc4_dev_priv *root;
+	int i, ret = -ENOMEM;
+
+	root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL);
+	if (!root)
+		goto err_out;
+
+	for (i = 0; i < dev_per_card; i++) {
+		root[i].dev = alloc_hdlcdev(root + i);
+		if (!root[i].dev)
+			goto err_free_dev;
+	}
+
+	ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL);
+	if (!ppriv)
+		goto err_free_dev;
+
+	ppriv->root = root;
+	spin_lock_init(&ppriv->lock);
+
+	for (i = 0; i < dev_per_card; i++) {
+		struct dscc4_dev_priv *dpriv = root + i;
+		struct net_device *d = dscc4_to_dev(dpriv);
+		hdlc_device *hdlc = dev_to_hdlc(d);
+
+	        d->base_addr = (unsigned long)ioaddr;
+	        d->irq = pdev->irq;
+		d->netdev_ops = &dscc4_ops;
+		d->watchdog_timeo = TX_TIMEOUT;
+		SET_NETDEV_DEV(d, &pdev->dev);
+
+		dpriv->dev_id = i;
+		dpriv->pci_priv = ppriv;
+		dpriv->base_addr = ioaddr;
+		spin_lock_init(&dpriv->lock);
+
+		hdlc->xmit = dscc4_start_xmit;
+		hdlc->attach = dscc4_hdlc_attach;
+
+		dscc4_init_registers(dpriv, d);
+		dpriv->parity = PARITY_CRC16_PR0_CCITT;
+		dpriv->encoding = ENCODING_NRZ;
+	
+		ret = dscc4_init_ring(d);
+		if (ret < 0)
+			goto err_unregister;
+
+		ret = register_hdlc_device(d);
+		if (ret < 0) {
+			pr_err("unable to register\n");
+			dscc4_release_ring(dpriv);
+			goto err_unregister;
+	        }
+	}
+
+	ret = dscc4_set_quartz(root, quartz);
+	if (ret < 0)
+		goto err_unregister;
+
+	pci_set_drvdata(pdev, ppriv);
+	return ret;
+
+err_unregister:
+	while (i-- > 0) {
+		dscc4_release_ring(root + i);
+		unregister_hdlc_device(dscc4_to_dev(root + i));
+	}
+	kfree(ppriv);
+	i = dev_per_card;
+err_free_dev:
+	while (i-- > 0)
+		free_netdev(root[i].dev);
+	kfree(root);
+err_out:
+	return ret;
+};
+
+/* FIXME: get rid of the unneeded code */
+static void dscc4_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+//	struct dscc4_pci_priv *ppriv;
+
+	goto done;
+done:
+        dpriv->timer.expires = jiffies + TX_TIMEOUT;
+        add_timer(&dpriv->timer);
+}
+
+static void dscc4_tx_timeout(struct net_device *dev)
+{
+	/* FIXME: something is missing there */
+}
+
+static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv)
+{
+	sync_serial_settings *settings = &dpriv->settings;
+
+	if (settings->loopback && (settings->clock_type != CLOCK_INT)) {
+		struct net_device *dev = dscc4_to_dev(dpriv);
+
+		netdev_info(dev, "loopback requires clock\n");
+		return -1;
+	}
+	return 0;
+}
+
+#ifdef CONFIG_DSCC4_PCI_RST
+/*
+ * Some DSCC4-based cards wire the GPIO port and the PCI #RST pin together
+ * so as to provide a safe way to reset the ASIC without rebooting the whole
+ * machine.
+ *
+ * This code doesn't need to be efficient. Keep It Simple
+ */
+static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
+{
+	int i;
+
+	mutex_lock(&dscc4_mutex);
+	for (i = 0; i < 16; i++)
+		pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);
+
+	/* Maximal LBI clock divider (who cares ?) and whole GPIO range. */
+	writel(0x001c0000, ioaddr + GMODE);
+	/* Configure GPIO port as output */
+	writel(0x0000ffff, ioaddr + GPDIR);
+	/* Disable interrupts */
+	writel(0x0000ffff, ioaddr + GPIM);
+
+	writel(0x0000ffff, ioaddr + GPDATA);
+	writel(0x00000000, ioaddr + GPDATA);
+
+	/* Flush posted writes */
+	readl(ioaddr + GSTAR);
+
+	schedule_timeout_uninterruptible(10);
+
+	for (i = 0; i < 16; i++)
+		pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
+	mutex_unlock(&dscc4_mutex);
+}
+#else
+#define dscc4_pci_reset(pdev,ioaddr)	do {} while (0)
+#endif /* CONFIG_DSCC4_PCI_RST */
+
+static int dscc4_open(struct net_device *dev)
+{
+	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+	struct dscc4_pci_priv *ppriv;
+	int ret = -EAGAIN;
+
+	if ((dscc4_loopback_check(dpriv) < 0))
+		goto err;
+
+	if ((ret = hdlc_open(dev)))
+		goto err;
+
+	ppriv = dpriv->pci_priv;
+
+	/*
+	 * Due to various bugs, there is no way to reliably reset a
+	 * specific port (manufacturer-dependent special PCI #RST wiring
+	 * apart: it affects all ports). Thus the device goes into the best
+	 * silent mode possible at dscc4_close() time and simply claims to
+	 * be up if it's opened again. It still isn't possible to change
+	 * the HDLC configuration without rebooting but at least the ports
+	 * can be up/down ifconfig'ed without killing the host.
+	 */
+	if (dpriv->flags & FakeReset) {
+		dpriv->flags &= ~FakeReset;
+		scc_patchl(0, PowerUp, dpriv, dev, CCR0);
+		scc_patchl(0, 0x00050000, dpriv, dev, CCR2);
+		scc_writel(EventsMask, dpriv, dev, IMR);
+		netdev_info(dev, "up again\n");
+		goto done;
+	}
+
+	/* IDT+IDR during XPR */
+	dpriv->flags = NeedIDR | NeedIDT;
+
+	scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0);
+
+	/*
+	 * The following is a bit paranoid...
+	 *
+	 * NB: the datasheet "...CEC will stay active if the SCC is in
+	 * power-down mode or..." and CCR2.RAC = 1 are two different
+	 * situations.
+	 */
+	if (scc_readl_star(dpriv, dev) & SccBusy) {
+		netdev_err(dev, "busy - try later\n");
+		ret = -EAGAIN;
+		goto err_out;
+	} else
+		netdev_info(dev, "available - good\n");
+
+	scc_writel(EventsMask, dpriv, dev, IMR);
+
+	/* Posted write is flushed in the wait_ack loop */
+	scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR);
+
+	if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0)
+		goto err_disable_scc_events;
+
+	/*
+	 * I would expect XPR near CE completion (before ? after ?).
+	 * At worst, this code won't see a late XPR and people
+	 * will have to re-issue an ifconfig (this is harmless).
+	 * WARNING, a really missing XPR usually means a hardware
+	 * reset is needed. Suggestions anyone ?
+	 */
+	if ((ret = dscc4_xpr_ack(dpriv)) < 0) {
+		pr_err("XPR timeout\n");
+		goto err_disable_scc_events;
+	}
+	
+	if (debug > 2)
+		dscc4_tx_print(dev, dpriv, "Open");
+
+done:
+	netif_start_queue(dev);
+
+        init_timer(&dpriv->timer);
+        dpriv->timer.expires = jiffies + 10*HZ;
+        dpriv->timer.data = (unsigned long)dev;
+	dpriv->timer.function = dscc4_timer;
+        add_timer(&dpriv->timer);
+	netif_carrier_on(dev);
+
+	return 0;
+
+err_disable_scc_events:
+	scc_writel(0xffffffff, dpriv, dev, IMR);
+	scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
+err_out:
+	hdlc_close(dev);
+err:
+	return ret;
+}
+
+#ifdef DSCC4_POLLING
+static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
+{
+	/* FIXME: it's gonna be easy (TM), for sure */
+}
+#endif /* DSCC4_POLLING */
+
+static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb,
+					  struct net_device *dev)
+{
+	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+	struct dscc4_pci_priv *ppriv = dpriv->pci_priv;
+	struct TxFD *tx_fd;
+	int next;
+
+	next = dpriv->tx_current%TX_RING_SIZE;
+	dpriv->tx_skbuff[next] = skb;
+	tx_fd = dpriv->tx_fd + next;
+	tx_fd->state = FrameEnd | TO_STATE_TX(skb->len);
+	tx_fd->data = cpu_to_le32(pci_map_single(ppriv->pdev, skb->data, skb->len,
+				     PCI_DMA_TODEVICE));
+	tx_fd->complete = 0x00000000;
+	tx_fd->jiffies = jiffies;
+	mb();
+
+#ifdef DSCC4_POLLING
+	spin_lock(&dpriv->lock);
+	while (dscc4_tx_poll(dpriv, dev));
+	spin_unlock(&dpriv->lock);
+#endif
+
+	if (debug > 2)
+		dscc4_tx_print(dev, dpriv, "Xmit");
+	/* To be cleaned(unsigned int)/optimized. Later, ok ? */
+	if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE))
+		netif_stop_queue(dev);
+
+	if (dscc4_tx_quiescent(dpriv, dev))
+		dscc4_do_tx(dpriv, dev);
+
+	return NETDEV_TX_OK;
+}
+
+static int dscc4_close(struct net_device *dev)
+{
+	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+
+	del_timer_sync(&dpriv->timer);
+	netif_stop_queue(dev);
+
+	scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
+	scc_patchl(0x00050000, 0, dpriv, dev, CCR2);
+	scc_writel(0xffffffff, dpriv, dev, IMR);
+
+	dpriv->flags |= FakeReset;
+
+	hdlc_close(dev);
+
+	return 0;
+}
+
+static inline int dscc4_check_clock_ability(int port)
+{
+	int ret = 0;
+
+#ifdef CONFIG_DSCC4_PCISYNC
+	if (port >= 2)
+		ret = -1;
+#endif
+	return ret;
+}
+
+/*
+ * DS1 p.137: "There are a total of 13 different clocking modes..."
+ *                                  ^^
+ * Design choices:
+ * - by default, assume a clock is provided on pin RxClk/TxClk (clock mode 0a).
+ *   Clock mode 3b _should_ work but the testing seems to make this point
+ *   dubious (DIY testing requires setting CCR0 at 0x00000033).
+ *   This is supposed to provide least surprise "DTE like" behavior.
+ * - if line rate is specified, clocks are assumed to be locally generated.
+ *   A quartz must be available (on pin XTAL1). Modes 6b/7b are used. Choosing
+ *   between them is done automagically according to the required frequency
+ *   scaling. Of course some rounding may take place.
+ * - no high speed mode (40Mb/s). May be trivial to do but I don't have an
+ *   appropriate external clocking device for testing.
+ * - no time-slot/clock mode 5: shameless laziness.
+ *
+ * The clock signals wiring can be (is ?) manufacturer dependent. Good luck.
+ *
+ * BIG FAT WARNING: if the device isn't provided enough clocking signal, it
+ * won't pass the init sequence. For example, straight back-to-back DTE without
+ * external clock will fail when dscc4_open() (<- 'ifconfig hdlcx xxx') is
+ * called.
+ *
+ * Typos lurk in the datasheet (missing divider in clock mode 7a, figure 51
+ * p.153 of DS0, for example).
+ *
+ * Clock mode related bits of CCR0:
+ *     +------------ TOE: output TxClk (0b/2b/3a/3b/6b/7a/7b only)
+ *     | +---------- SSEL: sub-mode select 0 -> a, 1 -> b
+ *     | | +-------- High Speed: say 0
+ *     | | | +-+-+-- Clock Mode: 0..7
+ *     | | | | | |
+ * -+-+-+-+-+-+-+-+
+ * x|x|5|4|3|2|1|0| lower bits
+ *
+ * Division factor of BRR: k = (N+1)x2^M (total divider = 16xk in mode 6b)
+ *            +-+-+-+------------------ M (0..15)
+ *            | | | |     +-+-+-+-+-+-- N (0..63)
+ *    0 0 0 0 | | | | 0 0 | | | | | |
+ * ...-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *    f|e|d|c|b|a|9|8|7|6|5|4|3|2|1|0| lower bits
+ *
+ */
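+/*
+ * Worked example for dscc4_set_clock() below, assuming a hypothetical 10 MHz
+ * quartz (given via the 'quartz' module parameter) and a requested rate of
+ * 64000 bps: divider = 10000000 / 64000 = 156, which is below
+ * BRR_DIVIDER_MAX, hence clock mode 7b. Extracting the 6 highest weighted
+ * bits gives m = 2, n = 39, so BRR = (2 << 8) | 39 and the rate actually
+ * used is 10000000 / (39 << 2) = 64102 bps - hence the "clock adjusted"
+ * message printed by dscc4_clock_setting().
+ */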
+static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
+{
+	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+	int ret = -1;
+	u32 brr;
+
+	*state &= ~Ccr0ClockMask;
+	if (*bps) { /* Clock generated - required for DCE */
+		u32 n = 0, m = 0, divider;
+		int xtal;
+
+		xtal = dpriv->pci_priv->xtal_hz;
+		if (!xtal)
+			goto done;
+		if (dscc4_check_clock_ability(dpriv->dev_id) < 0)
+			goto done;
+		divider = xtal / *bps;
+		if (divider > BRR_DIVIDER_MAX) {
+			divider >>= 4;
+			*state |= 0x00000036; /* Clock mode 6b (BRG/16) */
+		} else
+			*state |= 0x00000037; /* Clock mode 7b (BRG) */
+		if (divider >> 22) {
+			n = 63;
+			m = 15;
+		} else if (divider) {
+			/* Extraction of the 6 highest weighted bits */
+			m = 0;
+			while (0xffffffc0 & divider) {
+				m++;
+				divider >>= 1;
+			}
+			n = divider;
+		}
+		brr = (m << 8) | n;
+		divider = n << m;
+		if (!(*state & 0x00000001)) /* ?b mode mask => clock mode 6b */
+			divider <<= 4;
+		*bps = xtal / divider;
+	} else {
+		/*
+		 * External clock - DTE
+		 * "state" already reflects Clock mode 0a (CCR0 = 0xzzzzzz00).
+		 * Nothing more to be done
+		 */
+		brr = 0;
+	}
+	scc_writel(brr, dpriv, dev, BRR);
+	ret = 0;
+done:
+	return ret;
+}
+
+static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+	const size_t size = sizeof(dpriv->settings);
+	int ret = 0;
+
+        if (dev->flags & IFF_UP)
+                return -EBUSY;
+
+	if (cmd != SIOCWANDEV)
+		return -EOPNOTSUPP;
+
+	switch(ifr->ifr_settings.type) {
+	case IF_GET_IFACE:
+		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
+		if (ifr->ifr_settings.size < size) {
+			ifr->ifr_settings.size = size; /* data size wanted */
+			return -ENOBUFS;
+		}
+		if (copy_to_user(line, &dpriv->settings, size))
+			return -EFAULT;
+		break;
+
+	case IF_IFACE_SYNC_SERIAL:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (dpriv->flags & FakeReset) {
+			netdev_info(dev, "please reset the device before this command\n");
+			return -EPERM;
+		}
+		if (copy_from_user(&dpriv->settings, line, size))
+			return -EFAULT;
+		ret = dscc4_set_iface(dpriv, dev);
+		break;
+
+	default:
+		ret = hdlc_ioctl(dev, ifr, cmd);
+		break;
+	}
+
+	return ret;
+}
+
+static int dscc4_match(const struct thingie *p, int value)
+{
+	int i;
+
+	for (i = 0; p[i].define != -1; i++) {
+		if (value == p[i].define)
+			break;
+	}
+	if (p[i].define == -1)
+		return -1;
+	else
+		return i;
+}
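+
+/*
+ * Example: with the encoding[] table used by dscc4_encoding_setting() below,
+ * dscc4_match(encoding, ENCODING_NRZI) returns 1 and the caller patches CCR0
+ * with encoding[1].bits (0x00200000); a value missing from a table yields -1,
+ * which the callers turn into -EOPNOTSUPP.
+ */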
+
+static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv,
+			       struct net_device *dev)
+{
+	sync_serial_settings *settings = &dpriv->settings;
+	int ret = -EOPNOTSUPP;
+	u32 bps, state;
+
+	bps = settings->clock_rate;
+	state = scc_readl(dpriv, CCR0);
+	if (dscc4_set_clock(dev, &bps, &state) < 0)
+		goto done;
+	if (bps) { /* DCE */
+		printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name);
+		if (settings->clock_rate != bps) {
+			printk(KERN_DEBUG "%s: clock adjusted (%08d -> %08d)\n",
+				dev->name, settings->clock_rate, bps);
+			settings->clock_rate = bps;
+		}
+	} else { /* DTE */
+		state |= PowerUp | Vis;
+		printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name);
+	}
+	scc_writel(state, dpriv, dev, CCR0);
+	ret = 0;
+done:
+	return ret;
+}
+
+static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv,
+				  struct net_device *dev)
+{
+	static const struct thingie encoding[] = {
+		{ ENCODING_NRZ,		0x00000000 },
+		{ ENCODING_NRZI,	0x00200000 },
+		{ ENCODING_FM_MARK,	0x00400000 },
+		{ ENCODING_FM_SPACE,	0x00500000 },
+		{ ENCODING_MANCHESTER,	0x00600000 },
+		{ -1,			0}
+	};
+	int i, ret = 0;
+
+	i = dscc4_match(encoding, dpriv->encoding);
+	if (i >= 0)
+		scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0);
+	else
+		ret = -EOPNOTSUPP;
+	return ret;
+}
+
+static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv,
+				  struct net_device *dev)
+{
+	sync_serial_settings *settings = &dpriv->settings;
+	u32 state;
+
+	state = scc_readl(dpriv, CCR1);
+	if (settings->loopback) {
+		printk(KERN_DEBUG "%s: loopback\n", dev->name);
+		state |= 0x00000100;
+	} else {
+		printk(KERN_DEBUG "%s: normal\n", dev->name);
+		state &= ~0x00000100;
+	}
+	scc_writel(state, dpriv, dev, CCR1);
+	return 0;
+}
+
+static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv,
+			     struct net_device *dev)
+{
+	static const struct thingie crc[] = {
+		{ PARITY_CRC16_PR0_CCITT,	0x00000010 },
+		{ PARITY_CRC16_PR1_CCITT,	0x00000000 },
+		{ PARITY_CRC32_PR0_CCITT,	0x00000011 },
+		{ PARITY_CRC32_PR1_CCITT,	0x00000001 }
+	};
+	int i, ret = 0;
+
+	i = dscc4_match(crc, dpriv->parity);
+	if (i >= 0)
+		scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1);
+	else
+		ret = -EOPNOTSUPP;
+	return ret;
+}
+
+static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev)
+{
+	struct {
+		int (*action)(struct dscc4_dev_priv *, struct net_device *);
+	} *p, do_setting[] = {
+		{ dscc4_encoding_setting },
+		{ dscc4_clock_setting },
+		{ dscc4_loopback_setting },
+		{ dscc4_crc_setting },
+		{ NULL }
+	};
+	int ret = 0;
+
+	for (p = do_setting; p->action; p++) {
+		if ((ret = p->action(dpriv, dev)) < 0)
+			break;
+	}
+	return ret;
+}
+
+static irqreturn_t dscc4_irq(int irq, void *token)
+{
+	struct dscc4_dev_priv *root = token;
+	struct dscc4_pci_priv *priv;
+	struct net_device *dev;
+	void __iomem *ioaddr;
+	u32 state;
+	unsigned long flags;
+	int i, handled = 1;
+
+	priv = root->pci_priv;
+	dev = dscc4_to_dev(root);
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	ioaddr = root->base_addr;
+
+	state = readl(ioaddr + GSTAR);
+	if (!state) {
+		handled = 0;
+		goto out;
+	}
+	if (debug > 3)
+		printk(KERN_DEBUG "%s: GSTAR = 0x%08x\n", DRV_NAME, state);
+	writel(state, ioaddr + GSTAR);
+
+	if (state & Arf) {
+		netdev_err(dev, "failure (Arf). Harass the maintainer\n");
+		goto out;
+	}
+	state &= ~ArAck;
+	if (state & Cfg) {
+		if (debug > 0)
+			printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME);
+		if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & cpu_to_le32(Arf))
+			netdev_err(dev, "CFG failed\n");
+		if (!(state &= ~Cfg))
+			goto out;
+	}
+	if (state & RxEvt) {
+		i = dev_per_card - 1;
+		do {
+			dscc4_rx_irq(priv, root + i);
+		} while (--i >= 0);
+		state &= ~RxEvt;
+	}
+	if (state & TxEvt) {
+		i = dev_per_card - 1;
+		do {
+			dscc4_tx_irq(priv, root + i);
+		} while (--i >= 0);
+		state &= ~TxEvt;
+	}
+out:
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return IRQ_RETVAL(handled);
+}
+
+static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
+				struct dscc4_dev_priv *dpriv)
+{
+	struct net_device *dev = dscc4_to_dev(dpriv);
+	u32 state;
+	int cur, loop = 0;
+
+try:
+	cur = dpriv->iqtx_current%IRQ_RING_SIZE;
+	state = le32_to_cpu(dpriv->iqtx[cur]);
+	if (!state) {
+		if (debug > 4)
+			printk(KERN_DEBUG "%s: Tx ISR = 0x%08x\n", dev->name,
+			       state);
+		if ((debug > 1) && (loop > 1))
+			printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop);
+		if (loop && netif_queue_stopped(dev))
+			if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)
+				netif_wake_queue(dev);
+
+		if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) &&
+		    !dscc4_tx_done(dpriv))
+			dscc4_do_tx(dpriv, dev);
+		return;
+	}
+	loop++;
+	dpriv->iqtx[cur] = 0;
+	dpriv->iqtx_current++;
+
+	if (state_check(state, dpriv, dev, "Tx") < 0)
+		return;
+
+	if (state & SccEvt) {
+		if (state & Alls) {
+			struct sk_buff *skb;
+			struct TxFD *tx_fd;
+
+			if (debug > 2)
+				dscc4_tx_print(dev, dpriv, "Alls");
+			/*
+			 * DataComplete can't be trusted for Tx completion.
+			 * Cf errata DS5 p.8
+			 */
+			cur = dpriv->tx_dirty%TX_RING_SIZE;
+			tx_fd = dpriv->tx_fd + cur;
+			skb = dpriv->tx_skbuff[cur];
+			if (skb) {
+				pci_unmap_single(ppriv->pdev, le32_to_cpu(tx_fd->data),
+						 skb->len, PCI_DMA_TODEVICE);
+				if (tx_fd->state & FrameEnd) {
+					dev->stats.tx_packets++;
+					dev->stats.tx_bytes += skb->len;
+				}
+				dev_kfree_skb_irq(skb);
+				dpriv->tx_skbuff[cur] = NULL;
+				++dpriv->tx_dirty;
+			} else {
+				if (debug > 1)
+					netdev_err(dev, "Tx: NULL skb %d\n",
+						   cur);
+			}
+			/*
+			 * If the driver ends up sending crap on the wire, it
+			 * will be way easier to diagnose than the (not so)
+			 * random freeze induced by null sized tx frames.
+			 */
+			tx_fd->data = tx_fd->next;
+			tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
+			tx_fd->complete = 0x00000000;
+			tx_fd->jiffies = 0;
+
+			if (!(state &= ~Alls))
+				goto try;
+		}
+		/*
+		 * Transmit Data Underrun
+		 */
+		if (state & Xdu) {
+			netdev_err(dev, "Tx Data Underrun. Ask maintainer\n");
+			dpriv->flags = NeedIDT;
+			/* Tx reset */
+			writel(MTFi | Rdt,
+			       dpriv->base_addr + 0x0c*dpriv->dev_id + CH0CFG);
+			writel(Action, dpriv->base_addr + GCMDR);
+			return;
+		}
+		if (state & Cts) {
+			netdev_info(dev, "CTS transition\n");
+			if (!(state &= ~Cts)) /* DEBUG */
+				goto try;
+		}
+		if (state & Xmr) {
+			/* Frame needs to be sent again - FIXME */
+			netdev_err(dev, "Tx ReTx. Ask maintainer\n");
+			if (!(state &= ~Xmr)) /* DEBUG */
+				goto try;
+		}
+		if (state & Xpr) {
+			void __iomem *scc_addr;
+			unsigned long ring;
+			int i;
+
+			/*
+			 * - the busy condition happens (sometimes);
+			 * - it doesn't seem to make the handler unreliable.
+			 */
+			for (i = 1; i; i <<= 1) {
+				if (!(scc_readl_star(dpriv, dev) & SccBusy))
+					break;
+			}
+			if (!i)
+				netdev_info(dev, "busy in irq\n");
+
+			scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;
+			/* Keep this order: IDT before IDR */
+			if (dpriv->flags & NeedIDT) {
+				if (debug > 2)
+					dscc4_tx_print(dev, dpriv, "Xpr");
+				ring = dpriv->tx_fd_dma +
+				       (dpriv->tx_dirty%TX_RING_SIZE)*
+				       sizeof(struct TxFD);
+				writel(ring, scc_addr + CH0BTDA);
+				dscc4_do_tx(dpriv, dev);
+				writel(MTFi | Idt, scc_addr + CH0CFG);
+				if (dscc4_do_action(dev, "IDT") < 0)
+					goto err_xpr;
+				dpriv->flags &= ~NeedIDT;
+			}
+			if (dpriv->flags & NeedIDR) {
+				ring = dpriv->rx_fd_dma +
+				       (dpriv->rx_current%RX_RING_SIZE)*
+				       sizeof(struct RxFD);
+				writel(ring, scc_addr + CH0BRDA);
+				dscc4_rx_update(dpriv, dev);
+				writel(MTFi | Idr, scc_addr + CH0CFG);
+				if (dscc4_do_action(dev, "IDR") < 0)
+					goto err_xpr;
+				dpriv->flags &= ~NeedIDR;
+				smp_wmb();
+				/* Activate receiver and misc */
+				scc_writel(0x08050008, dpriv, dev, CCR2);
+			}
+		err_xpr:
+			if (!(state &= ~Xpr))
+				goto try;
+		}
+		if (state & Cd) {
+			if (debug > 0)
+				netdev_info(dev, "CD transition\n");
+			if (!(state &= ~Cd)) /* DEBUG */
+				goto try;
+		}
+	} else { /* ! SccEvt */
+		if (state & Hi) {
+#ifdef DSCC4_POLLING
+			while (!dscc4_tx_poll(dpriv, dev));
+#endif
+			netdev_info(dev, "Tx Hi\n");
+			state &= ~Hi;
+		}
+		if (state & Err) {
+			netdev_info(dev, "Tx ERR\n");
+			dev->stats.tx_errors++;
+			state &= ~Err;
+		}
+	}
+	goto try;
+}
+
+static void dscc4_rx_irq(struct dscc4_pci_priv *priv,
+				    struct dscc4_dev_priv *dpriv)
+{
+	struct net_device *dev = dscc4_to_dev(dpriv);
+	u32 state;
+	int cur;
+
+try:
+	cur = dpriv->iqrx_current%IRQ_RING_SIZE;
+	state = le32_to_cpu(dpriv->iqrx[cur]);
+	if (!state)
+		return;
+	dpriv->iqrx[cur] = 0;
+	dpriv->iqrx_current++;
+
+	if (state_check(state, dpriv, dev, "Rx") < 0)
+		return;
+
+	if (!(state & SccEvt)){
+		struct RxFD *rx_fd;
+
+		if (debug > 4)
+			printk(KERN_DEBUG "%s: Rx ISR = 0x%08x\n", dev->name,
+			       state);
+		state &= 0x00ffffff;
+		if (state & Err) { /* Hold or reset */
+			printk(KERN_DEBUG "%s: Rx ERR\n", dev->name);
+			cur = dpriv->rx_current%RX_RING_SIZE;
+			rx_fd = dpriv->rx_fd + cur;
+			/*
+			 * Presume we're not facing a DMAC receiver reset.
+			 * As we use the rx size-filtering feature of the
+			 * DSCC4, the beginning of a new frame is waiting in
+			 * the rx fifo. I bet a Receive Data Overflow will
+			 * happen most of the time, but let's try and avoid it.
+			 * Btw (as for RDO), if one experiences ERR while the
+			 * system looks rather idle, there may be a latency
+			 * problem. In this case, increasing RX_RING_SIZE may
+			 * help.
+			 */
+			//while (dpriv->rx_needs_refill) {
+				while (!(rx_fd->state1 & Hold)) {
+					rx_fd++;
+					cur++;
+					if (!(cur = cur%RX_RING_SIZE))
+						rx_fd = dpriv->rx_fd;
+				}
+				//dpriv->rx_needs_refill--;
+				try_get_rx_skb(dpriv, dev);
+				if (!rx_fd->data)
+					goto try;
+				rx_fd->state1 &= ~Hold;
+				rx_fd->state2 = 0x00000000;
+				rx_fd->end = cpu_to_le32(0xbabeface);
+			//}
+			goto try;
+		}
+		if (state & Fi) {
+			dscc4_rx_skb(dpriv, dev);
+			goto try;
+		}
+		if (state & Hi) { /* HI bit */
+			netdev_info(dev, "Rx Hi\n");
+			state &= ~Hi;
+			goto try;
+		}
+	} else { /* SccEvt */
+		if (debug > 1) {
+			//FIXME: check that all the events are handled
+		static struct {
+			u32 mask;
+			const char *irq_name;
+		} evts[] = {
+			{ 0x00008000, "TIN"},
+			{ 0x00000020, "RSC"},
+			{ 0x00000010, "PCE"},
+			{ 0x00000008, "PLLA"},
+			{ 0, NULL}
+		}, *evt;
+
+		for (evt = evts; evt->irq_name; evt++) {
+			if (state & evt->mask) {
+				printk(KERN_DEBUG "%s: %s\n",
+				       dev->name, evt->irq_name);
+				if (!(state &= ~evt->mask))
+					goto try;
+			}
+		}
+		} else {
+			if (!(state &= ~0x0000c03c))
+				goto try;
+		}
+		if (state & Cts) {
+			netdev_info(dev, "CTS transition\n");
+			if (!(state &= ~Cts)) /* DEBUG */
+				goto try;
+		}
+		/*
+		 * Receive Data Overflow (FIXME: fscked)
+		 */
+		if (state & Rdo) {
+			struct RxFD *rx_fd;
+			void __iomem *scc_addr;
+			int cur;
+
+			//if (debug)
+			//	dscc4_rx_dump(dpriv);
+			scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;
+
+			scc_patchl(RxActivate, 0, dpriv, dev, CCR2);
+			/*
+			 * This has no effect. Why ?
+			 * ORed with TxSccRes, one sees the CFG ack (for
+			 * the TX part only).
+			 */
+			scc_writel(RxSccRes, dpriv, dev, CMDR);
+			dpriv->flags |= RdoSet;
+
+			/*
+			 * Let's try and save something in the received data.
+			 * rx_current must be incremented at least once to
+			 * avoid HOLD in the BRDA-to-be-pointed desc.
+			 */
+			do {
+				cur = dpriv->rx_current++%RX_RING_SIZE;
+				rx_fd = dpriv->rx_fd + cur;
+				if (!(rx_fd->state2 & DataComplete))
+					break;
+				if (rx_fd->state2 & FrameAborted) {
+					dev->stats.rx_over_errors++;
+					rx_fd->state1 |= Hold;
+					rx_fd->state2 = 0x00000000;
+					rx_fd->end = cpu_to_le32(0xbabeface);
+				} else
+					dscc4_rx_skb(dpriv, dev);
+			} while (1);
+
+			if (debug > 0) {
+				if (dpriv->flags & RdoSet)
+					printk(KERN_DEBUG
+					       "%s: no RDO in Rx data\n", DRV_NAME);
+			}
+#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY
+			/*
+			 * FIXME: must the reset be this violent ?
+			 */
+#warning "FIXME: CH0BRDA"
+			writel(dpriv->rx_fd_dma +
+			       (dpriv->rx_current%RX_RING_SIZE)*
+			       sizeof(struct RxFD), scc_addr + CH0BRDA);
+			writel(MTFi|Rdr|Idr, scc_addr + CH0CFG);
+			if (dscc4_do_action(dev, "RDR") < 0) {
+				netdev_err(dev, "RDO recovery failed(RDR)\n");
+				goto rdo_end;
+			}
+			writel(MTFi|Idr, scc_addr + CH0CFG);
+			if (dscc4_do_action(dev, "IDR") < 0) {
+				netdev_err(dev, "RDO recovery failed(IDR)\n");
+				goto rdo_end;
+			}
+		rdo_end:
+#endif
+			scc_patchl(0, RxActivate, dpriv, dev, CCR2);
+			goto try;
+		}
+		if (state & Cd) {
+			netdev_info(dev, "CD transition\n");
+			if (!(state &= ~Cd)) /* DEBUG */
+				goto try;
+		}
+		if (state & Flex) {
+			printk(KERN_DEBUG "%s: Flex. Ttttt...\n", DRV_NAME);
+			if (!(state &= ~Flex))
+				goto try;
+		}
+	}
+}
+
+/*
+ * I had expected the following to work for the first descriptor
+ * (tx_fd->state = 0xc0000000)
+ * - Hold=1 (don't try and branch to the next descriptor);
+ * - No=0 (I want an empty data section, i.e. size=0);
+ * - Fe=1 (required by No=0 or we got an Err irq and must reset).
+ * It failed and locked solid. Thus the introduction of a dummy skb.
+ * Problem is acknowledged in errata sheet DS5. Joy :o/
+ */
+static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
+{
+	struct sk_buff *skb;
+
+	skb = dev_alloc_skb(DUMMY_SKB_SIZE);
+	if (skb) {
+		int last = dpriv->tx_dirty%TX_RING_SIZE;
+		struct TxFD *tx_fd = dpriv->tx_fd + last;
+
+		skb->len = DUMMY_SKB_SIZE;
+		skb_copy_to_linear_data(skb, version,
+					strlen(version) % DUMMY_SKB_SIZE);
+		tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE);
+		tx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev,
+					     skb->data, DUMMY_SKB_SIZE,
+					     PCI_DMA_TODEVICE));
+		dpriv->tx_skbuff[last] = skb;
+	}
+	return skb;
+}
+
+static int dscc4_init_ring(struct net_device *dev)
+{
+	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+	struct pci_dev *pdev = dpriv->pci_priv->pdev;
+	struct TxFD *tx_fd;
+	struct RxFD *rx_fd;
+	void *ring;
+	int i;
+
+	ring = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &dpriv->rx_fd_dma);
+	if (!ring)
+		goto err_out;
+	dpriv->rx_fd = rx_fd = (struct RxFD *) ring;
+
+	ring = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &dpriv->tx_fd_dma);
+	if (!ring)
+		goto err_free_dma_rx;
+	dpriv->tx_fd = tx_fd = (struct TxFD *) ring;
+
+	memset(dpriv->tx_skbuff, 0, sizeof(struct sk_buff *)*TX_RING_SIZE);
+	dpriv->tx_dirty = 0xffffffff;
+	i = dpriv->tx_current = 0;
+	do {
+		tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
+		tx_fd->complete = 0x00000000;
+	        /* FIXME: NULL should be ok - to be tried */
+	        tx_fd->data = cpu_to_le32(dpriv->tx_fd_dma);
+		(tx_fd++)->next = cpu_to_le32(dpriv->tx_fd_dma +
+					(++i%TX_RING_SIZE)*sizeof(*tx_fd));
+	} while (i < TX_RING_SIZE);
+
+	if (!dscc4_init_dummy_skb(dpriv))
+		goto err_free_dma_tx;
+
+	memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE);
+	i = dpriv->rx_dirty = dpriv->rx_current = 0;
+	do {
+		/* size set by the host. Multiple of 4 bytes please */
+	        rx_fd->state1 = HiDesc;
+	        rx_fd->state2 = 0x00000000;
+	        rx_fd->end = cpu_to_le32(0xbabeface);
+	        rx_fd->state1 |= TO_STATE_RX(HDLC_MAX_MRU);
+		// FIXME: return value is checked but the handling looks suspect
+		if (try_get_rx_skb(dpriv, dev) >= 0)
+			dpriv->rx_dirty++;
+		(rx_fd++)->next = cpu_to_le32(dpriv->rx_fd_dma +
+					(++i%RX_RING_SIZE)*sizeof(*rx_fd));
+	} while (i < RX_RING_SIZE);
+
+	return 0;
+
+err_free_dma_tx:
+	pci_free_consistent(pdev, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma);
+err_free_dma_rx:
+	pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);
+err_out:
+	return -ENOMEM;
+}
+
+static void __devexit dscc4_remove_one(struct pci_dev *pdev)
+{
+	struct dscc4_pci_priv *ppriv;
+	struct dscc4_dev_priv *root;
+	void __iomem *ioaddr;
+	int i;
+
+	ppriv = pci_get_drvdata(pdev);
+	root = ppriv->root;
+
+	ioaddr = root->base_addr;
+
+	dscc4_pci_reset(pdev, ioaddr);
+
+	free_irq(pdev->irq, root);
+	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg,
+			    ppriv->iqcfg_dma);
+	for (i = 0; i < dev_per_card; i++) {
+		struct dscc4_dev_priv *dpriv = root + i;
+
+		dscc4_release_ring(dpriv);
+		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
+				    dpriv->iqrx, dpriv->iqrx_dma);
+		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
+				    dpriv->iqtx, dpriv->iqtx_dma);
+	}
+
+	dscc4_free1(pdev);
+
+	iounmap(ioaddr);
+
+	pci_release_region(pdev, 1);
+	pci_release_region(pdev, 0);
+
+	pci_disable_device(pdev);
+}
+
+static int dscc4_hdlc_attach(struct net_device *dev, unsigned short encoding,
+	unsigned short parity)
+{
+	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
+
+	if (encoding != ENCODING_NRZ &&
+	    encoding != ENCODING_NRZI &&
+	    encoding != ENCODING_FM_MARK &&
+	    encoding != ENCODING_FM_SPACE &&
+	    encoding != ENCODING_MANCHESTER)
+		return -EINVAL;
+
+	if (parity != PARITY_NONE &&
+	    parity != PARITY_CRC16_PR0_CCITT &&
+	    parity != PARITY_CRC16_PR1_CCITT &&
+	    parity != PARITY_CRC32_PR0_CCITT &&
+	    parity != PARITY_CRC32_PR1_CCITT)
+		return -EINVAL;
+
+	dpriv->encoding = encoding;
+	dpriv->parity = parity;
+	return 0;
+}
+
+#ifndef MODULE
+static int __init dscc4_setup(char *str)
+{
+	int *args[] = { &debug, &quartz, NULL }, **p = args;
+
+	while (*p && (get_option(&str, *p) == 2))
+		p++;
+	return 1;
+}
+
+__setup("dscc4.setup=", dscc4_setup);
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(dscc4_pci_tbl) = {
+	{ PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
+	        PCI_ANY_ID, PCI_ANY_ID, },
+	{ 0,}
+};
+MODULE_DEVICE_TABLE(pci, dscc4_pci_tbl);
+
+static struct pci_driver dscc4_driver = {
+	.name		= DRV_NAME,
+	.id_table	= dscc4_pci_tbl,
+	.probe		= dscc4_init_one,
+	.remove		= __devexit_p(dscc4_remove_one),
+};
+
+static int __init dscc4_init_module(void)
+{
+	return pci_register_driver(&dscc4_driver);
+}
+
+static void __exit dscc4_cleanup_module(void)
+{
+	pci_unregister_driver(&dscc4_driver);
+}
+
+module_init(dscc4_init_module);
+module_exit(dscc4_cleanup_module);
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/farsync.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/farsync.c
new file mode 100644
index 0000000..3710427
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/farsync.c
@@ -0,0 +1,2680 @@
+/*
+ *      FarSync WAN driver for Linux (2.6.x kernel version)
+ *
+ *      Actually sync driver for X.21, V.35 and V.24 on FarSync T-series cards
+ *
+ *      Copyright (C) 2001-2004 FarSite Communications Ltd.
+ *      www.farsite.co.uk
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ *      Author:      R.J.Dunlop    <bob.dunlop@farsite.co.uk>
+ *      Maintainer:  Kevin Curtis  <kevin.curtis@farsite.co.uk>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/if.h>
+#include <linux/hdlc.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include "farsync.h"
+
+/*
+ *      Module info
+ */
+MODULE_AUTHOR("R.J.Dunlop <bob.dunlop@farsite.co.uk>");
+MODULE_DESCRIPTION("FarSync T-Series WAN driver. FarSite Communications Ltd.");
+MODULE_LICENSE("GPL");
+
+/*      Driver configuration and global parameters
+ *      ==========================================
+ */
+
+/*      Number of ports (per card) and cards supported
+ */
+#define FST_MAX_PORTS           4
+#define FST_MAX_CARDS           32
+
+/*      Default parameters for the link
+ */
+#define FST_TX_QUEUE_LEN        100	/* At 8Mbps a longer queue length is
+					 * useful */
+#define FST_TXQ_DEPTH           16	/* This one is for the buffering
+					 * of frames on the way down to the card
+					 * so that we can keep the card busy
+					 * and maximise throughput
+					 */
+#define FST_HIGH_WATER_MARK     12	/* Point at which we flow control
+					 * network layer */
+#define FST_LOW_WATER_MARK      8	/* Point at which we remove flow
+					 * control from network layer */
+#define FST_MAX_MTU             8000	/* Huge but possible */
+#define FST_DEF_MTU             1500	/* Common sane value */
+
+#define FST_TX_TIMEOUT          (2*HZ)
+
+#ifdef ARPHRD_RAWHDLC
+#define ARPHRD_MYTYPE   ARPHRD_RAWHDLC	/* Raw frames */
+#else
+#define ARPHRD_MYTYPE   ARPHRD_HDLC	/* Cisco-HDLC (keepalives etc) */
+#endif
+
+/*
+ * Modules parameters and associated variables
+ */
+static int fst_txq_low = FST_LOW_WATER_MARK;
+static int fst_txq_high = FST_HIGH_WATER_MARK;
+static int fst_max_reads = 7;
+static int fst_excluded_cards = 0;
+static int fst_excluded_list[FST_MAX_CARDS];
+
+module_param(fst_txq_low, int, 0);
+module_param(fst_txq_high, int, 0);
+module_param(fst_max_reads, int, 0);
+module_param(fst_excluded_cards, int, 0);
+module_param_array(fst_excluded_list, int, NULL, 0);
+
+/*      Card shared memory layout
+ *      =========================
+ */
+#pragma pack(1)
+
+/*      This information is derived in part from the FarSite FarSync Smc.h
+ *      file. Unfortunately various name clashes and the non-portability of the
+ *      bit field declarations in that file have meant that I have chosen to
+ *      recreate the information here.
+ *
+ *      The SMC (Shared Memory Configuration) has a version number that is
+ *      incremented every time there is a significant change. This number can
+ *      be used to check that we have not got out of step with the firmware
+ *      contained in the .CDE files.
+ */
+#define SMC_VERSION 24
+
+#define FST_MEMSIZE 0x100000	/* Size of card memory (1 MB) */
+
+#define SMC_BASE 0x00002000L	/* Base offset of the shared memory window main
+				 * configuration structure */
+#define BFM_BASE 0x00010000L	/* Base offset of the shared memory window DMA
+				 * buffers */
+
+#define LEN_TX_BUFFER 8192	/* Size of packet buffers */
+#define LEN_RX_BUFFER 8192
+
+#define LEN_SMALL_TX_BUFFER 256	/* Size of obsolete buffs used for DOS diags */
+#define LEN_SMALL_RX_BUFFER 256
+
+#define NUM_TX_BUFFER 2		/* Must be power of 2. Fixed by firmware */
+#define NUM_RX_BUFFER 8
+
+/* Interrupt retry time in milliseconds */
+#define INT_RETRY_TIME 2
+
+/*      The Am186CH/CC processors support a SmartDMA mode using circular pools
+ *      of buffer descriptors. The structure is almost identical to that used
+ *      in the LANCE Ethernet controllers. Details available as PDF from the
+ *      AMD web site: http://www.amd.com/products/epd/processors/\
+ *                    2.16bitcont/3.am186cxfa/a21914/21914.pdf
+ */
+struct txdesc {			/* Transmit descriptor */
+	volatile u16 ladr;	/* Low order address of packet. This is a
+				 * linear address in the Am186 memory space
+				 */
+	volatile u8 hadr;	/* High order address. Low 4 bits only, high 4
+				 * bits must be zero
+				 */
+	volatile u8 bits;	/* Status and config */
+	volatile u16 bcnt;	/* 2s complement of packet size in low 15 bits.
+				 * Transmit terminal count interrupt enable in
+				 * top bit.
+				 */
+	u16 unused;		/* Not used in Tx */
+};
+
+struct rxdesc {			/* Receive descriptor */
+	volatile u16 ladr;	/* Low order address of packet */
+	volatile u8 hadr;	/* High order address */
+	volatile u8 bits;	/* Status and config */
+	volatile u16 bcnt;	/* 2s complement of buffer size in low 15 bits.
+				 * Receive terminal count interrupt enable in
+				 * top bit.
+				 */
+	volatile u16 mcnt;	/* Message byte count (15 bits) */
+};
+
+/* Convert a length into the 15 bit 2's complement */
+/* #define cnv_bcnt(len)   (( ~(len) + 1 ) & 0x7FFF ) */
+/* Since we need to set the high bit to enable the completion interrupt this
+ * can be made a lot simpler
+ */
+#define cnv_bcnt(len)   (-(len))
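+
+/* For example, cnv_bcnt(1500) stored in the 16 bit bcnt field is 0xFA24,
+ * i.e. the 15 bit 2's complement 0x7A24 with the top (terminal count
+ * interrupt enable) bit already set.
+ */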
+
+/* Status and config bits for the above */
+#define DMA_OWN         0x80	/* SmartDMA owns the descriptor */
+#define TX_STP          0x02	/* Tx: start of packet */
+#define TX_ENP          0x01	/* Tx: end of packet */
+#define RX_ERR          0x40	/* Rx: error (OR of next 4 bits) */
+#define RX_FRAM         0x20	/* Rx: framing error */
+#define RX_OFLO         0x10	/* Rx: overflow error */
+#define RX_CRC          0x08	/* Rx: CRC error */
+#define RX_HBUF         0x04	/* Rx: buffer error */
+#define RX_STP          0x02	/* Rx: start of packet */
+#define RX_ENP          0x01	/* Rx: end of packet */
+
+/* Interrupts from the card are caused by various events which are presented
+ * in a circular buffer as several events may be processed on one physical int
+ */
+#define MAX_CIRBUFF     32
+
+struct cirbuff {
+	u8 rdindex;		/* read, then increment and wrap */
+	u8 wrindex;		/* write, then increment and wrap */
+	u8 evntbuff[MAX_CIRBUFF];
+};
+
+/* Interrupt event codes.
+ * Where appropriate the two low order bits indicate the port number
+ */
+#define CTLA_CHG        0x18	/* Control signal changed */
+#define CTLB_CHG        0x19
+#define CTLC_CHG        0x1A
+#define CTLD_CHG        0x1B
+
+#define INIT_CPLT       0x20	/* Initialisation complete */
+#define INIT_FAIL       0x21	/* Initialisation failed */
+
+#define ABTA_SENT       0x24	/* Abort sent */
+#define ABTB_SENT       0x25
+#define ABTC_SENT       0x26
+#define ABTD_SENT       0x27
+
+#define TXA_UNDF        0x28	/* Transmission underflow */
+#define TXB_UNDF        0x29
+#define TXC_UNDF        0x2A
+#define TXD_UNDF        0x2B
+
+#define F56_INT         0x2C
+#define M32_INT         0x2D
+
+#define TE1_ALMA        0x30
+
+/* Port physical configuration. See farsync.h for field values */
+struct port_cfg {
+	u16 lineInterface;	/* Physical interface type */
+	u8 x25op;		/* Unused at present */
+	u8 internalClock;	/* 1 => internal clock, 0 => external */
+	u8 transparentMode;	/* 1 => on, 0 => off */
+	u8 invertClock;		/* 0 => normal, 1 => inverted */
+	u8 padBytes[6];		/* Padding */
+	u32 lineSpeed;		/* Speed in bps */
+};
+
+/* TE1 port physical configuration */
+struct su_config {
+	u32 dataRate;
+	u8 clocking;
+	u8 framing;
+	u8 structure;
+	u8 interface;
+	u8 coding;
+	u8 lineBuildOut;
+	u8 equalizer;
+	u8 transparentMode;
+	u8 loopMode;
+	u8 range;
+	u8 txBufferMode;
+	u8 rxBufferMode;
+	u8 startingSlot;
+	u8 losThreshold;
+	u8 enableIdleCode;
+	u8 idleCode;
+	u8 spare[44];
+};
+
+/* TE1 Status */
+struct su_status {
+	u32 receiveBufferDelay;
+	u32 framingErrorCount;
+	u32 codeViolationCount;
+	u32 crcErrorCount;
+	u32 lineAttenuation;
+	u8 portStarted;
+	u8 lossOfSignal;
+	u8 receiveRemoteAlarm;
+	u8 alarmIndicationSignal;
+	u8 spare[40];
+};
+
+/* Finally sling all the above together into the shared memory structure.
+ * Sorry it's a hodge-podge of arrays, structures and unused bits; it's been
+ * evolving under NT for some time, so I guess we're stuck with it.
+ * The structure starts at offset SMC_BASE.
+ * See farsync.h for some field values.
+ */
+struct fst_shared {
+	/* DMA descriptor rings */
+	struct rxdesc rxDescrRing[FST_MAX_PORTS][NUM_RX_BUFFER];
+	struct txdesc txDescrRing[FST_MAX_PORTS][NUM_TX_BUFFER];
+
+	/* Obsolete small buffers */
+	u8 smallRxBuffer[FST_MAX_PORTS][NUM_RX_BUFFER][LEN_SMALL_RX_BUFFER];
+	u8 smallTxBuffer[FST_MAX_PORTS][NUM_TX_BUFFER][LEN_SMALL_TX_BUFFER];
+
+	u8 taskStatus;		/* 0x00 => initialising, 0x01 => running,
+				 * 0xFF => halted
+				 */
+
+	u8 interruptHandshake;	/* Set to 0x01 by adapter to signal interrupt,
+				 * set to 0xEE by host to acknowledge interrupt
+				 */
+
+	u16 smcVersion;		/* Must match SMC_VERSION */
+
+	u32 smcFirmwareVersion;	/* 0xIIVVRRBB where II = product ID, VV = major
+				 * version, RR = revision and BB = build
+				 */
+
+	u16 txa_done;		/* Obsolete completion flags */
+	u16 rxa_done;
+	u16 txb_done;
+	u16 rxb_done;
+	u16 txc_done;
+	u16 rxc_done;
+	u16 txd_done;
+	u16 rxd_done;
+
+	u16 mailbox[4];		/* Diagnostics mailbox. Not used */
+
+	struct cirbuff interruptEvent;	/* interrupt causes */
+
+	u32 v24IpSts[FST_MAX_PORTS];	/* V.24 control input status */
+	u32 v24OpSts[FST_MAX_PORTS];	/* V.24 control output status */
+
+	struct port_cfg portConfig[FST_MAX_PORTS];
+
+	u16 clockStatus[FST_MAX_PORTS];	/* lsb: 0=> present, 1=> absent */
+
+	u16 cableStatus;	/* lsb: 0=> present, 1=> absent */
+
+	u16 txDescrIndex[FST_MAX_PORTS];	/* transmit descriptor ring index */
+	u16 rxDescrIndex[FST_MAX_PORTS];	/* receive descriptor ring index */
+
+	u16 portMailbox[FST_MAX_PORTS][2];	/* command, modifier */
+	u16 cardMailbox[4];	/* Not used */
+
+	/* Number of times the card thinks the host has
+	 * missed an interrupt by not acknowledging
+	 * within 2 ms (I guess NT has problems)
+	 */
+	u32 interruptRetryCount;
+
+	/* Driver private data used as an ID. We'll not
+	 * use this as I'd rather keep such things
+	 * in main memory rather than on the PCI bus
+	 */
+	u32 portHandle[FST_MAX_PORTS];
+
+	/* Count of Tx underflows for stats */
+	u32 transmitBufferUnderflow[FST_MAX_PORTS];
+
+	/* Debounced V.24 control input status */
+	u32 v24DebouncedSts[FST_MAX_PORTS];
+
+	/* Adapter debounce timers. Don't touch */
+	u32 ctsTimer[FST_MAX_PORTS];
+	u32 ctsTimerRun[FST_MAX_PORTS];
+	u32 dcdTimer[FST_MAX_PORTS];
+	u32 dcdTimerRun[FST_MAX_PORTS];
+
+	u32 numberOfPorts;	/* Number of ports detected at startup */
+
+	u16 _reserved[64];
+
+	u16 cardMode;		/* Bit-mask to enable features:
+				 * Bit 0: 1 enables LED identify mode
+				 */
+
+	u16 portScheduleOffset;
+
+	struct su_config suConfig;	/* TE1 Bits */
+	struct su_status suStatus;
+
+	u32 endOfSmcSignature;	/* endOfSmcSignature MUST be the last member of
+				 * the structure and marks the end of shared
+				 * memory. Adapter code initializes it as
+				 * END_SIG.
+				 */
+};
+
+/* endOfSmcSignature value */
+#define END_SIG                 0x12345678
+
+/* Mailbox values. (portMailbox) */
+#define NOP             0	/* No operation */
+#define ACK             1	/* Positive acknowledgement to PC driver */
+#define NAK             2	/* Negative acknowledgement to PC driver */
+#define STARTPORT       3	/* Start an HDLC port */
+#define STOPPORT        4	/* Stop an HDLC port */
+#define ABORTTX         5	/* Abort the transmitter for a port */
+#define SETV24O         6	/* Set V24 outputs */
+
+/* PLX Chip Register Offsets */
+#define CNTRL_9052      0x50	/* Control Register */
+#define CNTRL_9054      0x6c	/* Control Register */
+
+#define INTCSR_9052     0x4c	/* Interrupt control/status register */
+#define INTCSR_9054     0x68	/* Interrupt control/status register */
+
+/* 9054 DMA Registers */
+/*
+ * Note that we will be using DMA Channel 0 for copying rx data
+ * and Channel 1 for copying tx data
+ */
+#define DMAMODE0        0x80
+#define DMAPADR0        0x84
+#define DMALADR0        0x88
+#define DMASIZ0         0x8c
+#define DMADPR0         0x90
+#define DMAMODE1        0x94
+#define DMAPADR1        0x98
+#define DMALADR1        0x9c
+#define DMASIZ1         0xa0
+#define DMADPR1         0xa4
+#define DMACSR0         0xa8
+#define DMACSR1         0xa9
+#define DMAARB          0xac
+#define DMATHR          0xb0
+#define DMADAC0         0xb4
+#define DMADAC1         0xb8
+#define DMAMARBR        0xac
+
+#define FST_MIN_DMA_LEN 64
+#define FST_RX_DMA_INT  0x01
+#define FST_TX_DMA_INT  0x02
+#define FST_CARD_INT    0x04
+
+/* Larger buffers are positioned in memory at offset BFM_BASE */
+struct buf_window {
+	u8 txBuffer[FST_MAX_PORTS][NUM_TX_BUFFER][LEN_TX_BUFFER];
+	u8 rxBuffer[FST_MAX_PORTS][NUM_RX_BUFFER][LEN_RX_BUFFER];
+};
+
+/* Calculate offset of a buffer object within the shared memory window */
+#define BUF_OFFSET(X)   (BFM_BASE + offsetof(struct buf_window, X))
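+
+/* For example, BUF_OFFSET(txBuffer[2][1][0]) is BFM_BASE plus the offset of
+ * port 2's second transmit buffer within struct buf_window, i.e. 0x1A000
+ * with the buffer sizes defined above.
+ */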
+
+#pragma pack()
+
+/*      Device driver private information
+ *      =================================
+ */
+/*      Per port (line or channel) information
+ */
+struct fst_port_info {
+        struct net_device *dev; /* Device struct - must be first */
+	struct fst_card_info *card;	/* Card we're associated with */
+	int index;		/* Port index on the card */
+	int hwif;		/* Line hardware (lineInterface copy) */
+	int run;		/* Port is running */
+	int mode;		/* Normal or FarSync raw */
+	int rxpos;		/* Next Rx buffer to use */
+	int txpos;		/* Next Tx buffer to use */
+	int txipos;		/* Next Tx buffer to check for free */
+	int start;		/* Indication of start/stop to network */
+	/*
+	 * A sixteen entry transmit queue
+	 */
+	int txqs;		/* index to get next buffer to tx */
+	int txqe;		/* index to queue next packet */
+	struct sk_buff *txq[FST_TXQ_DEPTH];	/* The queue */
+	int rxqdepth;
+};
+
+/*      Per card information
+ */
+struct fst_card_info {
+	char __iomem *mem;	/* Card memory mapped to kernel space */
+	char __iomem *ctlmem;	/* Control memory for PCI cards */
+	unsigned int phys_mem;	/* Physical memory window address */
+	unsigned int phys_ctlmem;	/* Physical control memory address */
+	unsigned int irq;	/* Interrupt request line number */
+	unsigned int nports;	/* Number of serial ports */
+	unsigned int type;	/* Type index of card */
+	unsigned int state;	/* State of card */
+	spinlock_t card_lock;	/* Lock for SMP access */
+	unsigned short pci_conf;	/* PCI card config in I/O space */
+	/* Per port info */
+	struct fst_port_info ports[FST_MAX_PORTS];
+	struct pci_dev *device;	/* Information about the pci device */
+	int card_no;		/* Inst of the card on the system */
+	int family;		/* TxP or TxU */
+	int dmarx_in_progress;
+	int dmatx_in_progress;
+	unsigned long int_count;
+	unsigned long int_time_ave;
+	void *rx_dma_handle_host;
+	dma_addr_t rx_dma_handle_card;
+	void *tx_dma_handle_host;
+	dma_addr_t tx_dma_handle_card;
+	struct sk_buff *dma_skb_rx;
+	struct fst_port_info *dma_port_rx;
+	struct fst_port_info *dma_port_tx;
+	int dma_len_rx;
+	int dma_len_tx;
+	int dma_txpos;
+	int dma_rxpos;
+};
+
+/* Convert an HDLC device pointer into a port info pointer and similar */
+#define dev_to_port(D)  (dev_to_hdlc(D)->priv)
+#define port_to_dev(P)  ((P)->dev)
+
+
+/*
+ *      Shared memory window access macros
+ *
+ *      We have a nice memory based structure above, which could be directly
+ *      mapped on i386 but might not work on other architectures unless we use
+ *      the readb,w,l and writeb,w,l macros. Unfortunately these macros take
+ *      physical offsets so we have to convert. The only saving grace is that
+ *      this should all collapse back to a simple indirection eventually.
+ */
+#define WIN_OFFSET(X)   ((long)&(((struct fst_shared *)SMC_BASE)->X))
+
+#define FST_RDB(C,E)    readb ((C)->mem + WIN_OFFSET(E))
+#define FST_RDW(C,E)    readw ((C)->mem + WIN_OFFSET(E))
+#define FST_RDL(C,E)    readl ((C)->mem + WIN_OFFSET(E))
+
+#define FST_WRB(C,E,B)  writeb ((B), (C)->mem + WIN_OFFSET(E))
+#define FST_WRW(C,E,W)  writew ((W), (C)->mem + WIN_OFFSET(E))
+#define FST_WRL(C,E,L)  writel ((L), (C)->mem + WIN_OFFSET(E))
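+
+/* For example, FST_RDB(card, taskStatus) reads the firmware status byte at
+ * card->mem plus the offset of taskStatus within struct fst_shared (itself
+ * based at SMC_BASE), and FST_WRB(card, interruptHandshake, 0xEE) writes the
+ * interrupt acknowledge value to the handshake byte.
+ */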
+
+/*
+ *      Debug support
+ */
+#if FST_DEBUG
+
+static int fst_debug_mask = FST_DEBUG;
+
+/* Most common debug activity is to print something if the corresponding bit
+ * is set in the debug mask. Note: this uses a non-ANSI extension in GCC to
+ * support variable numbers of macro parameters. The inverted if prevents us
+ * eating someone else's else clause.
+ */
+#define dbg(F, fmt, args...)					\
+do {								\
+	if (fst_debug_mask & (F))				\
+		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
+} while (0)
+#else
+#define dbg(F, fmt, args...)					\
+do {								\
+	if (0)							\
+		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
+} while (0)
+#endif
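+
+/* For example, dbg(DBG_RX, "len %d\n", len) only produces output when
+ * FST_DEBUG is defined and the DBG_RX bit is set in fst_debug_mask.
+ */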
+
+/*
+ *      PCI ID lookup table
+ */
+static DEFINE_PCI_DEVICE_TABLE(fst_pci_dev_id) = {
+	{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID, 
+	 PCI_ANY_ID, 0, 0, FST_TYPE_T2P},
+
+	{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4P, PCI_ANY_ID, 
+	 PCI_ANY_ID, 0, 0, FST_TYPE_T4P},
+
+	{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T1U, PCI_ANY_ID, 
+	 PCI_ANY_ID, 0, 0, FST_TYPE_T1U},
+
+	{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2U, PCI_ANY_ID, 
+	 PCI_ANY_ID, 0, 0, FST_TYPE_T2U},
+
+	{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4U, PCI_ANY_ID, 
+	 PCI_ANY_ID, 0, 0, FST_TYPE_T4U},
+
+	{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1, PCI_ANY_ID, 
+	 PCI_ANY_ID, 0, 0, FST_TYPE_TE1},
+
+	{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1C, PCI_ANY_ID, 
+	 PCI_ANY_ID, 0, 0, FST_TYPE_TE1},
+	{0,}			/* End */
+};
+
+MODULE_DEVICE_TABLE(pci, fst_pci_dev_id);
+
+/*
+ *      Device Driver Work Queues
+ *
+ *      So that we don't spend too much time processing events in the 
+ *      Interrupt Service routine, we will declare a work queue per Card 
+ *      and make the ISR schedule a task in the queue for later execution.
+ *      In the 2.4 kernel we used to use the immediate queue for BHs.
+ *      Now that they are gone, tasklets seem to be much better than work
+ *      queues.
+ */
+
+static void do_bottom_half_tx(struct fst_card_info *card);
+static void do_bottom_half_rx(struct fst_card_info *card);
+static void fst_process_tx_work_q(unsigned long work_q);
+static void fst_process_int_work_q(unsigned long work_q);
+
+static DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0);
+static DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0);
+
+static struct fst_card_info *fst_card_array[FST_MAX_CARDS];
+static spinlock_t fst_work_q_lock;
+static u64 fst_work_txq;
+static u64 fst_work_intq;
+
+static void
+fst_q_work_item(u64 * queue, int card_index)
+{
+	unsigned long flags;
+	u64 mask;
+
+	/*
+	 * Grab the queue exclusively
+	 */
+	spin_lock_irqsave(&fst_work_q_lock, flags);
+
+	/*
+	 * Making an entry in the queue is simply a matter of setting
+	 * a bit for the card indicating that there is work to do in the
+	 * bottom half for the card.  Note the limitation of 64 cards.
+	 * That ought to be enough
+	 */
+	mask = 1ULL << card_index;	/* 64 bit shift: card_index may be 31 */
+	*queue |= mask;
+	spin_unlock_irqrestore(&fst_work_q_lock, flags);
+}
+
+static void
+fst_process_tx_work_q(unsigned long /*void **/work_q)
+{
+	unsigned long flags;
+	u64 work_txq;
+	int i;
+
+	/*
+	 * Grab the queue exclusively
+	 */
+	dbg(DBG_TX, "fst_process_tx_work_q\n");
+	spin_lock_irqsave(&fst_work_q_lock, flags);
+	work_txq = fst_work_txq;
+	fst_work_txq = 0;
+	spin_unlock_irqrestore(&fst_work_q_lock, flags);
+
+	/*
+	 * Call the bottom half for each card with work waiting
+	 */
+	for (i = 0; i < FST_MAX_CARDS; i++) {
+		if (work_txq & 0x01) {
+			if (fst_card_array[i] != NULL) {
+				dbg(DBG_TX, "Calling tx bh for card %d\n", i);
+				do_bottom_half_tx(fst_card_array[i]);
+			}
+		}
+		work_txq = work_txq >> 1;
+	}
+}
+
+static void
+fst_process_int_work_q(unsigned long /*void **/work_q)
+{
+	unsigned long flags;
+	u64 work_intq;
+	int i;
+
+	/*
+	 * Grab the queue exclusively
+	 */
+	dbg(DBG_INTR, "fst_process_int_work_q\n");
+	spin_lock_irqsave(&fst_work_q_lock, flags);
+	work_intq = fst_work_intq;
+	fst_work_intq = 0;
+	spin_unlock_irqrestore(&fst_work_q_lock, flags);
+
+	/*
+	 * Call the bottom half for each card with work waiting
+	 */
+	for (i = 0; i < FST_MAX_CARDS; i++) {
+		if (work_intq & 0x01) {
+			if (fst_card_array[i] != NULL) {
+				dbg(DBG_INTR,
+				    "Calling rx & tx bh for card %d\n", i);
+				do_bottom_half_rx(fst_card_array[i]);
+				do_bottom_half_tx(fst_card_array[i]);
+			}
+		}
+		work_intq = work_intq >> 1;
+	}
+}
+
+/*      Card control functions
+ *      ======================
+ */
+/*      Place the processor in reset state
+ *
+ * Used to be a simple write to card control space but a glitch in the latest
+ * AMD Am186CH processor means that we now have to do it by asserting and de-
+ * asserting the PLX chip PCI Adapter Software Reset (bit 30 in the CNTRL
+ * register at offset CNTRL_9052).  Note the updates for the TXU.
+ */
+static inline void
+fst_cpureset(struct fst_card_info *card)
+{
+	unsigned char interrupt_line_register;
+	unsigned long j = jiffies + 1;
+	unsigned int regval;
+
+	if (card->family == FST_FAMILY_TXU) {
+		if (pci_read_config_byte
+		    (card->device, PCI_INTERRUPT_LINE, &interrupt_line_register)) {
+			dbg(DBG_ASS,
+			    "Error in reading interrupt line register\n");
+		}
+		/*
+		 * Assert PLX software reset and Am186 hardware reset
+		 * and then deassert the PLX software reset but 186 still in reset
+		 */
+		outw(0x440f, card->pci_conf + CNTRL_9054 + 2);
+		outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
+		/*
+		 * We are delaying here to allow the 9054 to reset itself
+		 */
+		j = jiffies + 1;
+		while (jiffies < j)
+			/* Do nothing */ ;
+		outw(0x240f, card->pci_conf + CNTRL_9054 + 2);
+		/*
+		 * We are delaying here to allow the 9054 to reload its eeprom
+		 */
+		j = jiffies + 1;
+		while (jiffies < j)
+			/* Do nothing */ ;
+		outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
+
+		if (pci_write_config_byte
+		    (card->device, PCI_INTERRUPT_LINE, interrupt_line_register)) {
+			dbg(DBG_ASS,
+			    "Error in writing interrupt line register\n");
+		}
+
+	} else {
+		regval = inl(card->pci_conf + CNTRL_9052);
+
+		outl(regval | 0x40000000, card->pci_conf + CNTRL_9052);
+		outl(regval & ~0x40000000, card->pci_conf + CNTRL_9052);
+	}
+}
+
+/*      Release the processor from reset
+ */
+static inline void
+fst_cpurelease(struct fst_card_info *card)
+{
+	if (card->family == FST_FAMILY_TXU) {
+		/*
+		 * Force posted writes to complete
+		 */
+		(void) readb(card->mem);
+
+		/*
+		 * Release LRESET DO = 1
+		 * Then release Local Hold, DO = 1
+		 */
+		outw(0x040e, card->pci_conf + CNTRL_9054 + 2);
+		outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
+	} else {
+		(void) readb(card->ctlmem);
+	}
+}
+
+/*      Clear the cards interrupt flag
+ */
+static inline void
+fst_clear_intr(struct fst_card_info *card)
+{
+	if (card->family == FST_FAMILY_TXU) {
+		(void) readb(card->ctlmem);
+	} else {
+		/* Poke the appropriate PLX chip register (same as enabling interrupts)
+		 */
+		outw(0x0543, card->pci_conf + INTCSR_9052);
+	}
+}
+
+/*      Enable card interrupts
+ */
+static inline void
+fst_enable_intr(struct fst_card_info *card)
+{
+	if (card->family == FST_FAMILY_TXU) {
+		outl(0x0f0c0900, card->pci_conf + INTCSR_9054);
+	} else {
+		outw(0x0543, card->pci_conf + INTCSR_9052);
+	}
+}
+
+/*      Disable card interrupts
+ */
+static inline void
+fst_disable_intr(struct fst_card_info *card)
+{
+	if (card->family == FST_FAMILY_TXU) {
+		outl(0x00000000, card->pci_conf + INTCSR_9054);
+	} else {
+		outw(0x0000, card->pci_conf + INTCSR_9052);
+	}
+}
+
+/*      Process the result of trying to pass a received frame up the stack
+ */
+static void
+fst_process_rx_status(int rx_status, char *name)
+{
+	switch (rx_status) {
+	case NET_RX_SUCCESS:
+		{
+			/*
+			 * Nothing to do here
+			 */
+			break;
+		}
+	case NET_RX_DROP:
+		{
+			dbg(DBG_ASS, "%s: Received packet dropped\n", name);
+			break;
+		}
+	}
+}
+
+/*      Initialise DMA for PLX 9054
+ */
+static inline void
+fst_init_dma(struct fst_card_info *card)
+{
+	/*
+	 * This is only required for the PLX 9054
+	 */
+	if (card->family == FST_FAMILY_TXU) {
+	        pci_set_master(card->device);
+		outl(0x00020441, card->pci_conf + DMAMODE0);
+		outl(0x00020441, card->pci_conf + DMAMODE1);
+		outl(0x0, card->pci_conf + DMATHR);
+	}
+}
+
+/*      Tx dma complete interrupt
+ */
+static void
+fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
+		    int len, int txpos)
+{
+	struct net_device *dev = port_to_dev(port);
+
+	/*
+	 * Everything is now set, just tell the card to go
+	 */
+	dbg(DBG_TX, "fst_tx_dma_complete\n");
+	FST_WRB(card, txDescrRing[port->index][txpos].bits,
+		DMA_OWN | TX_STP | TX_ENP);
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += len;
+	dev->trans_start = jiffies;
+}
+
+/*
+ * Mark it for our own raw sockets interface
+ */
+static __be16 farsync_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+	skb->dev = dev;
+	skb_reset_mac_header(skb);
+	skb->pkt_type = PACKET_HOST;
+	return htons(ETH_P_CUST);
+}
+
+/*      Rx dma complete interrupt
+ */
+static void
+fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
+		    int len, struct sk_buff *skb, int rxp)
+{
+	struct net_device *dev = port_to_dev(port);
+	int pi;
+	int rx_status;
+
+	dbg(DBG_RX, "fst_rx_dma_complete\n");
+	pi = port->index;
+	memcpy(skb_put(skb, len), card->rx_dma_handle_host, len);
+
+	/* Reset buffer descriptor */
+	FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
+
+	/* Update stats */
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += len;
+
+	/* Push upstream */
+	dbg(DBG_RX, "Pushing the frame up the stack\n");
+	if (port->mode == FST_RAW)
+		skb->protocol = farsync_type_trans(skb, dev);
+	else
+		skb->protocol = hdlc_type_trans(skb, dev);
+	rx_status = netif_rx(skb);
+	fst_process_rx_status(rx_status, port_to_dev(port)->name);
+	if (rx_status == NET_RX_DROP)
+		dev->stats.rx_dropped++;
+}
+
+/*
+ *      Receive a frame through the DMA
+ */
+static inline void
+fst_rx_dma(struct fst_card_info *card, dma_addr_t skb,
+	   dma_addr_t mem, int len)
+{
+	/*
+	 * This routine will setup the DMA and start it
+	 */
+
+	dbg(DBG_RX, "In fst_rx_dma %lx %lx %d\n",
+	    (unsigned long) skb, (unsigned long) mem, len);
+	if (card->dmarx_in_progress) {
+		dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
+	}
+
+	outl(skb, card->pci_conf + DMAPADR0);	/* Copy to here */
+	outl(mem, card->pci_conf + DMALADR0);	/* from here */
+	outl(len, card->pci_conf + DMASIZ0);	/* for this length */
+	outl(0x0000000c, card->pci_conf + DMADPR0);	/* In this direction */
+
+	/*
+	 * We use the dmarx_in_progress flag to flag the channel as busy
+	 */
+	card->dmarx_in_progress = 1;
+	outb(0x03, card->pci_conf + DMACSR0);	/* Start the transfer */
+}
+
+/*
+ *      Send a frame through the DMA
+ */
+static inline void
+fst_tx_dma(struct fst_card_info *card, unsigned char *skb,
+	   unsigned char *mem, int len)
+{
+	/*
+	 * This routine will setup the DMA and start it.
+	 */
+
+	dbg(DBG_TX, "In fst_tx_dma %p %p %d\n", skb, mem, len);
+	if (card->dmatx_in_progress) {
+		dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n");
+	}
+
+	outl((unsigned long) skb, card->pci_conf + DMAPADR1);	/* Copy from here */
+	outl((unsigned long) mem, card->pci_conf + DMALADR1);	/* to here */
+	outl(len, card->pci_conf + DMASIZ1);	/* for this length */
+	outl(0x00000004, card->pci_conf + DMADPR1);	/* In this direction */
+
+	/*
+	 * We use the dmatx_in_progress to flag the channel as busy
+	 */
+	card->dmatx_in_progress = 1;
+	outb(0x03, card->pci_conf + DMACSR1);	/* Start the transfer */
+}
+
+/*      Issue a Mailbox command for a port.
+ *      Note we issue them on a fire and forget basis, not expecting to see an
+ *      error and not waiting for completion.
+ */
+static void
+fst_issue_cmd(struct fst_port_info *port, unsigned short cmd)
+{
+	struct fst_card_info *card;
+	unsigned short mbval;
+	unsigned long flags;
+	int safety;
+
+	card = port->card;
+	spin_lock_irqsave(&card->card_lock, flags);
+	mbval = FST_RDW(card, portMailbox[port->index][0]);
+
+	safety = 0;
+	/* Wait for any previous command to complete */
+	while (mbval > NAK) {
+		spin_unlock_irqrestore(&card->card_lock, flags);
+		schedule_timeout_uninterruptible(1);
+		spin_lock_irqsave(&card->card_lock, flags);
+
+		if (++safety > 2000) {
+			pr_err("Mailbox safety timeout\n");
+			break;
+		}
+
+		mbval = FST_RDW(card, portMailbox[port->index][0]);
+	}
+	if (safety > 0) {
+		dbg(DBG_CMD, "Mailbox clear after %d jiffies\n", safety);
+	}
+	if (mbval == NAK) {
+		dbg(DBG_CMD, "issue_cmd: previous command was NAK'd\n");
+	}
+
+	FST_WRW(card, portMailbox[port->index][0], cmd);
+
+	if (cmd == ABORTTX || cmd == STARTPORT) {
+		port->txpos = 0;
+		port->txipos = 0;
+		port->start = 0;
+	}
+
+	spin_unlock_irqrestore(&card->card_lock, flags);
+}
+
+/*      Port output signals control
+ */
+static inline void
+fst_op_raise(struct fst_port_info *port, unsigned int outputs)
+{
+	outputs |= FST_RDL(port->card, v24OpSts[port->index]);
+	FST_WRL(port->card, v24OpSts[port->index], outputs);
+
+	if (port->run)
+		fst_issue_cmd(port, SETV24O);
+}
+
+static inline void
+fst_op_lower(struct fst_port_info *port, unsigned int outputs)
+{
+	outputs = ~outputs & FST_RDL(port->card, v24OpSts[port->index]);
+	FST_WRL(port->card, v24OpSts[port->index], outputs);
+
+	if (port->run)
+		fst_issue_cmd(port, SETV24O);
+}
+
+/*
+ *      Setup port Rx buffers
+ */
+static void
+fst_rx_config(struct fst_port_info *port)
+{
+	int i;
+	int pi;
+	unsigned int offset;
+	unsigned long flags;
+	struct fst_card_info *card;
+
+	pi = port->index;
+	card = port->card;
+	spin_lock_irqsave(&card->card_lock, flags);
+	for (i = 0; i < NUM_RX_BUFFER; i++) {
+		offset = BUF_OFFSET(rxBuffer[pi][i][0]);
+
+		FST_WRW(card, rxDescrRing[pi][i].ladr, (u16) offset);
+		FST_WRB(card, rxDescrRing[pi][i].hadr, (u8) (offset >> 16));
+		FST_WRW(card, rxDescrRing[pi][i].bcnt, cnv_bcnt(LEN_RX_BUFFER));
+		FST_WRW(card, rxDescrRing[pi][i].mcnt, LEN_RX_BUFFER);
+		FST_WRB(card, rxDescrRing[pi][i].bits, DMA_OWN);
+	}
+	port->rxpos = 0;
+	spin_unlock_irqrestore(&card->card_lock, flags);
+}
+
+/*
+ *      Setup port Tx buffers
+ */
+static void
+fst_tx_config(struct fst_port_info *port)
+{
+	int i;
+	int pi;
+	unsigned int offset;
+	unsigned long flags;
+	struct fst_card_info *card;
+
+	pi = port->index;
+	card = port->card;
+	spin_lock_irqsave(&card->card_lock, flags);
+	for (i = 0; i < NUM_TX_BUFFER; i++) {
+		offset = BUF_OFFSET(txBuffer[pi][i][0]);
+
+		FST_WRW(card, txDescrRing[pi][i].ladr, (u16) offset);
+		FST_WRB(card, txDescrRing[pi][i].hadr, (u8) (offset >> 16));
+		FST_WRW(card, txDescrRing[pi][i].bcnt, 0);
+		FST_WRB(card, txDescrRing[pi][i].bits, 0);
+	}
+	port->txpos = 0;
+	port->txipos = 0;
+	port->start = 0;
+	spin_unlock_irqrestore(&card->card_lock, flags);
+}
+
+/*      TE1 Alarm change interrupt event
+ */
+static void
+fst_intr_te1_alarm(struct fst_card_info *card, struct fst_port_info *port)
+{
+	u8 los;
+	u8 rra;
+	u8 ais;
+
+	los = FST_RDB(card, suStatus.lossOfSignal);
+	rra = FST_RDB(card, suStatus.receiveRemoteAlarm);
+	ais = FST_RDB(card, suStatus.alarmIndicationSignal);
+
+	if (los) {
+		/*
+		 * Lost the link
+		 */
+		if (netif_carrier_ok(port_to_dev(port))) {
+			dbg(DBG_INTR, "Net carrier off\n");
+			netif_carrier_off(port_to_dev(port));
+		}
+	} else {
+		/*
+		 * Link available
+		 */
+		if (!netif_carrier_ok(port_to_dev(port))) {
+			dbg(DBG_INTR, "Net carrier on\n");
+			netif_carrier_on(port_to_dev(port));
+		}
+	}
+
+	if (los)
+		dbg(DBG_INTR, "Assert LOS Alarm\n");
+	else
+		dbg(DBG_INTR, "De-assert LOS Alarm\n");
+	if (rra)
+		dbg(DBG_INTR, "Assert RRA Alarm\n");
+	else
+		dbg(DBG_INTR, "De-assert RRA Alarm\n");
+
+	if (ais)
+		dbg(DBG_INTR, "Assert AIS Alarm\n");
+	else
+		dbg(DBG_INTR, "De-assert AIS Alarm\n");
+}
+
+/*      Control signal change interrupt event
+ */
+static void
+fst_intr_ctlchg(struct fst_card_info *card, struct fst_port_info *port)
+{
+	int signals;
+
+	signals = FST_RDL(card, v24DebouncedSts[port->index]);
+
+	if (signals & (((port->hwif == X21) || (port->hwif == X21D))
+		       ? IPSTS_INDICATE : IPSTS_DCD)) {
+		if (!netif_carrier_ok(port_to_dev(port))) {
+			dbg(DBG_INTR, "DCD active\n");
+			netif_carrier_on(port_to_dev(port));
+		}
+	} else {
+		if (netif_carrier_ok(port_to_dev(port))) {
+			dbg(DBG_INTR, "DCD lost\n");
+			netif_carrier_off(port_to_dev(port));
+		}
+	}
+}
+
+/*      Log Rx Errors
+ */
+static void
+fst_log_rx_error(struct fst_card_info *card, struct fst_port_info *port,
+		 unsigned char dmabits, int rxp, unsigned short len)
+{
+	struct net_device *dev = port_to_dev(port);
+
+	/*
+	 * Increment the appropriate error counter
+	 */
+	dev->stats.rx_errors++;
+	if (dmabits & RX_OFLO) {
+		dev->stats.rx_fifo_errors++;
+		dbg(DBG_ASS, "Rx fifo error on card %d port %d buffer %d\n",
+		    card->card_no, port->index, rxp);
+	}
+	if (dmabits & RX_CRC) {
+		dev->stats.rx_crc_errors++;
+		dbg(DBG_ASS, "Rx crc error on card %d port %d\n",
+		    card->card_no, port->index);
+	}
+	if (dmabits & RX_FRAM) {
+		dev->stats.rx_frame_errors++;
+		dbg(DBG_ASS, "Rx frame error on card %d port %d\n",
+		    card->card_no, port->index);
+	}
+	if (dmabits == (RX_STP | RX_ENP)) {
+		dev->stats.rx_length_errors++;
+		dbg(DBG_ASS, "Rx length error (%d) on card %d port %d\n",
+		    len, card->card_no, port->index);
+	}
+}
+
+/*      Rx Error Recovery
+ */
+static void
+fst_recover_rx_error(struct fst_card_info *card, struct fst_port_info *port,
+		     unsigned char dmabits, int rxp, unsigned short len)
+{
+	int i;
+	int pi;
+
+	pi = port->index;
+	/* 
+	 * Discard buffer descriptors until we see the start of the
+	 * next frame.  Note that for long frames this could be in
+	 * a subsequent interrupt. 
+	 */
+	i = 0;
+	while ((dmabits & (DMA_OWN | RX_STP)) == 0) {
+		FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
+		rxp = (rxp+1) % NUM_RX_BUFFER;
+		if (++i > NUM_RX_BUFFER) {
+			dbg(DBG_ASS, "intr_rx: Discarding more bufs"
+			    " than we have\n");
+			break;
+		}
+		dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits);
+		dbg(DBG_ASS, "DMA Bits of next buffer was %x\n", dmabits);
+	}
+	dbg(DBG_ASS, "There were %d subsequent buffers in error\n", i);
+
+	/* Discard the terminal buffer */
+	if (!(dmabits & DMA_OWN)) {
+		FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
+		rxp = (rxp+1) % NUM_RX_BUFFER;
+	}
+	port->rxpos = rxp;
+	return;
+
+}
+
+/*      Rx complete interrupt
+ */
+static void
+fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
+{
+	unsigned char dmabits;
+	int pi;
+	int rxp;
+	int rx_status;
+	unsigned short len;
+	struct sk_buff *skb;
+	struct net_device *dev = port_to_dev(port);
+
+	/* Check we have a buffer to process */
+	pi = port->index;
+	rxp = port->rxpos;
+	dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits);
+	if (dmabits & DMA_OWN) {
+		dbg(DBG_RX | DBG_INTR, "intr_rx: No buffer port %d pos %d\n",
+		    pi, rxp);
+		return;
+	}
+	if (card->dmarx_in_progress) {
+		return;
+	}
+
+	/* Get buffer length */
+	len = FST_RDW(card, rxDescrRing[pi][rxp].mcnt);
+	/* Discard the CRC */
+	len -= 2;
+	if (len == 0) {
+		/*
+		 * This seems to happen on the TE1 interface sometimes
+		 * so throw the frame away and log the event.
+		 */
+		pr_err("Frame received with 0 length. Card %d Port %d\n",
+		       card->card_no, port->index);
+		/* Return descriptor to card */
+		FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
+
+		rxp = (rxp+1) % NUM_RX_BUFFER;
+		port->rxpos = rxp;
+		return;
+	}
+
+	/* Check buffer length and for other errors. We insist on one packet
+	 * in one buffer. This simplifies things greatly and since we've
+	 * allocated 8K it shouldn't be a real world limitation
+	 */
+	dbg(DBG_RX, "intr_rx: %d,%d: flags %x len %d\n", pi, rxp, dmabits, len);
+	if (dmabits != (RX_STP | RX_ENP) || len > LEN_RX_BUFFER - 2) {
+		fst_log_rx_error(card, port, dmabits, rxp, len);
+		fst_recover_rx_error(card, port, dmabits, rxp, len);
+		return;
+	}
+
+	/* Allocate SKB */
+	if ((skb = dev_alloc_skb(len)) == NULL) {
+		dbg(DBG_RX, "intr_rx: can't allocate buffer\n");
+
+		dev->stats.rx_dropped++;
+
+		/* Return descriptor to card */
+		FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
+
+		rxp = (rxp+1) % NUM_RX_BUFFER;
+		port->rxpos = rxp;
+		return;
+	}
+
+	/*
+	 * We know the length we need to receive, len.
+	 * It's not worth using the DMA for reads of less than
+	 * FST_MIN_DMA_LEN
+	 */
+
+	if ((len < FST_MIN_DMA_LEN) || (card->family == FST_FAMILY_TXP)) {
+		memcpy_fromio(skb_put(skb, len),
+			      card->mem + BUF_OFFSET(rxBuffer[pi][rxp][0]),
+			      len);
+
+		/* Reset buffer descriptor */
+		FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
+
+		/* Update stats */
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += len;
+
+		/* Push upstream */
+		dbg(DBG_RX, "Pushing frame up the stack\n");
+		if (port->mode == FST_RAW)
+			skb->protocol = farsync_type_trans(skb, dev);
+		else
+			skb->protocol = hdlc_type_trans(skb, dev);
+		rx_status = netif_rx(skb);
+		fst_process_rx_status(rx_status, port_to_dev(port)->name);
+		if (rx_status == NET_RX_DROP)
+			dev->stats.rx_dropped++;
+	} else {
+		card->dma_skb_rx = skb;
+		card->dma_port_rx = port;
+		card->dma_len_rx = len;
+		card->dma_rxpos = rxp;
+		fst_rx_dma(card, card->rx_dma_handle_card,
+			   BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
+	}
+	if (rxp != port->rxpos) {
+		dbg(DBG_ASS, "About to increment rxpos by more than 1\n");
+		dbg(DBG_ASS, "rxp = %d rxpos = %d\n", rxp, port->rxpos);
+	}
+	rxp = (rxp+1) % NUM_RX_BUFFER;
+	port->rxpos = rxp;
+}
+
+/*
+ *      The bottom halfs to the ISR
+ *      The bottom halves of the ISR
+ */
+
+static void
+do_bottom_half_tx(struct fst_card_info *card)
+{
+	struct fst_port_info *port;
+	int pi;
+	int txq_length;
+	struct sk_buff *skb;
+	unsigned long flags;
+	struct net_device *dev;
+
+	/*
+	 *  Find a free buffer for the transmit
+	 *  Step through each port on this card
+	 */
+
+	dbg(DBG_TX, "do_bottom_half_tx\n");
+	for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
+		if (!port->run)
+			continue;
+
+		dev = port_to_dev(port);
+		while (!(FST_RDB(card, txDescrRing[pi][port->txpos].bits) &
+			 DMA_OWN) &&
+		       !(card->dmatx_in_progress)) {
+			/*
+			 * There doesn't seem to be a txdone event per-se
+			 * We seem to have to deduce it, by checking the DMA_OWN
+			 * bit on the next buffer we think we can use
+			 */
+			spin_lock_irqsave(&card->card_lock, flags);
+			if ((txq_length = port->txqe - port->txqs) < 0) {
+				/*
+				 * This is the case where one has wrapped and the
+				 * maths gives us a negative number
+				 */
+				txq_length = txq_length + FST_TXQ_DEPTH;
+			}
+			spin_unlock_irqrestore(&card->card_lock, flags);
+			if (txq_length > 0) {
+				/*
+				 * There is something to send
+				 */
+				spin_lock_irqsave(&card->card_lock, flags);
+				skb = port->txq[port->txqs];
+				port->txqs++;
+				if (port->txqs == FST_TXQ_DEPTH) {
+					port->txqs = 0;
+				}
+				spin_unlock_irqrestore(&card->card_lock, flags);
+				/*
+				 * copy the data and set the required indicators on the
+				 * card.
+				 */
+				FST_WRW(card, txDescrRing[pi][port->txpos].bcnt,
+					cnv_bcnt(skb->len));
+				if ((skb->len < FST_MIN_DMA_LEN) ||
+				    (card->family == FST_FAMILY_TXP)) {
+					/* Enqueue the packet with normal io */
+					memcpy_toio(card->mem +
+						    BUF_OFFSET(txBuffer[pi]
+							       [port->
+								txpos][0]),
+						    skb->data, skb->len);
+					FST_WRB(card,
+						txDescrRing[pi][port->txpos].
+						bits,
+						DMA_OWN | TX_STP | TX_ENP);
+					dev->stats.tx_packets++;
+					dev->stats.tx_bytes += skb->len;
+					dev->trans_start = jiffies;
+				} else {
+					/* Or do it through dma */
+					memcpy(card->tx_dma_handle_host,
+					       skb->data, skb->len);
+					card->dma_port_tx = port;
+					card->dma_len_tx = skb->len;
+					card->dma_txpos = port->txpos;
+					fst_tx_dma(card,
+						   (char *) card->
+						   tx_dma_handle_card,
+						   (char *)
+						   BUF_OFFSET(txBuffer[pi]
+							      [port->txpos][0]),
+						   skb->len);
+				}
+				if (++port->txpos >= NUM_TX_BUFFER)
+					port->txpos = 0;
+				/*
+				 * If we have flow control on, can we now release it?
+				 */
+				if (port->start) {
+					if (txq_length < fst_txq_low) {
+						netif_wake_queue(port_to_dev
+								 (port));
+						port->start = 0;
+					}
+				}
+				dev_kfree_skb(skb);
+			} else {
+				/*
+				 * Nothing to send so break out of the while loop
+				 */
+				break;
+			}
+		}
+	}
+}
+
+static void
+do_bottom_half_rx(struct fst_card_info *card)
+{
+	struct fst_port_info *port;
+	int pi;
+	int rx_count = 0;
+
+	/* Check for rx completions on all ports on this card */
+	dbg(DBG_RX, "do_bottom_half_rx\n");
+	for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
+		if (!port->run)
+			continue;
+
+		while (!(FST_RDB(card, rxDescrRing[pi][port->rxpos].bits)
+			 & DMA_OWN) && !(card->dmarx_in_progress)) {
+			if (rx_count > fst_max_reads) {
+				/*
+				 * Don't spend forever in receive processing
+				 * Schedule another event
+				 */
+				fst_q_work_item(&fst_work_intq, card->card_no);
+				tasklet_schedule(&fst_int_task);
+				break;	/* Leave the loop */
+			}
+			fst_intr_rx(card, port);
+			rx_count++;
+		}
+	}
+}
+
+/*
+ *      The interrupt service routine
+ *      Dev_id is our fst_card_info pointer
+ */
+static irqreturn_t
+fst_intr(int dummy, void *dev_id)
+{
+	struct fst_card_info *card = dev_id;
+	struct fst_port_info *port;
+	int rdidx;		/* Event buffer indices */
+	int wridx;
+	int event;		/* Actual event for processing */
+	unsigned int dma_intcsr = 0;
+	unsigned int do_card_interrupt;
+	unsigned int int_retry_count;
+
+	/*
+	 * Check to see if the interrupt was for this card
+	 * return if not
+	 * Note that the call to clear the interrupt is important
+	 */
+	dbg(DBG_INTR, "intr: %d %p\n", card->irq, card);
+	if (card->state != FST_RUNNING) {
+		pr_err("Interrupt received for card %d in a non running state (%d)\n",
+		       card->card_no, card->state);
+
+		/* 
+		 * It is possible to really be running, i.e. we have re-loaded
+		 * a running card
+		 * Clear and reprime the interrupt source 
+		 */
+		fst_clear_intr(card);
+		return IRQ_HANDLED;
+	}
+
+	/* Clear and reprime the interrupt source */
+	fst_clear_intr(card);
+
+	/*
+	 * Is the interrupt for this card (handshake == 1)
+	 */
+	do_card_interrupt = 0;
+	if (FST_RDB(card, interruptHandshake) == 1) {
+		do_card_interrupt += FST_CARD_INT;
+		/* Set the software acknowledge */
+		FST_WRB(card, interruptHandshake, 0xEE);
+	}
+	if (card->family == FST_FAMILY_TXU) {
+		/*
+		 * Is it a DMA Interrupt
+		 */
+		dma_intcsr = inl(card->pci_conf + INTCSR_9054);
+		if (dma_intcsr & 0x00200000) {
+			/*
+			 * DMA Channel 0 (Rx transfer complete)
+			 */
+			dbg(DBG_RX, "DMA Rx xfer complete\n");
+			outb(0x8, card->pci_conf + DMACSR0);
+			fst_rx_dma_complete(card, card->dma_port_rx,
+					    card->dma_len_rx, card->dma_skb_rx,
+					    card->dma_rxpos);
+			card->dmarx_in_progress = 0;
+			do_card_interrupt += FST_RX_DMA_INT;
+		}
+		if (dma_intcsr & 0x00400000) {
+			/*
+			 * DMA Channel 1 (Tx transfer complete)
+			 */
+			dbg(DBG_TX, "DMA Tx xfer complete\n");
+			outb(0x8, card->pci_conf + DMACSR1);
+			fst_tx_dma_complete(card, card->dma_port_tx,
+					    card->dma_len_tx, card->dma_txpos);
+			card->dmatx_in_progress = 0;
+			do_card_interrupt += FST_TX_DMA_INT;
+		}
+	}
+
+	/*
+	 * Have we been missing Interrupts
+	 */
+	int_retry_count = FST_RDL(card, interruptRetryCount);
+	if (int_retry_count) {
+		dbg(DBG_ASS, "Card %d int_retry_count is  %d\n",
+		    card->card_no, int_retry_count);
+		FST_WRL(card, interruptRetryCount, 0);
+	}
+
+	if (!do_card_interrupt) {
+		return IRQ_HANDLED;
+	}
+
+	/* Schedule the bottom half of the ISR */
+	fst_q_work_item(&fst_work_intq, card->card_no);
+	tasklet_schedule(&fst_int_task);
+
+	/* Drain the event queue */
+	rdidx = FST_RDB(card, interruptEvent.rdindex) & 0x1f;
+	wridx = FST_RDB(card, interruptEvent.wrindex) & 0x1f;
+	while (rdidx != wridx) {
+		event = FST_RDB(card, interruptEvent.evntbuff[rdidx]);
+		port = &card->ports[event & 0x03];
+
+		dbg(DBG_INTR, "Processing Interrupt event: %x\n", event);
+
+		switch (event) {
+		case TE1_ALMA:
+			dbg(DBG_INTR, "TE1 Alarm intr\n");
+			if (port->run)
+				fst_intr_te1_alarm(card, port);
+			break;
+
+		case CTLA_CHG:
+		case CTLB_CHG:
+		case CTLC_CHG:
+		case CTLD_CHG:
+			if (port->run)
+				fst_intr_ctlchg(card, port);
+			break;
+
+		case ABTA_SENT:
+		case ABTB_SENT:
+		case ABTC_SENT:
+		case ABTD_SENT:
+			dbg(DBG_TX, "Abort complete port %d\n", port->index);
+			break;
+
+		case TXA_UNDF:
+		case TXB_UNDF:
+		case TXC_UNDF:
+		case TXD_UNDF:
+			/* Difficult to see how we'd get this given that we
+			 * always load up the entire packet for DMA.
+			 */
+			dbg(DBG_TX, "Tx underflow port %d\n", port->index);
+			port_to_dev(port)->stats.tx_errors++;
+			port_to_dev(port)->stats.tx_fifo_errors++;
+			dbg(DBG_ASS, "Tx underflow on card %d port %d\n",
+			    card->card_no, port->index);
+			break;
+
+		case INIT_CPLT:
+			dbg(DBG_INIT, "Card init OK intr\n");
+			break;
+
+		case INIT_FAIL:
+			dbg(DBG_INIT, "Card init FAILED intr\n");
+			card->state = FST_IFAILED;
+			break;
+
+		default:
+			pr_err("intr: unknown card event %d. ignored\n", event);
+			break;
+		}
+
+		/* Bump and wrap the index */
+		if (++rdidx >= MAX_CIRBUFF)
+			rdidx = 0;
+	}
+	FST_WRB(card, interruptEvent.rdindex, rdidx);
+	return IRQ_HANDLED;
+}
+
+/*      Check that the shared memory configuration is one that we can handle
+ *      and that some basic parameters are correct
+ */
+static void
+check_started_ok(struct fst_card_info *card)
+{
+	int i;
+
+	/* Check structure version and end marker */
+	if (FST_RDW(card, smcVersion) != SMC_VERSION) {
+		pr_err("Bad shared memory version %d expected %d\n",
+		       FST_RDW(card, smcVersion), SMC_VERSION);
+		card->state = FST_BADVERSION;
+		return;
+	}
+	if (FST_RDL(card, endOfSmcSignature) != END_SIG) {
+		pr_err("Missing shared memory signature\n");
+		card->state = FST_BADVERSION;
+		return;
+	}
+	/* Firmware status flag, 0x00 = initialising, 0x01 = OK, 0xFF = fail */
+	if ((i = FST_RDB(card, taskStatus)) == 0x01) {
+		card->state = FST_RUNNING;
+	} else if (i == 0xFF) {
+		pr_err("Firmware initialisation failed. Card halted\n");
+		card->state = FST_HALTED;
+		return;
+	} else if (i != 0x00) {
+		pr_err("Unknown firmware status 0x%x\n", i);
+		card->state = FST_HALTED;
+		return;
+	}
+
+	/* Finally check the number of ports reported by firmware against the
+	 * number we assumed at card detection. Should never happen with
+	 * existing firmware etc so we just report it for the moment.
+	 */
+	if (FST_RDL(card, numberOfPorts) != card->nports) {
+		pr_warn("Port count mismatch on card %d.  Firmware thinks %d we say %d\n",
+			card->card_no,
+			FST_RDL(card, numberOfPorts), card->nports);
+	}
+}
+
+static int
+set_conf_from_info(struct fst_card_info *card, struct fst_port_info *port,
+		   struct fstioc_info *info)
+{
+	int err;
+	unsigned char my_framing;
+
+	/* Set things according to the user-set valid flags.
+	 * Several of the old options have been invalidated/replaced by the
+	 * generic hdlc package.
+	 */
+	err = 0;
+	if (info->valid & FSTVAL_PROTO) {
+		if (info->proto == FST_RAW)
+			port->mode = FST_RAW;
+		else
+			port->mode = FST_GEN_HDLC;
+	}
+
+	if (info->valid & FSTVAL_CABLE)
+		err = -EINVAL;
+
+	if (info->valid & FSTVAL_SPEED)
+		err = -EINVAL;
+
+	if (info->valid & FSTVAL_PHASE)
+		FST_WRB(card, portConfig[port->index].invertClock,
+			info->invertClock);
+	if (info->valid & FSTVAL_MODE)
+		FST_WRW(card, cardMode, info->cardMode);
+	if (info->valid & FSTVAL_TE1) {
+		FST_WRL(card, suConfig.dataRate, info->lineSpeed);
+		FST_WRB(card, suConfig.clocking, info->clockSource);
+		my_framing = FRAMING_E1;
+		if (info->framing == E1)
+			my_framing = FRAMING_E1;
+		if (info->framing == T1)
+			my_framing = FRAMING_T1;
+		if (info->framing == J1)
+			my_framing = FRAMING_J1;
+		FST_WRB(card, suConfig.framing, my_framing);
+		FST_WRB(card, suConfig.structure, info->structure);
+		FST_WRB(card, suConfig.interface, info->interface);
+		FST_WRB(card, suConfig.coding, info->coding);
+		FST_WRB(card, suConfig.lineBuildOut, info->lineBuildOut);
+		FST_WRB(card, suConfig.equalizer, info->equalizer);
+		FST_WRB(card, suConfig.transparentMode, info->transparentMode);
+		FST_WRB(card, suConfig.loopMode, info->loopMode);
+		FST_WRB(card, suConfig.range, info->range);
+		FST_WRB(card, suConfig.txBufferMode, info->txBufferMode);
+		FST_WRB(card, suConfig.rxBufferMode, info->rxBufferMode);
+		FST_WRB(card, suConfig.startingSlot, info->startingSlot);
+		FST_WRB(card, suConfig.losThreshold, info->losThreshold);
+		if (info->idleCode)
+			FST_WRB(card, suConfig.enableIdleCode, 1);
+		else
+			FST_WRB(card, suConfig.enableIdleCode, 0);
+		FST_WRB(card, suConfig.idleCode, info->idleCode);
+#if FST_DEBUG
+		if (info->valid & FSTVAL_TE1) {
+			printk("Setting TE1 data\n");
+			printk("Line Speed = %d\n", info->lineSpeed);
+			printk("Start slot = %d\n", info->startingSlot);
+			printk("Clock source = %d\n", info->clockSource);
+			printk("Framing = %d\n", my_framing);
+			printk("Structure = %d\n", info->structure);
+			printk("interface = %d\n", info->interface);
+			printk("Coding = %d\n", info->coding);
+			printk("Line build out = %d\n", info->lineBuildOut);
+			printk("Equaliser = %d\n", info->equalizer);
+			printk("Transparent mode = %d\n",
+			       info->transparentMode);
+			printk("Loop mode = %d\n", info->loopMode);
+			printk("Range = %d\n", info->range);
+			printk("Tx Buffer mode = %d\n", info->txBufferMode);
+			printk("Rx Buffer mode = %d\n", info->rxBufferMode);
+			printk("LOS Threshold = %d\n", info->losThreshold);
+			printk("Idle Code = %d\n", info->idleCode);
+		}
+#endif
+	}
+#if FST_DEBUG
+	if (info->valid & FSTVAL_DEBUG) {
+		fst_debug_mask = info->debug;
+	}
+#endif
+
+	return err;
+}
+
+static void
+gather_conf_info(struct fst_card_info *card, struct fst_port_info *port,
+		 struct fstioc_info *info)
+{
+	int i;
+
+	memset(info, 0, sizeof (struct fstioc_info));
+
+	i = port->index;
+	info->kernelVersion = LINUX_VERSION_CODE;
+	info->nports = card->nports;
+	info->type = card->type;
+	info->state = card->state;
+	info->proto = FST_GEN_HDLC;
+	info->index = i;
+#if FST_DEBUG
+	info->debug = fst_debug_mask;
+#endif
+
+	/* Only mark information as valid if card is running.
+	 * Copy the data anyway in case it is useful for diagnostics
+	 */
+	info->valid = ((card->state == FST_RUNNING) ? FSTVAL_ALL : FSTVAL_CARD)
+#if FST_DEBUG
+	    | FSTVAL_DEBUG
+#endif
+	    ;
+
+	info->lineInterface = FST_RDW(card, portConfig[i].lineInterface);
+	info->internalClock = FST_RDB(card, portConfig[i].internalClock);
+	info->lineSpeed = FST_RDL(card, portConfig[i].lineSpeed);
+	info->invertClock = FST_RDB(card, portConfig[i].invertClock);
+	info->v24IpSts = FST_RDL(card, v24IpSts[i]);
+	info->v24OpSts = FST_RDL(card, v24OpSts[i]);
+	info->clockStatus = FST_RDW(card, clockStatus[i]);
+	info->cableStatus = FST_RDW(card, cableStatus);
+	info->cardMode = FST_RDW(card, cardMode);
+	info->smcFirmwareVersion = FST_RDL(card, smcFirmwareVersion);
+
+	/*
+	 * The T2U can report cable presence for both A or B
+	 * in bits 0 and 1 of cableStatus.  See which port we are and 
+	 * do the mapping.
+	 */
+	if (card->family == FST_FAMILY_TXU) {
+		if (port->index == 0) {
+			/*
+			 * Port A
+			 */
+			info->cableStatus = info->cableStatus & 1;
+		} else {
+			/*
+			 * Port B
+			 */
+			info->cableStatus = info->cableStatus >> 1;
+			info->cableStatus = info->cableStatus & 1;
+		}
+	}
+	/*
+	 * Some additional bits if we are TE1
+	 */
+	if (card->type == FST_TYPE_TE1) {
+		info->lineSpeed = FST_RDL(card, suConfig.dataRate);
+		info->clockSource = FST_RDB(card, suConfig.clocking);
+		info->framing = FST_RDB(card, suConfig.framing);
+		info->structure = FST_RDB(card, suConfig.structure);
+		info->interface = FST_RDB(card, suConfig.interface);
+		info->coding = FST_RDB(card, suConfig.coding);
+		info->lineBuildOut = FST_RDB(card, suConfig.lineBuildOut);
+		info->equalizer = FST_RDB(card, suConfig.equalizer);
+		info->loopMode = FST_RDB(card, suConfig.loopMode);
+		info->range = FST_RDB(card, suConfig.range);
+		info->txBufferMode = FST_RDB(card, suConfig.txBufferMode);
+		info->rxBufferMode = FST_RDB(card, suConfig.rxBufferMode);
+		info->startingSlot = FST_RDB(card, suConfig.startingSlot);
+		info->losThreshold = FST_RDB(card, suConfig.losThreshold);
+		if (FST_RDB(card, suConfig.enableIdleCode))
+			info->idleCode = FST_RDB(card, suConfig.idleCode);
+		else
+			info->idleCode = 0;
+		info->receiveBufferDelay =
+		    FST_RDL(card, suStatus.receiveBufferDelay);
+		info->framingErrorCount =
+		    FST_RDL(card, suStatus.framingErrorCount);
+		info->codeViolationCount =
+		    FST_RDL(card, suStatus.codeViolationCount);
+		info->crcErrorCount = FST_RDL(card, suStatus.crcErrorCount);
+		info->lineAttenuation = FST_RDL(card, suStatus.lineAttenuation);
+		info->lossOfSignal = FST_RDB(card, suStatus.lossOfSignal);
+		info->receiveRemoteAlarm =
+		    FST_RDB(card, suStatus.receiveRemoteAlarm);
+		info->alarmIndicationSignal =
+		    FST_RDB(card, suStatus.alarmIndicationSignal);
+	}
+}
+
+static int
+fst_set_iface(struct fst_card_info *card, struct fst_port_info *port,
+	      struct ifreq *ifr)
+{
+	sync_serial_settings sync;
+	int i;
+
+	if (ifr->ifr_settings.size != sizeof (sync)) {
+		return -ENOMEM;
+	}
+
+	if (copy_from_user
+	    (&sync, ifr->ifr_settings.ifs_ifsu.sync, sizeof (sync))) {
+		return -EFAULT;
+	}
+
+	if (sync.loopback)
+		return -EINVAL;
+
+	i = port->index;
+
+	switch (ifr->ifr_settings.type) {
+	case IF_IFACE_V35:
+		FST_WRW(card, portConfig[i].lineInterface, V35);
+		port->hwif = V35;
+		break;
+
+	case IF_IFACE_V24:
+		FST_WRW(card, portConfig[i].lineInterface, V24);
+		port->hwif = V24;
+		break;
+
+	case IF_IFACE_X21:
+		FST_WRW(card, portConfig[i].lineInterface, X21);
+		port->hwif = X21;
+		break;
+
+	case IF_IFACE_X21D:
+		FST_WRW(card, portConfig[i].lineInterface, X21D);
+		port->hwif = X21D;
+		break;
+
+	case IF_IFACE_T1:
+		FST_WRW(card, portConfig[i].lineInterface, T1);
+		port->hwif = T1;
+		break;
+
+	case IF_IFACE_E1:
+		FST_WRW(card, portConfig[i].lineInterface, E1);
+		port->hwif = E1;
+		break;
+
+	case IF_IFACE_SYNC_SERIAL:
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	switch (sync.clock_type) {
+	case CLOCK_EXT:
+		FST_WRB(card, portConfig[i].internalClock, EXTCLK);
+		break;
+
+	case CLOCK_INT:
+		FST_WRB(card, portConfig[i].internalClock, INTCLK);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	FST_WRL(card, portConfig[i].lineSpeed, sync.clock_rate);
+	return 0;
+}
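+
+/*
+ * Illustrative user-space sketch (not part of the driver): selecting X.21
+ * with an internally generated 64 kbit/s clock through the SIOCWANDEV path
+ * handled above.  The socket fd and interface name are assumptions made for
+ * the example only.
+ *
+ *      sync_serial_settings sync = { .clock_rate = 64000,
+ *                                    .clock_type = CLOCK_INT,
+ *                                    .loopback   = 0 };
+ *      struct ifreq ifr;
+ *
+ *      strcpy(ifr.ifr_name, "sync0");
+ *      ifr.ifr_settings.type = IF_IFACE_X21;
+ *      ifr.ifr_settings.size = sizeof(sync);
+ *      ifr.ifr_settings.ifs_ifsu.sync = &sync;
+ *      ioctl(fd, SIOCWANDEV, &ifr);
+ */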
+
+static int
+fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
+	      struct ifreq *ifr)
+{
+	sync_serial_settings sync;
+	int i;
+
+	/* First check what line type is set, we'll default to reporting X.21
+	 * if nothing is set as IF_IFACE_SYNC_SERIAL implies it can't be
+	 * changed
+	 */
+	switch (port->hwif) {
+	case E1:
+		ifr->ifr_settings.type = IF_IFACE_E1;
+		break;
+	case T1:
+		ifr->ifr_settings.type = IF_IFACE_T1;
+		break;
+	case V35:
+		ifr->ifr_settings.type = IF_IFACE_V35;
+		break;
+	case V24:
+		ifr->ifr_settings.type = IF_IFACE_V24;
+		break;
+	case X21D:
+		ifr->ifr_settings.type = IF_IFACE_X21D;
+		break;
+	case X21:
+	default:
+		ifr->ifr_settings.type = IF_IFACE_X21;
+		break;
+	}
+	if (ifr->ifr_settings.size == 0) {
+		return 0;	/* only type requested */
+	}
+	if (ifr->ifr_settings.size < sizeof (sync)) {
+		return -ENOMEM;
+	}
+
+	i = port->index;
+	memset(&sync, 0, sizeof(sync));
+	sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
+	/* Luckily the card and Linux use the same encoding here */
+	sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
+	    INTCLK ? CLOCK_INT : CLOCK_EXT;
+	sync.loopback = 0;
+
+	if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sync, sizeof (sync))) {
+		return -EFAULT;
+	}
+
+	ifr->ifr_settings.size = sizeof (sync);
+	return 0;
+}
+
+static int
+fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct fst_card_info *card;
+	struct fst_port_info *port;
+	struct fstioc_write wrthdr;
+	struct fstioc_info info;
+	unsigned long flags;
+	void *buf;
+
+	dbg(DBG_IOCTL, "ioctl: %x, %p\n", cmd, ifr->ifr_data);
+
+	port = dev_to_port(dev);
+	card = port->card;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	switch (cmd) {
+	case FSTCPURESET:
+		fst_cpureset(card);
+		card->state = FST_RESET;
+		return 0;
+
+	case FSTCPURELEASE:
+		fst_cpurelease(card);
+		card->state = FST_STARTING;
+		return 0;
+
+	case FSTWRITE:		/* Code write (download) */
+
+		/* First copy in the header with the length and offset of data
+		 * to write
+		 */
+		if (ifr->ifr_data == NULL) {
+			return -EINVAL;
+		}
+		if (copy_from_user(&wrthdr, ifr->ifr_data,
+				   sizeof (struct fstioc_write))) {
+			return -EFAULT;
+		}
+
+		/* Sanity check the parameters. We don't support partial writes
+		 * when going over the top
+		 */
+		if (wrthdr.size > FST_MEMSIZE || wrthdr.offset > FST_MEMSIZE ||
+		    wrthdr.size + wrthdr.offset > FST_MEMSIZE) {
+			return -ENXIO;
+		}
+
+		/* Now copy the data to the card. */
+
+		buf = memdup_user(ifr->ifr_data + sizeof(struct fstioc_write),
+				  wrthdr.size);
+		if (IS_ERR(buf))
+			return PTR_ERR(buf);
+
+		memcpy_toio(card->mem + wrthdr.offset, buf, wrthdr.size);
+		kfree(buf);
+
+		/* Writes to the memory of a card in the reset state constitute
+		 * a download
+		 */
+		if (card->state == FST_RESET) {
+			card->state = FST_DOWNLOAD;
+		}
+		return 0;
+
+	case FSTGETCONF:
+
+		/* If card has just been started check the shared memory config
+		 * version and marker
+		 */
+		if (card->state == FST_STARTING) {
+			check_started_ok(card);
+
+			/* If everything checked out enable card interrupts */
+			if (card->state == FST_RUNNING) {
+				spin_lock_irqsave(&card->card_lock, flags);
+				fst_enable_intr(card);
+				FST_WRB(card, interruptHandshake, 0xEE);
+				spin_unlock_irqrestore(&card->card_lock, flags);
+			}
+		}
+
+		if (ifr->ifr_data == NULL) {
+			return -EINVAL;
+		}
+
+		gather_conf_info(card, port, &info);
+
+		if (copy_to_user(ifr->ifr_data, &info, sizeof (info))) {
+			return -EFAULT;
+		}
+		return 0;
+
+	case FSTSETCONF:
+
+		/*
+		 * Most of the settings have been moved to the generic ioctls
+		 * this just covers debug and board ident now
+		 */
+
+		if (card->state != FST_RUNNING) {
+			pr_err("Attempt to configure card %d in non-running state (%d)\n",
+			       card->card_no, card->state);
+			return -EIO;
+		}
+		if (copy_from_user(&info, ifr->ifr_data, sizeof (info))) {
+			return -EFAULT;
+		}
+
+		return set_conf_from_info(card, port, &info);
+
+	case SIOCWANDEV:
+		switch (ifr->ifr_settings.type) {
+		case IF_GET_IFACE:
+			return fst_get_iface(card, port, ifr);
+
+		case IF_IFACE_SYNC_SERIAL:
+		case IF_IFACE_V35:
+		case IF_IFACE_V24:
+		case IF_IFACE_X21:
+		case IF_IFACE_X21D:
+		case IF_IFACE_T1:
+		case IF_IFACE_E1:
+			return fst_set_iface(card, port, ifr);
+
+		case IF_PROTO_RAW:
+			port->mode = FST_RAW;
+			return 0;
+
+		case IF_GET_PROTO:
+			if (port->mode == FST_RAW) {
+				ifr->ifr_settings.type = IF_PROTO_RAW;
+				return 0;
+			}
+			return hdlc_ioctl(dev, ifr, cmd);
+
+		default:
+			port->mode = FST_GEN_HDLC;
+			dbg(DBG_IOCTL, "Passing this type to hdlc %x\n",
+			    ifr->ifr_settings.type);
+			return hdlc_ioctl(dev, ifr, cmd);
+		}
+
+	default:
+		/* Not one of ours. Pass through to HDLC package */
+		return hdlc_ioctl(dev, ifr, cmd);
+	}
+}
+
+static void
+fst_openport(struct fst_port_info *port)
+{
+	int signals;
+	int txq_length;
+
+	/* Only init things if card is actually running. This allows open to
+	 * succeed for downloads etc.
+	 */
+	if (port->card->state == FST_RUNNING) {
+		if (port->run) {
+			dbg(DBG_OPEN, "open: found port already running\n");
+
+			fst_issue_cmd(port, STOPPORT);
+			port->run = 0;
+		}
+
+		fst_rx_config(port);
+		fst_tx_config(port);
+		fst_op_raise(port, OPSTS_RTS | OPSTS_DTR);
+
+		fst_issue_cmd(port, STARTPORT);
+		port->run = 1;
+
+		signals = FST_RDL(port->card, v24DebouncedSts[port->index]);
+		if (signals & (((port->hwif == X21) || (port->hwif == X21D))
+			       ? IPSTS_INDICATE : IPSTS_DCD))
+			netif_carrier_on(port_to_dev(port));
+		else
+			netif_carrier_off(port_to_dev(port));
+
+		txq_length = port->txqe - port->txqs;
+		port->txqe = 0;
+		port->txqs = 0;
+	}
+
+}
+
+static void
+fst_closeport(struct fst_port_info *port)
+{
+	if (port->card->state == FST_RUNNING) {
+		if (port->run) {
+			port->run = 0;
+			fst_op_lower(port, OPSTS_RTS | OPSTS_DTR);
+
+			fst_issue_cmd(port, STOPPORT);
+		} else {
+			dbg(DBG_OPEN, "close: port not running\n");
+		}
+	}
+}
+
+static int
+fst_open(struct net_device *dev)
+{
+	int err;
+	struct fst_port_info *port;
+
+	port = dev_to_port(dev);
+	if (!try_module_get(THIS_MODULE))
+		return -EBUSY;
+
+	if (port->mode != FST_RAW) {
+		err = hdlc_open(dev);
+		if (err) {
+			module_put(THIS_MODULE);
+			return err;
+		}
+	}
+
+	fst_openport(port);
+	netif_wake_queue(dev);
+	return 0;
+}
+
+static int
+fst_close(struct net_device *dev)
+{
+	struct fst_port_info *port;
+	struct fst_card_info *card;
+	unsigned char tx_dma_done;
+	unsigned char rx_dma_done;
+
+	port = dev_to_port(dev);
+	card = port->card;
+
+	tx_dma_done = inb(card->pci_conf + DMACSR1);
+	rx_dma_done = inb(card->pci_conf + DMACSR0);
+	dbg(DBG_OPEN,
+	    "Port Close: tx_dma_in_progress = %d (%x) rx_dma_in_progress = %d (%x)\n",
+	    card->dmatx_in_progress, tx_dma_done, card->dmarx_in_progress,
+	    rx_dma_done);
+
+	netif_stop_queue(dev);
+	fst_closeport(dev_to_port(dev));
+	if (port->mode != FST_RAW) {
+		hdlc_close(dev);
+	}
+	module_put(THIS_MODULE);
+	return 0;
+}
+
+static int
+fst_attach(struct net_device *dev, unsigned short encoding, unsigned short parity)
+{
+	/*
+	 * Setting currently fixed in FarSync card so we check and forget
+	 */
+	if (encoding != ENCODING_NRZ || parity != PARITY_CRC16_PR1_CCITT)
+		return -EINVAL;
+	return 0;
+}
+
+static void
+fst_tx_timeout(struct net_device *dev)
+{
+	struct fst_port_info *port;
+	struct fst_card_info *card;
+
+	port = dev_to_port(dev);
+	card = port->card;
+	dev->stats.tx_errors++;
+	dev->stats.tx_aborted_errors++;
+	dbg(DBG_ASS, "Tx timeout card %d port %d\n",
+	    card->card_no, port->index);
+	fst_issue_cmd(port, ABORTTX);
+
+	dev->trans_start = jiffies;
+	netif_wake_queue(dev);
+	port->start = 0;
+}
+
+static netdev_tx_t
+fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct fst_card_info *card;
+	struct fst_port_info *port;
+	unsigned long flags;
+	int txq_length;
+
+	port = dev_to_port(dev);
+	card = port->card;
+	dbg(DBG_TX, "fst_start_xmit: length = %d\n", skb->len);
+
+	/* Drop packet with error if we don't have carrier */
+	if (!netif_carrier_ok(dev)) {
+		dev_kfree_skb(skb);
+		dev->stats.tx_errors++;
+		dev->stats.tx_carrier_errors++;
+		dbg(DBG_ASS,
+		    "Tried to transmit but no carrier on card %d port %d\n",
+		    card->card_no, port->index);
+		return NETDEV_TX_OK;
+	}
+
+	/* Drop it if it's too big! MTU failure ? */
+	if (skb->len > LEN_TX_BUFFER) {
+		dbg(DBG_ASS, "Packet too large %d vs %d\n", skb->len,
+		    LEN_TX_BUFFER);
+		dev_kfree_skb(skb);
+		dev->stats.tx_errors++;
+		return NETDEV_TX_OK;
+	}
+
+	/*
+	 * We are always going to queue the packet
+	 * so that the bottom half is the only place we tx from
+	 * Check there is room in the port txq
+	 */
+	spin_lock_irqsave(&card->card_lock, flags);
+	if ((txq_length = port->txqe - port->txqs) < 0) {
+		/*
+		 * This is the case where the next free has wrapped but the
+		 * last used hasn't
+		 */
+		txq_length = txq_length + FST_TXQ_DEPTH;
+	}
+	spin_unlock_irqrestore(&card->card_lock, flags);
+	if (txq_length > fst_txq_high) {
+		/*
+		 * We have got enough buffers in the pipeline.  Ask the network
+		 * layer to stop sending frames down
+		 */
+		netif_stop_queue(dev);
+		port->start = 1;	/* record that we told the stack to stop */
+	}
+
+	if (txq_length == FST_TXQ_DEPTH - 1) {
+		/*
+		 * This shouldn't have happened but such is life
+		 */
+		dev_kfree_skb(skb);
+		dev->stats.tx_errors++;
+		dbg(DBG_ASS, "Tx queue overflow card %d port %d\n",
+		    card->card_no, port->index);
+		return NETDEV_TX_OK;
+	}
+
+	/*
+	 * queue the buffer
+	 */
+	spin_lock_irqsave(&card->card_lock, flags);
+	port->txq[port->txqe] = skb;
+	port->txqe++;
+	if (port->txqe == FST_TXQ_DEPTH)
+		port->txqe = 0;
+	spin_unlock_irqrestore(&card->card_lock, flags);
+
+	/* Schedule the bottom half which now does transmit processing */
+	fst_q_work_item(&fst_work_txq, card->card_no);
+	tasklet_schedule(&fst_tx_task);
+
+	return NETDEV_TX_OK;
+}
+
+/*
+ *      Card setup having checked hardware resources.
+ *      Should be pretty bizarre if we get an error here (kernel memory
+ *      exhaustion is one possibility). If we do see a problem we report it
+ *      via a printk and leave the corresponding interface and all that follow
+ *      disabled.
+ */
+static char *type_strings[] __devinitdata = {
+	"no hardware",		/* Should never be seen */
+	"FarSync T2P",
+	"FarSync T4P",
+	"FarSync T1U",
+	"FarSync T2U",
+	"FarSync T4U",
+	"FarSync TE1"
+};
+
+static void __devinit
+fst_init_card(struct fst_card_info *card)
+{
+	int i;
+	int err;
+
+	/* We're working on a number of ports based on the card ID. If the
+	 * firmware detects something different later (should never happen)
+	 * we'll have to revise it in some way then.
+	 */
+	for (i = 0; i < card->nports; i++) {
+		err = register_hdlc_device(card->ports[i].dev);
+		if (err < 0) {
+			int j;
+			pr_err("Cannot register HDLC device for port %d (errno %d)\n",
+			       i, -err);
+			for (j = i; j < card->nports; j++) {
+				free_netdev(card->ports[j].dev);
+				card->ports[j].dev = NULL;
+			}
+			card->nports = i;
+			break;
+		}
+	}
+
+	pr_info("%s-%s: %s IRQ%d, %d ports\n",
+		port_to_dev(&card->ports[0])->name,
+		port_to_dev(&card->ports[card->nports - 1])->name,
+		type_strings[card->type], card->irq, card->nports);
+}
+
+static const struct net_device_ops fst_ops = {
+	.ndo_open       = fst_open,
+	.ndo_stop       = fst_close,
+	.ndo_change_mtu = hdlc_change_mtu,
+	.ndo_start_xmit = hdlc_start_xmit,
+	.ndo_do_ioctl   = fst_ioctl,
+	.ndo_tx_timeout = fst_tx_timeout,
+};
+
+/*
+ *      Initialise card when detected.
+ *      Returns 0 to indicate success, or errno otherwise.
+ */
+static int __devinit
+fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int no_of_cards_added = 0;
+	struct fst_card_info *card;
+	int err = 0;
+	int i;
+
+	printk_once(KERN_INFO
+		    pr_fmt("FarSync WAN driver " FST_USER_VERSION
+			   " (c) 2001-2004 FarSite Communications Ltd.\n"));
+#if FST_DEBUG
+	dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask);
+#endif
+	/*
+	 * We are going to be clever and allow certain cards not to be
+	 * configured.  An exclude list can be provided in /etc/modules.conf
+	 */
+	if (fst_excluded_cards != 0) {
+		/*
+		 * There are cards to exclude
+		 */
+		for (i = 0; i < fst_excluded_cards; i++) {
+			if ((pdev->devfn) >> 3 == fst_excluded_list[i]) {
+				pr_info("FarSync PCI device %d not assigned\n",
+					(pdev->devfn) >> 3);
+				return -EBUSY;
+			}
+		}
+	}
+
+	/* Allocate driver private data */
+	card = kzalloc(sizeof (struct fst_card_info), GFP_KERNEL);
+	if (card == NULL) {
+		pr_err("FarSync card found but insufficient memory for driver storage\n");
+		return -ENOMEM;
+	}
+
+	/* Try to enable the device */
+	if ((err = pci_enable_device(pdev)) != 0) {
+		pr_err("Failed to enable card. Err %d\n", -err);
+		kfree(card);
+		return err;
+	}
+
+	if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
+		pr_err("Failed to allocate regions. Err %d\n", -err);
+		pci_disable_device(pdev);
+		kfree(card);
+	        return err;
+	}
+
+	/* Get virtual addresses of memory regions */
+	card->pci_conf = pci_resource_start(pdev, 1);
+	card->phys_mem = pci_resource_start(pdev, 2);
+	card->phys_ctlmem = pci_resource_start(pdev, 3);
+	if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
+		pr_err("Physical memory remap failed\n");
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		kfree(card);
+		return -ENODEV;
+	}
+	if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
+		pr_err("Control memory remap failed\n");
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		iounmap(card->mem);
+		kfree(card);
+		return -ENODEV;
+	}
+	dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
+
+	/* Register the interrupt handler */
+	if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
+		pr_err("Unable to register interrupt %d\n", pdev->irq);
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		iounmap(card->ctlmem);
+		iounmap(card->mem);
+		kfree(card);
+		return -ENODEV;
+	}
+
+	/* Record info we need */
+	card->irq = pdev->irq;
+	card->type = ent->driver_data;
+	card->family = ((ent->driver_data == FST_TYPE_T2P) ||
+			(ent->driver_data == FST_TYPE_T4P))
+	    ? FST_FAMILY_TXP : FST_FAMILY_TXU;
+	if ((ent->driver_data == FST_TYPE_T1U) ||
+	    (ent->driver_data == FST_TYPE_TE1))
+		card->nports = 1;
+	else
+		card->nports = ((ent->driver_data == FST_TYPE_T2P) ||
+				(ent->driver_data == FST_TYPE_T2U)) ? 2 : 4;
+
+	card->state = FST_UNINIT;
+	spin_lock_init(&card->card_lock);
+
+	for (i = 0; i < card->nports; i++) {
+		struct net_device *dev = alloc_hdlcdev(&card->ports[i]);
+		hdlc_device *hdlc;
+		if (!dev) {
+			while (i--)
+				free_netdev(card->ports[i].dev);
+			pr_err("FarSync: out of memory\n");
+                        free_irq(card->irq, card);
+                        pci_release_regions(pdev);
+                        pci_disable_device(pdev);
+                        iounmap(card->ctlmem);
+                        iounmap(card->mem);
+                        kfree(card);
+                        return -ENODEV;
+		}
+		card->ports[i].dev    = dev;
+		card->ports[i].card   = card;
+		card->ports[i].index  = i;
+		card->ports[i].run    = 0;
+
+		hdlc = dev_to_hdlc(dev);
+
+		/* Fill in the net device info */
+		/* Since this is a PCI setup this is purely
+		 * informational. Give them the buffer addresses
+		 * and basic card I/O.
+		 */
+		dev->mem_start   = card->phys_mem
+				 + BUF_OFFSET(txBuffer[i][0][0]);
+		dev->mem_end     = card->phys_mem
+				 + BUF_OFFSET(txBuffer[i][NUM_TX_BUFFER][0]);
+		dev->base_addr   = card->pci_conf;
+		dev->irq         = card->irq;
+
+		dev->netdev_ops = &fst_ops;
+		dev->tx_queue_len = FST_TX_QUEUE_LEN;
+		dev->watchdog_timeo = FST_TX_TIMEOUT;
+		hdlc->attach = fst_attach;
+		hdlc->xmit   = fst_start_xmit;
+	}
+
+	card->device = pdev;
+
+	dbg(DBG_PCI, "type %d nports %d irq %d\n", card->type,
+	    card->nports, card->irq);
+	dbg(DBG_PCI, "conf %04x mem %08x ctlmem %08x\n",
+	    card->pci_conf, card->phys_mem, card->phys_ctlmem);
+
+	/* Reset the card's processor */
+	fst_cpureset(card);
+	card->state = FST_RESET;
+
+	/* Initialise DMA (if required) */
+	fst_init_dma(card);
+
+	/* Record driver data for later use */
+	pci_set_drvdata(pdev, card);
+
+	/* Remainder of card setup */
+	fst_card_array[no_of_cards_added] = card;
+	card->card_no = no_of_cards_added++;	/* Record instance and bump it */
+	fst_init_card(card);
+	if (card->family == FST_FAMILY_TXU) {
+		/*
+		 * Allocate a dma buffer for transmit and receives
+		 */
+		card->rx_dma_handle_host =
+		    pci_alloc_consistent(card->device, FST_MAX_MTU,
+					 &card->rx_dma_handle_card);
+		if (card->rx_dma_handle_host == NULL) {
+			pr_err("Could not allocate rx dma buffer\n");
+			fst_disable_intr(card);
+			pci_release_regions(pdev);
+			pci_disable_device(pdev);
+			iounmap(card->ctlmem);
+			iounmap(card->mem);
+			kfree(card);
+			return -ENOMEM;
+		}
+		card->tx_dma_handle_host =
+		    pci_alloc_consistent(card->device, FST_MAX_MTU,
+					 &card->tx_dma_handle_card);
+		if (card->tx_dma_handle_host == NULL) {
+			pr_err("Could not allocate tx dma buffer\n");
+			fst_disable_intr(card);
+			pci_release_regions(pdev);
+			pci_disable_device(pdev);
+			iounmap(card->ctlmem);
+			iounmap(card->mem);
+			kfree(card);
+			return -ENOMEM;
+		}
+	}
+	return 0;		/* Success */
+}
+
+/*
+ *      Cleanup and close down a card
+ */
+static void __devexit
+fst_remove_one(struct pci_dev *pdev)
+{
+	struct fst_card_info *card;
+	int i;
+
+	card = pci_get_drvdata(pdev);
+
+	for (i = 0; i < card->nports; i++) {
+		struct net_device *dev = port_to_dev(&card->ports[i]);
+		unregister_hdlc_device(dev);
+	}
+
+	fst_disable_intr(card);
+	free_irq(card->irq, card);
+
+	iounmap(card->ctlmem);
+	iounmap(card->mem);
+	pci_release_regions(pdev);
+	if (card->family == FST_FAMILY_TXU) {
+		/*
+		 * Free dma buffers
+		 */
+		pci_free_consistent(card->device, FST_MAX_MTU,
+				    card->rx_dma_handle_host,
+				    card->rx_dma_handle_card);
+		pci_free_consistent(card->device, FST_MAX_MTU,
+				    card->tx_dma_handle_host,
+				    card->tx_dma_handle_card);
+	}
+	fst_card_array[card->card_no] = NULL;
+}
+
+static struct pci_driver fst_driver = {
+	.name		= FST_NAME,
+	.id_table	= fst_pci_dev_id,
+	.probe		= fst_add_one,
+	.remove		= __devexit_p(fst_remove_one),
+	.suspend	= NULL,
+	.resume		= NULL,
+};
+
+static int __init
+fst_init(void)
+{
+	int i;
+
+	for (i = 0; i < FST_MAX_CARDS; i++)
+		fst_card_array[i] = NULL;
+	spin_lock_init(&fst_work_q_lock);
+	return pci_register_driver(&fst_driver);
+}
+
+static void __exit
+fst_cleanup_module(void)
+{
+	pr_info("FarSync WAN driver unloading\n");
+	pci_unregister_driver(&fst_driver);
+}
+
+module_init(fst_init);
+module_exit(fst_cleanup_module);
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/farsync.h b/ap/os/linux/linux-3.4.x/drivers/net/wan/farsync.h
new file mode 100644
index 0000000..6b27e7c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/farsync.h
@@ -0,0 +1,351 @@
+/*
+ *      FarSync X21 driver for Linux
+ *
+ *      Actually sync driver for X.21, V.35 and V.24 on FarSync T-series cards
+ *
+ *      Copyright (C) 2001 FarSite Communications Ltd.
+ *      www.farsite.co.uk
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ *      Author: R.J.Dunlop      <bob.dunlop@farsite.co.uk>
+ *
+ *      For the most part this file only contains structures and information
+ *      that is visible to applications outside the driver. Shared memory
+ *      layout etc is internal to the driver and described within farsync.c.
+ *      Overlap exists in that the values used for some fields within the
+ *      ioctl interface extend into the card's firmware interface, so values
+ *      in this file may not be changed arbitrarily.
+ */
+
+/*      What's in a name
+ *
+ *      The project name for this driver is Oscar. The driver is intended to be
+ *      used with the FarSite T-Series cards (T2P & T4P) running in the high
+ *      speed frame shifter mode. This is sometimes referred to as X.21 mode
+ *      which is a complete misnomer as the card continues to support V.24 and
+ *      V.35 as well as X.21.
+ *
+ *      A short common prefix is useful for routines within the driver to avoid
+ *      conflict with other similar drivers, and I have chosen to use "fst_"
+ *      for this purpose (FarSite T-series).
+ *
+ *      Finally the device driver needs a short network interface name. Since
+ *      "hdlc" is already in use I've chosen the even less informative "sync"
+ *      for the present.
+ */
+#define FST_NAME                "fst"           /* In debug/info etc */
+#define FST_NDEV_NAME           "sync"          /* For net interface */
+#define FST_DEV_NAME            "farsync"       /* For misc interfaces */
+
+
+/*      User version number
+ *
+ *      This version number is incremented with each official release of the
+ *      package and is a simplified number for normal user reference.
+ *      Individual files are tracked by the version control system and may
+ *      have individual versions (or IDs) that move much faster than the
+ *      release version as individual updates are tracked.
+ */
+#define FST_USER_VERSION        "1.04"
+
+
+/*      Ioctl call command values
+ */
+#define FSTWRITE        (SIOCDEVPRIVATE+10)
+#define FSTCPURESET     (SIOCDEVPRIVATE+11)
+#define FSTCPURELEASE   (SIOCDEVPRIVATE+12)
+#define FSTGETCONF      (SIOCDEVPRIVATE+13)
+#define FSTSETCONF      (SIOCDEVPRIVATE+14)
+
+
+/*      FSTWRITE
+ *
+ *      Used to write a block of data (firmware etc) before the card is running
+ */
+struct fstioc_write {
+        unsigned int  size;
+        unsigned int  offset;
+        unsigned char data[0];
+};
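+
+/*      Illustrative user-space sketch (not part of the driver): writing one
+ *      block of a firmware image with FSTWRITE.  The header layout matches
+ *      struct fstioc_write above; the interface name, file descriptor and
+ *      image variables are assumptions made for the example only.
+ *
+ *          struct fstioc_write *wr = malloc(sizeof(*wr) + chunk_len);
+ *          struct ifreq ifr;
+ *
+ *          wr->size   = chunk_len;
+ *          wr->offset = chunk_off;
+ *          memcpy(wr->data, image + chunk_off, chunk_len);
+ *          strcpy(ifr.ifr_name, "sync0");
+ *          ifr.ifr_data = (char *) wr;
+ *          ioctl(fd, FSTWRITE, &ifr);
+ */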
+
+
+/*      FSTCPURESET and FSTCPURELEASE
+ *
+ *      These take no additional data.
+ *      FSTCPURESET forces the card's CPU into a reset state and holds it there.
+ *      FSTCPURELEASE releases the CPU from this reset state, allowing it to
+ *      run; the reset vector should be set up before this ioctl is issued.
+ */
+
+/*      FSTGETCONF and FSTSETCONF
+ *
+ *      Get and set a card/port's configuration.
+ *      In order to allow selective setting of items, and for the kernel to
+ *      indicate a partial status response, the first field "valid" is a
+ *      bitmask indicating which other fields in the structure are valid.
+ *      Many of the field names in this structure match those used in the
+ *      firmware shared memory configuration interface and come originally from
+ *      the NT header file Smc.h
+ *
+ *      When used with FSTGETCONF this structure should be zeroed before use.
+ *      This is to allow for possible future expansion when some of the fields
+ *      might be used to indicate a different (expanded) structure.
+ */
+struct fstioc_info {
+        unsigned int   valid;           /* Bits of structure that are valid */
+        unsigned int   nports;          /* Number of serial ports */
+        unsigned int   type;            /* Type index of card */
+        unsigned int   state;           /* State of card */
+        unsigned int   index;           /* Index of port ioctl was issued on */
+        unsigned int   smcFirmwareVersion;
+        unsigned long  kernelVersion;   /* What Kernel version we are working with */
+        unsigned short lineInterface;   /* Physical interface type */
+        unsigned char  proto;           /* Line protocol */
+        unsigned char  internalClock;   /* 1 => internal clock, 0 => external */
+        unsigned int   lineSpeed;       /* Speed in bps */
+        unsigned int   v24IpSts;        /* V.24 control input status */
+        unsigned int   v24OpSts;        /* V.24 control output status */
+        unsigned short clockStatus;     /* lsb: 0=> present, 1=> absent */
+        unsigned short cableStatus;     /* lsb: 0=> present, 1=> absent */
+        unsigned short cardMode;        /* lsb: LED id mode */
+        unsigned short debug;           /* Debug flags */
+        unsigned char  transparentMode; /* Not used, always 0 */
+        unsigned char  invertClock;     /* Invert clock feature for syncing */
+        unsigned char  startingSlot;    /* Time slot to use for start of tx */
+        unsigned char  clockSource;     /* External or internal */
+        unsigned char  framing;         /* E1, T1 or J1 */
+        unsigned char  structure;       /* unframed, double, crc4, f4, f12, */
+                                        /* f24 f72 */
+        unsigned char  interface;       /* rj48c or bnc */
+        unsigned char  coding;          /* hdb3 b8zs */
+        unsigned char  lineBuildOut;    /* 0, -7.5, -15, -22 */
+        unsigned char  equalizer;       /* short or long haul settings */
+        unsigned char  loopMode;        /* various loopbacks */
+        unsigned char  range;           /* cable lengths */
+        unsigned char  txBufferMode;    /* tx elastic buffer depth */
+        unsigned char  rxBufferMode;    /* rx elastic buffer depth */
+        unsigned char  losThreshold;    /* Attenuation on LOS signal */
+        unsigned char  idleCode;        /* Value to send as idle timeslot */
+        unsigned int   receiveBufferDelay; /* delay through rx buffer timeslots */
+        unsigned int   framingErrorCount; /* framing errors */
+        unsigned int   codeViolationCount; /* code violations */
+        unsigned int   crcErrorCount;   /* CRC errors */
+        int            lineAttenuation; /* in dB*/
+        unsigned short lossOfSignal;
+        unsigned short receiveRemoteAlarm;
+        unsigned short alarmIndicationSignal;
+};
+
+/* "valid" bitmask */
+#define FSTVAL_NONE     0x00000000      /* Nothing valid (firmware not running).
+                                         * Slight misnomer. In fact nports,
+                                         * type, state and index will be set
+                                         * based on hardware detected.
+                                         */
+#define FSTVAL_OMODEM   0x0000001F      /* First 5 bits correspond to the
+                                         * output status bits defined for
+                                         * v24OpSts
+                                         */
+#define FSTVAL_SPEED    0x00000020      /* internalClock, lineSpeed, clockStatus
+                                         */
+#define FSTVAL_CABLE    0x00000040      /* lineInterface, cableStatus */
+#define FSTVAL_IMODEM   0x00000080      /* v24IpSts */
+#define FSTVAL_CARD     0x00000100      /* nports, type, state, index,
+                                         * smcFirmwareVersion
+                                         */
+#define FSTVAL_PROTO    0x00000200      /* proto */
+#define FSTVAL_MODE     0x00000400      /* cardMode */
+#define FSTVAL_PHASE    0x00000800      /* Clock phase */
+#define FSTVAL_TE1      0x00001000      /* T1E1 Configuration */
+#define FSTVAL_DEBUG    0x80000000      /* debug */
+#define FSTVAL_ALL      0x00001FFF      /* Note: does not include DEBUG flag */
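+
+/*      Illustrative user-space sketch (not part of the driver): reading the
+ *      configuration with FSTGETCONF and testing one of the "valid" bits.
+ *      The interface name and file descriptor are assumptions made for the
+ *      example only; the structure is zeroed first as required above.
+ *
+ *          struct fstioc_info info = { 0 };
+ *          struct ifreq ifr;
+ *
+ *          strcpy(ifr.ifr_name, "sync0");
+ *          ifr.ifr_data = (char *) &info;
+ *          if (ioctl(fd, FSTGETCONF, &ifr) == 0 && (info.valid & FSTVAL_CARD))
+ *              printf("type %u, %u ports\n", info.type, info.nports);
+ */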
+
+/* "type" */
+#define FST_TYPE_NONE   0               /* Probably should never happen */
+#define FST_TYPE_T2P    1               /* T2P X21 2 port card */
+#define FST_TYPE_T4P    2               /* T4P X21 4 port card */
+#define FST_TYPE_T1U    3               /* T1U X21 1 port card */
+#define FST_TYPE_T2U    4               /* T2U X21 2 port card */
+#define FST_TYPE_T4U    5               /* T4U X21 4 port card */
+#define FST_TYPE_TE1    6               /* T1E1 X21 1 port card */
+
+/* "family" */
+#define FST_FAMILY_TXP  0               /* T2P or T4P */
+#define FST_FAMILY_TXU  1               /* T1U or T2U or T4U */
+
+/* "state" */
+#define FST_UNINIT      0               /* Raw uninitialised state following
+                                         * system startup */
+#define FST_RESET       1               /* Processor held in reset state */
+#define FST_DOWNLOAD    2               /* Card being downloaded */
+#define FST_STARTING    3               /* Released following download */
+#define FST_RUNNING     4               /* Processor running */
+#define FST_BADVERSION  5               /* Bad shared memory version detected */
+#define FST_HALTED      6               /* Processor flagged a halt */
+#define FST_IFAILED     7               /* Firmware issued initialisation failed
+                                         * interrupt
+                                         */
+/* "lineInterface" */
+#define V24             1
+#define X21             2
+#define V35             3
+#define X21D            4
+#define T1              5
+#define E1              6
+#define J1              7
+
+/* "proto" */
+#define FST_RAW         4               /* Two way raw packets */
+#define FST_GEN_HDLC    5               /* Using "Generic HDLC" module */
+
+/* "internalClock" */
+#define INTCLK          1
+#define EXTCLK          0
+
+/* "v24IpSts" bitmask */
+#define IPSTS_CTS       0x00000001      /* Clear To Send (Indicate for X.21) */
+#define IPSTS_INDICATE  IPSTS_CTS
+#define IPSTS_DSR       0x00000002      /* Data Set Ready (T2P Port A) */
+#define IPSTS_DCD       0x00000004      /* Data Carrier Detect */
+#define IPSTS_RI        0x00000008      /* Ring Indicator (T2P Port A) */
+#define IPSTS_TMI       0x00000010      /* Test Mode Indicator (Not Supported)*/
+
+/* "v24OpSts" bitmask */
+#define OPSTS_RTS       0x00000001      /* Request To Send (Control for X.21) */
+#define OPSTS_CONTROL   OPSTS_RTS
+#define OPSTS_DTR       0x00000002      /* Data Terminal Ready */
+#define OPSTS_DSRS      0x00000004      /* Data Signalling Rate Select (Not
+                                         * Supported) */
+#define OPSTS_SS        0x00000008      /* Select Standby (Not Supported) */
+#define OPSTS_LL        0x00000010      /* Maintenance Test (Not Supported) */
+
+/* "cardMode" bitmask */
+#define CARD_MODE_IDENTIFY      0x0001
+
+/* 
+ * Constants for T1/E1 configuration
+ */
+
+/*
+ * Clock source
+ */
+#define CLOCKING_SLAVE       0
+#define CLOCKING_MASTER      1
+
+/*
+ * Framing
+ */
+#define FRAMING_E1           0
+#define FRAMING_J1           1
+#define FRAMING_T1           2
+
+/*
+ * Structure
+ */
+#define STRUCTURE_UNFRAMED   0
+#define STRUCTURE_E1_DOUBLE  1
+#define STRUCTURE_E1_CRC4    2
+#define STRUCTURE_E1_CRC4M   3
+#define STRUCTURE_T1_4       4
+#define STRUCTURE_T1_12      5
+#define STRUCTURE_T1_24      6
+#define STRUCTURE_T1_72      7
+
+/*
+ * Interface
+ */
+#define INTERFACE_RJ48C      0
+#define INTERFACE_BNC        1
+
+/*
+ * Coding
+ */
+
+#define CODING_HDB3          0
+#define CODING_NRZ           1
+#define CODING_CMI           2
+#define CODING_CMI_HDB3      3
+#define CODING_CMI_B8ZS      4
+#define CODING_AMI           5
+#define CODING_AMI_ZCS       6
+#define CODING_B8ZS          7
+
+/*
+ * Line Build Out
+ */
+#define LBO_0dB              0
+#define LBO_7dB5             1
+#define LBO_15dB             2
+#define LBO_22dB5            3
+
+/*
+ * Range for long haul t1 > 655ft
+ */
+#define RANGE_0_133_FT       0
+#define RANGE_0_40_M         RANGE_0_133_FT
+#define RANGE_133_266_FT     1
+#define RANGE_40_81_M        RANGE_133_266_FT
+#define RANGE_266_399_FT     2
+#define RANGE_81_122_M       RANGE_266_399_FT
+#define RANGE_399_533_FT     3
+#define RANGE_122_162_M       RANGE_399_533_FT
+#define RANGE_533_655_FT     4
+#define RANGE_162_200_M      RANGE_533_655_FT
+/*
+ * Receive Equaliser
+ */
+#define EQUALIZER_SHORT      0
+#define EQUALIZER_LONG       1
+
+/*
+ * Loop modes
+ */
+#define LOOP_NONE            0
+#define LOOP_LOCAL           1
+#define LOOP_PAYLOAD_EXC_TS0 2
+#define LOOP_PAYLOAD_INC_TS0 3
+#define LOOP_REMOTE          4
+
+/*
+ * Buffer modes
+ */
+#define BUFFER_2_FRAME       0
+#define BUFFER_1_FRAME       1
+#define BUFFER_96_BIT        2
+#define BUFFER_NONE          3
+
+/*      Debug support
+ *
+ *      These should only be enabled for development kernels, production code
+ *      should define FST_DEBUG=0 in order to exclude the code.
+ *      Setting FST_DEBUG=1 will include all the debug code but in a disabled
+ *      state, use the FSTSETCONF ioctl to enable specific debug actions, or
+ *      FST_DEBUG can be set to prime the debug selection.
+ */
+#define FST_DEBUG       0x0000
+#if FST_DEBUG
+
+extern int fst_debug_mask;              /* Bit mask of actions to debug, bits
+                                         * listed below. Note: Bit 0 is used
+                                         * to trigger the inclusion of this
+                                         * code, without enabling any actions.
+                                         */
+#define DBG_INIT        0x0002          /* Card detection and initialisation */
+#define DBG_OPEN        0x0004          /* Open and close sequences */
+#define DBG_PCI         0x0008          /* PCI config operations */
+#define DBG_IOCTL       0x0010          /* Ioctls and other config */
+#define DBG_INTR        0x0020          /* Interrupt routines (be careful) */
+#define DBG_TX          0x0040          /* Packet transmission */
+#define DBG_RX          0x0080          /* Packet reception */
+#define DBG_CMD         0x0100          /* Port command issuing */
+
+#define DBG_ASS         0xFFFF          /* Assert like statements. Code that
+                                         * should never be reached, if you see
+                                         * one of these then I've been an ass
+                                         */
+#endif  /* FST_DEBUG */
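+
+/*      Illustrative sketch (assumes a kernel built with FST_DEBUG non-zero):
+ *      selecting debug actions from user space via FSTSETCONF.  Only the
+ *      ioctl, structure, flag and bit names above come from the driver; the
+ *      interface name and file descriptor are assumptions for the example.
+ *
+ *          struct fstioc_info info = { 0 };
+ *          struct ifreq ifr;
+ *
+ *          info.valid = FSTVAL_DEBUG;
+ *          info.debug = DBG_TX | DBG_RX;
+ *          strcpy(ifr.ifr_name, "sync0");
+ *          ifr.ifr_data = (char *) &info;
+ *          ioctl(fd, FSTSETCONF, &ifr);
+ */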
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/hd64570.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/hd64570.c
new file mode 100644
index 0000000..cf49033
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/hd64570.c
@@ -0,0 +1,721 @@
+/*
+ * Hitachi SCA HD64570 driver for Linux
+ *
+ * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Source of information: Hitachi HD64570 SCA User's Manual
+ *
+ * We use the following SCA memory map:
+ *
+ * Packet buffer descriptor rings - starting from winbase or win0base:
+ * rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring
+ * tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring
+ * rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used)
+ * tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used)
+ *
+ * Packet data buffers - starting from winbase + buff_offset:
+ * rx_ring_buffers * HDLC_MAX_MRU     = logical channel #0 RX buffers
+ * tx_ring_buffers * HDLC_MAX_MRU     = logical channel #0 TX buffers
+ * rx_ring_buffers * HDLC_MAX_MRU     = logical channel #1 RX buffers (if used)
+ * tx_ring_buffers * HDLC_MAX_MRU     = logical channel #1 TX buffers (if used)
+ */
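+
+/*
+ * Worked example of the layout above, using illustrative ring sizes only:
+ * with rx_ring_buffers = 16 and tx_ring_buffers = 16 the descriptor table
+ * holds 2 channels * 32 descriptors starting at winbase (or win0base), and
+ * the data buffers start at winbase + buff_offset, one HDLC_MAX_MRU sized
+ * buffer per descriptor, laid out in the same RX#0, TX#0, RX#1, TX#1 order.
+ */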
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/hdlc.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include "hd64570.h"
+
+#define get_msci(port)	  (phy_node(port) ?   MSCI1_OFFSET :   MSCI0_OFFSET)
+#define get_dmac_rx(port) (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
+#define get_dmac_tx(port) (phy_node(port) ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)
+
+#define SCA_INTR_MSCI(node)    (node ? 0x10 : 0x01)
+#define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02)
+#define SCA_INTR_DMAC_TX(node) (node ? 0x40 : 0x04)
+
+
+static inline struct net_device *port_to_dev(port_t *port)
+{
+	return port->dev;
+}
+
+static inline int sca_intr_status(card_t *card)
+{
+	u8 result = 0;
+	u8 isr0 = sca_in(ISR0, card);
+	u8 isr1 = sca_in(ISR1, card);
+
+	if (isr1 & 0x03) result |= SCA_INTR_DMAC_RX(0);
+	if (isr1 & 0x0C) result |= SCA_INTR_DMAC_TX(0);
+	if (isr1 & 0x30) result |= SCA_INTR_DMAC_RX(1);
+	if (isr1 & 0xC0) result |= SCA_INTR_DMAC_TX(1);
+	if (isr0 & 0x0F) result |= SCA_INTR_MSCI(0);
+	if (isr0 & 0xF0) result |= SCA_INTR_MSCI(1);
+
+	if (!(result & SCA_INTR_DMAC_TX(0)))
+		if (sca_in(DSR_TX(0), card) & DSR_EOM)
+			result |= SCA_INTR_DMAC_TX(0);
+	if (!(result & SCA_INTR_DMAC_TX(1)))
+		if (sca_in(DSR_TX(1), card) & DSR_EOM)
+			result |= SCA_INTR_DMAC_TX(1);
+
+	return result;
+}
+
+static inline port_t* dev_to_port(struct net_device *dev)
+{
+	return dev_to_hdlc(dev)->priv;
+}
+
+static inline u16 next_desc(port_t *port, u16 desc, int transmit)
+{
+	return (desc + 1) % (transmit ? port_to_card(port)->tx_ring_buffers
+			     : port_to_card(port)->rx_ring_buffers);
+}
+
+
+static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
+{
+	u16 rx_buffs = port_to_card(port)->rx_ring_buffers;
+	u16 tx_buffs = port_to_card(port)->tx_ring_buffers;
+
+	desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc.
+	return log_node(port) * (rx_buffs + tx_buffs) +
+		transmit * rx_buffs + desc;
+}
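+
+/* Worked example (illustrative ring sizes): with rx_ring_buffers = 16 and
+ * tx_ring_buffers = 16, RX descriptor 3 of logical channel 1 is absolute
+ * descriptor 1 * (16 + 16) + 0 * 16 + 3 = 35, i.e. it follows both of
+ * channel #0's rings.
+ */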
+
+
+static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
+{
+	/* Descriptor offset always fits in 16 bits */
+	return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
+}
+
+
+static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
+					     int transmit)
+{
+#ifdef PAGE0_ALWAYS_MAPPED
+	return (pkt_desc __iomem *)(win0base(port_to_card(port))
+				    + desc_offset(port, desc, transmit));
+#else
+	return (pkt_desc __iomem *)(winbase(port_to_card(port))
+				    + desc_offset(port, desc, transmit));
+#endif
+}
+
+
+static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
+{
+	return port_to_card(port)->buff_offset +
+		desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
+}
+
+
+static inline void sca_set_carrier(port_t *port)
+{
+	if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD)) {
+#ifdef DEBUG_LINK
+		printk(KERN_DEBUG "%s: sca_set_carrier on\n",
+		       port_to_dev(port)->name);
+#endif
+		netif_carrier_on(port_to_dev(port));
+	} else {
+#ifdef DEBUG_LINK
+		printk(KERN_DEBUG "%s: sca_set_carrier off\n",
+		       port_to_dev(port)->name);
+#endif
+		netif_carrier_off(port_to_dev(port));
+	}
+}
+
+
+static void sca_init_port(port_t *port)
+{
+	card_t *card = port_to_card(port);
+	int transmit, i;
+
+	port->rxin = 0;
+	port->txin = 0;
+	port->txlast = 0;
+
+#ifndef PAGE0_ALWAYS_MAPPED
+	openwin(card, 0);
+#endif
+
+	for (transmit = 0; transmit < 2; transmit++) {
+		u16 dmac = transmit ? get_dmac_tx(port) : get_dmac_rx(port);
+		u16 buffs = transmit ? card->tx_ring_buffers
+			: card->rx_ring_buffers;
+
+		for (i = 0; i < buffs; i++) {
+			pkt_desc __iomem *desc = desc_address(port, i, transmit);
+			u16 chain_off = desc_offset(port, i + 1, transmit);
+			u32 buff_off = buffer_offset(port, i, transmit);
+
+			writew(chain_off, &desc->cp);
+			writel(buff_off, &desc->bp);
+			writew(0, &desc->len);
+			writeb(0, &desc->stat);
+		}
+
+		/* DMA disable - to halt state */
+		sca_out(0, transmit ? DSR_TX(phy_node(port)) :
+			DSR_RX(phy_node(port)), card);
+		/* software ABORT - to initial state */
+		sca_out(DCR_ABORT, transmit ? DCR_TX(phy_node(port)) :
+			DCR_RX(phy_node(port)), card);
+
+		/* current desc addr */
+		sca_out(0, dmac + CPB, card); /* pointer base */
+		sca_outw(desc_offset(port, 0, transmit), dmac + CDAL, card);
+		if (!transmit)
+			sca_outw(desc_offset(port, buffs - 1, transmit),
+				 dmac + EDAL, card);
+		else
+			sca_outw(desc_offset(port, 0, transmit), dmac + EDAL,
+				 card);
+
+		/* clear frame end interrupt counter */
+		sca_out(DCR_CLEAR_EOF, transmit ? DCR_TX(phy_node(port)) :
+			DCR_RX(phy_node(port)), card);
+
+		if (!transmit) { /* Receive */
+			/* set buffer length */
+			sca_outw(HDLC_MAX_MRU, dmac + BFLL, card);
+			/* Chain mode, Multi-frame */
+			sca_out(0x14, DMR_RX(phy_node(port)), card);
+			sca_out(DIR_EOME | DIR_BOFE, DIR_RX(phy_node(port)),
+				card);
+			/* DMA enable */
+			sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
+		} else {	/* Transmit */
+			/* Chain mode, Multi-frame */
+			sca_out(0x14, DMR_TX(phy_node(port)), card);
+			/* enable underflow interrupts */
+			sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card);
+		}
+	}
+	sca_set_carrier(port);
+}
+
+
+#ifdef NEED_SCA_MSCI_INTR
+/* MSCI interrupt service */
+static inline void sca_msci_intr(port_t *port)
+{
+	u16 msci = get_msci(port);
+	card_t* card = port_to_card(port);
+	u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */
+
+	/* Reset MSCI TX underrun and CDCD status bit */
+	sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card);
+
+	if (stat & ST1_UDRN) {
+		/* TX Underrun error detected */
+		port_to_dev(port)->stats.tx_errors++;
+		port_to_dev(port)->stats.tx_fifo_errors++;
+	}
+
+	if (stat & ST1_CDCD)
+		sca_set_carrier(port);
+}
+#endif
+
+
+static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
+			  u16 rxin)
+{
+	struct net_device *dev = port_to_dev(port);
+	struct sk_buff *skb;
+	u16 len;
+	u32 buff;
+	u32 maxlen;
+	u8 page;
+
+	len = readw(&desc->len);
+	skb = dev_alloc_skb(len);
+	if (!skb) {
+		dev->stats.rx_dropped++;
+		return;
+	}
+
+	buff = buffer_offset(port, rxin, 0);
+	page = buff / winsize(card);
+	buff = buff % winsize(card);
+	maxlen = winsize(card) - buff;
+
+	openwin(card, page);
+
+	if (len > maxlen) {
+		memcpy_fromio(skb->data, winbase(card) + buff, maxlen);
+		openwin(card, page + 1);
+		memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
+	} else
+		memcpy_fromio(skb->data, winbase(card) + buff, len);
+
+#ifndef PAGE0_ALWAYS_MAPPED
+	openwin(card, 0);	/* select pkt_desc table page back */
+#endif
+	skb_put(skb, len);
+#ifdef DEBUG_PKT
+	printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
+	debug_frame(skb);
+#endif
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += skb->len;
+	skb->protocol = hdlc_type_trans(skb, dev);
+	netif_rx(skb);
+}
+
+
+/* Receive DMA interrupt service */
+static inline void sca_rx_intr(port_t *port)
+{
+	struct net_device *dev = port_to_dev(port);
+	u16 dmac = get_dmac_rx(port);
+	card_t *card = port_to_card(port);
+	u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */
+
+	/* Reset DSR status bits */
+	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
+		DSR_RX(phy_node(port)), card);
+
+	if (stat & DSR_BOF)
+		/* Dropped one or more frames */
+		dev->stats.rx_over_errors++;
+
+	while (1) {
+		u32 desc_off = desc_offset(port, port->rxin, 0);
+		pkt_desc __iomem *desc;
+		u32 cda = sca_inw(dmac + CDAL, card);
+
+		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
+			break;	/* No frame received */
+
+		desc = desc_address(port, port->rxin, 0);
+		stat = readb(&desc->stat);
+		if (!(stat & ST_RX_EOM))
+			port->rxpart = 1; /* partial frame received */
+		else if ((stat & ST_ERROR_MASK) || port->rxpart) {
+			dev->stats.rx_errors++;
+			if (stat & ST_RX_OVERRUN)
+				dev->stats.rx_fifo_errors++;
+			else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
+					  ST_RX_RESBIT)) || port->rxpart)
+				dev->stats.rx_frame_errors++;
+			else if (stat & ST_RX_CRC)
+				dev->stats.rx_crc_errors++;
+			if (stat & ST_RX_EOM)
+				port->rxpart = 0; /* received last fragment */
+		} else
+			sca_rx(card, port, desc, port->rxin);
+
+		/* Set new error descriptor address */
+		sca_outw(desc_off, dmac + EDAL, card);
+		port->rxin = next_desc(port, port->rxin, 0);
+	}
+
+	/* make sure RX DMA is enabled */
+	sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
+}
+
+
+/* Transmit DMA interrupt service */
+static inline void sca_tx_intr(port_t *port)
+{
+	struct net_device *dev = port_to_dev(port);
+	u16 dmac = get_dmac_tx(port);
+	card_t* card = port_to_card(port);
+	u8 stat;
+
+	spin_lock(&port->lock);
+
+	stat = sca_in(DSR_TX(phy_node(port)), card); /* read DMA Status */
+
+	/* Reset DSR status bits */
+	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
+		DSR_TX(phy_node(port)), card);
+
+	while (1) {
+		pkt_desc __iomem *desc;
+
+		u32 desc_off = desc_offset(port, port->txlast, 1);
+		u32 cda = sca_inw(dmac + CDAL, card);
+		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
+			break;	/* Transmitter is/will_be sending this frame */
+
+		desc = desc_address(port, port->txlast, 1);
+		dev->stats.tx_packets++;
+		dev->stats.tx_bytes += readw(&desc->len);
+		writeb(0, &desc->stat);	/* Free descriptor */
+		port->txlast = next_desc(port, port->txlast, 1);
+	}
+
+	netif_wake_queue(dev);
+	spin_unlock(&port->lock);
+}
+
+
+static irqreturn_t sca_intr(int irq, void* dev_id)
+{
+	card_t *card = dev_id;
+	int i;
+	u8 stat;
+	int handled = 0;
+	u8 page = sca_get_page(card);
+
+	while((stat = sca_intr_status(card)) != 0) {
+		handled = 1;
+		for (i = 0; i < 2; i++) {
+			port_t *port = get_port(card, i);
+			if (port) {
+				if (stat & SCA_INTR_MSCI(i))
+					sca_msci_intr(port);
+
+				if (stat & SCA_INTR_DMAC_RX(i))
+					sca_rx_intr(port);
+
+				if (stat & SCA_INTR_DMAC_TX(i))
+					sca_tx_intr(port);
+			}
+		}
+	}
+
+	openwin(card, page);		/* Restore original page */
+	return IRQ_RETVAL(handled);
+}
+
+
+static void sca_set_port(port_t *port)
+{
+	card_t* card = port_to_card(port);
+	u16 msci = get_msci(port);
+	u8 md2 = sca_in(msci + MD2, card);
+	unsigned int tmc, br = 10, brv = 1024;
+
+
+	if (port->settings.clock_rate > 0) {
+		/* Try lower br for better accuracy*/
+		do {
+			br--;
+			brv >>= 1; /* brv = 2^9 = 512 max in specs */
+
+			/* Baud Rate = CLOCK_BASE / TMC / 2^BR */
+			tmc = CLOCK_BASE / brv / port->settings.clock_rate;
+		} while (br > 1 && tmc <= 128);
+
+		if (tmc < 1) {
+			tmc = 1;
+			br = 0;	/* For baud=CLOCK_BASE we use tmc=1 br=0 */
+			brv = 1;
+		} else if (tmc > 255)
+			tmc = 256; /* tmc=0 means 256 - low baud rates */
+
+		port->settings.clock_rate = CLOCK_BASE / brv / tmc;
+	} else {
+		br = 9; /* Minimum clock rate */
+		tmc = 256;	/* 8bit = 0 */
+		port->settings.clock_rate = CLOCK_BASE / (256 * 512);
+	}
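+	/*
+	 * Worked example (illustrative only - CLOCK_BASE is board specific,
+	 * the 9.8304 MHz value here is just an assumption): requesting
+	 * 64000 bps with CLOCK_BASE = 9830400 ends the search above at
+	 * br = 1 (brv = 2) and tmc = 76, so the reported clock_rate becomes
+	 * 9830400 / 2 / 76 = 64673 bps.
+	 */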
+
+	port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
+	port->txs = (port->txs & ~CLK_BRG_MASK) | br;
+	port->tmc = tmc;
+
+	/* baud divisor - time constant*/
+	sca_out(port->tmc, msci + TMC, card);
+
+	/* Set BRG bits */
+	sca_out(port->rxs, msci + RXS, card);
+	sca_out(port->txs, msci + TXS, card);
+
+	if (port->settings.loopback)
+		md2 |= MD2_LOOPBACK;
+	else
+		md2 &= ~MD2_LOOPBACK;
+
+	sca_out(md2, msci + MD2, card);
+
+}
+
+
+static void sca_open(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+	card_t* card = port_to_card(port);
+	u16 msci = get_msci(port);
+	u8 md0, md2;
+
+	switch(port->encoding) {
+	case ENCODING_NRZ:	md2 = MD2_NRZ;		break;
+	case ENCODING_NRZI:	md2 = MD2_NRZI;		break;
+	case ENCODING_FM_MARK:	md2 = MD2_FM_MARK;	break;
+	case ENCODING_FM_SPACE:	md2 = MD2_FM_SPACE;	break;
+	default:		md2 = MD2_MANCHESTER;
+	}
+
+	if (port->settings.loopback)
+		md2 |= MD2_LOOPBACK;
+
+	switch(port->parity) {
+	case PARITY_CRC16_PR0:	     md0 = MD0_HDLC | MD0_CRC_16_0;  break;
+	case PARITY_CRC16_PR1:	     md0 = MD0_HDLC | MD0_CRC_16;    break;
+	case PARITY_CRC16_PR0_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU_0; break;
+	case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU;   break;
+	default:		     md0 = MD0_HDLC | MD0_CRC_NONE;
+	}
+
+	sca_out(CMD_RESET, msci + CMD, card);
+	sca_out(md0, msci + MD0, card);
+	sca_out(0x00, msci + MD1, card); /* no address field check */
+	sca_out(md2, msci + MD2, card);
+	sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
+	sca_out(CTL_IDLE, msci + CTL, card);
+
+	/* Allow at least 8 bytes before requesting RX DMA operation */
+	/* TX with higher priority and possibly with shorter transfers */
+	sca_out(0x07, msci + RRC, card); /* +1=RXRDY/DMA activation condition*/
+	sca_out(0x10, msci + TRC0, card); /* = TXRDY/DMA activation condition*/
+	sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactiv condition */
+
+/* We're using the following interrupts:
+   - TXINT (DMAC completed all transmissions, underrun or DCD change)
+   - all DMA interrupts
+*/
+	sca_set_carrier(port);
+
+	/* MSCI TX INT and RX INT A IRQ enable */
+	sca_out(IE0_TXINT | IE0_RXINTA, msci + IE0, card);
+	sca_out(IE1_UDRN | IE1_CDCD, msci + IE1, card);
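+	/* The channel 0 bits sit in the low nibbles of IER0/IER1, the
+	   channel 1 bits in the high nibbles - hence the phy_node()
+	   dependent masks below */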
+	sca_out(sca_in(IER0, card) | (phy_node(port) ? 0xC0 : 0x0C),
+		IER0, card); /* TXINT and RXINT */
+	/* enable DMA IRQ */
+	sca_out(sca_in(IER1, card) | (phy_node(port) ? 0xF0 : 0x0F),
+		IER1, card);
+
+	sca_out(port->tmc, msci + TMC, card); /* Restore registers */
+	sca_out(port->rxs, msci + RXS, card);
+	sca_out(port->txs, msci + TXS, card);
+	sca_out(CMD_TX_ENABLE, msci + CMD, card);
+	sca_out(CMD_RX_ENABLE, msci + CMD, card);
+
+	netif_start_queue(dev);
+}
+
+
+static void sca_close(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+	card_t* card = port_to_card(port);
+
+	/* reset channel */
+	sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port));
+	/* disable MSCI interrupts */
+	sca_out(sca_in(IER0, card) & (phy_node(port) ? 0x0F : 0xF0),
+		IER0, card);
+	/* disable DMA interrupts */
+	sca_out(sca_in(IER1, card) & (phy_node(port) ? 0x0F : 0xF0),
+		IER1, card);
+
+	netif_stop_queue(dev);
+}
+
+
+static int sca_attach(struct net_device *dev, unsigned short encoding,
+		      unsigned short parity)
+{
+	if (encoding != ENCODING_NRZ &&
+	    encoding != ENCODING_NRZI &&
+	    encoding != ENCODING_FM_MARK &&
+	    encoding != ENCODING_FM_SPACE &&
+	    encoding != ENCODING_MANCHESTER)
+		return -EINVAL;
+
+	if (parity != PARITY_NONE &&
+	    parity != PARITY_CRC16_PR0 &&
+	    parity != PARITY_CRC16_PR1 &&
+	    parity != PARITY_CRC16_PR0_CCITT &&
+	    parity != PARITY_CRC16_PR1_CCITT)
+		return -EINVAL;
+
+	dev_to_port(dev)->encoding = encoding;
+	dev_to_port(dev)->parity = parity;
+	return 0;
+}
+
+
+#ifdef DEBUG_RINGS
+static void sca_dump_rings(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+	card_t *card = port_to_card(port);
+	u16 cnt;
+#ifndef PAGE0_ALWAYS_MAPPED
+	u8 page = sca_get_page(card);
+
+	openwin(card, 0);
+#endif
+
+	printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
+	       sca_inw(get_dmac_rx(port) + CDAL, card),
+	       sca_inw(get_dmac_rx(port) + EDAL, card),
+	       sca_in(DSR_RX(phy_node(port)), card), port->rxin,
+	       sca_in(DSR_RX(phy_node(port)), card) & DSR_DE ? "" : "in");
+	for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++)
+		pr_cont(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
+	pr_cont("\n");
+
+	printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
+	       "last=%u %sactive",
+	       sca_inw(get_dmac_tx(port) + CDAL, card),
+	       sca_inw(get_dmac_tx(port) + EDAL, card),
+	       sca_in(DSR_TX(phy_node(port)), card), port->txin, port->txlast,
+	       sca_in(DSR_TX(phy_node(port)), card) & DSR_DE ? "" : "in");
+
+	for (cnt = 0; cnt < port_to_card(port)->tx_ring_buffers; cnt++)
+		pr_cont(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
+	pr_cont("\n");
+
+	printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, ST: %02x %02x %02x %02x,"
+	       " FST: %02x CST: %02x %02x\n",
+	       sca_in(get_msci(port) + MD0, card),
+	       sca_in(get_msci(port) + MD1, card),
+	       sca_in(get_msci(port) + MD2, card),
+	       sca_in(get_msci(port) + ST0, card),
+	       sca_in(get_msci(port) + ST1, card),
+	       sca_in(get_msci(port) + ST2, card),
+	       sca_in(get_msci(port) + ST3, card),
+	       sca_in(get_msci(port) + FST, card),
+	       sca_in(get_msci(port) + CST0, card),
+	       sca_in(get_msci(port) + CST1, card));
+
+	printk(KERN_DEBUG "ISR: %02x %02x %02x\n", sca_in(ISR0, card),
+	       sca_in(ISR1, card), sca_in(ISR2, card));
+
+#ifndef PAGE0_ALWAYS_MAPPED
+	openwin(card, page); /* Restore original page */
+#endif
+}
+#endif /* DEBUG_RINGS */
+
+
+static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+	card_t *card = port_to_card(port);
+	pkt_desc __iomem *desc;
+	u32 buff, len;
+	u8 page;
+	u32 maxlen;
+
+	spin_lock_irq(&port->lock);
+
+	desc = desc_address(port, port->txin + 1, 1);
+	BUG_ON(readb(&desc->stat)); /* previous xmit should stop queue */
+
+#ifdef DEBUG_PKT
+	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
+	debug_frame(skb);
+#endif
+
+	desc = desc_address(port, port->txin, 1);
+	buff = buffer_offset(port, port->txin, 1);
+	len = skb->len;
+	page = buff / winsize(card);
+	buff = buff % winsize(card);
+	maxlen = winsize(card) - buff;
+
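+	/* The frame's buffer may straddle a RAM window boundary; if it does,
+	   copy it in two pieces and switch the window in between */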
+	openwin(card, page);
+	if (len > maxlen) {
+		memcpy_toio(winbase(card) + buff, skb->data, maxlen);
+		openwin(card, page + 1);
+		memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen);
+	} else
+		memcpy_toio(winbase(card) + buff, skb->data, len);
+
+#ifndef PAGE0_ALWAYS_MAPPED
+	openwin(card, 0);	/* select pkt_desc table page back */
+#endif
+	writew(len, &desc->len);
+	writeb(ST_TX_EOM, &desc->stat);
+
+	port->txin = next_desc(port, port->txin, 1);
+	sca_outw(desc_offset(port, port->txin, 1),
+		 get_dmac_tx(port) + EDAL, card);
+
+	sca_out(DSR_DE, DSR_TX(phy_node(port)), card); /* Enable TX DMA */
+
+	desc = desc_address(port, port->txin + 1, 1);
+	if (readb(&desc->stat)) /* allow 1 packet gap */
+		netif_stop_queue(dev);
+
+	spin_unlock_irq(&port->lock);
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+
+#ifdef NEED_DETECT_RAM
+static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
+				    u32 ramsize)
+{
+	/* Round RAM size to 32 bits, fill from end to start */
+	u32 i = ramsize &= ~3;
+	u32 size = winsize(card);
+
+	openwin(card, (i - 4) / size); /* select last window */
+
+	do {
+		i -= 4;
+		if ((i + 4) % size == 0)
+			openwin(card, i / size);
+		writel(i ^ 0x12345678, rambase + i % size);
+	} while (i > 0);
+
+	for (i = 0; i < ramsize ; i += 4) {
+		if (i % size == 0)
+			openwin(card, i / size);
+
+		if (readl(rambase + i % size) != (i ^ 0x12345678))
+			break;
+	}
+
+	return i;
+}
+#endif /* NEED_DETECT_RAM */
+
+
+static void __devinit sca_init(card_t *card, int wait_states)
+{
+	sca_out(wait_states, WCRL, card); /* Wait Control */
+	sca_out(wait_states, WCRM, card);
+	sca_out(wait_states, WCRH, card);
+
+	sca_out(0, DMER, card);	/* DMA Master disable */
+	sca_out(0x03, PCR, card); /* DMA priority */
+	sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */
+	sca_out(0, DSR_TX(0), card);
+	sca_out(0, DSR_RX(1), card);
+	sca_out(0, DSR_TX(1), card);
+	sca_out(DMER_DME, DMER, card); /* DMA Master enable */
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/hd64570.h b/ap/os/linux/linux-3.4.x/drivers/net/wan/hd64570.h
new file mode 100644
index 0000000..e4f539a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/hd64570.h
@@ -0,0 +1,241 @@
+#ifndef __HD64570_H
+#define __HD64570_H
+
+/* SCA HD64570 register definitions - all addresses for mode 0 (8086 MPU)
+   and 1 (64180 MPU). For modes 2 and 3, XOR the address with 0x01.
+
+   Source: HD64570 SCA User's Manual
+*/
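+
+/* Illustrative helper only - nothing in this driver uses it: per the note
+   above, the mode 2/3 address of any register below is its mode 0/1 address
+   with the least significant bit flipped. */
+static inline unsigned int sca_mode23_reg(unsigned int reg)
+{
+	return reg ^ 0x01;
+}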
+
+
+
+/* SCA Control Registers */
+#define LPR    0x00		/* Low Power */
+
+/* Wait controller registers */
+#define PABR0  0x02		/* Physical Address Boundary 0 */
+#define PABR1  0x03		/* Physical Address Boundary 1 */
+#define WCRL   0x04		/* Wait Control L */
+#define WCRM   0x05		/* Wait Control M */
+#define WCRH   0x06		/* Wait Control H */
+
+#define PCR    0x08		/* DMA Priority Control */
+#define DMER   0x09		/* DMA Master Enable */
+
+
+/* Interrupt registers */
+#define ISR0   0x10		/* Interrupt Status 0  */
+#define ISR1   0x11		/* Interrupt Status 1  */
+#define ISR2   0x12		/* Interrupt Status 2  */
+
+#define IER0   0x14		/* Interrupt Enable 0  */
+#define IER1   0x15		/* Interrupt Enable 1  */
+#define IER2   0x16		/* Interrupt Enable 2  */
+
+#define ITCR   0x18		/* Interrupt Control */
+#define IVR    0x1A		/* Interrupt Vector */
+#define IMVR   0x1C		/* Interrupt Modified Vector */
+
+
+
+/* MSCI channel (port) 0 registers - offset 0x20
+   MSCI channel (port) 1 registers - offset 0x40 */
+
+#define MSCI0_OFFSET 0x20
+#define MSCI1_OFFSET 0x40
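+
+/* Worked example of the scheme: channel 1's MD2 register is reached at
+   MSCI1_OFFSET + MD2 = 0x40 + 0x10 = 0x50, which is the kind of address the
+   "msci + MD2" accesses in hd64570.c end up using */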
+
+#define TRBL   0x00		/* TX/RX buffer L */ 
+#define TRBH   0x01		/* TX/RX buffer H */ 
+#define ST0    0x02		/* Status 0 */
+#define ST1    0x03		/* Status 1 */
+#define ST2    0x04		/* Status 2 */
+#define ST3    0x05		/* Status 3 */
+#define FST    0x06		/* Frame Status  */
+#define IE0    0x08		/* Interrupt Enable 0 */
+#define IE1    0x09		/* Interrupt Enable 1 */
+#define IE2    0x0A		/* Interrupt Enable 2 */
+#define FIE    0x0B		/* Frame Interrupt Enable  */
+#define CMD    0x0C		/* Command */
+#define MD0    0x0E		/* Mode 0 */
+#define MD1    0x0F		/* Mode 1 */
+#define MD2    0x10		/* Mode 2 */
+#define CTL    0x11		/* Control */
+#define SA0    0x12		/* Sync/Address 0 */
+#define SA1    0x13		/* Sync/Address 1 */
+#define IDL    0x14		/* Idle Pattern */
+#define TMC    0x15		/* Time Constant */
+#define RXS    0x16		/* RX Clock Source */
+#define TXS    0x17		/* TX Clock Source */
+#define TRC0   0x18		/* TX Ready Control 0 */ 
+#define TRC1   0x19		/* TX Ready Control 1 */ 
+#define RRC    0x1A		/* RX Ready Control */ 
+#define CST0   0x1C		/* Current Status 0 */
+#define CST1   0x1D		/* Current Status 1 */
+
+
+/* Timer channel 0 (port 0 RX) registers - offset 0x60
+   Timer channel 1 (port 0 TX) registers - offset 0x68
+   Timer channel 2 (port 1 RX) registers - offset 0x70
+   Timer channel 3 (port 1 TX) registers - offset 0x78
+*/
+
+#define TIMER0RX_OFFSET 0x60
+#define TIMER0TX_OFFSET 0x68
+#define TIMER1RX_OFFSET 0x70
+#define TIMER1TX_OFFSET 0x78
+
+#define TCNTL  0x00		/* Up-counter L */
+#define TCNTH  0x01		/* Up-counter H */
+#define TCONRL 0x02		/* Constant L */
+#define TCONRH 0x03		/* Constant H */
+#define TCSR   0x04		/* Control/Status */
+#define TEPR   0x05		/* Expand Prescale */
+
+
+
+/* DMA channel 0 (port 0 RX) registers - offset 0x80
+   DMA channel 1 (port 0 TX) registers - offset 0xA0
+   DMA channel 2 (port 1 RX) registers - offset 0xC0
+   DMA channel 3 (port 1 TX) registers - offset 0xE0
+*/
+
+#define DMAC0RX_OFFSET 0x80
+#define DMAC0TX_OFFSET 0xA0
+#define DMAC1RX_OFFSET 0xC0
+#define DMAC1TX_OFFSET 0xE0
+
+#define BARL   0x00		/* Buffer Address L (chained block) */
+#define BARH   0x01		/* Buffer Address H (chained block) */
+#define BARB   0x02		/* Buffer Address B (chained block) */
+
+#define DARL   0x00		/* RX Destination Addr L (single block) */
+#define DARH   0x01		/* RX Destination Addr H (single block) */
+#define DARB   0x02		/* RX Destination Addr B (single block) */
+
+#define SARL   0x04		/* TX Source Address L (single block) */
+#define SARH   0x05		/* TX Source Address H (single block) */
+#define SARB   0x06		/* TX Source Address B (single block) */
+
+#define CPB    0x06		/* Chain Pointer Base (chained block) */
+
+#define CDAL   0x08		/* Current Descriptor Addr L (chained block) */
+#define CDAH   0x09		/* Current Descriptor Addr H (chained block) */
+#define EDAL   0x0A		/* Error Descriptor Addr L (chained block) */
+#define EDAH   0x0B		/* Error Descriptor Addr H (chained block) */
+#define BFLL   0x0C		/* RX Receive Buffer Length L (chained block)*/
+#define BFLH   0x0D		/* RX Receive Buffer Length H (chained block)*/
+#define BCRL   0x0E		/* Byte Count L */
+#define BCRH   0x0F		/* Byte Count H */
+#define DSR    0x10		/* DMA Status */
+#define DSR_RX(node) (DSR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
+#define DSR_TX(node) (DSR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
+#define DMR    0x11		/* DMA Mode */
+#define DMR_RX(node) (DMR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
+#define DMR_TX(node) (DMR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
+#define FCT    0x13		/* Frame End Interrupt Counter */
+#define FCT_RX(node) (FCT + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
+#define FCT_TX(node) (FCT + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
+#define DIR    0x14		/* DMA Interrupt Enable */
+#define DIR_RX(node) (DIR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
+#define DIR_TX(node) (DIR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
+#define DCR    0x15		/* DMA Command  */
+#define DCR_RX(node) (DCR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
+#define DCR_TX(node) (DCR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
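+
+/* Worked example: DSR_RX(1) = DSR + DMAC1RX_OFFSET = 0x10 + 0xC0 = 0xD0 and
+   DSR_TX(0) = 0x10 + 0xA0 = 0xB0 - each DMA channel has its own copy of
+   these registers at a fixed per-channel offset */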
+
+
+
+
+/* Descriptor Structure */
+
+typedef struct {
+	u16 cp;			/* Chain Pointer */
+	u32 bp;			/* Buffer Pointer (24 bits) */
+	u16 len;		/* Data Length */
+	u8 stat;		/* Status */
+	u8 unused;		/* pads to 2-byte boundary */
+}__packed pkt_desc;
+
+
+/* Packet Descriptor Status bits */
+
+#define ST_TX_EOM     0x80	/* End of frame */
+#define ST_TX_EOT     0x01	/* End of transmission */
+
+#define ST_RX_EOM     0x80	/* End of frame */
+#define ST_RX_SHORT   0x40	/* Short frame */
+#define ST_RX_ABORT   0x20	/* Abort */
+#define ST_RX_RESBIT  0x10	/* Residual bit */
+#define ST_RX_OVERRUN 0x08	/* Overrun */
+#define ST_RX_CRC     0x04	/* CRC */
+
+#define ST_ERROR_MASK 0x7C
+
+#define DIR_EOTE      0x80      /* Transfer completed */
+#define DIR_EOME      0x40      /* Frame Transfer Completed (chained-block) */
+#define DIR_BOFE      0x20      /* Buffer Overflow/Underflow (chained-block)*/
+#define DIR_COFE      0x10      /* Counter Overflow (chained-block) */
+
+
+#define DSR_EOT       0x80      /* Transfer completed */
+#define DSR_EOM       0x40      /* Frame Transfer Completed (chained-block) */
+#define DSR_BOF       0x20      /* Buffer Overflow/Underflow (chained-block)*/
+#define DSR_COF       0x10      /* Counter Overflow (chained-block) */
+#define DSR_DE        0x02	/* DMA Enable */
+#define DSR_DWE       0x01      /* DMA Write Disable */
+
+/* DMA Master Enable Register (DMER) bits */
+#define DMER_DME      0x80	/* DMA Master Enable */
+
+
+#define CMD_RESET     0x21	/* Reset Channel */
+#define CMD_TX_ENABLE 0x02	/* Start transmitter */
+#define CMD_RX_ENABLE 0x12	/* Start receiver */
+
+#define MD0_HDLC      0x80	/* Bit-sync HDLC mode */
+#define MD0_CRC_ENA   0x04	/* Enable CRC code calculation */
+#define MD0_CRC_CCITT 0x02	/* CCITT CRC instead of CRC-16 */
+#define MD0_CRC_PR1   0x01	/* Initial all-ones instead of all-zeros */
+
+#define MD0_CRC_NONE  0x00
+#define MD0_CRC_16_0  0x04
+#define MD0_CRC_16    0x05
+#define MD0_CRC_ITU_0 0x06
+#define MD0_CRC_ITU   0x07
+
+#define MD2_NRZ	      0x00
+#define MD2_NRZI      0x20
+#define MD2_MANCHESTER 0x80
+#define MD2_FM_MARK   0xA0
+#define MD2_FM_SPACE  0xC0
+#define MD2_LOOPBACK  0x03      /* Local data Loopback */
+
+#define CTL_NORTS     0x01
+#define CTL_IDLE      0x10	/* Transmit an idle pattern */
+#define CTL_UDRNC     0x20	/* Idle after CRC or FCS+flag transmission */
+
+#define ST0_TXRDY     0x02	/* TX ready */
+#define ST0_RXRDY     0x01	/* RX ready */
+
+#define ST1_UDRN      0x80	/* MSCI TX underrun */
+#define ST1_CDCD      0x04	/* DCD level changed */
+
+#define ST3_CTS       0x08	/* modem input - /CTS */
+#define ST3_DCD       0x04	/* modem input - /DCD */
+
+#define IE0_TXINT     0x80	/* TX INT MSCI interrupt enable */
+#define IE0_RXINTA    0x40	/* RX INT A MSCI interrupt enable */
+#define IE1_UDRN      0x80	/* TX underrun MSCI interrupt enable */
+#define IE1_CDCD      0x04	/* DCD level changed */
+
+#define DCR_ABORT     0x01	/* Software abort command */
+#define DCR_CLEAR_EOF 0x02	/* Clear EOF interrupt */
+
+/* TX and RX Clock Source - RXS and TXS */
+#define CLK_BRG_MASK  0x0F
+#define CLK_LINE_RX   0x00	/* TX/RX clock line input */
+#define CLK_LINE_TX   0x00	/* TX/RX line input */
+#define CLK_BRG_RX    0x40	/* internal baud rate generator */
+#define CLK_BRG_TX    0x40	/* internal baud rate generator */
+#define CLK_RXCLK_TX  0x60	/* TX clock from RX clock */
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/hd64572.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/hd64572.c
new file mode 100644
index 0000000..e2779fa
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/hd64572.c
@@ -0,0 +1,641 @@
+/*
+ * Hitachi (now Renesas) SCA-II HD64572 driver for Linux
+ *
+ * Copyright (C) 1998-2008 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Source of information: HD64572 SCA-II User's Manual
+ *
+ * We use the following SCA memory map:
+ *
+ * Packet buffer descriptor rings - starting from card->rambase:
+ * rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring
+ * tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring
+ * rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used)
+ * tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used)
+ *
+ * Packet data buffers - starting from card->rambase + buff_offset:
+ * rx_ring_buffers * HDLC_MAX_MRU     = logical channel #0 RX buffers
+ * tx_ring_buffers * HDLC_MAX_MRU     = logical channel #0 TX buffers
+ * rx_ring_buffers * HDLC_MAX_MRU     = logical channel #1 RX buffers (if used)
+ * tx_ring_buffers * HDLC_MAX_MRU     = logical channel #1 TX buffers (if used)
+ */
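+
+/* Restating the layout above in the form desc_abs_number(), desc_offset()
+ * and buffer_offset() below implement it: descriptor N of channel "chan"
+ * (transmit = 0 for RX, 1 for TX) lives at
+ *
+ *	(chan * (rx_ring_buffers + tx_ring_buffers) +
+ *	 transmit * rx_ring_buffers + N) * sizeof(pkt_desc)
+ *
+ * bytes from card->rambase, and its data buffer at the same absolute index
+ * times HDLC_MAX_MRU bytes from card->rambase + buff_offset.
+ */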
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/hdlc.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include "hd64572.h"
+
+#define NAPI_WEIGHT		16
+
+#define get_msci(port)	  (port->chan ?   MSCI1_OFFSET :   MSCI0_OFFSET)
+#define get_dmac_rx(port) (port->chan ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
+#define get_dmac_tx(port) (port->chan ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)
+
+#define sca_in(reg, card)	     readb(card->scabase + (reg))
+#define sca_out(value, reg, card)    writeb(value, card->scabase + (reg))
+#define sca_inw(reg, card)	     readw(card->scabase + (reg))
+#define sca_outw(value, reg, card)   writew(value, card->scabase + (reg))
+#define sca_inl(reg, card)	     readl(card->scabase + (reg))
+#define sca_outl(value, reg, card)   writel(value, card->scabase + (reg))
+
+static int sca_poll(struct napi_struct *napi, int budget);
+
+static inline port_t* dev_to_port(struct net_device *dev)
+{
+	return dev_to_hdlc(dev)->priv;
+}
+
+static inline void enable_intr(port_t *port)
+{
+	/* enable DMIB and MSCI RXINTA interrupts */
+	sca_outl(sca_inl(IER0, port->card) |
+		 (port->chan ? 0x08002200 : 0x00080022), IER0, port->card);
+}
+
+static inline void disable_intr(port_t *port)
+{
+	sca_outl(sca_inl(IER0, port->card) &
+		 (port->chan ? 0x00FF00FF : 0xFF00FF00), IER0, port->card);
+}
+
+static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
+{
+	u16 rx_buffs = port->card->rx_ring_buffers;
+	u16 tx_buffs = port->card->tx_ring_buffers;
+
+	desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc.
+	return port->chan * (rx_buffs + tx_buffs) + transmit * rx_buffs + desc;
+}
+
+
+static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
+{
+	/* Descriptor offset always fits in 16 bits */
+	return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
+}
+
+
+static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
+					     int transmit)
+{
+	return (pkt_desc __iomem *)(port->card->rambase +
+				    desc_offset(port, desc, transmit));
+}
+
+
+static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
+{
+	return port->card->buff_offset +
+		desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
+}
+
+
+static inline void sca_set_carrier(port_t *port)
+{
+	if (!(sca_in(get_msci(port) + ST3, port->card) & ST3_DCD)) {
+#ifdef DEBUG_LINK
+		printk(KERN_DEBUG "%s: sca_set_carrier on\n",
+		       port->netdev.name);
+#endif
+		netif_carrier_on(port->netdev);
+	} else {
+#ifdef DEBUG_LINK
+		printk(KERN_DEBUG "%s: sca_set_carrier off\n",
+		       port->netdev.name);
+#endif
+		netif_carrier_off(port->netdev);
+	}
+}
+
+
+static void sca_init_port(port_t *port)
+{
+	card_t *card = port->card;
+	u16 dmac_rx = get_dmac_rx(port), dmac_tx = get_dmac_tx(port);
+	int transmit, i;
+
+	port->rxin = 0;
+	port->txin = 0;
+	port->txlast = 0;
+
+	for (transmit = 0; transmit < 2; transmit++) {
+		u16 buffs = transmit ? card->tx_ring_buffers
+			: card->rx_ring_buffers;
+
+		for (i = 0; i < buffs; i++) {
+			pkt_desc __iomem *desc = desc_address(port, i, transmit);
+			u16 chain_off = desc_offset(port, i + 1, transmit);
+			u32 buff_off = buffer_offset(port, i, transmit);
+
+			writel(chain_off, &desc->cp);
+			writel(buff_off, &desc->bp);
+			writew(0, &desc->len);
+			writeb(0, &desc->stat);
+		}
+	}
+
+	/* DMA disable - to halt state */
+	sca_out(0, DSR_RX(port->chan), card);
+	sca_out(0, DSR_TX(port->chan), card);
+
+	/* software ABORT - to initial state */
+	sca_out(DCR_ABORT, DCR_RX(port->chan), card);
+	sca_out(DCR_ABORT, DCR_TX(port->chan), card);
+
+	/* current desc addr */
+	sca_outl(desc_offset(port, 0, 0), dmac_rx + CDAL, card);
+	sca_outl(desc_offset(port, card->tx_ring_buffers - 1, 0),
+		 dmac_rx + EDAL, card);
+	sca_outl(desc_offset(port, 0, 1), dmac_tx + CDAL, card);
+	sca_outl(desc_offset(port, 0, 1), dmac_tx + EDAL, card);
+
+	/* clear frame end interrupt counter */
+	sca_out(DCR_CLEAR_EOF, DCR_RX(port->chan), card);
+	sca_out(DCR_CLEAR_EOF, DCR_TX(port->chan), card);
+
+	/* Receive */
+	sca_outw(HDLC_MAX_MRU, dmac_rx + BFLL, card); /* set buffer length */
+	sca_out(0x14, DMR_RX(port->chan), card); /* Chain mode, Multi-frame */
+	sca_out(DIR_EOME, DIR_RX(port->chan), card); /* enable interrupts */
+	sca_out(DSR_DE, DSR_RX(port->chan), card); /* DMA enable */
+
+	/* Transmit */
+	sca_out(0x14, DMR_TX(port->chan), card); /* Chain mode, Multi-frame */
+	sca_out(DIR_EOME, DIR_TX(port->chan), card); /* enable interrupts */
+
+	sca_set_carrier(port);
+	netif_napi_add(port->netdev, &port->napi, sca_poll, NAPI_WEIGHT);
+}
+
+
+/* MSCI interrupt service */
+static inline void sca_msci_intr(port_t *port)
+{
+	u16 msci = get_msci(port);
+	card_t* card = port->card;
+
+	if (sca_in(msci + ST1, card) & ST1_CDCD) {
+		/* Reset MSCI CDCD status bit */
+		sca_out(ST1_CDCD, msci + ST1, card);
+		sca_set_carrier(port);
+	}
+}
+
+
+static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
+			  u16 rxin)
+{
+	struct net_device *dev = port->netdev;
+	struct sk_buff *skb;
+	u16 len;
+	u32 buff;
+
+	len = readw(&desc->len);
+	skb = dev_alloc_skb(len);
+	if (!skb) {
+		dev->stats.rx_dropped++;
+		return;
+	}
+
+	buff = buffer_offset(port, rxin, 0);
+	memcpy_fromio(skb->data, card->rambase + buff, len);
+
+	skb_put(skb, len);
+#ifdef DEBUG_PKT
+	printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
+	debug_frame(skb);
+#endif
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += skb->len;
+	skb->protocol = hdlc_type_trans(skb, dev);
+	netif_receive_skb(skb);
+}
+
+
+/* Receive DMA service */
+static inline int sca_rx_done(port_t *port, int budget)
+{
+	struct net_device *dev = port->netdev;
+	u16 dmac = get_dmac_rx(port);
+	card_t *card = port->card;
+	u8 stat = sca_in(DSR_RX(port->chan), card); /* read DMA Status */
+	int received = 0;
+
+	/* Reset DSR status bits */
+	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
+		DSR_RX(port->chan), card);
+
+	if (stat & DSR_BOF)
+		/* Dropped one or more frames */
+		dev->stats.rx_over_errors++;
+
+	while (received < budget) {
+		u32 desc_off = desc_offset(port, port->rxin, 0);
+		pkt_desc __iomem *desc;
+		u32 cda = sca_inl(dmac + CDAL, card);
+
+		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
+			break;	/* No frame received */
+
+		desc = desc_address(port, port->rxin, 0);
+		stat = readb(&desc->stat);
+		if (!(stat & ST_RX_EOM))
+			port->rxpart = 1; /* partial frame received */
+		else if ((stat & ST_ERROR_MASK) || port->rxpart) {
+			dev->stats.rx_errors++;
+			if (stat & ST_RX_OVERRUN)
+				dev->stats.rx_fifo_errors++;
+			else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
+					  ST_RX_RESBIT)) || port->rxpart)
+				dev->stats.rx_frame_errors++;
+			else if (stat & ST_RX_CRC)
+				dev->stats.rx_crc_errors++;
+			if (stat & ST_RX_EOM)
+				port->rxpart = 0; /* received last fragment */
+		} else {
+			sca_rx(card, port, desc, port->rxin);
+			received++;
+		}
+
+		/* Set new error descriptor address */
+		sca_outl(desc_off, dmac + EDAL, card);
+		port->rxin = (port->rxin + 1) % card->rx_ring_buffers;
+	}
+
+	/* make sure RX DMA is enabled */
+	sca_out(DSR_DE, DSR_RX(port->chan), card);
+	return received;
+}
+
+
+/* Transmit DMA service */
+static inline void sca_tx_done(port_t *port)
+{
+	struct net_device *dev = port->netdev;
+	card_t* card = port->card;
+	u8 stat;
+	unsigned count = 0;
+
+	spin_lock(&port->lock);
+
+	stat = sca_in(DSR_TX(port->chan), card); /* read DMA Status */
+
+	/* Reset DSR status bits */
+	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
+		DSR_TX(port->chan), card);
+
+	while (1) {
+		pkt_desc __iomem *desc = desc_address(port, port->txlast, 1);
+		u8 stat = readb(&desc->stat);
+
+		if (!(stat & ST_TX_OWNRSHP))
+			break; /* not yet transmitted */
+		if (stat & ST_TX_UNDRRUN) {
+			dev->stats.tx_errors++;
+			dev->stats.tx_fifo_errors++;
+		} else {
+			dev->stats.tx_packets++;
+			dev->stats.tx_bytes += readw(&desc->len);
+		}
+		writeb(0, &desc->stat);	/* Free descriptor */
+		count++;
+		port->txlast = (port->txlast + 1) % card->tx_ring_buffers;
+	}
+
+	if (count)
+		netif_wake_queue(dev);
+	spin_unlock(&port->lock);
+}
+
+
+static int sca_poll(struct napi_struct *napi, int budget)
+{
+	port_t *port = container_of(napi, port_t, napi);
+	u32 isr0 = sca_inl(ISR0, port->card);
+	int received = 0;
+
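+	/* The hard-coded masks are the per-channel ISR0 bits for MSCI RXINTA
+	   (carrier changes), TX DMIB and RX DMIB - the same bits that
+	   enable_intr() sets in IER0 */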
+	if (isr0 & (port->chan ? 0x08000000 : 0x00080000))
+		sca_msci_intr(port);
+
+	if (isr0 & (port->chan ? 0x00002000 : 0x00000020))
+		sca_tx_done(port);
+
+	if (isr0 & (port->chan ? 0x00000200 : 0x00000002))
+		received = sca_rx_done(port, budget);
+
+	if (received < budget) {
+		napi_complete(napi);
+		enable_intr(port);
+	}
+
+	return received;
+}
+
+static irqreturn_t sca_intr(int irq, void *dev_id)
+{
+	card_t *card = dev_id;
+	u32 isr0 = sca_inl(ISR0, card);
+	int i, handled = 0;
+
+	for (i = 0; i < 2; i++) {
+		port_t *port = get_port(card, i);
+		if (port && (isr0 & (i ? 0x08002200 : 0x00080022))) {
+			handled = 1;
+			disable_intr(port);
+			napi_schedule(&port->napi);
+		}
+	}
+
+	return IRQ_RETVAL(handled);
+}
+
+
+static void sca_set_port(port_t *port)
+{
+	card_t* card = port->card;
+	u16 msci = get_msci(port);
+	u8 md2 = sca_in(msci + MD2, card);
+	unsigned int tmc, br = 10, brv = 1024;
+
+
+	if (port->settings.clock_rate > 0) {
+		/* Try lower br for better accuracy */
+		do {
+			br--;
+			brv >>= 1; /* brv = 2^9 = 512 max in specs */
+
+			/* Baud Rate = CLOCK_BASE / TMC / 2^BR */
+			tmc = CLOCK_BASE / brv / port->settings.clock_rate;
+		} while (br > 1 && tmc <= 128);
+
+		if (tmc < 1) {
+			tmc = 1;
+			br = 0;	/* For baud=CLOCK_BASE we use tmc=1 br=0 */
+			brv = 1;
+		} else if (tmc > 255)
+			tmc = 256; /* tmc=0 means 256 - low baud rates */
+
+		port->settings.clock_rate = CLOCK_BASE / brv / tmc;
+	} else {
+		br = 9; /* Minimum clock rate */
+		tmc = 256;	/* 8bit = 0 */
+		port->settings.clock_rate = CLOCK_BASE / (256 * 512);
+	}
+
+	port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
+	port->txs = (port->txs & ~CLK_BRG_MASK) | br;
+	port->tmc = tmc;
+
+	/* baud divisor - time constant*/
+	sca_out(port->tmc, msci + TMCR, card);
+	sca_out(port->tmc, msci + TMCT, card);
+
+	/* Set BRG bits */
+	sca_out(port->rxs, msci + RXS, card);
+	sca_out(port->txs, msci + TXS, card);
+
+	if (port->settings.loopback)
+		md2 |= MD2_LOOPBACK;
+	else
+		md2 &= ~MD2_LOOPBACK;
+
+	sca_out(md2, msci + MD2, card);
+
+}
+
+
+static void sca_open(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+	card_t* card = port->card;
+	u16 msci = get_msci(port);
+	u8 md0, md2;
+
+	switch(port->encoding) {
+	case ENCODING_NRZ:	md2 = MD2_NRZ;		break;
+	case ENCODING_NRZI:	md2 = MD2_NRZI;		break;
+	case ENCODING_FM_MARK:	md2 = MD2_FM_MARK;	break;
+	case ENCODING_FM_SPACE:	md2 = MD2_FM_SPACE;	break;
+	default:		md2 = MD2_MANCHESTER;
+	}
+
+	if (port->settings.loopback)
+		md2 |= MD2_LOOPBACK;
+
+	switch(port->parity) {
+	case PARITY_CRC16_PR0:	     md0 = MD0_HDLC | MD0_CRC_16_0;  break;
+	case PARITY_CRC16_PR1:	     md0 = MD0_HDLC | MD0_CRC_16;    break;
+	case PARITY_CRC32_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU32; break;
+	case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU;   break;
+	default:		     md0 = MD0_HDLC | MD0_CRC_NONE;
+	}
+
+	sca_out(CMD_RESET, msci + CMD, card);
+	sca_out(md0, msci + MD0, card);
+	sca_out(0x00, msci + MD1, card); /* no address field check */
+	sca_out(md2, msci + MD2, card);
+	sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
+	/* Skip the rest of an underrun frame */
+	sca_out(CTL_IDLE | CTL_URCT | CTL_URSKP, msci + CTL, card);
+	sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */
+	sca_out(0x3C, msci + TFS, card); /* +1 = TX start */
+	sca_out(0x38, msci + TCR, card); /* =Critical TX DMA activ condition */
+	sca_out(0x38, msci + TNR0, card); /* =TX DMA activation condition */
+	sca_out(0x3F, msci + TNR1, card); /* +1=TX DMA deactivation condition*/
+
+/* We're using the following interrupts:
+   - RXINTA (DCD changes only)
+   - DMIB (EOM - single frame transfer complete)
+*/
+	sca_outl(IE0_RXINTA | IE0_CDCD, msci + IE0, card);
+
+	sca_out(port->tmc, msci + TMCR, card);
+	sca_out(port->tmc, msci + TMCT, card);
+	sca_out(port->rxs, msci + RXS, card);
+	sca_out(port->txs, msci + TXS, card);
+	sca_out(CMD_TX_ENABLE, msci + CMD, card);
+	sca_out(CMD_RX_ENABLE, msci + CMD, card);
+
+	sca_set_carrier(port);
+	enable_intr(port);
+	napi_enable(&port->napi);
+	netif_start_queue(dev);
+}
+
+
+static void sca_close(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+
+	/* reset channel */
+	sca_out(CMD_RESET, get_msci(port) + CMD, port->card);
+	disable_intr(port);
+	napi_disable(&port->napi);
+	netif_stop_queue(dev);
+}
+
+
+static int sca_attach(struct net_device *dev, unsigned short encoding,
+		      unsigned short parity)
+{
+	if (encoding != ENCODING_NRZ &&
+	    encoding != ENCODING_NRZI &&
+	    encoding != ENCODING_FM_MARK &&
+	    encoding != ENCODING_FM_SPACE &&
+	    encoding != ENCODING_MANCHESTER)
+		return -EINVAL;
+
+	if (parity != PARITY_NONE &&
+	    parity != PARITY_CRC16_PR0 &&
+	    parity != PARITY_CRC16_PR1 &&
+	    parity != PARITY_CRC32_PR1_CCITT &&
+	    parity != PARITY_CRC16_PR1_CCITT)
+		return -EINVAL;
+
+	dev_to_port(dev)->encoding = encoding;
+	dev_to_port(dev)->parity = parity;
+	return 0;
+}
+
+
+#ifdef DEBUG_RINGS
+static void sca_dump_rings(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+	card_t *card = port->card;
+	u16 cnt;
+
+	printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
+	       sca_inl(get_dmac_rx(port) + CDAL, card),
+	       sca_inl(get_dmac_rx(port) + EDAL, card),
+	       sca_in(DSR_RX(port->chan), card), port->rxin,
+	       sca_in(DSR_RX(port->chan), card) & DSR_DE ? "" : "in");
+	for (cnt = 0; cnt < port->card->rx_ring_buffers; cnt++)
+		pr_cont(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
+	pr_cont("\n");
+
+	printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
+	       "last=%u %sactive",
+	       sca_inl(get_dmac_tx(port) + CDAL, card),
+	       sca_inl(get_dmac_tx(port) + EDAL, card),
+	       sca_in(DSR_TX(port->chan), card), port->txin, port->txlast,
+	       sca_in(DSR_TX(port->chan), card) & DSR_DE ? "" : "in");
+
+	for (cnt = 0; cnt < port->card->tx_ring_buffers; cnt++)
+		pr_cont(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
+	pr_cont("\n");
+
+	printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x,"
+	       " ST: %02x %02x %02x %02x %02x, FST: %02x CST: %02x %02x\n",
+	       sca_in(get_msci(port) + MD0, card),
+	       sca_in(get_msci(port) + MD1, card),
+	       sca_in(get_msci(port) + MD2, card),
+	       sca_in(get_msci(port) + ST0, card),
+	       sca_in(get_msci(port) + ST1, card),
+	       sca_in(get_msci(port) + ST2, card),
+	       sca_in(get_msci(port) + ST3, card),
+	       sca_in(get_msci(port) + ST4, card),
+	       sca_in(get_msci(port) + FST, card),
+	       sca_in(get_msci(port) + CST0, card),
+	       sca_in(get_msci(port) + CST1, card));
+
+	printk(KERN_DEBUG "ILAR: %02x ISR: %08x %08x\n", sca_in(ILAR, card),
+	       sca_inl(ISR0, card), sca_inl(ISR1, card));
+}
+#endif /* DEBUG_RINGS */
+
+
+static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+	card_t *card = port->card;
+	pkt_desc __iomem *desc;
+	u32 buff, len;
+
+	spin_lock_irq(&port->lock);
+
+	desc = desc_address(port, port->txin + 1, 1);
+	BUG_ON(readb(&desc->stat)); /* previous xmit should stop queue */
+
+#ifdef DEBUG_PKT
+	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
+	debug_frame(skb);
+#endif
+
+	desc = desc_address(port, port->txin, 1);
+	buff = buffer_offset(port, port->txin, 1);
+	len = skb->len;
+	memcpy_toio(card->rambase + buff, skb->data, len);
+
+	writew(len, &desc->len);
+	writeb(ST_TX_EOM, &desc->stat);
+
+	port->txin = (port->txin + 1) % card->tx_ring_buffers;
+	sca_outl(desc_offset(port, port->txin, 1),
+		 get_dmac_tx(port) + EDAL, card);
+
+	sca_out(DSR_DE, DSR_TX(port->chan), card); /* Enable TX DMA */
+
+	desc = desc_address(port, port->txin + 1, 1);
+	if (readb(&desc->stat)) /* allow 1 packet gap */
+		netif_stop_queue(dev);
+
+	spin_unlock_irq(&port->lock);
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+
+static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
+				    u32 ramsize)
+{
+	/* Round RAM size to 32 bits, fill from end to start */
+	u32 i = ramsize &= ~3;
+
+	do {
+		i -= 4;
+		writel(i ^ 0x12345678, rambase + i);
+	} while (i > 0);
+
+	for (i = 0; i < ramsize ; i += 4) {
+		if (readl(rambase + i) != (i ^ 0x12345678))
+			break;
+	}
+
+	return i;
+}
+
+
+static void __devinit sca_init(card_t *card, int wait_states)
+{
+	sca_out(wait_states, WCRL, card); /* Wait Control */
+	sca_out(wait_states, WCRM, card);
+	sca_out(wait_states, WCRH, card);
+
+	sca_out(0, DMER, card);	/* DMA Master disable */
+	sca_out(0x03, PCR, card); /* DMA priority */
+	sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */
+	sca_out(0, DSR_TX(0), card);
+	sca_out(0, DSR_RX(1), card);
+	sca_out(0, DSR_TX(1), card);
+	sca_out(DMER_DME, DMER, card); /* DMA Master enable */
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/hd64572.h b/ap/os/linux/linux-3.4.x/drivers/net/wan/hd64572.h
new file mode 100644
index 0000000..96567c2
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/hd64572.h
@@ -0,0 +1,527 @@
+/*
+ * hd64572.h	Description of the Hitachi HD64572 (SCA-II), valid for 
+ * 		CPU modes 0 & 2.
+ *
+ * Author:	Ivan Passos <ivan@cyclades.com>
+ *
+ * Copyright:   (c) 2000-2001 Cyclades Corp.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ * $Log: hd64572.h,v $
+ * Revision 3.1  2001/06/15 12:41:10  regina
+ * upping major version number
+ *
+ * Revision 1.1.1.1  2001/06/13 20:24:49  daniela
+ * PC300 initial CVS version (3.4.0-pre1)
+ *
+ * Revision 1.0 2000/01/25 ivan
+ * Initial version.
+ *
+ */
+
+#ifndef __HD64572_H
+#define __HD64572_H
+
+/* Illegal Access Register */
+#define	ILAR	0x00
+
+/* Wait Controller Registers */
+#define PABR0L	0x20	/* Physical Addr Boundary Register 0 L */
+#define PABR0H	0x21	/* Physical Addr Boundary Register 0 H */
+#define PABR1L	0x22	/* Physical Addr Boundary Register 1 L */
+#define PABR1H	0x23	/* Physical Addr Boundary Register 1 H */
+#define WCRL	0x24	/* Wait Control Register L */
+#define WCRM	0x25	/* Wait Control Register M */
+#define WCRH	0x26	/* Wait Control Register H */
+
+/* Interrupt Registers */
+#define IVR	0x60	/* Interrupt Vector Register */
+#define IMVR	0x64	/* Interrupt Modified Vector Register */
+#define ITCR	0x68	/* Interrupt Control Register */
+#define ISR0	0x6c	/* Interrupt Status Register 0 */
+#define ISR1	0x70	/* Interrupt Status Register 1 */
+#define IER0	0x74	/* Interrupt Enable Register 0 */
+#define IER1	0x78	/* Interrupt Enable Register 1 */
+
+/* Register Access Macros (chan is 0 or 1 in _any_ case) */
+#define	M_REG(reg, chan)	(reg + 0x80*chan)		/* MSCI */
+#define	DRX_REG(reg, chan)	(reg + 0x40*chan)		/* DMA Rx */
+#define	DTX_REG(reg, chan)	(reg + 0x20*(2*chan + 1))	/* DMA Tx */
+#define	TRX_REG(reg, chan)	(reg + 0x20*chan)		/* Timer Rx */
+#define	TTX_REG(reg, chan)	(reg + 0x10*(2*chan + 1))	/* Timer Tx */
+#define	ST_REG(reg, chan)	(reg + 0x80*chan)		/* Status Cnt */
+#define IR0_DRX(val, chan)	((val)<<(8*(chan)))		/* Int DMA Rx */
+#define IR0_DTX(val, chan)	((val)<<(4*(2*chan + 1)))	/* Int DMA Tx */
+#define IR0_M(val, chan)	((val)<<(8*(chan)))		/* Int MSCI */
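+
+/* Worked examples: M_REG(MD0, 1) = 0x138 + 0x80 = 0x1b8 and
+   DTX_REG(reg, 1) = reg + 0x20*3 = reg + 0x60 - each macro simply adds the
+   per-channel block offset to the register number */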
+
+/* MSCI Channel Registers */
+#define MSCI0_OFFSET 0x00
+#define MSCI1_OFFSET 0x80
+
+#define MD0	0x138	/* Mode reg 0 */
+#define MD1	0x139	/* Mode reg 1 */
+#define MD2	0x13a	/* Mode reg 2 */
+#define MD3	0x13b	/* Mode reg 3 */
+#define CTL	0x130	/* Control reg */
+#define RXS	0x13c	/* RX clock source */
+#define TXS	0x13d	/* TX clock source */
+#define EXS	0x13e	/* External clock input selection */
+#define TMCT	0x144	/* Time constant (Tx) */
+#define TMCR	0x145	/* Time constant (Rx) */
+#define CMD	0x128	/* Command reg */
+#define ST0	0x118	/* Status reg 0 */
+#define ST1	0x119	/* Status reg 1 */
+#define ST2	0x11a	/* Status reg 2 */
+#define ST3	0x11b	/* Status reg 3 */
+#define ST4	0x11c	/* Status reg 4 */
+#define FST	0x11d	/* frame Status reg  */
+#define IE0	0x120	/* Interrupt enable reg 0 */
+#define IE1	0x121	/* Interrupt enable reg 1 */
+#define IE2	0x122	/* Interrupt enable reg 2 */
+#define IE4	0x124	/* Interrupt enable reg 4 */
+#define FIE	0x125	/* Frame Interrupt enable reg  */
+#define SA0	0x140	/* Syn Address reg 0 */
+#define SA1	0x141	/* Syn Address reg 1 */
+#define IDL	0x142	/* Idle register */
+#define TRBL	0x100	/* TX/RX buffer reg L */ 
+#define TRBK	0x101	/* TX/RX buffer reg K */ 
+#define TRBJ	0x102	/* TX/RX buffer reg J */ 
+#define TRBH	0x103	/* TX/RX buffer reg H */ 
+#define TRC0	0x148	/* TX Ready control reg 0 */ 
+#define TRC1	0x149	/* TX Ready control reg 1 */ 
+#define RRC	0x14a	/* RX Ready control reg */ 
+#define CST0	0x108	/* Current Status Register 0 */ 
+#define CST1	0x109	/* Current Status Register 1 */ 
+#define CST2	0x10a	/* Current Status Register 2 */ 
+#define CST3	0x10b	/* Current Status Register 3 */ 
+#define GPO	0x131	/* General Purpose Output Pin Ctl Reg */
+#define TFS	0x14b	/* Tx Start Threshold Ctl Reg */
+#define TFN	0x143	/* Inter-transmit-frame Time Fill Ctl Reg */
+#define TBN	0x110	/* Tx Buffer Number Reg */
+#define RBN	0x111	/* Rx Buffer Number Reg */
+#define TNR0	0x150	/* Tx DMA Request Ctl Reg 0 */
+#define TNR1	0x151	/* Tx DMA Request Ctl Reg 1 */
+#define TCR	0x152	/* Tx DMA Critical Request Reg */
+#define RNR	0x154	/* Rx DMA Request Ctl Reg */
+#define RCR	0x156	/* Rx DMA Critical Request Reg */
+
+/* Timer Registers */
+#define TIMER0RX_OFFSET 0x00
+#define TIMER0TX_OFFSET 0x10
+#define TIMER1RX_OFFSET 0x20
+#define TIMER1TX_OFFSET 0x30
+
+#define TCNTL	0x200	/* Timer Upcounter L */
+#define TCNTH	0x201	/* Timer Upcounter H */
+#define TCONRL	0x204	/* Timer Constant Register L */
+#define TCONRH	0x205	/* Timer Constant Register H */
+#define TCSR	0x206	/* Timer Control/Status Register */
+#define TEPR	0x207	/* Timer Expand Prescale Register */
+
+/* DMA registers */
+#define PCR		0x40		/* DMA priority control reg */
+#define DRR		0x44		/* DMA reset reg */
+#define DMER		0x07		/* DMA Master Enable reg */
+#define BTCR		0x08		/* Burst Tx Ctl Reg */
+#define BOLR		0x0c		/* Back-off Length Reg */
+#define DSR_RX(chan)	(0x48 + 2*chan)	/* DMA Status Reg (Rx) */
+#define DSR_TX(chan)	(0x49 + 2*chan)	/* DMA Status Reg (Tx) */
+#define DIR_RX(chan)	(0x4c + 2*chan)	/* DMA Interrupt Enable Reg (Rx) */
+#define DIR_TX(chan)	(0x4d + 2*chan)	/* DMA Interrupt Enable Reg (Tx) */
+#define FCT_RX(chan)	(0x50 + 2*chan)	/* Frame End Interrupt Counter (Rx) */
+#define FCT_TX(chan)	(0x51 + 2*chan)	/* Frame End Interrupt Counter (Tx) */
+#define DMR_RX(chan)	(0x54 + 2*chan)	/* DMA Mode Reg (Rx) */
+#define DMR_TX(chan)	(0x55 + 2*chan)	/* DMA Mode Reg (Tx) */
+#define DCR_RX(chan)	(0x58 + 2*chan)	/* DMA Command Reg (Rx) */
+#define DCR_TX(chan)	(0x59 + 2*chan)	/* DMA Command Reg (Tx) */
+
+/* DMA Channel Registers */
+#define DMAC0RX_OFFSET 0x00
+#define DMAC0TX_OFFSET 0x20
+#define DMAC1RX_OFFSET 0x40
+#define DMAC1TX_OFFSET 0x60
+
+#define DARL	0x80	/* Dest Addr Register L (single-block, RX only) */
+#define DARH	0x81	/* Dest Addr Register H (single-block, RX only) */
+#define DARB	0x82	/* Dest Addr Register B (single-block, RX only) */
+#define DARBH	0x83	/* Dest Addr Register BH (single-block, RX only) */
+#define SARL	0x80	/* Source Addr Register L (single-block, TX only) */
+#define SARH	0x81	/* Source Addr Register H (single-block, TX only) */
+#define SARB	0x82	/* Source Addr Register B (single-block, TX only) */
+#define SARBH	0x83	/* Source Addr Register BH (single-block, TX only) */
+#define BARL	0x80	/* Buffer Addr Register L (chained-block) */
+#define BARH	0x81	/* Buffer Addr Register H (chained-block) */
+#define BARB	0x82	/* Buffer Addr Register B (chained-block) */
+#define BARBH	0x83	/* Buffer Addr Register BH (chained-block) */
+#define CDAL	0x84	/* Current Descriptor Addr Register L */
+#define CDAH	0x85	/* Current Descriptor Addr Register H */
+#define CDAB	0x86	/* Current Descriptor Addr Register B */
+#define CDABH	0x87	/* Current Descriptor Addr Register BH */
+#define EDAL	0x88	/* Error Descriptor Addr Register L */
+#define EDAH	0x89	/* Error Descriptor Addr Register H */
+#define EDAB	0x8a	/* Error Descriptor Addr Register B */
+#define EDABH	0x8b	/* Error Descriptor Addr Register BH */
+#define BFLL	0x90	/* RX Buffer Length L (only RX) */
+#define BFLH	0x91	/* RX Buffer Length H (only RX) */
+#define BCRL	0x8c	/* Byte Count Register L */
+#define BCRH	0x8d	/* Byte Count Register H */
+
+/* Block Descriptor Structure */
+typedef struct {
+	unsigned long	next;		/* pointer to next block descriptor */
+	unsigned long	ptbuf;		/* buffer pointer */
+	unsigned short	len;		/* data length */
+	unsigned char	status;		/* status */
+	unsigned char	filler[5];	/* alignment filler (16 bytes) */ 
+} pcsca_bd_t;
+
+/* Block Descriptor Structure */
+typedef struct {
+	u32 cp;			/* pointer to next block descriptor */
+	u32 bp;			/* buffer pointer */
+	u16 len;		/* data length */
+	u8 stat;		/* status */
+	u8 unused;		/* pads to 4-byte boundary */
+}pkt_desc;
+
+
+/*
+	Descriptor Status definitions:
+
+	Bit	Transmission	Reception
+
+	7	EOM		EOM
+	6	-		Short Frame
+	5	-		Abort
+	4	-		Residual bit
+	3	Underrun	Overrun	
+	2	-		CRC
+	1	Ownership	Ownership
+	0	EOT		-
+*/
+#define DST_EOT		0x01	/* End of transmit command */
+#define DST_OSB		0x02	/* Ownership bit */
+#define DST_CRC		0x04	/* CRC Error */
+#define DST_OVR		0x08	/* Overrun */
+#define DST_UDR		0x08	/* Underrun */
+#define DST_RBIT	0x10	/* Residual bit */
+#define DST_ABT		0x20	/* Abort */
+#define DST_SHRT	0x40	/* Short Frame  */
+#define DST_EOM		0x80	/* End of Message  */
+
+/* Packet Descriptor Status bits */
+
+#define ST_TX_EOM     0x80	/* End of frame */
+#define ST_TX_UNDRRUN 0x08
+#define ST_TX_OWNRSHP 0x02
+#define ST_TX_EOT     0x01	/* End of transmission */
+
+#define ST_RX_EOM     0x80	/* End of frame */
+#define ST_RX_SHORT   0x40	/* Short frame */
+#define ST_RX_ABORT   0x20	/* Abort */
+#define ST_RX_RESBIT  0x10	/* Residual bit */
+#define ST_RX_OVERRUN 0x08	/* Overrun */
+#define ST_RX_CRC     0x04	/* CRC */
+#define ST_RX_OWNRSHP 0x02
+
+#define ST_ERROR_MASK 0x7C
+
+/* Status Counter Registers */
+#define CMCR	0x158	/* Counter Master Ctl Reg */
+#define TECNTL	0x160	/* Tx EOM Counter L */
+#define TECNTM	0x161	/* Tx EOM Counter M */
+#define TECNTH	0x162	/* Tx EOM Counter H */
+#define TECCR	0x163	/* Tx EOM Counter Ctl Reg */
+#define URCNTL	0x164	/* Underrun Counter L */
+#define URCNTH	0x165	/* Underrun Counter H */
+#define URCCR	0x167	/* Underrun Counter Ctl Reg */
+#define RECNTL	0x168	/* Rx EOM Counter L */
+#define RECNTM	0x169	/* Rx EOM Counter M */
+#define RECNTH	0x16a	/* Rx EOM Counter H */
+#define RECCR	0x16b	/* Rx EOM Counter Ctl Reg */
+#define ORCNTL	0x16c	/* Overrun Counter L */
+#define ORCNTH	0x16d	/* Overrun Counter H */
+#define ORCCR	0x16f	/* Overrun Counter Ctl Reg */
+#define CECNTL	0x170	/* CRC Counter L */
+#define CECNTH	0x171	/* CRC Counter H */
+#define CECCR	0x173	/* CRC Counter Ctl Reg */
+#define ABCNTL	0x174	/* Abort frame Counter L */
+#define ABCNTH	0x175	/* Abort frame Counter H */
+#define ABCCR	0x177	/* Abort frame Counter Ctl Reg */
+#define SHCNTL	0x178	/* Short frame Counter L */
+#define SHCNTH	0x179	/* Short frame Counter H */
+#define SHCCR	0x17b	/* Short frame Counter Ctl Reg */
+#define RSCNTL	0x17c	/* Residual bit Counter L */
+#define RSCNTH	0x17d	/* Residual bit Counter H */
+#define RSCCR	0x17f	/* Residual bit Counter Ctl Reg */
+
+/* Register Programming Constants */
+
+#define IR0_DMIC	0x00000001
+#define IR0_DMIB	0x00000002
+#define IR0_DMIA	0x00000004
+#define IR0_EFT		0x00000008
+#define IR0_DMAREQ	0x00010000
+#define IR0_TXINT	0x00020000
+#define IR0_RXINTB	0x00040000
+#define IR0_RXINTA	0x00080000
+#define IR0_TXRDY	0x00100000
+#define IR0_RXRDY	0x00200000
+
+#define MD0_CRC16_0	0x00
+#define MD0_CRC16_1	0x01
+#define MD0_CRC32	0x02
+#define MD0_CRC_CCITT	0x03
+#define MD0_CRCC0	0x04
+#define MD0_CRCC1	0x08
+#define MD0_AUTO_ENA	0x10
+#define MD0_ASYNC	0x00
+#define MD0_BY_MSYNC	0x20
+#define MD0_BY_BISYNC	0x40
+#define MD0_BY_EXT	0x60
+#define MD0_BIT_SYNC	0x80
+#define MD0_TRANSP	0xc0
+
+#define MD0_HDLC        0x80	/* Bit-sync HDLC mode */
+
+#define MD0_CRC_NONE	0x00
+#define MD0_CRC_16_0	0x04
+#define MD0_CRC_16	0x05
+#define MD0_CRC_ITU32	0x06
+#define MD0_CRC_ITU	0x07
+
+#define MD1_NOADDR	0x00
+#define MD1_SADDR1	0x40
+#define MD1_SADDR2	0x80
+#define MD1_DADDR	0xc0
+
+#define MD2_NRZI_IEEE	0x40
+#define MD2_MANCHESTER	0x80
+#define MD2_FM_MARK	0xA0
+#define MD2_FM_SPACE	0xC0
+#define MD2_LOOPBACK	0x03	/* Local data Loopback */
+
+#define MD2_F_DUPLEX	0x00
+#define MD2_AUTO_ECHO	0x01
+#define MD2_LOOP_HI_Z	0x02
+#define MD2_LOOP_MIR	0x03
+#define MD2_ADPLL_X8	0x00
+#define MD2_ADPLL_X16	0x08
+#define MD2_ADPLL_X32	0x10
+#define MD2_NRZ		0x00
+#define MD2_NRZI	0x20
+#define MD2_NRZ_IEEE	0x40
+#define MD2_MANCH	0x00
+#define MD2_FM1		0x20
+#define MD2_FM0		0x40
+#define MD2_FM		0x80
+
+#define CTL_RTS		0x01
+#define CTL_DTR		0x02
+#define CTL_SYN		0x04
+#define CTL_IDLC	0x10
+#define CTL_UDRNC	0x20
+#define CTL_URSKP	0x40
+#define CTL_URCT	0x80
+
+#define CTL_NORTS	0x01
+#define CTL_NODTR	0x02
+#define CTL_IDLE	0x10
+
+#define	RXS_BR0		0x01
+#define	RXS_BR1		0x02
+#define	RXS_BR2		0x04
+#define	RXS_BR3		0x08
+#define	RXS_ECLK	0x00
+#define	RXS_ECLK_NS	0x20
+#define	RXS_IBRG	0x40
+#define	RXS_PLL1	0x50
+#define	RXS_PLL2	0x60
+#define	RXS_PLL3	0x70
+#define	RXS_DRTXC	0x80
+
+#define	TXS_BR0		0x01
+#define	TXS_BR1		0x02
+#define	TXS_BR2		0x04
+#define	TXS_BR3		0x08
+#define	TXS_ECLK	0x00
+#define	TXS_IBRG	0x40
+#define	TXS_RCLK	0x60
+#define	TXS_DTRXC	0x80
+
+#define	EXS_RES0	0x01
+#define	EXS_RES1	0x02
+#define	EXS_RES2	0x04
+#define	EXS_TES0	0x10
+#define	EXS_TES1	0x20
+#define	EXS_TES2	0x40
+
+#define CLK_BRG_MASK	0x0F
+#define CLK_PIN_OUT	0x80
+#define CLK_LINE    	0x00	/* clock line input */
+#define CLK_BRG     	0x40	/* internal baud rate generator */
+#define CLK_TX_RXCLK	0x60	/* TX clock from RX clock */
+
+#define CMD_RX_RST	0x11
+#define CMD_RX_ENA	0x12
+#define CMD_RX_DIS	0x13
+#define CMD_RX_CRC_INIT	0x14
+#define CMD_RX_MSG_REJ	0x15
+#define CMD_RX_MP_SRCH	0x16
+#define CMD_RX_CRC_EXC	0x17
+#define CMD_RX_CRC_FRC	0x18
+#define CMD_TX_RST	0x01
+#define CMD_TX_ENA	0x02
+#define CMD_TX_DISA	0x03
+#define CMD_TX_CRC_INIT	0x04
+#define CMD_TX_CRC_EXC	0x05
+#define CMD_TX_EOM	0x06
+#define CMD_TX_ABORT	0x07
+#define CMD_TX_MP_ON	0x08
+#define CMD_TX_BUF_CLR	0x09
+#define CMD_TX_DISB	0x0b
+#define CMD_CH_RST	0x21
+#define CMD_SRCH_MODE	0x31
+#define CMD_NOP		0x00
+
+#define CMD_RESET	0x21
+#define CMD_TX_ENABLE	0x02
+#define CMD_RX_ENABLE	0x12
+
+#define ST0_RXRDY	0x01
+#define ST0_TXRDY	0x02
+#define ST0_RXINTB	0x20
+#define ST0_RXINTA	0x40
+#define ST0_TXINT	0x80
+
+#define ST1_IDLE	0x01
+#define ST1_ABORT	0x02
+#define ST1_CDCD	0x04
+#define ST1_CCTS	0x08
+#define ST1_SYN_FLAG	0x10
+#define ST1_CLMD	0x20
+#define ST1_TXIDLE	0x40
+#define ST1_UDRN	0x80
+
+#define ST2_CRCE	0x04
+#define ST2_ONRN	0x08
+#define ST2_RBIT	0x10
+#define ST2_ABORT	0x20
+#define ST2_SHORT	0x40
+#define ST2_EOM		0x80
+
+#define ST3_RX_ENA	0x01
+#define ST3_TX_ENA	0x02
+#define ST3_DCD		0x04
+#define ST3_CTS		0x08
+#define ST3_SRCH_MODE	0x10
+#define ST3_SLOOP	0x20
+#define ST3_GPI		0x80
+
+#define ST4_RDNR	0x01
+#define ST4_RDCR	0x02
+#define ST4_TDNR	0x04
+#define ST4_TDCR	0x08
+#define ST4_OCLM	0x20
+#define ST4_CFT		0x40
+#define ST4_CGPI	0x80
+
+#define FST_CRCEF	0x04
+#define FST_OVRNF	0x08
+#define FST_RBIF	0x10
+#define FST_ABTF	0x20
+#define FST_SHRTF	0x40
+#define FST_EOMF	0x80
+
+#define IE0_RXRDY	0x01
+#define IE0_TXRDY	0x02
+#define IE0_RXINTB	0x20
+#define IE0_RXINTA	0x40
+#define IE0_TXINT	0x80
+#define IE0_UDRN	0x00008000 /* TX underrun MSCI interrupt enable */
+#define IE0_CDCD	0x00000400 /* CD level change interrupt enable */
+
+#define IE1_IDLD	0x01
+#define IE1_ABTD	0x02
+#define IE1_CDCD	0x04
+#define IE1_CCTS	0x08
+#define IE1_SYNCD	0x10
+#define IE1_CLMD	0x20
+#define IE1_IDL		0x40
+#define IE1_UDRN	0x80
+
+#define IE2_CRCE	0x04
+#define IE2_OVRN	0x08
+#define IE2_RBIT	0x10
+#define IE2_ABT		0x20
+#define IE2_SHRT	0x40
+#define IE2_EOM		0x80
+
+#define IE4_RDNR	0x01
+#define IE4_RDCR	0x02
+#define IE4_TDNR	0x04
+#define IE4_TDCR	0x08
+#define IE4_OCLM	0x20
+#define IE4_CFT		0x40
+#define IE4_CGPI	0x80
+
+#define FIE_CRCEF	0x04
+#define FIE_OVRNF	0x08
+#define FIE_RBIF	0x10
+#define FIE_ABTF	0x20
+#define FIE_SHRTF	0x40
+#define FIE_EOMF	0x80
+
+#define DSR_DWE		0x01
+#define DSR_DE		0x02
+#define DSR_REF		0x04
+#define DSR_UDRF	0x04
+#define DSR_COA		0x08
+#define DSR_COF		0x10
+#define DSR_BOF		0x20
+#define DSR_EOM		0x40
+#define DSR_EOT		0x80
+
+#define DIR_REF		0x04
+#define DIR_UDRF	0x04
+#define DIR_COA		0x08
+#define DIR_COF		0x10
+#define DIR_BOF		0x20
+#define DIR_EOM		0x40
+#define DIR_EOT		0x80
+
+#define DIR_REFE	0x04
+#define DIR_UDRFE	0x04
+#define DIR_COAE	0x08
+#define DIR_COFE	0x10
+#define DIR_BOFE	0x20
+#define DIR_EOME	0x40
+#define DIR_EOTE	0x80
+
+#define DMR_CNTE	0x02
+#define DMR_NF		0x04
+#define DMR_SEOME	0x08
+#define DMR_TMOD	0x10
+
+#define DMER_DME        0x80	/* DMA Master Enable */
+
+#define DCR_SW_ABT	0x01
+#define DCR_FCT_CLR	0x02
+
+#define DCR_ABORT	0x01
+#define DCR_CLEAR_EOF	0x02
+
+#define PCR_COTE	0x80
+#define PCR_PR0		0x01
+#define PCR_PR1		0x02
+#define PCR_PR2		0x04
+#define PCR_CCC		0x08
+#define PCR_BRC		0x10
+#define PCR_OSB		0x40
+#define PCR_BURST	0x80
+
+#endif /* (__HD64572_H) */
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc.c
new file mode 100644
index 0000000..10cc7df
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc.c
@@ -0,0 +1,385 @@
+/*
+ * Generic HDLC support routines for Linux
+ *
+ * Copyright (C) 1999 - 2008 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Currently supported:
+ *	* raw IP-in-HDLC
+ *	* Cisco HDLC
+ *	* Frame Relay with ANSI or CCITT LMI (both user and network side)
+ *	* PPP
+ *	* X.25
+ *
+ * Use the sethdlc utility to set line parameters, protocol and PVCs
+ *
+ * How does it work:
+ * - proto->open(), close(), start(), stop() calls are serialized.
+ *   The order is: open, [ start, stop ... ] close ...
+ * - proto->start() and stop() are called with spin_lock_irq held.
+ */
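+
+/* A rough sketch, for orientation only (the myproto_* names are made up;
+ * the field names match the proto-> callbacks used below): a protocol
+ * module hands this layer something like
+ *
+ *	static struct hdlc_proto myproto = {
+ *		.open     = myproto_open,
+ *		.close    = myproto_close,
+ *		.start    = myproto_start,
+ *		.stop     = myproto_stop,
+ *		.ioctl    = myproto_ioctl,
+ *		.netif_rx = myproto_rx,
+ *		.module   = THIS_MODULE,
+ *	};
+ *
+ * open/close/start/stop are serialized as described above, and start/stop
+ * run under the device's state_lock with interrupts disabled.
+ */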
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/hdlc.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/pkt_sched.h>
+#include <linux/poll.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/net_namespace.h>
+
+
+static const char* version = "HDLC support module revision 1.22";
+
+#undef DEBUG_LINK
+
+static struct hdlc_proto *first_proto;
+
+int hdlc_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
+		    struct packet_type *p, struct net_device *orig_dev)
+{
+	struct hdlc_device *hdlc = dev_to_hdlc(dev);
+
+	if (!net_eq(dev_net(dev), &init_net)) {
+		kfree_skb(skb);
+		return 0;
+	}
+
+	BUG_ON(!hdlc->proto->netif_rx);
+	return hdlc->proto->netif_rx(skb);
+}
+
+netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+
+	if (hdlc->proto->xmit)
+		return hdlc->proto->xmit(skb, dev);
+
+	return hdlc->xmit(skb, dev); /* call hardware driver directly */
+}
+
+static inline void hdlc_proto_start(struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	if (hdlc->proto->start)
+		hdlc->proto->start(dev);
+}
+
+
+
+static inline void hdlc_proto_stop(struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	if (hdlc->proto->stop)
+		hdlc->proto->stop(dev);
+}
+
+
+
+static int hdlc_device_event(struct notifier_block *this, unsigned long event,
+			     void *ptr)
+{
+	struct net_device *dev = ptr;
+	hdlc_device *hdlc;
+	unsigned long flags;
+	int on;
+
+	if (!net_eq(dev_net(dev), &init_net))
+		return NOTIFY_DONE;
+
+	if (!(dev->priv_flags & IFF_WAN_HDLC))
+		return NOTIFY_DONE; /* not an HDLC device */
+
+	if (event != NETDEV_CHANGE)
+		return NOTIFY_DONE; /* Only interested in carrier changes */
+
+	on = netif_carrier_ok(dev);
+
+#ifdef DEBUG_LINK
+	printk(KERN_DEBUG "%s: hdlc_device_event NETDEV_CHANGE, carrier %i\n",
+	       dev->name, on);
+#endif
+
+	hdlc = dev_to_hdlc(dev);
+	spin_lock_irqsave(&hdlc->state_lock, flags);
+
+	if (hdlc->carrier == on)
+		goto carrier_exit; /* no change in DCD line level */
+
+	hdlc->carrier = on;
+
+	if (!hdlc->open)
+		goto carrier_exit;
+
+	if (hdlc->carrier) {
+		netdev_info(dev, "Carrier detected\n");
+		hdlc_proto_start(dev);
+	} else {
+		netdev_info(dev, "Carrier lost\n");
+		hdlc_proto_stop(dev);
+	}
+
+carrier_exit:
+	spin_unlock_irqrestore(&hdlc->state_lock, flags);
+	return NOTIFY_DONE;
+}
+
+
+
+/* Must be called by hardware driver when HDLC device is being opened */
+int hdlc_open(struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+#ifdef DEBUG_LINK
+	printk(KERN_DEBUG "%s: hdlc_open() carrier %i open %i\n", dev->name,
+	       hdlc->carrier, hdlc->open);
+#endif
+
+	if (hdlc->proto == NULL)
+		return -ENOSYS;	/* no protocol attached */
+
+	if (hdlc->proto->open) {
+		int result = hdlc->proto->open(dev);
+		if (result)
+			return result;
+	}
+
+	spin_lock_irq(&hdlc->state_lock);
+
+	if (hdlc->carrier) {
+		netdev_info(dev, "Carrier detected\n");
+		hdlc_proto_start(dev);
+	} else
+		netdev_info(dev, "No carrier\n");
+
+	hdlc->open = 1;
+
+	spin_unlock_irq(&hdlc->state_lock);
+	return 0;
+}
+
+
+
+/* Must be called by hardware driver when HDLC device is being closed */
+void hdlc_close(struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+#ifdef DEBUG_LINK
+	printk(KERN_DEBUG "%s: hdlc_close() carrier %i open %i\n", dev->name,
+	       hdlc->carrier, hdlc->open);
+#endif
+
+	spin_lock_irq(&hdlc->state_lock);
+
+	hdlc->open = 0;
+	if (hdlc->carrier)
+		hdlc_proto_stop(dev);
+
+	spin_unlock_irq(&hdlc->state_lock);
+
+	if (hdlc->proto->close)
+		hdlc->proto->close(dev);
+}
+
+
+
+int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct hdlc_proto *proto = first_proto;
+	int result;
+
+	if (cmd != SIOCWANDEV)
+		return -EINVAL;
+
+	if (dev_to_hdlc(dev)->proto) {
+		result = dev_to_hdlc(dev)->proto->ioctl(dev, ifr);
+		if (result != -EINVAL)
+			return result;
+	}
+
+	/* Not handled by currently attached protocol (if any) */
+
+	while (proto) {
+		if ((result = proto->ioctl(dev, ifr)) != -EINVAL)
+			return result;
+		proto = proto->next;
+	}
+	return -EINVAL;
+}
+
+static const struct header_ops hdlc_null_ops;
+
+static void hdlc_setup_dev(struct net_device *dev)
+{
+	/* Re-init all variables changed by HDLC protocol drivers,
+	 * including ether_setup() called from hdlc_raw_eth.c.
+	 */
+	dev->flags		 = IFF_POINTOPOINT | IFF_NOARP;
+	dev->priv_flags		 = IFF_WAN_HDLC;
+	dev->mtu		 = HDLC_MAX_MTU;
+	dev->type		 = ARPHRD_RAWHDLC;
+	dev->hard_header_len	 = 16;
+	dev->addr_len		 = 0;
+	dev->header_ops		 = &hdlc_null_ops;
+}
+
+static void hdlc_setup(struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+
+	hdlc_setup_dev(dev);
+	hdlc->carrier = 1;
+	hdlc->open = 0;
+	spin_lock_init(&hdlc->state_lock);
+}
+
+struct net_device *alloc_hdlcdev(void *priv)
+{
+	struct net_device *dev;
+	dev = alloc_netdev(sizeof(struct hdlc_device), "hdlc%d", hdlc_setup);
+	if (dev)
+		dev_to_hdlc(dev)->priv = priv;
+	return dev;
+}
+
+void unregister_hdlc_device(struct net_device *dev)
+{
+	rtnl_lock();
+	unregister_netdevice(dev);
+	detach_hdlc_protocol(dev);
+	rtnl_unlock();
+}
+
+
+
+int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
+			 size_t size)
+{
+	detach_hdlc_protocol(dev);
+
+	if (!try_module_get(proto->module))
+		return -ENOSYS;
+
+	if (size)
+		if ((dev_to_hdlc(dev)->state = kmalloc(size,
+						       GFP_KERNEL)) == NULL) {
+			netdev_warn(dev,
+				    "Memory squeeze on attach_hdlc_protocol()\n");
+			module_put(proto->module);
+			return -ENOBUFS;
+		}
+	dev_to_hdlc(dev)->proto = proto;
+	return 0;
+}
+
+
+void detach_hdlc_protocol(struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+
+	if (hdlc->proto) {
+		if (hdlc->proto->detach)
+			hdlc->proto->detach(dev);
+		module_put(hdlc->proto->module);
+		hdlc->proto = NULL;
+	}
+	kfree(hdlc->state);
+	hdlc->state = NULL;
+	hdlc_setup_dev(dev);
+}
+
+
+void register_hdlc_protocol(struct hdlc_proto *proto)
+{
+	rtnl_lock();
+	proto->next = first_proto;
+	first_proto = proto;
+	rtnl_unlock();
+}
+
+
+void unregister_hdlc_protocol(struct hdlc_proto *proto)
+{
+	struct hdlc_proto **p;
+
+	rtnl_lock();
+	p = &first_proto;
+	while (*p != proto) {
+		BUG_ON(!*p);
+		p = &((*p)->next);
+	}
+	*p = proto->next;
+	rtnl_unlock();
+}
+
+
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("HDLC support module");
+MODULE_LICENSE("GPL v2");
+
+EXPORT_SYMBOL(hdlc_change_mtu);
+EXPORT_SYMBOL(hdlc_start_xmit);
+EXPORT_SYMBOL(hdlc_open);
+EXPORT_SYMBOL(hdlc_close);
+EXPORT_SYMBOL(hdlc_ioctl);
+EXPORT_SYMBOL(alloc_hdlcdev);
+EXPORT_SYMBOL(unregister_hdlc_device);
+EXPORT_SYMBOL(register_hdlc_protocol);
+EXPORT_SYMBOL(unregister_hdlc_protocol);
+EXPORT_SYMBOL(attach_hdlc_protocol);
+EXPORT_SYMBOL(detach_hdlc_protocol);
+
+static struct packet_type hdlc_packet_type __read_mostly = {
+	.type = cpu_to_be16(ETH_P_HDLC),
+	.func = hdlc_rcv,
+};
+
+
+static struct notifier_block hdlc_notifier = {
+	.notifier_call = hdlc_device_event,
+};
+
+
+static int __init hdlc_module_init(void)
+{
+	int result;
+
+	pr_info("%s\n", version);
+	if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0)
+		return result;
+	dev_add_pack(&hdlc_packet_type);
+	return 0;
+}
+
+
+
+static void __exit hdlc_module_exit(void)
+{
+	dev_remove_pack(&hdlc_packet_type);
+	unregister_netdevice_notifier(&hdlc_notifier);
+}
+
+
+module_init(hdlc_module_init);
+module_exit(hdlc_module_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_cisco.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_cisco.c
new file mode 100644
index 0000000..3f20808
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_cisco.c
@@ -0,0 +1,408 @@
+/*
+ * Generic HDLC support routines for Linux
+ * Cisco HDLC support
+ *
+ * Copyright (C) 2000 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/hdlc.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pkt_sched.h>
+#include <linux/poll.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+
+#undef DEBUG_HARD_HEADER
+
+#define CISCO_MULTICAST		0x8F	/* Cisco multicast address */
+#define CISCO_UNICAST		0x0F	/* Cisco unicast address */
+#define CISCO_KEEPALIVE		0x8035	/* Cisco keepalive protocol */
+#define CISCO_SYS_INFO		0x2000	/* Cisco interface/system info */
+#define CISCO_ADDR_REQ		0	/* Cisco address request */
+#define CISCO_ADDR_REPLY	1	/* Cisco address reply */
+#define CISCO_KEEPALIVE_REQ	2	/* Cisco keepalive request */
+
+
+struct hdlc_header {
+	u8 address;
+	u8 control;
+	__be16 protocol;
+}__packed;
+
+
+struct cisco_packet {
+	__be32 type;		/* code */
+	__be32 par1;
+	__be32 par2;
+	__be16 rel;		/* reliability */
+	__be32 time;
+}__packed;
+#define	CISCO_PACKET_LEN	18
+#define	CISCO_BIG_PACKET_LEN	20
+
+
+struct cisco_state {
+	cisco_proto settings;
+
+	struct timer_list timer;
+	spinlock_t lock;
+	unsigned long last_poll;
+	int up;
+	u32 txseq; /* TX sequence number, 0 = none */
+	u32 rxseq; /* RX sequence number */
+};
+
+
+static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr);
+
+
+static inline struct cisco_state* state(hdlc_device *hdlc)
+{
+	return (struct cisco_state *)hdlc->state;
+}
+
+
+static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
+			     u16 type, const void *daddr, const void *saddr,
+			     unsigned int len)
+{
+	struct hdlc_header *data;
+#ifdef DEBUG_HARD_HEADER
+	printk(KERN_DEBUG "%s: cisco_hard_header called\n", dev->name);
+#endif
+
+	skb_push(skb, sizeof(struct hdlc_header));
+	data = (struct hdlc_header*)skb->data;
+	if (type == CISCO_KEEPALIVE)
+		data->address = CISCO_MULTICAST;
+	else
+		data->address = CISCO_UNICAST;
+	data->control = 0;
+	data->protocol = htons(type);
+
+	return sizeof(struct hdlc_header);
+}
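+
+/* As a concrete illustration of the header built above: a keepalive frame
+ * carries, byte for byte, 0x8F 0x00 0x80 0x35 (CISCO_MULTICAST, control 0,
+ * htons(CISCO_KEEPALIVE)), while a data frame carries 0x0F (CISCO_UNICAST)
+ * followed by the EtherType, e.g. 0x0F 0x00 0x08 0x00 for IPv4.
+ */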
+
+
+
+static void cisco_keepalive_send(struct net_device *dev, u32 type,
+				 __be32 par1, __be32 par2)
+{
+	struct sk_buff *skb;
+	struct cisco_packet *data;
+
+	skb = dev_alloc_skb(sizeof(struct hdlc_header) +
+			    sizeof(struct cisco_packet));
+	if (!skb) {
+		netdev_warn(dev, "Memory squeeze on cisco_keepalive_send()\n");
+		return;
+	}
+	skb_reserve(skb, 4);
+	cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
+	data = (struct cisco_packet*)(skb->data + 4);
+
+	data->type = htonl(type);
+	data->par1 = par1;
+	data->par2 = par2;
+	data->rel = cpu_to_be16(0xFFFF);
+	/* we will need do_div here if 1000 % HZ != 0 */
+	data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ));
+
+	skb_put(skb, sizeof(struct cisco_packet));
+	skb->priority = TC_PRIO_CONTROL;
+	skb->dev = dev;
+	skb_reset_network_header(skb);
+
+	dev_queue_xmit(skb);
+}
+
+
+
+static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+	struct hdlc_header *data = (struct hdlc_header*)skb->data;
+
+	if (skb->len < sizeof(struct hdlc_header))
+		return cpu_to_be16(ETH_P_HDLC);
+
+	if (data->address != CISCO_MULTICAST &&
+	    data->address != CISCO_UNICAST)
+		return cpu_to_be16(ETH_P_HDLC);
+
+	switch (data->protocol) {
+	case cpu_to_be16(ETH_P_IP):
+	case cpu_to_be16(ETH_P_IPX):
+	case cpu_to_be16(ETH_P_IPV6):
+		skb_pull(skb, sizeof(struct hdlc_header));
+		return data->protocol;
+	default:
+		return cpu_to_be16(ETH_P_HDLC);
+	}
+}
+
+
+static int cisco_rx(struct sk_buff *skb)
+{
+	struct net_device *dev = skb->dev;
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	struct cisco_state *st = state(hdlc);
+	struct hdlc_header *data = (struct hdlc_header*)skb->data;
+	struct cisco_packet *cisco_data;
+	struct in_device *in_dev;
+	__be32 addr, mask;
+	u32 ack;
+
+	if (skb->len < sizeof(struct hdlc_header))
+		goto rx_error;
+
+	if (data->address != CISCO_MULTICAST &&
+	    data->address != CISCO_UNICAST)
+		goto rx_error;
+
+	switch (ntohs(data->protocol)) {
+	case CISCO_SYS_INFO:
+		/* Packet is not needed, drop it. */
+		dev_kfree_skb_any(skb);
+		return NET_RX_SUCCESS;
+
+	case CISCO_KEEPALIVE:
+		if ((skb->len != sizeof(struct hdlc_header) +
+		     CISCO_PACKET_LEN) &&
+		    (skb->len != sizeof(struct hdlc_header) +
+		     CISCO_BIG_PACKET_LEN)) {
+			netdev_info(dev, "Invalid length of Cisco control packet (%d bytes)\n",
+				    skb->len);
+			goto rx_error;
+		}
+
+		cisco_data = (struct cisco_packet*)(skb->data + sizeof
+						    (struct hdlc_header));
+
+		switch (ntohl (cisco_data->type)) {
+		case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
+			rcu_read_lock();
+			in_dev = __in_dev_get_rcu(dev);
+			addr = 0;
+			mask = ~cpu_to_be32(0); /* is the mask correct? */
+
+			if (in_dev != NULL) {
+				struct in_ifaddr **ifap = &in_dev->ifa_list;
+
+				while (*ifap != NULL) {
+					if (strcmp(dev->name,
+						   (*ifap)->ifa_label) == 0) {
+						addr = (*ifap)->ifa_local;
+						mask = (*ifap)->ifa_mask;
+						break;
+					}
+					ifap = &(*ifap)->ifa_next;
+				}
+
+				cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
+						     addr, mask);
+			}
+			rcu_read_unlock();
+			dev_kfree_skb_any(skb);
+			return NET_RX_SUCCESS;
+
+		case CISCO_ADDR_REPLY:
+			netdev_info(dev, "Unexpected Cisco IP address reply\n");
+			goto rx_error;
+
+		case CISCO_KEEPALIVE_REQ:
+			spin_lock(&st->lock);
+			st->rxseq = ntohl(cisco_data->par1);
+			ack = ntohl(cisco_data->par2);
+			if (ack && (ack == st->txseq ||
+				    /* our current REQ may be in transit */
+				    ack == st->txseq - 1)) {
+				st->last_poll = jiffies;
+				if (!st->up) {
+					u32 sec, min, hrs, days;
+					sec = ntohl(cisco_data->time) / 1000;
+					min = sec / 60; sec -= min * 60;
+					hrs = min / 60; min -= hrs * 60;
+					days = hrs / 24; hrs -= days * 24;
+					netdev_info(dev, "Link up (peer uptime %ud%uh%um%us)\n",
+						    days, hrs, min, sec);
+					netif_dormant_off(dev);
+					st->up = 1;
+				}
+			}
+			spin_unlock(&st->lock);
+
+			dev_kfree_skb_any(skb);
+			return NET_RX_SUCCESS;
+		} /* switch (keepalive type) */
+	} /* switch (protocol) */
+
+	netdev_info(dev, "Unsupported protocol %x\n", ntohs(data->protocol));
+	dev_kfree_skb_any(skb);
+	return NET_RX_DROP;
+
+rx_error:
+	dev->stats.rx_errors++; /* Mark error */
+	dev_kfree_skb_any(skb);
+	return NET_RX_DROP;
+}
+
+
+
+static void cisco_timer(unsigned long arg)
+{
+	struct net_device *dev = (struct net_device *)arg;
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	struct cisco_state *st = state(hdlc);
+
+	spin_lock(&st->lock);
+	if (st->up &&
+	    time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) {
+		st->up = 0;
+		netdev_info(dev, "Link down\n");
+		netif_dormant_on(dev);
+	}
+
+	cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
+			     htonl(st->rxseq));
+	spin_unlock(&st->lock);
+
+	st->timer.expires = jiffies + st->settings.interval * HZ;
+	st->timer.function = cisco_timer;
+	st->timer.data = arg;
+	add_timer(&st->timer);
+}
+
+
+
+static void cisco_start(struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	struct cisco_state *st = state(hdlc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&st->lock, flags);
+	st->up = st->txseq = st->rxseq = 0;
+	spin_unlock_irqrestore(&st->lock, flags);
+
+	init_timer(&st->timer);
+	st->timer.expires = jiffies + HZ; /* First poll after 1 s */
+	st->timer.function = cisco_timer;
+	st->timer.data = (unsigned long)dev;
+	add_timer(&st->timer);
+}
+
+
+
+static void cisco_stop(struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	struct cisco_state *st = state(hdlc);
+	unsigned long flags;
+
+	del_timer_sync(&st->timer);
+
+	spin_lock_irqsave(&st->lock, flags);
+	netif_dormant_on(dev);
+	st->up = st->txseq = 0;
+	spin_unlock_irqrestore(&st->lock, flags);
+}
+
+
+static struct hdlc_proto proto = {
+	.start		= cisco_start,
+	.stop		= cisco_stop,
+	.type_trans	= cisco_type_trans,
+	.ioctl		= cisco_ioctl,
+	.netif_rx	= cisco_rx,
+	.module		= THIS_MODULE,
+};
+
+static const struct header_ops cisco_header_ops = {
+	.create = cisco_hard_header,
+};
+
+static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+	cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
+	const size_t size = sizeof(cisco_proto);
+	cisco_proto new_settings;
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	int result;
+
+	switch (ifr->ifr_settings.type) {
+	case IF_GET_PROTO:
+		if (dev_to_hdlc(dev)->proto != &proto)
+			return -EINVAL;
+		ifr->ifr_settings.type = IF_PROTO_CISCO;
+		if (ifr->ifr_settings.size < size) {
+			ifr->ifr_settings.size = size; /* data size wanted */
+			return -ENOBUFS;
+		}
+		if (copy_to_user(cisco_s, &state(hdlc)->settings, size))
+			return -EFAULT;
+		return 0;
+
+	case IF_PROTO_CISCO:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (dev->flags & IFF_UP)
+			return -EBUSY;
+
+		if (copy_from_user(&new_settings, cisco_s, size))
+			return -EFAULT;
+
+		if (new_settings.interval < 1 ||
+		    new_settings.timeout < 2)
+			return -EINVAL;
+
+		result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
+		if (result)
+			return result;
+
+		result = attach_hdlc_protocol(dev, &proto,
+					      sizeof(struct cisco_state));
+		if (result)
+			return result;
+
+		memcpy(&state(hdlc)->settings, &new_settings, size);
+		spin_lock_init(&state(hdlc)->lock);
+		dev->header_ops = &cisco_header_ops;
+		dev->type = ARPHRD_CISCO;
+		netif_dormant_on(dev);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+
+static int __init mod_init(void)
+{
+	register_hdlc_protocol(&proto);
+	return 0;
+}
+
+
+
+static void __exit mod_exit(void)
+{
+	unregister_hdlc_protocol(&proto);
+}
+
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC");
+MODULE_LICENSE("GPL v2");
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_fr.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_fr.c
new file mode 100644
index 0000000..7c6cb4f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_fr.c
@@ -0,0 +1,1295 @@
+/*
+ * Generic HDLC support routines for Linux
+ * Frame Relay support
+ *
+ * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+
+            Theory of PVC state
+
+ DCE mode:
+
+ (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
+         0,x -> 1,1 if "link reliable" when sending FULL STATUS
+         1,1 -> 1,0 if received FULL STATUS ACK
+
+ (active)    -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
+             -> 1 when "PVC up" and (exist,new) = 1,0
+
+ DTE mode:
+ (exist,new,active) = FULL STATUS if "link reliable"
+		    = 0, 0, 0 if "link unreliable"
+ No LMI:
+ active = open and "link reliable"
+ exist = new = not used
+
+ CCITT LMI: ITU-T Q.933 Annex A
+ ANSI LMI: ANSI T1.617 Annex D
+ CISCO LMI: the original, aka "Gang of Four" LMI
+
+*/
+
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/hdlc.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pkt_sched.h>
+#include <linux/poll.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+
+#undef DEBUG_PKT
+#undef DEBUG_ECN
+#undef DEBUG_LINK
+#undef DEBUG_PROTO
+#undef DEBUG_PVC
+
+#define FR_UI			0x03
+#define FR_PAD			0x00
+
+#define NLPID_IP		0xCC
+#define NLPID_IPV6		0x8E
+#define NLPID_SNAP		0x80
+#define NLPID_PAD		0x00
+#define NLPID_CCITT_ANSI_LMI	0x08
+#define NLPID_CISCO_LMI		0x09
+
+
+#define LMI_CCITT_ANSI_DLCI	   0 /* LMI DLCI */
+#define LMI_CISCO_DLCI		1023
+
+#define LMI_CALLREF		0x00 /* Call Reference */
+#define LMI_ANSI_LOCKSHIFT	0x95 /* ANSI locking shift */
+#define LMI_ANSI_CISCO_REPTYPE	0x01 /* report type */
+#define LMI_CCITT_REPTYPE	0x51
+#define LMI_ANSI_CISCO_ALIVE	0x03 /* keep alive */
+#define LMI_CCITT_ALIVE		0x53
+#define LMI_ANSI_CISCO_PVCSTAT	0x07 /* PVC status */
+#define LMI_CCITT_PVCSTAT	0x57
+
+#define LMI_FULLREP		0x00 /* full report  */
+#define LMI_INTEGRITY		0x01 /* link integrity report */
+#define LMI_SINGLE		0x02 /* single PVC report */
+
+#define LMI_STATUS_ENQUIRY      0x75
+#define LMI_STATUS              0x7D /* reply */
+
+#define LMI_REPT_LEN               1 /* report type element length */
+#define LMI_INTEG_LEN              2 /* link integrity element length */
+
+#define LMI_CCITT_CISCO_LENGTH	  13 /* LMI frame lengths */
+#define LMI_ANSI_LENGTH		  14
+
+
+typedef struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	unsigned ea1:	1;
+	unsigned cr:	1;
+	unsigned dlcih:	6;
+
+	unsigned ea2:	1;
+	unsigned de:	1;
+	unsigned becn:	1;
+	unsigned fecn:	1;
+	unsigned dlcil:	4;
+#else
+	unsigned dlcih:	6;
+	unsigned cr:	1;
+	unsigned ea1:	1;
+
+	unsigned dlcil:	4;
+	unsigned fecn:	1;
+	unsigned becn:	1;
+	unsigned de:	1;
+	unsigned ea2:	1;
+#endif
+}__packed fr_hdr;
+
+
+typedef struct pvc_device_struct {
+	struct net_device *frad;
+	struct net_device *main;
+	struct net_device *ether;	/* bridged Ethernet interface	*/
+	struct pvc_device_struct *next;	/* Sorted in ascending DLCI order */
+	int dlci;
+	int open_count;
+
+	struct {
+		unsigned int new: 1;
+		unsigned int active: 1;
+		unsigned int exist: 1;
+		unsigned int deleted: 1;
+		unsigned int fecn: 1;
+		unsigned int becn: 1;
+		unsigned int bandwidth;	/* Cisco LMI reporting only */
+	}state;
+}pvc_device;
+
+struct frad_state {
+	fr_proto settings;
+	pvc_device *first_pvc;
+	int dce_pvc_count;
+
+	struct timer_list timer;
+	unsigned long last_poll;
+	int reliable;
+	int dce_changed;
+	int request;
+	int fullrep_sent;
+	u32 last_errors; /* last errors bit list */
+	u8 n391cnt;
+	u8 txseq; /* TX sequence number */
+	u8 rxseq; /* RX sequence number */
+};
+
+
+static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);
+
+
+static inline u16 q922_to_dlci(u8 *hdr)
+{
+	return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
+}
+
+
+static inline void dlci_to_q922(u8 *hdr, u16 dlci)
+{
+	hdr[0] = (dlci >> 2) & 0xFC;
+	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
+}
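+
+/* Worked example: dlci_to_q922() encodes DLCI 16 as
+ * hdr[0] = (16 >> 2) & 0xFC = 0x04 and
+ * hdr[1] = ((16 << 4) & 0xF0) | 0x01 = 0x01 (the low bit being the
+ * address-extension bit), and q922_to_dlci() recovers
+ * ((0x04 & 0xFC) << 2) | ((0x01 & 0xF0) >> 4) = 16.
+ */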
+
+
+static inline struct frad_state* state(hdlc_device *hdlc)
+{
+	return(struct frad_state *)(hdlc->state);
+}
+
+
+static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci)
+{
+	pvc_device *pvc = state(hdlc)->first_pvc;
+
+	while (pvc) {
+		if (pvc->dlci == dlci)
+			return pvc;
+		if (pvc->dlci > dlci)
+			return NULL; /* the list is sorted */
+		pvc = pvc->next;
+	}
+
+	return NULL;
+}
+
+
+static pvc_device* add_pvc(struct net_device *dev, u16 dlci)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;
+
+	while (*pvc_p) {
+		if ((*pvc_p)->dlci == dlci)
+			return *pvc_p;
+		if ((*pvc_p)->dlci > dlci)
+			break;	/* the list is sorted */
+		pvc_p = &(*pvc_p)->next;
+	}
+
+	pvc = kzalloc(sizeof(pvc_device), GFP_ATOMIC);
+#ifdef DEBUG_PVC
+	printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
+#endif
+	if (!pvc)
+		return NULL;
+
+	pvc->dlci = dlci;
+	pvc->frad = dev;
+	pvc->next = *pvc_p;	/* Put it in the chain */
+	*pvc_p = pvc;
+	return pvc;
+}
+
+
+static inline int pvc_is_used(pvc_device *pvc)
+{
+	return pvc->main || pvc->ether;
+}
+
+
+static inline void pvc_carrier(int on, pvc_device *pvc)
+{
+	if (on) {
+		if (pvc->main)
+			if (!netif_carrier_ok(pvc->main))
+				netif_carrier_on(pvc->main);
+		if (pvc->ether)
+			if (!netif_carrier_ok(pvc->ether))
+				netif_carrier_on(pvc->ether);
+	} else {
+		if (pvc->main)
+			if (netif_carrier_ok(pvc->main))
+				netif_carrier_off(pvc->main);
+		if (pvc->ether)
+			if (netif_carrier_ok(pvc->ether))
+				netif_carrier_off(pvc->ether);
+	}
+}
+
+
+static inline void delete_unused_pvcs(hdlc_device *hdlc)
+{
+	pvc_device **pvc_p = &state(hdlc)->first_pvc;
+
+	while (*pvc_p) {
+		if (!pvc_is_used(*pvc_p)) {
+			pvc_device *pvc = *pvc_p;
+#ifdef DEBUG_PVC
+			printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
+#endif
+			*pvc_p = pvc->next;
+			kfree(pvc);
+			continue;
+		}
+		pvc_p = &(*pvc_p)->next;
+	}
+}
+
+
+static inline struct net_device** get_dev_p(pvc_device *pvc, int type)
+{
+	if (type == ARPHRD_ETHER)
+		return &pvc->ether;
+	else
+		return &pvc->main;
+}
+
+
+static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
+{
+	u16 head_len;
+	struct sk_buff *skb = *skb_p;
+
+	switch (skb->protocol) {
+	case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
+		head_len = 4;
+		skb_push(skb, head_len);
+		skb->data[3] = NLPID_CCITT_ANSI_LMI;
+		break;
+
+	case cpu_to_be16(NLPID_CISCO_LMI):
+		head_len = 4;
+		skb_push(skb, head_len);
+		skb->data[3] = NLPID_CISCO_LMI;
+		break;
+
+	case cpu_to_be16(ETH_P_IP):
+		head_len = 4;
+		skb_push(skb, head_len);
+		skb->data[3] = NLPID_IP;
+		break;
+
+	case cpu_to_be16(ETH_P_IPV6):
+		head_len = 4;
+		skb_push(skb, head_len);
+		skb->data[3] = NLPID_IPV6;
+		break;
+
+	case cpu_to_be16(ETH_P_802_3):
+		head_len = 10;
+		if (skb_headroom(skb) < head_len) {
+			struct sk_buff *skb2 = skb_realloc_headroom(skb,
+								    head_len);
+			if (!skb2)
+				return -ENOBUFS;
+			dev_kfree_skb(skb);
+			skb = *skb_p = skb2;
+		}
+		skb_push(skb, head_len);
+		skb->data[3] = FR_PAD;
+		skb->data[4] = NLPID_SNAP;
+		skb->data[5] = FR_PAD;
+		skb->data[6] = 0x80;
+		skb->data[7] = 0xC2;
+		skb->data[8] = 0x00;
+		skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
+		break;
+
+	default:
+		head_len = 10;
+		skb_push(skb, head_len);
+		skb->data[3] = FR_PAD;
+		skb->data[4] = NLPID_SNAP;
+		skb->data[5] = FR_PAD;
+		skb->data[6] = FR_PAD;
+		skb->data[7] = FR_PAD;
+		*(__be16*)(skb->data + 8) = skb->protocol;
+	}
+
+	dlci_to_q922(skb->data, dlci);
+	skb->data[2] = FR_UI;
+	return 0;
+}
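+
+/* Example of the result: an IPv4 packet on DLCI 16 leaves here behind the
+ * 4-byte header 0x04 0x01 0x03 0xCC (Q.922 address, UI control field,
+ * NLPID_IP), while bridged Ethernet gets the 10-byte Q.922/UI/SNAP header
+ * built in the ETH_P_802_3 case above.
+ */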
+
+
+
+static int pvc_open(struct net_device *dev)
+{
+	pvc_device *pvc = dev->ml_priv;
+
+	if ((pvc->frad->flags & IFF_UP) == 0)
+		return -EIO;  /* Frad must be UP in order to activate PVC */
+
+	if (pvc->open_count++ == 0) {
+		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
+		if (state(hdlc)->settings.lmi == LMI_NONE)
+			pvc->state.active = netif_carrier_ok(pvc->frad);
+
+		pvc_carrier(pvc->state.active, pvc);
+		state(hdlc)->dce_changed = 1;
+	}
+	return 0;
+}
+
+
+
+static int pvc_close(struct net_device *dev)
+{
+	pvc_device *pvc = dev->ml_priv;
+
+	if (--pvc->open_count == 0) {
+		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
+		if (state(hdlc)->settings.lmi == LMI_NONE)
+			pvc->state.active = 0;
+
+		if (state(hdlc)->settings.dce) {
+			state(hdlc)->dce_changed = 1;
+			pvc->state.active = 0;
+		}
+	}
+	return 0;
+}
+
+
+
+static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	pvc_device *pvc = dev->ml_priv;
+	fr_proto_pvc_info info;
+
+	if (ifr->ifr_settings.type == IF_GET_PROTO) {
+		if (dev->type == ARPHRD_ETHER)
+			ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
+		else
+			ifr->ifr_settings.type = IF_PROTO_FR_PVC;
+
+		if (ifr->ifr_settings.size < sizeof(info)) {
+			/* data size wanted */
+			ifr->ifr_settings.size = sizeof(info);
+			return -ENOBUFS;
+		}
+
+		info.dlci = pvc->dlci;
+		memcpy(info.master, pvc->frad->name, IFNAMSIZ);
+		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
+				 &info, sizeof(info)))
+			return -EFAULT;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	pvc_device *pvc = dev->ml_priv;
+
+	if (pvc->state.active) {
+		if (dev->type == ARPHRD_ETHER) {
+			int pad = ETH_ZLEN - skb->len;
+			if (pad > 0) { /* Pad the frame with zeros */
+				int len = skb->len;
+				if (skb_tailroom(skb) < pad)
+					if (pskb_expand_head(skb, 0, pad,
+							     GFP_ATOMIC)) {
+						dev->stats.tx_dropped++;
+						dev_kfree_skb(skb);
+						return NETDEV_TX_OK;
+					}
+				skb_put(skb, pad);
+				memset(skb->data + len, 0, pad);
+			}
+			skb->protocol = cpu_to_be16(ETH_P_802_3);
+		}
+		if (!fr_hard_header(&skb, pvc->dlci)) {
+			dev->stats.tx_bytes += skb->len;
+			dev->stats.tx_packets++;
+			if (pvc->state.fecn) /* TX Congestion counter */
+				dev->stats.tx_compressed++;
+			skb->dev = pvc->frad;
+			dev_queue_xmit(skb);
+			return NETDEV_TX_OK;
+		}
+	}
+
+	dev->stats.tx_dropped++;
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+static inline void fr_log_dlci_active(pvc_device *pvc)
+{
+	netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
+		    pvc->dlci,
+		    pvc->main ? pvc->main->name : "",
+		    pvc->main && pvc->ether ? " " : "",
+		    pvc->ether ? pvc->ether->name : "",
+		    pvc->state.new ? " new" : "",
+		    !pvc->state.exist ? "deleted" :
+		    pvc->state.active ? "active" : "inactive");
+}
+
+
+
+static inline u8 fr_lmi_nextseq(u8 x)
+{
+	x++;
+	return x ? x : 1;
+}
+
+
+static void fr_lmi_send(struct net_device *dev, int fullrep)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	struct sk_buff *skb;
+	pvc_device *pvc = state(hdlc)->first_pvc;
+	int lmi = state(hdlc)->settings.lmi;
+	int dce = state(hdlc)->settings.dce;
+	int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
+	int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
+	u8 *data;
+	int i = 0;
+
+	if (dce && fullrep) {
+		len += state(hdlc)->dce_pvc_count * (2 + stat_len);
+		if (len > HDLC_MAX_MRU) {
+			netdev_warn(dev, "Too many PVCs while sending LMI full report\n");
+			return;
+		}
+	}
+
+	skb = dev_alloc_skb(len);
+	if (!skb) {
+		netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
+		return;
+	}
+	memset(skb->data, 0, len);
+	skb_reserve(skb, 4);
+	if (lmi == LMI_CISCO) {
+		skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
+		fr_hard_header(&skb, LMI_CISCO_DLCI);
+	} else {
+		skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
+		fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
+	}
+	data = skb_tail_pointer(skb);
+	data[i++] = LMI_CALLREF;
+	data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
+	if (lmi == LMI_ANSI)
+		data[i++] = LMI_ANSI_LOCKSHIFT;
+	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
+		LMI_ANSI_CISCO_REPTYPE;
+	data[i++] = LMI_REPT_LEN;
+	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
+	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
+	data[i++] = LMI_INTEG_LEN;
+	data[i++] = state(hdlc)->txseq =
+		fr_lmi_nextseq(state(hdlc)->txseq);
+	data[i++] = state(hdlc)->rxseq;
+
+	if (dce && fullrep) {
+		while (pvc) {
+			data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
+				LMI_ANSI_CISCO_PVCSTAT;
+			data[i++] = stat_len;
+
+			/* LMI start/restart */
+			if (state(hdlc)->reliable && !pvc->state.exist) {
+				pvc->state.exist = pvc->state.new = 1;
+				fr_log_dlci_active(pvc);
+			}
+
+			/* ifconfig PVC up */
+			if (pvc->open_count && !pvc->state.active &&
+			    pvc->state.exist && !pvc->state.new) {
+				pvc_carrier(1, pvc);
+				pvc->state.active = 1;
+				fr_log_dlci_active(pvc);
+			}
+
+			if (lmi == LMI_CISCO) {
+				data[i] = pvc->dlci >> 8;
+				data[i + 1] = pvc->dlci & 0xFF;
+			} else {
+				data[i] = (pvc->dlci >> 4) & 0x3F;
+				data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
+				data[i + 2] = 0x80;
+			}
+
+			if (pvc->state.new)
+				data[i + 2] |= 0x08;
+			else if (pvc->state.active)
+				data[i + 2] |= 0x02;
+
+			i += stat_len;
+			pvc = pvc->next;
+		}
+	}
+
+	skb_put(skb, i);
+	skb->priority = TC_PRIO_CONTROL;
+	skb->dev = dev;
+	skb_reset_network_header(skb);
+
+	dev_queue_xmit(skb);
+}
+
+
+
+static void fr_set_link_state(int reliable, struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	pvc_device *pvc = state(hdlc)->first_pvc;
+
+	state(hdlc)->reliable = reliable;
+	if (reliable) {
+		netif_dormant_off(dev);
+		state(hdlc)->n391cnt = 0; /* Request full status */
+		state(hdlc)->dce_changed = 1;
+
+		if (state(hdlc)->settings.lmi == LMI_NONE) {
+			while (pvc) {	/* Activate all PVCs */
+				pvc_carrier(1, pvc);
+				pvc->state.exist = pvc->state.active = 1;
+				pvc->state.new = 0;
+				pvc = pvc->next;
+			}
+		}
+	} else {
+		netif_dormant_on(dev);
+		while (pvc) {		/* Deactivate all PVCs */
+			pvc_carrier(0, pvc);
+			pvc->state.exist = pvc->state.active = 0;
+			pvc->state.new = 0;
+			if (!state(hdlc)->settings.dce)
+				pvc->state.bandwidth = 0;
+			pvc = pvc->next;
+		}
+	}
+}
+
+
+static void fr_timer(unsigned long arg)
+{
+	struct net_device *dev = (struct net_device *)arg;
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	int i, cnt = 0, reliable;
+	u32 list;
+
+	if (state(hdlc)->settings.dce) {
+		reliable = state(hdlc)->request &&
+			time_before(jiffies, state(hdlc)->last_poll +
+				    state(hdlc)->settings.t392 * HZ);
+		state(hdlc)->request = 0;
+	} else {
+		state(hdlc)->last_errors <<= 1; /* Shift the list */
+		if (state(hdlc)->request) {
+			if (state(hdlc)->reliable)
+				netdev_info(dev, "No LMI status reply received\n");
+			state(hdlc)->last_errors |= 1;
+		}
+
+		list = state(hdlc)->last_errors;
+		for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
+			cnt += (list & 1);	/* errors count */
+
+		reliable = (cnt < state(hdlc)->settings.n392);
+	}
+
+	if (state(hdlc)->reliable != reliable) {
+		netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
+		fr_set_link_state(reliable, dev);
+	}
+
+	if (state(hdlc)->settings.dce)
+		state(hdlc)->timer.expires = jiffies +
+			state(hdlc)->settings.t392 * HZ;
+	else {
+		if (state(hdlc)->n391cnt)
+			state(hdlc)->n391cnt--;
+
+		fr_lmi_send(dev, state(hdlc)->n391cnt == 0);
+
+		state(hdlc)->last_poll = jiffies;
+		state(hdlc)->request = 1;
+		state(hdlc)->timer.expires = jiffies +
+			state(hdlc)->settings.t391 * HZ;
+	}
+
+	state(hdlc)->timer.function = fr_timer;
+	state(hdlc)->timer.data = arg;
+	add_timer(&state(hdlc)->timer);
+}
+
+
+static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	pvc_device *pvc;
+	u8 rxseq, txseq;
+	int lmi = state(hdlc)->settings.lmi;
+	int dce = state(hdlc)->settings.dce;
+	int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;
+
+	if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
+			LMI_CCITT_CISCO_LENGTH)) {
+		netdev_info(dev, "Short LMI frame\n");
+		return 1;
+	}
+
+	if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
+			     NLPID_CCITT_ANSI_LMI)) {
+		netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");
+		return 1;
+	}
+
+	if (skb->data[4] != LMI_CALLREF) {
+		netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",
+			    skb->data[4]);
+		return 1;
+	}
+
+	if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
+		netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",
+			    skb->data[5]);
+		return 1;
+	}
+
+	if (lmi == LMI_ANSI) {
+		if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
+			netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",
+				    skb->data[6]);
+			return 1;
+		}
+		i = 7;
+	} else
+		i = 6;
+
+	if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
+			     LMI_ANSI_CISCO_REPTYPE)) {
+		netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",
+			    skb->data[i]);
+		return 1;
+	}
+
+	if (skb->data[++i] != LMI_REPT_LEN) {
+		netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",
+			    skb->data[i]);
+		return 1;
+	}
+
+	reptype = skb->data[++i];
+	if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
+		netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",
+			    reptype);
+		return 1;
+	}
+
+	if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
+			       LMI_ANSI_CISCO_ALIVE)) {
+		netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",
+			    skb->data[i]);
+		return 1;
+	}
+
+	if (skb->data[++i] != LMI_INTEG_LEN) {
+		netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",
+			    skb->data[i]);
+		return 1;
+	}
+	i++;
+
+	state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
+	rxseq = skb->data[i++];	/* Should confirm our sequence */
+
+	txseq = state(hdlc)->txseq;
+
+	if (dce)
+		state(hdlc)->last_poll = jiffies;
+
+	error = 0;
+	if (!state(hdlc)->reliable)
+		error = 1;
+
+	if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
+		state(hdlc)->n391cnt = 0;
+		error = 1;
+	}
+
+	if (dce) {
+		if (state(hdlc)->fullrep_sent && !error) {
+/* Stop sending full report - the last one has been confirmed by DTE */
+			state(hdlc)->fullrep_sent = 0;
+			pvc = state(hdlc)->first_pvc;
+			while (pvc) {
+				if (pvc->state.new) {
+					pvc->state.new = 0;
+
+/* Tell DTE that new PVC is now active */
+					state(hdlc)->dce_changed = 1;
+				}
+				pvc = pvc->next;
+			}
+		}
+
+		if (state(hdlc)->dce_changed) {
+			reptype = LMI_FULLREP;
+			state(hdlc)->fullrep_sent = 1;
+			state(hdlc)->dce_changed = 0;
+		}
+
+		state(hdlc)->request = 1; /* got request */
+		fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
+		return 0;
+	}
+
+	/* DTE */
+
+	state(hdlc)->request = 0; /* got response, no request pending */
+
+	if (error)
+		return 0;
+
+	if (reptype != LMI_FULLREP)
+		return 0;
+
+	pvc = state(hdlc)->first_pvc;
+
+	while (pvc) {
+		pvc->state.deleted = 1;
+		pvc = pvc->next;
+	}
+
+	no_ram = 0;
+	while (skb->len >= i + 2 + stat_len) {
+		u16 dlci;
+		u32 bw;
+		unsigned int active, new;
+
+		if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
+				       LMI_ANSI_CISCO_PVCSTAT)) {
+			netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",
+				    skb->data[i]);
+			return 1;
+		}
+
+		if (skb->data[++i] != stat_len) {
+			netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",
+				    skb->data[i]);
+			return 1;
+		}
+		i++;
+
+		new = !! (skb->data[i + 2] & 0x08);
+		active = !! (skb->data[i + 2] & 0x02);
+		if (lmi == LMI_CISCO) {
+			dlci = (skb->data[i] << 8) | skb->data[i + 1];
+			bw = (skb->data[i + 3] << 16) |
+				(skb->data[i + 4] << 8) |
+				(skb->data[i + 5]);
+		} else {
+			dlci = ((skb->data[i] & 0x3F) << 4) |
+				((skb->data[i + 1] & 0x78) >> 3);
+			bw = 0;
+		}
+
+		pvc = add_pvc(dev, dlci);
+
+		if (!pvc && !no_ram) {
+			netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");
+			no_ram = 1;
+		}
+
+		if (pvc) {
+			pvc->state.exist = 1;
+			pvc->state.deleted = 0;
+			if (active != pvc->state.active ||
+			    new != pvc->state.new ||
+			    bw != pvc->state.bandwidth ||
+			    !pvc->state.exist) {
+				pvc->state.new = new;
+				pvc->state.active = active;
+				pvc->state.bandwidth = bw;
+				pvc_carrier(active, pvc);
+				fr_log_dlci_active(pvc);
+			}
+		}
+
+		i += stat_len;
+	}
+
+	pvc = state(hdlc)->first_pvc;
+
+	while (pvc) {
+		if (pvc->state.deleted && pvc->state.exist) {
+			pvc_carrier(0, pvc);
+			pvc->state.active = pvc->state.new = 0;
+			pvc->state.exist = 0;
+			pvc->state.bandwidth = 0;
+			fr_log_dlci_active(pvc);
+		}
+		pvc = pvc->next;
+	}
+
+	/* Next full report after N391 polls */
+	state(hdlc)->n391cnt = state(hdlc)->settings.n391;
+
+	return 0;
+}
+
+
+static int fr_rx(struct sk_buff *skb)
+{
+	struct net_device *frad = skb->dev;
+	hdlc_device *hdlc = dev_to_hdlc(frad);
+	fr_hdr *fh = (fr_hdr*)skb->data;
+	u8 *data = skb->data;
+	u16 dlci;
+	pvc_device *pvc;
+	struct net_device *dev = NULL;
+
+	if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
+		goto rx_error;
+
+	dlci = q922_to_dlci(skb->data);
+
+	if ((dlci == LMI_CCITT_ANSI_DLCI &&
+	     (state(hdlc)->settings.lmi == LMI_ANSI ||
+	      state(hdlc)->settings.lmi == LMI_CCITT)) ||
+	    (dlci == LMI_CISCO_DLCI &&
+	     state(hdlc)->settings.lmi == LMI_CISCO)) {
+		if (fr_lmi_recv(frad, skb))
+			goto rx_error;
+		dev_kfree_skb_any(skb);
+		return NET_RX_SUCCESS;
+	}
+
+	pvc = find_pvc(hdlc, dlci);
+	if (!pvc) {
+#ifdef DEBUG_PKT
+		netdev_info(frad, "No PVC for received frame's DLCI %d\n",
+			    dlci);
+#endif
+		dev_kfree_skb_any(skb);
+		return NET_RX_DROP;
+	}
+
+	if (pvc->state.fecn != fh->fecn) {
+#ifdef DEBUG_ECN
+		printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
+		       dlci, fh->fecn ? "N" : "FF");
+#endif
+		pvc->state.fecn ^= 1;
+	}
+
+	if (pvc->state.becn != fh->becn) {
+#ifdef DEBUG_ECN
+		printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
+		       dlci, fh->becn ? "N" : "FF");
+#endif
+		pvc->state.becn ^= 1;
+	}
+
+
+	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+		frad->stats.rx_dropped++;
+		return NET_RX_DROP;
+	}
+
+	if (data[3] == NLPID_IP) {
+		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
+		dev = pvc->main;
+		skb->protocol = htons(ETH_P_IP);
+
+	} else if (data[3] == NLPID_IPV6) {
+		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
+		dev = pvc->main;
+		skb->protocol = htons(ETH_P_IPV6);
+
+	} else if (skb->len > 10 && data[3] == FR_PAD &&
+		   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
+		u16 oui = ntohs(*(__be16*)(data + 6));
+		u16 pid = ntohs(*(__be16*)(data + 8));
+		skb_pull(skb, 10);
+
+		switch ((((u32)oui) << 16) | pid) {
+		case ETH_P_ARP: /* routed frame with SNAP */
+		case ETH_P_IPX:
+		case ETH_P_IP:	/* a long variant */
+		case ETH_P_IPV6:
+			dev = pvc->main;
+			skb->protocol = htons(pid);
+			break;
+
+		case 0x80C20007: /* bridged Ethernet frame */
+			if ((dev = pvc->ether) != NULL)
+				skb->protocol = eth_type_trans(skb, dev);
+			break;
+
+		default:
+			netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
+				    oui, pid);
+			dev_kfree_skb_any(skb);
+			return NET_RX_DROP;
+		}
+	} else {
+		netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
+			    data[3], skb->len);
+		dev_kfree_skb_any(skb);
+		return NET_RX_DROP;
+	}
+
+	if (dev) {
+		dev->stats.rx_packets++; /* PVC traffic */
+		dev->stats.rx_bytes += skb->len;
+		if (pvc->state.becn)
+			dev->stats.rx_compressed++;
+		skb->dev = dev;
+		netif_rx(skb);
+		return NET_RX_SUCCESS;
+	} else {
+		dev_kfree_skb_any(skb);
+		return NET_RX_DROP;
+	}
+
+ rx_error:
+	frad->stats.rx_errors++; /* Mark error */
+	dev_kfree_skb_any(skb);
+	return NET_RX_DROP;
+}
+
+
+
+static void fr_start(struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+#ifdef DEBUG_LINK
+	printk(KERN_DEBUG "fr_start\n");
+#endif
+	if (state(hdlc)->settings.lmi != LMI_NONE) {
+		state(hdlc)->reliable = 0;
+		state(hdlc)->dce_changed = 1;
+		state(hdlc)->request = 0;
+		state(hdlc)->fullrep_sent = 0;
+		state(hdlc)->last_errors = 0xFFFFFFFF;
+		state(hdlc)->n391cnt = 0;
+		state(hdlc)->txseq = state(hdlc)->rxseq = 0;
+
+		init_timer(&state(hdlc)->timer);
+		/* First poll after 1 s */
+		state(hdlc)->timer.expires = jiffies + HZ;
+		state(hdlc)->timer.function = fr_timer;
+		state(hdlc)->timer.data = (unsigned long)dev;
+		add_timer(&state(hdlc)->timer);
+	} else
+		fr_set_link_state(1, dev);
+}
+
+
+static void fr_stop(struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+#ifdef DEBUG_LINK
+	printk(KERN_DEBUG "fr_stop\n");
+#endif
+	if (state(hdlc)->settings.lmi != LMI_NONE)
+		del_timer_sync(&state(hdlc)->timer);
+	fr_set_link_state(0, dev);
+}
+
+
+static void fr_close(struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	pvc_device *pvc = state(hdlc)->first_pvc;
+
+	while (pvc) {		/* Shutdown all PVCs for this FRAD */
+		if (pvc->main)
+			dev_close(pvc->main);
+		if (pvc->ether)
+			dev_close(pvc->ether);
+		pvc = pvc->next;
+	}
+}
+
+
+static void pvc_setup(struct net_device *dev)
+{
+	dev->type = ARPHRD_DLCI;
+	dev->flags = IFF_POINTOPOINT;
+	dev->hard_header_len = 10;
+	dev->addr_len = 2;
+	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+}
+
+static const struct net_device_ops pvc_ops = {
+	.ndo_open       = pvc_open,
+	.ndo_stop       = pvc_close,
+	.ndo_change_mtu = hdlc_change_mtu,
+	.ndo_start_xmit = pvc_xmit,
+	.ndo_do_ioctl   = pvc_ioctl,
+};
+
+static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
+{
+	hdlc_device *hdlc = dev_to_hdlc(frad);
+	pvc_device *pvc;
+	struct net_device *dev;
+	int used;
+
+	if ((pvc = add_pvc(frad, dlci)) == NULL) {
+		netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
+		return -ENOBUFS;
+	}
+
+	if (*get_dev_p(pvc, type))
+		return -EEXIST;
+
+	used = pvc_is_used(pvc);
+
+	if (type == ARPHRD_ETHER) {
+		dev = alloc_netdev(0, "pvceth%d", ether_setup);
+		if (dev)	/* don't dereference a failed allocation */
+			dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	} else
+		dev = alloc_netdev(0, "pvc%d", pvc_setup);
+
+	if (!dev) {
+		netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
+		delete_unused_pvcs(hdlc);
+		return -ENOBUFS;
+	}
+
+	if (type == ARPHRD_ETHER)
+		eth_hw_addr_random(dev);
+	else {
+		*(__be16*)dev->dev_addr = htons(dlci);
+		dlci_to_q922(dev->broadcast, dlci);
+	}
+	dev->netdev_ops = &pvc_ops;
+	dev->mtu = HDLC_MAX_MTU;
+	dev->tx_queue_len = 0;
+	dev->ml_priv = pvc;
+
+	if (register_netdevice(dev) != 0) {
+		free_netdev(dev);
+		delete_unused_pvcs(hdlc);
+		return -EIO;
+	}
+
+	dev->destructor = free_netdev;
+	*get_dev_p(pvc, type) = dev;
+	if (!used) {
+		state(hdlc)->dce_changed = 1;
+		state(hdlc)->dce_pvc_count++;
+	}
+	return 0;
+}
+
+
+
+static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
+{
+	pvc_device *pvc;
+	struct net_device *dev;
+
+	if ((pvc = find_pvc(hdlc, dlci)) == NULL)
+		return -ENOENT;
+
+	if ((dev = *get_dev_p(pvc, type)) == NULL)
+		return -ENOENT;
+
+	if (dev->flags & IFF_UP)
+		return -EBUSY;		/* PVC in use */
+
+	unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
+	*get_dev_p(pvc, type) = NULL;
+
+	if (!pvc_is_used(pvc)) {
+		state(hdlc)->dce_pvc_count--;
+		state(hdlc)->dce_changed = 1;
+	}
+	delete_unused_pvcs(hdlc);
+	return 0;
+}
+
+
+
+static void fr_destroy(struct net_device *frad)
+{
+	hdlc_device *hdlc = dev_to_hdlc(frad);
+	pvc_device *pvc = state(hdlc)->first_pvc;
+	state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
+	state(hdlc)->dce_pvc_count = 0;
+	state(hdlc)->dce_changed = 1;
+
+	while (pvc) {
+		pvc_device *next = pvc->next;
+		/* destructors will free_netdev() main and ether */
+		if (pvc->main)
+			unregister_netdevice(pvc->main);
+
+		if (pvc->ether)
+			unregister_netdevice(pvc->ether);
+
+		kfree(pvc);
+		pvc = next;
+	}
+}
+
+
+static struct hdlc_proto proto = {
+	.close		= fr_close,
+	.start		= fr_start,
+	.stop		= fr_stop,
+	.detach		= fr_destroy,
+	.ioctl		= fr_ioctl,
+	.netif_rx	= fr_rx,
+	.module		= THIS_MODULE,
+};
+
+
+static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+	fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
+	const size_t size = sizeof(fr_proto);
+	fr_proto new_settings;
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	fr_proto_pvc pvc;
+	int result;
+
+	switch (ifr->ifr_settings.type) {
+	case IF_GET_PROTO:
+		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
+			return -EINVAL;
+		ifr->ifr_settings.type = IF_PROTO_FR;
+		if (ifr->ifr_settings.size < size) {
+			ifr->ifr_settings.size = size; /* data size wanted */
+			return -ENOBUFS;
+		}
+		if (copy_to_user(fr_s, &state(hdlc)->settings, size))
+			return -EFAULT;
+		return 0;
+
+	case IF_PROTO_FR:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (dev->flags & IFF_UP)
+			return -EBUSY;
+
+		if (copy_from_user(&new_settings, fr_s, size))
+			return -EFAULT;
+
+		if (new_settings.lmi == LMI_DEFAULT)
+			new_settings.lmi = LMI_ANSI;
+
+		if ((new_settings.lmi != LMI_NONE &&
+		     new_settings.lmi != LMI_ANSI &&
+		     new_settings.lmi != LMI_CCITT &&
+		     new_settings.lmi != LMI_CISCO) ||
+		    new_settings.t391 < 1 ||
+		    new_settings.t392 < 2 ||
+		    new_settings.n391 < 1 ||
+		    new_settings.n392 < 1 ||
+		    new_settings.n393 < new_settings.n392 ||
+		    new_settings.n393 > 32 ||
+		    (new_settings.dce != 0 &&
+		     new_settings.dce != 1))
+			return -EINVAL;
+
+		result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
+		if (result)
+			return result;
+
+		if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
+			result = attach_hdlc_protocol(dev, &proto,
+						      sizeof(struct frad_state));
+			if (result)
+				return result;
+			state(hdlc)->first_pvc = NULL;
+			state(hdlc)->dce_pvc_count = 0;
+		}
+		memcpy(&state(hdlc)->settings, &new_settings, size);
+		dev->type = ARPHRD_FRAD;
+		return 0;
+
+	case IF_PROTO_FR_ADD_PVC:
+	case IF_PROTO_FR_DEL_PVC:
+	case IF_PROTO_FR_ADD_ETH_PVC:
+	case IF_PROTO_FR_DEL_ETH_PVC:
+		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
+			return -EINVAL;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
+				   sizeof(fr_proto_pvc)))
+			return -EFAULT;
+
+		if (pvc.dlci <= 0 || pvc.dlci >= 1024)
+			return -EINVAL;	/* Only 10 bits, DLCI 0 reserved */
+
+		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
+		    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
+			result = ARPHRD_ETHER; /* bridged Ethernet device */
+		else
+			result = ARPHRD_DLCI;
+
+		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
+		    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
+			return fr_add_pvc(dev, pvc.dlci, result);
+		else
+			return fr_del_pvc(hdlc, pvc.dlci, result);
+	}
+
+	return -EINVAL;
+}
+
+
+static int __init mod_init(void)
+{
+	register_hdlc_protocol(&proto);
+	return 0;
+}
+
+
+static void __exit mod_exit(void)
+{
+	unregister_hdlc_protocol(&proto);
+}
+
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
+MODULE_LICENSE("GPL v2");
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_ppp.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_ppp.c
new file mode 100644
index 0000000..0d76455
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_ppp.c
@@ -0,0 +1,716 @@
+/*
+ * Generic HDLC support routines for Linux
+ * Point-to-point protocol support
+ *
+ * Copyright (C) 1999 - 2008 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/hdlc.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pkt_sched.h>
+#include <linux/poll.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define DEBUG_CP		0 /* also bytes# to dump */
+#define DEBUG_STATE		0
+#define DEBUG_HARD_HEADER	0
+
+#define HDLC_ADDR_ALLSTATIONS	0xFF
+#define HDLC_CTRL_UI		0x03
+
+#define PID_LCP			0xC021
+#define PID_IP			0x0021
+#define PID_IPCP		0x8021
+#define PID_IPV6		0x0057
+#define PID_IPV6CP		0x8057
+
+enum {IDX_LCP = 0, IDX_IPCP, IDX_IPV6CP, IDX_COUNT};
+enum {CP_CONF_REQ = 1, CP_CONF_ACK, CP_CONF_NAK, CP_CONF_REJ, CP_TERM_REQ,
+      CP_TERM_ACK, CP_CODE_REJ, LCP_PROTO_REJ, LCP_ECHO_REQ, LCP_ECHO_REPLY,
+      LCP_DISC_REQ, CP_CODES};
+#if DEBUG_CP
+static const char *const code_names[CP_CODES] = {
+	"0", "ConfReq", "ConfAck", "ConfNak", "ConfRej", "TermReq",
+	"TermAck", "CodeRej", "ProtoRej", "EchoReq", "EchoReply", "Discard"
+};
+static char debug_buffer[64 + 3 * DEBUG_CP];
+#endif
+
+enum {LCP_OPTION_MRU = 1, LCP_OPTION_ACCM, LCP_OPTION_MAGIC = 5};
+
+struct hdlc_header {
+	u8 address;
+	u8 control;
+	__be16 protocol;
+};
+
+struct cp_header {
+	u8 code;
+	u8 id;
+	__be16 len;
+};
+
+
+struct proto {
+	struct net_device *dev;
+	struct timer_list timer;
+	unsigned long timeout;
+	u16 pid;		/* protocol ID */
+	u8 state;
+	u8 cr_id;		/* ID of last Configuration-Request */
+	u8 restart_counter;
+};
+
+struct ppp {
+	struct proto protos[IDX_COUNT];
+	spinlock_t lock;
+	unsigned long last_pong;
+	unsigned int req_timeout, cr_retries, term_retries;
+	unsigned int keepalive_interval, keepalive_timeout;
+	u8 seq;			/* local sequence number for requests */
+	u8 echo_id;		/* ID of last Echo-Request (LCP) */
+};
+
+enum {CLOSED = 0, STOPPED, STOPPING, REQ_SENT, ACK_RECV, ACK_SENT, OPENED,
+      STATES, STATE_MASK = 0xF};
+enum {START = 0, STOP, TO_GOOD, TO_BAD, RCR_GOOD, RCR_BAD, RCA, RCN, RTR, RTA,
+      RUC, RXJ_GOOD, RXJ_BAD, EVENTS};
+enum {INV = 0x10, IRC = 0x20, ZRC = 0x40, SCR = 0x80, SCA = 0x100,
+      SCN = 0x200, STR = 0x400, STA = 0x800, SCJ = 0x1000};
+
+#if DEBUG_STATE
+static const char *const state_names[STATES] = {
+	"Closed", "Stopped", "Stopping", "ReqSent", "AckRecv", "AckSent",
+	"Opened"
+};
+static const char *const event_names[EVENTS] = {
+	"Start", "Stop", "TO+", "TO-", "RCR+", "RCR-", "RCA", "RCN",
+	"RTR", "RTA", "RUC", "RXJ+", "RXJ-"
+};
+#endif
+
+static struct sk_buff_head tx_queue; /* used when holding the spin lock */
+
+static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr);
+
+static inline struct ppp* get_ppp(struct net_device *dev)
+{
+	return (struct ppp *)dev_to_hdlc(dev)->state;
+}
+
+static inline struct proto* get_proto(struct net_device *dev, u16 pid)
+{
+	struct ppp *ppp = get_ppp(dev);
+
+	switch (pid) {
+	case PID_LCP:
+		return &ppp->protos[IDX_LCP];
+	case PID_IPCP:
+		return &ppp->protos[IDX_IPCP];
+	case PID_IPV6CP:
+		return &ppp->protos[IDX_IPV6CP];
+	default:
+		return NULL;
+	}
+}
+
+static inline const char* proto_name(u16 pid)
+{
+	switch (pid) {
+	case PID_LCP:
+		return "LCP";
+	case PID_IPCP:
+		return "IPCP";
+	case PID_IPV6CP:
+		return "IPV6CP";
+	default:
+		return NULL;
+	}
+}
+
+static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+	struct hdlc_header *data = (struct hdlc_header*)skb->data;
+
+	if (skb->len < sizeof(struct hdlc_header))
+		return htons(ETH_P_HDLC);
+	if (data->address != HDLC_ADDR_ALLSTATIONS ||
+	    data->control != HDLC_CTRL_UI)
+		return htons(ETH_P_HDLC);
+
+	switch (data->protocol) {
+	case cpu_to_be16(PID_IP):
+		skb_pull(skb, sizeof(struct hdlc_header));
+		return htons(ETH_P_IP);
+
+	case cpu_to_be16(PID_IPV6):
+		skb_pull(skb, sizeof(struct hdlc_header));
+		return htons(ETH_P_IPV6);
+
+	default:
+		return htons(ETH_P_HDLC);
+	}
+}
+
+
+static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev,
+			   u16 type, const void *daddr, const void *saddr,
+			   unsigned int len)
+{
+	struct hdlc_header *data;
+#if DEBUG_HARD_HEADER
+	printk(KERN_DEBUG "%s: ppp_hard_header() called\n", dev->name);
+#endif
+
+	skb_push(skb, sizeof(struct hdlc_header));
+	data = (struct hdlc_header*)skb->data;
+
+	data->address = HDLC_ADDR_ALLSTATIONS;
+	data->control = HDLC_CTRL_UI;
+	switch (type) {
+	case ETH_P_IP:
+		data->protocol = htons(PID_IP);
+		break;
+	case ETH_P_IPV6:
+		data->protocol = htons(PID_IPV6);
+		break;
+	case PID_LCP:
+	case PID_IPCP:
+	case PID_IPV6CP:
+		data->protocol = htons(type);
+		break;
+	default:		/* unknown protocol */
+		data->protocol = 0;
+	}
+	return sizeof(struct hdlc_header);
+}
+
+
+static void ppp_tx_flush(void)
+{
+	struct sk_buff *skb;
+	while ((skb = skb_dequeue(&tx_queue)) != NULL)
+		dev_queue_xmit(skb);
+}
+
+static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
+		      u8 id, unsigned int len, const void *data)
+{
+	struct sk_buff *skb;
+	struct cp_header *cp;
+	unsigned int magic_len = 0;
+	static u32 magic;
+
+#if DEBUG_CP
+	int i;
+	char *ptr;
+#endif
+
+	if (pid == PID_LCP && (code == LCP_ECHO_REQ || code == LCP_ECHO_REPLY))
+		magic_len = sizeof(magic);
+
+	skb = dev_alloc_skb(sizeof(struct hdlc_header) +
+			    sizeof(struct cp_header) + magic_len + len);
+	if (!skb) {
+		netdev_warn(dev, "out of memory in ppp_tx_cp()\n");
+		return;
+	}
+	skb_reserve(skb, sizeof(struct hdlc_header));
+
+	cp = (struct cp_header *)skb_put(skb, sizeof(struct cp_header));
+	cp->code = code;
+	cp->id = id;
+	cp->len = htons(sizeof(struct cp_header) + magic_len + len);
+
+	if (magic_len)
+		memcpy(skb_put(skb, magic_len), &magic, magic_len);
+	if (len)
+		memcpy(skb_put(skb, len), data, len);
+
+#if DEBUG_CP
+	BUG_ON(code >= CP_CODES);
+	ptr = debug_buffer;
+	*ptr = '\x0';
+	for (i = 0; i < min_t(unsigned int, magic_len + len, DEBUG_CP); i++) {
+		sprintf(ptr, " %02X", skb->data[sizeof(struct cp_header) + i]);
+		ptr += strlen(ptr);
+	}
+	printk(KERN_DEBUG "%s: TX %s [%s id 0x%X]%s\n", dev->name,
+	       proto_name(pid), code_names[code], id, debug_buffer);
+#endif
+
+	ppp_hard_header(skb, dev, pid, NULL, NULL, 0);
+
+	skb->priority = TC_PRIO_CONTROL;
+	skb->dev = dev;
+	skb_reset_network_header(skb);
+	skb_queue_tail(&tx_queue, skb);
+}
+
+
+/* State transition table (compare STD-51)
+   Events                                   Actions
+   TO+  = Timeout with counter > 0          irc = Initialize-Restart-Count
+   TO-  = Timeout with counter expired      zrc = Zero-Restart-Count
+
+   RCR+ = Receive-Configure-Request (Good)  scr = Send-Configure-Request
+   RCR- = Receive-Configure-Request (Bad)
+   RCA  = Receive-Configure-Ack             sca = Send-Configure-Ack
+   RCN  = Receive-Configure-Nak/Rej         scn = Send-Configure-Nak/Rej
+
+   RTR  = Receive-Terminate-Request         str = Send-Terminate-Request
+   RTA  = Receive-Terminate-Ack             sta = Send-Terminate-Ack
+
+   RUC  = Receive-Unknown-Code              scj = Send-Code-Reject
+   RXJ+ = Receive-Code-Reject (permitted)
+       or Receive-Protocol-Reject
+   RXJ- = Receive-Code-Reject (catastrophic)
+       or Receive-Protocol-Reject
+*/
+static int cp_table[EVENTS][STATES] = {
+	/* CLOSED     STOPPED STOPPING REQ_SENT ACK_RECV ACK_SENT OPENED
+	     0           1         2       3       4      5          6    */
+	{IRC|SCR|3,     INV     , INV ,   INV   , INV ,  INV    ,   INV   }, /* START */
+	{   INV   ,      0      ,  0  ,    0    ,  0  ,   0     ,    0    }, /* STOP */
+	{   INV   ,     INV     ,STR|2,  SCR|3  ,SCR|3,  SCR|5  ,   INV   }, /* TO+ */
+	{   INV   ,     INV     ,  1  ,    1    ,  1  ,    1    ,   INV   }, /* TO- */
+	{  STA|0  ,IRC|SCR|SCA|5,  2  ,  SCA|5  ,SCA|6,  SCA|5  ,SCR|SCA|5}, /* RCR+ */
+	{  STA|0  ,IRC|SCR|SCN|3,  2  ,  SCN|3  ,SCN|4,  SCN|3  ,SCR|SCN|3}, /* RCR- */
+	{  STA|0  ,    STA|1    ,  2  ,  IRC|4  ,SCR|3,    6    , SCR|3   }, /* RCA */
+	{  STA|0  ,    STA|1    ,  2  ,IRC|SCR|3,SCR|3,IRC|SCR|5, SCR|3   }, /* RCN */
+	{  STA|0  ,    STA|1    ,STA|2,  STA|3  ,STA|3,  STA|3  ,ZRC|STA|2}, /* RTR */
+	{    0    ,      1      ,  1  ,    3    ,  3  ,    5    ,  SCR|3  }, /* RTA */
+	{  SCJ|0  ,    SCJ|1    ,SCJ|2,  SCJ|3  ,SCJ|4,  SCJ|5  ,  SCJ|6  }, /* RUC */
+	{    0    ,      1      ,  2  ,    3    ,  3  ,    5    ,    6    }, /* RXJ+ */
+	{    0    ,      1      ,  1  ,    1    ,  1  ,    1    ,IRC|STR|2}, /* RXJ- */
+};
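+
+/* Reading the table: in REQ_SENT (3) an RCA event gives IRC|4, i.e.
+ * reinitialize the restart counter and move to ACK_RECV, while RCR+ gives
+ * SCA|5 - send Configure-Ack and move to ACK_SENT.
+ */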
+
+
+/* SCA: RCR+ must supply id, len and data
+   SCN: RCR- must supply code, id, len and data
+   STA: RTR must supply id
+   SCJ: RUC must supply CP packet len and data */
+static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
+			 u8 id, unsigned int len, const void *data)
+{
+	int old_state, action;
+	struct ppp *ppp = get_ppp(dev);
+	struct proto *proto = get_proto(dev, pid);
+
+	old_state = proto->state;
+	BUG_ON(old_state >= STATES);
+	BUG_ON(event >= EVENTS);
+
+#if DEBUG_STATE
+	printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) %s ...\n", dev->name,
+	       proto_name(pid), event_names[event], state_names[proto->state]);
+#endif
+
+	action = cp_table[event][old_state];
+
+	proto->state = action & STATE_MASK;
+	if (action & (SCR | STR)) /* set Configure-Req/Terminate-Req timer */
+		mod_timer(&proto->timer, proto->timeout =
+			  jiffies + ppp->req_timeout * HZ);
+	if (action & ZRC)
+		proto->restart_counter = 0;
+	if (action & IRC)
+		proto->restart_counter = (proto->state == STOPPING) ?
+			ppp->term_retries : ppp->cr_retries;
+
+	if (action & SCR)	/* send Configure-Request */
+		ppp_tx_cp(dev, pid, CP_CONF_REQ, proto->cr_id = ++ppp->seq,
+			  0, NULL);
+	if (action & SCA)	/* send Configure-Ack */
+		ppp_tx_cp(dev, pid, CP_CONF_ACK, id, len, data);
+	if (action & SCN)	/* send Configure-Nak/Reject */
+		ppp_tx_cp(dev, pid, code, id, len, data);
+	if (action & STR)	/* send Terminate-Request */
+		ppp_tx_cp(dev, pid, CP_TERM_REQ, ++ppp->seq, 0, NULL);
+	if (action & STA)	/* send Terminate-Ack */
+		ppp_tx_cp(dev, pid, CP_TERM_ACK, id, 0, NULL);
+	if (action & SCJ)	/* send Code-Reject */
+		ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data);
+
+	if (old_state != OPENED && proto->state == OPENED) {
+		netdev_info(dev, "%s up\n", proto_name(pid));
+		if (pid == PID_LCP) {
+			netif_dormant_off(dev);
+			ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL);
+			ppp_cp_event(dev, PID_IPV6CP, START, 0, 0, 0, NULL);
+			ppp->last_pong = jiffies;
+			mod_timer(&proto->timer, proto->timeout =
+				  jiffies + ppp->keepalive_interval * HZ);
+		}
+	}
+	if (old_state == OPENED && proto->state != OPENED) {
+		netdev_info(dev, "%s down\n", proto_name(pid));
+		if (pid == PID_LCP) {
+			netif_dormant_on(dev);
+			ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL);
+			ppp_cp_event(dev, PID_IPV6CP, STOP, 0, 0, 0, NULL);
+		}
+	}
+	if (old_state != CLOSED && proto->state == CLOSED)
+		del_timer(&proto->timer);
+
+#if DEBUG_STATE
+	printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name,
+	       proto_name(pid), event_names[event], state_names[proto->state]);
+#endif
+}
+
+
+static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
+			    unsigned int req_len, const u8 *data)
+{
+	static u8 const valid_accm[6] = { LCP_OPTION_ACCM, 6, 0, 0, 0, 0 };
+	const u8 *opt;
+	u8 *out;
+	unsigned int len = req_len, nak_len = 0, rej_len = 0;
+
+	if (!(out = kmalloc(len, GFP_ATOMIC))) {
+		dev->stats.rx_dropped++;
+		return;	/* out of memory, ignore CR packet */
+	}
+
+	for (opt = data; len; len -= opt[1], opt += opt[1]) {
+		if (len < 2 || opt[1] < 2 || len < opt[1]) {
+			dev->stats.rx_errors++;
+			kfree(out);
+			return; /* bad packet, drop silently */
+		}
+
+		if (pid == PID_LCP)
+			switch (opt[0]) {
+			case LCP_OPTION_MRU:
+				continue; /* MRU always OK and > 1500 bytes? */
+
+			case LCP_OPTION_ACCM: /* async control character map */
+				if (!memcmp(opt, valid_accm,
+					    sizeof(valid_accm)))
+					continue;
+				if (!rej_len) { /* NAK it */
+					memcpy(out + nak_len, valid_accm,
+					       sizeof(valid_accm));
+					nak_len += sizeof(valid_accm);
+					continue;
+				}
+				break;
+			case LCP_OPTION_MAGIC:
+				if (opt[1] != 6 || (!opt[2] && !opt[3] &&
+						    !opt[4] && !opt[5]))
+					break; /* reject invalid magic number */
+				continue;
+			}
+		/* reject this option */
+		memcpy(out + rej_len, opt, opt[1]);
+		rej_len += opt[1];
+	}
+
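+	/* Configure-Reject takes precedence over Configure-Nak (STD-51):
+	   once any option has been rejected, later options are no longer
+	   Nak'd and only the rejected options, written from the start of
+	   "out", are returned to the peer. */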
+	if (rej_len)
+		ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_REJ, id, rej_len, out);
+	else if (nak_len)
+		ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_NAK, id, nak_len, out);
+	else
+		ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);
+
+	kfree(out);
+}
+
+static int ppp_rx(struct sk_buff *skb)
+{
+	struct hdlc_header *hdr = (struct hdlc_header*)skb->data;
+	struct net_device *dev = skb->dev;
+	struct ppp *ppp = get_ppp(dev);
+	struct proto *proto;
+	struct cp_header *cp;
+	unsigned long flags;
+	unsigned int len;
+	u16 pid;
+#if DEBUG_CP
+	int i;
+	char *ptr;
+#endif
+
+	spin_lock_irqsave(&ppp->lock, flags);
+	/* Check HDLC header */
+	if (skb->len < sizeof(struct hdlc_header))
+		goto rx_error;
+	cp = (struct cp_header*)skb_pull(skb, sizeof(struct hdlc_header));
+	if (hdr->address != HDLC_ADDR_ALLSTATIONS ||
+	    hdr->control != HDLC_CTRL_UI)
+		goto rx_error;
+
+	pid = ntohs(hdr->protocol);
+	proto = get_proto(dev, pid);
+	if (!proto) {
+		if (ppp->protos[IDX_LCP].state == OPENED)
+			ppp_tx_cp(dev, PID_LCP, LCP_PROTO_REJ,
+				  ++ppp->seq, skb->len + 2, &hdr->protocol);
+		goto rx_error;
+	}
+
+	len = ntohs(cp->len);
+	if (len < sizeof(struct cp_header) /* no complete CP header? */ ||
+	    skb->len < len /* truncated packet? */)
+		goto rx_error;
+	skb_pull(skb, sizeof(struct cp_header));
+	len -= sizeof(struct cp_header);
+
+	/* HDLC and CP headers stripped from skb */
+#if DEBUG_CP
+	if (cp->code < CP_CODES)
+		sprintf(debug_buffer, "[%s id 0x%X]", code_names[cp->code],
+			cp->id);
+	else
+		sprintf(debug_buffer, "[code %u id 0x%X]", cp->code, cp->id);
+	ptr = debug_buffer + strlen(debug_buffer);
+	for (i = 0; i < min_t(unsigned int, len, DEBUG_CP); i++) {
+		sprintf(ptr, " %02X", skb->data[i]);
+		ptr += strlen(ptr);
+	}
+	printk(KERN_DEBUG "%s: RX %s %s\n", dev->name, proto_name(pid),
+	       debug_buffer);
+#endif
+
+	/* LCP only */
+	if (pid == PID_LCP)
+		switch (cp->code) {
+		case LCP_PROTO_REJ:
+			pid = ntohs(*(__be16*)skb->data);
+			if (pid == PID_LCP || pid == PID_IPCP ||
+			    pid == PID_IPV6CP)
+				ppp_cp_event(dev, pid, RXJ_BAD, 0, 0,
+					     0, NULL);
+			goto out;
+
+		case LCP_ECHO_REQ: /* send Echo-Reply */
+			if (len >= 4 && proto->state == OPENED)
+				ppp_tx_cp(dev, PID_LCP, LCP_ECHO_REPLY,
+					  cp->id, len - 4, skb->data + 4);
+			goto out;
+
+		case LCP_ECHO_REPLY:
+			if (cp->id == ppp->echo_id)
+				ppp->last_pong = jiffies;
+			goto out;
+
+		case LCP_DISC_REQ: /* discard */
+			goto out;
+		}
+
+	/* LCP, IPCP and IPV6CP */
+	switch (cp->code) {
+	case CP_CONF_REQ:
+		ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data);
+		break;
+
+	case CP_CONF_ACK:
+		if (cp->id == proto->cr_id)
+			ppp_cp_event(dev, pid, RCA, 0, 0, 0, NULL);
+		break;
+
+	case CP_CONF_REJ:
+	case CP_CONF_NAK:
+		if (cp->id == proto->cr_id)
+			ppp_cp_event(dev, pid, RCN, 0, 0, 0, NULL);
+		break;
+
+	case CP_TERM_REQ:
+		ppp_cp_event(dev, pid, RTR, 0, cp->id, 0, NULL);
+		break;
+
+	case CP_TERM_ACK:
+		ppp_cp_event(dev, pid, RTA, 0, 0, 0, NULL);
+		break;
+
+	case CP_CODE_REJ:
+		ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL);
+		break;
+
+	default:
+		len += sizeof(struct cp_header);
+		if (len > dev->mtu)
+			len = dev->mtu;
+		ppp_cp_event(dev, pid, RUC, 0, 0, len, cp);
+		break;
+	}
+	goto out;
+
+rx_error:
+	dev->stats.rx_errors++;
+out:
+	spin_unlock_irqrestore(&ppp->lock, flags);
+	dev_kfree_skb_any(skb);
+	ppp_tx_flush();
+	return NET_RX_DROP;
+}
+
+static void ppp_timer(unsigned long arg)
+{
+	struct proto *proto = (struct proto *)arg;
+	struct ppp *ppp = get_ppp(proto->dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ppp->lock, flags);
+	switch (proto->state) {
+	case STOPPING:
+	case REQ_SENT:
+	case ACK_RECV:
+	case ACK_SENT:
+		if (proto->restart_counter) {
+			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
+				     0, NULL);
+			proto->restart_counter--;
+		} else
+			ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
+				     0, NULL);
+		break;
+
+	case OPENED:
+		if (proto->pid != PID_LCP)
+			break;
+		if (time_after(jiffies, ppp->last_pong +
+			       ppp->keepalive_timeout * HZ)) {
+			netdev_info(proto->dev, "Link down\n");
+			ppp_cp_event(proto->dev, PID_LCP, STOP, 0, 0, 0, NULL);
+			ppp_cp_event(proto->dev, PID_LCP, START, 0, 0, 0, NULL);
+		} else {	/* send keep-alive packet */
+			ppp->echo_id = ++ppp->seq;
+			ppp_tx_cp(proto->dev, PID_LCP, LCP_ECHO_REQ,
+				  ppp->echo_id, 0, NULL);
+			proto->timer.expires = jiffies +
+				ppp->keepalive_interval * HZ;
+			add_timer(&proto->timer);
+		}
+		break;
+	}
+	spin_unlock_irqrestore(&ppp->lock, flags);
+	ppp_tx_flush();
+}
+
+
+static void ppp_start(struct net_device *dev)
+{
+	struct ppp *ppp = get_ppp(dev);
+	int i;
+
+	for (i = 0; i < IDX_COUNT; i++) {
+		struct proto *proto = &ppp->protos[i];
+		proto->dev = dev;
+		init_timer(&proto->timer);
+		proto->timer.function = ppp_timer;
+		proto->timer.data = (unsigned long)proto;
+		proto->state = CLOSED;
+	}
+	ppp->protos[IDX_LCP].pid = PID_LCP;
+	ppp->protos[IDX_IPCP].pid = PID_IPCP;
+	ppp->protos[IDX_IPV6CP].pid = PID_IPV6CP;
+
+	ppp_cp_event(dev, PID_LCP, START, 0, 0, 0, NULL);
+}
+
+static void ppp_stop(struct net_device *dev)
+{
+	ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
+}
+
+static void ppp_close(struct net_device *dev)
+{
+	ppp_tx_flush();
+}
+
+static struct hdlc_proto proto = {
+	.start		= ppp_start,
+	.stop		= ppp_stop,
+	.close		= ppp_close,
+	.type_trans	= ppp_type_trans,
+	.ioctl		= ppp_ioctl,
+	.netif_rx	= ppp_rx,
+	.module		= THIS_MODULE,
+};
+
+static const struct header_ops ppp_header_ops = {
+	.create = ppp_hard_header,
+};
+
+static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	struct ppp *ppp;
+	int result;
+
+	switch (ifr->ifr_settings.type) {
+	case IF_GET_PROTO:
+		if (dev_to_hdlc(dev)->proto != &proto)
+			return -EINVAL;
+		ifr->ifr_settings.type = IF_PROTO_PPP;
+		return 0; /* return protocol only, no settable parameters */
+
+	case IF_PROTO_PPP:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (dev->flags & IFF_UP)
+			return -EBUSY;
+
+		/* no settable parameters */
+
+		result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
+		if (result)
+			return result;
+
+		result = attach_hdlc_protocol(dev, &proto, sizeof(struct ppp));
+		if (result)
+			return result;
+
+		ppp = get_ppp(dev);
+		spin_lock_init(&ppp->lock);
+		ppp->req_timeout = 2;
+		ppp->cr_retries = 10;
+		ppp->term_retries = 2;
+		ppp->keepalive_interval = 10;
+		ppp->keepalive_timeout = 60;
+
+		dev->hard_header_len = sizeof(struct hdlc_header);
+		dev->header_ops = &ppp_header_ops;
+		dev->type = ARPHRD_PPP;
+		netif_dormant_on(dev);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+
+static int __init mod_init(void)
+{
+	skb_queue_head_init(&tx_queue);
+	register_hdlc_protocol(&proto);
+	return 0;
+}
+
+static void __exit mod_exit(void)
+{
+	unregister_hdlc_protocol(&proto);
+}
+
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("PPP protocol support for generic HDLC");
+MODULE_LICENSE("GPL v2");
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_raw.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_raw.c
new file mode 100644
index 0000000..5dc153e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_raw.c
@@ -0,0 +1,114 @@
+/*
+ * Generic HDLC support routines for Linux
+ * HDLC support
+ *
+ * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/hdlc.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pkt_sched.h>
+#include <linux/poll.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+
+
+static int raw_ioctl(struct net_device *dev, struct ifreq *ifr);
+
+static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+	return cpu_to_be16(ETH_P_IP);
+}
+
+static struct hdlc_proto proto = {
+	.type_trans	= raw_type_trans,
+	.ioctl		= raw_ioctl,
+	.module		= THIS_MODULE,
+};
+
+
+static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+	raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc;
+	const size_t size = sizeof(raw_hdlc_proto);
+	raw_hdlc_proto new_settings;
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	int result;
+
+	switch (ifr->ifr_settings.type) {
+	case IF_GET_PROTO:
+		if (dev_to_hdlc(dev)->proto != &proto)
+			return -EINVAL;
+		ifr->ifr_settings.type = IF_PROTO_HDLC;
+		if (ifr->ifr_settings.size < size) {
+			ifr->ifr_settings.size = size; /* data size wanted */
+			return -ENOBUFS;
+		}
+		if (copy_to_user(raw_s, hdlc->state, size))
+			return -EFAULT;
+		return 0;
+
+	case IF_PROTO_HDLC:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (dev->flags & IFF_UP)
+			return -EBUSY;
+
+		if (copy_from_user(&new_settings, raw_s, size))
+			return -EFAULT;
+
+		if (new_settings.encoding == ENCODING_DEFAULT)
+			new_settings.encoding = ENCODING_NRZ;
+
+		if (new_settings.parity == PARITY_DEFAULT)
+			new_settings.parity = PARITY_CRC16_PR1_CCITT;
+
+		result = hdlc->attach(dev, new_settings.encoding,
+				      new_settings.parity);
+		if (result)
+			return result;
+
+		result = attach_hdlc_protocol(dev, &proto,
+					      sizeof(raw_hdlc_proto));
+		if (result)
+			return result;
+		memcpy(hdlc->state, &new_settings, size);
+		dev->type = ARPHRD_RAWHDLC;
+		netif_dormant_off(dev);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+
+static int __init mod_init(void)
+{
+	register_hdlc_protocol(&proto);
+	return 0;
+}
+
+
+
+static void __exit mod_exit(void)
+{
+	unregister_hdlc_protocol(&proto);
+}
+
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("Raw HDLC protocol support for generic HDLC");
+MODULE_LICENSE("GPL v2");
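The IF_PROTO_HDLC branch above is normally driven from user space through the
SIOCWANDEV ioctl, which is what the sethdlc(8) utility does. A minimal sketch,
assuming an interface named "hdlc0" and an already-open AF_INET datagram
socket; the helper name and the values are illustrative, not part of this patch:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/hdlc/ioctl.h>
	#include <linux/if.h>
	#include <linux/sockios.h>

	/* Attach raw HDLC (NRZ, CRC16-PR1-CCITT) to hdlc0; returns 0 on success. */
	static int set_raw_hdlc(int sock)
	{
		struct ifreq ifr;
		raw_hdlc_proto raw;

		memset(&ifr, 0, sizeof(ifr));
		memset(&raw, 0, sizeof(raw));
		strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);
		raw.encoding = ENCODING_NRZ;
		raw.parity = PARITY_CRC16_PR1_CCITT;
		ifr.ifr_settings.type = IF_PROTO_HDLC;
		ifr.ifr_settings.size = sizeof(raw);
		ifr.ifr_settings.ifs_ifsu.raw_hdlc = &raw;
		return ioctl(sock, SIOCWANDEV, &ifr);
	}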
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_raw_eth.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_raw_eth.c
new file mode 100644
index 0000000..3ab72b3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_raw_eth.c
@@ -0,0 +1,132 @@
+/*
+ * Generic HDLC support routines for Linux
+ * HDLC Ethernet emulation support
+ *
+ * Copyright (C) 2002-2006 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/hdlc.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pkt_sched.h>
+#include <linux/poll.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+
+static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr);
+
+static netdev_tx_t eth_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	int pad = ETH_ZLEN - skb->len;
+	if (pad > 0) {		/* Pad the frame with zeros */
+		int len = skb->len;
+		if (skb_tailroom(skb) < pad)
+			if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) {
+				dev->stats.tx_dropped++;
+				dev_kfree_skb(skb);
+				return NETDEV_TX_OK;
+			}
+		skb_put(skb, pad);
+		memset(skb->data + len, 0, pad);
+	}
+	return dev_to_hdlc(dev)->xmit(skb, dev);
+}
+
+
+static struct hdlc_proto proto = {
+	.type_trans	= eth_type_trans,
+	.xmit		= eth_tx,
+	.ioctl		= raw_eth_ioctl,
+	.module		= THIS_MODULE,
+};
+
+
+static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+	raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc;
+	const size_t size = sizeof(raw_hdlc_proto);
+	raw_hdlc_proto new_settings;
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	int result, old_qlen;
+
+	switch (ifr->ifr_settings.type) {
+	case IF_GET_PROTO:
+		if (dev_to_hdlc(dev)->proto != &proto)
+			return -EINVAL;
+		ifr->ifr_settings.type = IF_PROTO_HDLC_ETH;
+		if (ifr->ifr_settings.size < size) {
+			ifr->ifr_settings.size = size; /* data size wanted */
+			return -ENOBUFS;
+		}
+		if (copy_to_user(raw_s, hdlc->state, size))
+			return -EFAULT;
+		return 0;
+
+	case IF_PROTO_HDLC_ETH:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (dev->flags & IFF_UP)
+			return -EBUSY;
+
+		if (copy_from_user(&new_settings, raw_s, size))
+			return -EFAULT;
+
+		if (new_settings.encoding == ENCODING_DEFAULT)
+			new_settings.encoding = ENCODING_NRZ;
+
+		if (new_settings.parity == PARITY_DEFAULT)
+			new_settings.parity = PARITY_CRC16_PR1_CCITT;
+
+		result = hdlc->attach(dev, new_settings.encoding,
+				      new_settings.parity);
+		if (result)
+			return result;
+
+		result = attach_hdlc_protocol(dev, &proto,
+					      sizeof(raw_hdlc_proto));
+		if (result)
+			return result;
+		memcpy(hdlc->state, &new_settings, size);
+		old_qlen = dev->tx_queue_len;
+		ether_setup(dev);
+		dev->tx_queue_len = old_qlen;
+		eth_hw_addr_random(dev);
+		netif_dormant_off(dev);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+
+static int __init mod_init(void)
+{
+	register_hdlc_protocol(&proto);
+	return 0;
+}
+
+
+
+static void __exit mod_exit(void)
+{
+	unregister_hdlc_protocol(&proto);
+}
+
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("Ethernet encapsulation support for generic HDLC");
+MODULE_LICENSE("GPL v2");
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_x25.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_x25.c
new file mode 100644
index 0000000..a49aec5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/hdlc_x25.c
@@ -0,0 +1,243 @@
+/*
+ * Generic HDLC support routines for Linux
+ * X.25 support
+ *
+ * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/hdlc.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/lapb.h>
+#include <linux/module.h>
+#include <linux/pkt_sched.h>
+#include <linux/poll.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+#include <net/x25device.h>
+
+static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
+
+/* These functions are callbacks called by LAPB layer */
+
+static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
+{
+	struct sk_buff *skb;
+	unsigned char *ptr;
+
+	if ((skb = dev_alloc_skb(1)) == NULL) {
+		netdev_err(dev, "out of memory\n");
+		return;
+	}
+
+	ptr = skb_put(skb, 1);
+	*ptr = code;
+
+	skb->protocol = x25_type_trans(skb, dev);
+	netif_rx(skb);
+}
+
+
+
+static void x25_connected(struct net_device *dev, int reason)
+{
+	x25_connect_disconnect(dev, reason, X25_IFACE_CONNECT);
+}
+
+
+
+static void x25_disconnected(struct net_device *dev, int reason)
+{
+	x25_connect_disconnect(dev, reason, X25_IFACE_DISCONNECT);
+}
+
+
+
+static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
+{
+	unsigned char *ptr;
+
+	skb_push(skb, 1);
+
+	if (skb_cow(skb, 1))
+		return NET_RX_DROP;
+
+	ptr  = skb->data;
+	*ptr = X25_IFACE_DATA;
+
+	skb->protocol = x25_type_trans(skb, dev);
+	return netif_rx(skb);
+}
+
+
+
+static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	hdlc->xmit(skb, dev); /* Ignore return value :-( */
+}
+
+
+
+static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	int result;
+
+
+	/* X.25 to LAPB */
+	switch (skb->data[0]) {
+	case X25_IFACE_DATA:	/* Data to be transmitted */
+		skb_pull(skb, 1);
+		if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
+			dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+
+	case X25_IFACE_CONNECT:
+		if ((result = lapb_connect_request(dev)) != LAPB_OK) {
+			if (result == LAPB_CONNECTED)
+				/* Send connect confirm. msg to level 3 */
+				x25_connected(dev, 0);
+			else
+				netdev_err(dev, "LAPB connect request failed, error code = %i\n",
+					   result);
+		}
+		break;
+
+	case X25_IFACE_DISCONNECT:
+		if ((result = lapb_disconnect_request(dev)) != LAPB_OK) {
+			if (result == LAPB_NOTCONNECTED)
+				/* Send disconnect confirm. msg to level 3 */
+				x25_disconnected(dev, 0);
+			else
+				netdev_err(dev, "LAPB disconnect request failed, error code = %i\n",
+					   result);
+		}
+		break;
+
+	default:		/* to be defined */
+		break;
+	}
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+
+
+static int x25_open(struct net_device *dev)
+{
+	int result;
+	static const struct lapb_register_struct cb = {
+		.connect_confirmation = x25_connected,
+		.connect_indication = x25_connected,
+		.disconnect_confirmation = x25_disconnected,
+		.disconnect_indication = x25_disconnected,
+		.data_indication = x25_data_indication,
+		.data_transmit = x25_data_transmit,
+	};
+
+	result = lapb_register(dev, &cb);
+	if (result != LAPB_OK)
+		return result;
+	return 0;
+}
+
+
+
+static void x25_close(struct net_device *dev)
+{
+	lapb_unregister(dev);
+}
+
+
+
+static int x25_rx(struct sk_buff *skb)
+{
+	struct net_device *dev = skb->dev;
+
+	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+		dev->stats.rx_dropped++;
+		return NET_RX_DROP;
+	}
+
+	if (lapb_data_received(dev, skb) == LAPB_OK)
+		return NET_RX_SUCCESS;
+
+	dev->stats.rx_errors++;
+	dev_kfree_skb_any(skb);
+	return NET_RX_DROP;
+}
+
+
+static struct hdlc_proto proto = {
+	.open		= x25_open,
+	.close		= x25_close,
+	.ioctl		= x25_ioctl,
+	.netif_rx	= x25_rx,
+	.xmit		= x25_xmit,
+	.module		= THIS_MODULE,
+};
+
+
+static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	int result;
+
+	switch (ifr->ifr_settings.type) {
+	case IF_GET_PROTO:
+		if (dev_to_hdlc(dev)->proto != &proto)
+			return -EINVAL;
+		ifr->ifr_settings.type = IF_PROTO_X25;
+		return 0; /* return protocol only, no settable parameters */
+
+	case IF_PROTO_X25:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (dev->flags & IFF_UP)
+			return -EBUSY;
+
+		result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
+		if (result)
+			return result;
+
+		if ((result = attach_hdlc_protocol(dev, &proto, 0)))
+			return result;
+		dev->type = ARPHRD_X25;
+		netif_dormant_off(dev);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+
+static int __init mod_init(void)
+{
+	register_hdlc_protocol(&proto);
+	return 0;
+}
+
+
+
+static void __exit mod_exit(void)
+{
+	unregister_hdlc_protocol(&proto);
+}
+
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("X.25 protocol support for generic HDLC");
+MODULE_LICENSE("GPL v2");
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/hostess_sv11.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/hostess_sv11.c
new file mode 100644
index 0000000..3d80e42
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/hostess_sv11.c
@@ -0,0 +1,352 @@
+/*
+ *	Comtrol SV11 card driver
+ *
+ *	This is a slightly odd Z85230 synchronous driver. All you need to
+ *	know basically is
+ *
+ *	It's a genuine Z85230.
+ *
+ *	It supports DMA using two DMA channels in SYNC mode. The driver
+ *	doesn't use these facilities.
+ *
+ *	The control port is at io+1, the data at io+3 and turning off the DMA
+ *	is done by writing 0 to io+4
+ *
+ *	The hardware does the bus handling to avoid the need for delays between
+ *	touching control registers.
+ *
+ *	Port B isn't wired (why - beats me)
+ *
+ *	Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/delay.h>
+#include <linux/hdlc.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <net/arp.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+#include "z85230.h"
+
+static int dma;
+
+/*
+ *	Network driver support routines
+ */
+
+static inline struct z8530_dev* dev_to_sv(struct net_device *dev)
+{
+	return (struct z8530_dev *)dev_to_hdlc(dev)->priv;
+}
+
+/*
+ *	Frame receive. Simple for our card as we do HDLC and there
+ *	is no funny garbage involved
+ */
+
+static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
+{
+	/* Drop the CRC - it's not a good idea to try and negotiate it ;) */
+	skb_trim(skb, skb->len - 2);
+	skb->protocol = hdlc_type_trans(skb, c->netdevice);
+	skb_reset_mac_header(skb);
+	skb->dev = c->netdevice;
+	/*
+	 *	Send it to the PPP layer. We don't have time to process
+	 *	it right now.
+	 */
+	netif_rx(skb);
+}
+
+/*
+ *	We've been placed in the UP state
+ */
+
+static int hostess_open(struct net_device *d)
+{
+	struct z8530_dev *sv11 = dev_to_sv(d);
+	int err = -1;
+
+	/*
+	 *	Link layer up
+	 */
+	switch (dma) {
+	case 0:
+		err = z8530_sync_open(d, &sv11->chanA);
+		break;
+	case 1:
+		err = z8530_sync_dma_open(d, &sv11->chanA);
+		break;
+	case 2:
+		err = z8530_sync_txdma_open(d, &sv11->chanA);
+		break;
+	}
+
+	if (err)
+		return err;
+
+	err = hdlc_open(d);
+	if (err) {
+		switch (dma) {
+		case 0:
+			z8530_sync_close(d, &sv11->chanA);
+			break;
+		case 1:
+			z8530_sync_dma_close(d, &sv11->chanA);
+			break;
+		case 2:
+			z8530_sync_txdma_close(d, &sv11->chanA);
+			break;
+		}
+		return err;
+	}
+	sv11->chanA.rx_function = hostess_input;
+
+	/*
+	 *	Go go go
+	 */
+
+	netif_start_queue(d);
+	return 0;
+}
+
+static int hostess_close(struct net_device *d)
+{
+	struct z8530_dev *sv11 = dev_to_sv(d);
+	/*
+	 *	Discard new frames
+	 */
+	sv11->chanA.rx_function = z8530_null_rx;
+
+	hdlc_close(d);
+	netif_stop_queue(d);
+
+	switch (dma) {
+	case 0:
+		z8530_sync_close(d, &sv11->chanA);
+		break;
+	case 1:
+		z8530_sync_dma_close(d, &sv11->chanA);
+		break;
+	case 2:
+		z8530_sync_txdma_close(d, &sv11->chanA);
+		break;
+	}
+	return 0;
+}
+
+static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
+{
+	/* struct z8530_dev *sv11=dev_to_sv(d);
+	   z8530_ioctl(d,&sv11->chanA,ifr,cmd) */
+	return hdlc_ioctl(d, ifr, cmd);
+}
+
+/*
+ *	Passed network frames, fire them downwind.
+ */
+
+static netdev_tx_t hostess_queue_xmit(struct sk_buff *skb,
+					    struct net_device *d)
+{
+	return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb);
+}
+
+static int hostess_attach(struct net_device *dev, unsigned short encoding,
+			  unsigned short parity)
+{
+	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
+		return 0;
+	return -EINVAL;
+}
+
+/*
+ *	Description block for a Comtrol Hostess SV11 card
+ */
+
+static const struct net_device_ops hostess_ops = {
+	.ndo_open       = hostess_open,
+	.ndo_stop       = hostess_close,
+	.ndo_change_mtu = hdlc_change_mtu,
+	.ndo_start_xmit = hdlc_start_xmit,
+	.ndo_do_ioctl   = hostess_ioctl,
+};
+
+static struct z8530_dev *sv11_init(int iobase, int irq)
+{
+	struct z8530_dev *sv;
+	struct net_device *netdev;
+	/*
+	 *	Get the needed I/O space
+	 */
+
+	if (!request_region(iobase, 8, "Comtrol SV11")) {
+		pr_warn("I/O 0x%X already in use\n", iobase);
+		return NULL;
+	}
+
+	sv = kzalloc(sizeof(struct z8530_dev), GFP_KERNEL);
+	if (!sv)
+		goto err_kzalloc;
+
+	/*
+	 *	Stuff in the I/O addressing
+	 */
+
+	sv->active = 0;
+
+	sv->chanA.ctrlio = iobase + 1;
+	sv->chanA.dataio = iobase + 3;
+	sv->chanB.ctrlio = -1;
+	sv->chanB.dataio = -1;
+	sv->chanA.irqs = &z8530_nop;
+	sv->chanB.irqs = &z8530_nop;
+
+	outb(0, iobase + 4);		/* DMA off */
+
+	/* We want a fast IRQ for this device. Actually we'd like an even faster
+	   IRQ ;) - This is one driver RtLinux is made for */
+
+	if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
+			"Hostess SV11", sv) < 0) {
+		pr_warn("IRQ %d already in use\n", irq);
+		goto err_irq;
+	}
+
+	sv->irq = irq;
+	sv->chanA.private = sv;
+	sv->chanA.dev = sv;
+	sv->chanB.dev = sv;
+
+	if (dma) {
+		/*
+		 *	You can have DMA off, or DMA channels 1 and 3 -
+		 *	that's the lot on the Comtrol.
+		 */
+		sv->chanA.txdma = 3;
+		sv->chanA.rxdma = 1;
+		outb(0x03 | 0x08, iobase + 4);		/* DMA on */
+		if (request_dma(sv->chanA.txdma, "Hostess SV/11 (TX)"))
+			goto err_txdma;
+
+		if (dma == 1)
+			if (request_dma(sv->chanA.rxdma, "Hostess SV/11 (RX)"))
+				goto err_rxdma;
+	}
+
+	/* Kill our private IRQ line - the Hostess can end up chattering
+	   until the configuration is set */
+	disable_irq(irq);
+
+	/*
+	 *	Begin normal initialise
+	 */
+
+	if (z8530_init(sv)) {
+		pr_err("Z8530 series device not found\n");
+		enable_irq(irq);
+		goto free_dma;
+	}
+	z8530_channel_load(&sv->chanB, z8530_dead_port);
+	if (sv->type == Z85C30)
+		z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream);
+	else
+		z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream_85230);
+
+	enable_irq(irq);
+
+	/*
+	 *	Now we can take the IRQ
+	 */
+
+	sv->chanA.netdevice = netdev = alloc_hdlcdev(sv);
+	if (!netdev)
+		goto free_dma;
+
+	dev_to_hdlc(netdev)->attach = hostess_attach;
+	dev_to_hdlc(netdev)->xmit = hostess_queue_xmit;
+	netdev->netdev_ops = &hostess_ops;
+	netdev->base_addr = iobase;
+	netdev->irq = irq;
+
+	if (register_hdlc_device(netdev)) {
+		pr_err("unable to register HDLC device\n");
+		free_netdev(netdev);
+		goto free_dma;
+	}
+
+	z8530_describe(sv, "I/O", iobase);
+	sv->active = 1;
+	return sv;
+
+free_dma:
+	if (dma == 1)
+		free_dma(sv->chanA.rxdma);
+err_rxdma:
+	if (dma)
+		free_dma(sv->chanA.txdma);
+err_txdma:
+	free_irq(irq, sv);
+err_irq:
+	kfree(sv);
+err_kzalloc:
+	release_region(iobase, 8);
+	return NULL;
+}
+
+static void sv11_shutdown(struct z8530_dev *dev)
+{
+	unregister_hdlc_device(dev->chanA.netdevice);
+	z8530_shutdown(dev);
+	free_irq(dev->irq, dev);
+	if (dma) {
+		if (dma == 1)
+			free_dma(dev->chanA.rxdma);
+		free_dma(dev->chanA.txdma);
+	}
+	release_region(dev->chanA.ctrlio - 1, 8);
+	free_netdev(dev->chanA.netdevice);
+	kfree(dev);
+}
+
+static int io = 0x200;
+static int irq = 9;
+
+module_param(io, int, 0);
+MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card");
+module_param(dma, int, 0);
+MODULE_PARM_DESC(dma, "Set this to 1 to use DMA1/DMA3 for TX/RX");
+module_param(irq, int, 0);
+MODULE_PARM_DESC(irq, "The interrupt line setting for the Comtrol Hostess SV11 card");
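+
+/* Typical module load (the values must match the card's jumper settings):
+ *	modprobe hostess_sv11 io=0x200 irq=9 dma=1
+ */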
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11");
+
+static struct z8530_dev *sv11_unit;
+
+int init_module(void)
+{
+	if ((sv11_unit = sv11_init(io, irq)) == NULL)
+		return -ENODEV;
+	return 0;
+}
+
+void cleanup_module(void)
+{
+	if (sv11_unit)
+		sv11_shutdown(sv11_unit);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/ixp4xx_hss.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/ixp4xx_hss.c
new file mode 100644
index 0000000..3f575af
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/ixp4xx_hss.c
@@ -0,0 +1,1418 @@
+/*
+ * Intel IXP4xx HSS (synchronous serial port) driver for Linux
+ *
+ * Copyright (C) 2007-2008 Krzysztof Hałasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/fs.h>
+#include <linux/hdlc.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <mach/npe.h>
+#include <mach/qmgr.h>
+
+#define DEBUG_DESC		0
+#define DEBUG_RX		0
+#define DEBUG_TX		0
+#define DEBUG_PKT_BYTES		0
+#define DEBUG_CLOSE		0
+
+#define DRV_NAME		"ixp4xx_hss"
+
+#define PKT_EXTRA_FLAGS		0 /* orig 1 */
+#define PKT_NUM_PIPES		1 /* 1, 2 or 4 */
+#define PKT_PIPE_FIFO_SIZEW	4 /* total 4 dwords per HSS */
+
+#define RX_DESCS		16 /* also length of all RX queues */
+#define TX_DESCS		16 /* also length of all TX queues */
+
+#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
+#define RX_SIZE			(HDLC_MAX_MRU + 4) /* NPE needs more space */
+#define MAX_CLOSE_WAIT		1000 /* microseconds */
+#define HSS_COUNT		2
+#define FRAME_SIZE		256 /* doesn't matter at this point */
+#define FRAME_OFFSET		0
+#define MAX_CHANNELS		(FRAME_SIZE / 8)
+
+#define NAPI_WEIGHT		16
+
+/* Queue IDs */
+#define HSS0_CHL_RXTRIG_QUEUE	12	/* orig size = 32 dwords */
+#define HSS0_PKT_RX_QUEUE	13	/* orig size = 32 dwords */
+#define HSS0_PKT_TX0_QUEUE	14	/* orig size = 16 dwords */
+#define HSS0_PKT_TX1_QUEUE	15
+#define HSS0_PKT_TX2_QUEUE	16
+#define HSS0_PKT_TX3_QUEUE	17
+#define HSS0_PKT_RXFREE0_QUEUE	18	/* orig size = 16 dwords */
+#define HSS0_PKT_RXFREE1_QUEUE	19
+#define HSS0_PKT_RXFREE2_QUEUE	20
+#define HSS0_PKT_RXFREE3_QUEUE	21
+#define HSS0_PKT_TXDONE_QUEUE	22	/* orig size = 64 dwords */
+
+#define HSS1_CHL_RXTRIG_QUEUE	10
+#define HSS1_PKT_RX_QUEUE	0
+#define HSS1_PKT_TX0_QUEUE	5
+#define HSS1_PKT_TX1_QUEUE	6
+#define HSS1_PKT_TX2_QUEUE	7
+#define HSS1_PKT_TX3_QUEUE	8
+#define HSS1_PKT_RXFREE0_QUEUE	1
+#define HSS1_PKT_RXFREE1_QUEUE	2
+#define HSS1_PKT_RXFREE2_QUEUE	3
+#define HSS1_PKT_RXFREE3_QUEUE	4
+#define HSS1_PKT_TXDONE_QUEUE	9
+
+#define NPE_PKT_MODE_HDLC		0
+#define NPE_PKT_MODE_RAW		1
+#define NPE_PKT_MODE_56KMODE		2
+#define NPE_PKT_MODE_56KENDIAN_MSB	4
+
+/* PKT_PIPE_HDLC_CFG_WRITE flags */
+#define PKT_HDLC_IDLE_ONES		0x1 /* default = flags */
+#define PKT_HDLC_CRC_32			0x2 /* default = CRC-16 */
+#define PKT_HDLC_MSB_ENDIAN		0x4 /* default = LE */
+
+
+/* hss_config, PCRs */
+/* Frame sync sampling, default = active low */
+#define PCR_FRM_SYNC_ACTIVE_HIGH	0x40000000
+#define PCR_FRM_SYNC_FALLINGEDGE	0x80000000
+#define PCR_FRM_SYNC_RISINGEDGE		0xC0000000
+
+/* Frame sync pin: input (default) or output generated off a given clk edge */
+#define PCR_FRM_SYNC_OUTPUT_FALLING	0x20000000
+#define PCR_FRM_SYNC_OUTPUT_RISING	0x30000000
+
+/* Frame and data clock sampling on edge, default = falling */
+#define PCR_FCLK_EDGE_RISING		0x08000000
+#define PCR_DCLK_EDGE_RISING		0x04000000
+
+/* Clock direction, default = input */
+#define PCR_SYNC_CLK_DIR_OUTPUT		0x02000000
+
+/* Generate/Receive frame pulses, default = enabled */
+#define PCR_FRM_PULSE_DISABLED		0x01000000
+
+/* Data rate is full (default) or half the configured clk speed */
+#define PCR_HALF_CLK_RATE		0x00200000
+
+/* Invert data between NPE and HSS FIFOs? (default = no) */
+#define PCR_DATA_POLARITY_INVERT	0x00100000
+
+/* TX/RX endianness, default = LSB */
+#define PCR_MSB_ENDIAN			0x00080000
+
+/* Normal (default) / open drain mode (TX only) */
+#define PCR_TX_PINS_OPEN_DRAIN		0x00040000
+
+/* No framing bit transmitted and expected on RX? (default = framing bit) */
+#define PCR_SOF_NO_FBIT			0x00020000
+
+/* Drive data pins? */
+#define PCR_TX_DATA_ENABLE		0x00010000
+
+/* Voice 56k type: drive the data pins low (default), high, high Z */
+#define PCR_TX_V56K_HIGH		0x00002000
+#define PCR_TX_V56K_HIGH_IMP		0x00004000
+
+/* Unassigned type: drive the data pins low (default), high, high Z */
+#define PCR_TX_UNASS_HIGH		0x00000800
+#define PCR_TX_UNASS_HIGH_IMP		0x00001000
+
+/* T1 @ 1.544MHz only: Fbit dictated in FIFO (default) or high Z */
+#define PCR_TX_FB_HIGH_IMP		0x00000400
+
+/* 56k data endianness - which bit unused: high (default) or low */
+#define PCR_TX_56KE_BIT_0_UNUSED	0x00000200
+
+/* 56k data transmission type: 32/8 bit data (default) or 56K data */
+#define PCR_TX_56KS_56K_DATA		0x00000100
+
+/* hss_config, cCR */
+/* Number of packetized clients, default = 1 */
+#define CCR_NPE_HFIFO_2_HDLC		0x04000000
+#define CCR_NPE_HFIFO_3_OR_4HDLC	0x08000000
+
+/* default = no loopback */
+#define CCR_LOOPBACK			0x02000000
+
+/* HSS number, default = 0 (first) */
+#define CCR_SECOND_HSS			0x01000000
+
+
+/* hss_config, clkCR: main:10, num:10, denom:12 */
+#define CLK42X_SPEED_EXP	((0x3FF << 22) | (  2 << 12) |   15) /*65 KHz*/
+
+#define CLK42X_SPEED_512KHZ	((  130 << 22) | (  2 << 12) |   15)
+#define CLK42X_SPEED_1536KHZ	((   43 << 22) | ( 18 << 12) |   47)
+#define CLK42X_SPEED_1544KHZ	((   43 << 22) | ( 33 << 12) |  192)
+#define CLK42X_SPEED_2048KHZ	((   32 << 22) | ( 34 << 12) |   63)
+#define CLK42X_SPEED_4096KHZ	((   16 << 22) | ( 34 << 12) |  127)
+#define CLK42X_SPEED_8192KHZ	((    8 << 22) | ( 34 << 12) |  255)
+
+#define CLK46X_SPEED_512KHZ	((  130 << 22) | ( 24 << 12) |  127)
+#define CLK46X_SPEED_1536KHZ	((   43 << 22) | (152 << 12) |  383)
+#define CLK46X_SPEED_1544KHZ	((   43 << 22) | ( 66 << 12) |  385)
+#define CLK46X_SPEED_2048KHZ	((   32 << 22) | (280 << 12) |  511)
+#define CLK46X_SPEED_4096KHZ	((   16 << 22) | (280 << 12) | 1023)
+#define CLK46X_SPEED_8192KHZ	((    8 << 22) | (280 << 12) | 2047)
+
+/*
+ * HSS_CONFIG_CLOCK_CR register consists of 3 parts:
+ *     A (10 bits), B (10 bits) and C (12 bits).
+ * IXP42x HSS clock generator operation (verified with an oscilloscope):
+ * Each clock bit takes 7.5 ns (1 / 133.xx MHz).
+ * The clock sequence consists of (C - B) states of 0s and 1s, each state is
+ * A bits wide. It's followed by (B + 1) states of 0s and 1s, each state is
+ * (A + 1) bits wide.
+ *
+ * The resulting average clock frequency (assuming 33.333 MHz oscillator) is:
+ * freq = 66.666 MHz / (A + (B + 1) / (C + 1))
+ * minimum freq = 66.666 MHz / (A + 1)
+ * maximum freq = 66.666 MHz / A
+ *
+ * Example: A = 2, B = 2, C = 7, CLOCK_CR register = 2 << 22 | 2 << 12 | 7
+ * freq = 66.666 MHz / (2 + (2 + 1) / (7 + 1)) = 28.07 MHz (Mb/s).
+ * The clock sequence is: 1100110011 (5 doubles) 000111000 (3 triples).
+ * The sequence takes (C - B) * A + (B + 1) * (A + 1) = 5 * 2 + 3 * 3 bits
+ * = 19 bits (each 7.5 ns long) = 142.5 ns (then the sequence repeats).
+ * The sequence consists of 4 complete clock periods, thus the average
+ * frequency (= clock rate) is 4 / 142.5 ns = 28.07 MHz (Mb/s).
+ * (max specified clock rate for IXP42x HSS is 8.192 Mb/s).
+ */
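+
+/* Worked example: CLK42X_SPEED_2048KHZ above encodes A = 32, B = 34, C = 63,
+   giving 66.666 MHz / (32 + 35/64) ~= 2.048 MHz. */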
+
+/* hss_config, LUT entries */
+#define TDMMAP_UNASSIGNED	0
+#define TDMMAP_HDLC		1	/* HDLC - packetized */
+#define TDMMAP_VOICE56K		2	/* Voice56K - 7-bit channelized */
+#define TDMMAP_VOICE64K		3	/* Voice64K - 8-bit channelized */
+
+/* offsets into HSS config */
+#define HSS_CONFIG_TX_PCR	0x00 /* port configuration registers */
+#define HSS_CONFIG_RX_PCR	0x04
+#define HSS_CONFIG_CORE_CR	0x08 /* loopback control, HSS# */
+#define HSS_CONFIG_CLOCK_CR	0x0C /* clock generator control */
+#define HSS_CONFIG_TX_FCR	0x10 /* frame configuration registers */
+#define HSS_CONFIG_RX_FCR	0x14
+#define HSS_CONFIG_TX_LUT	0x18 /* channel look-up tables */
+#define HSS_CONFIG_RX_LUT	0x38
+
+
+/* NPE command codes */
+/* writes the ConfigWord value to the location specified by offset */
+#define PORT_CONFIG_WRITE		0x40
+
+/* triggers the NPE to load the contents of the configuration table */
+#define PORT_CONFIG_LOAD		0x41
+
+/* triggers the NPE to return an HssErrorReadResponse message */
+#define PORT_ERROR_READ			0x42
+
+/* triggers the NPE to reset internal status and enable the HssPacketized
+   operation for the flow specified by pPipe */
+#define PKT_PIPE_FLOW_ENABLE		0x50
+#define PKT_PIPE_FLOW_DISABLE		0x51
+#define PKT_NUM_PIPES_WRITE		0x52
+#define PKT_PIPE_FIFO_SIZEW_WRITE	0x53
+#define PKT_PIPE_HDLC_CFG_WRITE		0x54
+#define PKT_PIPE_IDLE_PATTERN_WRITE	0x55
+#define PKT_PIPE_RX_SIZE_WRITE		0x56
+#define PKT_PIPE_MODE_WRITE		0x57
+
+/* HDLC packet status values - desc->status */
+#define ERR_SHUTDOWN		1 /* stop or shutdown occurrence */
+#define ERR_HDLC_ALIGN		2 /* HDLC alignment error */
+#define ERR_HDLC_FCS		3 /* HDLC Frame Check Sum error */
+#define ERR_RXFREE_Q_EMPTY	4 /* RX-free queue became empty while receiving
+				     this packet (if buf_len < pkt_len) */
+#define ERR_HDLC_TOO_LONG	5 /* HDLC frame size too long */
+#define ERR_HDLC_ABORT		6 /* abort sequence received */
+#define ERR_DISCONNECTING	7 /* disconnect is in progress */
+
+
+#ifdef __ARMEB__
+typedef struct sk_buff buffer_t;
+#define free_buffer dev_kfree_skb
+#define free_buffer_irq dev_kfree_skb_irq
+#else
+typedef void buffer_t;
+#define free_buffer kfree
+#define free_buffer_irq kfree
+#endif
+
+struct port {
+	struct device *dev;
+	struct npe *npe;
+	struct net_device *netdev;
+	struct napi_struct napi;
+	struct hss_plat_info *plat;
+	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
+	struct desc *desc_tab;	/* coherent */
+	u32 desc_tab_phys;
+	unsigned int id;
+	unsigned int clock_type, clock_rate, loopback;
+	unsigned int initialized, carrier;
+	u8 hdlc_cfg;
+	u32 clock_reg;
+};
+
+/* NPE message structure */
+struct msg {
+#ifdef __ARMEB__
+	u8 cmd, unused, hss_port, index;
+	union {
+		struct { u8 data8a, data8b, data8c, data8d; };
+		struct { u16 data16a, data16b; };
+		struct { u32 data32; };
+	};
+#else
+	u8 index, hss_port, unused, cmd;
+	union {
+		struct { u8 data8d, data8c, data8b, data8a; };
+		struct { u16 data16b, data16a; };
+		struct { u32 data32; };
+	};
+#endif
+};
+
+/* HDLC packet descriptor */
+struct desc {
+	u32 next;		/* pointer to next buffer, unused */
+
+#ifdef __ARMEB__
+	u16 buf_len;		/* buffer length */
+	u16 pkt_len;		/* packet length */
+	u32 data;		/* pointer to data buffer in RAM */
+	u8 status;
+	u8 error_count;
+	u16 __reserved;
+#else
+	u16 pkt_len;		/* packet length */
+	u16 buf_len;		/* buffer length */
+	u32 data;		/* pointer to data buffer in RAM */
+	u16 __reserved;
+	u8 error_count;
+	u8 status;
+#endif
+	u32 __reserved1[4];
+};
+
+
+#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
+				 (n) * sizeof(struct desc))
+#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])
+
+#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
+				 ((n) + RX_DESCS) * sizeof(struct desc))
+#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])
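+
+/* The coherent desc_tab holds RX_DESCS RX descriptors followed by TX_DESCS
+   TX descriptors; the macros above map between a queue entry (physical
+   address) and the corresponding descriptor slot. */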
+
+/*****************************************************************************
+ * global variables
+ ****************************************************************************/
+
+static int ports_open;
+static struct dma_pool *dma_pool;
+static spinlock_t npe_lock;
+
+static const struct {
+	int tx, txdone, rx, rxfree;
+} queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
+		  HSS0_PKT_RXFREE0_QUEUE},
+		 {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
+		  HSS1_PKT_RXFREE0_QUEUE},
+};
+
+/*****************************************************************************
+ * utility functions
+ ****************************************************************************/
+
+static inline struct port* dev_to_port(struct net_device *dev)
+{
+	return dev_to_hdlc(dev)->priv;
+}
+
+#ifndef __ARMEB__
+static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
+{
+	int i;
+	for (i = 0; i < cnt; i++)
+		dest[i] = swab32(src[i]);
+}
+#endif
+
+/*****************************************************************************
+ * HSS access
+ ****************************************************************************/
+
+static void hss_npe_send(struct port *port, struct msg *msg, const char* what)
+{
+	u32 *val = (u32*)msg;
+	if (npe_send_message(port->npe, msg, what)) {
+		pr_crit("HSS-%i: unable to send command [%08X:%08X] to %s\n",
+			port->id, val[0], val[1], npe_name(port->npe));
+		BUG();
+	}
+}
+
+static void hss_config_set_lut(struct port *port)
+{
+	struct msg msg;
+	int ch;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = PORT_CONFIG_WRITE;
+	msg.hss_port = port->id;
+
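+	/* Each timeslot entry is 2 bits wide, so 16 entries fill one 32-bit
+	   word; a completed word is flushed after every 16th channel, first
+	   to the TX LUT and then to the matching RX LUT offset. */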
+	for (ch = 0; ch < MAX_CHANNELS; ch++) {
+		msg.data32 >>= 2;
+		msg.data32 |= TDMMAP_HDLC << 30;
+
+		if (ch % 16 == 15) {
+			msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3);
+			hss_npe_send(port, &msg, "HSS_SET_TX_LUT");
+
+			msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT;
+			hss_npe_send(port, &msg, "HSS_SET_RX_LUT");
+		}
+	}
+}
+
+static void hss_config(struct port *port)
+{
+	struct msg msg;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = PORT_CONFIG_WRITE;
+	msg.hss_port = port->id;
+	msg.index = HSS_CONFIG_TX_PCR;
+	msg.data32 = PCR_FRM_PULSE_DISABLED | PCR_MSB_ENDIAN |
+		PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT;
+	if (port->clock_type == CLOCK_INT)
+		msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
+	hss_npe_send(port, &msg, "HSS_SET_TX_PCR");
+
+	msg.index = HSS_CONFIG_RX_PCR;
+	msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING;
+	hss_npe_send(port, &msg, "HSS_SET_RX_PCR");
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = PORT_CONFIG_WRITE;
+	msg.hss_port = port->id;
+	msg.index = HSS_CONFIG_CORE_CR;
+	msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) |
+		(port->id ? CCR_SECOND_HSS : 0);
+	hss_npe_send(port, &msg, "HSS_SET_CORE_CR");
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = PORT_CONFIG_WRITE;
+	msg.hss_port = port->id;
+	msg.index = HSS_CONFIG_CLOCK_CR;
+	msg.data32 = port->clock_reg;
+	hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR");
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = PORT_CONFIG_WRITE;
+	msg.hss_port = port->id;
+	msg.index = HSS_CONFIG_TX_FCR;
+	msg.data16a = FRAME_OFFSET;
+	msg.data16b = FRAME_SIZE - 1;
+	hss_npe_send(port, &msg, "HSS_SET_TX_FCR");
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = PORT_CONFIG_WRITE;
+	msg.hss_port = port->id;
+	msg.index = HSS_CONFIG_RX_FCR;
+	msg.data16a = FRAME_OFFSET;
+	msg.data16b = FRAME_SIZE - 1;
+	hss_npe_send(port, &msg, "HSS_SET_RX_FCR");
+
+	hss_config_set_lut(port);
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = PORT_CONFIG_LOAD;
+	msg.hss_port = port->id;
+	hss_npe_send(port, &msg, "HSS_LOAD_CONFIG");
+
+	if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
+	    /* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
+	    msg.cmd != PORT_CONFIG_LOAD || msg.data32) {
+		pr_crit("HSS-%i: HSS_LOAD_CONFIG failed\n", port->id);
+		BUG();
+	}
+
+	/* HDLC may stop working without this - check FIXME */
+	npe_recv_message(port->npe, &msg, "FLUSH_IT");
+}
+
+static void hss_set_hdlc_cfg(struct port *port)
+{
+	struct msg msg;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = PKT_PIPE_HDLC_CFG_WRITE;
+	msg.hss_port = port->id;
+	msg.data8a = port->hdlc_cfg; /* rx_cfg */
+	msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */
+	hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG");
+}
+
+static u32 hss_get_status(struct port *port)
+{
+	struct msg msg;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = PORT_ERROR_READ;
+	msg.hss_port = port->id;
+	hss_npe_send(port, &msg, "PORT_ERROR_READ");
+	if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) {
+		pr_crit("HSS-%i: unable to read HSS status\n", port->id);
+		BUG();
+	}
+
+	return msg.data32;
+}
+
+static void hss_start_hdlc(struct port *port)
+{
+	struct msg msg;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = PKT_PIPE_FLOW_ENABLE;
+	msg.hss_port = port->id;
+	msg.data32 = 0;
+	hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE");
+}
+
+static void hss_stop_hdlc(struct port *port)
+{
+	struct msg msg;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = PKT_PIPE_FLOW_DISABLE;
+	msg.hss_port = port->id;
+	hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE");
+	hss_get_status(port); /* make sure it's halted */
+}
+
+static int hss_load_firmware(struct port *port)
+{
+	struct msg msg;
+	int err;
+
+	if (port->initialized)
+		return 0;
+
+	if (!npe_running(port->npe) &&
+	    (err = npe_load_firmware(port->npe, npe_name(port->npe),
+				     port->dev)))
+		return err;
+
+	/* HDLC mode configuration */
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = PKT_NUM_PIPES_WRITE;
+	msg.hss_port = port->id;
+	msg.data8a = PKT_NUM_PIPES;
+	hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES");
+
+	msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE;
+	msg.data8a = PKT_PIPE_FIFO_SIZEW;
+	hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO");
+
+	msg.cmd = PKT_PIPE_MODE_WRITE;
+	msg.data8a = NPE_PKT_MODE_HDLC;
+	/* msg.data8b = inv_mask */
+	/* msg.data8c = or_mask */
+	hss_npe_send(port, &msg, "HSS_SET_PKT_MODE");
+
+	msg.cmd = PKT_PIPE_RX_SIZE_WRITE;
+	msg.data16a = HDLC_MAX_MRU; /* including CRC */
+	hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE");
+
+	msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE;
+	msg.data32 = 0x7F7F7F7F; /* ??? FIXME */
+	hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");
+
+	port->initialized = 1;
+	return 0;
+}
+
+/*****************************************************************************
+ * packetized (HDLC) operation
+ ****************************************************************************/
+
+static inline void debug_pkt(struct net_device *dev, const char *func,
+			     u8 *data, int len)
+{
+#if DEBUG_PKT_BYTES
+	int i;
+
+	printk(KERN_DEBUG "%s: %s(%i)", dev->name, func, len);
+	for (i = 0; i < len; i++) {
+		if (i >= DEBUG_PKT_BYTES)
+			break;
+		printk("%s%02X", !(i % 4) ? " " : "", data[i]);
+	}
+	printk("\n");
+#endif
+}
+
+
+static inline void debug_desc(u32 phys, struct desc *desc)
+{
+#if DEBUG_DESC
+	printk(KERN_DEBUG "%X: %X %3X %3X %08X %X %X\n",
+	       phys, desc->next, desc->buf_len, desc->pkt_len,
+	       desc->data, desc->status, desc->error_count);
+#endif
+}
+
+static inline int queue_get_desc(unsigned int queue, struct port *port,
+				 int is_tx)
+{
+	u32 phys, tab_phys, n_desc;
+	struct desc *tab;
+
+	if (!(phys = qmgr_get_entry(queue)))
+		return -1;
+
+	BUG_ON(phys & 0x1F);
+	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
+	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
+	n_desc = (phys - tab_phys) / sizeof(struct desc);
+	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
+	debug_desc(phys, &tab[n_desc]);
+	BUG_ON(tab[n_desc].next);
+	return n_desc;
+}
+
+static inline void queue_put_desc(unsigned int queue, u32 phys,
+				  struct desc *desc)
+{
+	debug_desc(phys, desc);
+	BUG_ON(phys & 0x1F);
+	qmgr_put_entry(queue, phys);
+	/* Don't check for queue overflow here, we've allocated sufficient
+	   length and queues >= 32 don't support this check anyway. */
+}
+
+
+static inline void dma_unmap_tx(struct port *port, struct desc *desc)
+{
+#ifdef __ARMEB__
+	dma_unmap_single(&port->netdev->dev, desc->data,
+			 desc->buf_len, DMA_TO_DEVICE);
+#else
+	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
+			 ALIGN((desc->data & 3) + desc->buf_len, 4),
+			 DMA_TO_DEVICE);
+#endif
+}
+
+
+static void hss_hdlc_set_carrier(void *pdev, int carrier)
+{
+	struct net_device *netdev = pdev;
+	struct port *port = dev_to_port(netdev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&npe_lock, flags);
+	port->carrier = carrier;
+	if (!port->loopback) {
+		if (carrier)
+			netif_carrier_on(netdev);
+		else
+			netif_carrier_off(netdev);
+	}
+	spin_unlock_irqrestore(&npe_lock, flags);
+}
+
+static void hss_hdlc_rx_irq(void *pdev)
+{
+	struct net_device *dev = pdev;
+	struct port *port = dev_to_port(dev);
+
+#if DEBUG_RX
+	printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
+#endif
+	qmgr_disable_irq(queue_ids[port->id].rx);
+	napi_schedule(&port->napi);
+}
+
+static int hss_hdlc_poll(struct napi_struct *napi, int budget)
+{
+	struct port *port = container_of(napi, struct port, napi);
+	struct net_device *dev = port->netdev;
+	unsigned int rxq = queue_ids[port->id].rx;
+	unsigned int rxfreeq = queue_ids[port->id].rxfree;
+	int received = 0;
+
+#if DEBUG_RX
+	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
+#endif
+
+	while (received < budget) {
+		struct sk_buff *skb;
+		struct desc *desc;
+		int n;
+#ifdef __ARMEB__
+		struct sk_buff *temp;
+		u32 phys;
+#endif
+
+		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
+#if DEBUG_RX
+			printk(KERN_DEBUG "%s: hss_hdlc_poll"
+			       " napi_complete\n", dev->name);
+#endif
+			napi_complete(napi);
+			qmgr_enable_irq(rxq);
+			if (!qmgr_stat_empty(rxq) &&
+			    napi_reschedule(napi)) {
+#if DEBUG_RX
+				printk(KERN_DEBUG "%s: hss_hdlc_poll"
+				       " napi_reschedule succeeded\n",
+				       dev->name);
+#endif
+				qmgr_disable_irq(rxq);
+				continue;
+			}
+#if DEBUG_RX
+			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
+			       dev->name);
+#endif
+			return received; /* all work done */
+		}
+
+		desc = rx_desc_ptr(port, n);
+#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
+		if (desc->error_count)
+			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
+			       " errors %u\n", dev->name, desc->status,
+			       desc->error_count);
+#endif
+		skb = NULL;
+		switch (desc->status) {
+		case 0:
+#ifdef __ARMEB__
+			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
+				phys = dma_map_single(&dev->dev, skb->data,
+						      RX_SIZE,
+						      DMA_FROM_DEVICE);
+				if (dma_mapping_error(&dev->dev, phys)) {
+					dev_kfree_skb(skb);
+					skb = NULL;
+				}
+			}
+#else
+			skb = netdev_alloc_skb(dev, desc->pkt_len);
+#endif
+			if (!skb)
+				dev->stats.rx_dropped++;
+			break;
+		case ERR_HDLC_ALIGN:
+		case ERR_HDLC_ABORT:
+			dev->stats.rx_frame_errors++;
+			dev->stats.rx_errors++;
+			break;
+		case ERR_HDLC_FCS:
+			dev->stats.rx_crc_errors++;
+			dev->stats.rx_errors++;
+			break;
+		case ERR_HDLC_TOO_LONG:
+			dev->stats.rx_length_errors++;
+			dev->stats.rx_errors++;
+			break;
+		default:	/* FIXME - remove printk */
+			netdev_err(dev, "hss_hdlc_poll: status 0x%02X errors %u\n",
+				   desc->status, desc->error_count);
+			dev->stats.rx_errors++;
+		}
+
+		if (!skb) {
+			/* put the desc back on RX-ready queue */
+			desc->buf_len = RX_SIZE;
+			desc->pkt_len = desc->status = 0;
+			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
+			continue;
+		}
+
+		/* process received frame */
+#ifdef __ARMEB__
+		temp = skb;
+		skb = port->rx_buff_tab[n];
+		dma_unmap_single(&dev->dev, desc->data,
+				 RX_SIZE, DMA_FROM_DEVICE);
+#else
+		dma_sync_single_for_cpu(&dev->dev, desc->data,
+					RX_SIZE, DMA_FROM_DEVICE);
+		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
+			      ALIGN(desc->pkt_len, 4) / 4);
+#endif
+		skb_put(skb, desc->pkt_len);
+
+		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);
+
+		skb->protocol = hdlc_type_trans(skb, dev);
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += skb->len;
+		netif_receive_skb(skb);
+
+		/* put the new buffer on RX-free queue */
+#ifdef __ARMEB__
+		port->rx_buff_tab[n] = temp;
+		desc->data = phys;
+#endif
+		desc->buf_len = RX_SIZE;
+		desc->pkt_len = 0;
+		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
+		received++;
+	}
+#if DEBUG_RX
+	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
+#endif
+	return received;	/* not all work done */
+}
+
+
+static void hss_hdlc_txdone_irq(void *pdev)
+{
+	struct net_device *dev = pdev;
+	struct port *port = dev_to_port(dev);
+	int n_desc;
+
+#if DEBUG_TX
+	printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");
+#endif
+	while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
+					port, 1)) >= 0) {
+		struct desc *desc;
+		int start;
+
+		desc = tx_desc_ptr(port, n_desc);
+
+		dev->stats.tx_packets++;
+		dev->stats.tx_bytes += desc->pkt_len;
+
+		dma_unmap_tx(port, desc);
+#if DEBUG_TX
+		printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n",
+		       dev->name, port->tx_buff_tab[n_desc]);
+#endif
+		free_buffer_irq(port->tx_buff_tab[n_desc]);
+		port->tx_buff_tab[n_desc] = NULL;
+
+		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
+		queue_put_desc(port->plat->txreadyq,
+			       tx_desc_phys(port, n_desc), desc);
+		if (start) { /* TX-ready queue was empty */
+#if DEBUG_TX
+			printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
+			       " ready\n", dev->name);
+#endif
+			netif_wake_queue(dev);
+		}
+	}
+}
+
+static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct port *port = dev_to_port(dev);
+	unsigned int txreadyq = port->plat->txreadyq;
+	int len, offset, bytes, n;
+	void *mem;
+	u32 phys;
+	struct desc *desc;
+
+#if DEBUG_TX
+	printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name);
+#endif
+
+	if (unlikely(skb->len > HDLC_MAX_MRU)) {
+		dev_kfree_skb(skb);
+		dev->stats.tx_errors++;
+		return NETDEV_TX_OK;
+	}
+
+	debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len);
+
+	len = skb->len;
+#ifdef __ARMEB__
+	offset = 0; /* no need to keep alignment */
+	bytes = len;
+	mem = skb->data;
+#else
+	offset = (int)skb->data & 3; /* keep 32-bit alignment */
+	bytes = ALIGN(offset + len, 4);
+	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
+		dev_kfree_skb(skb);
+		dev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
+	dev_kfree_skb(skb);
+#endif
+
+	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(&dev->dev, phys)) {
+#ifdef __ARMEB__
+		dev_kfree_skb(skb);
+#else
+		kfree(mem);
+#endif
+		dev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	n = queue_get_desc(txreadyq, port, 1);
+	BUG_ON(n < 0);
+	desc = tx_desc_ptr(port, n);
+
+#ifdef __ARMEB__
+	port->tx_buff_tab[n] = skb;
+#else
+	port->tx_buff_tab[n] = mem;
+#endif
+	desc->data = phys + offset;
+	desc->buf_len = desc->pkt_len = len;
+
+	wmb();
+	queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
+
+	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
+#if DEBUG_TX
+		printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
+#endif
+		netif_stop_queue(dev);
+		/* we could miss TX ready interrupt */
+		if (!qmgr_stat_below_low_watermark(txreadyq)) {
+#if DEBUG_TX
+			printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
+			       dev->name);
+#endif
+			netif_wake_queue(dev);
+		}
+	}
+
+#if DEBUG_TX
+	printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name);
+#endif
+	return NETDEV_TX_OK;
+}
+
+
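+/*
+ * Hardware queue flow, as used by the code in this file (a summary of the
+ * routines above and below, not extra hardware documentation):
+ *
+ * RX: the driver keeps empty buffer descriptors on RX-free; the NPE fills
+ *     one, moves it to RX, and hss_hdlc_poll() hands the data to the stack
+ *     and returns the descriptor to RX-free.
+ * TX: hss_hdlc_xmit() takes a free descriptor from TX-ready, fills it and
+ *     queues it on TX; the NPE transmits and moves it to TX-done, where
+ *     hss_hdlc_txdone_irq() frees the buffer and puts the descriptor back
+ *     on TX-ready.
+ */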
+static int request_hdlc_queues(struct port *port)
+{
+	int err;
+
+	err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
+				 "%s:RX-free", port->netdev->name);
+	if (err)
+		return err;
+
+	err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0,
+				 "%s:RX", port->netdev->name);
+	if (err)
+		goto rel_rxfree;
+
+	err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0,
+				 "%s:TX", port->netdev->name);
+	if (err)
+		goto rel_rx;
+
+	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
+				 "%s:TX-ready", port->netdev->name);
+	if (err)
+		goto rel_tx;
+
+	err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0,
+				 "%s:TX-done", port->netdev->name);
+	if (err)
+		goto rel_txready;
+	return 0;
+
+rel_txready:
+	qmgr_release_queue(port->plat->txreadyq);
+rel_tx:
+	qmgr_release_queue(queue_ids[port->id].tx);
+rel_rx:
+	qmgr_release_queue(queue_ids[port->id].rx);
+rel_rxfree:
+	qmgr_release_queue(queue_ids[port->id].rxfree);
+	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
+	       port->netdev->name);
+	return err;
+}
+
+static void release_hdlc_queues(struct port *port)
+{
+	qmgr_release_queue(queue_ids[port->id].rxfree);
+	qmgr_release_queue(queue_ids[port->id].rx);
+	qmgr_release_queue(queue_ids[port->id].txdone);
+	qmgr_release_queue(queue_ids[port->id].tx);
+	qmgr_release_queue(port->plat->txreadyq);
+}
+
+static int init_hdlc_queues(struct port *port)
+{
+	int i;
+
+	if (!ports_open)
+		if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
+						 POOL_ALLOC_SIZE, 32, 0)))
+			return -ENOMEM;
+
+	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
+					      &port->desc_tab_phys)))
+		return -ENOMEM;
+	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
+	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
+	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
+
+	/* Setup RX buffers */
+	for (i = 0; i < RX_DESCS; i++) {
+		struct desc *desc = rx_desc_ptr(port, i);
+		buffer_t *buff;
+		void *data;
+#ifdef __ARMEB__
+		if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
+			return -ENOMEM;
+		data = buff->data;
+#else
+		if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
+			return -ENOMEM;
+		data = buff;
+#endif
+		desc->buf_len = RX_SIZE;
+		desc->data = dma_map_single(&port->netdev->dev, data,
+					    RX_SIZE, DMA_FROM_DEVICE);
+		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
+			free_buffer(buff);
+			return -EIO;
+		}
+		port->rx_buff_tab[i] = buff;
+	}
+
+	return 0;
+}
+
+static void destroy_hdlc_queues(struct port *port)
+{
+	int i;
+
+	if (port->desc_tab) {
+		for (i = 0; i < RX_DESCS; i++) {
+			struct desc *desc = rx_desc_ptr(port, i);
+			buffer_t *buff = port->rx_buff_tab[i];
+			if (buff) {
+				dma_unmap_single(&port->netdev->dev,
+						 desc->data, RX_SIZE,
+						 DMA_FROM_DEVICE);
+				free_buffer(buff);
+			}
+		}
+		for (i = 0; i < TX_DESCS; i++) {
+			struct desc *desc = tx_desc_ptr(port, i);
+			buffer_t *buff = port->tx_buff_tab[i];
+			if (buff) {
+				dma_unmap_tx(port, desc);
+				free_buffer(buff);
+			}
+		}
+		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
+		port->desc_tab = NULL;
+	}
+
+	if (!ports_open && dma_pool) {
+		dma_pool_destroy(dma_pool);
+		dma_pool = NULL;
+	}
+}
+
+static int hss_hdlc_open(struct net_device *dev)
+{
+	struct port *port = dev_to_port(dev);
+	unsigned long flags;
+	int i, err = 0;
+
+	if ((err = hdlc_open(dev)))
+		return err;
+
+	if ((err = hss_load_firmware(port)))
+		goto err_hdlc_close;
+
+	if ((err = request_hdlc_queues(port)))
+		goto err_hdlc_close;
+
+	if ((err = init_hdlc_queues(port)))
+		goto err_destroy_queues;
+
+	spin_lock_irqsave(&npe_lock, flags);
+	if (port->plat->open)
+		if ((err = port->plat->open(port->id, dev,
+					    hss_hdlc_set_carrier)))
+			goto err_unlock;
+	spin_unlock_irqrestore(&npe_lock, flags);
+
+	/* Populate queues with buffers, no failure after this point */
+	for (i = 0; i < TX_DESCS; i++)
+		queue_put_desc(port->plat->txreadyq,
+			       tx_desc_phys(port, i), tx_desc_ptr(port, i));
+
+	for (i = 0; i < RX_DESCS; i++)
+		queue_put_desc(queue_ids[port->id].rxfree,
+			       rx_desc_phys(port, i), rx_desc_ptr(port, i));
+
+	napi_enable(&port->napi);
+	netif_start_queue(dev);
+
+	qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
+		     hss_hdlc_rx_irq, dev);
+
+	qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
+		     hss_hdlc_txdone_irq, dev);
+	qmgr_enable_irq(queue_ids[port->id].txdone);
+
+	ports_open++;
+
+	hss_set_hdlc_cfg(port);
+	hss_config(port);
+
+	hss_start_hdlc(port);
+
+	/* we may already have RX data, enables IRQ */
+	napi_schedule(&port->napi);
+	return 0;
+
+err_unlock:
+	spin_unlock_irqrestore(&npe_lock, flags);
+err_destroy_queues:
+	destroy_hdlc_queues(port);
+	release_hdlc_queues(port);
+err_hdlc_close:
+	hdlc_close(dev);
+	return err;
+}
+
+static int hss_hdlc_close(struct net_device *dev)
+{
+	struct port *port = dev_to_port(dev);
+	unsigned long flags;
+	int i, buffs = RX_DESCS; /* allocated RX buffers */
+
+	spin_lock_irqsave(&npe_lock, flags);
+	ports_open--;
+	qmgr_disable_irq(queue_ids[port->id].rx);
+	netif_stop_queue(dev);
+	napi_disable(&port->napi);
+
+	hss_stop_hdlc(port);
+
+	while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
+		buffs--;
+	while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
+		buffs--;
+
+	if (buffs)
+		netdev_crit(dev, "unable to drain RX queue, %i buffer(s) left in NPE\n",
+			    buffs);
+
+	buffs = TX_DESCS;
+	while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
+		buffs--; /* cancel TX */
+
+	i = 0;
+	do {
+		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
+			buffs--;
+		if (!buffs)
+			break;
+	} while (++i < MAX_CLOSE_WAIT);
+
+	if (buffs)
+		netdev_crit(dev, "unable to drain TX queue, %i buffer(s) left in NPE\n",
+			    buffs);
+#if DEBUG_CLOSE
+	if (!buffs)
+		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
+#endif
+	qmgr_disable_irq(queue_ids[port->id].txdone);
+
+	if (port->plat->close)
+		port->plat->close(port->id, dev);
+	spin_unlock_irqrestore(&npe_lock, flags);
+
+	destroy_hdlc_queues(port);
+	release_hdlc_queues(port);
+	hdlc_close(dev);
+	return 0;
+}
+
+
+static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
+			   unsigned short parity)
+{
+	struct port *port = dev_to_port(dev);
+
+	if (encoding != ENCODING_NRZ)
+		return -EINVAL;
+
+	switch(parity) {
+	case PARITY_CRC16_PR1_CCITT:
+		port->hdlc_cfg = 0;
+		return 0;
+
+	case PARITY_CRC32_PR1_CCITT:
+		port->hdlc_cfg = PKT_HDLC_CRC_32;
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static u32 check_clock(u32 rate, u32 a, u32 b, u32 c,
+		       u32 *best, u32 *best_diff, u32 *reg)
+{
+	/* a is 10-bit, b is 10-bit, c is 12-bit */
+	u64 new_rate;
+	u32 new_diff;
+
+	new_rate = ixp4xx_timer_freq * (u64)(c + 1);
+	do_div(new_rate, a * (c + 1) + b + 1);
+	new_diff = abs((u32)new_rate - rate);
+
+	if (new_diff < *best_diff) {
+		*best = new_rate;
+		*best_diff = new_diff;
+		*reg = (a << 22) | (b << 12) | c;
+	}
+	return new_diff;
+}
+
+static void find_best_clock(u32 rate, u32 *best, u32 *reg)
+{
+	u32 a, b, diff = 0xFFFFFFFF;
+
+	a = ixp4xx_timer_freq / rate;
+
+	if (a > 0x3FF) { /* 10-bit value - we can go as slow as ca. 65 kb/s */
+		check_clock(rate, 0x3FF, 1, 1, best, &diff, reg);
+		return;
+	}
+	if (a == 0) { /* > 66.666 MHz */
+		a = 1; /* minimum divider is 1 (a = 0, b = 1, c = 1) */
+		rate = ixp4xx_timer_freq;
+	}
+
+	if (rate * a == ixp4xx_timer_freq) { /* don't divide by 0 later */
+		check_clock(rate, a - 1, 1, 1, best, &diff, reg);
+		return;
+	}
+
+	for (b = 0; b < 0x400; b++) {
+		u64 c = (b + 1) * (u64)rate;
+		do_div(c, ixp4xx_timer_freq - rate * a);
+		c--;
+		if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */
+			if (b == 0 && /* also try a bit higher rate */
+			    !check_clock(rate, a - 1, 1, 1, best, &diff, reg))
+				return;
+			check_clock(rate, a, b, 0xFFF, best, &diff, reg);
+			return;
+		}
+		if (!check_clock(rate, a, b, c, best, &diff, reg))
+			return;
+		if (!check_clock(rate, a, b, c + 1, best, &diff, reg))
+			return;
+	}
+}
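+/*
+ * check_clock() above implies the generated rate and register layout:
+ *
+ *	rate = ixp4xx_timer_freq * (c + 1) / (a * (c + 1) + b + 1)
+ *	reg  = (a << 22) | (b << 12) | c
+ *
+ * Illustrative example only (assuming the usual 66.666 MHz timer): a
+ * request for half the timer frequency, 33333000 Hz, gives a = 2 with
+ * rate * a equal to the timer frequency, so find_best_clock() uses
+ * a = 1, b = 1, c = 1: 66666000 * 2 / (1 * 2 + 1 + 1) = 33333000 Hz
+ * exactly, and reg = 0x401001.
+ */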
+
+static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	const size_t size = sizeof(sync_serial_settings);
+	sync_serial_settings new_line;
+	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+	struct port *port = dev_to_port(dev);
+	unsigned long flags;
+	int clk;
+
+	if (cmd != SIOCWANDEV)
+		return hdlc_ioctl(dev, ifr, cmd);
+
+	switch(ifr->ifr_settings.type) {
+	case IF_GET_IFACE:
+		ifr->ifr_settings.type = IF_IFACE_V35;
+		if (ifr->ifr_settings.size < size) {
+			ifr->ifr_settings.size = size; /* data size wanted */
+			return -ENOBUFS;
+		}
+		memset(&new_line, 0, sizeof(new_line));
+		new_line.clock_type = port->clock_type;
+		new_line.clock_rate = port->clock_rate;
+		new_line.loopback = port->loopback;
+		if (copy_to_user(line, &new_line, size))
+			return -EFAULT;
+		return 0;
+
+	case IF_IFACE_SYNC_SERIAL:
+	case IF_IFACE_V35:
+		if(!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (copy_from_user(&new_line, line, size))
+			return -EFAULT;
+
+		clk = new_line.clock_type;
+		if (port->plat->set_clock)
+			clk = port->plat->set_clock(port->id, clk);
+
+		if (clk != CLOCK_EXT && clk != CLOCK_INT)
+			return -EINVAL;	/* No such clock setting */
+
+		if (new_line.loopback != 0 && new_line.loopback != 1)
+			return -EINVAL;
+
+		port->clock_type = clk; /* Update settings */
+		if (clk == CLOCK_INT)
+			find_best_clock(new_line.clock_rate, &port->clock_rate,
+					&port->clock_reg);
+		else {
+			port->clock_rate = 0;
+			port->clock_reg = CLK42X_SPEED_2048KHZ;
+		}
+		port->loopback = new_line.loopback;
+
+		spin_lock_irqsave(&npe_lock, flags);
+
+		if (dev->flags & IFF_UP)
+			hss_config(port);
+
+		if (port->loopback || port->carrier)
+			netif_carrier_on(port->netdev);
+		else
+			netif_carrier_off(port->netdev);
+		spin_unlock_irqrestore(&npe_lock, flags);
+
+		return 0;
+
+	default:
+		return hdlc_ioctl(dev, ifr, cmd);
+	}
+}
+
+/*****************************************************************************
+ * initialization
+ ****************************************************************************/
+
+static const struct net_device_ops hss_hdlc_ops = {
+	.ndo_open       = hss_hdlc_open,
+	.ndo_stop       = hss_hdlc_close,
+	.ndo_change_mtu = hdlc_change_mtu,
+	.ndo_start_xmit = hdlc_start_xmit,
+	.ndo_do_ioctl   = hss_hdlc_ioctl,
+};
+
+static int __devinit hss_init_one(struct platform_device *pdev)
+{
+	struct port *port;
+	struct net_device *dev;
+	hdlc_device *hdlc;
+	int err;
+
+	if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
+		return -ENOMEM;
+
+	if ((port->npe = npe_request(0)) == NULL) {
+		err = -ENODEV;
+		goto err_free;
+	}
+
+	if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {
+		err = -ENOMEM;
+		goto err_plat;
+	}
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	hdlc = dev_to_hdlc(dev);
+	hdlc->attach = hss_hdlc_attach;
+	hdlc->xmit = hss_hdlc_xmit;
+	dev->netdev_ops = &hss_hdlc_ops;
+	dev->tx_queue_len = 100;
+	port->clock_type = CLOCK_EXT;
+	port->clock_rate = 0;
+	port->clock_reg = CLK42X_SPEED_2048KHZ;
+	port->id = pdev->id;
+	port->dev = &pdev->dev;
+	port->plat = pdev->dev.platform_data;
+	netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
+
+	if ((err = register_hdlc_device(dev)))
+		goto err_free_netdev;
+
+	platform_set_drvdata(pdev, port);
+
+	netdev_info(dev, "HSS-%i\n", port->id);
+	return 0;
+
+err_free_netdev:
+	free_netdev(dev);
+err_plat:
+	npe_release(port->npe);
+err_free:
+	kfree(port);
+	return err;
+}
+
+static int __devexit hss_remove_one(struct platform_device *pdev)
+{
+	struct port *port = platform_get_drvdata(pdev);
+
+	unregister_hdlc_device(port->netdev);
+	free_netdev(port->netdev);
+	npe_release(port->npe);
+	platform_set_drvdata(pdev, NULL);
+	kfree(port);
+	return 0;
+}
+
+static struct platform_driver ixp4xx_hss_driver = {
+	.driver.name	= DRV_NAME,
+	.probe		= hss_init_one,
+	.remove		= hss_remove_one,
+};
+
+static int __init hss_init_module(void)
+{
+	if ((ixp4xx_read_feature_bits() &
+	     (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
+	    (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
+		return -ENODEV;
+
+	spin_lock_init(&npe_lock);
+
+	return platform_driver_register(&ixp4xx_hss_driver);
+}
+
+static void __exit hss_cleanup_module(void)
+{
+	platform_driver_unregister(&ixp4xx_hss_driver);
+}
+
+MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ixp4xx_hss");
+module_init(hss_init_module);
+module_exit(hss_cleanup_module);
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/lapbether.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/lapbether.c
new file mode 100644
index 0000000..a73b49e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/lapbether.c
@@ -0,0 +1,451 @@
+/*
+ *	"LAPB via ethernet" driver release 001
+ *
+ *	This code REQUIRES 2.1.15 or higher/ NET3.038
+ *
+ *	This module:
+ *		This module is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ *
+ *	This is a "pseudo" network driver to allow LAPB over Ethernet.
+ *
+ *	This driver can use any ethernet destination address, and can be 
+ *	limited to accept frames from one dedicated ethernet card only.
+ *
+ *	History
+ *	LAPBETH 001	Jonathan Naylor		Cloned from bpqether.c
+ *	2000-10-29	Henner Eisen	lapb_data_indication() return status.
+ *	2000-11-14	Henner Eisen	dev_hold/put, NETDEV_GOING_DOWN support
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/net.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <asm/uaccess.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/stat.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/lapb.h>
+#include <linux/init.h>
+
+#include <net/x25device.h>
+
+static const u8 bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+/* If this number is made larger, check that the temporary string buffer
+ * in lapbeth_new_device is large enough to store the probe device name.*/
+#define MAXLAPBDEV 100
+
+struct lapbethdev {
+	struct list_head	node;
+	struct net_device	*ethdev;	/* link to ethernet device */
+	struct net_device	*axdev;		/* lapbeth device (lapb#) */
+};
+
+static LIST_HEAD(lapbeth_devices);
+
+/* ------------------------------------------------------------------------ */
+
+/*
+ *	Get the LAPB device for the ethernet device
+ */
+static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
+{
+	struct lapbethdev *lapbeth;
+
+	list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node) {
+		if (lapbeth->ethdev == dev) 
+			return lapbeth;
+	}
+	return NULL;
+}
+
+static __inline__ int dev_is_ethdev(struct net_device *dev)
+{
+	return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
+}
+
+/* ------------------------------------------------------------------------ */
+
+/*
+ *	Receive a LAPB frame via an ethernet interface.
+ */
+static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev)
+{
+	int len, err;
+	struct lapbethdev *lapbeth;
+
+	if (dev_net(dev) != &init_net)
+		goto drop;
+
+	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+		return NET_RX_DROP;
+
+	if (!pskb_may_pull(skb, 2))
+		goto drop;
+
+	rcu_read_lock();
+	lapbeth = lapbeth_get_x25_dev(dev);
+	if (!lapbeth)
+		goto drop_unlock;
+	if (!netif_running(lapbeth->axdev))
+		goto drop_unlock;
+
+	len = skb->data[0] + skb->data[1] * 256;
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += len;
+
+	skb_pull(skb, 2);	/* Remove the length bytes */
+	skb_trim(skb, len);	/* Set the length of the data */
+
+	if ((err = lapb_data_received(lapbeth->axdev, skb)) != LAPB_OK) {
+		printk(KERN_DEBUG "lapbether: lapb_data_received err - %d\n", err);
+		goto drop_unlock;
+	}
+out:
+	rcu_read_unlock();
+	return 0;
+drop_unlock:
+	kfree_skb(skb);
+	goto out;
+drop:
+	kfree_skb(skb);
+	return 0;
+}
+
+static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
+{
+	unsigned char *ptr;
+
+	skb_push(skb, 1);
+
+	if (skb_cow(skb, 1))
+		return NET_RX_DROP;
+
+	ptr  = skb->data;
+	*ptr = X25_IFACE_DATA;
+
+	skb->protocol = x25_type_trans(skb, dev);
+	return netif_rx(skb);
+}
+
+/*
+ *	Send a LAPB frame via an ethernet interface
+ */
+static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
+				      struct net_device *dev)
+{
+	int err;
+
+	/*
+	 * Just to be *really* sure not to send anything if the interface
+	 * is down, the ethernet device may have gone.
+	 */
+	if (!netif_running(dev))
+		goto drop;
+
+	switch (skb->data[0]) {
+	case X25_IFACE_DATA:
+		break;
+	case X25_IFACE_CONNECT:
+		if ((err = lapb_connect_request(dev)) != LAPB_OK)
+			pr_err("lapb_connect_request error: %d\n", err);
+		goto drop;
+	case X25_IFACE_DISCONNECT:
+		if ((err = lapb_disconnect_request(dev)) != LAPB_OK)
+			pr_err("lapb_disconnect_request err: %d\n", err);
+		/* Fall thru */
+	default:
+		goto drop;
+	}
+
+	skb_pull(skb, 1);
+
+	if ((err = lapb_data_request(dev, skb)) != LAPB_OK) {
+		pr_err("lapb_data_request error - %d\n", err);
+		goto drop;
+	}
+out:
+	return NETDEV_TX_OK;
+drop:
+	kfree_skb(skb);
+	goto out;
+}
+
+static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
+{
+	struct lapbethdev *lapbeth = netdev_priv(ndev);
+	unsigned char *ptr;
+	struct net_device *dev;
+	int size = skb->len;
+
+	skb->protocol = htons(ETH_P_X25);
+
+	ptr = skb_push(skb, 2);
+
+	*ptr++ = size % 256;
+	*ptr++ = size / 256;
+
+	ndev->stats.tx_packets++;
+	ndev->stats.tx_bytes += size;
+
+	skb->dev = dev = lapbeth->ethdev;
+
+	dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);
+
+	dev_queue_xmit(skb);
+}
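+/*
+ * Resulting frame layout on the wire, as built above and parsed again in
+ * lapbeth_rcv():
+ *
+ *	| Ethernet header, type ETH_P_DEC, broadcast destination |
+ *	| length low byte | length high byte | LAPB frame of "length" bytes |
+ *
+ * e.g. a 300 byte LAPB frame is prefixed with 0x2C 0x01 (300 = 0x012C).
+ */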
+
+static void lapbeth_connected(struct net_device *dev, int reason)
+{
+	unsigned char *ptr;
+	struct sk_buff *skb = dev_alloc_skb(1);
+
+	if (!skb) {
+		pr_err("out of memory\n");
+		return;
+	}
+
+	ptr  = skb_put(skb, 1);
+	*ptr = X25_IFACE_CONNECT;
+
+	skb->protocol = x25_type_trans(skb, dev);
+	netif_rx(skb);
+}
+
+static void lapbeth_disconnected(struct net_device *dev, int reason)
+{
+	unsigned char *ptr;
+	struct sk_buff *skb = dev_alloc_skb(1);
+
+	if (!skb) {
+		pr_err("out of memory\n");
+		return;
+	}
+
+	ptr  = skb_put(skb, 1);
+	*ptr = X25_IFACE_DISCONNECT;
+
+	skb->protocol = x25_type_trans(skb, dev);
+	netif_rx(skb);
+}
+
+/*
+ *	Set AX.25 callsign
+ */
+static int lapbeth_set_mac_address(struct net_device *dev, void *addr)
+{
+	struct sockaddr *sa = addr;
+	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+	return 0;
+}
+
+
+static const struct lapb_register_struct lapbeth_callbacks = {
+	.connect_confirmation    = lapbeth_connected,
+	.connect_indication      = lapbeth_connected,
+	.disconnect_confirmation = lapbeth_disconnected,
+	.disconnect_indication   = lapbeth_disconnected,
+	.data_indication         = lapbeth_data_indication,
+	.data_transmit           = lapbeth_data_transmit,
+};
+
+/*
+ * open/close a device
+ */
+static int lapbeth_open(struct net_device *dev)
+{
+	int err;
+
+	if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
+		pr_err("lapb_register error: %d\n", err);
+		return -ENODEV;
+	}
+
+	netif_start_queue(dev);
+	return 0;
+}
+
+static int lapbeth_close(struct net_device *dev)
+{
+	int err;
+
+	netif_stop_queue(dev);
+
+	if ((err = lapb_unregister(dev)) != LAPB_OK)
+		pr_err("lapb_unregister error: %d\n", err);
+
+	return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static const struct net_device_ops lapbeth_netdev_ops = {
+	.ndo_open	     = lapbeth_open,
+	.ndo_stop	     = lapbeth_close,
+	.ndo_start_xmit	     = lapbeth_xmit,
+	.ndo_set_mac_address = lapbeth_set_mac_address,
+};
+
+static void lapbeth_setup(struct net_device *dev)
+{
+	dev->netdev_ops	     = &lapbeth_netdev_ops;
+	dev->destructor	     = free_netdev;
+	dev->type            = ARPHRD_X25;
+	dev->hard_header_len = 3;
+	dev->mtu             = 1000;
+	dev->addr_len        = 0;
+}
+
+/*
+ *	Setup a new device.
+ */
+static int lapbeth_new_device(struct net_device *dev)
+{
+	struct net_device *ndev;
+	struct lapbethdev *lapbeth;
+	int rc = -ENOMEM;
+
+	ASSERT_RTNL();
+
+	ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", 
+			   lapbeth_setup);
+	if (!ndev)
+		goto out;
+
+	lapbeth = netdev_priv(ndev);
+	lapbeth->axdev = ndev;
+
+	dev_hold(dev);
+	lapbeth->ethdev = dev;
+
+	rc = -EIO;
+	if (register_netdevice(ndev))
+		goto fail;
+
+	list_add_rcu(&lapbeth->node, &lapbeth_devices);
+	rc = 0;
+out:
+	return rc;
+fail:
+	dev_put(dev);
+	free_netdev(ndev);	/* also frees lapbeth (the netdev_priv area) */
+	goto out;
+}
+
+/*
+ *	Free a lapb network device.
+ */
+static void lapbeth_free_device(struct lapbethdev *lapbeth)
+{
+	dev_put(lapbeth->ethdev);
+	list_del_rcu(&lapbeth->node);
+	unregister_netdevice(lapbeth->axdev);
+}
+
+/*
+ *	Handle device status changes.
+ *
+ * Called from notifier with RTNL held.
+ */
+static int lapbeth_device_event(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	struct lapbethdev *lapbeth;
+	struct net_device *dev = ptr;
+
+	if (dev_net(dev) != &init_net)
+		return NOTIFY_DONE;
+
+	if (!dev_is_ethdev(dev))
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_UP:
+		/* New ethernet device -> new LAPB interface	 */
+		if (lapbeth_get_x25_dev(dev) == NULL)
+			lapbeth_new_device(dev);
+		break;
+	case NETDEV_DOWN:	
+		/* ethernet device closed -> close LAPB interface */
+		lapbeth = lapbeth_get_x25_dev(dev);
+		if (lapbeth) 
+			dev_close(lapbeth->axdev);
+		break;
+	case NETDEV_UNREGISTER:
+		/* ethernet device disappears -> remove LAPB interface */
+		lapbeth = lapbeth_get_x25_dev(dev);
+		if (lapbeth)
+			lapbeth_free_device(lapbeth);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static struct packet_type lapbeth_packet_type __read_mostly = {
+	.type = cpu_to_be16(ETH_P_DEC),
+	.func = lapbeth_rcv,
+};
+
+static struct notifier_block lapbeth_dev_notifier = {
+	.notifier_call = lapbeth_device_event,
+};
+
+static const char banner[] __initconst =
+	KERN_INFO "LAPB Ethernet driver version 0.02\n";
+
+static int __init lapbeth_init_driver(void)
+{
+	dev_add_pack(&lapbeth_packet_type);
+
+	register_netdevice_notifier(&lapbeth_dev_notifier);
+
+	printk(banner);
+
+	return 0;
+}
+module_init(lapbeth_init_driver);
+
+static void __exit lapbeth_cleanup_driver(void)
+{
+	struct lapbethdev *lapbeth;
+	struct list_head *entry, *tmp;
+
+	dev_remove_pack(&lapbeth_packet_type);
+	unregister_netdevice_notifier(&lapbeth_dev_notifier);
+
+	rtnl_lock();
+	list_for_each_safe(entry, tmp, &lapbeth_devices) {
+		lapbeth = list_entry(entry, struct lapbethdev, node);
+
+		dev_put(lapbeth->ethdev);
+		unregister_netdevice(lapbeth->axdev);
+	}
+	rtnl_unlock();
+}
+module_exit(lapbeth_cleanup_driver);
+
+MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
+MODULE_DESCRIPTION("The unofficial LAPB over Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/Makefile b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/Makefile
new file mode 100644
index 0000000..609710d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for the Lan Media 21140 based WAN cards
+# Specifically the 1000,1200,5200,5245
+#
+
+obj-$(CONFIG_LANMEDIA) += lmc.o
+
+lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o
+
+# Uncomment the following to have every packet echoed to KERN_DEBUG
+# in hex
+#
+# DBGDEF = \
+# -DDEBUG \
+# -DLMC_PACKET_LOG
+
+ccflags-y := -I. $(DBGDEF)
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc.h b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc.h
new file mode 100644
index 0000000..4ced7ac
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc.h
@@ -0,0 +1,32 @@
+#ifndef _LMC_H_
+#define _LMC_H_
+
+#include "lmc_var.h"
+
+/*
+ * prototypes for everyone
+ */
+int lmc_probe(struct net_device * dev);
+unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned
+      			  devaddr, unsigned regno);
+void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr,
+			       unsigned regno, unsigned data);
+void lmc_led_on(lmc_softc_t * const, u32);
+void lmc_led_off(lmc_softc_t * const, u32);
+unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned);
+void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned);
+void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits);
+void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits);
+
+int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+extern lmc_media_t lmc_ds3_media;
+extern lmc_media_t lmc_ssi_media;
+extern lmc_media_t lmc_t1_media;
+extern lmc_media_t lmc_hssi_media;
+
+#ifdef _DBG_EVENTLOG
+static void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
+#endif
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_debug.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_debug.c
new file mode 100644
index 0000000..15049d7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_debug.c
@@ -0,0 +1,82 @@
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+
+#include "lmc_debug.h"
+
+/*
+ * Prints out len, max to 80 octets using printk, 20 per line
+ */
+#ifdef DEBUG
+#ifdef LMC_PACKET_LOG
+void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
+{
+  int iNewLine = 1;
+  char str[80], *pstr;
+  
+  sprintf(str, KERN_DEBUG "lmc: %s: ", type);
+  pstr = str+strlen(str);
+  
+  if(iLen > 240){
+      printk(KERN_DEBUG "lmc: Printing 240 chars... out of: %d\n", iLen);
+    iLen = 240;
+  }
+  else{
+      printk(KERN_DEBUG "lmc: Printing %d chars\n", iLen);
+  }
+
+  while(iLen > 0) 
+    {
+      sprintf(pstr, "%02x ", *ucData);
+      pstr+=3;
+      ucData++;
+      if( !(iNewLine % 20))
+	{
+	  sprintf(pstr, "\n");
+	  printk(str);
+	  sprintf(str, KERN_DEBUG "lmc: %s: ", type);
+	  pstr=str+strlen(str);
+	}
+      iNewLine++;
+      iLen--;
+    }
+  sprintf(pstr, "\n");
+  printk(str);
+}
+#endif
+#endif
+
+#ifdef DEBUG
+u32 lmcEventLogIndex;
+u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
+
+void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3)
+{
+  lmcEventLogBuf[lmcEventLogIndex++] = EventNum;
+  lmcEventLogBuf[lmcEventLogIndex++] = arg2;
+  lmcEventLogBuf[lmcEventLogIndex++] = arg3;
+  lmcEventLogBuf[lmcEventLogIndex++] = jiffies;
+
+  lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1;
+}
+#endif  /*  DEBUG  */
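+/*
+ * The index wrap in lmcEventLog() relies on LMC_EVENTLOGSIZE *
+ * LMC_EVENTLOGARGS being a power of two (see lmc_debug.h).  With the
+ * default 1024 * 4 = 4096 u32 slots the mask is 0x0FFF, so after the
+ * 1024th event the index wraps from 4096 back to 0 and the oldest
+ * entries are overwritten.
+ */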
+
+void lmc_trace(struct net_device *dev, char *msg){
+#ifdef LMC_TRACE
+    unsigned long j = jiffies + 3; /* Wait for 50 ms */
+
+    if(in_interrupt()){
+        printk("%s: * %s\n", dev->name, msg);
+//        while(time_before(jiffies, j+10))
+//            ;
+    }
+    else {
+        printk("%s: %s\n", dev->name, msg);
+        while(time_before(jiffies, j))
+            schedule();
+    }
+#endif
+}
+
+
+/* --------------------------- end if_lmc_linux.c ------------------------ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_debug.h b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_debug.h
new file mode 100644
index 0000000..2d46f12
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_debug.h
@@ -0,0 +1,52 @@
+#ifndef _LMC_DEBUG_H_
+#define _LMC_DEBUG_H_
+
+#ifdef DEBUG
+#ifdef LMC_PACKET_LOG
+#define LMC_CONSOLE_LOG(x,y,z) lmcConsoleLog((x), (y), (z))
+#else
+#define LMC_CONSOLE_LOG(x,y,z)
+#endif
+#else
+#define LMC_CONSOLE_LOG(x,y,z)
+#endif
+
+
+
+/* Debug --- Event log definitions --- */
+/* EVENTLOGSIZE*EVENTLOGARGS needs to be a power of 2 */
+#define LMC_EVENTLOGSIZE 1024	/* number of events in eventlog */
+#define LMC_EVENTLOGARGS 4		/* number of args for each event */
+
+/* event indicators */
+#define LMC_EVENT_XMT           1
+#define LMC_EVENT_XMTEND        2
+#define LMC_EVENT_XMTINT        3
+#define LMC_EVENT_RCVINT        4
+#define LMC_EVENT_RCVEND        5
+#define LMC_EVENT_INT           6
+#define LMC_EVENT_XMTINTTMO     7
+#define LMC_EVENT_XMTPRCTMO     8
+#define LMC_EVENT_INTEND        9
+#define LMC_EVENT_RESET1       10
+#define LMC_EVENT_RESET2       11
+#define LMC_EVENT_FORCEDRESET  12
+#define LMC_EVENT_WATCHDOG     13
+#define LMC_EVENT_BADPKTSURGE  14
+#define LMC_EVENT_TBUSY0       15
+#define LMC_EVENT_TBUSY1       16
+
+
+#ifdef DEBUG
+extern u32 lmcEventLogIndex;
+extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
+#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z))
+#else
+#define LMC_EVENT_LOG(x,y,z)
+#endif /* DEBUG */
+
+void lmcConsoleLog(char *type, unsigned char *ucData, int iLen);
+void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
+void lmc_trace(struct net_device *dev, char *msg);
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_ioctl.h b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_ioctl.h
new file mode 100644
index 0000000..72fb113
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_ioctl.h
@@ -0,0 +1,257 @@
+#ifndef _LMC_IOCTL_H_
+#define _LMC_IOCTL_H_
+/*	$Id: lmc_ioctl.h,v 1.15 2000/04/06 12:16:43 asj Exp $	*/
+
+ /*
+  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
+  * All rights reserved.  www.lanmedia.com
+  *
+  * This code is written by:
+  * Andrew Stanley-Jones (asj@cban.com)
+  * Rob Braun (bbraun@vix.com),
+  * Michael Graff (explorer@vix.com) and
+  * Matt Thomas (matt@3am-software.com).
+  *
+  * This software may be used and distributed according to the terms
+  * of the GNU General Public License version 2, incorporated herein by reference.
+  */
+
+#define LMCIOCGINFO             SIOCDEVPRIVATE+3 /* get current state */
+#define LMCIOCSINFO             SIOCDEVPRIVATE+4 /* set state to user values */
+#define LMCIOCGETLMCSTATS       SIOCDEVPRIVATE+5
+#define LMCIOCCLEARLMCSTATS     SIOCDEVPRIVATE+6
+#define LMCIOCDUMPEVENTLOG      SIOCDEVPRIVATE+7
+#define LMCIOCGETXINFO          SIOCDEVPRIVATE+8
+#define LMCIOCSETCIRCUIT        SIOCDEVPRIVATE+9
+#define LMCIOCUNUSEDATM         SIOCDEVPRIVATE+10
+#define LMCIOCRESET             SIOCDEVPRIVATE+11
+#define LMCIOCT1CONTROL         SIOCDEVPRIVATE+12
+#define LMCIOCIFTYPE            SIOCDEVPRIVATE+13
+#define LMCIOCXILINX            SIOCDEVPRIVATE+14
+
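+/*
+ * These ioctls are issued through a struct ifreq whose ifr_data points at
+ * the structure being read or written (see lmc_main.c).  A minimal
+ * user-space sketch, illustrative only and assuming an interface named
+ * "hdlc0" plus the usual socket/ioctl headers:
+ *
+ *	lmc_ctl_t ctl;
+ *	struct ifreq ifr = { 0 };
+ *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ *
+ *	strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);
+ *	ifr.ifr_data = (char *)&ctl;
+ *	if (ioctl(fd, LMCIOCGINFO, &ifr) == 0)
+ *		;	/* ctl now holds the current driver state */
+ */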
+#define LMC_CARDTYPE_UNKNOWN            -1
+#define LMC_CARDTYPE_HSSI               1       /* probed card is a HSSI card */
+#define LMC_CARDTYPE_DS3                2       /* probed card is a DS3 card */
+#define LMC_CARDTYPE_SSI                3       /* probed card is a SSI card */
+#define LMC_CARDTYPE_T1                 4       /* probed card is a T1 card */
+
+#define LMC_CTL_CARDTYPE_LMC5200	0	/* HSSI */
+#define LMC_CTL_CARDTYPE_LMC5245	1	/* DS3 */
+#define LMC_CTL_CARDTYPE_LMC1000	2	/* SSI, V.35 */
+#define LMC_CTL_CARDTYPE_LMC1200        3       /* DS1 */
+
+#define LMC_CTL_OFF			0	/* generic OFF value */
+#define LMC_CTL_ON			1	/* generic ON value */
+
+#define LMC_CTL_CLOCK_SOURCE_EXT	0	/* clock off line */
+#define LMC_CTL_CLOCK_SOURCE_INT	1	/* internal clock */
+
+#define LMC_CTL_CRC_LENGTH_16		16
+#define LMC_CTL_CRC_LENGTH_32		32
+#define LMC_CTL_CRC_BYTESIZE_2          2
+#define LMC_CTL_CRC_BYTESIZE_4          4
+
+
+#define LMC_CTL_CABLE_LENGTH_LT_100FT	0	/* DS3 cable < 100 feet */
+#define LMC_CTL_CABLE_LENGTH_GT_100FT	1	/* DS3 cable >= 100 feet */
+
+#define LMC_CTL_CIRCUIT_TYPE_E1 0
+#define LMC_CTL_CIRCUIT_TYPE_T1 1
+
+/*
+ * IFTYPE defines
+ */
+#define LMC_PPP         1               /* use generic HDLC interface */
+#define LMC_NET         2               /* use direct net interface */
+#define LMC_RAW         3               /* use direct net interface */
+
+/*
+ * These are not in the least IOCTL related, but I want them common.
+ */
+/*
+ * assignments for the GPIO register on the DEC chip (common)
+ */
+#define LMC_GEP_INIT		0x01 /* 0: */
+#define LMC_GEP_RESET		0x02 /* 1: */
+#define LMC_GEP_MODE		0x10 /* 4: */
+#define LMC_GEP_DP		0x20 /* 5: */
+#define LMC_GEP_DATA		0x40 /* 6: serial out */
+#define LMC_GEP_CLK	        0x80 /* 7: serial clock */
+
+/*
+ * HSSI GPIO assignments
+ */
+#define LMC_GEP_HSSI_ST		0x04 /* 2: receive timing sense (deprecated) */
+#define LMC_GEP_HSSI_CLOCK	0x08 /* 3: clock source */
+
+/*
+ * T1 GPIO assignments
+ */
+#define LMC_GEP_SSI_GENERATOR	0x04 /* 2: enable prog freq gen serial i/f */
+#define LMC_GEP_SSI_TXCLOCK	0x08 /* 3: provide clock on TXCLOCK output */
+
+/*
+ * Common MII16 bits
+ */
+#define LMC_MII16_LED0         0x0080
+#define LMC_MII16_LED1         0x0100
+#define LMC_MII16_LED2         0x0200
+#define LMC_MII16_LED3         0x0400  /* Error, and the red one */
+#define LMC_MII16_LED_ALL      0x0780  /* LED bit mask */
+#define LMC_MII16_FIFO_RESET   0x0800
+
+/*
+ * definitions for HSSI
+ */
+#define LMC_MII16_HSSI_TA      0x0001
+#define LMC_MII16_HSSI_CA      0x0002
+#define LMC_MII16_HSSI_LA      0x0004
+#define LMC_MII16_HSSI_LB      0x0008
+#define LMC_MII16_HSSI_LC      0x0010
+#define LMC_MII16_HSSI_TM      0x0020
+#define LMC_MII16_HSSI_CRC     0x0040
+
+/*
+ * assignments for the MII register 16 (DS3)
+ */
+#define LMC_MII16_DS3_ZERO	0x0001
+#define LMC_MII16_DS3_TRLBK	0x0002
+#define LMC_MII16_DS3_LNLBK	0x0004
+#define LMC_MII16_DS3_RAIS	0x0008
+#define LMC_MII16_DS3_TAIS	0x0010
+#define LMC_MII16_DS3_BIST	0x0020
+#define LMC_MII16_DS3_DLOS	0x0040
+#define LMC_MII16_DS3_CRC	0x1000
+#define LMC_MII16_DS3_SCRAM	0x2000
+#define LMC_MII16_DS3_SCRAM_LARS 0x4000
+
+/* Note: 2 pairs of LEDs were swapped by mistake
+ * in Xilinx code for DS3 & DS1 adapters */
+#define LMC_DS3_LED0    0x0100          /* bit 08  yellow */
+#define LMC_DS3_LED1    0x0080          /* bit 07  blue   */
+#define LMC_DS3_LED2    0x0400          /* bit 10  green  */
+#define LMC_DS3_LED3    0x0200          /* bit 09  red    */
+
+/*
+ * framer register 0 and 7 (7 is latched and reset on read)
+ */
+#define LMC_FRAMER_REG0_DLOS            0x80    /* digital loss of service */
+#define LMC_FRAMER_REG0_OOFS            0x40    /* out of frame sync */
+#define LMC_FRAMER_REG0_AIS             0x20    /* alarm indication signal */
+#define LMC_FRAMER_REG0_CIS             0x10    /* channel idle */
+#define LMC_FRAMER_REG0_LOC             0x08    /* loss of clock */
+
+/*
+ * Framer register 9 contains the blue alarm signal
+ */
+#define LMC_FRAMER_REG9_RBLUE          0x02     /* Blue alarm failure */
+
+/*
+ * Framer register 0x10 contains xbit error
+ */
+#define LMC_FRAMER_REG10_XBIT          0x01     /* X bit error alarm failure */
+
+/*
+ * And SSI, LMC1000
+ */
+#define LMC_MII16_SSI_DTR	0x0001	/* DTR output RW */
+#define LMC_MII16_SSI_DSR	0x0002	/* DSR input RO */
+#define LMC_MII16_SSI_RTS	0x0004	/* RTS output RW */
+#define LMC_MII16_SSI_CTS	0x0008	/* CTS input RO */
+#define LMC_MII16_SSI_DCD	0x0010	/* DCD input RO */
+#define LMC_MII16_SSI_RI		0x0020	/* RI input RO */
+#define LMC_MII16_SSI_CRC                0x1000  /* CRC select - RW */
+
+/*
+ * bits 0x0080 through 0x0800 are generic, and described
+ * above with LMC_MII16_LED[0123] _LED_ALL, and _FIFO_RESET
+ */
+#define LMC_MII16_SSI_LL		0x1000	/* LL output RW */
+#define LMC_MII16_SSI_RL		0x2000	/* RL output RW */
+#define LMC_MII16_SSI_TM		0x4000	/* TM input RO */
+#define LMC_MII16_SSI_LOOP	0x8000	/* loopback enable RW */
+
+/*
+ * Some of the MII16 bits are mirrored in the MII17 register as well,
+ * but let's keep thing separate for now, and get only the cable from
+ * the MII17.
+ */
+#define LMC_MII17_SSI_CABLE_MASK	0x0038	/* mask to extract the cable type */
+#define LMC_MII17_SSI_CABLE_SHIFT 3	/* shift to extract the cable type */
+
+/*
+ * And T1, LMC1200
+ */
+#define LMC_MII16_T1_UNUSED1    0x0003
+#define LMC_MII16_T1_XOE                0x0004
+#define LMC_MII16_T1_RST                0x0008  /* T1 chip reset - RW */
+#define LMC_MII16_T1_Z                  0x0010  /* output impedance T1=1, E1=0 output - RW */
+#define LMC_MII16_T1_INTR               0x0020  /* interrupt from 8370 - RO */
+#define LMC_MII16_T1_ONESEC             0x0040  /* one second square wave - ro */
+
+#define LMC_MII16_T1_LED0               0x0100
+#define LMC_MII16_T1_LED1               0x0080
+#define LMC_MII16_T1_LED2               0x0400
+#define LMC_MII16_T1_LED3               0x0200
+#define LMC_MII16_T1_FIFO_RESET 0x0800
+
+#define LMC_MII16_T1_CRC                0x1000  /* CRC select - RW */
+#define LMC_MII16_T1_UNUSED2    0xe000
+
+
+/* 8370 framer registers  */
+
+#define T1FRAMER_ALARM1_STATUS  0x47
+#define T1FRAMER_ALARM2_STATUS  0x48
+#define T1FRAMER_FERR_LSB               0x50
+#define T1FRAMER_FERR_MSB               0x51    /* framing bit error counter */
+#define T1FRAMER_LCV_LSB                0x54
+#define T1FRAMER_LCV_MSB                0x55    /* line code violation counter */
+#define T1FRAMER_AERR                   0x5A
+
+/* mask for the above AERR register */
+#define T1FRAMER_LOF_MASK               (0x0f0) /* receive loss of frame */
+#define T1FRAMER_COFA_MASK              (0x0c0) /* change of frame alignment */
+#define T1FRAMER_SEF_MASK               (0x03)  /* severely errored frame  */
+
+/* 8370 framer register ALM1 (0x47) values
+ * used to determine link status
+ */
+
+#define T1F_SIGFRZ      0x01    /* signaling freeze */
+#define T1F_RLOF        0x02    /* receive loss of frame alignment */
+#define T1F_RLOS        0x04    /* receive loss of signal */
+#define T1F_RALOS       0x08    /* receive analog loss of signal or RCKI loss of clock */
+#define T1F_RAIS        0x10    /* receive alarm indication signal */
+#define T1F_UNUSED      0x20
+#define T1F_RYEL        0x40    /* receive yellow alarm */
+#define T1F_RMYEL       0x80    /* receive multiframe yellow alarm */
+
+#define LMC_T1F_WRITE       0
+#define LMC_T1F_READ        1
+
+typedef struct lmc_st1f_control {
+  int command;
+  int address;
+  int value;
+  char __user *data;
+} lmc_t1f_control;
+
+enum lmc_xilinx_c {
+    lmc_xilinx_reset = 1,
+    lmc_xilinx_load_prom = 2,
+    lmc_xilinx_load = 3
+};
+
+struct lmc_xilinx_control {
+    enum lmc_xilinx_c command;
+    int len;
+    char __user *data;
+};
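+/*
+ * For lmc_xilinx_load, lmc_main.c expects "data" to hold one configuration
+ * bit per byte: every byte must be 0 or 1 and is clocked out on LMC_GEP_DATA,
+ * so "len" is the bitstream length in bits rather than in packed bytes.
+ */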
+
+/* ------------------ end T1 defs ------------------- */
+
+#define LMC_MII_LedMask                 0x0780
+#define LMC_MII_LedBitPos               7
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_main.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_main.c
new file mode 100644
index 0000000..76a8a4a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_main.c
@@ -0,0 +1,2144 @@
+ /*
+  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
+  * All rights reserved.  www.lanmedia.com
+  * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
+  *
+  * This code is written by:
+  * Andrew Stanley-Jones (asj@cban.com)
+  * Rob Braun (bbraun@vix.com),
+  * Michael Graff (explorer@vix.com) and
+  * Matt Thomas (matt@3am-software.com).
+  *
+  * With Help By:
+  * David Boggs
+  * Ron Crane
+  * Alan Cox
+  *
+  * This software may be used and distributed according to the terms
+  * of the GNU General Public License version 2, incorporated herein by reference.
+  *
+  * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
+  *
+  * To control link specific options lmcctl is required.
+  * It can be obtained from ftp.lanmedia.com.
+  *
+  * Linux driver notes:
+  * Linux uses the device struct lmc_private to pass private information
+  * around.
+  *
+  * The initialization portion of this driver consists of the lmc_reset()
+  * and lmc_dec_reset() functions, as well as the LED controls and the
+  * lmc_initcsrs() function.
+  *
+  * The watchdog function runs every second and checks to see if
+  * we still have link, and that the timing source is what we expected
+  * it to be.  If link is lost, the interface is marked down, and
+  * we no longer can transmit.
+  *
+  */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/hdlc.h>
+#include <linux/init.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/inet.h>
+#include <linux/bitops.h>
+#include <asm/processor.h>             /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+//#include <asm/spinlock.h>
+
+#define DRIVER_MAJOR_VERSION     1
+#define DRIVER_MINOR_VERSION    34
+#define DRIVER_SUB_VERSION       0
+
+#define DRIVER_VERSION  ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
+
+#include "lmc.h"
+#include "lmc_var.h"
+#include "lmc_ioctl.h"
+#include "lmc_debug.h"
+#include "lmc_proto.h"
+
+static int LMC_PKT_BUF_SZ = 1542;
+
+static DEFINE_PCI_DEVICE_TABLE(lmc_pci_tbl) = {
+	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
+	  PCI_VENDOR_ID_LMC, PCI_ANY_ID },
+	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
+	  PCI_ANY_ID, PCI_VENDOR_ID_LMC },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
+MODULE_LICENSE("GPL v2");
+
+
+static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
+					struct net_device *dev);
+static int lmc_rx (struct net_device *dev);
+static int lmc_open(struct net_device *dev);
+static int lmc_close(struct net_device *dev);
+static struct net_device_stats *lmc_get_stats(struct net_device *dev);
+static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
+static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
+static void lmc_softreset(lmc_softc_t * const);
+static void lmc_running_reset(struct net_device *dev);
+static int lmc_ifdown(struct net_device * const);
+static void lmc_watchdog(unsigned long data);
+static void lmc_reset(lmc_softc_t * const sc);
+static void lmc_dec_reset(lmc_softc_t * const sc);
+static void lmc_driver_timeout(struct net_device *dev);
+
+/*
+ * linux reserves 16 device specific IOCTLs.  We call them
+ * LMCIOC* to control various bits of our world.
+ */
+int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
+{
+    lmc_softc_t *sc = dev_to_sc(dev);
+    lmc_ctl_t ctl;
+    int ret = -EOPNOTSUPP;
+    u16 regVal;
+    unsigned long flags;
+
+    lmc_trace(dev, "lmc_ioctl in");
+
+    /*
+     * Most functions mess with the structure
+     * Disable interrupts while we do the polling
+     */
+
+    switch (cmd) {
+        /*
+         * Return current driver state.  Since we keep this up
+         * to date internally, just copy this out to the user.
+         */
+    case LMCIOCGINFO: /*fold01*/
+	if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+        break;
+
+    case LMCIOCSINFO: /*fold01*/
+        if (!capable(CAP_NET_ADMIN)) {
+            ret = -EPERM;
+            break;
+        }
+
+        if(dev->flags & IFF_UP){
+            ret = -EBUSY;
+            break;
+        }
+
+	if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
+		ret = -EFAULT;
+		break;
+	}
+
+	spin_lock_irqsave(&sc->lmc_lock, flags);
+        sc->lmc_media->set_status (sc, &ctl);
+
+        if(ctl.crc_length != sc->ictl.crc_length) {
+            sc->lmc_media->set_crc_length(sc, ctl.crc_length);
+	    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
+		sc->TxDescriptControlInit |=  LMC_TDES_ADD_CRC_DISABLE;
+	    else
+		sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
+        }
+	spin_unlock_irqrestore(&sc->lmc_lock, flags);
+
+        ret = 0;
+        break;
+
+    case LMCIOCIFTYPE: /*fold01*/
+        {
+	    u16 old_type = sc->if_type;
+	    u16	new_type;
+
+	    if (!capable(CAP_NET_ADMIN)) {
+		ret = -EPERM;
+		break;
+	    }
+
+	    if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
+		ret = -EFAULT;
+		break;
+	    }
+
+            
+	    if (new_type == old_type)
+	    {
+		ret = 0 ;
+		break;				/* no change */
+            }
+            
+	    spin_lock_irqsave(&sc->lmc_lock, flags);
+            lmc_proto_close(sc);
+
+            sc->if_type = new_type;
+            lmc_proto_attach(sc);
+	    ret = lmc_proto_open(sc);
+	    spin_unlock_irqrestore(&sc->lmc_lock, flags);
+	    break;
+	}
+
+    case LMCIOCGETXINFO: /*fold01*/
+	spin_lock_irqsave(&sc->lmc_lock, flags);
+        sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;
+
+        sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
+        sc->lmc_xinfo.PciSlotNumber = 0;
+        sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
+        sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
+        sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
+        sc->lmc_xinfo.XilinxRevisionNumber =
+            lmc_mii_readreg (sc, 0, 3) & 0xf;
+        sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
+        sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc);
+        sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16);
+	spin_unlock_irqrestore(&sc->lmc_lock, flags);
+
+        sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
+
+        if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
+			 sizeof(struct lmc_xinfo)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+
+        break;
+
+    case LMCIOCGETLMCSTATS:
+	    spin_lock_irqsave(&sc->lmc_lock, flags);
+	    if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
+		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
+		    sc->extra_stats.framingBitErrorCount +=
+			    lmc_mii_readreg(sc, 0, 18) & 0xff;
+		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
+		    sc->extra_stats.framingBitErrorCount +=
+			    (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
+		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
+		    sc->extra_stats.lineCodeViolationCount +=
+			    lmc_mii_readreg(sc, 0, 18) & 0xff;
+		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
+		    sc->extra_stats.lineCodeViolationCount +=
+			    (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
+		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
+		    regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;
+
+		    sc->extra_stats.lossOfFrameCount +=
+			    (regVal & T1FRAMER_LOF_MASK) >> 4;
+		    sc->extra_stats.changeOfFrameAlignmentCount +=
+			    (regVal & T1FRAMER_COFA_MASK) >> 2;
+		    sc->extra_stats.severelyErroredFrameCount +=
+			    regVal & T1FRAMER_SEF_MASK;
+	    }
+	    spin_unlock_irqrestore(&sc->lmc_lock, flags);
+	    if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
+			     sizeof(sc->lmc_device->stats)) ||
+		copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
+			     &sc->extra_stats, sizeof(sc->extra_stats)))
+		    ret = -EFAULT;
+	    else
+		    ret = 0;
+	    break;
+
+    case LMCIOCCLEARLMCSTATS:
+	    if (!capable(CAP_NET_ADMIN)) {
+		    ret = -EPERM;
+		    break;
+	    }
+
+	    spin_lock_irqsave(&sc->lmc_lock, flags);
+	    memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
+	    memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
+	    sc->extra_stats.check = STATCHECK;
+	    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
+		    sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
+	    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
+	    spin_unlock_irqrestore(&sc->lmc_lock, flags);
+	    ret = 0;
+	    break;
+
+    case LMCIOCSETCIRCUIT: /*fold01*/
+        if (!capable(CAP_NET_ADMIN)){
+            ret = -EPERM;
+            break;
+        }
+
+        if(dev->flags & IFF_UP){
+            ret = -EBUSY;
+            break;
+        }
+
+	if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
+		ret = -EFAULT;
+		break;
+	}
+	spin_lock_irqsave(&sc->lmc_lock, flags);
+        sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
+        sc->ictl.circuit_type = ctl.circuit_type;
+	spin_unlock_irqrestore(&sc->lmc_lock, flags);
+        ret = 0;
+
+        break;
+
+    case LMCIOCRESET: /*fold01*/
+        if (!capable(CAP_NET_ADMIN)){
+            ret = -EPERM;
+            break;
+        }
+
+	spin_lock_irqsave(&sc->lmc_lock, flags);
+        /* Reset driver and bring back to current state */
+        printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
+        lmc_running_reset (dev);
+        printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
+
+        LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
+	spin_unlock_irqrestore(&sc->lmc_lock, flags);
+
+        ret = 0;
+        break;
+
+#ifdef DEBUG
+    case LMCIOCDUMPEVENTLOG:
+	if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {
+		ret = -EFAULT;
+		break;
+	}
+	if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
+			 sizeof(lmcEventLogBuf)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+
+        break;
+#endif /* DEBUG */
+    case LMCIOCT1CONTROL: /*fold01*/
+        if (sc->lmc_cardtype != LMC_CARDTYPE_T1){
+            ret = -EOPNOTSUPP;
+            break;
+        }
+        break;
+    case LMCIOCXILINX: /*fold01*/
+        {
+            struct lmc_xilinx_control xc; /*fold02*/
+
+            if (!capable(CAP_NET_ADMIN)){
+                ret = -EPERM;
+                break;
+            }
+
+            /*
+             * Stop the transmitter while we restart the hardware
+             */
+            netif_stop_queue(dev);
+
+	    if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
+		ret = -EFAULT;
+		break;
+	    }
+            switch(xc.command){
+            case lmc_xilinx_reset: /*fold02*/
+                {
+                    u16 mii;
+		    spin_lock_irqsave(&sc->lmc_lock, flags);
+                    mii = lmc_mii_readreg (sc, 0, 16);
+
+                    /*
+                     * Make all of them 0 and make input
+                     */
+                    lmc_gpio_mkinput(sc, 0xff);
+
+                    /*
+                     * make the reset output
+                     */
+                    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
+
+                    /*
+                     * RESET low to force configuration.  This also forces
+                     * the transmitter clock to be internal, but we expect to reset
+                     * that later anyway.
+                     */
+
+                    sc->lmc_gpio &= ~LMC_GEP_RESET;
+                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+
+
+                    /*
+                     * hold for more than 10 microseconds
+                     */
+                    udelay(50);
+
+                    sc->lmc_gpio |= LMC_GEP_RESET;
+                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+
+
+                    /*
+                     * stop driving Xilinx-related signals
+                     */
+                    lmc_gpio_mkinput(sc, 0xff);
+
+                    /* Reset the framer hardware */
+                    sc->lmc_media->set_link_status (sc, 1);
+                    sc->lmc_media->set_status (sc, NULL);
+//                    lmc_softreset(sc);
+
+                    {
+                        int i;
+                        for(i = 0; i < 5; i++){
+                            lmc_led_on(sc, LMC_DS3_LED0);
+                            mdelay(100);
+                            lmc_led_off(sc, LMC_DS3_LED0);
+                            lmc_led_on(sc, LMC_DS3_LED1);
+                            mdelay(100);
+                            lmc_led_off(sc, LMC_DS3_LED1);
+                            lmc_led_on(sc, LMC_DS3_LED3);
+                            mdelay(100);
+                            lmc_led_off(sc, LMC_DS3_LED3);
+                            lmc_led_on(sc, LMC_DS3_LED2);
+                            mdelay(100);
+                            lmc_led_off(sc, LMC_DS3_LED2);
+                        }
+                    }
+		    spin_unlock_irqrestore(&sc->lmc_lock, flags);
+                    
+                    
+
+                    ret = 0x0;
+
+                }
+
+                break;
+            case lmc_xilinx_load_prom: /*fold02*/
+                {
+                    u16 mii;
+                    int timeout = 500000;
+		    spin_lock_irqsave(&sc->lmc_lock, flags);
+                    mii = lmc_mii_readreg (sc, 0, 16);
+
+                    /*
+                     * Make all of them 0 and make input
+                     */
+                    lmc_gpio_mkinput(sc, 0xff);
+
+                    /*
+                     * make the reset output
+                     */
+                    lmc_gpio_mkoutput(sc,  LMC_GEP_DP | LMC_GEP_RESET);
+
+                    /*
+                     * RESET low to force configuration.  This also forces
+                     * the transmitter clock to be internal, but we expect to reset
+                     * that later anyway.
+                     */
+
+                    sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
+                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+
+
+                    /*
+                     * hold for more than 10 microseconds
+                     */
+                    udelay(50);
+
+                    sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
+                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+
+                    /*
+                     * busy wait for the chip to reset
+                     */
+                    while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
+                           (timeout-- > 0))
+                        cpu_relax();
+
+
+                    /*
+                     * stop driving Xilinx-related signals
+                     */
+                    lmc_gpio_mkinput(sc, 0xff);
+		    spin_unlock_irqrestore(&sc->lmc_lock, flags);
+
+                    ret = 0x0;
+                    
+
+                    break;
+
+                }
+
+            case lmc_xilinx_load: /*fold02*/
+                {
+                    char *data;
+                    int pos;
+                    int timeout = 500000;
+
+                    if (!xc.data) {
+                            ret = -EINVAL;
+                            break;
+                    }
+
+                    data = kmalloc(xc.len, GFP_KERNEL);
+                    if (!data) {
+                            ret = -ENOMEM;
+                            break;
+                    }
+                    
+                    if(copy_from_user(data, xc.data, xc.len))
+                    {
+                    	kfree(data);
+                    	ret = -EFAULT;
+                    	break;
+                    }
+
+                    printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
+
+		    spin_lock_irqsave(&sc->lmc_lock, flags);
+                    lmc_gpio_mkinput(sc, 0xff);
+
+                    /*
+                     * Clear the Xilinx and start programming from the DEC
+                     */
+
+                    /*
+                     * Set output as:
+                     * Reset: 0 (active)
+                     * DP:    0 (active)
+                     * Mode:  1
+                     *
+                     */
+                    sc->lmc_gpio = 0x00;
+                    sc->lmc_gpio &= ~LMC_GEP_DP;
+                    sc->lmc_gpio &= ~LMC_GEP_RESET;
+                    sc->lmc_gpio |=  LMC_GEP_MODE;
+                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+
+                    lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);
+
+                    /*
+                     * Wait at least 10 us (we wait 50 us to be safe)
+                     */
+                    udelay(50);
+
+                    /*
+                     * Clear reset and activate programming lines
+                     * Reset: Input
+                     * DP:    Input
+                     * Clock: Output
+                     * Data:  Output
+                     * Mode:  Output
+                     */
+                    lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);
+
+                    /*
+                     * Set LOAD, DATA, Clock to 1
+                     */
+                    sc->lmc_gpio = 0x00;
+                    sc->lmc_gpio |= LMC_GEP_MODE;
+                    sc->lmc_gpio |= LMC_GEP_DATA;
+                    sc->lmc_gpio |= LMC_GEP_CLK;
+                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+                    
+                    lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE );
+
+                    /*
+                     * busy wait for the chip to reset
+                     */
+                    while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
+                           (timeout-- > 0))
+                        cpu_relax();
+
+                    printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000-timeout);
+
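+                    /*
+                     * Bit-bang the configuration bitstream into the Xilinx:
+                     * each byte of the user buffer encodes a single bit,
+                     * which is presented on the DATA line and clocked in
+                     * with a low-to-high transition on CLK while MODE is
+                     * held high.
+                     */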
+                    for(pos = 0; pos < xc.len; pos++){
+                        switch(data[pos]){
+                        case 0:
+                            sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
+                            break;
+                        case 1:
+                            sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
+                            break;
+                        default:
+                            printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
+                            sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
+                        }
+                        sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
+                        sc->lmc_gpio |= LMC_GEP_MODE;
+                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+                        udelay(1);
+                        
+                        sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */
+                        sc->lmc_gpio |= LMC_GEP_MODE;
+                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+                        udelay(1);
+                    }
+                    if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){
+                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
+                    }
+                    else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){
+                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
+                    }
+                    else {
+                        printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
+                    }
+
+                    lmc_gpio_mkinput(sc, 0xff);
+                    
+                    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
+                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
+
+                    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
+                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
+		    spin_unlock_irqrestore(&sc->lmc_lock, flags);
+
+                    kfree(data);
+                    
+                    ret = 0;
+                    
+                    break;
+                }
+            default: /*fold02*/
+                ret = -EBADE;
+                break;
+            }
+
+            netif_wake_queue(dev);
+            sc->lmc_txfull = 0;
+
+        }
+        break;
+    default: /*fold01*/
+        /* If we don't know what to do, give the protocol a shot. */
+        ret = lmc_proto_ioctl (sc, ifr, cmd);
+        break;
+    }
+
+    lmc_trace(dev, "lmc_ioctl out");
+
+    return ret;
+}
+
+
+/* the watchdog process that cruises around */
+static void lmc_watchdog (unsigned long data) /*fold00*/
+{
+    struct net_device *dev = (struct net_device *)data;
+    lmc_softc_t *sc = dev_to_sc(dev);
+    int link_status;
+    u32 ticks;
+    unsigned long flags;
+
+    lmc_trace(dev, "lmc_watchdog in");
+
+    spin_lock_irqsave(&sc->lmc_lock, flags);
+
+    if(sc->check != 0xBEAFCAFE){
+        printk("LMC: Corrupt net_device struct, breaking out\n");
+	spin_unlock_irqrestore(&sc->lmc_lock, flags);
+        return;
+    }
+
+
+    /* Make sure the tx jabber and rx watchdog are off,
+     * and the transmit and receive processes are running.
+     */
+
+    LMC_CSR_WRITE (sc, csr_15, 0x00000011);
+    sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
+    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
+
+    if (sc->lmc_ok == 0)
+        goto kick_timer;
+
+    LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
+
+    /* --- begin time out check -----------------------------------
+     * check for a transmit interrupt timeout
+     * Has the packet xmt vs xmt serviced threshold been exceeded */
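+    /* The condition below must hold on two consecutive watchdog passes:
+     * the first pass only arms tx_TimeoutInd, the second forces a
+     * running reset of the DEC chip. */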
+    if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
+	sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
+	sc->tx_TimeoutInd == 0)
+    {
+
+        /* wait for the watchdog to come around again */
+        sc->tx_TimeoutInd = 1;
+    }
+    else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
+	     sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
+	     sc->tx_TimeoutInd)
+    {
+
+        LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
+
+        sc->tx_TimeoutDisplay = 1;
+	sc->extra_stats.tx_TimeoutCnt++;
+
+        /* DEC chip is stuck, hit it with a RESET!!!! */
+        lmc_running_reset (dev);
+
+
+        /* look at receive & transmit process state to make sure they are running */
+        LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
+
+        /* look at: DSR - 02  for Reg 16
+         *                  CTS - 08
+         *                  DCD - 10
+         *                  RI  - 20
+         * for Reg 17
+         */
+        LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17));
+
+        /* reset the transmit timeout detection flag */
+        sc->tx_TimeoutInd = 0;
+        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
+	sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
+    } else {
+        sc->tx_TimeoutInd = 0;
+        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
+	sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
+    }
+
+    /* --- end time out check ----------------------------------- */
+
+
+    link_status = sc->lmc_media->get_link_status (sc);
+
+    /*
+     * hardware level link lost, but the interface is marked as up.
+     * Mark it as down.
+     */
+    if ((link_status == 0) && (sc->last_link_status != 0)) {
+        printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
+        sc->last_link_status = 0;
+        /* lmc_reset (sc); Why reset??? The link can go down ok */
+
+        /* Inform the world that link has been lost */
+	netif_carrier_off(dev);
+    }
+
+    /*
+     * hardware link is up, but the interface is marked as down.
+     * Bring it back up again.
+     */
+     if (link_status != 0 && sc->last_link_status == 0) {
+         printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
+         sc->last_link_status = 1;
+         /* lmc_reset (sc); Again why reset??? */
+
+	 netif_carrier_on(dev);
+     }
+
+    /* Call media specific watchdog functions */
+    sc->lmc_media->watchdog(sc);
+
+    /*
+     * Poke the transmitter to make sure it
+     * never stops, even if we run out of mem
+     */
+    LMC_CSR_WRITE(sc, csr_rxpoll, 0);
+
+    /*
+     * Check for code that failed
+     * and try to fix it as appropriate
+     */
+    if(sc->failed_ring == 1){
+        /*
+         * Failed to set up the recv/xmit ring;
+         * Try again
+         */
+        sc->failed_ring = 0;
+        lmc_softreset(sc);
+    }
+    if(sc->failed_recv_alloc == 1){
+        /*
+         * We failed to alloc mem in the
+         * interrupt handler, go through the rings
+         * and rebuild them
+         */
+        sc->failed_recv_alloc = 0;
+        lmc_softreset(sc);
+    }
+
+
+    /*
+     * remember the timer value
+     */
+kick_timer:
+
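+    /* The general purpose timer counts down, so 0xffff minus the
+     * remaining count gives the ticks elapsed since the last reload
+     * (stored in ictl, presumably for reporting through the ioctl
+     * interface). */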
+    ticks = LMC_CSR_READ (sc, csr_gp_timer);
+    LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL);
+    sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);
+
+    /*
+     * restart this timer.
+     */
+    sc->timer.expires = jiffies + (HZ);
+    add_timer (&sc->timer);
+
+    spin_unlock_irqrestore(&sc->lmc_lock, flags);
+
+    lmc_trace(dev, "lmc_watchdog out");
+
+}
+
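+/* Called by the generic HDLC layer to validate the requested physical
+ * parameters; this driver only accepts NRZ encoding with 16-bit CCITT
+ * CRC parity. */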
+static int lmc_attach(struct net_device *dev, unsigned short encoding,
+		      unsigned short parity)
+{
+	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
+		return 0;
+	return -EINVAL;
+}
+
+static const struct net_device_ops lmc_ops = {
+	.ndo_open       = lmc_open,
+	.ndo_stop       = lmc_close,
+	.ndo_change_mtu = hdlc_change_mtu,
+	.ndo_start_xmit = hdlc_start_xmit,
+	.ndo_do_ioctl   = lmc_ioctl,
+	.ndo_tx_timeout = lmc_driver_timeout,
+	.ndo_get_stats  = lmc_get_stats,
+};
+
+static int __devinit lmc_init_one(struct pci_dev *pdev,
+				  const struct pci_device_id *ent)
+{
+	lmc_softc_t *sc;
+	struct net_device *dev;
+	u16 subdevice;
+	u16 AdapModelNum;
+	int err;
+	static int cards_found;
+
+	/* lmc_trace(dev, "lmc_init_one in"); */
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, "lmc");
+	if (err) {
+		printk(KERN_ERR "lmc: pci_request_region failed\n");
+		goto err_req_io;
+	}
+
+	/*
+	 * Allocate our own device structure
+	 */
+	sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL);
+	if (!sc) {
+		err = -ENOMEM;
+		goto err_kzalloc;
+	}
+
+	dev = alloc_hdlcdev(sc);
+	if (!dev) {
+		printk(KERN_ERR "lmc: alloc_hdlcdev for device failed\n");
+		err = -ENOMEM;
+		goto err_hdlcdev;
+	}
+
+
+	dev->type = ARPHRD_HDLC;
+	dev_to_hdlc(dev)->xmit = lmc_start_xmit;
+	dev_to_hdlc(dev)->attach = lmc_attach;
+	dev->netdev_ops = &lmc_ops;
+	dev->watchdog_timeo = HZ; /* 1 second */
+	dev->tx_queue_len = 100;
+	sc->lmc_device = dev;
+	sc->name = dev->name;
+	sc->if_type = LMC_PPP;
+	sc->check = 0xBEAFCAFE;
+	dev->base_addr = pci_resource_start(pdev, 0);
+	dev->irq = pdev->irq;
+	pci_set_drvdata(pdev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	/*
+	 * This will get the protocol layer ready and do any one-time init.
+	 * Must have a valid sc and dev structure
+	 */
+	lmc_proto_attach(sc);
+
+	/* Initialize the spin lock so we can use it later */
+
+	spin_lock_init(&sc->lmc_lock);
+	pci_set_master(pdev);
+
+	printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
+	       dev->base_addr, dev->irq);
+
+	err = register_hdlc_device(dev);
+	if (err) {
+		printk(KERN_ERR "%s: register_hdlc_device failed.\n", dev->name);
+		free_netdev(dev);
+		goto err_hdlcdev;
+	}
+
+    sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
+    sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
+
+    /*
+     * Check either the subvendor or the subdevice: some systems have the
+     * two values swapped in the BIOS (apparently version and arch
+     * dependent), so when they are reversed, exchange them here.
+     */
+    if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
+	    subdevice = pdev->subsystem_vendor;
+
+    switch (subdevice) {
+    case PCI_DEVICE_ID_LMC_HSSI:
+	printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
+        sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
+        sc->lmc_media = &lmc_hssi_media;
+        break;
+    case PCI_DEVICE_ID_LMC_DS3:
+	printk(KERN_INFO "%s: LMC DS3\n", dev->name);
+        sc->lmc_cardtype = LMC_CARDTYPE_DS3;
+        sc->lmc_media = &lmc_ds3_media;
+        break;
+    case PCI_DEVICE_ID_LMC_SSI:
+	printk(KERN_INFO "%s: LMC SSI\n", dev->name);
+        sc->lmc_cardtype = LMC_CARDTYPE_SSI;
+        sc->lmc_media = &lmc_ssi_media;
+        break;
+    case PCI_DEVICE_ID_LMC_T1:
+	printk(KERN_INFO "%s: LMC T1\n", dev->name);
+        sc->lmc_cardtype = LMC_CARDTYPE_T1;
+        sc->lmc_media = &lmc_t1_media;
+        break;
+    default:
+	printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
+        break;
+    }
+
+    lmc_initcsrs (sc, dev->base_addr, 8);
+
+    lmc_gpio_mkinput (sc, 0xff);
+    sc->lmc_gpio = 0;		/* drive no signals yet */
+
+    sc->lmc_media->defaults (sc);
+
+    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
+
+    /* verify that the PCI Sub System ID matches the Adapter Model number
+     * from the MII register
+     */
+    AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
+
+    if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
+	 subdevice != PCI_DEVICE_ID_LMC_T1) &&
+	(AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
+	 subdevice != PCI_DEVICE_ID_LMC_SSI) &&
+	(AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
+	 subdevice != PCI_DEVICE_ID_LMC_DS3) &&
+	(AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
+	 subdevice != PCI_DEVICE_ID_LMC_HSSI))
+	    printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
+		   " Subsystem ID = 0x%04x\n",
+		   dev->name, AdapModelNum, subdevice);
+
+    /*
+     * reset clock
+     */
+    LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
+
+    sc->board_idx = cards_found++;
+    sc->extra_stats.check = STATCHECK;
+    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
+	    sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
+    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
+
+    sc->lmc_ok = 0;
+    sc->last_link_status = 0;
+
+    lmc_trace(dev, "lmc_init_one out");
+    return 0;
+
+err_hdlcdev:
+	pci_set_drvdata(pdev, NULL);
+	kfree(sc);
+err_kzalloc:
+	pci_release_regions(pdev);
+err_req_io:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/*
+ * Called from pci when removing module.
+ */
+static void __devexit lmc_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	if (dev) {
+		printk(KERN_DEBUG "%s: removing...\n", dev->name);
+		unregister_hdlc_device(dev);
+		free_netdev(dev);
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		pci_set_drvdata(pdev, NULL);
+	}
+}
+
+/* After this is called, packets can be sent.
+ * Does not initialize the addresses
+ */
+static int lmc_open(struct net_device *dev)
+{
+    lmc_softc_t *sc = dev_to_sc(dev);
+    int err;
+
+    lmc_trace(dev, "lmc_open in");
+
+    lmc_led_on(sc, LMC_DS3_LED0);
+
+    lmc_dec_reset(sc);
+    lmc_reset(sc);
+
+    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
+    LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
+		  lmc_mii_readreg(sc, 0, 17));
+
+    if (sc->lmc_ok){
+        lmc_trace(dev, "lmc_open lmc_ok out");
+        return 0;
+    }
+
+    lmc_softreset (sc);
+
+    /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
+    if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){
+        printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
+        lmc_trace(dev, "lmc_open irq failed out");
+        return -EAGAIN;
+    }
+    sc->got_irq = 1;
+
+    /* Assert Terminal Active */
+    sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
+    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
+
+    /*
+     * reset to last state.
+     */
+    sc->lmc_media->set_status (sc, NULL);
+
+    /* setup default bits to be used in tulip_desc_t transmit descriptor
+     * -baz */
+    sc->TxDescriptControlInit = (
+                                 LMC_TDES_INTERRUPT_ON_COMPLETION
+                                 | LMC_TDES_FIRST_SEGMENT
+                                 | LMC_TDES_LAST_SEGMENT
+                                 | LMC_TDES_SECOND_ADDR_CHAINED
+                                 | LMC_TDES_DISABLE_PADDING
+                                );
+
+    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
+        /* disable 32 bit CRC generated by ASIC */
+        sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
+    }
+    sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
+    /* Acknowledge the Terminal Active and light LEDs */
+
+    /* dev->flags |= IFF_UP; */
+
+    if ((err = lmc_proto_open(sc)) != 0)
+	    return err;
+
+    netif_start_queue(dev);
+    sc->extra_stats.tx_tbusy0++;
+
+    /*
+     * select what interrupts we want to get
+     */
+    sc->lmc_intrmask = 0;
+    /* Should be using the default interrupt mask defined in the .h file. */
+    sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
+                         | TULIP_STS_RXINTR
+                         | TULIP_STS_TXINTR
+                         | TULIP_STS_ABNRMLINTR
+                         | TULIP_STS_SYSERROR
+                         | TULIP_STS_TXSTOPPED
+                         | TULIP_STS_TXUNDERFLOW
+                         | TULIP_STS_RXSTOPPED
+		         | TULIP_STS_RXNOBUF
+                        );
+    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
+
+    sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
+    sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
+    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
+
+    sc->lmc_ok = 1; /* Run watchdog */
+
+    /*
+     * Set the if up now - pfb
+     */
+
+    sc->last_link_status = 1;
+
+    /*
+     * Set up a timer for the watchdog and start it running.
+     */
+    init_timer (&sc->timer);
+    sc->timer.expires = jiffies + HZ;
+    sc->timer.data = (unsigned long) dev;
+    sc->timer.function = lmc_watchdog;
+    add_timer (&sc->timer);
+
+    lmc_trace(dev, "lmc_open out");
+
+    return 0;
+}
+
+/* Total reset to compensate for the AdTran DSU doing bad things
+ *  under heavy load
+ */
+
+static void lmc_running_reset (struct net_device *dev) /*fold00*/
+{
+    lmc_softc_t *sc = dev_to_sc(dev);
+
+    lmc_trace(dev, "lmc_running_reset in");
+
+    /* stop interrupts */
+    /* Clear the interrupt mask */
+    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
+
+    lmc_dec_reset (sc);
+    lmc_reset (sc);
+    lmc_softreset (sc);
+    /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
+    sc->lmc_media->set_link_status (sc, 1);
+    sc->lmc_media->set_status (sc, NULL);
+
+    netif_wake_queue(dev);
+
+    sc->lmc_txfull = 0;
+    sc->extra_stats.tx_tbusy0++;
+
+    sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
+    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
+
+    sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
+    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
+
+    lmc_trace(dev, "lmc_running_reset out");
+}
+
+
+/* This is what is called when you ifconfig down a device.
+ * This disables the timer for the watchdog and keepalives,
+ * and disables the irq for dev.
+ */
+static int lmc_close(struct net_device *dev)
+{
+    /* not calling release_region() as we should */
+    lmc_softc_t *sc = dev_to_sc(dev);
+
+    lmc_trace(dev, "lmc_close in");
+
+    sc->lmc_ok = 0;
+    sc->lmc_media->set_link_status (sc, 0);
+    del_timer (&sc->timer);
+    lmc_proto_close(sc);
+    lmc_ifdown (dev);
+
+    lmc_trace(dev, "lmc_close out");
+
+    return 0;
+}
+
+/* Ends the transfer of packets */
+/* When the interface goes down, this is called */
+static int lmc_ifdown (struct net_device *dev) /*fold00*/
+{
+    lmc_softc_t *sc = dev_to_sc(dev);
+    u32 csr6;
+    int i;
+
+    lmc_trace(dev, "lmc_ifdown in");
+
+    /* Don't let anything else go on right now */
+    //    dev->start = 0;
+    netif_stop_queue(dev);
+    sc->extra_stats.tx_tbusy1++;
+
+    /* stop interrupts */
+    /* Clear the interrupt mask */
+    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
+
+    /* Stop Tx and Rx on the chip */
+    csr6 = LMC_CSR_READ (sc, csr_command);
+    csr6 &= ~LMC_DEC_ST;		/* Turn off the Transmission bit */
+    csr6 &= ~LMC_DEC_SR;		/* Turn off the Receive bit */
+    LMC_CSR_WRITE (sc, csr_command, csr6);
+
+    sc->lmc_device->stats.rx_missed_errors +=
+	    LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
+
+    /* release the interrupt */
+    if(sc->got_irq == 1){
+        free_irq (dev->irq, dev);
+        sc->got_irq = 0;
+    }
+
+    /* free skbuffs in the Rx queue */
+    for (i = 0; i < LMC_RXDESCS; i++)
+    {
+        struct sk_buff *skb = sc->lmc_rxq[i];
+        sc->lmc_rxq[i] = NULL;
+        sc->lmc_rxring[i].status = 0;
+        sc->lmc_rxring[i].length = 0;
+        sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
+        if (skb != NULL)
+            dev_kfree_skb(skb);
+        sc->lmc_rxq[i] = NULL;
+    }
+
+    for (i = 0; i < LMC_TXDESCS; i++)
+    {
+        if (sc->lmc_txq[i] != NULL)
+            dev_kfree_skb(sc->lmc_txq[i]);
+        sc->lmc_txq[i] = NULL;
+    }
+
+    lmc_led_off (sc, LMC_MII16_LED_ALL);
+
+    netif_wake_queue(dev);
+    sc->extra_stats.tx_tbusy0++;
+
+    lmc_trace(dev, "lmc_ifdown out");
+
+    return 0;
+}
+
+/* Interrupt handling routine.  This will take an incoming packet, or clean
+ * up after a transmit.
+ */
+static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
+{
+    struct net_device *dev = (struct net_device *) dev_instance;
+    lmc_softc_t *sc = dev_to_sc(dev);
+    u32 csr;
+    int i;
+    s32 stat;
+    unsigned int badtx;
+    u32 firstcsr;
+    int max_work = LMC_RXDESCS;
+    int handled = 0;
+
+    lmc_trace(dev, "lmc_interrupt in");
+
+    spin_lock(&sc->lmc_lock);
+
+    /*
+     * Read the csr to find what interrupts we have (if any)
+     */
+    csr = LMC_CSR_READ (sc, csr_status);
+
+    /*
+     * Make sure this is our interrupt
+     */
+    if ( ! (csr & sc->lmc_intrmask)) {
+        goto lmc_int_fail_out;
+    }
+
+    firstcsr = csr;
+
+    /* always go through this loop at least once */
+    while (csr & sc->lmc_intrmask) {
+	handled = 1;
+
+        /*
+         * Clear interrupt bits; we handle all cases below
+         */
+        LMC_CSR_WRITE (sc, csr_status, csr);
+
+        /*
+         * One of
+         *  - Transmit process timed out CSR5<1>
+         *  - Transmit jabber timeout    CSR5<3>
+         *  - Transmit underflow         CSR5<5>
+         *  - Receive buffer unavailable  CSR5<7>
+         *  - Receive process stopped    CSR5<8>
+         *  - Receive watchdog timeout   CSR5<9>
+         *  - Early transmit interrupt   CSR5<10>
+         *
+         * Is this really right? Should we do a running reset for jabber?
+         * (being a WAN card and all)
+         */
+        if (csr & TULIP_STS_ABNRMLINTR){
+            lmc_running_reset (dev);
+            break;
+        }
+        
+        if (csr & TULIP_STS_RXINTR){
+            lmc_trace(dev, "rx interrupt");
+            lmc_rx (dev);
+            
+        }
+        if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {
+
+	    int		n_compl = 0 ;
+            /* reset the transmit timeout detection flag -baz */
+	    sc->extra_stats.tx_NoCompleteCnt = 0;
+
+            badtx = sc->lmc_taint_tx;
+            i = badtx % LMC_TXDESCS;
+
+            while ((badtx < sc->lmc_next_tx)) {
+                stat = sc->lmc_txring[i].status;
+
+                LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat,
+						 sc->lmc_txring[i].length);
+                /*
+                 * If bit 31 is 1 the tulip owns it break out of the loop
+                 */
+                if (stat & 0x80000000)
+                    break;
+
+		n_compl++ ;		/* i.e., have an empty slot in ring */
+                /*
+                 * If we have no skbuff or have cleared it
+                 * Already continue to the next buffer
+                 */
+                if (sc->lmc_txq[i] == NULL)
+                    continue;
+
+		/*
+		 * Check the total error summary to look for any errors
+		 */
+		if (stat & 0x8000) {
+			sc->lmc_device->stats.tx_errors++;
+			if (stat & 0x4104)
+				sc->lmc_device->stats.tx_aborted_errors++;
+			if (stat & 0x0C00)
+				sc->lmc_device->stats.tx_carrier_errors++;
+			if (stat & 0x0200)
+				sc->lmc_device->stats.tx_window_errors++;
+			if (stat & 0x0002)
+				sc->lmc_device->stats.tx_fifo_errors++;
+		} else {
+			sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
+
+			sc->lmc_device->stats.tx_packets++;
+                }
+
+                //                dev_kfree_skb(sc->lmc_txq[i]);
+                dev_kfree_skb_irq(sc->lmc_txq[i]);
+                sc->lmc_txq[i] = NULL;
+
+                badtx++;
+                i = badtx % LMC_TXDESCS;
+            }
+
+            if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
+            {
+                printk ("%s: out of sync pointer\n", dev->name);
+                badtx += LMC_TXDESCS;
+            }
+            LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
+            sc->lmc_txfull = 0;
+            netif_wake_queue(dev);
+	    sc->extra_stats.tx_tbusy0++;
+
+
+#ifdef DEBUG
+	    sc->extra_stats.dirtyTx = badtx;
+	    sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
+	    sc->extra_stats.lmc_txfull = sc->lmc_txfull;
+#endif
+            sc->lmc_taint_tx = badtx;
+
+            /*
+             * Why was there a break here???
+             */
+        }			/* end handle transmit interrupt */
+
+        if (csr & TULIP_STS_SYSERROR) {
+            u32 error;
+            printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
+            error = csr>>23 & 0x7;
+            switch(error){
+            case 0x000:
+                printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
+                break;
+            case 0x001:
+                printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
+                break;
+            case 0x010:
+                printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
+                break;
+            default:
+                printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);
+            }
+            lmc_dec_reset (sc);
+            lmc_reset (sc);
+            LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
+            LMC_EVENT_LOG(LMC_EVENT_RESET2,
+                          lmc_mii_readreg (sc, 0, 16),
+                          lmc_mii_readreg (sc, 0, 17));
+
+        }
+
+        
+        if(max_work-- <= 0)
+            break;
+        
+        /*
+         * Get current csr status to make sure
+         * we've cleared all interrupts
+         */
+        csr = LMC_CSR_READ (sc, csr_status);
+    }				/* end interrupt loop */
+    LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);
+
+lmc_int_fail_out:
+
+    spin_unlock(&sc->lmc_lock);
+
+    lmc_trace(dev, "lmc_interrupt out");
+    return IRQ_RETVAL(handled);
+}
+
+static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
+					struct net_device *dev)
+{
+    lmc_softc_t *sc = dev_to_sc(dev);
+    u32 flag;
+    int entry;
+    unsigned long flags;
+
+    lmc_trace(dev, "lmc_start_xmit in");
+
+    spin_lock_irqsave(&sc->lmc_lock, flags);
+
+    /* normal path, tbusy known to be zero */
+
+    entry = sc->lmc_next_tx % LMC_TXDESCS;
+
+    sc->lmc_txq[entry] = skb;
+    sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);
+
+    LMC_CONSOLE_LOG("xmit", skb->data, skb->len);
+
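+    /* Choose the TDES1 control bits for this descriptor: 0x60000000
+     * appears to mark first/last segment only, while 0xe0000000
+     * additionally requests an interrupt on completion, so a TX
+     * interrupt is only asked for when the backlog reaches half the
+     * ring or the ring fills up. */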
+#ifndef GCOM
+    /* If the queue is less than half full, don't interrupt */
+    if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
+    {
+        /* Do not interrupt on completion of this packet */
+        flag = 0x60000000;
+        netif_wake_queue(dev);
+    }
+    else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
+    {
+        /* This generates an interrupt on completion of this packet */
+        flag = 0xe0000000;
+        netif_wake_queue(dev);
+    }
+    else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
+    {
+        /* Do not interrupt on completion of this packet */
+        flag = 0x60000000;
+        netif_wake_queue(dev);
+    }
+    else
+    {
+        /* This generates an interrupt on completion of this packet */
+        flag = 0xe0000000;
+        sc->lmc_txfull = 1;
+        netif_stop_queue(dev);
+    }
+#else
+    flag = LMC_TDES_INTERRUPT_ON_COMPLETION;
+
+    if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
+    {				/* ring full, go busy */
+        sc->lmc_txfull = 1;
+	netif_stop_queue(dev);
+	sc->extra_stats.tx_tbusy1++;
+        LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
+    }
+#endif
+
+
+    if (entry == LMC_TXDESCS - 1)	/* last descriptor in ring */
+	flag |= LMC_TDES_END_OF_RING;	/* flag as such for Tulip */
+
+    /* don't pad small packets either */
+    flag = sc->lmc_txring[entry].length = (skb->len) | flag |
+						sc->TxDescriptControlInit;
+
+    /* set the transmit timeout flag to be checked in
+     * the watchdog timer handler. -baz
+     */
+
+    sc->extra_stats.tx_NoCompleteCnt++;
+    sc->lmc_next_tx++;
+
+    /* give ownership to the chip */
+    LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
+    sc->lmc_txring[entry].status = 0x80000000;
+
+    /* send now! */
+    LMC_CSR_WRITE (sc, csr_txpoll, 0);
+
+    spin_unlock_irqrestore(&sc->lmc_lock, flags);
+
+    lmc_trace(dev, "lmc_start_xmit_out");
+    return NETDEV_TX_OK;
+}
+
+
+static int lmc_rx(struct net_device *dev)
+{
+    lmc_softc_t *sc = dev_to_sc(dev);
+    int i;
+    int rx_work_limit = LMC_RXDESCS;
+    unsigned int next_rx;
+    int rxIntLoopCnt;		/* debug -baz */
+    int localLengthErrCnt = 0;
+    long stat;
+    struct sk_buff *skb, *nsb;
+    u16 len;
+
+    lmc_trace(dev, "lmc_rx in");
+
+    lmc_led_on(sc, LMC_DS3_LED3);
+
+    rxIntLoopCnt = 0;		/* debug -baz */
+
+    i = sc->lmc_next_rx % LMC_RXDESCS;
+    next_rx = sc->lmc_next_rx;
+
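+    /* Walk the receive ring until we reach a descriptor that is still
+     * owned by the DC21x4 (OWN bit set), processing each completed
+     * frame along the way. */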
+    while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
+    {
+        rxIntLoopCnt++;		/* debug -baz */
+        len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
+        if ((stat & 0x0300) != 0x0300) {  /* Check first segment and last segment */
+		if ((stat & 0x0000ffff) != 0x7fff) {
+			/* Oversized frame */
+			sc->lmc_device->stats.rx_length_errors++;
+			goto skip_packet;
+		}
+	}
+
+	if (stat & 0x00000008) { /* Catch a dribbling bit error */
+		sc->lmc_device->stats.rx_errors++;
+		sc->lmc_device->stats.rx_frame_errors++;
+		goto skip_packet;
+	}
+
+
+	if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
+		sc->lmc_device->stats.rx_errors++;
+		sc->lmc_device->stats.rx_crc_errors++;
+		goto skip_packet;
+	}
+
+	if (len > LMC_PKT_BUF_SZ) {
+		sc->lmc_device->stats.rx_length_errors++;
+		localLengthErrCnt++;
+		goto skip_packet;
+	}
+
+	if (len < sc->lmc_crcSize + 2) {
+		sc->lmc_device->stats.rx_length_errors++;
+		sc->extra_stats.rx_SmallPktCnt++;
+		localLengthErrCnt++;
+		goto skip_packet;
+	}
+
+        if(stat & 0x00004000){
+            printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
+        }
+
+        len -= sc->lmc_crcSize;
+
+        skb = sc->lmc_rxq[i];
+
+        /*
+         * We ran out of memory at some point
+         * just allocate an skb buff and continue.
+         */
+        
+        if (!skb) {
+            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
+            if (nsb) {
+                sc->lmc_rxq[i] = nsb;
+                nsb->dev = dev;
+                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
+            }
+            sc->failed_recv_alloc = 1;
+            goto skip_packet;
+        }
+        
+	sc->lmc_device->stats.rx_packets++;
+	sc->lmc_device->stats.rx_bytes += len;
+
+        LMC_CONSOLE_LOG("recv", skb->data, len);
+
+        /*
+         * I'm not sure of the sanity of this
+         * Packets could be arriving at a constant
+         * 44.210mbits/sec and we're going to copy
+         * them into a new buffer??
+         */
+        
+        if(len > (LMC_MTU - (LMC_MTU>>2))){ /* len > LMC_MTU * 0.75 */
+            /*
+             * If it's a large packet don't copy it just hand it up
+             */
+        give_it_anyways:
+
+            sc->lmc_rxq[i] = NULL;
+            sc->lmc_rxring[i].buffer1 = 0x0;
+
+            skb_put (skb, len);
+            skb->protocol = lmc_proto_type(sc, skb);
+            skb_reset_mac_header(skb);
+            /* skb_reset_network_header(skb); */
+            skb->dev = dev;
+            lmc_proto_netif(sc, skb);
+
+            /*
+             * This skb will be destroyed by the upper layers, make a new one
+             */
+            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
+            if (nsb) {
+                sc->lmc_rxq[i] = nsb;
+                nsb->dev = dev;
+                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
+                /* Transferred to 21140 below */
+            }
+            else {
+                /*
+                 * We've run out of memory, stop trying to allocate
+                 * memory and exit the interrupt handler
+                 *
+                 * The chip may run out of receivers and stop
+                 * in which case we'll try to allocate the buffer
+                 * again.  (once a second)
+                 */
+		sc->extra_stats.rx_BuffAllocErr++;
+                LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
+                sc->failed_recv_alloc = 1;
+                goto skip_out_of_mem;
+            }
+        }
+        else {
+            nsb = dev_alloc_skb(len);
+            if(!nsb) {
+                goto give_it_anyways;
+            }
+            skb_copy_from_linear_data(skb, skb_put(nsb, len), len);
+            
+            nsb->protocol = lmc_proto_type(sc, nsb);
+            skb_reset_mac_header(nsb);
+            /* skb_reset_network_header(nsb); */
+            nsb->dev = dev;
+            lmc_proto_netif(sc, nsb);
+        }
+
+    skip_packet:
+        LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
+        sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;
+
+        sc->lmc_next_rx++;
+        i = sc->lmc_next_rx % LMC_RXDESCS;
+        rx_work_limit--;
+        if (rx_work_limit < 0)
+            break;
+    }
+
+    /* detect condition for LMC1000 where DSU cable attaches and fills
+     * descriptors with bogus packets
+     *
+    if (localLengthErrCnt > LMC_RXDESCS - 3) {
+	sc->extra_stats.rx_BadPktSurgeCnt++;
+	LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
+		      sc->extra_stats.rx_BadPktSurgeCnt);
+    } */
+
+    /* save max count of receive descriptors serviced */
+    if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
+	    sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
+
+#ifdef DEBUG
+    if (rxIntLoopCnt == 0)
+    {
+        for (i = 0; i < LMC_RXDESCS; i++)
+        {
+            if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
+                != DESC_OWNED_BY_DC21X4)
+            {
+                rxIntLoopCnt++;
+            }
+        }
+        LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
+    }
+#endif
+
+
+    lmc_led_off(sc, LMC_DS3_LED3);
+
+skip_out_of_mem:
+
+    lmc_trace(dev, "lmc_rx out");
+
+    return 0;
+}
+
+static struct net_device_stats *lmc_get_stats(struct net_device *dev)
+{
+    lmc_softc_t *sc = dev_to_sc(dev);
+    unsigned long flags;
+
+    lmc_trace(dev, "lmc_get_stats in");
+
+    spin_lock_irqsave(&sc->lmc_lock, flags);
+
+    sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
+
+    spin_unlock_irqrestore(&sc->lmc_lock, flags);
+
+    lmc_trace(dev, "lmc_get_stats out");
+
+    return &sc->lmc_device->stats;
+}
+
+static struct pci_driver lmc_driver = {
+	.name		= "lmc",
+	.id_table	= lmc_pci_tbl,
+	.probe		= lmc_init_one,
+	.remove		= __devexit_p(lmc_remove_one),
+};
+
+static int __init init_lmc(void)
+{
+    return pci_register_driver(&lmc_driver);
+}
+
+static void __exit exit_lmc(void)
+{
+    pci_unregister_driver(&lmc_driver);
+}
+
+module_init(init_lmc);
+module_exit(exit_lmc);
+
+unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
+{
+    int i;
+    int command = (0xf6 << 10) | (devaddr << 5) | regno;
+    int retval = 0;
+
+    lmc_trace(sc->lmc_device, "lmc_mii_readreg in");
+
+    LMC_MII_SYNC (sc);
+
+    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");
+
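+    /* Bit-bang the MDIO read command out through CSR9, MSB first: the
+     * outgoing data bit lives at 0x20000 and is latched by toggling the
+     * clock bit 0x10000; the register contents are then clocked back in
+     * from bit 0x80000 in the second loop below. */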
+    for (i = 15; i >= 0; i--)
+    {
+        int dataval = (command & (1 << i)) ? 0x20000 : 0;
+
+        LMC_CSR_WRITE (sc, csr_9, dataval);
+        lmc_delay ();
+        /* __SLOW_DOWN_IO; */
+        LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000);
+        lmc_delay ();
+        /* __SLOW_DOWN_IO; */
+    }
+
+    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");
+
+    for (i = 19; i > 0; i--)
+    {
+        LMC_CSR_WRITE (sc, csr_9, 0x40000);
+        lmc_delay ();
+        /* __SLOW_DOWN_IO; */
+        retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0);
+        LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000);
+        lmc_delay ();
+        /* __SLOW_DOWN_IO; */
+    }
+
+    lmc_trace(sc->lmc_device, "lmc_mii_readreg out");
+
+    return (retval >> 1) & 0xffff;
+}
+
+void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
+{
+    int i = 32;
+    int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;
+
+    lmc_trace(sc->lmc_device, "lmc_mii_writereg in");
+
+    LMC_MII_SYNC (sc);
+
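+    /* Shift the 32-bit MDIO write frame (opcode, PHY and register
+     * address, turnaround, data) out through CSR9, MSB first, then
+     * clock out two trailing cycles to finish the transaction. */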
+    i = 31;
+    while (i >= 0)
+    {
+        int datav;
+
+        if (command & (1 << i))
+            datav = 0x20000;
+        else
+            datav = 0x00000;
+
+        LMC_CSR_WRITE (sc, csr_9, datav);
+        lmc_delay ();
+        /* __SLOW_DOWN_IO; */
+        LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000));
+        lmc_delay ();
+        /* __SLOW_DOWN_IO; */
+        i--;
+    }
+
+    i = 2;
+    while (i > 0)
+    {
+        LMC_CSR_WRITE (sc, csr_9, 0x40000);
+        lmc_delay ();
+        /* __SLOW_DOWN_IO; */
+        LMC_CSR_WRITE (sc, csr_9, 0x50000);
+        lmc_delay ();
+        /* __SLOW_DOWN_IO; */
+        i--;
+    }
+
+    lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
+}
+
+static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
+{
+    int i;
+
+    lmc_trace(sc->lmc_device, "lmc_softreset in");
+
+    /* Initialize the receive rings and buffers. */
+    sc->lmc_txfull = 0;
+    sc->lmc_next_rx = 0;
+    sc->lmc_next_tx = 0;
+    sc->lmc_taint_rx = 0;
+    sc->lmc_taint_tx = 0;
+
+    /*
+     * Setup each one of the receiver buffers
+     * allocate an skbuff for each one, setup the descriptor table
+     * and point each buffer at the next one
+     */
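+    /* Each descriptor is chained to the next through buffer2 and handed
+     * to the 21140 by setting the OWN bit (0x80000000) in status. */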
+
+    for (i = 0; i < LMC_RXDESCS; i++)
+    {
+        struct sk_buff *skb;
+
+        if (sc->lmc_rxq[i] == NULL)
+        {
+            skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
+            if(skb == NULL){
+                printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
+                sc->failed_ring = 1;
+                break;
+            }
+            else{
+                sc->lmc_rxq[i] = skb;
+            }
+        }
+        else
+        {
+            skb = sc->lmc_rxq[i];
+        }
+
+        skb->dev = sc->lmc_device;
+
+        /* owned by 21140 */
+        sc->lmc_rxring[i].status = 0x80000000;
+
+        /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */
+        sc->lmc_rxring[i].length = skb_tailroom(skb);
+
+        /* used to be tail, which looks odd (why write to the end of the
+         * packet?), but since nothing has been written yet, tail == data
+         */
+        sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);
+
+        /* This is fair since the structure is static and we have the next address */
+        sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]);
+
+    }
+
+    /*
+     * Sets end of ring
+     */
+    if (i != 0) {
+        sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */
+        sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */
+    }
+    LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring)); /* write base address */
+
+    /* Initialize the transmit rings and buffers */
+    for (i = 0; i < LMC_TXDESCS; i++)
+    {
+        if (sc->lmc_txq[i] != NULL){		/* have buffer */
+            dev_kfree_skb(sc->lmc_txq[i]);	/* free it */
+	    sc->lmc_device->stats.tx_dropped++;	/* We just dropped a packet */
+        }
+        sc->lmc_txq[i] = NULL;
+        sc->lmc_txring[i].status = 0x00000000;
+        sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]);
+    }
+    sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
+    LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));
+
+    lmc_trace(sc->lmc_device, "lmc_softreset out");
+}
+
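+/* GPIO direction helpers: a write to csr_gp with TULIP_GP_PINSET set
+ * programs pin direction rather than pin data, and sc->lmc_gpio_io
+ * tracks which pins are currently configured as outputs. */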
+void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
+{
+    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
+    sc->lmc_gpio_io &= ~bits;
+    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
+    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
+}
+
+void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
+{
+    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
+    sc->lmc_gpio_io |= bits;
+    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
+    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
+}
+
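+/* The LED bits in MII register 16 are treated as active low by this
+ * driver: lmc_led_on() clears the requested bit(s) and lmc_led_off()
+ * sets them, skipping the MII write when the LED is already in the
+ * requested state. */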
+void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
+{
+    lmc_trace(sc->lmc_device, "lmc_led_on in");
+    if((~sc->lmc_miireg16) & led){ /* Already on! */
+        lmc_trace(sc->lmc_device, "lmc_led_on aon out");
+        return;
+    }
+    
+    sc->lmc_miireg16 &= ~led;
+    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
+    lmc_trace(sc->lmc_device, "lmc_led_on out");
+}
+
+void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
+{
+    lmc_trace(sc->lmc_device, "lmc_led_off in");
+    if(sc->lmc_miireg16 & led){ /* Already set don't do anything */
+        lmc_trace(sc->lmc_device, "lmc_led_off aoff out");
+        return;
+    }
+    
+    sc->lmc_miireg16 |= led;
+    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
+    lmc_trace(sc->lmc_device, "lmc_led_off out");
+}
+
+static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
+{
+    lmc_trace(sc->lmc_device, "lmc_reset in");
+    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
+    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
+
+    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
+    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
+
+    /*
+     * make some of the GPIO pins be outputs
+     */
+    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
+
+    /*
+     * RESET low to force state reset.  This also forces
+     * the transmitter clock to be internal, but we expect to reset
+     * that later anyway.
+     */
+    sc->lmc_gpio &= ~(LMC_GEP_RESET);
+    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
+
+    /*
+     * hold for more than 10 microseconds
+     */
+    udelay(50);
+
+    /*
+     * stop driving Xilinx-related signals
+     */
+    lmc_gpio_mkinput(sc, LMC_GEP_RESET);
+
+    /*
+     * Call media specific init routine
+     */
+    sc->lmc_media->init(sc);
+
+    sc->extra_stats.resetCount++;
+    lmc_trace(sc->lmc_device, "lmc_reset out");
+}
+
+static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
+{
+    u32 val;
+    lmc_trace(sc->lmc_device, "lmc_dec_reset in");
+
+    /*
+     * disable all interrupts
+     */
+    sc->lmc_intrmask = 0;
+    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);
+
+    /*
+     * Reset the chip with a software reset command.
+     * Wait 10 microseconds (actually 50 PCI cycles but at
+     * 33MHz that comes to about two microseconds, but wait a bit longer anyway)
+     * bit longer anyways)
+     */
+    LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
+    udelay(25);
+#ifdef __sparc__
+    sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
+    sc->lmc_busmode = 0x00100000;
+    sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
+    LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);
+#endif
+    sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);
+
+    /*
+     * We want:
+     *   no ethernet address in frames we write
+     *   disable padding (txdesc, padding disable)
+     *   ignore runt frames (rdes0 bit 15)
+     *   no receiver watchdog or transmitter jabber timer
+     *       (csr15 bit 0,14 == 1)
+     *   if using 16-bit CRC, turn off CRC (trans desc, crc disable)
+     */
+
+    sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
+                         | TULIP_CMD_FULLDUPLEX
+                         | TULIP_CMD_PASSBADPKT
+                         | TULIP_CMD_NOHEARTBEAT
+                         | TULIP_CMD_PORTSELECT
+                         | TULIP_CMD_RECEIVEALL
+                         | TULIP_CMD_MUSTBEONE
+                       );
+    sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
+                          | TULIP_CMD_THRESHOLDCTL
+                          | TULIP_CMD_STOREFWD
+                          | TULIP_CMD_TXTHRSHLDCTL
+                        );
+
+    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
+
+    /*
+     * disable receiver watchdog and transmit jabber
+     */
+    val = LMC_CSR_READ(sc, csr_sia_general);
+    val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
+    LMC_CSR_WRITE(sc, csr_sia_general, val);
+
+    lmc_trace(sc->lmc_device, "lmc_dec_reset out");
+}
+
+static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
+                         size_t csr_size)
+{
+    lmc_trace(sc->lmc_device, "lmc_initcsrs in");
+    sc->lmc_csrs.csr_busmode	        = csr_base +  0 * csr_size;
+    sc->lmc_csrs.csr_txpoll		= csr_base +  1 * csr_size;
+    sc->lmc_csrs.csr_rxpoll		= csr_base +  2 * csr_size;
+    sc->lmc_csrs.csr_rxlist		= csr_base +  3 * csr_size;
+    sc->lmc_csrs.csr_txlist		= csr_base +  4 * csr_size;
+    sc->lmc_csrs.csr_status		= csr_base +  5 * csr_size;
+    sc->lmc_csrs.csr_command	        = csr_base +  6 * csr_size;
+    sc->lmc_csrs.csr_intr		= csr_base +  7 * csr_size;
+    sc->lmc_csrs.csr_missed_frames	= csr_base +  8 * csr_size;
+    sc->lmc_csrs.csr_9		        = csr_base +  9 * csr_size;
+    sc->lmc_csrs.csr_10		        = csr_base + 10 * csr_size;
+    sc->lmc_csrs.csr_11		        = csr_base + 11 * csr_size;
+    sc->lmc_csrs.csr_12		        = csr_base + 12 * csr_size;
+    sc->lmc_csrs.csr_13		        = csr_base + 13 * csr_size;
+    sc->lmc_csrs.csr_14		        = csr_base + 14 * csr_size;
+    sc->lmc_csrs.csr_15		        = csr_base + 15 * csr_size;
+    lmc_trace(sc->lmc_device, "lmc_initcsrs out");
+}
+
+static void lmc_driver_timeout(struct net_device *dev)
+{
+    lmc_softc_t *sc = dev_to_sc(dev);
+    u32 csr6;
+    unsigned long flags;
+
+    lmc_trace(dev, "lmc_driver_timeout in");
+
+    spin_lock_irqsave(&sc->lmc_lock, flags);
+
+    printk(KERN_WARNING "%s: Xmitter busy\n", dev->name);
+
+    sc->extra_stats.tx_tbusy_calls++;
+    if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
+	    goto bug_out;
+
+    /*
+     * Chip seems to have locked up
+     * Reset it
+     * This wipes out all our descriptor
+     * tables and starts from scratch
+     */
+
+    LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
+                  LMC_CSR_READ (sc, csr_status),
+		  sc->extra_stats.tx_ProcTimeout);
+
+    lmc_running_reset (dev);
+
+    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
+    LMC_EVENT_LOG(LMC_EVENT_RESET2,
+                  lmc_mii_readreg (sc, 0, 16),
+                  lmc_mii_readreg (sc, 0, 17));
+
+    /* restart the tx processes */
+    csr6 = LMC_CSR_READ (sc, csr_command);
+    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002);
+    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002);
+
+    /* immediate transmit */
+    LMC_CSR_WRITE (sc, csr_txpoll, 0);
+
+    sc->lmc_device->stats.tx_errors++;
+    sc->extra_stats.tx_ProcTimeout++; /* -baz */
+
+    dev->trans_start = jiffies; /* prevent tx timeout */
+
+bug_out:
+
+    spin_unlock_irqrestore(&sc->lmc_lock, flags);
+
+    lmc_trace(dev, "lmc_driver_timeout out");
+
+
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_media.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_media.c
new file mode 100644
index 0000000..5920c99
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_media.c
@@ -0,0 +1,1211 @@
+/* $Id: lmc_media.c,v 1.13 2000/04/11 05:25:26 asj Exp $ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/inet.h>
+#include <linux/bitops.h>
+
+#include <asm/processor.h>             /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <asm/uaccess.h>
+
+#include "lmc.h"
+#include "lmc_var.h"
+#include "lmc_ioctl.h"
+#include "lmc_debug.h"
+
+#define CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE 1
+
+ /*
+  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
+  * All rights reserved.  www.lanmedia.com
+  *
+  * This code is written by:
+  * Andrew Stanley-Jones (asj@cban.com)
+  * Rob Braun (bbraun@vix.com),
+  * Michael Graff (explorer@vix.com) and
+  * Matt Thomas (matt@3am-software.com).
+  *
+  * This software may be used and distributed according to the terms
+  * of the GNU General Public License version 2, incorporated herein by reference.
+  */
+
+/*
+ * protocol independent method.
+ */
+static void lmc_set_protocol (lmc_softc_t * const, lmc_ctl_t *);
+
+/*
+ * media independent methods to check on media status, link, light LEDs,
+ * etc.
+ */
+static void lmc_ds3_init (lmc_softc_t * const);
+static void lmc_ds3_default (lmc_softc_t * const);
+static void lmc_ds3_set_status (lmc_softc_t * const, lmc_ctl_t *);
+static void lmc_ds3_set_100ft (lmc_softc_t * const, int);
+static int lmc_ds3_get_link_status (lmc_softc_t * const);
+static void lmc_ds3_set_crc_length (lmc_softc_t * const, int);
+static void lmc_ds3_set_scram (lmc_softc_t * const, int);
+static void lmc_ds3_watchdog (lmc_softc_t * const);
+
+static void lmc_hssi_init (lmc_softc_t * const);
+static void lmc_hssi_default (lmc_softc_t * const);
+static void lmc_hssi_set_status (lmc_softc_t * const, lmc_ctl_t *);
+static void lmc_hssi_set_clock (lmc_softc_t * const, int);
+static int lmc_hssi_get_link_status (lmc_softc_t * const);
+static void lmc_hssi_set_link_status (lmc_softc_t * const, int);
+static void lmc_hssi_set_crc_length (lmc_softc_t * const, int);
+static void lmc_hssi_watchdog (lmc_softc_t * const);
+
+static void lmc_ssi_init (lmc_softc_t * const);
+static void lmc_ssi_default (lmc_softc_t * const);
+static void lmc_ssi_set_status (lmc_softc_t * const, lmc_ctl_t *);
+static void lmc_ssi_set_clock (lmc_softc_t * const, int);
+static void lmc_ssi_set_speed (lmc_softc_t * const, lmc_ctl_t *);
+static int lmc_ssi_get_link_status (lmc_softc_t * const);
+static void lmc_ssi_set_link_status (lmc_softc_t * const, int);
+static void lmc_ssi_set_crc_length (lmc_softc_t * const, int);
+static void lmc_ssi_watchdog (lmc_softc_t * const);
+
+static void lmc_t1_init (lmc_softc_t * const);
+static void lmc_t1_default (lmc_softc_t * const);
+static void lmc_t1_set_status (lmc_softc_t * const, lmc_ctl_t *);
+static int lmc_t1_get_link_status (lmc_softc_t * const);
+static void lmc_t1_set_circuit_type (lmc_softc_t * const, int);
+static void lmc_t1_set_crc_length (lmc_softc_t * const, int);
+static void lmc_t1_set_clock (lmc_softc_t * const, int);
+static void lmc_t1_watchdog (lmc_softc_t * const);
+
+static void lmc_dummy_set_1 (lmc_softc_t * const, int);
+static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *);
+
+static inline void write_av9110_bit (lmc_softc_t *, int);
+static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
+
+lmc_media_t lmc_ds3_media = {
+  lmc_ds3_init,			/* special media init stuff */
+  lmc_ds3_default,		/* reset to default state */
+  lmc_ds3_set_status,		/* reset status to state provided */
+  lmc_dummy_set_1,		/* set clock source */
+  lmc_dummy_set2_1,		/* set line speed */
+  lmc_ds3_set_100ft,		/* set cable length */
+  lmc_ds3_set_scram,		/* set scrambler */
+  lmc_ds3_get_link_status,	/* get link status */
+  lmc_dummy_set_1,		/* set link status */
+  lmc_ds3_set_crc_length,	/* set CRC length */
+  lmc_dummy_set_1,		/* set T1 or E1 circuit type */
+  lmc_ds3_watchdog
+};
+
+lmc_media_t lmc_hssi_media = {
+  lmc_hssi_init,		/* special media init stuff */
+  lmc_hssi_default,		/* reset to default state */
+  lmc_hssi_set_status,		/* reset status to state provided */
+  lmc_hssi_set_clock,		/* set clock source */
+  lmc_dummy_set2_1,		/* set line speed */
+  lmc_dummy_set_1,		/* set cable length */
+  lmc_dummy_set_1,		/* set scrambler */
+  lmc_hssi_get_link_status,	/* get link status */
+  lmc_hssi_set_link_status,	/* set link status */
+  lmc_hssi_set_crc_length,	/* set CRC length */
+  lmc_dummy_set_1,		/* set T1 or E1 circuit type */
+  lmc_hssi_watchdog
+};
+
+lmc_media_t lmc_ssi_media = { lmc_ssi_init,	/* special media init stuff */
+  lmc_ssi_default,		/* reset to default state */
+  lmc_ssi_set_status,		/* reset status to state provided */
+  lmc_ssi_set_clock,		/* set clock source */
+  lmc_ssi_set_speed,		/* set line speed */
+  lmc_dummy_set_1,		/* set cable length */
+  lmc_dummy_set_1,		/* set scrambler */
+  lmc_ssi_get_link_status,	/* get link status */
+  lmc_ssi_set_link_status,	/* set link status */
+  lmc_ssi_set_crc_length,	/* set CRC length */
+  lmc_dummy_set_1,		/* set T1 or E1 circuit type */
+  lmc_ssi_watchdog
+};
+
+lmc_media_t lmc_t1_media = {
+  lmc_t1_init,			/* special media init stuff */
+  lmc_t1_default,		/* reset to default state */
+  lmc_t1_set_status,		/* reset status to state provided */
+  lmc_t1_set_clock,		/* set clock source */
+  lmc_dummy_set2_1,		/* set line speed */
+  lmc_dummy_set_1,		/* set cable length */
+  lmc_dummy_set_1,		/* set scrambler */
+  lmc_t1_get_link_status,	/* get link status */
+  lmc_dummy_set_1,		/* set link status */
+  lmc_t1_set_crc_length,	/* set CRC length */
+  lmc_t1_set_circuit_type,	/* set T1 or E1 circuit type */
+  lmc_t1_watchdog
+};
+
+static void
+lmc_dummy_set_1 (lmc_softc_t * const sc, int a)
+{
+}
+
+static void
+lmc_dummy_set2_1 (lmc_softc_t * const sc, lmc_ctl_t * a)
+{
+}
+
+/*
+ *  HSSI methods
+ */
+
+static void
+lmc_hssi_init (lmc_softc_t * const sc)
+{
+  sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5200;
+
+  lmc_gpio_mkoutput (sc, LMC_GEP_HSSI_CLOCK);
+}
+
+static void
+lmc_hssi_default (lmc_softc_t * const sc)
+{
+  sc->lmc_miireg16 = LMC_MII16_LED_ALL;
+
+  sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
+  sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
+  sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
+}
+
+/*
+ * Given a user provided state, set ourselves up to match it.  This will
+ * always reset the card if needed.
+ */
+static void
+lmc_hssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
+{
+  if (ctl == NULL)
+    {
+      sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source);
+      lmc_set_protocol (sc, NULL);
+
+      return;
+    }
+
+  /*
+   * check for change in clock source
+   */
+  if (ctl->clock_source && !sc->ictl.clock_source)
+    {
+      sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
+      sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT;
+    }
+  else if (!ctl->clock_source && sc->ictl.clock_source)
+    {
+      sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
+      sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
+    }
+
+  lmc_set_protocol (sc, ctl);
+}
+
+/*
+ * 1 == internal, 0 == external
+ */
+static void
+lmc_hssi_set_clock (lmc_softc_t * const sc, int ie)
+{
+  int old;
+  old = sc->ictl.clock_source;
+  if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
+    {
+      sc->lmc_gpio |= LMC_GEP_HSSI_CLOCK;
+      LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+      sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
+      if(old != ie)
+        printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
+    }
+  else
+    {
+      sc->lmc_gpio &= ~(LMC_GEP_HSSI_CLOCK);
+      LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+      sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
+      if(old != ie)
+        printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
+    }
+}
+
+/*
+ * return hardware link status.
+ * 0 == link is down, 1 == link is up.
+ */
+static int
+lmc_hssi_get_link_status (lmc_softc_t * const sc)
+{
+    /*
+     * We're using the same code as SSI since
+     * they're practically the same
+     */
+    return lmc_ssi_get_link_status(sc);
+}
+
+static void
+lmc_hssi_set_link_status (lmc_softc_t * const sc, int state)
+{
+  if (state == LMC_LINK_UP)
+    sc->lmc_miireg16 |= LMC_MII16_HSSI_TA;
+  else
+    sc->lmc_miireg16 &= ~LMC_MII16_HSSI_TA;
+
+  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+}
+
+/*
+ * 0 == 16bit, 1 == 32bit
+ */
+static void
+lmc_hssi_set_crc_length (lmc_softc_t * const sc, int state)
+{
+  if (state == LMC_CTL_CRC_LENGTH_32)
+    {
+      /* 32 bit */
+      sc->lmc_miireg16 |= LMC_MII16_HSSI_CRC;
+      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
+    }
+  else
+    {
+      /* 16 bit */
+      sc->lmc_miireg16 &= ~LMC_MII16_HSSI_CRC;
+      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
+    }
+
+  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+}
+
+static void
+lmc_hssi_watchdog (lmc_softc_t * const sc)
+{
+  /* HSSI is blank */
+}
+
+/*
+ *  DS3 methods
+ */
+
+/*
+ * Set cable length
+ */
+static void
+lmc_ds3_set_100ft (lmc_softc_t * const sc, int ie)
+{
+  if (ie == LMC_CTL_CABLE_LENGTH_GT_100FT)
+    {
+      sc->lmc_miireg16 &= ~LMC_MII16_DS3_ZERO;
+      sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_GT_100FT;
+    }
+  else if (ie == LMC_CTL_CABLE_LENGTH_LT_100FT)
+    {
+      sc->lmc_miireg16 |= LMC_MII16_DS3_ZERO;
+      sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_LT_100FT;
+    }
+  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+}
+
+static void
+lmc_ds3_default (lmc_softc_t * const sc)
+{
+  sc->lmc_miireg16 = LMC_MII16_LED_ALL;
+
+  sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
+  sc->lmc_media->set_cable_length (sc, LMC_CTL_CABLE_LENGTH_LT_100FT);
+  sc->lmc_media->set_scrambler (sc, LMC_CTL_OFF);
+  sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
+}
+
+/*
+ * Given a user provided state, set ourselves up to match it.  This will
+ * always reset the card if needed.
+ */
+static void
+lmc_ds3_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
+{
+  if (ctl == NULL)
+    {
+      sc->lmc_media->set_cable_length (sc, sc->ictl.cable_length);
+      sc->lmc_media->set_scrambler (sc, sc->ictl.scrambler_onoff);
+      lmc_set_protocol (sc, NULL);
+
+      return;
+    }
+
+  /*
+   * check for change in cable length setting
+   */
+  if (ctl->cable_length && !sc->ictl.cable_length)
+    lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_GT_100FT);
+  else if (!ctl->cable_length && sc->ictl.cable_length)
+    lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_LT_100FT);
+
+  /*
+   * Check for change in scrambler setting (requires reset)
+   */
+  if (ctl->scrambler_onoff && !sc->ictl.scrambler_onoff)
+    lmc_ds3_set_scram (sc, LMC_CTL_ON);
+  else if (!ctl->scrambler_onoff && sc->ictl.scrambler_onoff)
+    lmc_ds3_set_scram (sc, LMC_CTL_OFF);
+
+  lmc_set_protocol (sc, ctl);
+}
+
+static void
+lmc_ds3_init (lmc_softc_t * const sc)
+{
+  int i;
+
+  sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5245;
+
+  /* writes zeros everywhere */
+  for (i = 0; i < 21; i++)
+    {
+      lmc_mii_writereg (sc, 0, 17, i);
+      lmc_mii_writereg (sc, 0, 18, 0);
+    }
+
+  /* set some essential bits */
+  lmc_mii_writereg (sc, 0, 17, 1);
+  lmc_mii_writereg (sc, 0, 18, 0x25);	/* ser, xtx */
+
+  lmc_mii_writereg (sc, 0, 17, 5);
+  lmc_mii_writereg (sc, 0, 18, 0x80);	/* emode */
+
+  lmc_mii_writereg (sc, 0, 17, 14);
+  lmc_mii_writereg (sc, 0, 18, 0x30);	/* rcgen, tcgen */
+
+  /* clear counters and latched bits */
+  for (i = 0; i < 21; i++)
+    {
+      lmc_mii_writereg (sc, 0, 17, i);
+      lmc_mii_readreg (sc, 0, 18);
+    }
+}
+
+/*
+ * 1 == DS3 payload scrambled, 0 == not scrambled
+ */
+static void
+lmc_ds3_set_scram (lmc_softc_t * const sc, int ie)
+{
+  if (ie == LMC_CTL_ON)
+    {
+      sc->lmc_miireg16 |= LMC_MII16_DS3_SCRAM;
+      sc->ictl.scrambler_onoff = LMC_CTL_ON;
+    }
+  else
+    {
+      sc->lmc_miireg16 &= ~LMC_MII16_DS3_SCRAM;
+      sc->ictl.scrambler_onoff = LMC_CTL_OFF;
+    }
+  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+}
+
+/*
+ * return hardware link status.
+ * 0 == link is down, 1 == link is up.
+ */
+static int
+lmc_ds3_get_link_status (lmc_softc_t * const sc)
+{
+    u16 link_status, link_status_11;
+    int ret = 1;
+
+    lmc_mii_writereg (sc, 0, 17, 7);
+    link_status = lmc_mii_readreg (sc, 0, 18);
+
+    /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
+     * led0 yellow = far-end adapter is in Red alarm condition
+     * led1 blue   = received an Alarm Indication signal
+     *               (upstream failure)
+     * led2 Green  = power to adapter, Gate Array loaded & driver
+     *               attached
+     * led3 red    = Loss of Signal (LOS) or out of frame (OOF)
+     *               conditions detected on T3 receive signal
+     */
+
+    lmc_led_on(sc, LMC_DS3_LED2);
+
+    if ((link_status & LMC_FRAMER_REG0_DLOS) ||
+        (link_status & LMC_FRAMER_REG0_OOFS)){
+        ret = 0;
+        if(sc->last_led_err[3] != 1){
+	    u16 r1;
+            lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */
+            r1 = lmc_mii_readreg (sc, 0, 18);
+            r1 &= 0xfe;
+            lmc_mii_writereg(sc, 0, 18, r1);
+            printk(KERN_WARNING "%s: Red Alarm - Loss of Signal or Loss of Framing\n", sc->name);
+        }
+        lmc_led_on(sc, LMC_DS3_LED3);	/* turn on red LED */
+        sc->last_led_err[3] = 1;
+    }
+    else {
+        lmc_led_off(sc, LMC_DS3_LED3);	/* turn off red LED */
+        if(sc->last_led_err[3] == 1){
+	    u16 r1;
+            lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */
+            r1 = lmc_mii_readreg (sc, 0, 18);
+            r1 |= 0x01;
+            lmc_mii_writereg(sc, 0, 18, r1);
+        }
+        sc->last_led_err[3] = 0;
+    }
+
+    lmc_mii_writereg(sc, 0, 17, 0x10);
+    link_status_11 = lmc_mii_readreg(sc, 0, 18);
+    if((link_status & LMC_FRAMER_REG0_AIS) ||
+       (link_status_11 & LMC_FRAMER_REG10_XBIT)) {
+        ret = 0;
+        if(sc->last_led_err[0] != 1){
+            printk(KERN_WARNING "%s: AIS Alarm or XBit Error\n", sc->name);
+            printk(KERN_WARNING "%s: Remote end has loss of signal or framing\n", sc->name);
+        }
+        lmc_led_on(sc, LMC_DS3_LED0);
+        sc->last_led_err[0] = 1;
+    }
+    else {
+        lmc_led_off(sc, LMC_DS3_LED0);
+        sc->last_led_err[0] = 0;
+    }
+
+    lmc_mii_writereg (sc, 0, 17, 9);
+    link_status = lmc_mii_readreg (sc, 0, 18);
+    
+    if(link_status & LMC_FRAMER_REG9_RBLUE){
+        ret = 0;
+        if(sc->last_led_err[1] != 1){
+            printk(KERN_WARNING "%s: Blue Alarm - Receiving all 1's\n", sc->name);
+        }
+        lmc_led_on(sc, LMC_DS3_LED1);
+        sc->last_led_err[1] = 1;
+    }
+    else {
+        lmc_led_off(sc, LMC_DS3_LED1);
+        sc->last_led_err[1] = 0;
+    }
+
+    return ret;
+}
+
+/*
+ * 0 == 16bit, 1 == 32bit
+ */
+static void
+lmc_ds3_set_crc_length (lmc_softc_t * const sc, int state)
+{
+  if (state == LMC_CTL_CRC_LENGTH_32)
+    {
+      /* 32 bit */
+      sc->lmc_miireg16 |= LMC_MII16_DS3_CRC;
+      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
+    }
+  else
+    {
+      /* 16 bit */
+      sc->lmc_miireg16 &= ~LMC_MII16_DS3_CRC;
+      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
+    }
+
+  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+}
+
+static void
+lmc_ds3_watchdog (lmc_softc_t * const sc)
+{
+    
+}
+
+
+/*
+ *  SSI methods
+ */
+
+static void lmc_ssi_init(lmc_softc_t * const sc)
+{
+	u16 mii17;
+	int cable;
+
+	sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000;
+
+	mii17 = lmc_mii_readreg(sc, 0, 17);
+
+	cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;
+	sc->ictl.cable_type = cable;
+
+	lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK);
+}
+
+static void
+lmc_ssi_default (lmc_softc_t * const sc)
+{
+  sc->lmc_miireg16 = LMC_MII16_LED_ALL;
+
+  /*
+   * make TXCLOCK always be an output
+   */
+  lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK);
+
+  sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
+  sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
+  sc->lmc_media->set_speed (sc, NULL);
+  sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
+}
+
+/*
+ * Given a user provided state, set ourselves up to match it.  This will
+ * always reset the card if needed.
+ */
+static void
+lmc_ssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
+{
+  if (ctl == NULL)
+    {
+      sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source);
+      sc->lmc_media->set_speed (sc, &sc->ictl);
+      lmc_set_protocol (sc, NULL);
+
+      return;
+    }
+
+  /*
+   * check for change in clock source
+   */
+  if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_INT
+      && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_EXT)
+    {
+      sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
+      sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT;
+    }
+  else if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_EXT
+	   && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_INT)
+    {
+      sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
+      sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
+    }
+
+  if (ctl->clock_rate != sc->ictl.clock_rate)
+    sc->lmc_media->set_speed (sc, ctl);
+
+  lmc_set_protocol (sc, ctl);
+}
+
+/*
+ * 1 == internal, 0 == external
+ */
+static void
+lmc_ssi_set_clock (lmc_softc_t * const sc, int ie)
+{
+  int old;
+  old = sc->ictl.clock_source;
+  if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
+    {
+      sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK);
+      LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+      sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
+      if(ie != old)
+        printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
+    }
+  else
+    {
+      sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK;
+      LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+      sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
+      if(ie != old)
+        printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
+    }
+}
+
+static void
+lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl)
+{
+  lmc_ctl_t *ictl = &sc->ictl;
+  lmc_av9110_t *av;
+
+  /* The original settings for a 100 kHz clock rate (8,25,0,0,2)
+   * were incorrect; they should have been 80,125,1,3,3.
+   * There are 17 parameter combinations that produce this frequency.
+   * For 1.5 MHz use 120,100,1,1,2 (226 parameter combinations).
+   */
+  if (ctl == NULL)
+    {
+      av = &ictl->cardspec.ssi;
+      ictl->clock_rate = 1500000;
+      av->f = ictl->clock_rate;
+      av->n = 120;
+      av->m = 100;
+      av->v = 1;
+      av->x = 1;
+      av->r = 2;
+
+      write_av9110 (sc, av->n, av->m, av->v, av->x, av->r);
+      return;
+    }
+
+  av = &ctl->cardspec.ssi;
+
+  if (av->f == 0)
+    return;
+
+  ictl->clock_rate = av->f;	/* this is the rate we actually run at */
+  ictl->cardspec.ssi = *av;
+
+  write_av9110 (sc, av->n, av->m, av->v, av->x, av->r);
+}
+
+/*
+ * return hardware link status.
+ * 0 == link is down, 1 == link is up.
+ */
+static int
+lmc_ssi_get_link_status (lmc_softc_t * const sc)
+{
+  u16 link_status;
+  u32 ticks;
+  int ret = 1;
+  int hw_hdsk = 1;
+
+  /*
+   * missing CTS?  Hmm.  If we require CTS on, we may never get the
+   * link to come up, so omit it in this test.
+   *
+   * Also, it seems that with a loopback cable, DCD isn't asserted,
+   * so just check for things like this:
+   *      DSR _must_ be asserted.
+   *      One of DCD or CTS must be asserted.
+   */
+
+  /* LMC 1000 (SSI) LED definitions
+   * led0 Green = power to adapter, Gate Array loaded &
+   *              driver attached
+   * led1 Green = DSR and DTR and RTS and CTS are set
+   * led2 Green = Cable detected
+   * led3 red   = No timing is available from the
+   *              cable or the on-board frequency
+   *              generator.
+   */
+
+  link_status = lmc_mii_readreg (sc, 0, 16);
+
+  /* Is the transmit clock still available */
+  ticks = LMC_CSR_READ (sc, csr_gp_timer);
+  ticks = 0x0000ffff - (ticks & 0x0000ffff);
+
+  lmc_led_on (sc, LMC_MII16_LED0);
+
+  /* ====== transmit clock determination ===== */
+  if (sc->lmc_timing == LMC_CTL_CLOCK_SOURCE_INT) {
+      lmc_led_off(sc, LMC_MII16_LED3);
+  }
+  else if (ticks == 0 ) {				/* no clock found ? */
+      ret = 0;
+      if (sc->last_led_err[3] != 1) {
+	      sc->extra_stats.tx_lossOfClockCnt++;
+	      printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name);
+      }
+      sc->last_led_err[3] = 1;
+      lmc_led_on (sc, LMC_MII16_LED3);	/* turn ON red LED */
+  }
+  else {
+      if(sc->last_led_err[3] == 1)
+          printk(KERN_WARNING "%s: Clock Returned\n", sc->name);
+      sc->last_led_err[3] = 0;
+      lmc_led_off (sc, LMC_MII16_LED3);		/* turn OFF red LED */
+  }
+
+  if ((link_status & LMC_MII16_SSI_DSR) == 0) { /* Also HSSI CA */
+      ret = 0;
+      hw_hdsk = 0;
+  }
+
+#ifdef CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE
+  if ((link_status & (LMC_MII16_SSI_CTS | LMC_MII16_SSI_DCD)) == 0){
+      ret = 0;
+      hw_hdsk = 0;
+  }
+#endif
+
+  if(hw_hdsk == 0){
+      if(sc->last_led_err[1] != 1)
+          printk(KERN_WARNING "%s: DSR not asserted\n", sc->name);
+      sc->last_led_err[1] = 1;
+      lmc_led_off(sc, LMC_MII16_LED1);
+  }
+  else {
+      if(sc->last_led_err[1] != 0)
+          printk(KERN_WARNING "%s: DSR now asserted\n", sc->name);
+      sc->last_led_err[1] = 0;
+      lmc_led_on(sc, LMC_MII16_LED1);
+  }
+
+  if(ret == 1) {
+      lmc_led_on(sc, LMC_MII16_LED2); /* Over all good status? */
+  }
+  
+  return ret;
+}
+
+static void
+lmc_ssi_set_link_status (lmc_softc_t * const sc, int state)
+{
+  if (state == LMC_LINK_UP)
+    {
+      sc->lmc_miireg16 |= (LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS);
+      printk (LMC_PRINTF_FMT ": asserting DTR and RTS\n", LMC_PRINTF_ARGS);
+    }
+  else
+    {
+      sc->lmc_miireg16 &= ~(LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS);
+      printk (LMC_PRINTF_FMT ": deasserting DTR and RTS\n", LMC_PRINTF_ARGS);
+    }
+
+  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+
+}
+
+/*
+ * 0 == 16bit, 1 == 32bit
+ */
+static void
+lmc_ssi_set_crc_length (lmc_softc_t * const sc, int state)
+{
+  if (state == LMC_CTL_CRC_LENGTH_32)
+    {
+      /* 32 bit */
+      sc->lmc_miireg16 |= LMC_MII16_SSI_CRC;
+      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
+      sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4;
+
+    }
+  else
+    {
+      /* 16 bit */
+      sc->lmc_miireg16 &= ~LMC_MII16_SSI_CRC;
+      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
+      sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2;
+    }
+
+  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+}
+
+/*
+ * These are bits to program the ssi frequency generator
+ */
+static inline void
+write_av9110_bit (lmc_softc_t * sc, int c)
+{
+  /*
+   * set the data bit as we need it.
+   */
+  sc->lmc_gpio &= ~(LMC_GEP_CLK);
+  if (c & 0x01)
+    sc->lmc_gpio |= LMC_GEP_DATA;
+  else
+    sc->lmc_gpio &= ~(LMC_GEP_DATA);
+  LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+
+  /*
+   * set the clock to high
+   */
+  sc->lmc_gpio |= LMC_GEP_CLK;
+  LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+
+  /*
+   * set the clock to low again.
+   */
+  sc->lmc_gpio &= ~(LMC_GEP_CLK);
+  LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+}
+
+static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r)
+{
+  int i;
+
+#if 0
+  printk (LMC_PRINTF_FMT ": speed %u, %d %d %d %d %d\n",
+	  LMC_PRINTF_ARGS, sc->ictl.clock_rate, n, m, v, x, r);
+#endif
+
+  sc->lmc_gpio |= LMC_GEP_SSI_GENERATOR;
+  sc->lmc_gpio &= ~(LMC_GEP_DATA | LMC_GEP_CLK);
+  LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+
+  /*
+   * Set the TXCLOCK, GENERATOR, SERIAL, and SERIALCLK
+   * as outputs.
+   */
+  lmc_gpio_mkoutput (sc, (LMC_GEP_DATA | LMC_GEP_CLK
+			  | LMC_GEP_SSI_GENERATOR));
+
+  sc->lmc_gpio &= ~(LMC_GEP_SSI_GENERATOR);
+  LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+
+  /*
+   * a shifting we will go...
+   */
+  for (i = 0; i < 7; i++)
+    write_av9110_bit (sc, n >> i);
+  for (i = 0; i < 7; i++)
+    write_av9110_bit (sc, m >> i);
+  for (i = 0; i < 1; i++)
+    write_av9110_bit (sc, v >> i);
+  for (i = 0; i < 2; i++)
+    write_av9110_bit (sc, x >> i);
+  for (i = 0; i < 2; i++)
+    write_av9110_bit (sc, r >> i);
+  for (i = 0; i < 5; i++)
+    write_av9110_bit (sc, 0x17 >> i);
+
+  /*
+   * stop driving serial-related signals
+   */
+  lmc_gpio_mkinput (sc,
+		    (LMC_GEP_DATA | LMC_GEP_CLK
+		     | LMC_GEP_SSI_GENERATOR));
+}
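+
+/*
+ * For reference: the stream shifted out above is, LSB first, 7 bits of
+ * n, 7 bits of m, 1 bit of v, 2 bits of x and 2 bits of r, followed by
+ * the fixed 5-bit pattern 0x17.  The driver's default 1.5 MHz setup in
+ * lmc_ssi_set_speed() programs the generator with
+ * write_av9110 (sc, 120, 100, 1, 1, 2).
+ */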
+
+static void lmc_ssi_watchdog(lmc_softc_t * const sc)
+{
+	u16 mii17 = lmc_mii_readreg(sc, 0, 17);
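+
+	/* Bits 5:3 of MII register 17 hold the SSI cable code (see
+	 * lmc_ssi_init()); a value of 7 appears to mean no cable is
+	 * attached, so drive the "cable detected" LED accordingly. */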
+	if (((mii17 >> 3) & 7) == 7)
+		lmc_led_off(sc, LMC_MII16_LED2);
+	else
+		lmc_led_on(sc, LMC_MII16_LED2);
+}
+
+/*
+ *  T1 methods
+ */
+
+/*
+ * The framer regs are multiplexed through MII regs 17 & 18:
+ * write the register address to MII reg 17 and the
+ * data to MII reg 18.
+ */
+static void
+lmc_t1_write (lmc_softc_t * const sc, int a, int d)
+{
+  lmc_mii_writereg (sc, 0, 17, a);
+  lmc_mii_writereg (sc, 0, 18, d);
+}
+
+/* Save a warning
+static int
+lmc_t1_read (lmc_softc_t * const sc, int a)
+{
+  lmc_mii_writereg (sc, 0, 17, a);
+  return lmc_mii_readreg (sc, 0, 18);
+}
+*/
+
+
+static void
+lmc_t1_init (lmc_softc_t * const sc)
+{
+  u16 mii16;
+  int i;
+
+  sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200;
+  mii16 = lmc_mii_readreg (sc, 0, 16);
+
+  /* reset 8370 */
+  mii16 &= ~LMC_MII16_T1_RST;
+  lmc_mii_writereg (sc, 0, 16, mii16 | LMC_MII16_T1_RST);
+  lmc_mii_writereg (sc, 0, 16, mii16);
+
+  /* set T1 or E1 line.  Uses sc->lmc_miireg16 in the function, so update it first */
+  sc->lmc_miireg16 = mii16;
+  lmc_t1_set_circuit_type(sc, LMC_CTL_CIRCUIT_TYPE_T1);
+  mii16 = sc->lmc_miireg16;
+
+  lmc_t1_write (sc, 0x01, 0x1B);	/* CR0     - primary control             */
+  lmc_t1_write (sc, 0x02, 0x42);	/* JAT_CR  - jitter atten config         */
+  lmc_t1_write (sc, 0x14, 0x00);	/* LOOP    - loopback config             */
+  lmc_t1_write (sc, 0x15, 0x00);	/* DL3_TS  - external data link timeslot */
+  lmc_t1_write (sc, 0x18, 0xFF);	/* PIO     - programmable I/O            */
+  lmc_t1_write (sc, 0x19, 0x30);	/* POE     - programmable OE             */
+  lmc_t1_write (sc, 0x1A, 0x0F);	/* CMUX    - clock input mux             */
+  lmc_t1_write (sc, 0x20, 0x41);	/* LIU_CR  - RX LIU config               */
+  lmc_t1_write (sc, 0x22, 0x76);	/* RLIU_CR - RX LIU config               */
+  lmc_t1_write (sc, 0x40, 0x03);	/* RCR0    - RX config                   */
+  lmc_t1_write (sc, 0x45, 0x00);	/* RALM    - RX alarm config             */
+  lmc_t1_write (sc, 0x46, 0x05);	/* LATCH   - RX alarm/err/cntr latch     */
+  lmc_t1_write (sc, 0x68, 0x40);	/* TLIU_CR - TX LIU config               */
+  lmc_t1_write (sc, 0x70, 0x0D);	/* TCR0    - TX framer config            */
+  lmc_t1_write (sc, 0x71, 0x05);	/* TCR1    - TX config                   */
+  lmc_t1_write (sc, 0x72, 0x0B);	/* TFRM    - TX frame format             */
+  lmc_t1_write (sc, 0x73, 0x00);	/* TERROR  - TX error insert             */
+  lmc_t1_write (sc, 0x74, 0x00);	/* TMAN    - TX manual Sa/FEBE config    */
+  lmc_t1_write (sc, 0x75, 0x00);	/* TALM    - TX alarm signal config      */
+  lmc_t1_write (sc, 0x76, 0x00);	/* TPATT   - TX test pattern config      */
+  lmc_t1_write (sc, 0x77, 0x00);	/* TLB     - TX inband loopback config   */
+  lmc_t1_write (sc, 0x90, 0x05);	/* CLAD_CR - clock rate adapter config   */
+  lmc_t1_write (sc, 0x91, 0x05);	/* CSEL    - clad freq sel               */
+  lmc_t1_write (sc, 0xA6, 0x00);	/* DL1_CTL - DL1 control                 */
+  lmc_t1_write (sc, 0xB1, 0x00);	/* DL2_CTL - DL2 control                 */
+  lmc_t1_write (sc, 0xD0, 0x47);	/* SBI_CR  - sys bus iface config        */
+  lmc_t1_write (sc, 0xD1, 0x70);	/* RSB_CR  - RX sys bus config           */
+  lmc_t1_write (sc, 0xD4, 0x30);	/* TSB_CR  - TX sys bus config           */
+  for (i = 0; i < 32; i++)
+    {
+      lmc_t1_write (sc, 0x0E0 + i, 0x00);	/* SBCn - sys bus per-channel ctl    */
+      lmc_t1_write (sc, 0x100 + i, 0x00);	/* TPCn - TX per-channel ctl         */
+      lmc_t1_write (sc, 0x180 + i, 0x00);	/* RPCn - RX per-channel ctl         */
+    }
+  for (i = 1; i < 25; i++)
+    {
+      lmc_t1_write (sc, 0x0E0 + i, 0x0D);	/* SBCn - sys bus per-channel ctl    */
+    }
+
+  mii16 |= LMC_MII16_T1_XOE;
+  lmc_mii_writereg (sc, 0, 16, mii16);
+  sc->lmc_miireg16 = mii16;
+}
+
+static void
+lmc_t1_default (lmc_softc_t * const sc)
+{
+  sc->lmc_miireg16 = LMC_MII16_LED_ALL;
+  sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
+  sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1);
+  sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
+  /* Right now we can only clock from our internal source */
+  sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
+}
+
+/*
+ * Given a user provided state, set ourselves up to match it.  This will
+ * always reset the card if needed.
+ */
+static void
+lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
+{
+  if (ctl == NULL)
+    {
+      sc->lmc_media->set_circuit_type (sc, sc->ictl.circuit_type);
+      lmc_set_protocol (sc, NULL);
+
+      return;
+    }
+  /*
+   * check for change in circuit type
+   */
+  if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_T1
+      && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_E1)
+    sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1);
+  else if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_E1
+           && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_T1)
+    sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_E1);
+  lmc_set_protocol (sc, ctl);
+}
+
+/*
+ * return hardware link status.
+ * 0 == link is down, 1 == link is up.
+ */
+static int
+lmc_t1_get_link_status (lmc_softc_t * const sc)
+{
+    u16 link_status;
+    int ret = 1;
+
+  /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
+   * led0 yellow = far-end adapter is in Red alarm condition
+   * led1 blue   = received an Alarm Indication signal
+   *               (upstream failure)
+   * led2 Green  = power to adapter, Gate Array loaded & driver
+   *               attached
+   * led3 red    = Loss of Signal (LOS) or out of frame (OOF)
+   *               conditions detected on T3 receive signal
+   */
+    lmc_trace(sc->lmc_device, "lmc_t1_get_link_status in");
+    lmc_led_on(sc, LMC_DS3_LED2);
+
+    lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM1_STATUS);
+    link_status = lmc_mii_readreg (sc, 0, 18);
+
+
+    if (link_status & T1F_RAIS) {			/* turn on blue LED */
+        ret = 0;
+        if(sc->last_led_err[1] != 1){
+            printk(KERN_WARNING "%s: Receive AIS/Blue Alarm. Far end in RED alarm\n", sc->name);
+        }
+        lmc_led_on(sc, LMC_DS3_LED1);
+        sc->last_led_err[1] = 1;
+    }
+    else {
+        if(sc->last_led_err[1] != 0){
+            printk(KERN_WARNING "%s: End AIS/Blue Alarm\n", sc->name);
+        }
+        lmc_led_off (sc, LMC_DS3_LED1);
+        sc->last_led_err[1] = 0;
+    }
+
+    /*
+     * Yellow Alarm is nasty evil stuff: it looks at data patterns
+     * inside the channel and confuses them with HDLC framing, so
+     * ignore all plain yellow alarms here.
+     *
+     * Do listen to the MultiFrame Yellow alarm, which, while
+     * implemented in different ways, isn't carried in the channel
+     * and is therefore somewhat more reliable.
+     */
+
+    if (link_status & T1F_RMYEL) {
+        ret = 0;
+        if(sc->last_led_err[0] != 1){
+            printk(KERN_WARNING "%s: Receive Yellow AIS Alarm\n", sc->name);
+        }
+        lmc_led_on(sc, LMC_DS3_LED0);
+        sc->last_led_err[0] = 1;
+    }
+    else {
+        if(sc->last_led_err[0] != 0){
+            printk(KERN_WARNING "%s: End of Yellow AIS Alarm\n", sc->name);
+        }
+        lmc_led_off(sc, LMC_DS3_LED0);
+        sc->last_led_err[0] = 0;
+    }
+
+    /*
+     * Loss of signal and loss of frame
+     * Use the green bit to identify which one lit the led
+     */
+    if(link_status & T1F_RLOF){
+        ret = 0;
+        if(sc->last_led_err[3] != 1){
+            printk(KERN_WARNING "%s: Local Red Alarm: Loss of Framing\n", sc->name);
+        }
+        lmc_led_on(sc, LMC_DS3_LED3);
+        sc->last_led_err[3] = 1;
+
+    }
+    else {
+        if(sc->last_led_err[3] != 0){
+            printk(KERN_WARNING "%s: End Red Alarm (LOF)\n", sc->name);
+        }
+        if( ! (link_status & T1F_RLOS))
+            lmc_led_off(sc, LMC_DS3_LED3);
+        sc->last_led_err[3] = 0;
+    }
+    
+    if(link_status & T1F_RLOS){
+        ret = 0;
+        if(sc->last_led_err[2] != 1){
+            printk(KERN_WARNING "%s: Local Red Alarm: Loss of Signal\n", sc->name);
+        }
+        lmc_led_on(sc, LMC_DS3_LED3);
+        sc->last_led_err[2] = 1;
+
+    }
+    else {
+        if(sc->last_led_err[2] != 0){
+            printk(KERN_WARNING "%s: End Red Alarm (LOS)\n", sc->name);
+        }
+        if( ! (link_status & T1F_RLOF))
+            lmc_led_off(sc, LMC_DS3_LED3);
+        sc->last_led_err[2] = 0;
+    }
+
+    sc->lmc_xinfo.t1_alarm1_status = link_status;
+
+    lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM2_STATUS);
+    sc->lmc_xinfo.t1_alarm2_status = lmc_mii_readreg (sc, 0, 18);
+
+    
+    lmc_trace(sc->lmc_device, "lmc_t1_get_link_status out");
+
+    return ret;
+}
+
+/*
+ * 1 == T1 Circuit Type , 0 == E1 Circuit Type
+ */
+static void
+lmc_t1_set_circuit_type (lmc_softc_t * const sc, int ie)
+{
+  if (ie == LMC_CTL_CIRCUIT_TYPE_T1) {
+      sc->lmc_miireg16 |= LMC_MII16_T1_Z;
+      sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_T1;
+      printk(KERN_INFO "%s: In T1 Mode\n", sc->name);
+  }
+  else {
+      sc->lmc_miireg16 &= ~LMC_MII16_T1_Z;
+      sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_E1;
+      printk(KERN_INFO "%s: In E1 Mode\n", sc->name);
+  }
+
+  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+  
+}
+
+/*
+ * 0 == 16bit, 1 == 32bit
+ */
+static void
+lmc_t1_set_crc_length (lmc_softc_t * const sc, int state)
+{
+  if (state == LMC_CTL_CRC_LENGTH_32)
+    {
+      /* 32 bit */
+      sc->lmc_miireg16 |= LMC_MII16_T1_CRC;
+      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
+      sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4;
+
+    }
+  else
+    {
+      /* 16 bit */
+      sc->lmc_miireg16 &= ~LMC_MII16_T1_CRC;
+      sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
+      sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2;
+
+    }
+
+  lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
+}
+
+/*
+ * 1 == internal, 0 == external
+ */
+static void
+lmc_t1_set_clock (lmc_softc_t * const sc, int ie)
+{
+  int old;
+  old = sc->ictl.clock_source;
+  if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
+    {
+      sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK);
+      LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+      sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
+      if(old != ie)
+        printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
+    }
+  else
+    {
+      sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK;
+      LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
+      sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
+      if(old != ie)
+        printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
+    }
+}
+
+static void
+lmc_t1_watchdog (lmc_softc_t * const sc)
+{
+}
+
+static void
+lmc_set_protocol (lmc_softc_t * const sc, lmc_ctl_t * ctl)
+{
+	if (!ctl)
+		sc->ictl.keepalive_onoff = LMC_CTL_ON;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_proto.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_proto.c
new file mode 100644
index 0000000..f600075
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_proto.c
@@ -0,0 +1,135 @@
+ /*
+  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
+  * All rights reserved.  www.lanmedia.com
+  *
+  * This code is written by:
+  * Andrew Stanley-Jones (asj@cban.com)
+  * Rob Braun (bbraun@vix.com),
+  * Michael Graff (explorer@vix.com) and
+  * Matt Thomas (matt@3am-software.com).
+  *
+  * With Help By:
+  * David Boggs
+  * Ron Crane
+  * Allan Cox
+  *
+  * This software may be used and distributed according to the terms
+  * of the GNU General Public License version 2, incorporated herein by reference.
+  *
+  * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
+  */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/inet.h>
+#include <linux/workqueue.h>
+#include <linux/proc_fs.h>
+#include <linux/bitops.h>
+#include <asm/processor.h>             /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/smp.h>
+
+#include "lmc.h"
+#include "lmc_var.h"
+#include "lmc_debug.h"
+#include "lmc_ioctl.h"
+#include "lmc_proto.h"
+
+// attach
+void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
+{
+    lmc_trace(sc->lmc_device, "lmc_proto_attach in");
+    if (sc->if_type == LMC_NET) {
+            struct net_device *dev = sc->lmc_device;
+            /*
+	     * Set a few basics here since this mode doesn't use HDLC
+             */
+            dev->flags |= IFF_POINTOPOINT;
+            dev->hard_header_len = 0;
+            dev->addr_len = 0;
+        }
+    lmc_trace(sc->lmc_device, "lmc_proto_attach out");
+}
+
+int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd)
+{
+	lmc_trace(sc->lmc_device, "lmc_proto_ioctl");
+	if (sc->if_type == LMC_PPP)
+		return hdlc_ioctl(sc->lmc_device, ifr, cmd);
+	return -EOPNOTSUPP;
+}
+
+int lmc_proto_open(lmc_softc_t *sc)
+{
+	int ret = 0;
+
+	lmc_trace(sc->lmc_device, "lmc_proto_open in");
+
+	if (sc->if_type == LMC_PPP) {
+		ret = hdlc_open(sc->lmc_device);
+		if (ret < 0)
+			printk(KERN_WARNING "%s: HDLC open failed: %d\n",
+			       sc->name, ret);
+	}
+
+	lmc_trace(sc->lmc_device, "lmc_proto_open out");
+	return ret;
+}
+
+void lmc_proto_close(lmc_softc_t *sc)
+{
+	lmc_trace(sc->lmc_device, "lmc_proto_close in");
+
+	if (sc->if_type == LMC_PPP)
+		hdlc_close(sc->lmc_device);
+
+	lmc_trace(sc->lmc_device, "lmc_proto_close out");
+}
+
+__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
+{
+    lmc_trace(sc->lmc_device, "lmc_proto_type in");
+    switch(sc->if_type){
+    case LMC_PPP:
+	    return hdlc_type_trans(skb, sc->lmc_device);
+	    break;
+    case LMC_NET:
+        return htons(ETH_P_802_2);
+        break;
+    case LMC_RAW: /* Packet type for skbuff kind of useless */
+        return htons(ETH_P_802_2);
+        break;
+    default:
+        printk(KERN_WARNING "%s: No protocol set for this interface, assuming 802.2 (which is wrong!!)\n", sc->name);
+        return htons(ETH_P_802_2);
+        break;
+    }
+    lmc_trace(sc->lmc_device, "lmc_proto_type out");
+
+}
+
+void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
+{
+    lmc_trace(sc->lmc_device, "lmc_proto_netif in");
+    switch(sc->if_type){
+    case LMC_PPP:
+    case LMC_NET:
+    default:
+        netif_rx(skb);
+        break;
+    case LMC_RAW:
+        break;
+    }
+    lmc_trace(sc->lmc_device, "lmc_proto_netif out");
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_proto.h b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_proto.h
new file mode 100644
index 0000000..662148c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_proto.h
@@ -0,0 +1,18 @@
+#ifndef _LMC_PROTO_H_
+#define _LMC_PROTO_H_
+
+#include <linux/hdlc.h>
+
+void lmc_proto_attach(lmc_softc_t *sc);
+int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd);
+int lmc_proto_open(lmc_softc_t *sc);
+void lmc_proto_close(lmc_softc_t *sc);
+__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
+void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb);
+
+static inline lmc_softc_t* dev_to_sc(struct net_device *dev)
+{
+	return (lmc_softc_t *)dev_to_hdlc(dev)->priv;
+}
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_var.h b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_var.h
new file mode 100644
index 0000000..a1d202d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/lmc/lmc_var.h
@@ -0,0 +1,470 @@
+#ifndef _LMC_VAR_H_
+#define _LMC_VAR_H_
+
+ /*
+  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
+  * All rights reserved.  www.lanmedia.com
+  *
+  * This code is written by:
+  * Andrew Stanley-Jones (asj@cban.com)
+  * Rob Braun (bbraun@vix.com),
+  * Michael Graff (explorer@vix.com) and
+  * Matt Thomas (matt@3am-software.com).
+  *
+  * This software may be used and distributed according to the terms
+  * of the GNU General Public License version 2, incorporated herein by reference.
+  */
+
+#include <linux/timer.h>
+
+/*
+ * basic definitions used in lmc include files
+ */
+
+typedef struct lmc___softc lmc_softc_t;
+typedef struct lmc___media lmc_media_t;
+typedef struct lmc___ctl lmc_ctl_t;
+
+#define lmc_csrptr_t    unsigned long
+
+#define LMC_REG_RANGE 0x80
+
+#define LMC_PRINTF_FMT  "%s"
+#define LMC_PRINTF_ARGS	(sc->lmc_device->name)
+
+#define TX_TIMEOUT (2*HZ)
+
+#define LMC_TXDESCS            32
+#define LMC_RXDESCS            32
+
+#define LMC_LINK_UP            1
+#define LMC_LINK_DOWN          0
+
+/* These macros for generic read and write to and from the dec chip */
+#define LMC_CSR_READ(sc, csr) \
+	inl((sc)->lmc_csrs.csr)
+#define LMC_CSR_WRITE(sc, reg, val) \
+	outl((val), (sc)->lmc_csrs.reg)
+
+//#ifdef _LINUX_DELAY_H
+//	#define SLOW_DOWN_IO udelay(2);
+//	#undef __SLOW_DOWN_IO
+//	#define __SLOW_DOWN_IO udelay(2);
+//#endif
+
+#define DELAY(n) SLOW_DOWN_IO
+
+#define lmc_delay() inl(sc->lmc_csrs.csr_9)
+
+/* This macro syncs up with the MII so that reads and writes can take place */
+#define LMC_MII_SYNC(sc) do {int n=32; while( n >= 0 ) { \
+                LMC_CSR_WRITE((sc), csr_9, 0x20000); \
+		lmc_delay(); \
+		LMC_CSR_WRITE((sc), csr_9, 0x30000); \
+                lmc_delay(); \
+		n--; }} while(0)
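+
+/*
+ * The loop above clocks CSR9 bit 16 (the MII management clock) 33 times
+ * while holding bit 17 (the management data line) high, the usual
+ * preamble that resynchronizes the PHY management interface before a
+ * read or write.  lmc_delay() is just a dummy CSR read used as a
+ * bus-speed delay between the writes.
+ */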
+
+struct lmc_regfile_t {
+    lmc_csrptr_t csr_busmode;                  /* CSR0 */
+    lmc_csrptr_t csr_txpoll;                   /* CSR1 */
+    lmc_csrptr_t csr_rxpoll;                   /* CSR2 */
+    lmc_csrptr_t csr_rxlist;                   /* CSR3 */
+    lmc_csrptr_t csr_txlist;                   /* CSR4 */
+    lmc_csrptr_t csr_status;                   /* CSR5 */
+    lmc_csrptr_t csr_command;                  /* CSR6 */
+    lmc_csrptr_t csr_intr;                     /* CSR7 */
+    lmc_csrptr_t csr_missed_frames;            /* CSR8 */
+    lmc_csrptr_t csr_9;                        /* CSR9 */
+    lmc_csrptr_t csr_10;                       /* CSR10 */
+    lmc_csrptr_t csr_11;                       /* CSR11 */
+    lmc_csrptr_t csr_12;                       /* CSR12 */
+    lmc_csrptr_t csr_13;                       /* CSR13 */
+    lmc_csrptr_t csr_14;                       /* CSR14 */
+    lmc_csrptr_t csr_15;                       /* CSR15 */
+};
+
+#define csr_enetrom             csr_9   /* 21040 */
+#define csr_reserved            csr_10  /* 21040 */
+#define csr_full_duplex         csr_11  /* 21040 */
+#define csr_bootrom             csr_10  /* 21041/21140A/?? */
+#define csr_gp                  csr_12  /* 21140* */
+#define csr_watchdog            csr_15  /* 21140* */
+#define csr_gp_timer            csr_11  /* 21041/21140* */
+#define csr_srom_mii            csr_9   /* 21041/21140* */
+#define csr_sia_status          csr_12  /* 2104x */
+#define csr_sia_connectivity    csr_13  /* 2104x */
+#define csr_sia_tx_rx           csr_14  /* 2104x */
+#define csr_sia_general         csr_15  /* 2104x */
+
+/* tulip length/control transmit descriptor definitions
+ *  used to define bits in the second tulip_desc_t field (length)
+ *  for the transmit descriptor -baz */
+
+#define LMC_TDES_FIRST_BUFFER_SIZE       ((u32)(0x000007FF))
+#define LMC_TDES_SECOND_BUFFER_SIZE      ((u32)(0x003FF800))
+#define LMC_TDES_HASH_FILTERING          ((u32)(0x00400000))
+#define LMC_TDES_DISABLE_PADDING         ((u32)(0x00800000))
+#define LMC_TDES_SECOND_ADDR_CHAINED     ((u32)(0x01000000))
+#define LMC_TDES_END_OF_RING             ((u32)(0x02000000))
+#define LMC_TDES_ADD_CRC_DISABLE         ((u32)(0x04000000))
+#define LMC_TDES_SETUP_PACKET            ((u32)(0x08000000))
+#define LMC_TDES_INVERSE_FILTERING       ((u32)(0x10000000))
+#define LMC_TDES_FIRST_SEGMENT           ((u32)(0x20000000))
+#define LMC_TDES_LAST_SEGMENT            ((u32)(0x40000000))
+#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u32)(0x80000000))
+
+#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER  11
+#define TDES_COLLISION_COUNT_BIT_NUMBER     3
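+
+/*
+ * Illustrative only: a transmit descriptor's length word combines these
+ * flags with the buffer size in the low bits, e.g.
+ *   flag = LMC_TDES_FIRST_SEGMENT | LMC_TDES_LAST_SEGMENT
+ *        | LMC_TDES_INTERRUPT_ON_COMPLETION
+ *        | (len & LMC_TDES_FIRST_BUFFER_SIZE);
+ */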
+
+/* Constants for the RCV descriptor RDES */
+
+#define LMC_RDES_OVERFLOW             ((u32)(0x00000001))
+#define LMC_RDES_CRC_ERROR            ((u32)(0x00000002))
+#define LMC_RDES_DRIBBLING_BIT        ((u32)(0x00000004))
+#define LMC_RDES_REPORT_ON_MII_ERR    ((u32)(0x00000008))
+#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u32)(0x00000010))
+#define LMC_RDES_FRAME_TYPE           ((u32)(0x00000020))
+#define LMC_RDES_COLLISION_SEEN       ((u32)(0x00000040))
+#define LMC_RDES_FRAME_TOO_LONG       ((u32)(0x00000080))
+#define LMC_RDES_LAST_DESCRIPTOR      ((u32)(0x00000100))
+#define LMC_RDES_FIRST_DESCRIPTOR     ((u32)(0x00000200))
+#define LMC_RDES_MULTICAST_FRAME      ((u32)(0x00000400))
+#define LMC_RDES_RUNT_FRAME           ((u32)(0x00000800))
+#define LMC_RDES_DATA_TYPE            ((u32)(0x00003000))
+#define LMC_RDES_LENGTH_ERROR         ((u32)(0x00004000))
+#define LMC_RDES_ERROR_SUMMARY        ((u32)(0x00008000))
+#define LMC_RDES_FRAME_LENGTH         ((u32)(0x3FFF0000))
+#define LMC_RDES_OWN_BIT              ((u32)(0x80000000))
+
+#define RDES_FRAME_LENGTH_BIT_NUMBER       16
+
+#define LMC_RDES_ERROR_MASK ( (u32)( \
+	  LMC_RDES_OVERFLOW \
+	| LMC_RDES_DRIBBLING_BIT \
+	| LMC_RDES_REPORT_ON_MII_ERR \
+        | LMC_RDES_COLLISION_SEEN ) )
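+
+/*
+ * For example, the length of a completed receive frame can be pulled
+ * out of the descriptor status word with
+ *   len = (status & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER;
+ * while LMC_RDES_ERROR_SUMMARY and LMC_RDES_ERROR_MASK identify
+ * descriptors that completed with receive errors.
+ */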
+
+
+/*
+ * Ioctl info
+ */
+
+typedef struct {
+	u32	n;
+	u32	m;
+	u32	v;
+	u32	x;
+	u32	r;
+	u32	f;
+	u32	exact;
+} lmc_av9110_t;
+
+/*
+ * Common structure passed to the ioctl code.
+ */
+struct lmc___ctl {
+	u32	cardtype;
+	u32	clock_source;		/* HSSI, T1 */
+	u32	clock_rate;		/* T1 */
+	u32	crc_length;
+	u32	cable_length;		/* DS3 */
+	u32	scrambler_onoff;	/* DS3 */
+	u32	cable_type;		/* T1 */
+	u32	keepalive_onoff;	/* protocol */
+	u32	ticks;			/* ticks/sec */
+	union {
+		lmc_av9110_t	ssi;
+	} cardspec;
+	u32       circuit_type;   /* T1 or E1 */
+};
+
+
+/*
+ * Careful, look at the data sheet, there's more to this
+ * structure than meets the eye.  It should probably be:
+ *
+ * struct tulip_desc_t {
+ *         u8  own:1;
+ *         u32 status:31;
+ *         u32 control:10;
+ *         u32 buffer1;
+ *         u32 buffer2;
+ * };
+ * You could also expand status control to provide more bit information
+ */
+
+struct tulip_desc_t {
+	s32 status;
+	s32 length;
+	u32 buffer1;
+	u32 buffer2;
+};
+
+/*
+ * media independent methods to check on media status, link, light LEDs,
+ * etc.
+ */
+struct lmc___media {
+	void	(* init)(lmc_softc_t * const);
+	void	(* defaults)(lmc_softc_t * const);
+	void	(* set_status)(lmc_softc_t * const, lmc_ctl_t *);
+	void	(* set_clock_source)(lmc_softc_t * const, int);
+	void	(* set_speed)(lmc_softc_t * const, lmc_ctl_t *);
+	void	(* set_cable_length)(lmc_softc_t * const, int);
+	void	(* set_scrambler)(lmc_softc_t * const, int);
+	int	(* get_link_status)(lmc_softc_t * const);
+	void	(* set_link_status)(lmc_softc_t * const, int);
+	void	(* set_crc_length)(lmc_softc_t * const, int);
+        void    (* set_circuit_type)(lmc_softc_t * const, int);
+        void	(* watchdog)(lmc_softc_t * const);
+};
+
+
+#define STATCHECK     0xBEEFCAFE
+
+struct lmc_extra_statistics
+{
+	u32       version_size;
+	u32       lmc_cardtype;
+
+	u32       tx_ProcTimeout;
+	u32       tx_IntTimeout;
+	u32       tx_NoCompleteCnt;
+	u32       tx_MaxXmtsB4Int;
+	u32       tx_TimeoutCnt;
+	u32       tx_OutOfSyncPtr;
+	u32       tx_tbusy0;
+	u32       tx_tbusy1;
+	u32       tx_tbusy_calls;
+	u32       resetCount;
+	u32       lmc_txfull;
+	u32       tbusy;
+	u32       dirtyTx;
+	u32       lmc_next_tx;
+	u32       otherTypeCnt;
+	u32       lastType;
+	u32       lastTypeOK;
+	u32       txLoopCnt;
+	u32       usedXmtDescripCnt;
+	u32       txIndexCnt;
+	u32       rxIntLoopCnt;
+
+	u32       rx_SmallPktCnt;
+	u32       rx_BadPktSurgeCnt;
+	u32       rx_BuffAllocErr;
+	u32       tx_lossOfClockCnt;
+
+	/* T1 error counters */
+	u32       framingBitErrorCount;
+	u32       lineCodeViolationCount;
+
+	u32       lossOfFrameCount;
+	u32       changeOfFrameAlignmentCount;
+	u32       severelyErroredFrameCount;
+
+	u32       check;
+};
+
+typedef struct lmc_xinfo {
+	u32       Magic0;                         /* BEEFCAFE */
+
+	u32       PciCardType;
+	u32       PciSlotNumber;          /* PCI slot number       */
+
+	u16	       DriverMajorVersion;
+	u16	       DriverMinorVersion;
+	u16	       DriverSubVersion;
+
+	u16	       XilinxRevisionNumber;
+	u16	       MaxFrameSize;
+
+	u16     	  t1_alarm1_status;
+	u16       	t1_alarm2_status;
+
+	int             link_status;
+	u32       mii_reg16;
+
+	u32       Magic1;                         /* DEADBEEF */
+} LMC_XINFO;
+
+
+/*
+ * forward decl
+ */
+struct lmc___softc {
+	char                   *name;
+	u8			board_idx;
+	struct lmc_extra_statistics extra_stats;
+	struct net_device      *lmc_device;
+
+	int                     hang, rxdesc, bad_packet, some_counter;
+	u32  	         	txgo;
+	struct lmc_regfile_t	lmc_csrs;
+	volatile u32		lmc_txtick;
+	volatile u32		lmc_rxtick;
+	u32			lmc_flags;
+	u32			lmc_intrmask;	/* our copy of csr_intr */
+	u32			lmc_cmdmode;	/* our copy of csr_cmdmode */
+	u32			lmc_busmode;	/* our copy of csr_busmode */
+	u32			lmc_gpio_io;	/* state of in/out settings */
+	u32			lmc_gpio;	/* state of outputs */
+	struct sk_buff*		lmc_txq[LMC_TXDESCS];
+	struct sk_buff*		lmc_rxq[LMC_RXDESCS];
+	volatile
+	struct tulip_desc_t	lmc_rxring[LMC_RXDESCS];
+	volatile
+	struct tulip_desc_t	lmc_txring[LMC_TXDESCS];
+	unsigned int		lmc_next_rx, lmc_next_tx;
+	volatile
+	unsigned int		lmc_taint_tx, lmc_taint_rx;
+	int			lmc_tx_start, lmc_txfull;
+	int			lmc_txbusy;
+	u16			lmc_miireg16;
+	int			lmc_ok;
+	int			last_link_status;
+	int			lmc_cardtype;
+	u32               	last_frameerr;
+	lmc_media_t	       *lmc_media;
+	struct timer_list	timer;
+	lmc_ctl_t		ictl;
+	u32			TxDescriptControlInit;
+
+	int                     tx_TimeoutInd; /* additional driver state */
+	int                     tx_TimeoutDisplay;
+	unsigned int		lastlmc_taint_tx;
+	int                     lasttx_packets;
+	u32			tx_clockState;
+	u32			lmc_crcSize;
+	LMC_XINFO		lmc_xinfo;
+	char                    lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */
+	char                    lmc_timing; /* for HSSI and SSI */
+	int                     got_irq;
+
+	char                    last_led_err[4];
+
+	u32                     last_int;
+	u32                     num_int;
+
+	spinlock_t              lmc_lock;
+	u16			if_type;       /* HDLC/PPP or NET */
+
+	/* Failure cases */
+	u8			failed_ring;
+	u8			failed_recv_alloc;
+
+	/* Structure check */
+	u32                     check;
+};
+
+#define LMC_PCI_TIME 1
+#define LMC_EXT_TIME 0
+
+#define PKT_BUF_SZ              1542  /* was 1536 */
+
+/* CSR5 settings */
+#define TIMER_INT     0x00000800
+#define TP_LINK_FAIL  0x00001000
+#define TP_LINK_PASS  0x00000010
+#define NORMAL_INT    0x00010000
+#define ABNORMAL_INT  0x00008000
+#define RX_JABBER_INT 0x00000200
+#define RX_DIED       0x00000100
+#define RX_NOBUFF     0x00000080
+#define RX_INT        0x00000040
+#define TX_FIFO_UNDER 0x00000020
+#define TX_JABBER     0x00000008
+#define TX_NOBUFF     0x00000004
+#define TX_DIED       0x00000002
+#define TX_INT        0x00000001
+
+/* CSR6 settings */
+#define OPERATION_MODE  0x00000200 /* Full Duplex      */
+#define PROMISC_MODE    0x00000040 /* Promiscuous Mode */
+#define RECEIVE_ALL     0x40000000 /* Receive All      */
+#define PASS_BAD_FRAMES 0x00000008 /* Pass Bad Frames  */
+
+/* Dec control registers  CSR6 as well */
+#define LMC_DEC_ST 0x00002000
+#define LMC_DEC_SR 0x00000002
+
+/* CSR15 settings */
+#define RECV_WATCHDOG_DISABLE 0x00000010
+#define JABBER_DISABLE        0x00000001
+
+/* More settings */
+/*
+ * aSR6 -- Command (Operation Mode) Register
+ */
+#define TULIP_CMD_RECEIVEALL    0x40000000L /* (RW)  Receive all frames? */
+#define TULIP_CMD_MUSTBEONE     0x02000000L /* (RW)  Must Be One (21140) */
+#define TULIP_CMD_TXTHRSHLDCTL  0x00400000L /* (RW)  Transmit Threshold Mode (21140) */
+#define TULIP_CMD_STOREFWD      0x00200000L /* (RW)  Store and Forward (21140) */
+#define TULIP_CMD_NOHEARTBEAT   0x00080000L /* (RW)  No Heartbeat (21140) */
+#define TULIP_CMD_PORTSELECT    0x00040000L /* (RW)  Port Select (100Mb) (21140) */
+#define TULIP_CMD_FULLDUPLEX    0x00000200L /* (RW)  Full Duplex Mode */
+#define TULIP_CMD_OPERMODE      0x00000C00L /* (RW)  Operating Mode */
+#define TULIP_CMD_PROMISCUOUS   0x00000041L /* (RW)  Promiscuous Mode */
+#define TULIP_CMD_PASSBADPKT    0x00000008L /* (RW)  Pass Bad Frames */
+#define TULIP_CMD_THRESHOLDCTL  0x0000C000L /* (RW)  Threshold Control */
+
+#define TULIP_GP_PINSET         0x00000100L
+#define TULIP_BUSMODE_SWRESET   0x00000001L
+#define TULIP_WATCHDOG_TXDISABLE 0x00000001L
+#define TULIP_WATCHDOG_RXDISABLE 0x00000010L
+
+#define TULIP_STS_NORMALINTR    0x00010000L /* (RW)  Normal Interrupt */
+#define TULIP_STS_ABNRMLINTR    0x00008000L /* (RW)  Abnormal Interrupt */
+#define TULIP_STS_ERI           0x00004000L /* (RW)  Early Receive Interrupt */
+#define TULIP_STS_SYSERROR      0x00002000L /* (RW)  System Error */
+#define TULIP_STS_GTE           0x00000800L /* (RW)  General Purpose Timer Exp */
+#define TULIP_STS_ETI           0x00000400L /* (RW)  Early Transmit Interrupt */
+#define TULIP_STS_RXWT          0x00000200L /* (RW)  Receiver Watchdog Timeout */
+#define TULIP_STS_RXSTOPPED     0x00000100L /* (RW)  Receiver Process Stopped */
+#define TULIP_STS_RXNOBUF       0x00000080L /* (RW)  Receive Buf Unavail */
+#define TULIP_STS_RXINTR        0x00000040L /* (RW)  Receive Interrupt */
+#define TULIP_STS_TXUNDERFLOW   0x00000020L /* (RW)  Transmit Underflow */
+#define TULIP_STS_TXJABER       0x00000008L /* (RW)  Jabber timeout */
+#define TULIP_STS_TXNOBUF       0x00000004L
+#define TULIP_STS_TXSTOPPED     0x00000002L /* (RW)  Transmit Process Stopped */
+#define TULIP_STS_TXINTR        0x00000001L /* (RW)  Transmit Interrupt */
+
+#define TULIP_STS_RXS_STOPPED   0x00000000L /*        000 - Stopped */
+
+#define TULIP_STS_RXSTOPPED     0x00000100L             /* (RW)  Receive Process Stopped */
+#define TULIP_STS_RXNOBUF       0x00000080L
+
+#define TULIP_CMD_TXRUN         0x00002000L /* (RW)  Start/Stop Transmitter */
+#define TULIP_CMD_RXRUN         0x00000002L /* (RW)  Start/Stop Receive Filtering */
+#define TULIP_DSTS_TxDEFERRED   0x00000001      /* Initially Deferred */
+#define TULIP_DSTS_OWNER        0x80000000      /* Owner (1 = 21040) */
+#define TULIP_DSTS_RxMIIERR     0x00000008
+#define LMC_DSTS_ERRSUM         (TULIP_DSTS_RxMIIERR)
+
+#define TULIP_DEFAULT_INTR_MASK  (TULIP_STS_NORMALINTR \
+  | TULIP_STS_RXINTR       \
+  | TULIP_STS_TXINTR     \
+  | TULIP_STS_ABNRMLINTR \
+  | TULIP_STS_SYSERROR   \
+  | TULIP_STS_TXSTOPPED  \
+  | TULIP_STS_TXUNDERFLOW\
+  | TULIP_STS_RXSTOPPED )
+
+#define DESC_OWNED_BY_SYSTEM   ((u32)(0x00000000))
+#define DESC_OWNED_BY_DC21X4   ((u32)(0x80000000))
+
+#ifndef TULIP_CMD_RECEIVEALL
+#define TULIP_CMD_RECEIVEALL 0x40000000L
+#endif
+
+/* Adapter module number */
+#define LMC_ADAP_HSSI           2
+#define LMC_ADAP_DS3            3
+#define LMC_ADAP_SSI            4
+#define LMC_ADAP_T1             5
+
+#define LMC_MTU 1500
+
+#define LMC_CRC_LEN_16 2  /* 16-bit CRC */
+#define LMC_CRC_LEN_32 4
+
+#endif /* _LMC_VAR_H_ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/n2.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/n2.c
new file mode 100644
index 0000000..315bf09
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/n2.c
@@ -0,0 +1,566 @@
+/*
+ * SDL Inc. RISCom/N2 synchronous serial card driver for Linux
+ *
+ * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>
+ *
+ * Note: integrated CSU/DSU/DDS are not supported by this driver
+ *
+ * Sources of information:
+ *    Hitachi HD64570 SCA User's Manual
+ *    SDL Inc. PPP/HDLC/CISCO driver
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/capability.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/hdlc.h>
+#include <asm/io.h>
+#include "hd64570.h"
+
+
+static const char* version = "SDL RISCom/N2 driver version: 1.15";
+static const char* devname = "RISCom/N2";
+
+#undef DEBUG_PKT
+#define DEBUG_RINGS
+
+#define USE_WINDOWSIZE 16384
+#define USE_BUS16BITS 1
+#define CLOCK_BASE 9830400	/* 9.8304 MHz */
+#define MAX_PAGES      16	/* 16 RAM pages at max */
+#define MAX_RAM_SIZE 0x80000	/* 512 KB */
+#if MAX_RAM_SIZE > MAX_PAGES * USE_WINDOWSIZE
+#undef MAX_RAM_SIZE
+#define MAX_RAM_SIZE (MAX_PAGES * USE_WINDOWSIZE)
+#endif
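+/* with MAX_PAGES windows of USE_WINDOWSIZE bytes (16 x 16 KB), the card
+ * can only address 256 KB, so MAX_RAM_SIZE is clamped to that */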
+#define N2_IOPORTS 0x10
+#define NEED_DETECT_RAM
+#define NEED_SCA_MSCI_INTR
+#define MAX_TX_BUFFERS 10
+
+static char *hw;	/* pointer to hw=xxx command line string */
+
+/* RISCom/N2 Board Registers */
+
+/* PC Control Register */
+#define N2_PCR 0
+#define PCR_RUNSCA 1     /* Run 64570 */
+#define PCR_VPM    2     /* Enable VPM - needed if using RAM above 1 MB */
+#define PCR_ENWIN  4     /* Open window */
+#define PCR_BUS16  8     /* 16-bit bus */
+
+
+/* Memory Base Address Register */
+#define N2_BAR 2
+
+
+/* Page Scan Register  */
+#define N2_PSR 4
+#define WIN16K       0x00
+#define WIN32K       0x20
+#define WIN64K       0x40
+#define PSR_WINBITS  0x60
+#define PSR_DMAEN    0x80
+#define PSR_PAGEBITS 0x0F
+
+
+/* Modem Control Reg */
+#define N2_MCR 6
+#define CLOCK_OUT_PORT1 0x80
+#define CLOCK_OUT_PORT0 0x40
+#define TX422_PORT1     0x20
+#define TX422_PORT0     0x10
+#define DSR_PORT1       0x08
+#define DSR_PORT0       0x04
+#define DTR_PORT1       0x02
+#define DTR_PORT0       0x01
+
+
+typedef struct port_s {
+	struct net_device *dev;
+	struct card_s *card;
+	spinlock_t lock;	/* TX lock */
+	sync_serial_settings settings;
+	int valid;		/* port enabled */
+	int rxpart;		/* partial frame received, next frame invalid*/
+	unsigned short encoding;
+	unsigned short parity;
+	u16 rxin;		/* rx ring buffer 'in' pointer */
+	u16 txin;		/* tx ring buffer 'in' and 'last' pointers */
+	u16 txlast;
+	u8 rxs, txs, tmc;	/* SCA registers */
+	u8 phy_node;		/* physical port # - 0 or 1 */
+	u8 log_node;		/* logical port # */
+}port_t;
+
+
+
+typedef struct card_s {
+	u8 __iomem *winbase;		/* ISA window base address */
+	u32 phy_winbase;	/* ISA physical base address */
+	u32 ram_size;		/* number of bytes */
+	u16 io;			/* IO Base address */
+	u16 buff_offset;	/* offset of first buffer of first channel */
+	u16 rx_ring_buffers;	/* number of buffers in a ring */
+	u16 tx_ring_buffers;
+	u8 irq;			/* IRQ (3-15) */
+
+	port_t ports[2];
+	struct card_s *next_card;
+}card_t;
+
+
+static card_t *first_card;
+static card_t **new_card = &first_card;
+
+
+#define sca_reg(reg, card) (0x8000 | (card)->io | \
+			    ((reg) & 0x0F) | (((reg) & 0xF0) << 6))
+#define sca_in(reg, card)		inb(sca_reg(reg, card))
+#define sca_out(value, reg, card)	outb(value, sca_reg(reg, card))
+#define sca_inw(reg, card)		inw(sca_reg(reg, card))
+#define sca_outw(value, reg, card)	outw(value, sca_reg(reg, card))
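+
+/* The SCA registers are folded into the card's ISA I/O space: bits 3-0
+ * of the register offset stay in place and bits 7-4 are shifted up
+ * above the base address.  With io = 0x300 (a hypothetical setting),
+ * register 0x24 maps to 0x8000 | 0x300 | 0x04 | (0x20 << 6) = 0x8B04.
+ */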
+
+#define port_to_card(port)		((port)->card)
+#define log_node(port)			((port)->log_node)
+#define phy_node(port)			((port)->phy_node)
+#define winsize(card)			(USE_WINDOWSIZE)
+#define winbase(card)      	     	((card)->winbase)
+#define get_port(card, port)		((card)->ports[port].valid ? \
+					 &(card)->ports[port] : NULL)
+
+
+static __inline__ u8 sca_get_page(card_t *card)
+{
+	return inb(card->io + N2_PSR) & PSR_PAGEBITS;
+}
+
+
+static __inline__ void openwin(card_t *card, u8 page)
+{
+	u8 psr = inb(card->io + N2_PSR);
+	outb((psr & ~PSR_PAGEBITS) | page, card->io + N2_PSR);
+}
+
+
+#include "hd64570.c"
+
+
+static void n2_set_iface(port_t *port)
+{
+	card_t *card = port->card;
+	int io = card->io;
+	u8 mcr = inb(io + N2_MCR);
+	u8 msci = get_msci(port);
+	u8 rxs = port->rxs & CLK_BRG_MASK;
+	u8 txs = port->txs & CLK_BRG_MASK;
+
+	switch(port->settings.clock_type) {
+	case CLOCK_INT:
+		mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
+		rxs |= CLK_BRG_RX; /* BRG output */
+		txs |= CLK_RXCLK_TX; /* RX clock */
+		break;
+
+	case CLOCK_TXINT:
+		mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
+		rxs |= CLK_LINE_RX; /* RXC input */
+		txs |= CLK_BRG_TX; /* BRG output */
+		break;
+
+	case CLOCK_TXFROMRX:
+		mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
+		rxs |= CLK_LINE_RX; /* RXC input */
+		txs |= CLK_RXCLK_TX; /* RX clock */
+		break;
+
+	default:		/* Clock EXTernal */
+		mcr &= port->phy_node ? ~CLOCK_OUT_PORT1 : ~CLOCK_OUT_PORT0;
+		rxs |= CLK_LINE_RX; /* RXC input */
+		txs |= CLK_LINE_TX; /* TXC input */
+	}
+
+	outb(mcr, io + N2_MCR);
+	port->rxs = rxs;
+	port->txs = txs;
+	sca_out(rxs, msci + RXS, card);
+	sca_out(txs, msci + TXS, card);
+	sca_set_port(port);
+}
+
+
+
+static int n2_open(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+	int io = port->card->io;
+	u8 mcr = inb(io + N2_MCR) | (port->phy_node ? TX422_PORT1:TX422_PORT0);
+	int result;
+
+	result = hdlc_open(dev);
+	if (result)
+		return result;
+
+	mcr &= port->phy_node ? ~DTR_PORT1 : ~DTR_PORT0; /* set DTR ON */
+	outb(mcr, io + N2_MCR);
+
+	outb(inb(io + N2_PCR) | PCR_ENWIN, io + N2_PCR); /* open window */
+	outb(inb(io + N2_PSR) | PSR_DMAEN, io + N2_PSR); /* enable dma */
+	sca_open(dev);
+	n2_set_iface(port);
+	return 0;
+}
+
+
+
+static int n2_close(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+	int io = port->card->io;
+	u8 mcr = inb(io+N2_MCR) | (port->phy_node ? TX422_PORT1 : TX422_PORT0);
+
+	sca_close(dev);
+	mcr |= port->phy_node ? DTR_PORT1 : DTR_PORT0; /* set DTR OFF */
+	outb(mcr, io + N2_MCR);
+	hdlc_close(dev);
+	return 0;
+}
+
+
+
+static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	const size_t size = sizeof(sync_serial_settings);
+	sync_serial_settings new_line;
+	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+	port_t *port = dev_to_port(dev);
+
+#ifdef DEBUG_RINGS
+	if (cmd == SIOCDEVPRIVATE) {
+		sca_dump_rings(dev);
+		return 0;
+	}
+#endif
+	if (cmd != SIOCWANDEV)
+		return hdlc_ioctl(dev, ifr, cmd);
+
+	switch(ifr->ifr_settings.type) {
+	case IF_GET_IFACE:
+		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
+		if (ifr->ifr_settings.size < size) {
+			ifr->ifr_settings.size = size; /* data size wanted */
+			return -ENOBUFS;
+		}
+		if (copy_to_user(line, &port->settings, size))
+			return -EFAULT;
+		return 0;
+
+	case IF_IFACE_SYNC_SERIAL:
+		if(!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&new_line, line, size))
+			return -EFAULT;
+
+		if (new_line.clock_type != CLOCK_EXT &&
+		    new_line.clock_type != CLOCK_TXFROMRX &&
+		    new_line.clock_type != CLOCK_INT &&
+		    new_line.clock_type != CLOCK_TXINT)
+			return -EINVAL;	/* No such clock setting */
+
+		if (new_line.loopback != 0 && new_line.loopback != 1)
+			return -EINVAL;
+
+		memcpy(&port->settings, &new_line, size); /* Update settings */
+		n2_set_iface(port);
+		return 0;
+
+	default:
+		return hdlc_ioctl(dev, ifr, cmd);
+	}
+}
+
+
+
+static void n2_destroy_card(card_t *card)
+{
+	int cnt;
+
+	for (cnt = 0; cnt < 2; cnt++)
+		if (card->ports[cnt].card) {
+			struct net_device *dev = port_to_dev(&card->ports[cnt]);
+			unregister_hdlc_device(dev);
+		}
+
+	if (card->irq)
+		free_irq(card->irq, card);
+
+	if (card->winbase) {
+		iounmap(card->winbase);
+		release_mem_region(card->phy_winbase, USE_WINDOWSIZE);
+	}
+
+	if (card->io)
+		release_region(card->io, N2_IOPORTS);
+	if (card->ports[0].dev)
+		free_netdev(card->ports[0].dev);
+	if (card->ports[1].dev)
+		free_netdev(card->ports[1].dev);
+	kfree(card);
+}
+
+static const struct net_device_ops n2_ops = {
+	.ndo_open       = n2_open,
+	.ndo_stop       = n2_close,
+	.ndo_change_mtu = hdlc_change_mtu,
+	.ndo_start_xmit = hdlc_start_xmit,
+	.ndo_do_ioctl   = n2_ioctl,
+};
+
+static int __init n2_run(unsigned long io, unsigned long irq,
+			 unsigned long winbase, long valid0, long valid1)
+{
+	card_t *card;
+	u8 cnt, pcr;
+	int i;
+
+	if (io < 0x200 || io > 0x3FF || (io % N2_IOPORTS) != 0) {
+		pr_err("invalid I/O port value\n");
+		return -ENODEV;
+	}
+
+	if (irq < 3 || irq > 15 || irq == 6) /* FIXME */ {
+		pr_err("invalid IRQ value\n");
+		return -ENODEV;
+	}
+
+	if (winbase < 0xA0000 || winbase > 0xFFFFF || (winbase & 0xFFF) != 0) {
+		pr_err("invalid RAM value\n");
+		return -ENODEV;
+	}
+
+	card = kzalloc(sizeof(card_t), GFP_KERNEL);
+	if (card == NULL)
+		return -ENOBUFS;
+
+	card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
+	card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
+	if (!card->ports[0].dev || !card->ports[1].dev) {
+		pr_err("unable to allocate memory\n");
+		n2_destroy_card(card);
+		return -ENOMEM;
+	}
+
+	if (!request_region(io, N2_IOPORTS, devname)) {
+		pr_err("I/O port region in use\n");
+		n2_destroy_card(card);
+		return -EBUSY;
+	}
+	card->io = io;
+
+	if (request_irq(irq, sca_intr, 0, devname, card)) {
+		pr_err("could not allocate IRQ\n");
+		n2_destroy_card(card);
+		return -EBUSY;
+	}
+	card->irq = irq;
+
+	if (!request_mem_region(winbase, USE_WINDOWSIZE, devname)) {
+		pr_err("could not request RAM window\n");
+		n2_destroy_card(card);
+		return -EBUSY;
+	}
+	card->phy_winbase = winbase;
+	card->winbase = ioremap(winbase, USE_WINDOWSIZE);
+	if (!card->winbase) {
+		pr_err("ioremap() failed\n");
+		n2_destroy_card(card);
+		return -EFAULT;
+	}
+
+	outb(0, io + N2_PCR);
+	outb(winbase >> 12, io + N2_BAR);
+
+	switch (USE_WINDOWSIZE) {
+	case 16384:
+		outb(WIN16K, io + N2_PSR);
+		break;
+
+	case 32768:
+		outb(WIN32K, io + N2_PSR);
+		break;
+
+	case 65536:
+		outb(WIN64K, io + N2_PSR);
+		break;
+
+	default:
+		pr_err("invalid window size\n");
+		n2_destroy_card(card);
+		return -ENODEV;
+	}
+
+	pcr = PCR_ENWIN | PCR_VPM | (USE_BUS16BITS ? PCR_BUS16 : 0);
+	outb(pcr, io + N2_PCR);
+
+	card->ram_size = sca_detect_ram(card, card->winbase, MAX_RAM_SIZE);
+
+	/* number of TX + RX buffers for one port */
+	i = card->ram_size / ((valid0 + valid1) * (sizeof(pkt_desc) +
+						   HDLC_MAX_MRU));
+
+	card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
+	card->rx_ring_buffers = i - card->tx_ring_buffers;
+
+	card->buff_offset = (valid0 + valid1) * sizeof(pkt_desc) *
+		(card->tx_ring_buffers + card->rx_ring_buffers);
+
+	pr_info("RISCom/N2 %u KB RAM, IRQ%u, using %u TX + %u RX packet rings\n",
+		card->ram_size / 1024, card->irq,
+		card->tx_ring_buffers, card->rx_ring_buffers);
+
+	if (card->tx_ring_buffers < 1) {
+		pr_err("RAM test failed\n");
+		n2_destroy_card(card);
+		return -EIO;
+	}
+
+	pcr |= PCR_RUNSCA;		/* run SCA */
+	outb(pcr, io + N2_PCR);
+	outb(0, io + N2_MCR);
+
+	sca_init(card, 0);
+	for (cnt = 0; cnt < 2; cnt++) {
+		port_t *port = &card->ports[cnt];
+		struct net_device *dev = port_to_dev(port);
+		hdlc_device *hdlc = dev_to_hdlc(dev);
+
+		if ((cnt == 0 && !valid0) || (cnt == 1 && !valid1))
+			continue;
+
+		port->phy_node = cnt;
+		port->valid = 1;
+
+		if ((cnt == 1) && valid0)
+			port->log_node = 1;
+
+		spin_lock_init(&port->lock);
+		dev->irq = irq;
+		dev->mem_start = winbase;
+		dev->mem_end = winbase + USE_WINDOWSIZE - 1;
+		dev->tx_queue_len = 50;
+		dev->netdev_ops = &n2_ops;
+		hdlc->attach = sca_attach;
+		hdlc->xmit = sca_xmit;
+		port->settings.clock_type = CLOCK_EXT;
+		port->card = card;
+
+		if (register_hdlc_device(dev)) {
+			pr_warn("unable to register hdlc device\n");
+			port->card = NULL;
+			n2_destroy_card(card);
+			return -ENOBUFS;
+		}
+		sca_init_port(port); /* Set up SCA memory */
+
+		netdev_info(dev, "RISCom/N2 node %d\n", port->phy_node);
+	}
+
+	*new_card = card;
+	new_card = &card->next_card;
+
+	return 0;
+}
+
+
+
+static int __init n2_init(void)
+{
+	if (hw == NULL) {
+#ifdef MODULE
+		pr_info("no card initialized\n");
+#endif
+		return -EINVAL;	/* no parameters specified, abort */
+	}
+
+	pr_info("%s\n", version);
+
+	do {
+		unsigned long io, irq, ram;
+		long valid[2] = { 0, 0 }; /* Default = both ports disabled */
+
+		io = simple_strtoul(hw, &hw, 0);
+
+		if (*hw++ != ',')
+			break;
+		irq = simple_strtoul(hw, &hw, 0);
+
+		if (*hw++ != ',')
+			break;
+		ram = simple_strtoul(hw, &hw, 0);
+
+		if (*hw++ != ',')
+			break;
+		while (1) {
+			if (*hw == '0' && !valid[0])
+				valid[0] = 1; /* Port 0 enabled */
+			else if (*hw == '1' && !valid[1])
+				valid[1] = 1; /* Port 1 enabled */
+			else
+				break;
+			hw++;
+		}
+
+		if (!valid[0] && !valid[1])
+			break;	/* at least one port must be used */
+
+		if (*hw == ':' || *hw == '\x0')
+			n2_run(io, irq, ram, valid[0], valid[1]);
+
+		if (*hw == '\x0')
+			return first_card ? 0 : -EINVAL;
+	} while (*hw++ == ':');
+
+	pr_err("invalid hardware parameters\n");
+	return first_card ? 0 : -EINVAL;
+}
+
+
+static void __exit n2_cleanup(void)
+{
+	card_t *card = first_card;
+
+	while (card) {
+		card_t *ptr = card;
+		card = card->next_card;
+		n2_destroy_card(ptr);
+	}
+}
+
+
+module_init(n2_init);
+module_exit(n2_cleanup);
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("RISCom/N2 serial port driver");
+MODULE_LICENSE("GPL v2");
+module_param(hw, charp, 0444);
+MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,...");
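
As a rough illustration of the parameter format parsed by n2_init() above, a hypothetical string such as

    hw=0x300,10,0xD0000,01

describes one card at I/O base 0x300, IRQ 10 and a RAM window at 0xD0000 with both ports enabled; further cards may be appended after a ':' separator. The values here are only assumed examples chosen to satisfy the range checks in n2_run() (I/O 0x200-0x3FF aligned to N2_IOPORTS, IRQ 3-15 except 6, RAM window 0xA0000-0xFFFFF on a 4 KB boundary).
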
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/pc300-falc-lh.h b/ap/os/linux/linux-3.4.x/drivers/net/wan/pc300-falc-lh.h
new file mode 100644
index 0000000..01ed23c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/pc300-falc-lh.h
@@ -0,0 +1,1238 @@
+/*
+ * falc.h	Description of the Siemens FALC T1/E1 framer.
+ *
+ * Author:	Ivan Passos <ivan@cyclades.com>
+ *
+ * Copyright:	(c) 2000-2001 Cyclades Corp.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ * $Log: falc-lh.h,v $
+ * Revision 3.1  2001/06/15 12:41:10  regina
+ * upping major version number
+ *
+ * Revision 1.1.1.1  2001/06/13 20:24:47  daniela
+ * PC300 initial CVS version (3.4.0-pre1)
+ *
+ * Revision 1.1 2000/05/15 ivan
+ * Included DJA bits for the LIM2 register.
+ *
+ * Revision 1.0 2000/02/22 ivan
+ * Initial version.
+ *
+ */
+
+#ifndef _FALC_LH_H
+#define _FALC_LH_H
+
+#define NUM_OF_T1_CHANNELS	24
+#define NUM_OF_E1_CHANNELS	32
+
+/*>>>>>>>>>>>>>>>>>  FALC Register Bits (Transmit Mode)  <<<<<<<<<<<<<<<<<<< */
+
+/* CMDR (Command Register)
+   ---------------- E1 & T1 ------------------------------ */
+#define CMDR_RMC	0x80
+#define CMDR_RRES	0x40
+#define CMDR_XREP	0x20
+#define CMDR_XRES	0x10
+#define CMDR_XHF	0x08
+#define CMDR_XTF	0x04
+#define CMDR_XME	0x02
+#define CMDR_SRES	0x01
+
+/* MODE (Mode Register)
+   ----------------- E1 & T1 ----------------------------- */
+#define MODE_MDS2	0x80
+#define MODE_MDS1	0x40
+#define MODE_MDS0	0x20
+#define MODE_BRAC	0x10
+#define MODE_HRAC	0x08
+
+/* IPC (Interrupt Port Configuration)
+   ----------------- E1 & T1 ----------------------------- */
+#define IPC_VIS		0x80
+#define IPC_SCI		0x04
+#define IPC_IC1		0x02
+#define IPC_IC0		0x01
+
+/* CCR1 (Common Configuration Register 1)
+   ----------------- E1 & T1 ----------------------------- */
+#define CCR1_SFLG       0x80
+#define CCR1_XTS16RA    0x40
+#define CCR1_BRM        0x40
+#define CCR1_CASSYM     0x20
+#define CCR1_EDLX       0x20
+#define CCR1_EITS       0x10
+#define CCR1_ITF        0x08
+#define CCR1_RFT1       0x02
+#define CCR1_RFT0       0x01
+
+/* CCR3 (Common Configuration Register 3)
+   ---------------- E1 & T1 ------------------------------ */
+
+#define CCR3_PRE1       0x80
+#define CCR3_PRE0       0x40
+#define CCR3_EPT        0x20
+#define CCR3_RADD       0x10
+#define CCR3_RCRC       0x04
+#define CCR3_XCRC       0x02
+
+
+/* RTR1-4 (Receive Timeslot Register 1-4)
+   ---------------- E1 & T1 ------------------------------ */
+
+#define RTR1_TS0        0x80
+#define RTR1_TS1        0x40
+#define RTR1_TS2        0x20
+#define RTR1_TS3        0x10
+#define RTR1_TS4        0x08
+#define RTR1_TS5        0x04
+#define RTR1_TS6        0x02
+#define RTR1_TS7        0x01
+
+#define RTR2_TS8        0x80
+#define RTR2_TS9        0x40
+#define RTR2_TS10       0x20
+#define RTR2_TS11       0x10
+#define RTR2_TS12       0x08
+#define RTR2_TS13       0x04
+#define RTR2_TS14       0x02
+#define RTR2_TS15       0x01
+
+#define RTR3_TS16       0x80
+#define RTR3_TS17       0x40
+#define RTR3_TS18       0x20
+#define RTR3_TS19       0x10
+#define RTR3_TS20       0x08
+#define RTR3_TS21       0x04
+#define RTR3_TS22       0x02
+#define RTR3_TS23       0x01
+
+#define RTR4_TS24       0x80
+#define RTR4_TS25       0x40
+#define RTR4_TS26       0x20
+#define RTR4_TS27       0x10
+#define RTR4_TS28       0x08
+#define RTR4_TS29       0x04
+#define RTR4_TS30       0x02
+#define RTR4_TS31       0x01
+
+
+/* TTR1-4 (Transmit Timeslot Register 1-4)
+   ---------------- E1 & T1 ------------------------------ */
+
+#define TTR1_TS0        0x80
+#define TTR1_TS1        0x40
+#define TTR1_TS2        0x20
+#define TTR1_TS3        0x10
+#define TTR1_TS4        0x08
+#define TTR1_TS5        0x04
+#define TTR1_TS6        0x02
+#define TTR1_TS7        0x01
+
+#define TTR2_TS8        0x80
+#define TTR2_TS9        0x40
+#define TTR2_TS10       0x20
+#define TTR2_TS11       0x10
+#define TTR2_TS12       0x08
+#define TTR2_TS13       0x04
+#define TTR2_TS14       0x02
+#define TTR2_TS15       0x01
+
+#define TTR3_TS16       0x80
+#define TTR3_TS17       0x40
+#define TTR3_TS18       0x20
+#define TTR3_TS19       0x10
+#define TTR3_TS20       0x08
+#define TTR3_TS21       0x04
+#define TTR3_TS22       0x02
+#define TTR3_TS23       0x01
+
+#define TTR4_TS24       0x80
+#define TTR4_TS25       0x40
+#define TTR4_TS26       0x20
+#define TTR4_TS27       0x10
+#define TTR4_TS28       0x08
+#define TTR4_TS29       0x04
+#define TTR4_TS30       0x02
+#define TTR4_TS31       0x01
+
+
+
+/* IMR0-4 (Interrupt Mask Register 0-4)
+
+   ----------------- E1 & T1 ----------------------------- */
+
+#define IMR0_RME        0x80
+#define IMR0_RFS        0x40
+#define IMR0_T8MS       0x20
+#define IMR0_ISF        0x20
+#define IMR0_RMB        0x10
+#define IMR0_CASC       0x08
+#define IMR0_RSC        0x08
+#define IMR0_CRC6       0x04
+#define IMR0_CRC4       0x04
+#define IMR0_PDEN	0x02
+#define IMR0_RPF        0x01
+
+#define IMR1_CASE       0x80
+#define IMR1_RDO        0x40
+#define IMR1_ALLS       0x20
+#define IMR1_XDU        0x10
+#define IMR1_XMB        0x08
+#define IMR1_XLSC       0x02
+#define IMR1_XPR        0x01
+#define IMR1_LLBSC	0x80
+
+#define IMR2_FAR        0x80
+#define IMR2_LFA        0x40
+#define IMR2_MFAR       0x20
+#define IMR2_T400MS     0x10
+#define IMR2_LMFA       0x10
+#define IMR2_AIS        0x08
+#define IMR2_LOS        0x04
+#define IMR2_RAR        0x02
+#define IMR2_RA         0x01
+
+#define IMR3_ES         0x80
+#define IMR3_SEC        0x40
+#define IMR3_LMFA16     0x20
+#define IMR3_AIS16      0x10
+#define IMR3_RA16       0x08
+#define IMR3_API        0x04
+#define IMR3_XSLP       0x20
+#define IMR3_XSLN       0x10
+#define IMR3_LLBSC      0x08
+#define IMR3_XRS        0x04
+#define IMR3_SLN        0x02
+#define IMR3_SLP        0x01
+
+#define IMR4_LFA        0x80
+#define IMR4_FER        0x40
+#define IMR4_CER        0x20
+#define IMR4_AIS        0x10
+#define IMR4_LOS        0x08
+#define IMR4_CVE        0x04
+#define IMR4_SLIP       0x02
+#define IMR4_EBE        0x01
+
+/* FMR0-5 for E1 and T1  (Framer Mode Register ) */
+
+#define FMR0_XC1        0x80
+#define FMR0_XC0        0x40
+#define FMR0_RC1        0x20
+#define FMR0_RC0        0x10
+#define FMR0_EXTD       0x08
+#define FMR0_ALM        0x04
+#define E1_FMR0_FRS     0x02
+#define T1_FMR0_FRS     0x08
+#define FMR0_SRAF       0x04
+#define FMR0_EXLS       0x02
+#define FMR0_SIM        0x01
+
+#define FMR1_MFCS       0x80
+#define FMR1_AFR        0x40
+#define FMR1_ENSA       0x20
+#define FMR1_CTM        0x80
+#define FMR1_SIGM       0x40
+#define FMR1_EDL        0x20
+#define FMR1_PMOD       0x10
+#define FMR1_XFS        0x08
+#define FMR1_CRC        0x08
+#define FMR1_ECM        0x04
+#define FMR1_IMOD       0x02
+#define FMR1_XAIS       0x01
+
+#define FMR2_RFS1       0x80
+#define FMR2_RFS0       0x40
+#define FMR2_MCSP	0x40
+#define FMR2_RTM        0x20
+#define FMR2_SSP        0x20
+#define FMR2_DAIS       0x10
+#define FMR2_SAIS       0x08
+#define FMR2_PLB        0x04
+#define FMR2_AXRA       0x02
+#define FMR2_ALMF       0x01
+#define FMR2_EXZE       0x01
+
+#define LOOP_RTM	0x40
+#define LOOP_SFM	0x40
+#define LOOP_ECLB	0x20
+#define LOOP_CLA	0x1f
+
+/*--------------------- E1 ----------------------------*/
+#define FMR3_XLD	0x20
+#define FMR3_XLU	0x10
+
+/*--------------------- T1 ----------------------------*/
+#define FMR4_AIS3       0x80
+#define FMR4_TM         0x40
+#define FMR4_XRA        0x20
+#define FMR4_SSC1       0x10
+#define FMR4_SSC0       0x08
+#define FMR4_AUTO       0x04
+#define FMR4_FM1        0x02
+#define FMR4_FM0        0x01
+
+#define FMR5_SRS        0x80
+#define FMR5_EIBR       0x40
+#define FMR5_XLD        0x20
+#define FMR5_XLU        0x10
+
+
+/* LOOP (Channel Loop Back)
+
+   ------------------ E1 & T1 ---------------------------- */
+
+#define LOOP_SFM        0x40
+#define LOOP_ECLB       0x20
+#define LOOP_CLA4       0x10
+#define LOOP_CLA3       0x08
+#define LOOP_CLA2       0x04
+#define LOOP_CLA1       0x02
+#define LOOP_CLA0       0x01
+
+
+
+/* XSW (Transmit Service Word Pulseframe)
+
+   ------------------- E1 --------------------------- */
+
+#define XSW_XSIS        0x80
+#define XSW_XTM         0x40
+#define XSW_XRA         0x20
+#define XSW_XY0         0x10
+#define XSW_XY1         0x08
+#define XSW_XY2         0x04
+#define XSW_XY3         0x02
+#define XSW_XY4         0x01
+
+
+/* XSP (Transmit Spare Bits)
+
+   ------------------- E1 --------------------------- */
+
+#define XSP_XAP         0x80
+#define XSP_CASEN       0x40
+#define XSP_TT0         0x20
+#define XSP_EBP         0x10
+#define XSP_AXS         0x08
+#define XSP_XSIF        0x04
+#define XSP_XS13        0x02
+#define XSP_XS15        0x01
+
+
+/* XC0/1 (Transmit Control 0/1)
+   ------------------ E1 & T1 ---------------------------- */
+
+#define XC0_SA8E        0x80
+#define XC0_SA7E        0x40
+#define XC0_SA6E        0x20
+#define XC0_SA5E        0x10
+#define XC0_SA4E        0x08
+#define XC0_BRM         0x80
+#define XC0_MFBS        0x40
+#define XC0_SFRZ        0x10
+#define XC0_XCO2        0x04
+#define XC0_XCO1        0x02
+#define XC0_XCO0        0x01
+
+#define XC1_XTO5        0x20
+#define XC1_XTO4        0x10
+#define XC1_XTO3        0x08
+#define XC1_XTO2        0x04
+#define XC1_XTO1        0x02
+#define XC1_XTO0        0x01
+
+
+/* RC0/1 (Receive Control 0/1)
+   ------------------ E1 & T1 ---------------------------- */
+
+#define RC0_SICS        0x40
+#define RC0_CRCI        0x20
+#define RC0_XCRCI       0x10
+#define RC0_RDIS        0x08
+#define RC0_RCO2        0x04
+#define RC0_RCO1        0x02
+#define RC0_RCO0        0x01
+
+#define RC1_SWD         0x80
+#define RC1_ASY4        0x40
+#define RC1_RRAM        0x40
+#define RC1_RTO5        0x20
+#define RC1_RTO4        0x10
+#define RC1_RTO3        0x08
+#define RC1_RTO2        0x04
+#define RC1_RTO1        0x02
+#define RC1_RTO0        0x01
+
+
+
+/* XPM0-2 (Transmit Pulse Mask 0-2)
+   --------------------- E1 & T1 ------------------------- */
+
+#define XPM0_XP12       0x80
+#define XPM0_XP11       0x40
+#define XPM0_XP10       0x20
+#define XPM0_XP04       0x10
+#define XPM0_XP03       0x08
+#define XPM0_XP02       0x04
+#define XPM0_XP01       0x02
+#define XPM0_XP00       0x01
+
+#define XPM1_XP30       0x80
+#define XPM1_XP24       0x40
+#define XPM1_XP23       0x20
+#define XPM1_XP22       0x10
+#define XPM1_XP21       0x08
+#define XPM1_XP20       0x04
+#define XPM1_XP14       0x02
+#define XPM1_XP13       0x01
+
+#define XPM2_XLHP       0x80
+#define XPM2_XLT        0x40
+#define XPM2_DAXLT      0x20
+#define XPM2_XP34       0x08
+#define XPM2_XP33       0x04
+#define XPM2_XP32       0x02
+#define XPM2_XP31       0x01
+
+
+/* TSWM (Transparent Service Word Mask)
+   ------------------ E1 ---------------------------- */
+
+#define TSWM_TSIS       0x80
+#define TSWM_TSIF       0x40
+#define TSWM_TRA        0x20
+#define TSWM_TSA4       0x10
+#define TSWM_TSA5       0x08
+#define TSWM_TSA6       0x04
+#define TSWM_TSA7       0x02
+#define TSWM_TSA8       0x01
+
+/* IDLE <Idle Channel Code Register>
+
+   ------------------ E1 & T1 ----------------------- */
+
+#define IDLE_IDL7       0x80
+#define IDLE_IDL6       0x40
+#define IDLE_IDL5       0x20
+#define IDLE_IDL4       0x10
+#define IDLE_IDL3       0x08
+#define IDLE_IDL2       0x04
+#define IDLE_IDL1       0x02
+#define IDLE_IDL0       0x01
+
+
+/* XSA4-8 <Transmit SA4-8 Register(Read/Write) >
+   -------------------E1 ----------------------------- */
+
+#define XSA4_XS47       0x80
+#define XSA4_XS46       0x40
+#define XSA4_XS45       0x20
+#define XSA4_XS44       0x10
+#define XSA4_XS43       0x08
+#define XSA4_XS42       0x04
+#define XSA4_XS41       0x02
+#define XSA4_XS40       0x01
+
+#define XSA5_XS57       0x80
+#define XSA5_XS56       0x40
+#define XSA5_XS55       0x20
+#define XSA5_XS54       0x10
+#define XSA5_XS53       0x08
+#define XSA5_XS52       0x04
+#define XSA5_XS51       0x02
+#define XSA5_XS50       0x01
+
+#define XSA6_XS67       0x80
+#define XSA6_XS66       0x40
+#define XSA6_XS65       0x20
+#define XSA6_XS64       0x10
+#define XSA6_XS63       0x08
+#define XSA6_XS62       0x04
+#define XSA6_XS61       0x02
+#define XSA6_XS60       0x01
+
+#define XSA7_XS77       0x80
+#define XSA7_XS76       0x40
+#define XSA7_XS75       0x20
+#define XSA7_XS74       0x10
+#define XSA7_XS73       0x08
+#define XSA7_XS72       0x04
+#define XSA7_XS71       0x02
+#define XSA7_XS70       0x01
+
+#define XSA8_XS87       0x80
+#define XSA8_XS86       0x40
+#define XSA8_XS85       0x20
+#define XSA8_XS84       0x10
+#define XSA8_XS83       0x08
+#define XSA8_XS82       0x04
+#define XSA8_XS81       0x02
+#define XSA8_XS80       0x01
+
+
+/* XDL1-3 (Transmit DL-Bit Register1-3 (read/write))
+   ----------------------- T1 --------------------- */
+
+#define XDL1_XDL17      0x80
+#define XDL1_XDL16      0x40
+#define XDL1_XDL15      0x20
+#define XDL1_XDL14      0x10
+#define XDL1_XDL13      0x08
+#define XDL1_XDL12      0x04
+#define XDL1_XDL11      0x02
+#define XDL1_XDL10      0x01
+
+#define XDL2_XDL27      0x80
+#define XDL2_XDL26      0x40
+#define XDL2_XDL25      0x20
+#define XDL2_XDL24      0x10
+#define XDL2_XDL23      0x08
+#define XDL2_XDL22      0x04
+#define XDL2_XDL21      0x02
+#define XDL2_XDL20      0x01
+
+#define XDL3_XDL37      0x80
+#define XDL3_XDL36      0x40
+#define XDL3_XDL35      0x20
+#define XDL3_XDL34      0x10
+#define XDL3_XDL33      0x08
+#define XDL3_XDL32      0x04
+#define XDL3_XDL31      0x02
+#define XDL3_XDL30      0x01
+
+
+/* ICB1-4 (Idle Channel Register 1-4)
+   ------------------ E1 ---------------------------- */
+
+#define E1_ICB1_IC0	0x80
+#define E1_ICB1_IC1	0x40
+#define E1_ICB1_IC2	0x20
+#define E1_ICB1_IC3	0x10
+#define E1_ICB1_IC4	0x08
+#define E1_ICB1_IC5	0x04
+#define E1_ICB1_IC6	0x02
+#define E1_ICB1_IC7	0x01
+
+#define E1_ICB2_IC8	0x80
+#define E1_ICB2_IC9	0x40
+#define E1_ICB2_IC10	0x20
+#define E1_ICB2_IC11	0x10
+#define E1_ICB2_IC12	0x08
+#define E1_ICB2_IC13	0x04
+#define E1_ICB2_IC14	0x02
+#define E1_ICB2_IC15	0x01
+
+#define E1_ICB3_IC16	0x80
+#define E1_ICB3_IC17	0x40
+#define E1_ICB3_IC18	0x20
+#define E1_ICB3_IC19	0x10
+#define E1_ICB3_IC20	0x08
+#define E1_ICB3_IC21	0x04
+#define E1_ICB3_IC22	0x02
+#define E1_ICB3_IC23	0x01
+
+#define E1_ICB4_IC24	0x80
+#define E1_ICB4_IC25	0x40
+#define E1_ICB4_IC26	0x20
+#define E1_ICB4_IC27	0x10
+#define E1_ICB4_IC28	0x08
+#define E1_ICB4_IC29	0x04
+#define E1_ICB4_IC30	0x02
+#define E1_ICB4_IC31	0x01
+
+/* ICB1-4 (Idle Channel Register 1-4)
+   ------------------ T1 ---------------------------- */
+
+#define T1_ICB1_IC1	0x80
+#define T1_ICB1_IC2	0x40
+#define T1_ICB1_IC3	0x20
+#define T1_ICB1_IC4	0x10
+#define T1_ICB1_IC5	0x08
+#define T1_ICB1_IC6	0x04
+#define T1_ICB1_IC7	0x02
+#define T1_ICB1_IC8	0x01
+
+#define T1_ICB2_IC9	0x80
+#define T1_ICB2_IC10	0x40
+#define T1_ICB2_IC11	0x20
+#define T1_ICB2_IC12	0x10
+#define T1_ICB2_IC13	0x08
+#define T1_ICB2_IC14	0x04
+#define T1_ICB2_IC15	0x02
+#define T1_ICB2_IC16	0x01
+
+#define T1_ICB3_IC17	0x80
+#define T1_ICB3_IC18	0x40
+#define T1_ICB3_IC19	0x20
+#define T1_ICB3_IC20	0x10
+#define T1_ICB3_IC21	0x08
+#define T1_ICB3_IC22	0x04
+#define T1_ICB3_IC23	0x02
+#define T1_ICB3_IC24	0x01
+
+/* FMR3 (Framer Mode Register 3)
+   --------------------E1------------------------ */
+
+#define FMR3_CMI        0x08
+#define FMR3_SYNSA      0x04
+#define FMR3_CFRZ       0x02
+#define FMR3_EXTIW      0x01
+
+
+
+/* CCB1-3 (Clear Channel Register)
+   ------------------- T1 ----------------------- */
+
+#define CCB1_CH1        0x80
+#define CCB1_CH2        0x40
+#define CCB1_CH3        0x20
+#define CCB1_CH4        0x10
+#define CCB1_CH5        0x08
+#define CCB1_CH6        0x04
+#define CCB1_CH7        0x02
+#define CCB1_CH8        0x01
+
+#define CCB2_CH9        0x80
+#define CCB2_CH10       0x40
+#define CCB2_CH11       0x20
+#define CCB2_CH12       0x10
+#define CCB2_CH13       0x08
+#define CCB2_CH14       0x04
+#define CCB2_CH15       0x02
+#define CCB2_CH16       0x01
+
+#define CCB3_CH17       0x80
+#define CCB3_CH18       0x40
+#define CCB3_CH19       0x20
+#define CCB3_CH20       0x10
+#define CCB3_CH21       0x08
+#define CCB3_CH22       0x04
+#define CCB3_CH23       0x02
+#define CCB3_CH24       0x01
+
+
+/* LIM0/1 (Line Interface Mode 0/1)
+   ------------------- E1 & T1 --------------------------- */
+
+#define LIM0_XFB        0x80
+#define LIM0_XDOS       0x40
+#define LIM0_SCL1       0x20
+#define LIM0_SCL0       0x10
+#define LIM0_EQON       0x08
+#define LIM0_ELOS       0x04
+#define LIM0_LL         0x02
+#define LIM0_MAS        0x01
+
+#define LIM1_EFSC       0x80
+#define LIM1_RIL2       0x40
+#define LIM1_RIL1       0x20
+#define LIM1_RIL0       0x10
+#define LIM1_DCOC       0x08
+#define LIM1_JATT       0x04
+#define LIM1_RL         0x02
+#define LIM1_DRS        0x01
+
+
+/* PCDR (Pulse Count Detection Register(Read/Write))
+   ------------------ E1 & T1 ------------------------- */
+
+#define PCDR_PCD7	0x80
+#define PCDR_PCD6	0x40
+#define PCDR_PCD5	0x20
+#define PCDR_PCD4	0x10
+#define PCDR_PCD3	0x08
+#define PCDR_PCD2	0x04
+#define PCDR_PCD1	0x02
+#define PCDR_PCD0	0x01
+
+#define PCRR_PCR7	0x80
+#define PCRR_PCR6	0x40
+#define PCRR_PCR5	0x20
+#define PCRR_PCR4	0x10
+#define PCRR_PCR3	0x08
+#define PCRR_PCR2	0x04
+#define PCRR_PCR1	0x02
+#define PCRR_PCR0	0x01
+
+
+/* LIM2 (Line Interface Mode 2)
+
+   ------------------ E1 & T1 ---------------------------- */
+
+#define LIM2_DJA2	0x20
+#define LIM2_DJA1	0x10
+#define LIM2_LOS2	0x02
+#define LIM2_LOS1	0x01
+
+/* LCR1 (Loop Code Register 1) */
+
+#define LCR1_EPRM	0x80
+#define	LCR1_XPRBS	0x40
+
+/* SIC1 (System Interface Control 1) */
+#define SIC1_SRSC	0x80
+#define SIC1_RBS1	0x20
+#define SIC1_RBS0	0x10
+#define SIC1_SXSC	0x08
+#define SIC1_XBS1	0x02
+#define SIC1_XBS0	0x01
+
+/* DEC (Disable Error Counter)
+   ------------------ E1 & T1 ---------------------------- */
+
+#define DEC_DCEC3       0x20
+#define DEC_DBEC        0x10
+#define DEC_DCEC1       0x08
+#define DEC_DCEC        0x08
+#define DEC_DEBC        0x04
+#define DEC_DCVC        0x02
+#define DEC_DFEC        0x01
+
+
+/* FALC Register Bits (Receive Mode)
+   ---------------------------------------------------------------------------- */
+
+
+/* FRS0/1 (Framer Receive Status Register 0/1)
+   ----------------- E1 & T1 ---------------------------------- */
+
+#define FRS0_LOS        0x80
+#define FRS0_AIS        0x40
+#define FRS0_LFA        0x20
+#define FRS0_RRA        0x10
+#define FRS0_API        0x08
+#define FRS0_NMF        0x04
+#define FRS0_LMFA       0x02
+#define FRS0_FSRF       0x01
+
+#define FRS1_TS16RA     0x40
+#define FRS1_TS16LOS    0x20
+#define FRS1_TS16AIS    0x10
+#define FRS1_TS16LFA    0x08
+#define FRS1_EXZD       0x80
+#define FRS1_LLBDD      0x10
+#define FRS1_LLBAD      0x08
+#define FRS1_XLS        0x02
+#define FRS1_XLO        0x01
+#define FRS1_PDEN	0x40
+
+/* FRS2/3 (Framer Receive Status Register 2/3)
+   ----------------- T1 ---------------------------------- */
+
+#define FRS2_ESC2       0x80
+#define FRS2_ESC1       0x40
+#define FRS2_ESC0       0x20
+
+#define FRS3_FEH5       0x20
+#define FRS3_FEH4       0x10
+#define FRS3_FEH3       0x08
+#define FRS3_FEH2       0x04
+#define FRS3_FEH1       0x02
+#define FRS3_FEH0       0x01
+
+
+/* RSW (Receive Service Word Pulseframe)
+   ----------------- E1 ------------------------------ */
+
+#define RSW_RSI         0x80
+#define RSW_RRA         0x20
+#define RSW_RYO         0x10
+#define RSW_RY1         0x08
+#define RSW_RY2         0x04
+#define RSW_RY3         0x02
+#define RSW_RY4         0x01
+
+
+/* RSP (Receive Spare Bits / Additional Status)
+   ---------------- E1 ------------------------------- */
+
+#define RSP_SI1         0x80
+#define RSP_SI2         0x40
+#define RSP_LLBDD	0x10
+#define RSP_LLBAD	0x08
+#define RSP_RSIF        0x04
+#define RSP_RS13        0x02
+#define RSP_RS15        0x01
+
+
+/* FECL (Framing Error Counter)
+   ---------------- E1 & T1 -------------------------- */
+
+#define FECL_FE7        0x80
+#define FECL_FE6        0x40
+#define FECL_FE5        0x20
+#define FECL_FE4        0x10
+#define FECL_FE3        0x08
+#define FECL_FE2        0x04
+#define FECL_FE1        0x02
+#define FECL_FE0        0x01
+
+#define FECH_FE15       0x80
+#define FECH_FE14       0x40
+#define FECH_FE13       0x20
+#define FECH_FE12       0x10
+#define FECH_FE11       0x08
+#define FECH_FE10       0x04
+#define FECH_FE9        0x02
+#define FECH_FE8        0x01
+
+
+/* CVCl (Code Violation Counter)
+   ----------------- E1 ------------------------- */
+
+#define CVCL_CV7        0x80
+#define CVCL_CV6        0x40
+#define CVCL_CV5        0x20
+#define CVCL_CV4        0x10
+#define CVCL_CV3        0x08
+#define CVCL_CV2        0x04
+#define CVCL_CV1        0x02
+#define CVCL_CV0        0x01
+
+#define CVCH_CV15       0x80
+#define CVCH_CV14       0x40
+#define CVCH_CV13       0x20
+#define CVCH_CV12       0x10
+#define CVCH_CV11       0x08
+#define CVCH_CV10       0x04
+#define CVCH_CV9        0x02
+#define CVCH_CV8        0x01
+
+
+/* CEC1-3L (CRC Error Counter)
+   ------------------ E1 ----------------------------- */
+
+#define CEC1L_CR7       0x80
+#define CEC1L_CR6       0x40
+#define CEC1L_CR5       0x20
+#define CEC1L_CR4       0x10
+#define CEC1L_CR3       0x08
+#define CEC1L_CR2       0x04
+#define CEC1L_CR1       0x02
+#define CEC1L_CR0       0x01
+
+#define CEC1H_CR15      0x80
+#define CEC1H_CR14      0x40
+#define CEC1H_CR13      0x20
+#define CEC1H_CR12      0x10
+#define CEC1H_CR11      0x08
+#define CEC1H_CR10      0x04
+#define CEC1H_CR9       0x02
+#define CEC1H_CR8       0x01
+
+#define CEC2L_CR7       0x80
+#define CEC2L_CR6       0x40
+#define CEC2L_CR5       0x20
+#define CEC2L_CR4       0x10
+#define CEC2L_CR3       0x08
+#define CEC2L_CR2       0x04
+#define CEC2L_CR1       0x02
+#define CEC2L_CR0       0x01
+
+#define CEC2H_CR15      0x80
+#define CEC2H_CR14      0x40
+#define CEC2H_CR13      0x20
+#define CEC2H_CR12      0x10
+#define CEC2H_CR11      0x08
+#define CEC2H_CR10      0x04
+#define CEC2H_CR9       0x02
+#define CEC2H_CR8       0x01
+
+#define CEC3L_CR7       0x80
+#define CEC3L_CR6       0x40
+#define CEC3L_CR5       0x20
+#define CEC3L_CR4       0x10
+#define CEC3L_CR3       0x08
+#define CEC3L_CR2       0x04
+#define CEC3L_CR1       0x02
+#define CEC3L_CR0       0x01
+
+#define CEC3H_CR15      0x80
+#define CEC3H_CR14      0x40
+#define CEC3H_CR13      0x20
+#define CEC3H_CR12      0x10
+#define CEC3H_CR11      0x08
+#define CEC3H_CR10      0x04
+#define CEC3H_CR9       0x02
+#define CEC3H_CR8       0x01
+
+
+/* CECL (CRC Error Counter)
+
+   ------------------ T1 ----------------------------- */
+
+#define CECL_CR7        0x80
+#define CECL_CR6        0x40
+#define CECL_CR5        0x20
+#define CECL_CR4        0x10
+#define CECL_CR3        0x08
+#define CECL_CR2        0x04
+#define CECL_CR1        0x02
+#define CECL_CR0        0x01
+
+#define CECH_CR15       0x80
+#define CECH_CR14       0x40
+#define CECH_CR13       0x20
+#define CECH_CR12       0x10
+#define CECH_CR11       0x08
+#define CECH_CR10       0x04
+#define CECH_CR9        0x02
+#define CECH_CR8        0x01
+
+/* EBCL (E Bit Error Counter)
+   ------------------- E1 & T1 ------------------------- */
+
+#define EBCL_EB7        0x80
+#define EBCL_EB6        0x40
+#define EBCL_EB5        0x20
+#define EBCL_EB4        0x10
+#define EBCL_EB3        0x08
+#define EBCL_EB2        0x04
+#define EBCL_EB1        0x02
+#define EBCL_EB0        0x01
+
+#define EBCH_EB15       0x80
+#define EBCH_EB14       0x40
+#define EBCH_EB13       0x20
+#define EBCH_EB12       0x10
+#define EBCH_EB11       0x08
+#define EBCH_EB10       0x04
+#define EBCH_EB9        0x02
+#define EBCH_EB8        0x01
+
+
+/* RSA4-8 (Receive Sa4-8-Bit Register)
+   -------------------- E1 --------------------------- */
+
+#define RSA4_RS47       0x80
+#define RSA4_RS46       0x40
+#define RSA4_RS45       0x20
+#define RSA4_RS44       0x10
+#define RSA4_RS43       0x08
+#define RSA4_RS42       0x04
+#define RSA4_RS41       0x02
+#define RSA4_RS40       0x01
+
+#define RSA5_RS57       0x80
+#define RSA5_RS56       0x40
+#define RSA5_RS55       0x20
+#define RSA5_RS54       0x10
+#define RSA5_RS53       0x08
+#define RSA5_RS52       0x04
+#define RSA5_RS51       0x02
+#define RSA5_RS50       0x01
+
+#define RSA6_RS67       0x80
+#define RSA6_RS66       0x40
+#define RSA6_RS65       0x20
+#define RSA6_RS64       0x10
+#define RSA6_RS63       0x08
+#define RSA6_RS62       0x04
+#define RSA6_RS61       0x02
+#define RSA6_RS60       0x01
+
+#define RSA7_RS77       0x80
+#define RSA7_RS76       0x40
+#define RSA7_RS75       0x20
+#define RSA7_RS74       0x10
+#define RSA7_RS73       0x08
+#define RSA7_RS72       0x04
+#define RSA7_RS71       0x02
+#define RSA7_RS70       0x01
+
+#define RSA8_RS87       0x80
+#define RSA8_RS86       0x40
+#define RSA8_RS85       0x20
+#define RSA8_RS84       0x10
+#define RSA8_RS83       0x08
+#define RSA8_RS82       0x04
+#define RSA8_RS81       0x02
+#define RSA8_RS80       0x01
+
+/* RSA6S (Receive Sa6 Bit Status Register)
+   ------------------------ T1 ------------------------- */
+
+#define RSA6S_SX        0x20
+#define RSA6S_SF        0x10
+#define RSA6S_SE        0x08
+#define RSA6S_SC        0x04
+#define RSA6S_SA        0x02
+#define RSA6S_S8        0x01
+
+
+/* RDL1-3 Receive DL-Bit Register1-3)
+   ------------------------ T1 ------------------------- */
+
+#define RDL1_RDL17      0x80
+#define RDL1_RDL16      0x40
+#define RDL1_RDL15      0x20
+#define RDL1_RDL14      0x10
+#define RDL1_RDL13      0x08
+#define RDL1_RDL12      0x04
+#define RDL1_RDL11      0x02
+#define RDL1_RDL10      0x01
+
+#define RDL2_RDL27      0x80
+#define RDL2_RDL26      0x40
+#define RDL2_RDL25      0x20
+#define RDL2_RDL24      0x10
+#define RDL2_RDL23      0x08
+#define RDL2_RDL22      0x04
+#define RDL2_RDL21      0x02
+#define RDL2_RDL20      0x01
+
+#define RDL3_RDL37      0x80
+#define RDL3_RDL36      0x40
+#define RDL3_RDL35      0x20
+#define RDL3_RDL34      0x10
+#define RDL3_RDL33      0x08
+#define RDL3_RDL32      0x04
+#define RDL3_RDL31      0x02
+#define RDL3_RDL30      0x01
+
+
+/* SIS (Signaling Status Register)
+
+   -------------------- E1 & T1 -------------------------- */
+
+#define SIS_XDOV        0x80
+#define SIS_XFW         0x40
+#define SIS_XREP        0x20
+#define SIS_RLI         0x08
+#define SIS_CEC         0x04
+#define SIS_BOM         0x01
+
+
+/* RSIS (Receive Signaling Status Register)
+
+   -------------------- E1 & T1 --------------------------- */
+
+#define RSIS_VFR        0x80
+#define RSIS_RDO        0x40
+#define RSIS_CRC16      0x20
+#define RSIS_RAB        0x10
+#define RSIS_HA1        0x08
+#define RSIS_HA0        0x04
+#define RSIS_HFR        0x02
+#define RSIS_LA         0x01
+
+
+/* RBCL/H (Receive Byte Count Low/High)
+
+   ------------------- E1 & T1 ----------------------- */
+
+#define RBCL_RBC7       0x80
+#define RBCL_RBC6       0x40
+#define RBCL_RBC5       0x20
+#define RBCL_RBC4       0x10
+#define RBCL_RBC3       0x08
+#define RBCL_RBC2       0x04
+#define RBCL_RBC1       0x02
+#define RBCL_RBC0       0x01
+
+#define RBCH_OV         0x10
+#define RBCH_RBC11      0x08
+#define RBCH_RBC10      0x04
+#define RBCH_RBC9       0x02
+#define RBCH_RBC8       0x01
+
+
+/* ISR1-3  (Interrupt Status Register 1-3)
+
+   ------------------ E1 & T1 ------------------------------ */
+
+#define  FISR0_RME	0x80
+#define  FISR0_RFS	0x40
+#define  FISR0_T8MS	0x20
+#define  FISR0_ISF	0x20
+#define  FISR0_RMB	0x10
+#define  FISR0_CASC	0x08
+#define  FISR0_RSC	0x08
+#define  FISR0_CRC6	0x04
+#define  FISR0_CRC4	0x04
+#define  FISR0_PDEN	0x02
+#define  FISR0_RPF	0x01
+
+#define  FISR1_CASE	0x80
+#define  FISR1_LLBSC	0x80
+#define  FISR1_RDO	0x40
+#define  FISR1_ALLS	0x20
+#define  FISR1_XDU	0x10
+#define  FISR1_XMB	0x08
+#define  FISR1_XLSC	0x02
+#define  FISR1_XPR	0x01
+
+#define  FISR2_FAR	0x80
+#define  FISR2_LFA	0x40
+#define  FISR2_MFAR	0x20
+#define  FISR2_T400MS	0x10
+#define  FISR2_LMFA	0x10
+#define  FISR2_AIS	0x08
+#define  FISR2_LOS	0x04
+#define  FISR2_RAR	0x02
+#define  FISR2_RA	0x01
+
+#define  FISR3_ES	0x80
+#define  FISR3_SEC	0x40
+#define  FISR3_LMFA16	0x20
+#define  FISR3_AIS16	0x10
+#define  FISR3_RA16	0x08
+#define  FISR3_API	0x04
+#define  FISR3_XSLP	0x20
+#define  FISR3_XSLN	0x10
+#define  FISR3_LLBSC	0x08
+#define  FISR3_XRS	0x04
+#define  FISR3_SLN	0x02
+#define  FISR3_SLP	0x01
+
+
+/* GIS  (Global Interrupt Status Register)
+
+   --------------------- E1 & T1 --------------------- */
+
+#define  GIS_ISR3	0x08
+#define  GIS_ISR2	0x04
+#define  GIS_ISR1	0x02
+#define  GIS_ISR0	0x01
+
+
+/* VSTR  (Version Status Register)
+
+   --------------------- E1 & T1 --------------------- */
+
+#define  VSTR_VN3	0x08
+#define  VSTR_VN2	0x04
+#define  VSTR_VN1	0x02
+#define  VSTR_VN0	0x01
+
+
+/*>>>>>>>>>>>>>>>>>>>>>  Local Control Structures  <<<<<<<<<<<<<<<<<<<<<<<<< */
+
+/* Write-only Registers (E1/T1 control mode write registers) */
+#define XFIFOH	0x00		/* Tx FIFO High Byte */
+#define XFIFOL	0x01		/* Tx FIFO Low Byte */
+#define CMDR	0x02		/* Command Reg */
+#define DEC	0x60		/* Disable Error Counter */
+#define TEST2	0x62		/* Manuf. Test Reg 2 */
+#define XS(nbr)	(0x70 + (nbr))	/* Tx CAS Reg (0 to 15) */
+
+/* Read-write Registers (E1/T1 status mode read registers) */
+#define MODE	0x03	/* Mode Reg */
+#define RAH1	0x04	/* Receive Address High 1 */
+#define RAH2	0x05	/* Receive Address High 2 */
+#define RAL1	0x06	/* Receive Address Low 1 */
+#define RAL2	0x07	/* Receive Address Low 2 */
+#define IPC	0x08	/* Interrupt Port Configuration */
+#define CCR1	0x09	/* Common Configuration Reg 1 */
+#define CCR3	0x0A	/* Common Configuration Reg 3 */
+#define PRE	0x0B	/* Preamble Reg */
+#define RTR1	0x0C	/* Receive Timeslot Reg 1 */
+#define RTR2	0x0D	/* Receive Timeslot Reg 2 */
+#define RTR3	0x0E	/* Receive Timeslot Reg 3 */
+#define RTR4	0x0F	/* Receive Timeslot Reg 4 */
+#define TTR1	0x10	/* Transmit Timeslot Reg 1 */
+#define TTR2	0x11	/* Transmit Timeslot Reg 2 */
+#define TTR3	0x12	/* Transmit Timeslot Reg 3 */
+#define TTR4	0x13	/* Transmit Timeslot Reg 4 */
+#define IMR0	0x14	/* Interrupt Mask Reg 0 */
+#define IMR1	0x15	/* Interrupt Mask Reg 1 */
+#define IMR2	0x16	/* Interrupt Mask Reg 2 */
+#define IMR3	0x17	/* Interrupt Mask Reg 3 */
+#define IMR4	0x18	/* Interrupt Mask Reg 4 */
+#define IMR5	0x19	/* Interrupt Mask Reg 5 */
+#define FMR0	0x1A	/* Framer Mode Register 0 */
+#define FMR1	0x1B	/* Framer Mode Register 1 */
+#define FMR2	0x1C	/* Framer Mode Register 2 */
+#define LOOP	0x1D	/* Channel Loop Back */
+#define XSW	0x1E	/* Transmit Service Word */
+#define FMR4	0x1E	/* Framer Mode Reg 4 */
+#define XSP	0x1F	/* Transmit Spare Bits */
+#define FMR5	0x1F	/* Framer Mode Reg 5 */
+#define XC0	0x20	/* Transmit Control 0 */
+#define XC1	0x21	/* Transmit Control 1 */
+#define RC0	0x22	/* Receive Control 0 */
+#define RC1	0x23	/* Receive Control 1 */
+#define XPM0	0x24	/* Transmit Pulse Mask 0 */
+#define XPM1	0x25	/* Transmit Pulse Mask 1 */
+#define XPM2	0x26	/* Transmit Pulse Mask 2 */
+#define TSWM	0x27	/* Transparent Service Word Mask */
+#define TEST1	0x28	/* Manuf. Test Reg 1 */
+#define IDLE	0x29	/* Idle Channel Code */
+#define XSA4    0x2A	/* Transmit SA4 Bit Reg */
+#define XDL1	0x2A	/* Transmit DL-Bit Reg 1 */
+#define XSA5    0x2B	/* Transmit SA5 Bit Reg */
+#define XDL2	0x2B	/* Transmit DL-Bit Reg 2 */
+#define XSA6    0x2C	/* Transmit SA6 Bit Reg */
+#define XDL3	0x2C	/* Transmit DL-Bit Reg 3 */
+#define XSA7    0x2D	/* Transmit SA7 Bit Reg */
+#define CCB1	0x2D	/* Clear Channel Reg 1 */
+#define XSA8    0x2E	/* Transmit SA8 Bit Reg */
+#define CCB2	0x2E	/* Clear Channel Reg 2 */
+#define FMR3	0x2F	/* Framer Mode Reg. 3 */
+#define CCB3	0x2F	/* Clear Channel Reg 3 */
+#define ICB1	0x30	/* Idle Channel Reg 1 */
+#define ICB2	0x31	/* Idle Channel Reg 2 */
+#define ICB3	0x32	/* Idle Channel Reg 3 */
+#define ICB4	0x33	/* Idle Channel Reg 4 */
+#define LIM0	0x34	/* Line Interface Mode 0 */
+#define LIM1	0x35	/* Line Interface Mode 1 */
+#define PCDR	0x36	/* Pulse Count Detection */
+#define PCRR	0x37	/* Pulse Count Recovery */
+#define LIM2	0x38	/* Line Interface Mode Reg 2 */
+#define LCR1	0x39	/* Loop Code Reg 1 */
+#define LCR2	0x3A	/* Loop Code Reg 2 */
+#define LCR3	0x3B	/* Loop Code Reg 3 */
+#define SIC1	0x3C	/* System Interface Control 1 */
+
+/* Read-only Registers (E1/T1 control mode read registers) */
+#define RFIFOH	0x00		/* Receive FIFO */
+#define RFIFOL	0x01		/* Receive FIFO */
+#define FRS0	0x4C		/* Framer Receive Status 0 */
+#define FRS1	0x4D		/* Framer Receive Status 1 */
+#define RSW	0x4E		/* Receive Service Word */
+#define FRS2	0x4E		/* Framer Receive Status 2 */
+#define RSP	0x4F		/* Receive Spare Bits */
+#define FRS3	0x4F		/* Framer Receive Status 3 */
+#define FECL	0x50		/* Framing Error Counter */
+#define FECH	0x51		/* Framing Error Counter */
+#define CVCL	0x52		/* Code Violation Counter */
+#define CVCH	0x53		/* Code Violation Counter */
+#define CECL	0x54		/* CRC Error Counter 1 */
+#define CECH	0x55		/* CRC Error Counter 1 */
+#define EBCL	0x56		/* E-Bit Error Counter */
+#define EBCH	0x57		/* E-Bit Error Counter */
+#define BECL	0x58		/* Bit Error Counter Low */
+#define BECH	0x59		/* Bit Error Counter High */
+#define CEC3	0x5A		/* CRC Error Counter 3 (16-bit) */
+#define RSA4	0x5C		/* Receive SA4 Bit Reg */
+#define RDL1	0x5C		/* Receive DL-Bit Reg 1 */
+#define RSA5	0x5D		/* Receive SA5 Bit Reg */
+#define RDL2	0x5D		/* Receive DL-Bit Reg 2 */
+#define RSA6	0x5E		/* Receive SA6 Bit Reg */
+#define RDL3	0x5E		/* Receive DL-Bit Reg 3 */
+#define RSA7	0x5F		/* Receive SA7 Bit Reg */
+#define RSA8	0x60		/* Receive SA8 Bit Reg */
+#define RSA6S	0x61		/* Receive SA6 Bit Status Reg */
+#define TSR0	0x62		/* Manuf. Test Reg 0 */
+#define TSR1	0x63		/* Manuf. Test Reg 1 */
+#define SIS	0x64		/* Signaling Status Reg */
+#define RSIS	0x65		/* Receive Signaling Status Reg */
+#define RBCL	0x66		/* Receive Byte Control */
+#define RBCH	0x67		/* Receive Byte Control */
+#define FISR0	0x68		/* Interrupt Status Reg 0 */
+#define FISR1	0x69		/* Interrupt Status Reg 1 */
+#define FISR2	0x6A		/* Interrupt Status Reg 2 */
+#define FISR3	0x6B		/* Interrupt Status Reg 3 */
+#define GIS	0x6E		/* Global Interrupt Status */
+#define VSTR	0x6F		/* Version Status */
+#define RS(nbr)	(0x70 + (nbr))	/* Rx CAS Reg (0 to 15) */
+
+#endif	/* _FALC_LH_H */
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/pc300.h b/ap/os/linux/linux-3.4.x/drivers/net/wan/pc300.h
new file mode 100644
index 0000000..2e4f84f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/pc300.h
@@ -0,0 +1,436 @@
+/*
+ * pc300.h	Cyclades-PC300(tm) Kernel API Definitions.
+ *
+ * Author:	Ivan Passos <ivan@cyclades.com>
+ *
+ * Copyright:	(c) 1999-2002 Cyclades Corp.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ * $Log: pc300.h,v $
+ * Revision 3.12  2002/03/07 14:17:09  henrique
+ * License data fixed
+ *
+ * Revision 3.11  2002/01/28 21:09:39  daniela
+ * Included ';' after pc300hw.bus.
+ *
+ * Revision 3.10  2002/01/17 17:58:52  ivan
+ * Support for PC300-TE/M (PMC).
+ *
+ * Revision 3.9  2001/09/28 13:30:53  daniela
+ * Renamed dma_start routine to rx_dma_start.
+ *
+ * Revision 3.8  2001/09/24 13:03:45  daniela
+ * Fixed BOF interrupt treatment. Created dma_start routine.
+ *
+ * Revision 3.7  2001/08/10 17:19:58  daniela
+ * Fixed IOCTLs defines.
+ *
+ * Revision 3.6  2001/07/18 19:24:42  daniela
+ * Included kernel version.
+ *
+ * Revision 3.5  2001/07/05 18:38:08  daniela
+ * DMA transmission bug fix.
+ *
+ * Revision 3.4  2001/06/26 17:10:40  daniela
+ * New configuration parameters (line code, CRC calculation and clock).
+ *
+ * Revision 3.3  2001/06/22 13:13:02  regina
+ * MLPPP implementation
+ *
+ * Revision 3.2  2001/06/18 17:56:09  daniela
+ * Increased DEF_MTU and TX_QUEUE_LEN.
+ *
+ * Revision 3.1  2001/06/15 12:41:10  regina
+ * upping major version number
+ *
+ * Revision 1.1.1.1  2001/06/13 20:25:06  daniela
+ * PC300 initial CVS version (3.4.0-pre1)
+ *
+ * Revision 2.3 2001/03/05 daniela
+ * Created struct pc300conf, to provide the hardware information to pc300util.
+ * Inclusion of 'alloc_ramsize' field on structure 'pc300hw'.
+ * 
+ * Revision 2.2 2000/12/22 daniela
+ * Structures and defines to support pc300util: statistics, status, 
+ * loopback tests, trace.
+ * 
+ * Revision 2.1 2000/09/28 ivan
+ * Inclusion of 'iophys' and 'iosize' fields on structure 'pc300hw', to 
+ * allow release of I/O region at module unload.
+ * Changed location of include files.
+ *
+ * Revision 2.0 2000/03/27 ivan
+ * Added support for the PC300/TE cards.
+ *
+ * Revision 1.1 2000/01/31 ivan
+ * Replaced 'pc300[drv|sca].h' former PC300 driver include files.
+ *
+ * Revision 1.0 1999/12/16 ivan
+ * First official release.
+ * Inclusion of 'nchan' field on structure 'pc300hw', to allow variable 
+ * number of ports per card.
+ * Inclusion of 'if_ptr' field on structure 'pc300dev'.
+ *
+ * Revision 0.6 1999/11/17 ivan
+ * Changed X.25-specific function names to comply with adopted convention.
+ *
+ * Revision 0.5 1999/11/16 Daniela Squassoni
+ * X.25 support.
+ *
+ * Revision 0.4 1999/11/15 ivan
+ * Inclusion of 'clock' field on structure 'pc300hw'.
+ *
+ * Revision 0.3 1999/11/10 ivan
+ * IOCTL name changing.
+ * Inclusion of driver function prototypes.
+ *
+ * Revision 0.2 1999/11/03 ivan
+ * Inclusion of 'tx_skb' and union 'ifu' on structure 'pc300dev'.
+ *
+ * Revision 0.1 1999/01/15 ivan
+ * Initial version.
+ *
+ */
+
+#ifndef	_PC300_H
+#define	_PC300_H
+
+#include <linux/hdlc.h>
+#include "hd64572.h"
+#include "pc300-falc-lh.h"
+
+#define PC300_PROTO_MLPPP 1
+
+#define	PC300_MAXCHAN	2	/* Number of channels per card */
+
+#define	PC300_RAMSIZE	0x40000 /* RAM window size (256KB) */
+#define	PC300_FALCSIZE	0x400	/* FALC window size (1KB) */
+
+#define PC300_OSC_CLOCK	24576000
+#define PC300_PCI_CLOCK	33000000
+
+#define BD_DEF_LEN	0x0800	/* DMA buffer length (2KB) */
+#define DMA_TX_MEMSZ	0x8000	/* Total DMA Tx memory size (32KB/ch) */
+#define DMA_RX_MEMSZ	0x10000	/* Total DMA Rx memory size (64KB/ch) */
+
+#define N_DMA_TX_BUF	(DMA_TX_MEMSZ / BD_DEF_LEN)	/* DMA Tx buffers */
+#define N_DMA_RX_BUF	(DMA_RX_MEMSZ / BD_DEF_LEN)	/* DMA Rx buffers */
+
+/* DMA Buffer Offsets */
+#define DMA_TX_BASE	((N_DMA_TX_BUF + N_DMA_RX_BUF) *	\
+			 PC300_MAXCHAN * sizeof(pcsca_bd_t))
+#define DMA_RX_BASE	(DMA_TX_BASE + PC300_MAXCHAN*DMA_TX_MEMSZ)
+
+/* DMA Descriptor Offsets */
+#define DMA_TX_BD_BASE	0x0000
+#define DMA_RX_BD_BASE	(DMA_TX_BD_BASE + ((PC300_MAXCHAN*DMA_TX_MEMSZ / \
+				BD_DEF_LEN) * sizeof(pcsca_bd_t)))
+
+/* DMA Descriptor Macros */
+#define TX_BD_ADDR(chan, n)	(DMA_TX_BD_BASE + \
+				 ((N_DMA_TX_BUF*chan) + n) * sizeof(pcsca_bd_t))
+#define RX_BD_ADDR(chan, n)	(DMA_RX_BD_BASE + \
+				 ((N_DMA_RX_BUF*chan) + n) * sizeof(pcsca_bd_t))
+
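As a rough sketch of how the layout above works out: with BD_DEF_LEN = 0x800, N_DMA_TX_BUF evaluates to 16 and N_DMA_RX_BUF to 32 per channel, so the descriptors of both channels sit at the bottom of the on-board RAM window, followed by the Tx data area at DMA_TX_BASE and the Rx data area at DMA_RX_BASE. Assuming hypothetical locals 'card', 'chan' and 'n', one Tx block's descriptor and data buffer could be located like this:

    /* sketch only: addresses inside the card's on-board RAM window (rambase) */
    void __iomem *tx_bd  = card->hw.rambase + TX_BD_ADDR(chan, n);
    void __iomem *tx_buf = card->hw.rambase + DMA_TX_BASE +
                           chan * DMA_TX_MEMSZ + n * BD_DEF_LEN;
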
+/* Macro to access the FALC registers (TE only) */
+#define F_REG(reg, chan)	(0x200*(chan) + ((reg)<<2))
+
+/***************************************
+ * Memory access functions/macros      *
+ * (required to support Alpha systems) *
+ ***************************************/
+#define cpc_writeb(port,val)	{writeb((u8)(val),(port)); mb();}
+#define cpc_writew(port,val)	{writew((ushort)(val),(port)); mb();}
+#define cpc_writel(port,val)	{writel((u32)(val),(port)); mb();}
+
+#define cpc_readb(port)		readb(port)
+#define cpc_readw(port)		readw(port)
+#define cpc_readl(port)		readl(port)
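
These wrappers pair every MMIO write with a full memory barrier so that register writes are not reordered on weakly-ordered machines such as Alpha (hence the note above). Combined with F_REG(), which works out to 0x200*chan + (reg << 2), so that for example F_REG(CMDR, 1) evaluates to 0x208, a minimal sketch of poking a FALC register might look as follows ('card' and 'ch' are assumed locals, not names from this header):

    /* sketch: issue a transmit reset command (XRES) to the FALC of channel 'ch' */
    cpc_writeb(card->hw.falcbase + F_REG(CMDR, ch), CMDR_XRES);
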
+
+/****** Data Structures *****************************************************/
+
+/*
+ *      RUNTIME_9050 - PLX PCI9050-1 local configuration and shared runtime
+ *      registers. This structure can be used to access the 9050 registers
+ *      (memory mapped).
+ */
+struct RUNTIME_9050 {
+	u32 loc_addr_range[4];	/* 00-0Ch : Local Address Ranges */
+	u32 loc_rom_range;	/* 10h : Local ROM Range */
+	u32 loc_addr_base[4];	/* 14-20h : Local Address Base Addrs */
+	u32 loc_rom_base;	/* 24h : Local ROM Base */
+	u32 loc_bus_descr[4];	/* 28-34h : Local Bus Descriptors */
+	u32 rom_bus_descr;	/* 38h : ROM Bus Descriptor */
+	u32 cs_base[4];		/* 3C-48h : Chip Select Base Addrs */
+	u32 intr_ctrl_stat;	/* 4Ch : Interrupt Control/Status */
+	u32 init_ctrl;		/* 50h : EEPROM ctrl, Init Ctrl, etc */
+};
+
+#define PLX_9050_LINT1_ENABLE	0x01
+#define PLX_9050_LINT1_POL	0x02
+#define PLX_9050_LINT1_STATUS	0x04
+#define PLX_9050_LINT2_ENABLE	0x08
+#define PLX_9050_LINT2_POL	0x10
+#define PLX_9050_LINT2_STATUS	0x20
+#define PLX_9050_INTR_ENABLE	0x40
+#define PLX_9050_SW_INTR	0x80
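
A minimal sketch, assuming a hypothetical 'card' whose hw.plxbase points at the memory-mapped PLX registers, of how the RUNTIME_9050 layout and the bits above could be combined to enable the two local interrupt inputs and the global PCI interrupt:

    struct RUNTIME_9050 __iomem *plx = card->hw.plxbase;

    cpc_writel(&plx->intr_ctrl_stat,
               cpc_readl(&plx->intr_ctrl_stat) |
               PLX_9050_LINT1_ENABLE | PLX_9050_LINT2_ENABLE |
               PLX_9050_INTR_ENABLE);
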
+
+/* Masks to access the init_ctrl PLX register */
+#define	PC300_CLKSEL_MASK		(0x00000004UL)
+#define	PC300_CHMEDIA_MASK(chan)	(0x00000020UL<<(chan*3))
+#define	PC300_CTYPE_MASK		(0x00000800UL)
+
+/* CPLD Registers (base addr = falcbase, TE only) */
+/* CPLD v. 0 */
+#define CPLD_REG1	0x140	/* Chip resets, DCD/CTS status */
+#define CPLD_REG2	0x144	/* Clock enable, LED control */
+/* CPLD v. 2 or higher */
+#define CPLD_V2_REG1	0x100	/* Chip resets, DCD/CTS status */
+#define CPLD_V2_REG2	0x104	/* Clock enable, LED control */
+#define CPLD_ID_REG	0x108	/* CPLD version */
+
+/* CPLD Register bit description: for the FALC bits, they should always be 
+   set based on the channel (use (bit<<(2*ch)) to access the correct bit for 
+   that channel) */
+#define CPLD_REG1_FALC_RESET	0x01
+#define CPLD_REG1_SCA_RESET	0x02
+#define CPLD_REG1_GLOBAL_CLK	0x08
+#define CPLD_REG1_FALC_DCD	0x10
+#define CPLD_REG1_FALC_CTS	0x20
+
+#define CPLD_REG2_FALC_TX_CLK	0x01
+#define CPLD_REG2_FALC_RX_CLK	0x02
+#define CPLD_REG2_FALC_LED1	0x10
+#define CPLD_REG2_FALC_LED2	0x20
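
Following the (bit << (2*ch)) convention described above, channel 0 uses the bit values as defined while channel 1 uses them shifted left by two; for instance the DCD status of channel 1 lives at CPLD_REG1_FALC_DCD << 2 = 0x40. A minimal sketch, assuming hypothetical locals 'regs' (the mapped CPLD/FALC window base) and 'ch':

    /* sketch: test the DCD status bit of channel 'ch' in CPLD register 1 */
    int dcd_up = cpc_readb(regs + CPLD_REG1) &
                 (CPLD_REG1_FALC_DCD << (2 * ch));
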
+
+/* Structure with FALC-related fields (TE only) */
+#define PC300_FALC_MAXLOOP	0x0000ffff	/* for falc_issue_cmd() */
+
+typedef struct falc {
+	u8 sync;	/* If true FALC is synchronized */
+	u8 active;	/* if TRUE then already active */
+	u8 loop_active;	/* if TRUE a line loopback UP was received */
+	u8 loop_gen;	/* if TRUE a line loopback UP was issued */
+
+	u8 num_channels;
+	u8 offset;	/* 1 for T1, 0 for E1 */
+	u8 full_bandwidth;
+
+	u8 xmb_cause;
+	u8 multiframe_mode;
+
+	/* Statistics */
+	u16 pden;	/* Pulse Density violation count */
+	u16 los;	/* Loss of Signal count */
+	u16 losr;	/* Loss of Signal recovery count */
+	u16 lfa;	/* Loss of frame alignment count */
+	u16 farec;	/* Frame Alignment Recovery count */
+	u16 lmfa;	/* Loss of multiframe alignment count */
+	u16 ais;	/* Remote Alarm indication Signal count */
+	u16 sec;	/* One-second timer */
+	u16 es;		/* Errored second */
+	u16 rai;	/* remote alarm received */
+	u16 bec;
+	u16 fec;
+	u16 cvc;
+	u16 cec;
+	u16 ebc;
+
+	/* Status */
+	u8 red_alarm;
+	u8 blue_alarm;
+	u8 loss_fa;
+	u8 yellow_alarm;
+	u8 loss_mfa;
+	u8 prbs;
+} falc_t;
+
+typedef struct falc_status {
+	u8 sync;	/* If true FALC is synchronized */
+	u8 red_alarm;
+	u8 blue_alarm;
+	u8 loss_fa;
+	u8 yellow_alarm;
+	u8 loss_mfa;
+	u8 prbs;
+} falc_status_t;
+
+typedef struct rsv_x21_status {
+	u8 dcd;
+	u8 dsr;
+	u8 cts;
+	u8 rts;
+	u8 dtr;
+} rsv_x21_status_t;
+
+typedef struct pc300stats {
+	int hw_type;
+	u32 line_on;
+	u32 line_off;
+	struct net_device_stats gen_stats;
+	falc_t te_stats;
+} pc300stats_t;
+
+typedef struct pc300status {
+	int hw_type;
+	rsv_x21_status_t gen_status;
+	falc_status_t te_status;
+} pc300status_t;
+
+typedef struct pc300loopback {
+	char loop_type;
+	char loop_on;
+} pc300loopback_t;
+
+typedef struct pc300patterntst {
+	char patrntst_on;       /* 0 - off; 1 - on; 2 - read num_errors */
+	u16 num_errors;
+} pc300patterntst_t;
+
+typedef struct pc300dev {
+	struct pc300ch *chan;
+	u8 trace_on;
+	u32 line_on;		/* DCD(X.21, RSV) / sync(TE) change counters */
+	u32 line_off;
+	char name[16];
+	struct net_device *dev;
+#ifdef CONFIG_PC300_MLPPP
+	void *cpc_tty;	/* information to PC300 TTY driver */
+#endif
+} pc300dev_t;
+
+typedef struct pc300hw {
+	int type;		/* RSV, X21, etc. */
+	int bus;		/* Bus (PCI, PMC, etc.) */
+	int nchan;		/* number of channels */
+	int irq;		/* interrupt request level */
+	u32 clock;		/* Board clock */
+	u8 cpld_id;		/* CPLD ID (TE only) */
+	u16 cpld_reg1;		/* CPLD reg 1 (TE only) */
+	u16 cpld_reg2;		/* CPLD reg 2 (TE only) */
+	u16 gpioc_reg;		/* PLX GPIOC reg */
+	u16 intctl_reg;		/* PLX Int Ctrl/Status reg */
+	u32 iophys;		/* PLX registers I/O base */
+	u32 iosize;		/* PLX registers I/O size */
+	u32 plxphys;		/* PLX registers MMIO base (physical) */
+	void __iomem * plxbase;	/* PLX registers MMIO base (virtual) */
+	u32 plxsize;		/* PLX registers MMIO size */
+	u32 scaphys;		/* SCA registers MMIO base (physical) */
+	void __iomem * scabase;	/* SCA registers MMIO base (virtual) */
+	u32 scasize;		/* SCA registers MMIO size */
+	u32 ramphys;		/* On-board RAM MMIO base (physical) */
+	void __iomem * rambase;	/* On-board RAM MMIO base (virtual) */
+	u32 alloc_ramsize;	/* RAM MMIO size allocated by the PCI bridge */
+	u32 ramsize;		/* On-board RAM MMIO size */
+	u32 falcphys;		/* FALC registers MMIO base (physical) */
+	void __iomem * falcbase;/* FALC registers MMIO base (virtual) */
+	u32 falcsize;		/* FALC registers MMIO size */
+} pc300hw_t;
+
+typedef struct pc300chconf {
+	sync_serial_settings	phys_settings;	/* Clock type/rate (in bps),
+						   loopback mode */
+	raw_hdlc_proto		proto_settings;	/* Encoding, parity (CRC) */
+	u32 media;		/* HW media (RS232, V.35, etc.) */
+	u32 proto;		/* Protocol (PPP, X.25, etc.) */
+
+	/* TE-specific parameters */
+	u8 lcode;		/* Line Code (AMI, B8ZS, etc.) */
+	u8 fr_mode;		/* Frame Mode (ESF, D4, etc.) */
+	u8 lbo;			/* Line Build Out */
+	u8 rx_sens;		/* Rx Sensitivity (long- or short-haul) */
+	u32 tslot_bitmap;	/* bit[i]=1  =>  timeslot _i_ is active */
+} pc300chconf_t;
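
As a small illustration of tslot_bitmap, assuming a hypothetical pc300chconf_t variable 'conf': an E1 link carrying data on timeslots 1-15 and 17-31, keeping TS0 (framing) and TS16 (often CAS signalling) out of the data path, would set every bit except bit 0 and bit 16:

    conf.tslot_bitmap = 0xfffefffe;	/* timeslots 1-15 and 17-31 active */
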
+
+typedef struct pc300ch {
+	struct pc300 *card;
+	int channel;
+	pc300dev_t d;
+	pc300chconf_t conf;
+	u8 tx_first_bd;	/* First TX DMA block descr. w/ data */
+	u8 tx_next_bd;	/* Next free TX DMA block descriptor */
+	u8 rx_first_bd;	/* First free RX DMA block descriptor */
+	u8 rx_last_bd;	/* Last free RX DMA block descriptor */
+	u8 nfree_tx_bd;	/* Number of free TX DMA block descriptors */
+	falc_t falc;	/* FALC structure (TE only) */
+} pc300ch_t;
+
+typedef struct pc300 {
+	pc300hw_t hw;			/* hardware config. */
+	pc300ch_t chan[PC300_MAXCHAN];
+	spinlock_t card_lock;
+} pc300_t;
+
+typedef struct pc300conf {
+	pc300hw_t hw;
+	pc300chconf_t conf;
+} pc300conf_t;
+
+/* DEV ioctl() commands */
+#define	N_SPPP_IOCTLS	2
+
+enum pc300_ioctl_cmds {
+	SIOCCPCRESERVED = (SIOCDEVPRIVATE + N_SPPP_IOCTLS),
+	SIOCGPC300CONF,
+	SIOCSPC300CONF,
+	SIOCGPC300STATUS,
+	SIOCGPC300FALCSTATUS,
+	SIOCGPC300UTILSTATS,
+	SIOCGPC300UTILSTATUS,
+	SIOCSPC300TRACE,
+	SIOCSPC300LOOPBACK,
+	SIOCSPC300PATTERNTEST,
+};
+
+/* Loopback types - PC300/TE boards */
+enum pc300_loopback_cmds {
+	PC300LOCLOOP = 1,
+	PC300REMLOOP,
+	PC300PAYLOADLOOP,
+	PC300GENLOOPUP,
+	PC300GENLOOPDOWN,
+};
+
+/* Control Constant Definitions */
+#define	PC300_RSV	0x01
+#define	PC300_X21	0x02
+#define	PC300_TE	0x03
+
+#define	PC300_PCI	0x00
+#define	PC300_PMC	0x01
+
+#define PC300_LC_AMI	0x01
+#define PC300_LC_B8ZS	0x02
+#define PC300_LC_NRZ	0x03
+#define PC300_LC_HDB3	0x04
+
+/* Framing (T1) */
+#define PC300_FR_ESF		0x01
+#define PC300_FR_D4		0x02
+#define PC300_FR_ESF_JAPAN	0x03
+
+/* Framing (E1) */
+#define PC300_FR_MF_CRC4	0x04
+#define PC300_FR_MF_NON_CRC4	0x05
+#define PC300_FR_UNFRAMED	0x06
+
+#define PC300_LBO_0_DB		0x00
+#define PC300_LBO_7_5_DB	0x01
+#define PC300_LBO_15_DB		0x02
+#define PC300_LBO_22_5_DB	0x03
+
+#define PC300_RX_SENS_SH	0x01
+#define PC300_RX_SENS_LH	0x02
+
+#define PC300_TX_TIMEOUT	(2*HZ)
+#define PC300_TX_QUEUE_LEN	100
+#define	PC300_DEF_MTU		1600
+
+/* Function Prototypes */
+int cpc_open(struct net_device *dev);
+
+#endif	/* _PC300_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/pc300_drv.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/pc300_drv.c
new file mode 100644
index 0000000..cb0f8d9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/pc300_drv.c
@@ -0,0 +1,3670 @@
+#define	USE_PCI_CLOCK
+static const char rcsid[] =
+"Revision: 3.4.5 Date: 2002/03/07 ";
+
+/*
+ * pc300.c	Cyclades-PC300(tm) Driver.
+ *
+ * Author:	Ivan Passos <ivan@cyclades.com>
+ * Maintainer:	PC300 Maintainer <pc300@cyclades.com>
+ *
+ * Copyright:	(c) 1999-2003 Cyclades Corp.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *	
+ *	Using tabstop = 4.
+ * 
+ * $Log: pc300_drv.c,v $
+ * Revision 3.23  2002/03/20 13:58:40  henrique
+ * Fixed orthographic mistakes
+ *
+ * Revision 3.22  2002/03/13 16:56:56  henrique
+ * Take out the debug messages
+ *
+ * Revision 3.21  2002/03/07 14:17:09  henrique
+ * License data fixed
+ *
+ * Revision 3.20  2002/01/17 17:58:52  ivan
+ * Support for PC300-TE/M (PMC).
+ *
+ * Revision 3.19  2002/01/03 17:08:47  daniela
+ * Enables DMA reception when the SCA-II disables it improperly.
+ *
+ * Revision 3.18  2001/12/03 18:47:50  daniela
+ * Esthetic changes.
+ *
+ * Revision 3.17  2001/10/19 16:50:13  henrique
+ * Patch to kernel 2.4.12 and new generic hdlc.
+ *
+ * Revision 3.16  2001/10/16 15:12:31  regina
+ * clear statistics
+ *
+ * Revision 3.11 to 3.15  2001/10/11 20:26:04  daniela
+ * More DMA fixes for noisy lines.
+ * Return the size of bad frames in dma_get_rx_frame_size, so that the Rx buffer
+ * descriptors can be cleaned by dma_buf_read (called in cpc_net_rx).
+ * Renamed dma_start routine to rx_dma_start. Improved Rx statistics.
+ * Fixed BOF interrupt treatment. Created dma_start routine.
+ * Changed min and max to cpc_min and cpc_max.
+ *
+ * Revision 3.10  2001/08/06 12:01:51  regina
+ * Fixed problem in DSR_DE bit.
+ *
+ * Revision 3.9  2001/07/18 19:27:26  daniela
+ * Added some history comments.
+ *
+ * Revision 3.8  2001/07/12 13:11:19  regina
+ * bug fix - DCD-OFF in pc300 tty driver
+ *
+ * Revision 3.3 to 3.7  2001/07/06 15:00:20  daniela
+ * Removing kernel 2.4.3 and previous support.
+ * DMA transmission bug fix.
+ * MTU check in cpc_net_rx fixed.
+ * Boot messages reviewed.
+ * New configuration parameters (line code, CRC calculation and clock).
+ *
+ * Revision 3.2 2001/06/22 13:13:02  regina
+ * MLPPP implementation. Changed the header of message trace to include
+ * the device name. New format : "hdlcX[R/T]: ".
+ * Default configuration changed.
+ *
+ * Revision 3.1 2001/06/15 regina
+ * in cpc_queue_xmit, netif_stop_queue is called if don't have free descriptor
+ * upping major version number
+ *
+ * Revision 1.1.1.1  2001/06/13 20:25:04  daniela
+ * PC300 initial CVS version (3.4.0-pre1)
+ *
+ * Revision 3.0.1.2 2001/06/08 daniela
+ * Did some changes in the DMA programming implementation to avoid the 
+ * occurrence of a SCA-II bug when CDA is accessed during a DMA transfer.
+ *
+ * Revision 3.0.1.1 2001/05/02 daniela
+ * Added kernel 2.4.3 support.
+ * 
+ * Revision 3.0.1.0 2001/03/13 daniela, henrique
+ * Added Frame Relay Support.
+ * Driver now uses HDLC generic driver to provide protocol support.
+ * 
+ * Revision 3.0.0.8 2001/03/02 daniela
+ * Fixed ram size detection. 
+ * Changed SIOCGPC300CONF ioctl, to give hw information to pc300util.
+ * 
+ * Revision 3.0.0.7 2001/02/23 daniela
+ * netif_stop_queue called before the SCA-II transmission commands in
+ * cpc_queue_xmit, and with interrupts disabled to avoid race conditions with
+ * transmission interrupts.
+ * Fixed falc_check_status for Unframed E1.
+ * 
+ * Revision 3.0.0.6 2000/12/13 daniela
+ * Implemented pc300util support: trace, statistics, status and loopback
+ * tests for the PC300 TE boards.
+ * 
+ * Revision 3.0.0.5 2000/12/12 ivan
+ * Added support for Unframed E1.
+ * Implemented monitor mode.
+ * Fixed DCD sensitivity on the second channel.
+ * Driver now complies with new PCI kernel architecture.
+ *
+ * Revision 3.0.0.4 2000/09/28 ivan
+ * Implemented DCD sensitivity.
+ * Moved hardware-specific open to the end of cpc_open, to avoid race
+ * conditions with early reception interrupts.
+ * Included code for [request|release]_mem_region().
+ * Changed location of pc300.h .
+ * Minor code revision (contrib. of Jeff Garzik).
+ *
+ * Revision 3.0.0.3 2000/07/03 ivan
+ * Previous bugfix for the framing errors with external clock made X21
+ * boards stop working. This version fixes it.
+ *
+ * Revision 3.0.0.2 2000/06/23 ivan
+ * Revisited cpc_queue_xmit to prevent race conditions on Tx DMA buffer
+ * handling when Tx timeouts occur.
+ * Revisited Rx statistics.
+ * Fixed a bug in the SCA-II programming that would cause framing errors
+ * when external clock was configured.
+ *
+ * Revision 3.0.0.1 2000/05/26 ivan
+ * Added logic in the SCA interrupt handler so that no board can monopolize
+ * the driver.
+ * Request PLX I/O region, although driver doesn't use it, to avoid
+ * problems with other drivers accessing it.
+ *
+ * Revision 3.0.0.0 2000/05/15 ivan
+ * Did some changes in the DMA programming implementation to avoid the
+ * occurrence of a SCA-II bug in the second channel.
+ * Implemented workaround for PLX9050 bug that would cause a system lockup
+ * in certain systems, depending on the MMIO addresses allocated to the
+ * board.
+ * Fixed the FALC chip programming to avoid synchronization problems in the
+ * second channel (TE only).
+ * Implemented a cleaner and faster Tx DMA descriptor cleanup procedure in
+ * cpc_queue_xmit().
+ * Changed the built-in driver implementation so that the driver can use the
+ * general 'hdlcN' naming convention instead of proprietary device names.
+ * Driver load messages are now device-centric, instead of board-centric.
+ * Dynamic allocation of net_device structures.
+ * Code is now compliant with the new module interface (module_[init|exit]).
+ * Make use of the PCI helper functions to access PCI resources.
+ *
+ * Revision 2.0.0.0 2000/04/15 ivan
+ * Added support for the PC300/TE boards (T1/FT1/E1/FE1).
+ *
+ * Revision 1.1.0.0 2000/02/28 ivan
+ * Major changes in the driver architecture.
+ * Softnet compliancy implemented.
+ * Driver now reports physical instead of virtual memory addresses.
+ * Added cpc_change_mtu function.
+ *
+ * Revision 1.0.0.0 1999/12/16 ivan
+ * First official release.
+ * Support for 1- and 2-channel boards (which use distinct PCI Device ID's).
+ * Support for monolithic installation (i.e., drv built into the kernel).
+ * X.25 additional checking when lapb_[dis]connect_request returns an error.
+ * SCA programming now covers X.21 as well.
+ *
+ * Revision 0.3.1.0 1999/11/18 ivan
+ * Made X.25 support configuration-dependent (as it depends on external 
+ * modules to work).
+ * Changed X.25-specific function names to comply with adopted convention.
+ * Fixed typos in X.25 functions that would cause compile errors (Daniela).
+ * Fixed bug in ch_config that would disable interrupts on a previously 
+ * enabled channel if the other channel on the same board was enabled later.
+ *
+ * Revision 0.3.0.0 1999/11/16 daniela
+ * X.25 support.
+ *
+ * Revision 0.2.3.0 1999/11/15 ivan
+ * Function cpc_ch_status now provides more detailed information.
+ * Added support for X.21 clock configuration.
+ * Changed TNR1 setting in order to prevent Tx FIFO overaccesses by the SCA.
+ * Now using PCI clock instead of internal oscillator clock for the SCA.
+ *
+ * Revision 0.2.2.0 1999/11/10 ivan
+ * Changed the *_dma_buf_check functions so that they would print only 
+ * the useful info instead of the whole buffer descriptor bank.
+ * Fixed bug in cpc_queue_xmit that would eventually crash the system 
+ * in case of a packet drop.
+ * Implemented TX underrun handling.
+ * Improved SCA fine tuning to boost up its performance.
+ *
+ * Revision 0.2.1.0 1999/11/03 ivan
+ * Added functions *dma_buf_pt_init to allow independent initialization 
+ * of the next-descr. and DMA buffer pointers on the DMA descriptors.
+ * Kernel buffer release and tbusy clearing is now done in the interrupt 
+ * handler.
+ * Fixed bug in cpc_open that would cause an interface reopen to fail.
+ * Added a protocol-specific code section in cpc_net_rx.
+ * Removed printk level defs (they might be added back after the beta phase).
+ *
+ * Revision 0.2.0.0 1999/10/28 ivan
+ * Revisited the code so that new protocols can be easily added / supported. 
+ *
+ * Revision 0.1.0.1 1999/10/20 ivan
+ * Mostly "esthetic" changes.
+ *
+ * Revision 0.1.0.0 1999/10/11 ivan
+ * Initial version.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <linux/if.h>
+#include <linux/slab.h>
+#include <net/arp.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include "pc300.h"
+
+#define	CPC_LOCK(card,flags)		\
+		do {						\
+		spin_lock_irqsave(&card->card_lock, flags);	\
+		} while (0)
+
+#define CPC_UNLOCK(card,flags)			\
+		do {							\
+		spin_unlock_irqrestore(&card->card_lock, flags);	\
+		} while (0)
+
+#undef	PC300_DEBUG_PCI
+#undef	PC300_DEBUG_INTR
+#undef	PC300_DEBUG_TX
+#undef	PC300_DEBUG_RX
+#undef	PC300_DEBUG_OTHER
+
+static DEFINE_PCI_DEVICE_TABLE(cpc_pci_dev_id) = {
+	/* PC300/RSV or PC300/X21, 2 chan */
+	{0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300},
+	/* PC300/RSV or PC300/X21, 1 chan */
+	{0x120e, 0x301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x301},
+	/* PC300/TE, 2 chan */
+	{0x120e, 0x310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x310},
+	/* PC300/TE, 1 chan */
+	{0x120e, 0x311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x311},
+	/* PC300/TE-M, 2 chan */
+	{0x120e, 0x320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x320},
+	/* PC300/TE-M, 1 chan */
+	{0x120e, 0x321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x321},
+	/* End of table */
+	{0,},
+};
+MODULE_DEVICE_TABLE(pci, cpc_pci_dev_id);
+
+#ifndef cpc_min
+#define	cpc_min(a,b)	(((a)<(b))?(a):(b))
+#endif
+#ifndef cpc_max
+#define	cpc_max(a,b)	(((a)>(b))?(a):(b))
+#endif
+
+/* prototypes */
+static void tx_dma_buf_pt_init(pc300_t *, int);
+static void tx_dma_buf_init(pc300_t *, int);
+static void rx_dma_buf_pt_init(pc300_t *, int);
+static void rx_dma_buf_init(pc300_t *, int);
+static void tx_dma_buf_check(pc300_t *, int);
+static void rx_dma_buf_check(pc300_t *, int);
+static irqreturn_t cpc_intr(int, void *);
+static int clock_rate_calc(u32, u32, int *);
+static u32 detect_ram(pc300_t *);
+static void plx_init(pc300_t *);
+static void cpc_trace(struct net_device *, struct sk_buff *, char);
+static int cpc_attach(struct net_device *, unsigned short, unsigned short);
+static int cpc_close(struct net_device *dev);
+
+#ifdef CONFIG_PC300_MLPPP
+void cpc_tty_init(pc300dev_t * dev);
+void cpc_tty_unregister_service(pc300dev_t * pc300dev);
+void cpc_tty_receive(pc300dev_t * pc300dev);
+void cpc_tty_trigger_poll(pc300dev_t * pc300dev);
+#endif
+
+/************************/
+/***   DMA Routines   ***/
+/************************/
+static void tx_dma_buf_pt_init(pc300_t * card, int ch)
+{
+	int i;
+	int ch_factor = ch * N_DMA_TX_BUF;
+	volatile pcsca_bd_t __iomem *ptdescr = (card->hw.rambase
+			               + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
+
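+	/* Link this channel's Tx descriptors into a circular ring: each
+	 * "next" pointer wraps modulo N_DMA_TX_BUF, and each descriptor
+	 * points at its own BD_DEF_LEN data buffer. */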
+	for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) {
+		cpc_writel(&ptdescr->next, (u32)(DMA_TX_BD_BASE +
+			(ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t)));
+		cpc_writel(&ptdescr->ptbuf,
+			   (u32)(DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN));
+	}
+}
+
+static void tx_dma_buf_init(pc300_t * card, int ch)
+{
+	int i;
+	int ch_factor = ch * N_DMA_TX_BUF;
+	volatile pcsca_bd_t __iomem *ptdescr = (card->hw.rambase
+			       + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
+
+	for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) {
+		memset_io(ptdescr, 0, sizeof(pcsca_bd_t));
+		cpc_writew(&ptdescr->len, 0);
+		cpc_writeb(&ptdescr->status, DST_OSB);
+	}
+	tx_dma_buf_pt_init(card, ch);
+}
+
+static void rx_dma_buf_pt_init(pc300_t * card, int ch)
+{
+	int i;
+	int ch_factor = ch * N_DMA_RX_BUF;
+	volatile pcsca_bd_t __iomem *ptdescr = (card->hw.rambase
+				       + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
+
+	for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) {
+		cpc_writel(&ptdescr->next, (u32)(DMA_RX_BD_BASE +
+			(ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t)));
+		cpc_writel(&ptdescr->ptbuf,
+			   (u32)(DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN));
+	}
+}
+
+static void rx_dma_buf_init(pc300_t * card, int ch)
+{
+	int i;
+	int ch_factor = ch * N_DMA_RX_BUF;
+	volatile pcsca_bd_t __iomem *ptdescr = (card->hw.rambase
+				       + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
+
+	for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) {
+		memset_io(ptdescr, 0, sizeof(pcsca_bd_t));
+		cpc_writew(&ptdescr->len, 0);
+		cpc_writeb(&ptdescr->status, 0);
+	}
+	rx_dma_buf_pt_init(card, ch);
+}
+
+static void tx_dma_buf_check(pc300_t * card, int ch)
+{
+	volatile pcsca_bd_t __iomem *ptdescr;
+	int i;
+	u16 first_bd = card->chan[ch].tx_first_bd;
+	u16 next_bd = card->chan[ch].tx_next_bd;
+
+	printk("#CH%d: f_bd = %d(0x%08zx), n_bd = %d(0x%08zx)\n", ch,
+	       first_bd, TX_BD_ADDR(ch, first_bd),
+	       next_bd, TX_BD_ADDR(ch, next_bd));
+	for (i = first_bd,
+	     ptdescr = (card->hw.rambase + TX_BD_ADDR(ch, first_bd));
+	     i != ((next_bd + 1) & (N_DMA_TX_BUF - 1));
+	     i = (i + 1) & (N_DMA_TX_BUF - 1), 
+		 ptdescr = (card->hw.rambase + TX_BD_ADDR(ch, i))) {
+		printk("\n CH%d TX%d: next=0x%x, ptbuf=0x%x, ST=0x%x, len=%d",
+		       ch, i, cpc_readl(&ptdescr->next),
+		       cpc_readl(&ptdescr->ptbuf),
+		       cpc_readb(&ptdescr->status), cpc_readw(&ptdescr->len));
+	}
+	printk("\n");
+}
+
+#ifdef	PC300_DEBUG_OTHER
+/* Show all TX buffer descriptors */
+static void tx1_dma_buf_check(pc300_t * card, int ch)
+{
+	volatile pcsca_bd_t __iomem *ptdescr;
+	int i;
+	u16 first_bd = card->chan[ch].tx_first_bd;
+	u16 next_bd = card->chan[ch].tx_next_bd;
+	u32 scabase = card->hw.scabase;
+
+	printk ("\nnfree_tx_bd = %d\n", card->chan[ch].nfree_tx_bd);
+	printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch,
+	       first_bd, TX_BD_ADDR(ch, first_bd),
+	       next_bd, TX_BD_ADDR(ch, next_bd));
+	printk("TX_CDA=0x%08x, TX_EDA=0x%08x\n",
+	       cpc_readl(scabase + DTX_REG(CDAL, ch)),
+	       cpc_readl(scabase + DTX_REG(EDAL, ch)));
+	for (i = 0; i < N_DMA_TX_BUF; i++) {
+		ptdescr = (card->hw.rambase + TX_BD_ADDR(ch, i));
+		printk("\n CH%d TX%d: next=0x%x, ptbuf=0x%x, ST=0x%x, len=%d",
+		       ch, i, cpc_readl(&ptdescr->next),
+		       cpc_readl(&ptdescr->ptbuf),
+		       cpc_readb(&ptdescr->status), cpc_readw(&ptdescr->len));
+	}
+	printk("\n");
+}
+#endif
+
+static void rx_dma_buf_check(pc300_t * card, int ch)
+{
+	volatile pcsca_bd_t __iomem *ptdescr;
+	int i;
+	u16 first_bd = card->chan[ch].rx_first_bd;
+	u16 last_bd = card->chan[ch].rx_last_bd;
+	int ch_factor;
+
+	ch_factor = ch * N_DMA_RX_BUF;
+	printk("#CH%d: f_bd = %d, l_bd = %d\n", ch, first_bd, last_bd);
+	for (i = 0, ptdescr = (card->hw.rambase +
+					      DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
+	     i < N_DMA_RX_BUF; i++, ptdescr++) {
+		if (cpc_readb(&ptdescr->status) & DST_OSB)
+			printk ("\n CH%d RX%d: next=0x%x, ptbuf=0x%x, ST=0x%x, len=%d",
+				 ch, i, cpc_readl(&ptdescr->next),
+				 cpc_readl(&ptdescr->ptbuf),
+				 cpc_readb(&ptdescr->status),
+				 cpc_readw(&ptdescr->len));
+	}
+	printk("\n");
+}
+
+static int dma_get_rx_frame_size(pc300_t * card, int ch)
+{
+	volatile pcsca_bd_t __iomem *ptdescr;
+	u16 first_bd = card->chan[ch].rx_first_bd;
+	int rcvd = 0;
+	volatile u8 status;
+
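+	/* Walk the Rx descriptors owned by the host (DST_OSB set), summing
+	 * their lengths until end-of-message or the last descriptor is
+	 * reached. Returns -1 when no complete frame is available yet. */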
+	ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, first_bd));
+	while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
+		rcvd += cpc_readw(&ptdescr->len);
+		first_bd = (first_bd + 1) & (N_DMA_RX_BUF - 1);
+		if ((status & DST_EOM) || (first_bd == card->chan[ch].rx_last_bd)) {
+			/* Return the size of a good frame or incomplete bad frame
+			 * (dma_buf_read will clean the buffer descriptors in this case). */
+			return rcvd;
+		}
+		ptdescr = (card->hw.rambase + cpc_readl(&ptdescr->next));
+	}
+	return -1;
+}
+
+/*
+ * dma_buf_write: writes a frame to the Tx DMA buffers
+ * NOTE: this function writes one frame at a time.
+ */
+static int dma_buf_write(pc300_t *card, int ch, u8 *ptdata, int len)
+{
+	int i, nchar;
+	volatile pcsca_bd_t __iomem *ptdescr;
+	int tosend = len;
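+	/* Number of descriptors needed: ceiling of len / BD_DEF_LEN */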
+	u8 nbuf = ((len - 1) / BD_DEF_LEN) + 1;
+
+	if (nbuf >= card->chan[ch].nfree_tx_bd) {
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < nbuf; i++) {
+		ptdescr = (card->hw.rambase +
+					  TX_BD_ADDR(ch, card->chan[ch].tx_next_bd));
+		nchar = cpc_min(BD_DEF_LEN, tosend);
+		if (cpc_readb(&ptdescr->status) & DST_OSB) {
+			memcpy_toio((card->hw.rambase + cpc_readl(&ptdescr->ptbuf)),
+				    &ptdata[len - tosend], nchar);
+			cpc_writew(&ptdescr->len, nchar);
+			card->chan[ch].nfree_tx_bd--;
+			if ((i + 1) == nbuf) {
+				/* This must be the last BD to be used */
+				cpc_writeb(&ptdescr->status, DST_EOM);
+			} else {
+				cpc_writeb(&ptdescr->status, 0);
+			}
+		} else {
+			return -ENOMEM;
+		}
+		tosend -= nchar;
+		card->chan[ch].tx_next_bd =
+			(card->chan[ch].tx_next_bd + 1) & (N_DMA_TX_BUF - 1);
+	}
+	/* If it gets to here, it means we have sent the whole frame */
+	return 0;
+}
+
+/*
+ * dma_buf_read: reads a frame from the Rx DMA buffers
+ * NOTE: this function reads one frame at a time.
+ */
+static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
+{
+	int nchar;
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	volatile pcsca_bd_t __iomem *ptdescr;
+	int rcvd = 0;
+	volatile u8 status;
+
+	ptdescr = (card->hw.rambase +
+				  RX_BD_ADDR(ch, chan->rx_first_bd));
+	while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
+		nchar = cpc_readw(&ptdescr->len);
+		if ((status & (DST_OVR | DST_CRC | DST_RBIT | DST_SHRT | DST_ABT)) ||
+		    (nchar > BD_DEF_LEN)) {
+
+			if (nchar > BD_DEF_LEN)
+				status |= DST_RBIT;
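+			/* Return the negated status byte so the caller can tell
+			 * which error bits were set on this frame. */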
+			rcvd = -status;
+			/* Discard remaining descriptors used by the bad frame */
+			while (chan->rx_first_bd != chan->rx_last_bd) {
+				cpc_writeb(&ptdescr->status, 0);
+				chan->rx_first_bd = (chan->rx_first_bd+1) & (N_DMA_RX_BUF-1);
+				if (status & DST_EOM)
+					break;
+				ptdescr = (card->hw.rambase +
+							  cpc_readl(&ptdescr->next));
+				status = cpc_readb(&ptdescr->status);
+			}
+			break;
+		}
+		if (nchar != 0) {
+			if (skb) {
+				memcpy_fromio(skb_put(skb, nchar),
+					      (card->hw.rambase + cpc_readl(&ptdescr->ptbuf)), nchar);
+			}
+			rcvd += nchar;
+		}
+		cpc_writeb(&ptdescr->status, 0);
+		cpc_writeb(&ptdescr->len, 0);
+		chan->rx_first_bd = (chan->rx_first_bd + 1) & (N_DMA_RX_BUF - 1);
+
+		if (status & DST_EOM)
+			break;
+
+		ptdescr = (card->hw.rambase + cpc_readl(&ptdescr->next));
+	}
+
+	if (rcvd != 0) {
+		/* Update pointer */
+		chan->rx_last_bd = (chan->rx_first_bd - 1) & (N_DMA_RX_BUF - 1);
+		/* Update EDA */
+		cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch),
+			   RX_BD_ADDR(ch, chan->rx_last_bd));
+	}
+	return rcvd;
+}
+
+static void tx_dma_stop(pc300_t * card, int ch)
+{
+	void __iomem *scabase = card->hw.scabase;
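+	/* Per-channel Tx DMA control bits in the DRR register:
+	 * enable bit at position 5 + 2*ch, reset bit at position 1 + 2*ch. */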
+	u8 drr_ena_bit = 1 << (5 + 2 * ch);
+	u8 drr_rst_bit = 1 << (1 + 2 * ch);
+
+	/* Disable DMA */
+	cpc_writeb(scabase + DRR, drr_ena_bit);
+	cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit);
+}
+
+static void rx_dma_stop(pc300_t * card, int ch)
+{
+	void __iomem *scabase = card->hw.scabase;
+	u8 drr_ena_bit = 1 << (4 + 2 * ch);
+	u8 drr_rst_bit = 1 << (2 * ch);
+
+	/* Disable DMA */
+	cpc_writeb(scabase + DRR, drr_ena_bit);
+	cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit);
+}
+
+static void rx_dma_start(pc300_t * card, int ch)
+{
+	void __iomem *scabase = card->hw.scabase;
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	
+	/* Start DMA */
+	cpc_writel(scabase + DRX_REG(CDAL, ch),
+		   RX_BD_ADDR(ch, chan->rx_first_bd));
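+	/* Check that the CDAL write actually took effect and rewrite it if
+	 * not (the DSR_DE write below gets the same treatment), apparently
+	 * working around the SCA-II problems mentioned in the changelog
+	 * above (CDA access during a DMA transfer, DMA reception being
+	 * disabled improperly). */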
+	if (cpc_readl(scabase + DRX_REG(CDAL,ch)) !=
+				  RX_BD_ADDR(ch, chan->rx_first_bd)) {
+		cpc_writel(scabase + DRX_REG(CDAL, ch),
+				   RX_BD_ADDR(ch, chan->rx_first_bd));
+	}
+	cpc_writel(scabase + DRX_REG(EDAL, ch),
+		   RX_BD_ADDR(ch, chan->rx_last_bd));
+	cpc_writew(scabase + DRX_REG(BFLL, ch), BD_DEF_LEN);
+	cpc_writeb(scabase + DSR_RX(ch), DSR_DE);
+	if (!(cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) {
+		cpc_writeb(scabase + DSR_RX(ch), DSR_DE);
+	}
+}
+
+/*************************/
+/***   FALC Routines   ***/
+/*************************/
+static void falc_issue_cmd(pc300_t *card, int ch, u8 cmd)
+{
+	void __iomem *falcbase = card->hw.falcbase;
+	unsigned long i = 0;
+
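+	/* Wait for the previous command to finish (SIS_CEC clear) before
+	 * issuing a new one; give up after PC300_FALC_MAXLOOP iterations. */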
+	while (cpc_readb(falcbase + F_REG(SIS, ch)) & SIS_CEC) {
+		if (i++ >= PC300_FALC_MAXLOOP) {
+			printk("%s: FALC command locked(cmd=0x%x).\n",
+			       card->chan[ch].d.name, cmd);
+			break;
+		}
+	}
+	cpc_writeb(falcbase + F_REG(CMDR, ch), cmd);
+}
+
+static void falc_intr_enable(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+
+	/* Interrupt pins are open-drain */
+	cpc_writeb(falcbase + F_REG(IPC, ch),
+		   cpc_readb(falcbase + F_REG(IPC, ch)) & ~IPC_IC0);
+	/* Counters are updated each second */
+	cpc_writeb(falcbase + F_REG(FMR1, ch),
+		   cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_ECM);
+	/* Enable SEC and ES interrupts  */
+	cpc_writeb(falcbase + F_REG(IMR3, ch),
+		   cpc_readb(falcbase + F_REG(IMR3, ch)) & ~(IMR3_SEC | IMR3_ES));
+	if (conf->fr_mode == PC300_FR_UNFRAMED) {
+		cpc_writeb(falcbase + F_REG(IMR4, ch),
+			   cpc_readb(falcbase + F_REG(IMR4, ch)) & ~(IMR4_LOS));
+	} else {
+		cpc_writeb(falcbase + F_REG(IMR4, ch),
+			   cpc_readb(falcbase + F_REG(IMR4, ch)) &
+			   ~(IMR4_LFA | IMR4_AIS | IMR4_LOS | IMR4_SLIP));
+	}
+	if (conf->media == IF_IFACE_T1) {
+		cpc_writeb(falcbase + F_REG(IMR3, ch),
+			   cpc_readb(falcbase + F_REG(IMR3, ch)) & ~IMR3_LLBSC);
+	} else {
+		cpc_writeb(falcbase + F_REG(IPC, ch),
+			   cpc_readb(falcbase + F_REG(IPC, ch)) | IPC_SCI);
+		if (conf->fr_mode == PC300_FR_UNFRAMED) {
+			cpc_writeb(falcbase + F_REG(IMR2, ch),
+				   cpc_readb(falcbase + F_REG(IMR2, ch)) & ~(IMR2_LOS));
+		} else {
+			cpc_writeb(falcbase + F_REG(IMR2, ch),
+				   cpc_readb(falcbase + F_REG(IMR2, ch)) &
+				   ~(IMR2_FAR | IMR2_LFA | IMR2_AIS | IMR2_LOS));
+			if (pfalc->multiframe_mode) {
+				cpc_writeb(falcbase + F_REG(IMR2, ch),
+					   cpc_readb(falcbase + F_REG(IMR2, ch)) & 
+					   ~(IMR2_T400MS | IMR2_MFAR));
+			} else {
+				cpc_writeb(falcbase + F_REG(IMR2, ch),
+					   cpc_readb(falcbase + F_REG(IMR2, ch)) | 
+					   IMR2_T400MS | IMR2_MFAR);
+			}
+		}
+	}
+}
+
+static void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
+{
+	void __iomem *falcbase = card->hw.falcbase;
+	u8 tshf = card->chan[ch].falc.offset;
+
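+	/* ICB/TTR/RTR hold one bit per timeslot, eight timeslots per
+	 * register, MSB first; tshf compensates for the T1 timeslot offset
+	 * (offset 1 for T1, 0 for E1, as set up in te_config). */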
+	cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
+		   cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) & 
+		   	~(0x80 >> ((timeslot - tshf) & 0x07)));
+	cpc_writeb(falcbase + F_REG((TTR1 + timeslot / 8), ch),
+		   cpc_readb(falcbase + F_REG((TTR1 + timeslot / 8), ch)) | 
+   			(0x80 >> (timeslot & 0x07)));
+	cpc_writeb(falcbase + F_REG((RTR1 + timeslot / 8), ch),
+		   cpc_readb(falcbase + F_REG((RTR1 + timeslot / 8), ch)) | 
+			(0x80 >> (timeslot & 0x07)));
+}
+
+static void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
+{
+	void __iomem *falcbase = card->hw.falcbase;
+	u8 tshf = card->chan[ch].falc.offset;
+
+	cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
+		   cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) | 
+		   (0x80 >> ((timeslot - tshf) & 0x07)));
+	cpc_writeb(falcbase + F_REG((TTR1 + timeslot / 8), ch),
+		   cpc_readb(falcbase + F_REG((TTR1 + timeslot / 8), ch)) & 
+		   ~(0x80 >> (timeslot & 0x07)));
+	cpc_writeb(falcbase + F_REG((RTR1 + timeslot / 8), ch),
+		   cpc_readb(falcbase + F_REG((RTR1 + timeslot / 8), ch)) & 
+		   ~(0x80 >> (timeslot & 0x07)));
+}
+
+static void falc_close_all_timeslots(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	void __iomem *falcbase = card->hw.falcbase;
+
+	cpc_writeb(falcbase + F_REG(ICB1, ch), 0xff);
+	cpc_writeb(falcbase + F_REG(TTR1, ch), 0);
+	cpc_writeb(falcbase + F_REG(RTR1, ch), 0);
+	cpc_writeb(falcbase + F_REG(ICB2, ch), 0xff);
+	cpc_writeb(falcbase + F_REG(TTR2, ch), 0);
+	cpc_writeb(falcbase + F_REG(RTR2, ch), 0);
+	cpc_writeb(falcbase + F_REG(ICB3, ch), 0xff);
+	cpc_writeb(falcbase + F_REG(TTR3, ch), 0);
+	cpc_writeb(falcbase + F_REG(RTR3, ch), 0);
+	if (conf->media == IF_IFACE_E1) {
+		cpc_writeb(falcbase + F_REG(ICB4, ch), 0xff);
+		cpc_writeb(falcbase + F_REG(TTR4, ch), 0);
+		cpc_writeb(falcbase + F_REG(RTR4, ch), 0);
+	}
+}
+
+static void falc_open_all_timeslots(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	void __iomem *falcbase = card->hw.falcbase;
+
+	cpc_writeb(falcbase + F_REG(ICB1, ch), 0);
+	if (conf->fr_mode == PC300_FR_UNFRAMED) {
+		cpc_writeb(falcbase + F_REG(TTR1, ch), 0xff);
+		cpc_writeb(falcbase + F_REG(RTR1, ch), 0xff);
+	} else {
+		/* Timeslot 0 is never enabled */
+		cpc_writeb(falcbase + F_REG(TTR1, ch), 0x7f);
+		cpc_writeb(falcbase + F_REG(RTR1, ch), 0x7f);
+	}
+	cpc_writeb(falcbase + F_REG(ICB2, ch), 0);
+	cpc_writeb(falcbase + F_REG(TTR2, ch), 0xff);
+	cpc_writeb(falcbase + F_REG(RTR2, ch), 0xff);
+	cpc_writeb(falcbase + F_REG(ICB3, ch), 0);
+	cpc_writeb(falcbase + F_REG(TTR3, ch), 0xff);
+	cpc_writeb(falcbase + F_REG(RTR3, ch), 0xff);
+	if (conf->media == IF_IFACE_E1) {
+		cpc_writeb(falcbase + F_REG(ICB4, ch), 0);
+		cpc_writeb(falcbase + F_REG(TTR4, ch), 0xff);
+		cpc_writeb(falcbase + F_REG(RTR4, ch), 0xff);
+	} else {
+		cpc_writeb(falcbase + F_REG(ICB4, ch), 0xff);
+		cpc_writeb(falcbase + F_REG(TTR4, ch), 0x80);
+		cpc_writeb(falcbase + F_REG(RTR4, ch), 0x80);
+	}
+}
+
+static void falc_init_timeslot(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	int tslot;
+
+	for (tslot = 0; tslot < pfalc->num_channels; tslot++) {
+		if (conf->tslot_bitmap & (1 << tslot)) {
+			// Channel enabled
+			falc_open_timeslot(card, ch, tslot + 1);
+		} else {
+			// Channel disabled
+			falc_close_timeslot(card, ch, tslot + 1);
+		}
+	}
+}
+
+static void falc_enable_comm(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	falc_t *pfalc = (falc_t *) & chan->falc;
+
+	if (pfalc->full_bandwidth) {
+		falc_open_all_timeslots(card, ch);
+	} else {
+		falc_init_timeslot(card, ch);
+	}
+	// CTS/DCD ON
+	cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
+		   cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) &
+		   ~((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch)));
+}
+
+static void falc_disable_comm(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	falc_t *pfalc = (falc_t *) & chan->falc;
+
+	if (pfalc->loop_active != 2) {
+		falc_close_all_timeslots(card, ch);
+	}
+	// CTS/DCD OFF
+	cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
+		   cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) |
+		   ((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch)));
+}
+
+static void falc_init_t1(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+	u8 dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
+
+	/* Switch to T1 mode (PCM 24) */
+	cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD);
+
+	/* Wait 20 us for setup */
+	udelay(20);
+
+	/* Transmit Buffer Size (1 frame) */
+	cpc_writeb(falcbase + F_REG(SIC1, ch), SIC1_XBS0);
+
+	/* Clock mode */
+	if (conf->phys_settings.clock_type == CLOCK_INT) { /* Master mode */
+		cpc_writeb(falcbase + F_REG(LIM0, ch),
+			   cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_MAS);
+	} else { /* Slave mode */
+		cpc_writeb(falcbase + F_REG(LIM0, ch),
+			   cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_MAS);
+		cpc_writeb(falcbase + F_REG(LOOP, ch),
+			   cpc_readb(falcbase + F_REG(LOOP, ch)) & ~LOOP_RTM);
+	}
+
+	cpc_writeb(falcbase + F_REG(IPC, ch), IPC_SCI);
+	cpc_writeb(falcbase + F_REG(FMR0, ch),
+		   cpc_readb(falcbase + F_REG(FMR0, ch)) &
+		   ~(FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1));
+
+	switch (conf->lcode) {
+		case PC300_LC_AMI:
+			cpc_writeb(falcbase + F_REG(FMR0, ch),
+				   cpc_readb(falcbase + F_REG(FMR0, ch)) |
+				   FMR0_XC1 | FMR0_RC1);
+			/* Clear Channel register to ON for all channels */
+			cpc_writeb(falcbase + F_REG(CCB1, ch), 0xff);
+			cpc_writeb(falcbase + F_REG(CCB2, ch), 0xff);
+			cpc_writeb(falcbase + F_REG(CCB3, ch), 0xff);
+			break;
+
+		case PC300_LC_B8ZS:
+			cpc_writeb(falcbase + F_REG(FMR0, ch),
+				   cpc_readb(falcbase + F_REG(FMR0, ch)) |
+				   FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1);
+			break;
+
+		case PC300_LC_NRZ:
+			cpc_writeb(falcbase + F_REG(FMR0, ch),
+				   cpc_readb(falcbase + F_REG(FMR0, ch)) | 0x00);
+			break;
+	}
+
+	cpc_writeb(falcbase + F_REG(LIM0, ch),
+		   cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_ELOS);
+	cpc_writeb(falcbase + F_REG(LIM0, ch),
+		   cpc_readb(falcbase + F_REG(LIM0, ch)) & ~(LIM0_SCL1 | LIM0_SCL0));
+	/* Set interface mode to 2 MBPS */
+	cpc_writeb(falcbase + F_REG(FMR1, ch),
+		   cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_IMOD);
+
+	switch (conf->fr_mode) {
+		case PC300_FR_ESF:
+			pfalc->multiframe_mode = 0;
+			cpc_writeb(falcbase + F_REG(FMR4, ch),
+				   cpc_readb(falcbase + F_REG(FMR4, ch)) | FMR4_FM1);
+			cpc_writeb(falcbase + F_REG(FMR1, ch),
+				   cpc_readb(falcbase + F_REG(FMR1, ch)) | 
+				   FMR1_CRC | FMR1_EDL);
+			cpc_writeb(falcbase + F_REG(XDL1, ch), 0);
+			cpc_writeb(falcbase + F_REG(XDL2, ch), 0);
+			cpc_writeb(falcbase + F_REG(XDL3, ch), 0);
+			cpc_writeb(falcbase + F_REG(FMR0, ch),
+				   cpc_readb(falcbase + F_REG(FMR0, ch)) & ~FMR0_SRAF);
+			cpc_writeb(falcbase + F_REG(FMR2, ch),
+				   cpc_readb(falcbase + F_REG(FMR2,ch)) | FMR2_MCSP | FMR2_SSP);
+			break;
+
+		case PC300_FR_D4:
+			pfalc->multiframe_mode = 1;
+			cpc_writeb(falcbase + F_REG(FMR4, ch),
+				   cpc_readb(falcbase + F_REG(FMR4, ch)) &
+				   ~(FMR4_FM1 | FMR4_FM0));
+			cpc_writeb(falcbase + F_REG(FMR0, ch),
+				   cpc_readb(falcbase + F_REG(FMR0, ch)) | FMR0_SRAF);
+			cpc_writeb(falcbase + F_REG(FMR2, ch),
+				   cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_SSP);
+			break;
+	}
+
+	/* Enable Automatic Resynchronization */
+	cpc_writeb(falcbase + F_REG(FMR4, ch),
+		   cpc_readb(falcbase + F_REG(FMR4, ch)) | FMR4_AUTO);
+
+	/* Transmit Automatic Remote Alarm */
+	cpc_writeb(falcbase + F_REG(FMR2, ch),
+		   cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_AXRA);
+
+	/* Channel translation mode 1 : one to one */
+	cpc_writeb(falcbase + F_REG(FMR1, ch),
+		   cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_CTM);
+
+	/* No signaling */
+	cpc_writeb(falcbase + F_REG(FMR1, ch),
+		   cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_SIGM);
+	cpc_writeb(falcbase + F_REG(FMR5, ch),
+		   cpc_readb(falcbase + F_REG(FMR5, ch)) &
+		   ~(FMR5_EIBR | FMR5_SRS));
+	cpc_writeb(falcbase + F_REG(CCR1, ch), 0);
+
+	cpc_writeb(falcbase + F_REG(LIM1, ch),
+		   cpc_readb(falcbase + F_REG(LIM1, ch)) | LIM1_RIL0 | LIM1_RIL1);
+
+	switch (conf->lbo) {
+			/* Provides proper Line Build Out */
+		case PC300_LBO_0_DB:
+			cpc_writeb(falcbase + F_REG(LIM2, ch), (LIM2_LOS1 | dja));
+			cpc_writeb(falcbase + F_REG(XPM0, ch), 0x5a);
+			cpc_writeb(falcbase + F_REG(XPM1, ch), 0x8f);
+			cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20);
+			break;
+		case PC300_LBO_7_5_DB:
+			cpc_writeb(falcbase + F_REG(LIM2, ch), (0x40 | LIM2_LOS1 | dja));
+			cpc_writeb(falcbase + F_REG(XPM0, ch), 0x11);
+			cpc_writeb(falcbase + F_REG(XPM1, ch), 0x02);
+			cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20);
+			break;
+		case PC300_LBO_15_DB:
+			cpc_writeb(falcbase + F_REG(LIM2, ch), (0x80 | LIM2_LOS1 | dja));
+			cpc_writeb(falcbase + F_REG(XPM0, ch), 0x8e);
+			cpc_writeb(falcbase + F_REG(XPM1, ch), 0x01);
+			cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20);
+			break;
+		case PC300_LBO_22_5_DB:
+			cpc_writeb(falcbase + F_REG(LIM2, ch), (0xc0 | LIM2_LOS1 | dja));
+			cpc_writeb(falcbase + F_REG(XPM0, ch), 0x09);
+			cpc_writeb(falcbase + F_REG(XPM1, ch), 0x01);
+			cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20);
+			break;
+	}
+
+	/* Transmit Clock-Slot Offset */
+	cpc_writeb(falcbase + F_REG(XC0, ch),
+		   cpc_readb(falcbase + F_REG(XC0, ch)) | 0x01);
+	/* Transmit Time-slot Offset */
+	cpc_writeb(falcbase + F_REG(XC1, ch), 0x3e);
+	/* Receive  Clock-Slot offset */
+	cpc_writeb(falcbase + F_REG(RC0, ch), 0x05);
+	/* Receive  Time-slot offset */
+	cpc_writeb(falcbase + F_REG(RC1, ch), 0x00);
+
+	/* LOS Detection after 176 consecutive 0s */
+	cpc_writeb(falcbase + F_REG(PCDR, ch), 0x0a);
+	/* LOS Recovery after 22 ones in the time window of PCD */
+	cpc_writeb(falcbase + F_REG(PCRR, ch), 0x15);
+
+	cpc_writeb(falcbase + F_REG(IDLE, ch), 0x7f);
+
+	if (conf->fr_mode == PC300_FR_ESF_JAPAN) {
+		cpc_writeb(falcbase + F_REG(RC1, ch),
+			   cpc_readb(falcbase + F_REG(RC1, ch)) | 0x80);
+	}
+
+	falc_close_all_timeslots(card, ch);
+}
+
+static void falc_init_e1(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+	u8 dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
+
+	/* Switch to E1 mode (PCM 30) */
+	cpc_writeb(falcbase + F_REG(FMR1, ch),
+		   cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_PMOD);
+
+	/* Clock mode */
+	if (conf->phys_settings.clock_type == CLOCK_INT) { /* Master mode */
+		cpc_writeb(falcbase + F_REG(LIM0, ch),
+			   cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_MAS);
+	} else { /* Slave mode */
+		cpc_writeb(falcbase + F_REG(LIM0, ch),
+			   cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_MAS);
+	}
+	cpc_writeb(falcbase + F_REG(LOOP, ch),
+		   cpc_readb(falcbase + F_REG(LOOP, ch)) & ~LOOP_SFM);
+
+	cpc_writeb(falcbase + F_REG(IPC, ch), IPC_SCI);
+	cpc_writeb(falcbase + F_REG(FMR0, ch),
+		   cpc_readb(falcbase + F_REG(FMR0, ch)) &
+		   ~(FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1));
+
+	switch (conf->lcode) {
+		case PC300_LC_AMI:
+			cpc_writeb(falcbase + F_REG(FMR0, ch),
+				   cpc_readb(falcbase + F_REG(FMR0, ch)) |
+				   FMR0_XC1 | FMR0_RC1);
+			break;
+
+		case PC300_LC_HDB3:
+			cpc_writeb(falcbase + F_REG(FMR0, ch),
+				   cpc_readb(falcbase + F_REG(FMR0, ch)) |
+				   FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1);
+			break;
+
+		case PC300_LC_NRZ:
+			break;
+	}
+
+	cpc_writeb(falcbase + F_REG(LIM0, ch),
+		   cpc_readb(falcbase + F_REG(LIM0, ch)) & ~(LIM0_SCL1 | LIM0_SCL0));
+	/* Set interface mode to 2 MBPS */
+	cpc_writeb(falcbase + F_REG(FMR1, ch),
+		   cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_IMOD);
+
+	cpc_writeb(falcbase + F_REG(XPM0, ch), 0x18);
+	cpc_writeb(falcbase + F_REG(XPM1, ch), 0x03);
+	cpc_writeb(falcbase + F_REG(XPM2, ch), 0x00);
+
+	switch (conf->fr_mode) {
+		case PC300_FR_MF_CRC4:
+			pfalc->multiframe_mode = 1;
+			cpc_writeb(falcbase + F_REG(FMR1, ch),
+				   cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_XFS);
+			cpc_writeb(falcbase + F_REG(FMR2, ch),
+				   cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_RFS1);
+			cpc_writeb(falcbase + F_REG(FMR2, ch),
+				   cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_RFS0);
+			cpc_writeb(falcbase + F_REG(FMR3, ch),
+				   cpc_readb(falcbase + F_REG(FMR3, ch)) & ~FMR3_EXTIW);
+
+			/* MultiFrame Resynchronization */
+			cpc_writeb(falcbase + F_REG(FMR1, ch),
+				   cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_MFCS);
+
+			/* Automatic Loss of Multiframe > 914 CRC errors */
+			cpc_writeb(falcbase + F_REG(FMR2, ch),
+				   cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_ALMF);
+
+			/* S1 and SI1/SI2 spare Bits set to 1 */
+			cpc_writeb(falcbase + F_REG(XSP, ch),
+				   cpc_readb(falcbase + F_REG(XSP, ch)) & ~XSP_AXS);
+			cpc_writeb(falcbase + F_REG(XSP, ch),
+				   cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_EBP);
+			cpc_writeb(falcbase + F_REG(XSP, ch),
+				   cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_XS13 | XSP_XS15);
+
+			/* Automatic Force Resynchronization */
+			cpc_writeb(falcbase + F_REG(FMR1, ch),
+				   cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_AFR);
+
+			/* Transmit Automatic Remote Alarm */
+			cpc_writeb(falcbase + F_REG(FMR2, ch),
+				   cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_AXRA);
+
+			/* Transmit Spare Bits for National Use (Y, Sn, Sa) */
+			cpc_writeb(falcbase + F_REG(XSW, ch),
+				   cpc_readb(falcbase + F_REG(XSW, ch)) |
+				   XSW_XY0 | XSW_XY1 | XSW_XY2 | XSW_XY3 | XSW_XY4);
+			break;
+
+		case PC300_FR_MF_NON_CRC4:
+		case PC300_FR_D4:
+			pfalc->multiframe_mode = 0;
+			cpc_writeb(falcbase + F_REG(FMR1, ch),
+				   cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_XFS);
+			cpc_writeb(falcbase + F_REG(FMR2, ch),
+				   cpc_readb(falcbase + F_REG(FMR2, ch)) & 
+				   ~(FMR2_RFS1 | FMR2_RFS0));
+			cpc_writeb(falcbase + F_REG(XSW, ch),
+				   cpc_readb(falcbase + F_REG(XSW, ch)) | XSW_XSIS);
+			cpc_writeb(falcbase + F_REG(XSP, ch),
+				   cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_XSIF);
+
+			/* Automatic Force Resynchronization */
+			cpc_writeb(falcbase + F_REG(FMR1, ch),
+				   cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_AFR);
+
+			/* Transmit Automatic Remote Alarm */
+			cpc_writeb(falcbase + F_REG(FMR2, ch),
+				   cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_AXRA);
+
+			/* Transmit Spare Bits for National Use (Y, Sn, Sa) */
+			cpc_writeb(falcbase + F_REG(XSW, ch),
+				   cpc_readb(falcbase + F_REG(XSW, ch)) |
+				   XSW_XY0 | XSW_XY1 | XSW_XY2 | XSW_XY3 | XSW_XY4);
+			break;
+
+		case PC300_FR_UNFRAMED:
+			pfalc->multiframe_mode = 0;
+			cpc_writeb(falcbase + F_REG(FMR1, ch),
+				   cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_XFS);
+			cpc_writeb(falcbase + F_REG(FMR2, ch),
+				   cpc_readb(falcbase + F_REG(FMR2, ch)) & 
+				   ~(FMR2_RFS1 | FMR2_RFS0));
+			cpc_writeb(falcbase + F_REG(XSP, ch),
+				   cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_TT0);
+			cpc_writeb(falcbase + F_REG(XSW, ch),
+				   cpc_readb(falcbase + F_REG(XSW, ch)) & 
+				   ~(XSW_XTM|XSW_XY0|XSW_XY1|XSW_XY2|XSW_XY3|XSW_XY4));
+			cpc_writeb(falcbase + F_REG(TSWM, ch), 0xff);
+			cpc_writeb(falcbase + F_REG(FMR2, ch),
+				   cpc_readb(falcbase + F_REG(FMR2, ch)) |
+				   (FMR2_RTM | FMR2_DAIS));
+			cpc_writeb(falcbase + F_REG(FMR2, ch),
+				   cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_AXRA);
+			cpc_writeb(falcbase + F_REG(FMR1, ch),
+				   cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_AFR);
+			pfalc->sync = 1;
+			cpc_writeb(falcbase + card->hw.cpld_reg2,
+				   cpc_readb(falcbase + card->hw.cpld_reg2) |
+				   (CPLD_REG2_FALC_LED2 << (2 * ch)));
+			break;
+	}
+
+	/* No signaling */
+	cpc_writeb(falcbase + F_REG(XSP, ch),
+		   cpc_readb(falcbase + F_REG(XSP, ch)) & ~XSP_CASEN);
+	cpc_writeb(falcbase + F_REG(CCR1, ch), 0);
+
+	cpc_writeb(falcbase + F_REG(LIM1, ch),
+		   cpc_readb(falcbase + F_REG(LIM1, ch)) | LIM1_RIL0 | LIM1_RIL1);
+	cpc_writeb(falcbase + F_REG(LIM2, ch), (LIM2_LOS1 | dja));
+
+	/* Transmit Clock-Slot Offset */
+	cpc_writeb(falcbase + F_REG(XC0, ch),
+		   cpc_readb(falcbase + F_REG(XC0, ch)) | 0x01);
+	/* Transmit Time-slot Offset */
+	cpc_writeb(falcbase + F_REG(XC1, ch), 0x3e);
+	/* Receive  Clock-Slot offset */
+	cpc_writeb(falcbase + F_REG(RC0, ch), 0x05);
+	/* Receive  Time-slot offset */
+	cpc_writeb(falcbase + F_REG(RC1, ch), 0x00);
+
+	/* LOS Detection after 176 consecutive 0s */
+	cpc_writeb(falcbase + F_REG(PCDR, ch), 0x0a);
+	/* LOS Recovery after 22 ones in the time window of PCD */
+	cpc_writeb(falcbase + F_REG(PCRR, ch), 0x15);
+
+	cpc_writeb(falcbase + F_REG(IDLE, ch), 0x7f);
+
+	falc_close_all_timeslots(card, ch);
+}
+
+static void falc_init_hdlc(pc300_t * card, int ch)
+{
+	void __iomem *falcbase = card->hw.falcbase;
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+
+	/* Enable transparent data transfer */
+	if (conf->fr_mode == PC300_FR_UNFRAMED) {
+		cpc_writeb(falcbase + F_REG(MODE, ch), 0);
+	} else {
+		cpc_writeb(falcbase + F_REG(MODE, ch),
+			   cpc_readb(falcbase + F_REG(MODE, ch)) |
+			   (MODE_HRAC | MODE_MDS2));
+		cpc_writeb(falcbase + F_REG(RAH2, ch), 0xff);
+		cpc_writeb(falcbase + F_REG(RAH1, ch), 0xff);
+		cpc_writeb(falcbase + F_REG(RAL2, ch), 0xff);
+		cpc_writeb(falcbase + F_REG(RAL1, ch), 0xff);
+	}
+
+	/* Tx/Rx reset  */
+	falc_issue_cmd(card, ch, CMDR_RRES | CMDR_XRES | CMDR_SRES);
+
+	/* Enable interrupt sources */
+	falc_intr_enable(card, ch);
+}
+
+static void te_config(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+	u8 dummy;
+	unsigned long flags;
+
+	memset(pfalc, 0, sizeof(falc_t));
+	switch (conf->media) {
+		case IF_IFACE_T1:
+			pfalc->num_channels = NUM_OF_T1_CHANNELS;
+			pfalc->offset = 1;
+			break;
+		case IF_IFACE_E1:
+			pfalc->num_channels = NUM_OF_E1_CHANNELS;
+			pfalc->offset = 0;
+			break;
+	}
+	if (conf->tslot_bitmap == 0xffffffffUL)
+		pfalc->full_bandwidth = 1;
+	else
+		pfalc->full_bandwidth = 0;
+
+	CPC_LOCK(card, flags);
+	/* Reset the FALC chip */
+	cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
+		   cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) |
+		   (CPLD_REG1_FALC_RESET << (2 * ch)));
+	udelay(10000);
+	cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
+		   cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) &
+		   ~(CPLD_REG1_FALC_RESET << (2 * ch)));
+
+	if (conf->media == IF_IFACE_T1) {
+		falc_init_t1(card, ch);
+	} else {
+		falc_init_e1(card, ch);
+	}
+	falc_init_hdlc(card, ch);
+	if (conf->rx_sens == PC300_RX_SENS_SH) {
+		cpc_writeb(falcbase + F_REG(LIM0, ch),
+			   cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_EQON);
+	} else {
+		cpc_writeb(falcbase + F_REG(LIM0, ch),
+			   cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_EQON);
+	}
+	cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+		   cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) |
+		   ((CPLD_REG2_FALC_TX_CLK | CPLD_REG2_FALC_RX_CLK) << (2 * ch)));
+
+	/* Clear all interrupt registers */
+	dummy = cpc_readb(falcbase + F_REG(FISR0, ch)) +
+		cpc_readb(falcbase + F_REG(FISR1, ch)) +
+		cpc_readb(falcbase + F_REG(FISR2, ch)) +
+		cpc_readb(falcbase + F_REG(FISR3, ch));
+	CPC_UNLOCK(card, flags);
+}
+
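+/*
+ * falc_check_status: maps the FRS0 alarm bits (LOS, AIS, LFA, LMFA, RRA)
+ * onto the driver's red/blue/yellow alarm state, updates the alarm
+ * counters, and switches communication and the sync LED on or off
+ * accordingly.
+ */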
+static void falc_check_status(pc300_t * card, int ch, unsigned char frs0)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+
+	/* Verify LOS */
+	if (frs0 & FRS0_LOS) {
+		if (!pfalc->red_alarm) {
+			pfalc->red_alarm = 1;
+			pfalc->los++;
+			if (!pfalc->blue_alarm) {
+				// EVENT_FALC_ABNORMAL
+				if (conf->media == IF_IFACE_T1) {
+					/* Disable this interrupt as it may otherwise interfere 
+					 * with other working boards. */
+					cpc_writeb(falcbase + F_REG(IMR0, ch), 
+						   cpc_readb(falcbase + F_REG(IMR0, ch))
+						   | IMR0_PDEN);
+				}
+				falc_disable_comm(card, ch);
+				// EVENT_FALC_ABNORMAL
+			}
+		}
+	} else {
+		if (pfalc->red_alarm) {
+			pfalc->red_alarm = 0;
+			pfalc->losr++;
+		}
+	}
+
+	if (conf->fr_mode != PC300_FR_UNFRAMED) {
+		/* Verify AIS alarm */
+		if (frs0 & FRS0_AIS) {
+			if (!pfalc->blue_alarm) {
+				pfalc->blue_alarm = 1;
+				pfalc->ais++;
+				// EVENT_AIS
+				if (conf->media == IF_IFACE_T1) {
+					/* Disable this interrupt as it may otherwise interfere
+					 * with other working boards. */
+					cpc_writeb(falcbase + F_REG(IMR0, ch),
+						   cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN);
+				}
+				falc_disable_comm(card, ch);
+				// EVENT_AIS
+			}
+		} else {
+			pfalc->blue_alarm = 0;
+		}
+
+		/* Verify LFA */
+		if (frs0 & FRS0_LFA) {
+			if (!pfalc->loss_fa) {
+				pfalc->loss_fa = 1;
+				pfalc->lfa++;
+				if (!pfalc->blue_alarm && !pfalc->red_alarm) {
+					// EVENT_FALC_ABNORMAL
+					if (conf->media == IF_IFACE_T1) {
+						/* Disable this interrupt as it may otherwise 
+						 * interfere with other working boards. */
+						cpc_writeb(falcbase + F_REG(IMR0, ch),
+							   cpc_readb(falcbase + F_REG(IMR0, ch))
+							   | IMR0_PDEN);
+					}
+					falc_disable_comm(card, ch);
+					// EVENT_FALC_ABNORMAL
+				}
+			}
+		} else {
+			if (pfalc->loss_fa) {
+				pfalc->loss_fa = 0;
+				pfalc->farec++;
+			}
+		}
+
+		/* Verify LMFA */
+		if (pfalc->multiframe_mode && (frs0 & FRS0_LMFA)) {
+			/* D4 or CRC4 frame mode */
+			if (!pfalc->loss_mfa) {
+				pfalc->loss_mfa = 1;
+				pfalc->lmfa++;
+				if (!pfalc->blue_alarm && !pfalc->red_alarm &&
+				    !pfalc->loss_fa) {
+					// EVENT_FALC_ABNORMAL
+					if (conf->media == IF_IFACE_T1) {
+						/* Disable this interrupt as it may otherwise 
+						 * interfere with other working boards. */
+						cpc_writeb(falcbase + F_REG(IMR0, ch),
+							   cpc_readb(falcbase + F_REG(IMR0, ch))
+							   | IMR0_PDEN);
+					}
+					falc_disable_comm(card, ch);
+					// EVENT_FALC_ABNORMAL
+				}
+			}
+		} else {
+			pfalc->loss_mfa = 0;
+		}
+
+		/* Verify Remote Alarm */
+		if (frs0 & FRS0_RRA) {
+			if (!pfalc->yellow_alarm) {
+				pfalc->yellow_alarm = 1;
+				pfalc->rai++;
+				if (pfalc->sync) {
+					// EVENT_RAI
+					falc_disable_comm(card, ch);
+					// EVENT_RAI
+				}
+			}
+		} else {
+			pfalc->yellow_alarm = 0;
+		}
+	} /* if !PC300_UNFRAMED */
+
+	if (pfalc->red_alarm || pfalc->loss_fa ||
+	    pfalc->loss_mfa || pfalc->blue_alarm) {
+		if (pfalc->sync) {
+			pfalc->sync = 0;
+			chan->d.line_off++;
+			cpc_writeb(falcbase + card->hw.cpld_reg2,
+				   cpc_readb(falcbase + card->hw.cpld_reg2) &
+				   ~(CPLD_REG2_FALC_LED2 << (2 * ch)));
+		}
+	} else {
+		if (!pfalc->sync) {
+			pfalc->sync = 1;
+			chan->d.line_on++;
+			cpc_writeb(falcbase + card->hw.cpld_reg2,
+				   cpc_readb(falcbase + card->hw.cpld_reg2) |
+				   (CPLD_REG2_FALC_LED2 << (2 * ch)));
+		}
+	}
+
+	if (pfalc->sync && !pfalc->yellow_alarm) {
+		if (!pfalc->active) {
+			// EVENT_FALC_NORMAL
+			if (pfalc->loop_active) {
+				return;
+			}
+			if (conf->media == IF_IFACE_T1) {
+				cpc_writeb(falcbase + F_REG(IMR0, ch),
+					   cpc_readb(falcbase + F_REG(IMR0, ch)) & ~IMR0_PDEN);
+			}
+			falc_enable_comm(card, ch);
+			// EVENT_FALC_NORMAL
+			pfalc->active = 1;
+		}
+	} else {
+		if (pfalc->active) {
+			pfalc->active = 0;
+		}
+	}
+}
+
+static void falc_update_stats(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+	u16 counter;
+
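+	/* The FALC error counters are 16 bits wide, split into low/high
+	 * byte registers; accumulate each into the running statistics. */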
+	counter = cpc_readb(falcbase + F_REG(FECL, ch));
+	counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8;
+	pfalc->fec += counter;
+
+	counter = cpc_readb(falcbase + F_REG(CVCL, ch));
+	counter |= cpc_readb(falcbase + F_REG(CVCH, ch)) << 8;
+	pfalc->cvc += counter;
+
+	counter = cpc_readb(falcbase + F_REG(CECL, ch));
+	counter |= cpc_readb(falcbase + F_REG(CECH, ch)) << 8;
+	pfalc->cec += counter;
+
+	counter = cpc_readb(falcbase + F_REG(EBCL, ch));
+	counter |= cpc_readb(falcbase + F_REG(EBCH, ch)) << 8;
+	pfalc->ebc += counter;
+
+	if (cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_EPRM) {
+		mdelay(10);
+		counter = cpc_readb(falcbase + F_REG(BECL, ch));
+		counter |= cpc_readb(falcbase + F_REG(BECH, ch)) << 8;
+		pfalc->bec += counter;
+
+		if (((conf->media == IF_IFACE_T1) &&
+		     (cpc_readb(falcbase + F_REG(FRS1, ch)) & FRS1_LLBAD) &&
+		     (!(cpc_readb(falcbase + F_REG(FRS1, ch)) & FRS1_PDEN))) ||
+		    ((conf->media == IF_IFACE_E1) &&
+		     (cpc_readb(falcbase + F_REG(RSP, ch)) & RSP_LLBAD))) {
+			pfalc->prbs = 2;
+		} else {
+			pfalc->prbs = 1;
+		}
+	}
+}
+
+/*----------------------------------------------------------------------------
+ * falc_remote_loop
+ *----------------------------------------------------------------------------
+ * Description:	In the remote loopback mode the clock and data recovered
+ *		from the line inputs RL1/2 or RDIP/RDIN are routed back
+ *		to the line outputs XL1/2 or XDOP/XDON via the analog
+ *		transmitter. As in normal mode they are processed by
+ *		the synchronizer and then sent to the system interface.
+ *----------------------------------------------------------------------------
+ */
+static void falc_remote_loop(pc300_t * card, int ch, int loop_on)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+
+	if (loop_on) {
+		// EVENT_FALC_ABNORMAL
+		if (conf->media == IF_IFACE_T1) {
+			/* Disable this interrupt as it may otherwise interfere with 
+			 * other working boards. */
+			cpc_writeb(falcbase + F_REG(IMR0, ch),
+				   cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN);
+		}
+		falc_disable_comm(card, ch);
+		// EVENT_FALC_ABNORMAL
+		cpc_writeb(falcbase + F_REG(LIM1, ch),
+			   cpc_readb(falcbase + F_REG(LIM1, ch)) | LIM1_RL);
+		pfalc->loop_active = 1;
+	} else {
+		cpc_writeb(falcbase + F_REG(LIM1, ch),
+			   cpc_readb(falcbase + F_REG(LIM1, ch)) & ~LIM1_RL);
+		pfalc->sync = 0;
+		cpc_writeb(falcbase + card->hw.cpld_reg2,
+			   cpc_readb(falcbase + card->hw.cpld_reg2) &
+			   ~(CPLD_REG2_FALC_LED2 << (2 * ch)));
+		pfalc->active = 0;
+		falc_issue_cmd(card, ch, CMDR_XRES);
+		pfalc->loop_active = 0;
+	}
+}
+
+/*----------------------------------------------------------------------------
+ * falc_local_loop
+ *----------------------------------------------------------------------------
+ * Description: The local loopback mode disconnects the receive lines 
+ *		RL1/RL2 resp. RDIP/RDIN from the receiver. Instead of the
+ *		signals coming from the line, the data provided by the system
+ *		interface are routed through the analog receiver back to
+ *		the system interface. The unipolar bit stream is still
+ *		transmitted undisturbed on the line. Receiver and transmitter
+ *		coding must be identical.
+ *----------------------------------------------------------------------------
+ */
+static void falc_local_loop(pc300_t * card, int ch, int loop_on)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+
+	if (loop_on) {
+		cpc_writeb(falcbase + F_REG(LIM0, ch),
+			   cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_LL);
+		pfalc->loop_active = 1;
+	} else {
+		cpc_writeb(falcbase + F_REG(LIM0, ch),
+			   cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_LL);
+		pfalc->loop_active = 0;
+	}
+}
+
+/*----------------------------------------------------------------------------
+ * falc_payload_loop
+ *----------------------------------------------------------------------------
+ * Description: This routine enables/disables payload loopback.
+ *		When the payload loop is activated, the received 192 bits
+ *		of payload data will be looped back to the transmit
+ *		direction. The framing bits, CRC6 and DL bits are not 
+ *		looped. They are originated by the FALC-LH transmitter.
+ *----------------------------------------------------------------------------
+ */
+static void falc_payload_loop(pc300_t * card, int ch, int loop_on)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+
+	if (loop_on) {
+		// EVENT_FALC_ABNORMAL
+		if (conf->media == IF_IFACE_T1) {
+			/* Disable this interrupt as it may otherwise interfere with 
+			 * other working boards. */
+			cpc_writeb(falcbase + F_REG(IMR0, ch),
+				   cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN);
+		}
+		falc_disable_comm(card, ch);
+		// EVENT_FALC_ABNORMAL
+		cpc_writeb(falcbase + F_REG(FMR2, ch),
+			   cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_PLB);
+		if (conf->media == IF_IFACE_T1) {
+			cpc_writeb(falcbase + F_REG(FMR4, ch),
+				   cpc_readb(falcbase + F_REG(FMR4, ch)) | FMR4_TM);
+		} else {
+			cpc_writeb(falcbase + F_REG(FMR5, ch),
+				   cpc_readb(falcbase + F_REG(FMR5, ch)) | XSP_TT0);
+		}
+		falc_open_all_timeslots(card, ch);
+		pfalc->loop_active = 2;
+	} else {
+		cpc_writeb(falcbase + F_REG(FMR2, ch),
+			   cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_PLB);
+		if (conf->media == IF_IFACE_T1) {
+			cpc_writeb(falcbase + F_REG(FMR4, ch),
+				   cpc_readb(falcbase + F_REG(FMR4, ch)) & ~FMR4_TM);
+		} else {
+			cpc_writeb(falcbase + F_REG(FMR5, ch),
+				   cpc_readb(falcbase + F_REG(FMR5, ch)) & ~XSP_TT0);
+		}
+		pfalc->sync = 0;
+		cpc_writeb(falcbase + card->hw.cpld_reg2,
+			   cpc_readb(falcbase + card->hw.cpld_reg2) &
+			   ~(CPLD_REG2_FALC_LED2 << (2 * ch)));
+		pfalc->active = 0;
+		falc_issue_cmd(card, ch, CMDR_XRES);
+		pfalc->loop_active = 0;
+	}
+}
+
+/*----------------------------------------------------------------------------
+ * turn_off_xlu
+ *----------------------------------------------------------------------------
+ * Description:	Turns XLU bit off in the proper register
+ *----------------------------------------------------------------------------
+ */
+static void turn_off_xlu(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	void __iomem *falcbase = card->hw.falcbase;
+
+	if (conf->media == IF_IFACE_T1) {
+		cpc_writeb(falcbase + F_REG(FMR5, ch),
+			   cpc_readb(falcbase + F_REG(FMR5, ch)) & ~FMR5_XLU);
+	} else {
+		cpc_writeb(falcbase + F_REG(FMR3, ch),
+			   cpc_readb(falcbase + F_REG(FMR3, ch)) & ~FMR3_XLU);
+	}
+}
+
+/*----------------------------------------------------------------------------
+ * turn_off_xld
+ *----------------------------------------------------------------------------
+ * Description: Turns XLD bit off in the proper register
+ *----------------------------------------------------------------------------
+ */
+static void turn_off_xld(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	void __iomem *falcbase = card->hw.falcbase;
+
+	if (conf->media == IF_IFACE_T1) {
+		cpc_writeb(falcbase + F_REG(FMR5, ch),
+			   cpc_readb(falcbase + F_REG(FMR5, ch)) & ~FMR5_XLD);
+	} else {
+		cpc_writeb(falcbase + F_REG(FMR3, ch),
+			   cpc_readb(falcbase + F_REG(FMR3, ch)) & ~FMR3_XLD);
+	}
+}
+
+/*----------------------------------------------------------------------------
+ * falc_generate_loop_up_code
+ *----------------------------------------------------------------------------
+ * Description:	This routine writes the proper FALC chip register in order
+ *		to generate a LOOP activation code over a T1/E1 line.
+ *----------------------------------------------------------------------------
+ */
+static void falc_generate_loop_up_code(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+
+	if (conf->media == IF_IFACE_T1) {
+		cpc_writeb(falcbase + F_REG(FMR5, ch),
+			   cpc_readb(falcbase + F_REG(FMR5, ch)) | FMR5_XLU);
+	} else {
+		cpc_writeb(falcbase + F_REG(FMR3, ch),
+			   cpc_readb(falcbase + F_REG(FMR3, ch)) | FMR3_XLU);
+	}
+	// EVENT_FALC_ABNORMAL
+	if (conf->media == IF_IFACE_T1) {
+		/* Disable this interrupt as it may otherwise interfere with 
+		 * other working boards. */
+		cpc_writeb(falcbase + F_REG(IMR0, ch),
+			   cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN);
+	}
+	falc_disable_comm(card, ch);
+	// EVENT_FALC_ABNORMAL
+	pfalc->loop_gen = 1;
+}
+
+/*----------------------------------------------------------------------------
+ * falc_generate_loop_down_code
+ *----------------------------------------------------------------------------
+ * Description:	This routine writes the proper FALC chip register in order
+ *		to generate a LOOP deactivation code over a T1/E1 line.
+ *----------------------------------------------------------------------------
+ */
+static void falc_generate_loop_down_code(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+
+	if (conf->media == IF_IFACE_T1) {
+		cpc_writeb(falcbase + F_REG(FMR5, ch),
+			   cpc_readb(falcbase + F_REG(FMR5, ch)) | FMR5_XLD);
+	} else {
+		cpc_writeb(falcbase + F_REG(FMR3, ch),
+			   cpc_readb(falcbase + F_REG(FMR3, ch)) | FMR3_XLD);
+	}
+	pfalc->sync = 0;
+	cpc_writeb(falcbase + card->hw.cpld_reg2,
+		   cpc_readb(falcbase + card->hw.cpld_reg2) &
+		   ~(CPLD_REG2_FALC_LED2 << (2 * ch)));
+	pfalc->active = 0;
+//?    falc_issue_cmd(card, ch, CMDR_XRES);
+	pfalc->loop_gen = 0;
+}
+
+/*----------------------------------------------------------------------------
+ * falc_pattern_test
+ *----------------------------------------------------------------------------
+ * Description:	This routine generates a pattern code and checks
+ *		it on the reception side.
+ *----------------------------------------------------------------------------
+ */
+static void falc_pattern_test(pc300_t * card, int ch, unsigned int activate)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+
+	if (activate) {
+		pfalc->prbs = 1;
+		pfalc->bec = 0;
+		if (conf->media == IF_IFACE_T1) {
+			/* Disable local loop activation/deactivation detect */
+			cpc_writeb(falcbase + F_REG(IMR3, ch),
+				   cpc_readb(falcbase + F_REG(IMR3, ch)) | IMR3_LLBSC);
+		} else {
+			/* Disable local loop activation/deactivation detect */
+			cpc_writeb(falcbase + F_REG(IMR1, ch),
+				   cpc_readb(falcbase + F_REG(IMR1, ch)) | IMR1_LLBSC);
+		}
+		/* Activates generation and monitoring of PRBS 
+		 * (Pseudo Random Bit Sequence) */
+		cpc_writeb(falcbase + F_REG(LCR1, ch),
+			   cpc_readb(falcbase + F_REG(LCR1, ch)) | LCR1_EPRM | LCR1_XPRBS);
+	} else {
+		pfalc->prbs = 0;
+		/* Deactivates generation and monitoring of PRBS 
+		 * (Pseudo Random Bit Sequence) */
+		cpc_writeb(falcbase + F_REG(LCR1, ch),
+			   cpc_readb(falcbase+F_REG(LCR1,ch)) & ~(LCR1_EPRM | LCR1_XPRBS));
+		if (conf->media == IF_IFACE_T1) {
+			/* Enable local loop activation/deactivation detect */
+			cpc_writeb(falcbase + F_REG(IMR3, ch),
+				   cpc_readb(falcbase + F_REG(IMR3, ch)) & ~IMR3_LLBSC);
+		} else {
+			/* Enable local loop activation/deactivation detect */
+			cpc_writeb(falcbase + F_REG(IMR1, ch),
+				   cpc_readb(falcbase + F_REG(IMR1, ch)) & ~IMR1_LLBSC);
+		}
+	}
+}
+
+/*----------------------------------------------------------------------------
+ * falc_pattern_test_error
+ *----------------------------------------------------------------------------
+ * Description:	This routine returns the bit error counter value
+ *----------------------------------------------------------------------------
+ */
+static u16 falc_pattern_test_error(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	falc_t *pfalc = (falc_t *) & chan->falc;
+
+	return pfalc->bec;
+}
+
+/**********************************/
+/***   Net Interface Routines   ***/
+/**********************************/
+
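+/*
+ * cpc_trace: copies a frame into a new skb prefixed with a 10-byte
+ * "name[R/T]: " header and feeds it to netif_rx() with protocol
+ * ETH_P_CUST, so it can be captured by a monitoring tool (the changelog
+ * above mentions pc300util for this).
+ */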
+static void
+cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx)
+{
+	struct sk_buff *skb;
+
+	if ((skb = dev_alloc_skb(10 + skb_main->len)) == NULL) {
+		printk("%s: out of memory\n", dev->name);
+		return;
+	}
+	skb_put(skb, 10 + skb_main->len);
+
+	skb->dev = dev;
+	skb->protocol = htons(ETH_P_CUST);
+	skb_reset_mac_header(skb);
+	skb->pkt_type = PACKET_HOST;
+	skb->len = 10 + skb_main->len;
+
+	skb_copy_to_linear_data(skb, dev->name, 5);
+	skb->data[5] = '[';
+	skb->data[6] = rx_tx;
+	skb->data[7] = ']';
+	skb->data[8] = ':';
+	skb->data[9] = ' ';
+	skb_copy_from_linear_data(skb_main, &skb->data[10], skb_main->len);
+
+	netif_rx(skb);
+}
+
+static void cpc_tx_timeout(struct net_device *dev)
+{
+	pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
+	pc300ch_t *chan = (pc300ch_t *) d->chan;
+	pc300_t *card = (pc300_t *) chan->card;
+	int ch = chan->channel;
+	unsigned long flags;
+	u8 ilar;
+
+	dev->stats.tx_errors++;
+	dev->stats.tx_aborted_errors++;
+	CPC_LOCK(card, flags);
+	if ((ilar = cpc_readb(card->hw.scabase + ILAR)) != 0) {
+		printk("%s: ILAR=0x%x\n", dev->name, ilar);
+		cpc_writeb(card->hw.scabase + ILAR, ilar);
+		cpc_writeb(card->hw.scabase + DMER, 0x80);
+	}
+	if (card->hw.type == PC300_TE) {
+		cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+			   cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) &
+			   ~(CPLD_REG2_FALC_LED1 << (2 * ch)));
+	}
+	dev->trans_start = jiffies; /* prevent tx timeout */
+	CPC_UNLOCK(card, flags);
+	netif_wake_queue(dev);
+}
+
+static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
+	pc300ch_t *chan = (pc300ch_t *) d->chan;
+	pc300_t *card = (pc300_t *) chan->card;
+	int ch = chan->channel;
+	unsigned long flags;
+#ifdef PC300_DEBUG_TX
+	int i;
+#endif
+
+	if (!netif_carrier_ok(dev)) {
+		/* DCD must be OFF: drop packet */
+		dev_kfree_skb(skb);
+		dev->stats.tx_errors++;
+		dev->stats.tx_carrier_errors++;
+		return 0;
+	} else if (cpc_readb(card->hw.scabase + M_REG(ST3, ch)) & ST3_DCD) {
+		printk("%s: DCD is OFF. Going administrative down.\n", dev->name);
+		dev->stats.tx_errors++;
+		dev->stats.tx_carrier_errors++;
+		dev_kfree_skb(skb);
+		netif_carrier_off(dev);
+		CPC_LOCK(card, flags);
+		cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_BUF_CLR);
+		if (card->hw.type == PC300_TE) {
+			cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+				   cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) & 
+				   			~(CPLD_REG2_FALC_LED1 << (2 * ch)));
+		}
+		CPC_UNLOCK(card, flags);
+		netif_wake_queue(dev);
+		return 0;
+	}
+
+	/* Write buffer to DMA buffers */
+	if (dma_buf_write(card, ch, (u8 *)skb->data, skb->len) != 0) {
+//		printk("%s: write error. Dropping TX packet.\n", dev->name);
+		netif_stop_queue(dev);
+		dev_kfree_skb(skb);
+		dev->stats.tx_errors++;
+		dev->stats.tx_dropped++;
+		return 0;
+	}
+#ifdef PC300_DEBUG_TX
+	printk("%s T:", dev->name);
+	for (i = 0; i < skb->len; i++)
+		printk(" %02x", *(skb->data + i));
+	printk("\n");
+#endif
+
+	if (d->trace_on) {
+		cpc_trace(dev, skb, 'T');
+	}
+
+	/* Start transmission */
+	CPC_LOCK(card, flags);
+	/* Stop the queue when fewer than two TX descriptors remain free */
+	if (card->chan[ch].nfree_tx_bd <= 1) {
+		netif_stop_queue(dev);
+	}
+	cpc_writel(card->hw.scabase + DTX_REG(EDAL, ch),
+		   TX_BD_ADDR(ch, chan->tx_next_bd));
+	cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_ENA);
+	cpc_writeb(card->hw.scabase + DSR_TX(ch), DSR_DE);
+	if (card->hw.type == PC300_TE) {
+		cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+			   cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) |
+			   (CPLD_REG2_FALC_LED1 << (2 * ch)));
+	}
+	CPC_UNLOCK(card, flags);
+	dev_kfree_skb(skb);
+
+	return 0;
+}
+
+static void cpc_net_rx(struct net_device *dev)
+{
+	pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
+	pc300ch_t *chan = (pc300ch_t *) d->chan;
+	pc300_t *card = (pc300_t *) chan->card;
+	int ch = chan->channel;
+#ifdef PC300_DEBUG_RX
+	int i;
+#endif
+	int rxb;
+	struct sk_buff *skb;
+
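+	/* Drain every frame the RX DMA has completed; dma_get_rx_frame_size()
+	 * returns -1 once no further complete frame is pending. */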
+	while (1) {
+		if ((rxb = dma_get_rx_frame_size(card, ch)) == -1)
+			return;
+
+		if (!netif_carrier_ok(dev)) {
+			/* DCD must be OFF: drop packet */
+		    printk("%s : DCD is OFF - drop %d rx bytes\n", dev->name, rxb); 
+			skb = NULL;
+		} else {
+			if (rxb > (dev->mtu + 40)) { /* allow room for link-layer headers */
+				printk("%s : MTU exceeded %d\n", dev->name, rxb); 
+				skb = NULL;
+			} else {
+				skb = dev_alloc_skb(rxb);
+				if (skb == NULL) {
+					printk("%s: Memory squeeze!!\n", dev->name);
+					return;
+				}
+				skb->dev = dev;
+			}
+		}
+
+		if (((rxb = dma_buf_read(card, ch, skb)) <= 0) || (skb == NULL)) {
+#ifdef PC300_DEBUG_RX
+			printk("%s: rxb = %x\n", dev->name, rxb);
+#endif
+			if ((skb == NULL) && (rxb > 0)) {
+				/* rxb > dev->mtu */
+				dev->stats.rx_errors++;
+				dev->stats.rx_length_errors++;
+				continue;
+			}
+
+			if (rxb < 0) {	/* Invalid frame */
+				rxb = -rxb;
+				if (rxb & DST_OVR) {
+					dev->stats.rx_errors++;
+					dev->stats.rx_fifo_errors++;
+				}
+				if (rxb & DST_CRC) {
+					dev->stats.rx_errors++;
+					dev->stats.rx_crc_errors++;
+				}
+				if (rxb & (DST_RBIT | DST_SHRT | DST_ABT)) {
+					dev->stats.rx_errors++;
+					dev->stats.rx_frame_errors++;
+				}
+			}
+			if (skb) {
+				dev_kfree_skb_irq(skb);
+			}
+			continue;
+		}
+
+		dev->stats.rx_bytes += rxb;
+
+#ifdef PC300_DEBUG_RX
+		printk("%s R:", dev->name);
+		for (i = 0; i < skb->len; i++)
+			printk(" %02x", *(skb->data + i));
+		printk("\n");
+#endif
+		if (d->trace_on) {
+			cpc_trace(dev, skb, 'R');
+		}
+		dev->stats.rx_packets++;
+		skb->protocol = hdlc_type_trans(skb, dev);
+		netif_rx(skb);
+	}
+}
+
+/************************************/
+/***   PC300 Interrupt Routines   ***/
+/************************************/
+static void sca_tx_intr(pc300dev_t *dev)
+{
+	pc300ch_t *chan = (pc300ch_t *)dev->chan; 
+	pc300_t *card = (pc300_t *)chan->card; 
+	int ch = chan->channel; 
+	volatile pcsca_bd_t __iomem * ptdescr; 
+
+    /* Clean up descriptors from previous transmission */
+	ptdescr = (card->hw.rambase +
+						TX_BD_ADDR(ch,chan->tx_first_bd));
+	while ((cpc_readl(card->hw.scabase + DTX_REG(CDAL,ch)) !=
+		TX_BD_ADDR(ch,chan->tx_first_bd)) &&
+	       (cpc_readb(&ptdescr->status) & DST_OSB)) {
+		dev->dev->stats.tx_packets++;
+		dev->dev->stats.tx_bytes += cpc_readw(&ptdescr->len);
+		cpc_writeb(&ptdescr->status, DST_OSB);
+		cpc_writew(&ptdescr->len, 0);
+		chan->nfree_tx_bd++;
+		chan->tx_first_bd = (chan->tx_first_bd + 1) & (N_DMA_TX_BUF - 1);
+		ptdescr = (card->hw.rambase + TX_BD_ADDR(ch,chan->tx_first_bd));
+    }
+
+#ifdef CONFIG_PC300_MLPPP
+	if (chan->conf.proto == PC300_PROTO_MLPPP) {
+			cpc_tty_trigger_poll(dev);
+	} else {
+#endif
+	/* Tell the upper layer we are ready to transmit more packets */
+		netif_wake_queue(dev->dev);
+#ifdef CONFIG_PC300_MLPPP
+	}
+#endif
+}
+
+static void sca_intr(pc300_t * card)
+{
+	void __iomem *scabase = card->hw.scabase;
+	volatile u32 status;
+	int ch;
+	int intr_count = 0;
+	unsigned char dsr_rx;
+
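+	/* Re-read ISR0 until the SCA reports no pending interrupts, servicing
+	 * RX, TX and MSCI events for every channel on the board. */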
+	while ((status = cpc_readl(scabase + ISR0)) != 0) {
+		for (ch = 0; ch < card->hw.nchan; ch++) {
+			pc300ch_t *chan = &card->chan[ch];
+			pc300dev_t *d = &chan->d;
+			struct net_device *dev = d->dev;
+
+			spin_lock(&card->card_lock);
+
+	    /**** Reception ****/
+			if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) {
+				u8 drx_stat = cpc_readb(scabase + DSR_RX(ch));
+
+				/* Clear RX interrupts */
+				cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE);
+
+#ifdef PC300_DEBUG_INTR
+				printk ("sca_intr: RX intr chan[%d] (st=0x%08lx, dsr=0x%02x)\n",
+					 ch, status, drx_stat);
+#endif
+				if (status & IR0_DRX(IR0_DMIA, ch)) {
+					if (drx_stat & DSR_BOF) {
+#ifdef CONFIG_PC300_MLPPP
+						if (chan->conf.proto == PC300_PROTO_MLPPP) {
+							/* MLPPP: pause RX DMA and hand the data to the TTY layer */
+							if ((cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) {
+								rx_dma_stop(card, ch);
+							}
+							cpc_tty_receive(d);
+							rx_dma_start(card, ch);
+						} else 
+#endif
+						{
+							if ((cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) {
+								rx_dma_stop(card, ch);
+							}
+							cpc_net_rx(dev);
+							/* RX buffer overflow: account for it and reset the RX ring */
+							dev->stats.rx_errors++;
+							dev->stats.rx_over_errors++;
+							chan->rx_first_bd = 0;
+							chan->rx_last_bd = N_DMA_RX_BUF - 1;
+							rx_dma_start(card, ch);
+						}
+					}
+				}
+				if (status & IR0_DRX(IR0_DMIB, ch)) {
+					if (drx_stat & DSR_EOM) {
+						if (card->hw.type == PC300_TE) {
+							cpc_writeb(card->hw.falcbase +
+								   card->hw.cpld_reg2,
+								   cpc_readb (card->hw.falcbase +
+								    	card->hw.cpld_reg2) |
+								   (CPLD_REG2_FALC_LED1 << (2 * ch)));
+						}
+#ifdef CONFIG_PC300_MLPPP
+						if (chan->conf.proto == PC300_PROTO_MLPPP) {
+							/* MLPPP: hand the received frame to the TTY layer */
+							cpc_tty_receive(d);
+						} else {
+							cpc_net_rx(dev);
+						}
+#else
+						cpc_net_rx(dev);
+#endif
+						if (card->hw.type == PC300_TE) {
+							cpc_writeb(card->hw.falcbase +
+								   card->hw.cpld_reg2,
+								   cpc_readb (card->hw.falcbase +
+								    		card->hw.cpld_reg2) &
+								   ~ (CPLD_REG2_FALC_LED1 << (2 * ch)));
+						}
+					}
+				}
+				if (!(dsr_rx = cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) {
+#ifdef PC300_DEBUG_INTR
+		printk("%s: RX intr chan[%d] (st=0x%08lx, dsr=0x%02x, dsr2=0x%02x)\n",
+			dev->name, ch, status, drx_stat, dsr_rx);
+#endif
+					cpc_writeb(scabase + DSR_RX(ch), (dsr_rx | DSR_DE) & 0xfe);
+				}
+			}
+
+	    /**** Transmission ****/
+			if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) {
+				u8 dtx_stat = cpc_readb(scabase + DSR_TX(ch));
+
+				/* Clear TX interrupts */
+				cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE);
+
+#ifdef PC300_DEBUG_INTR
+				printk ("sca_intr: TX intr chan[%d] (st=0x%08lx, dsr=0x%02x)\n",
+					 ch, status, dtx_stat);
+#endif
+				if (status & IR0_DTX(IR0_EFT, ch)) {
+					if (dtx_stat & DSR_UDRF) {
+						if (cpc_readb (scabase + M_REG(TBN, ch)) != 0) {
+							cpc_writeb(scabase + M_REG(CMD,ch), CMD_TX_BUF_CLR);
+						}
+						if (card->hw.type == PC300_TE) {
+							cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+								   cpc_readb (card->hw.falcbase + 
+										   card->hw.cpld_reg2) &
+								   ~ (CPLD_REG2_FALC_LED1 << (2 * ch)));
+						}
+						dev->stats.tx_errors++;
+						dev->stats.tx_fifo_errors++;
+						sca_tx_intr(d);
+					}
+				}
+				if (status & IR0_DTX(IR0_DMIA, ch)) {
+					if (dtx_stat & DSR_BOF) {
+					}
+				}
+				if (status & IR0_DTX(IR0_DMIB, ch)) {
+					if (dtx_stat & DSR_EOM) {
+						if (card->hw.type == PC300_TE) {
+							cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+								   cpc_readb (card->hw.falcbase +
+								    			card->hw.cpld_reg2) &
+								   ~ (CPLD_REG2_FALC_LED1 << (2 * ch)));
+						}
+						sca_tx_intr(d);
+					}
+				}
+			}
+
+	    /**** MSCI ****/
+			if (status & IR0_M(IR0_RXINTA, ch)) {
+				u8 st1 = cpc_readb(scabase + M_REG(ST1, ch));
+
+				/* Clear MSCI interrupts */
+				cpc_writeb(scabase + M_REG(ST1, ch), st1);
+
+#ifdef PC300_DEBUG_INTR
+				printk("sca_intr: MSCI intr chan[%d] (st=0x%08lx, st1=0x%02x)\n",
+					 ch, status, st1);
+#endif
+				if (st1 & ST1_CDCD) {	/* DCD changed */
+					if (cpc_readb(scabase + M_REG(ST3, ch)) & ST3_DCD) {
+						printk ("%s: DCD is OFF. Going administrative down.\n",
+							 dev->name);
+#ifdef CONFIG_PC300_MLPPP
+						if (chan->conf.proto != PC300_PROTO_MLPPP) {
+							netif_carrier_off(dev);
+						}
+#else
+						netif_carrier_off(dev);
+
+#endif
+						card->chan[ch].d.line_off++;
+					} else {	/* DCD = 1 */
+						printk ("%s: DCD is ON. Going administrative up.\n",
+							 dev->name);
+#ifdef CONFIG_PC300_MLPPP
+						if (chan->conf.proto != PC300_PROTO_MLPPP)
+							/* only non-MLPPP (network) interfaces report carrier */
+#endif
+							netif_carrier_on(dev);
+						card->chan[ch].d.line_on++;
+					}
+				}
+			}
+			spin_unlock(&card->card_lock);
+		}
+		if (++intr_count == 10)
+			/* Too much work on this board. Force exit */
+			break;
+	}
+}
+
+static void falc_t1_loop_detection(pc300_t *card, int ch, u8 frs1)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+
+	if (((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_XPRBS) == 0) &&
+	    !pfalc->loop_gen) {
+		if (frs1 & FRS1_LLBDD) {
+			// A Line Loop Back Deactivation signal detected
+			if (pfalc->loop_active) {
+				falc_remote_loop(card, ch, 0);
+			}
+		} else {
+			if ((frs1 & FRS1_LLBAD) &&
+			    ((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_EPRM) == 0)) {
+				// A Line Loop Back Activation signal detected  
+				if (!pfalc->loop_active) {
+					falc_remote_loop(card, ch, 1);
+				}
+			}
+		}
+	}
+}
+
+static void falc_e1_loop_detection(pc300_t *card, int ch, u8 rsp)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+
+	if (((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_XPRBS) == 0) &&
+	    !pfalc->loop_gen) {
+		if (rsp & RSP_LLBDD) {
+			// A Line Loop Back Deactivation signal detected
+			if (pfalc->loop_active) {
+				falc_remote_loop(card, ch, 0);
+			}
+		} else {
+			if ((rsp & RSP_LLBAD) &&
+			    ((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_EPRM) == 0)) {
+				// A Line Loop Back Activation signal detected  
+				if (!pfalc->loop_active) {
+					falc_remote_loop(card, ch, 1);
+				}
+			}
+		}
+	}
+}
+
+static void falc_t1_intr(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+	u8 isr0, isr3, gis;
+	u8 dummy;
+
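+	/* Service T1 interrupt sources until the FALC Global Interrupt Status
+	 * register reads back clear. */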
+	while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) {
+		if (gis & GIS_ISR0) {
+			isr0 = cpc_readb(falcbase + F_REG(FISR0, ch));
+			if (isr0 & FISR0_PDEN) {
+				/* Read the bit to clear the situation */
+				if (cpc_readb(falcbase + F_REG(FRS1, ch)) &
+				    FRS1_PDEN) {
+					pfalc->pden++;
+				}
+			}
+		}
+
+		if (gis & GIS_ISR1) {
+			dummy = cpc_readb(falcbase + F_REG(FISR1, ch));
+		}
+
+		if (gis & GIS_ISR2) {
+			dummy = cpc_readb(falcbase + F_REG(FISR2, ch));
+		}
+
+		if (gis & GIS_ISR3) {
+			isr3 = cpc_readb(falcbase + F_REG(FISR3, ch));
+			if (isr3 & FISR3_SEC) {
+				pfalc->sec++;
+				falc_update_stats(card, ch);
+				falc_check_status(card, ch,
+						  cpc_readb(falcbase + F_REG(FRS0, ch)));
+			}
+			if (isr3 & FISR3_ES) {
+				pfalc->es++;
+			}
+			if (isr3 & FISR3_LLBSC) {
+				falc_t1_loop_detection(card, ch,
+						       cpc_readb(falcbase + F_REG(FRS1, ch)));
+			}
+		}
+	}
+}
+
+static void falc_e1_intr(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	void __iomem *falcbase = card->hw.falcbase;
+	u8 isr1, isr2, isr3, gis, rsp;
+	u8 dummy;
+
+	while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) {
+		rsp = cpc_readb(falcbase + F_REG(RSP, ch));
+
+		if (gis & GIS_ISR0) {
+			dummy = cpc_readb(falcbase + F_REG(FISR0, ch));
+		}
+		if (gis & GIS_ISR1) {
+			isr1 = cpc_readb(falcbase + F_REG(FISR1, ch));
+			if (isr1 & FISR1_XMB) {
+				if ((pfalc->xmb_cause & 2) &&
+				    pfalc->multiframe_mode) {
+					if (cpc_readb (falcbase + F_REG(FRS0, ch)) & 
+									(FRS0_LOS | FRS0_AIS | FRS0_LFA)) {
+						cpc_writeb(falcbase + F_REG(XSP, ch),
+							   cpc_readb(falcbase + F_REG(XSP, ch))
+							   & ~XSP_AXS);
+					} else {
+						cpc_writeb(falcbase + F_REG(XSP, ch),
+							   cpc_readb(falcbase + F_REG(XSP, ch))
+							   | XSP_AXS);
+					}
+				}
+				pfalc->xmb_cause = 0;
+				cpc_writeb(falcbase + F_REG(IMR1, ch),
+					   cpc_readb(falcbase + F_REG(IMR1, ch)) | IMR1_XMB);
+			}
+			if (isr1 & FISR1_LLBSC) {
+				falc_e1_loop_detection(card, ch, rsp);
+			}
+		}
+		if (gis & GIS_ISR2) {
+			isr2 = cpc_readb(falcbase + F_REG(FISR2, ch));
+			if (isr2 & FISR2_T400MS) {
+				cpc_writeb(falcbase + F_REG(XSW, ch),
+					   cpc_readb(falcbase + F_REG(XSW, ch)) | XSW_XRA);
+			}
+			if (isr2 & FISR2_MFAR) {
+				cpc_writeb(falcbase + F_REG(XSW, ch),
+					   cpc_readb(falcbase + F_REG(XSW, ch)) & ~XSW_XRA);
+			}
+			if (isr2 & (FISR2_FAR | FISR2_LFA | FISR2_AIS | FISR2_LOS)) {
+				pfalc->xmb_cause |= 2;
+				cpc_writeb(falcbase + F_REG(IMR1, ch),
+					   cpc_readb(falcbase + F_REG(IMR1, ch)) & ~IMR1_XMB);
+			}
+		}
+		if (gis & GIS_ISR3) {
+			isr3 = cpc_readb(falcbase + F_REG(FISR3, ch));
+			if (isr3 & FISR3_SEC) {
+				pfalc->sec++;
+				falc_update_stats(card, ch);
+				falc_check_status(card, ch,
+						  cpc_readb(falcbase + F_REG(FRS0, ch)));
+			}
+			if (isr3 & FISR3_ES) {
+				pfalc->es++;
+			}
+		}
+	}
+}
+
+static void falc_intr(pc300_t * card)
+{
+	int ch;
+
+	for (ch = 0; ch < card->hw.nchan; ch++) {
+		pc300ch_t *chan = &card->chan[ch];
+		pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+
+		if (conf->media == IF_IFACE_T1) {
+			falc_t1_intr(card, ch);
+		} else {
+			falc_e1_intr(card, ch);
+		}
+	}
+}
+
+static irqreturn_t cpc_intr(int irq, void *dev_id)
+{
+	pc300_t *card = dev_id;
+	volatile u8 plx_status;
+
+	if (!card) {
+#ifdef PC300_DEBUG_INTR
+		printk("cpc_intr: spurious intr %d\n", irq);
+#endif
+		return IRQ_NONE;		/* spurious intr */
+	}
+
+	if (!card->hw.rambase) {
+#ifdef PC300_DEBUG_INTR
+		printk("cpc_intr: spurious intr2 %d\n", irq);
+#endif
+		return IRQ_NONE;		/* spurious intr */
+	}
+
+	switch (card->hw.type) {
+		case PC300_RSV:
+		case PC300_X21:
+			sca_intr(card);
+			break;
+
+		case PC300_TE:
+			while ( (plx_status = (cpc_readb(card->hw.plxbase + card->hw.intctl_reg) &
+				 (PLX_9050_LINT1_STATUS | PLX_9050_LINT2_STATUS))) != 0) {
+				if (plx_status & PLX_9050_LINT1_STATUS) {	/* SCA Interrupt */
+					sca_intr(card);
+				}
+				if (plx_status & PLX_9050_LINT2_STATUS) {	/* FALC Interrupt */
+					falc_intr(card);
+				}
+			}
+			break;
+	}
+	return IRQ_HANDLED;
+}
+
+static void cpc_sca_status(pc300_t * card, int ch)
+{
+	u8 ilar;
+	void __iomem *scabase = card->hw.scabase;
+	unsigned long flags;
+
+	tx_dma_buf_check(card, ch);
+	rx_dma_buf_check(card, ch);
+	ilar = cpc_readb(scabase + ILAR);
+	printk ("ILAR=0x%02x, WCRL=0x%02x, PCR=0x%02x, BTCR=0x%02x, BOLR=0x%02x\n",
+		 ilar, cpc_readb(scabase + WCRL), cpc_readb(scabase + PCR),
+		 cpc_readb(scabase + BTCR), cpc_readb(scabase + BOLR));
+	printk("TX_CDA=0x%08x, TX_EDA=0x%08x\n",
+	       cpc_readl(scabase + DTX_REG(CDAL, ch)),
+	       cpc_readl(scabase + DTX_REG(EDAL, ch)));
+	printk("RX_CDA=0x%08x, RX_EDA=0x%08x, BFL=0x%04x\n",
+	       cpc_readl(scabase + DRX_REG(CDAL, ch)),
+	       cpc_readl(scabase + DRX_REG(EDAL, ch)),
+	       cpc_readw(scabase + DRX_REG(BFLL, ch)));
+	printk("DMER=0x%02x, DSR_TX=0x%02x, DSR_RX=0x%02x\n",
+	       cpc_readb(scabase + DMER), cpc_readb(scabase + DSR_TX(ch)),
+	       cpc_readb(scabase + DSR_RX(ch)));
+	printk("DMR_TX=0x%02x, DMR_RX=0x%02x, DIR_TX=0x%02x, DIR_RX=0x%02x\n",
+	       cpc_readb(scabase + DMR_TX(ch)), cpc_readb(scabase + DMR_RX(ch)),
+	       cpc_readb(scabase + DIR_TX(ch)),
+	       cpc_readb(scabase + DIR_RX(ch)));
+	printk("DCR_TX=0x%02x, DCR_RX=0x%02x, FCT_TX=0x%02x, FCT_RX=0x%02x\n",
+	       cpc_readb(scabase + DCR_TX(ch)), cpc_readb(scabase + DCR_RX(ch)),
+	       cpc_readb(scabase + FCT_TX(ch)),
+	       cpc_readb(scabase + FCT_RX(ch)));
+	printk("MD0=0x%02x, MD1=0x%02x, MD2=0x%02x, MD3=0x%02x, IDL=0x%02x\n",
+	       cpc_readb(scabase + M_REG(MD0, ch)),
+	       cpc_readb(scabase + M_REG(MD1, ch)),
+	       cpc_readb(scabase + M_REG(MD2, ch)),
+	       cpc_readb(scabase + M_REG(MD3, ch)),
+	       cpc_readb(scabase + M_REG(IDL, ch)));
+	printk("CMD=0x%02x, SA0=0x%02x, SA1=0x%02x, TFN=0x%02x, CTL=0x%02x\n",
+	       cpc_readb(scabase + M_REG(CMD, ch)),
+	       cpc_readb(scabase + M_REG(SA0, ch)),
+	       cpc_readb(scabase + M_REG(SA1, ch)),
+	       cpc_readb(scabase + M_REG(TFN, ch)),
+	       cpc_readb(scabase + M_REG(CTL, ch)));
+	printk("ST0=0x%02x, ST1=0x%02x, ST2=0x%02x, ST3=0x%02x, ST4=0x%02x\n",
+	       cpc_readb(scabase + M_REG(ST0, ch)),
+	       cpc_readb(scabase + M_REG(ST1, ch)),
+	       cpc_readb(scabase + M_REG(ST2, ch)),
+	       cpc_readb(scabase + M_REG(ST3, ch)),
+	       cpc_readb(scabase + M_REG(ST4, ch)));
+	printk ("CST0=0x%02x, CST1=0x%02x, CST2=0x%02x, CST3=0x%02x, FST=0x%02x\n",
+		 cpc_readb(scabase + M_REG(CST0, ch)),
+		 cpc_readb(scabase + M_REG(CST1, ch)),
+		 cpc_readb(scabase + M_REG(CST2, ch)),
+		 cpc_readb(scabase + M_REG(CST3, ch)),
+		 cpc_readb(scabase + M_REG(FST, ch)));
+	printk("TRC0=0x%02x, TRC1=0x%02x, RRC=0x%02x, TBN=0x%02x, RBN=0x%02x\n",
+	       cpc_readb(scabase + M_REG(TRC0, ch)),
+	       cpc_readb(scabase + M_REG(TRC1, ch)),
+	       cpc_readb(scabase + M_REG(RRC, ch)),
+	       cpc_readb(scabase + M_REG(TBN, ch)),
+	       cpc_readb(scabase + M_REG(RBN, ch)));
+	printk("TFS=0x%02x, TNR0=0x%02x, TNR1=0x%02x, RNR=0x%02x\n",
+	       cpc_readb(scabase + M_REG(TFS, ch)),
+	       cpc_readb(scabase + M_REG(TNR0, ch)),
+	       cpc_readb(scabase + M_REG(TNR1, ch)),
+	       cpc_readb(scabase + M_REG(RNR, ch)));
+	printk("TCR=0x%02x, RCR=0x%02x, TNR1=0x%02x, RNR=0x%02x\n",
+	       cpc_readb(scabase + M_REG(TCR, ch)),
+	       cpc_readb(scabase + M_REG(RCR, ch)),
+	       cpc_readb(scabase + M_REG(TNR1, ch)),
+	       cpc_readb(scabase + M_REG(RNR, ch)));
+	printk("TXS=0x%02x, RXS=0x%02x, EXS=0x%02x, TMCT=0x%02x, TMCR=0x%02x\n",
+	       cpc_readb(scabase + M_REG(TXS, ch)),
+	       cpc_readb(scabase + M_REG(RXS, ch)),
+	       cpc_readb(scabase + M_REG(EXS, ch)),
+	       cpc_readb(scabase + M_REG(TMCT, ch)),
+	       cpc_readb(scabase + M_REG(TMCR, ch)));
+	printk("IE0=0x%02x, IE1=0x%02x, IE2=0x%02x, IE4=0x%02x, FIE=0x%02x\n",
+	       cpc_readb(scabase + M_REG(IE0, ch)),
+	       cpc_readb(scabase + M_REG(IE1, ch)),
+	       cpc_readb(scabase + M_REG(IE2, ch)),
+	       cpc_readb(scabase + M_REG(IE4, ch)),
+	       cpc_readb(scabase + M_REG(FIE, ch)));
+	printk("IER0=0x%08x\n", cpc_readl(scabase + IER0));
+
+	if (ilar != 0) {
+		CPC_LOCK(card, flags);
+		cpc_writeb(scabase + ILAR, ilar);
+		cpc_writeb(scabase + DMER, 0x80);
+		CPC_UNLOCK(card, flags);
+	}
+}
+
+static void cpc_falc_status(pc300_t * card, int ch)
+{
+	pc300ch_t *chan = &card->chan[ch];
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	unsigned long flags;
+
+	CPC_LOCK(card, flags);
+	printk("CH%d:   %s %s  %d channels\n",
+	       ch, (pfalc->sync ? "SYNC" : ""), (pfalc->active ? "ACTIVE" : ""),
+	       pfalc->num_channels);
+
+	printk("        pden=%d,  los=%d,  losr=%d,  lfa=%d,  farec=%d\n",
+	       pfalc->pden, pfalc->los, pfalc->losr, pfalc->lfa, pfalc->farec);
+	printk("        lmfa=%d,  ais=%d,  sec=%d,  es=%d,  rai=%d\n",
+	       pfalc->lmfa, pfalc->ais, pfalc->sec, pfalc->es, pfalc->rai);
+	printk("        bec=%d,  fec=%d,  cvc=%d,  cec=%d,  ebc=%d\n",
+	       pfalc->bec, pfalc->fec, pfalc->cvc, pfalc->cec, pfalc->ebc);
+
+	printk("\n");
+	printk("        STATUS: %s  %s  %s  %s  %s  %s\n",
+	       (pfalc->red_alarm ? "RED" : ""),
+	       (pfalc->blue_alarm ? "BLU" : ""),
+	       (pfalc->yellow_alarm ? "YEL" : ""),
+	       (pfalc->loss_fa ? "LFA" : ""),
+	       (pfalc->loss_mfa ? "LMF" : ""), (pfalc->prbs ? "PRB" : ""));
+	CPC_UNLOCK(card, flags);
+}
+
+static int cpc_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if ((new_mtu < 128) || (new_mtu > PC300_DEF_MTU))
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
+	pc300ch_t *chan = (pc300ch_t *) d->chan;
+	pc300_t *card = (pc300_t *) chan->card;
+	pc300conf_t conf_aux;
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	int ch = chan->channel;
+	void __user *arg = ifr->ifr_data;
+	struct if_settings *settings = &ifr->ifr_settings;
+	void __iomem *scabase = card->hw.scabase;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	switch (cmd) {
+		case SIOCGPC300CONF:
+#ifdef CONFIG_PC300_MLPPP
+			if (conf->proto != PC300_PROTO_MLPPP) {
+				conf->proto = /* FIXME hdlc->proto.id */ 0;
+			}
+#else
+			conf->proto = /* FIXME hdlc->proto.id */ 0;
+#endif
+			memcpy(&conf_aux.conf, conf, sizeof(pc300chconf_t));
+			memcpy(&conf_aux.hw, &card->hw, sizeof(pc300hw_t));
+			if (!arg || 
+				copy_to_user(arg, &conf_aux, sizeof(pc300conf_t))) 
+				return -EINVAL;
+			return 0;
+		case SIOCSPC300CONF:
+			if (!capable(CAP_NET_ADMIN))
+				return -EPERM;
+			if (!arg || 
+				copy_from_user(&conf_aux.conf, arg, sizeof(pc300chconf_t)))
+				return -EINVAL;
+			if (card->hw.cpld_id < 0x02 &&
+			    conf_aux.conf.fr_mode == PC300_FR_UNFRAMED) {
+				/* CPLD_ID < 0x02 doesn't support Unframed E1 */
+				return -EINVAL;
+			}
+#ifdef CONFIG_PC300_MLPPP
+			if (conf_aux.conf.proto == PC300_PROTO_MLPPP) {
+				if (conf->proto != PC300_PROTO_MLPPP) {
+					memcpy(conf, &conf_aux.conf, sizeof(pc300chconf_t));
+					cpc_tty_init(d);	/* init TTY driver */
+				}
+			} else {
+				if (conf_aux.conf.proto == 0xffff) {
+					if (conf->proto == PC300_PROTO_MLPPP){ 
+						/* ifdown interface */
+						cpc_close(dev);
+					}
+				} else {
+					memcpy(conf, &conf_aux.conf, sizeof(pc300chconf_t));
+					/* FIXME hdlc->proto.id = conf->proto; */
+				}
+			}
+#else
+			memcpy(conf, &conf_aux.conf, sizeof(pc300chconf_t));
+			/* FIXME hdlc->proto.id = conf->proto; */
+#endif
+			return 0;
+		case SIOCGPC300STATUS:
+			cpc_sca_status(card, ch);
+			return 0;
+		case SIOCGPC300FALCSTATUS:
+			cpc_falc_status(card, ch);
+			return 0;
+
+		case SIOCGPC300UTILSTATS:
+			{
+				if (!arg) {	/* clear statistics */
+					memset(&dev->stats, 0, sizeof(dev->stats));
+					if (card->hw.type == PC300_TE) {
+						memset(&chan->falc, 0, sizeof(falc_t));
+					}
+				} else {
+					pc300stats_t pc300stats;
+
+					memset(&pc300stats, 0, sizeof(pc300stats_t));
+					pc300stats.hw_type = card->hw.type;
+					pc300stats.line_on = card->chan[ch].d.line_on;
+					pc300stats.line_off = card->chan[ch].d.line_off;
+					memcpy(&pc300stats.gen_stats, &dev->stats,
+					       sizeof(dev->stats));
+					if (card->hw.type == PC300_TE)
+						memcpy(&pc300stats.te_stats,&chan->falc,sizeof(falc_t));
+				    	if (copy_to_user(arg, &pc300stats, sizeof(pc300stats_t)))
+						return -EFAULT;
+				}
+				return 0;
+			}
+
+		case SIOCGPC300UTILSTATUS:
+			{
+				struct pc300status pc300status;
+
+				pc300status.hw_type = card->hw.type;
+				if (card->hw.type == PC300_TE) {
+					pc300status.te_status.sync = chan->falc.sync;
+					pc300status.te_status.red_alarm = chan->falc.red_alarm;
+					pc300status.te_status.blue_alarm = chan->falc.blue_alarm;
+					pc300status.te_status.loss_fa = chan->falc.loss_fa;
+					pc300status.te_status.yellow_alarm = chan->falc.yellow_alarm;
+					pc300status.te_status.loss_mfa = chan->falc.loss_mfa;
+					pc300status.te_status.prbs = chan->falc.prbs;
+				} else {
+					pc300status.gen_status.dcd =
+						!(cpc_readb (scabase + M_REG(ST3, ch)) & ST3_DCD);
+					pc300status.gen_status.cts =
+						!(cpc_readb (scabase + M_REG(ST3, ch)) & ST3_CTS);
+					pc300status.gen_status.rts =
+						!(cpc_readb (scabase + M_REG(CTL, ch)) & CTL_RTS);
+					pc300status.gen_status.dtr =
+						!(cpc_readb (scabase + M_REG(CTL, ch)) & CTL_DTR);
+					/* There is no DSR in HD64572 */
+				}
+				if (!arg ||
+				    copy_to_user(arg, &pc300status, sizeof(pc300status_t)))
+					return -EINVAL;
+				return 0;
+			}
+
+		case SIOCSPC300TRACE:
+			/* Sets/resets a trace_flag for the respective device */
+			if (!arg || copy_from_user(&d->trace_on, arg,sizeof(unsigned char)))
+					return -EINVAL;
+			return 0;
+
+		case SIOCSPC300LOOPBACK:
+			{
+				struct pc300loopback pc300loop;
+
+				/* TE boards only */
+				if (card->hw.type != PC300_TE)
+					return -EINVAL;
+
+				if (!arg || 
+					copy_from_user(&pc300loop, arg, sizeof(pc300loopback_t)))
+						return -EINVAL;
+				switch (pc300loop.loop_type) {
+					case PC300LOCLOOP:	/* Turn the local loop on/off */
+						falc_local_loop(card, ch, pc300loop.loop_on);
+						return 0;
+
+					case PC300REMLOOP:	/* Turn the remote loop on/off */
+						falc_remote_loop(card, ch, pc300loop.loop_on);
+						return 0;
+
+					case PC300PAYLOADLOOP:	/* Turn the payload loop on/off */
+						falc_payload_loop(card, ch, pc300loop.loop_on);
+						return 0;
+
+					case PC300GENLOOPUP:	/* Generate loop UP */
+						if (pc300loop.loop_on) {
+							falc_generate_loop_up_code (card, ch);
+						} else {
+							turn_off_xlu(card, ch);
+						}
+						return 0;
+
+					case PC300GENLOOPDOWN:	/* Generate loop DOWN */
+						if (pc300loop.loop_on) {
+							falc_generate_loop_down_code (card, ch);
+						} else {
+							turn_off_xld(card, ch);
+						}
+						return 0;
+
+					default:
+						return -EINVAL;
+				}
+			}
+
+		case SIOCSPC300PATTERNTEST:
+			/* Turn the pattern test on/off and report the error counter */
+			{
+				struct pc300patterntst pc300patrntst;
+
+				/* TE boards only */
+				if (card->hw.type != PC300_TE)
+					return -EINVAL;
+
+				if (card->hw.cpld_id < 0x02) {
+					/* CPLD_ID < 0x02 doesn't support pattern test */
+					return -EINVAL;
+				}
+
+				if (!arg || 
+					copy_from_user(&pc300patrntst,arg,sizeof(pc300patterntst_t)))
+						return -EINVAL;
+				if (pc300patrntst.patrntst_on == 2) {
+					if (chan->falc.prbs == 0) {
+						falc_pattern_test(card, ch, 1);
+					}
+					pc300patrntst.num_errors =
+						falc_pattern_test_error(card, ch);
+					if (copy_to_user(arg, &pc300patrntst,
+							 sizeof(pc300patterntst_t)))
+							return -EINVAL;
+				} else {
+					falc_pattern_test(card, ch, pc300patrntst.patrntst_on);
+				}
+				return 0;
+			}
+
+		case SIOCWANDEV:
+			switch (ifr->ifr_settings.type) {
+				case IF_GET_IFACE:
+				{
+					const size_t size = sizeof(sync_serial_settings);
+					ifr->ifr_settings.type = conf->media;
+					if (ifr->ifr_settings.size < size) {
+						/* data size wanted */
+						ifr->ifr_settings.size = size;
+						return -ENOBUFS;
+					}
+	
+					if (copy_to_user(settings->ifs_ifsu.sync,
+							 &conf->phys_settings, size)) {
+						return -EFAULT;
+					}
+					return 0;
+				}
+
+				case IF_IFACE_V35:
+				case IF_IFACE_V24:
+				case IF_IFACE_X21:
+				{
+					const size_t size = sizeof(sync_serial_settings);
+
+					if (!capable(CAP_NET_ADMIN)) {
+						return -EPERM;
+					}
+					/* incorrect data len? */
+					if (ifr->ifr_settings.size != size) {
+						return -ENOBUFS;
+					}
+
+					if (copy_from_user(&conf->phys_settings, 
+							   settings->ifs_ifsu.sync, size)) {
+						return -EFAULT;
+					}
+
+					if (conf->phys_settings.loopback) {
+						cpc_writeb(card->hw.scabase + M_REG(MD2, ch),
+							cpc_readb(card->hw.scabase + M_REG(MD2, ch)) | 
+							MD2_LOOP_MIR);
+					}
+					conf->media = ifr->ifr_settings.type;
+					return 0;
+				}
+
+				case IF_IFACE_T1:
+				case IF_IFACE_E1:
+				{
+					const size_t te_size = sizeof(te1_settings);
+					const size_t size = sizeof(sync_serial_settings);
+
+					if (!capable(CAP_NET_ADMIN)) {
+						return -EPERM;
+					}
+
+					/* incorrect data len? */
+					if (ifr->ifr_settings.size != te_size) {
+						return -ENOBUFS;
+					}
+
+					if (copy_from_user(&conf->phys_settings, 
+							   settings->ifs_ifsu.te1, size)) {
+						return -EFAULT;
+					}
+					/* Ignoring HDLC slot_map for now */
+					
+					if (conf->phys_settings.loopback) {
+						cpc_writeb(card->hw.scabase + M_REG(MD2, ch),
+							cpc_readb(card->hw.scabase + M_REG(MD2, ch)) | 
+							MD2_LOOP_MIR);
+					}
+					conf->media = ifr->ifr_settings.type;
+					return 0;
+				}
+				default:
+					return hdlc_ioctl(dev, ifr, cmd);
+			}
+
+		default:
+			return hdlc_ioctl(dev, ifr, cmd);
+	}
+}
+
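+/*
+ * Find the smallest baud-rate generator divider (2^br, br = 0..9) for which
+ * the 8-bit time constant tc = clock / 2^br / rate fits, storing br in
+ * *br_io.  Returns tc (0 for rate == 0), or -1 if the rate is not attainable.
+ */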
+static int clock_rate_calc(u32 rate, u32 clock, int *br_io)
+{
+	int br, tc;
+	int br_pwr, error;
+
+	*br_io = 0;
+
+	if (rate == 0)
+		return 0;
+
+	for (br = 0, br_pwr = 1; br <= 9; br++, br_pwr <<= 1) {
+		if ((tc = clock / br_pwr / rate) <= 0xff) {
+			*br_io = br;
+			break;
+		}
+	}
+
+	if (tc <= 0xff) {
+		if (tc < 1)	/* requested rate is above what the BRG can produce */
+			return -1;
+		/* error = rate actually generated (clock / br_pwr / tc) minus
+		 * the requested rate */
+		error = (int)(clock / br_pwr / tc) - (int)rate;
+		/* Errors bigger than +/- 1% won't be tolerated */
+		if (error < -((int)rate / 100) || error > (int)rate / 100)
+			return -1;
+		else
+			return tc;
+	} else {
+		return -1;
+	}
+}
+
+static int ch_config(pc300dev_t * d)
+{
+	pc300ch_t *chan = (pc300ch_t *) d->chan;
+	pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
+	pc300_t *card = (pc300_t *) chan->card;
+	void __iomem *scabase = card->hw.scabase;
+	void __iomem *plxbase = card->hw.plxbase;
+	int ch = chan->channel;
+	u32 clkrate = chan->conf.phys_settings.clock_rate;
+	u32 clktype = chan->conf.phys_settings.clock_type;
+	u16 encoding = chan->conf.proto_settings.encoding;
+	u16 parity = chan->conf.proto_settings.parity;
+	u8 md0, md2;
+
+	/* Reset the channel */
+	cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST);
+
+	/* Configure the SCA registers */
+	switch (parity) {
+		case PARITY_NONE:
+			md0 = MD0_BIT_SYNC;
+			break;
+		case PARITY_CRC16_PR0:
+			md0 = MD0_CRC16_0|MD0_CRCC0|MD0_BIT_SYNC;
+			break;
+		case PARITY_CRC16_PR1:
+			md0 = MD0_CRC16_1|MD0_CRCC0|MD0_BIT_SYNC;
+			break;
+		case PARITY_CRC32_PR1_CCITT:
+			md0 = MD0_CRC32|MD0_CRCC0|MD0_BIT_SYNC;
+			break;
+		case PARITY_CRC16_PR1_CCITT:
+		default:
+			md0 = MD0_CRC_CCITT|MD0_CRCC0|MD0_BIT_SYNC;
+			break;
+	}
+	switch (encoding) {
+		case ENCODING_NRZI:
+			md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_NRZI;
+			break;
+		case ENCODING_FM_MARK:	/* FM1 */
+			md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_FM|MD2_FM1;
+			break;
+		case ENCODING_FM_SPACE:	/* FM0 */
+			md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_FM|MD2_FM0;
+			break;
+		case ENCODING_MANCHESTER: /* It's not working... */
+			md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_FM|MD2_MANCH;
+			break;
+		case ENCODING_NRZ:
+		default:
+			md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_NRZ;
+			break;
+	}
+	cpc_writeb(scabase + M_REG(MD0, ch), md0);
+	cpc_writeb(scabase + M_REG(MD1, ch), 0);
+	cpc_writeb(scabase + M_REG(MD2, ch), md2);
+ 	cpc_writeb(scabase + M_REG(IDL, ch), 0x7e);
+	cpc_writeb(scabase + M_REG(CTL, ch), CTL_URSKP | CTL_IDLC);
+
+	/* Configure HW media */
+	switch (card->hw.type) {
+		case PC300_RSV:
+			if (conf->media == IF_IFACE_V35) {
+				cpc_writel((plxbase + card->hw.gpioc_reg),
+					   cpc_readl(plxbase + card->hw.gpioc_reg) | PC300_CHMEDIA_MASK(ch));
+			} else {
+				cpc_writel((plxbase + card->hw.gpioc_reg),
+					   cpc_readl(plxbase + card->hw.gpioc_reg) & ~PC300_CHMEDIA_MASK(ch));
+			}
+			break;
+
+		case PC300_X21:
+			break;
+
+		case PC300_TE:
+			te_config(card, ch);
+			break;
+	}
+
+	switch (card->hw.type) {
+		case PC300_RSV:
+		case PC300_X21:
+			if (clktype == CLOCK_INT || clktype == CLOCK_TXINT) {
+				int tmc, br;
+
+				/* Calculate the clkrate parameters */
+				tmc = clock_rate_calc(clkrate, card->hw.clock, &br);
+				if (tmc < 0)
+					return -EIO;
+				cpc_writeb(scabase + M_REG(TMCT, ch), tmc);
+				cpc_writeb(scabase + M_REG(TXS, ch),
+					   (TXS_DTRXC | TXS_IBRG | br));
+				if (clktype == CLOCK_INT) {
+					cpc_writeb(scabase + M_REG(TMCR, ch), tmc);
+					cpc_writeb(scabase + M_REG(RXS, ch), 
+						   (RXS_IBRG | br));
+				} else {
+					cpc_writeb(scabase + M_REG(TMCR, ch), 1);
+					cpc_writeb(scabase + M_REG(RXS, ch), 0);
+				}
+	    			if (card->hw.type == PC300_X21) {
+					cpc_writeb(scabase + M_REG(GPO, ch), 1);
+					cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1 | EXS_RES1);
+				} else {
+					cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1);
+				}
+			} else {
+				cpc_writeb(scabase + M_REG(TMCT, ch), 1);
+				if (clktype == CLOCK_EXT) {
+					cpc_writeb(scabase + M_REG(TXS, ch), 
+						   TXS_DTRXC);
+				} else {
+					cpc_writeb(scabase + M_REG(TXS, ch), 
+						   TXS_DTRXC|TXS_RCLK);
+				}
+	    			cpc_writeb(scabase + M_REG(TMCR, ch), 1);
+				cpc_writeb(scabase + M_REG(RXS, ch), 0);
+				if (card->hw.type == PC300_X21) {
+					cpc_writeb(scabase + M_REG(GPO, ch), 0);
+					cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1 | EXS_RES1);
+				} else {
+					cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1);
+				}
+			}
+			break;
+
+		case PC300_TE:
+			/* SCA always receives clock from the FALC chip */
+			cpc_writeb(scabase + M_REG(TMCT, ch), 1);
+			cpc_writeb(scabase + M_REG(TXS, ch), 0);
+			cpc_writeb(scabase + M_REG(TMCR, ch), 1);
+			cpc_writeb(scabase + M_REG(RXS, ch), 0);
+			cpc_writeb(scabase + M_REG(EXS, ch), 0);
+			break;
+	}
+
+	/* Enable Interrupts */
+	cpc_writel(scabase + IER0,
+		   cpc_readl(scabase + IER0) |
+		   IR0_M(IR0_RXINTA, ch) |
+		   IR0_DRX(IR0_EFT | IR0_DMIA | IR0_DMIB, ch) |
+		   IR0_DTX(IR0_EFT | IR0_DMIA | IR0_DMIB, ch));
+	cpc_writeb(scabase + M_REG(IE0, ch),
+		   cpc_readl(scabase + M_REG(IE0, ch)) | IE0_RXINTA);
+	cpc_writeb(scabase + M_REG(IE1, ch),
+		   cpc_readl(scabase + M_REG(IE1, ch)) | IE1_CDCD);
+
+	return 0;
+}
+
+static int rx_config(pc300dev_t * d)
+{
+	pc300ch_t *chan = (pc300ch_t *) d->chan;
+	pc300_t *card = (pc300_t *) chan->card;
+	void __iomem *scabase = card->hw.scabase;
+	int ch = chan->channel;
+
+	cpc_writeb(scabase + DSR_RX(ch), 0);
+
+	/* General RX settings */
+	cpc_writeb(scabase + M_REG(RRC, ch), 0);
+	cpc_writeb(scabase + M_REG(RNR, ch), 16);
+
+	/* Enable reception */
+	cpc_writeb(scabase + M_REG(CMD, ch), CMD_RX_CRC_INIT);
+	cpc_writeb(scabase + M_REG(CMD, ch), CMD_RX_ENA);
+
+	/* Initialize DMA stuff */
+	chan->rx_first_bd = 0;
+	chan->rx_last_bd = N_DMA_RX_BUF - 1;
+	rx_dma_buf_init(card, ch);
+	cpc_writeb(scabase + DCR_RX(ch), DCR_FCT_CLR);
+	cpc_writeb(scabase + DMR_RX(ch), (DMR_TMOD | DMR_NF));
+	cpc_writeb(scabase + DIR_RX(ch), (DIR_EOM | DIR_BOF));
+
+	/* Start DMA */
+	rx_dma_start(card, ch);
+
+	return 0;
+}
+
+static int tx_config(pc300dev_t * d)
+{
+	pc300ch_t *chan = (pc300ch_t *) d->chan;
+	pc300_t *card = (pc300_t *) chan->card;
+	void __iomem *scabase = card->hw.scabase;
+	int ch = chan->channel;
+
+	cpc_writeb(scabase + DSR_TX(ch), 0);
+
+	/* General TX settings */
+	cpc_writeb(scabase + M_REG(TRC0, ch), 0);
+	cpc_writeb(scabase + M_REG(TFS, ch), 32);
+	cpc_writeb(scabase + M_REG(TNR0, ch), 20);
+	cpc_writeb(scabase + M_REG(TNR1, ch), 48);
+	cpc_writeb(scabase + M_REG(TCR, ch), 8);
+
+	/* Enable transmission */
+	cpc_writeb(scabase + M_REG(CMD, ch), CMD_TX_CRC_INIT);
+
+	/* Initialize DMA stuff */
+	chan->tx_first_bd = 0;
+	chan->tx_next_bd = 0;
+	tx_dma_buf_init(card, ch);
+	cpc_writeb(scabase + DCR_TX(ch), DCR_FCT_CLR);
+	cpc_writeb(scabase + DMR_TX(ch), (DMR_TMOD | DMR_NF));
+	cpc_writeb(scabase + DIR_TX(ch), (DIR_EOM | DIR_BOF | DIR_UDRF));
+	cpc_writel(scabase + DTX_REG(CDAL, ch), TX_BD_ADDR(ch, chan->tx_first_bd));
+	cpc_writel(scabase + DTX_REG(EDAL, ch), TX_BD_ADDR(ch, chan->tx_next_bd));
+
+	return 0;
+}
+
+static int cpc_attach(struct net_device *dev, unsigned short encoding,
+		      unsigned short parity)
+{
+	pc300dev_t *d = (pc300dev_t *)dev_to_hdlc(dev)->priv;
+	pc300ch_t *chan = (pc300ch_t *)d->chan;
+	pc300_t *card = (pc300_t *)chan->card;
+	pc300chconf_t *conf = (pc300chconf_t *)&chan->conf;
+
+	if (card->hw.type == PC300_TE) {
+		if (encoding != ENCODING_NRZ && encoding != ENCODING_NRZI) {
+			return -EINVAL;
+		}
+	} else {
+		if (encoding != ENCODING_NRZ && encoding != ENCODING_NRZI &&
+		    encoding != ENCODING_FM_MARK && encoding != ENCODING_FM_SPACE) {
+			/* Driver doesn't support ENCODING_MANCHESTER yet */
+			return -EINVAL;
+		}
+	}
+
+	if (parity != PARITY_NONE && parity != PARITY_CRC16_PR0 &&
+	    parity != PARITY_CRC16_PR1 && parity != PARITY_CRC32_PR1_CCITT &&
+	    parity != PARITY_CRC16_PR1_CCITT) {
+		return -EINVAL;
+	}
+
+	conf->proto_settings.encoding = encoding;
+	conf->proto_settings.parity = parity;
+	return 0;
+}
+
+static int cpc_opench(pc300dev_t * d)
+{
+	pc300ch_t *chan = (pc300ch_t *) d->chan;
+	pc300_t *card = (pc300_t *) chan->card;
+	int ch = chan->channel, rc;
+	void __iomem *scabase = card->hw.scabase;
+
+	rc = ch_config(d);
+	if (rc)
+		return rc;
+
+	rx_config(d);
+
+	tx_config(d);
+
+	/* Assert RTS and DTR */
+	cpc_writeb(scabase + M_REG(CTL, ch),
+		   cpc_readb(scabase + M_REG(CTL, ch)) & ~(CTL_RTS | CTL_DTR));
+
+	return 0;
+}
+
+static void cpc_closech(pc300dev_t * d)
+{
+	pc300ch_t *chan = (pc300ch_t *) d->chan;
+	pc300_t *card = (pc300_t *) chan->card;
+	falc_t *pfalc = (falc_t *) & chan->falc;
+	int ch = chan->channel;
+
+	cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_CH_RST);
+	rx_dma_stop(card, ch);
+	tx_dma_stop(card, ch);
+
+	if (card->hw.type == PC300_TE) {
+		memset(pfalc, 0, sizeof(falc_t));
+		cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
+			   cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) &
+			   ~((CPLD_REG2_FALC_TX_CLK | CPLD_REG2_FALC_RX_CLK |
+			      CPLD_REG2_FALC_LED2) << (2 * ch)));
+		/* Reset the FALC chip */
+		cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
+			   cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) |
+			   (CPLD_REG1_FALC_RESET << (2 * ch)));
+		udelay(10000);
+		cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
+			   cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) &
+			   ~(CPLD_REG1_FALC_RESET << (2 * ch)));
+	}
+}
+
+int cpc_open(struct net_device *dev)
+{
+	pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
+	struct ifreq ifr;
+	int result;
+
+#ifdef	PC300_DEBUG_OTHER
+	printk("pc300: cpc_open");
+#endif
+
+	result = hdlc_open(dev);
+
+	if (result)
+		return result;
+
+	sprintf(ifr.ifr_name, "%s", dev->name);
+	result = cpc_opench(d);
+	if (result)
+		goto err_out;
+
+	netif_start_queue(dev);
+	return 0;
+
+err_out:
+	hdlc_close(dev);
+	return result;
+}
+
+static int cpc_close(struct net_device *dev)
+{
+	pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
+	pc300ch_t *chan = (pc300ch_t *) d->chan;
+	pc300_t *card = (pc300_t *) chan->card;
+	unsigned long flags;
+
+#ifdef	PC300_DEBUG_OTHER
+	printk("pc300: cpc_close");
+#endif
+
+	netif_stop_queue(dev);
+
+	CPC_LOCK(card, flags);
+	cpc_closech(d);
+	CPC_UNLOCK(card, flags);
+
+	hdlc_close(dev);
+
+#ifdef CONFIG_PC300_MLPPP
+	if (chan->conf.proto == PC300_PROTO_MLPPP) {
+		cpc_tty_unregister_service(d);
+		chan->conf.proto = 0xffff;
+	}
+#endif
+
+	return 0;
+}
+
+static u32 detect_ram(pc300_t * card)
+{
+	u32 i;
+	u8 data;
+	void __iomem *rambase = card->hw.rambase;
+
+	card->hw.ramsize = PC300_RAMSIZE;
+	/* Let's find out how much RAM is present on this board */
+	for (i = 0; i < card->hw.ramsize; i++) {
+		data = (u8)(i & 0xff);
+		cpc_writeb(rambase + i, data);
+		if (cpc_readb(rambase + i) != data) {
+			break;
+		}
+	}
+	return i;
+}
+
+static void plx_init(pc300_t * card)
+{
+	struct RUNTIME_9050 __iomem *plx_ctl = card->hw.plxbase;
+
+	/* Reset PLX */
+	cpc_writel(&plx_ctl->init_ctrl,
+		   cpc_readl(&plx_ctl->init_ctrl) | 0x40000000);
+	udelay(10000L);
+	cpc_writel(&plx_ctl->init_ctrl,
+		   cpc_readl(&plx_ctl->init_ctrl) & ~0x40000000);
+
+	/* Reload Config. Registers from EEPROM */
+	cpc_writel(&plx_ctl->init_ctrl,
+		   cpc_readl(&plx_ctl->init_ctrl) | 0x20000000);
+	udelay(10000L);
+	cpc_writel(&plx_ctl->init_ctrl,
+		   cpc_readl(&plx_ctl->init_ctrl) & ~0x20000000);
+
+}
+
+static void show_version(void)
+{
+	char *rcsvers, *rcsdate, *tmp;
+
+	rcsvers = strchr(rcsid, ' ');
+	rcsvers++;
+	tmp = strchr(rcsvers, ' ');
+	*tmp++ = '\0';
+	rcsdate = strchr(tmp, ' ');
+	rcsdate++;
+	tmp = strrchr(rcsdate, ' ');
+	*tmp = '\0';
+	pr_info("Cyclades-PC300 driver %s %s\n", rcsvers, rcsdate);
+}				/* show_version */
+
+static const struct net_device_ops cpc_netdev_ops = {
+	.ndo_open		= cpc_open,
+	.ndo_stop		= cpc_close,
+	.ndo_tx_timeout		= cpc_tx_timeout,
+	.ndo_set_mac_address	= NULL,
+	.ndo_change_mtu		= cpc_change_mtu,
+	.ndo_do_ioctl		= cpc_ioctl,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+static void cpc_init_card(pc300_t * card)
+{
+	int i, devcount = 0;
+	static int board_nbr = 1;
+
+	/* Enable interrupts on the PCI bridge */
+	plx_init(card);
+	cpc_writew(card->hw.plxbase + card->hw.intctl_reg,
+		   cpc_readw(card->hw.plxbase + card->hw.intctl_reg) | 0x0040);
+
+#ifdef USE_PCI_CLOCK
+	/* Set board clock to PCI clock */
+	cpc_writel(card->hw.plxbase + card->hw.gpioc_reg,
+		   cpc_readl(card->hw.plxbase + card->hw.gpioc_reg) | 0x00000004UL);
+	card->hw.clock = PC300_PCI_CLOCK;
+#else
+	/* Set board clock to internal oscillator clock */
+	cpc_writel(card->hw.plxbase + card->hw.gpioc_reg,
+		   cpc_readl(card->hw.plxbase + card->hw.gpioc_reg) & ~0x00000004UL);
+	card->hw.clock = PC300_OSC_CLOCK;
+#endif
+
+	/* Detect actual on-board RAM size */
+	card->hw.ramsize = detect_ram(card);
+
+	/* Set Global SCA-II registers */
+	cpc_writeb(card->hw.scabase + PCR, PCR_PR2);
+	cpc_writeb(card->hw.scabase + BTCR, 0x10);
+	cpc_writeb(card->hw.scabase + WCRL, 0);
+	cpc_writeb(card->hw.scabase + DMER, 0x80);
+
+	if (card->hw.type == PC300_TE) {
+		u8 reg1;
+
+		/* Check CPLD version */
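+		/* A test write to CPLD_REG1 that does not read back changed
+		 * indicates the newer CPLD, whose control registers live at
+		 * CPLD_V2_REG1/CPLD_V2_REG2 and which exposes CPLD_ID_REG. */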
+		reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1);
+		cpc_writeb(card->hw.falcbase + CPLD_REG1, (reg1 + 0x5a));
+		if (cpc_readb(card->hw.falcbase + CPLD_REG1) == reg1) {
+			/* New CPLD */
+			card->hw.cpld_id = cpc_readb(card->hw.falcbase + CPLD_ID_REG);
+			card->hw.cpld_reg1 = CPLD_V2_REG1;
+			card->hw.cpld_reg2 = CPLD_V2_REG2;
+		} else {
+			/* old CPLD */
+			card->hw.cpld_id = 0;
+			card->hw.cpld_reg1 = CPLD_REG1;
+			card->hw.cpld_reg2 = CPLD_REG2;
+			cpc_writeb(card->hw.falcbase + CPLD_REG1, reg1);
+		}
+
+		/* Enable the board's global clock */
+		cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
+			   cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) |
+			   CPLD_REG1_GLOBAL_CLK);
+
+	}
+
+	for (i = 0; i < card->hw.nchan; i++) {
+		pc300ch_t *chan = &card->chan[i];
+		pc300dev_t *d = &chan->d;
+		hdlc_device *hdlc;
+		struct net_device *dev;
+
+		chan->card = card;
+		chan->channel = i;
+		chan->conf.phys_settings.clock_rate = 0;
+		chan->conf.phys_settings.clock_type = CLOCK_EXT;
+		chan->conf.proto_settings.encoding = ENCODING_NRZ;
+		chan->conf.proto_settings.parity = PARITY_CRC16_PR1_CCITT;
+		switch (card->hw.type) {
+			case PC300_TE:
+				chan->conf.media = IF_IFACE_T1;
+				chan->conf.lcode = PC300_LC_B8ZS;
+				chan->conf.fr_mode = PC300_FR_ESF;
+				chan->conf.lbo = PC300_LBO_0_DB;
+				chan->conf.rx_sens = PC300_RX_SENS_SH;
+				chan->conf.tslot_bitmap = 0xffffffffUL;
+				break;
+
+			case PC300_X21:
+				chan->conf.media = IF_IFACE_X21;
+				break;
+
+			case PC300_RSV:
+			default:
+				chan->conf.media = IF_IFACE_V35;
+				break;
+		}
+		chan->conf.proto = IF_PROTO_PPP;
+		chan->tx_first_bd = 0;
+		chan->tx_next_bd = 0;
+		chan->rx_first_bd = 0;
+		chan->rx_last_bd = N_DMA_RX_BUF - 1;
+		chan->nfree_tx_bd = N_DMA_TX_BUF;
+
+		d->chan = chan;
+		d->trace_on = 0;
+		d->line_on = 0;
+		d->line_off = 0;
+
+		dev = alloc_hdlcdev(d);
+		if (dev == NULL)
+			continue;
+
+		hdlc = dev_to_hdlc(dev);
+		hdlc->xmit = cpc_queue_xmit;
+		hdlc->attach = cpc_attach;
+		d->dev = dev;
+		dev->mem_start = card->hw.ramphys;
+		dev->mem_end = card->hw.ramphys + card->hw.ramsize - 1;
+		dev->irq = card->hw.irq;
+		dev->tx_queue_len = PC300_TX_QUEUE_LEN;
+		dev->mtu = PC300_DEF_MTU;
+
+		dev->netdev_ops = &cpc_netdev_ops;
+		dev->watchdog_timeo = PC300_TX_TIMEOUT;
+
+		if (register_hdlc_device(dev) == 0) {
+			printk("%s: Cyclades-PC300/", dev->name);
+			switch (card->hw.type) {
+				case PC300_TE:
+					if (card->hw.bus == PC300_PMC) {
+						printk("TE-M");
+					} else {
+						printk("TE  ");
+					}
+					break;
+
+				case PC300_X21:
+					printk("X21 ");
+					break;
+
+				case PC300_RSV:
+				default:
+					printk("RSV ");
+					break;
+			}
+			printk (" #%d, %dKB of RAM at 0x%08x, IRQ%d, channel %d.\n",
+				 board_nbr, card->hw.ramsize / 1024,
+				 card->hw.ramphys, card->hw.irq, i + 1);
+			devcount++;
+		} else {
+			printk ("Dev%d on card(0x%08x): unable to allocate i/f name.\n",
+				 i + 1, card->hw.ramphys);
+			free_netdev(dev);
+			continue;
+		}
+	}
+	spin_lock_init(&card->card_lock);
+
+	board_nbr++;
+}
+
+static int __devinit
+cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	int err, eeprom_outdated = 0;
+	u16 device_id;
+	pc300_t *card;
+
+	if ((err = pci_enable_device(pdev)) < 0)
+		return err;
+
+	card = kzalloc(sizeof(pc300_t), GFP_KERNEL);
+	if (card == NULL) {
+		printk("PC300 found at RAM 0x%016llx, "
+		       "but could not allocate card structure.\n",
+		       (unsigned long long)pci_resource_start(pdev, 3));
+		err = -ENOMEM;
+		goto err_disable_dev;
+	}
+
+	err = -ENODEV;
+
+	/* read PCI configuration area */
+	device_id = ent->device;
+	card->hw.irq = pdev->irq;
+	card->hw.iophys = pci_resource_start(pdev, 1);
+	card->hw.iosize = pci_resource_len(pdev, 1);
+	card->hw.scaphys = pci_resource_start(pdev, 2);
+	card->hw.scasize = pci_resource_len(pdev, 2);
+	card->hw.ramphys = pci_resource_start(pdev, 3);
+	card->hw.alloc_ramsize = pci_resource_len(pdev, 3);
+	card->hw.falcphys = pci_resource_start(pdev, 4);
+	card->hw.falcsize = pci_resource_len(pdev, 4);
+	card->hw.plxphys = pci_resource_start(pdev, 5);
+	card->hw.plxsize = pci_resource_len(pdev, 5);
+
+	switch (device_id) {
+		case PCI_DEVICE_ID_PC300_RX_1:
+		case PCI_DEVICE_ID_PC300_TE_1:
+		case PCI_DEVICE_ID_PC300_TE_M_1:
+			card->hw.nchan = 1;
+			break;
+
+		case PCI_DEVICE_ID_PC300_RX_2:
+		case PCI_DEVICE_ID_PC300_TE_2:
+		case PCI_DEVICE_ID_PC300_TE_M_2:
+		default:
+			card->hw.nchan = PC300_MAXCHAN;
+			break;
+	}
+#ifdef PC300_DEBUG_PCI
+	printk("cpc (bus=0x0%x,pci_id=0x%x,", pdev->bus->number, pdev->devfn);
+	printk("rev_id=%d) IRQ%d\n", pdev->revision, card->hw.irq);
+	printk("cpc:found  ramaddr=0x%08lx plxaddr=0x%08lx "
+	       "ctladdr=0x%08lx falcaddr=0x%08lx\n",
+	       card->hw.ramphys, card->hw.plxphys, card->hw.scaphys,
+	       card->hw.falcphys);
+#endif
+	/* Although we don't use this I/O region, we should
+	 * request it from the kernel anyway, to avoid problems
+	 * with other drivers accessing it. */
+	if (!request_region(card->hw.iophys, card->hw.iosize, "PLX Registers")) {
+		/* In case we can't allocate it, warn user */
+		printk("WARNING: couldn't allocate I/O region for PC300 board "
+		       "at 0x%08x!\n", card->hw.ramphys);
+	}
+
+	if (card->hw.plxphys) {
+		pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, card->hw.plxphys);
+	} else {
+		eeprom_outdated = 1;
+		card->hw.plxphys = pci_resource_start(pdev, 0);
+		card->hw.plxsize = pci_resource_len(pdev, 0);
+	}
+
+	if (!request_mem_region(card->hw.plxphys, card->hw.plxsize,
+				"PLX Registers")) {
+		printk("PC300 found at RAM 0x%08x, "
+		       "but could not allocate PLX mem region.\n",
+		       card->hw.ramphys);
+		goto err_release_io;
+	}
+	if (!request_mem_region(card->hw.ramphys, card->hw.alloc_ramsize,
+				"On-board RAM")) {
+		printk("PC300 found at RAM 0x%08x, "
+		       "but could not allocate RAM mem region.\n",
+		       card->hw.ramphys);
+		goto err_release_plx;
+	}
+	if (!request_mem_region(card->hw.scaphys, card->hw.scasize,
+				"SCA-II Registers")) {
+		printk("PC300 found at RAM 0x%08x, "
+		       "but could not allocate SCA mem region.\n",
+		       card->hw.ramphys);
+		goto err_release_ram;
+	}
+
+	card->hw.plxbase = ioremap(card->hw.plxphys, card->hw.plxsize);
+	card->hw.rambase = ioremap(card->hw.ramphys, card->hw.alloc_ramsize);
+	card->hw.scabase = ioremap(card->hw.scaphys, card->hw.scasize);
+	switch (device_id) {
+		case PCI_DEVICE_ID_PC300_TE_1:
+		case PCI_DEVICE_ID_PC300_TE_2:
+		case PCI_DEVICE_ID_PC300_TE_M_1:
+		case PCI_DEVICE_ID_PC300_TE_M_2:
+			request_mem_region(card->hw.falcphys, card->hw.falcsize,
+					   "FALC Registers");
+			card->hw.falcbase = ioremap(card->hw.falcphys, card->hw.falcsize);
+			break;
+
+		case PCI_DEVICE_ID_PC300_RX_1:
+		case PCI_DEVICE_ID_PC300_RX_2:
+		default:
+			card->hw.falcbase = NULL;
+			break;
+	}
+
+#ifdef PC300_DEBUG_PCI
+	printk("cpc: relocate ramaddr=0x%08lx plxaddr=0x%08lx "
+	       "ctladdr=0x%08lx falcaddr=0x%08lx\n",
+	       card->hw.rambase, card->hw.plxbase, card->hw.scabase,
+	       card->hw.falcbase);
+#endif
+
+	/* Set PCI drv pointer to the card structure */
+	pci_set_drvdata(pdev, card);
+
+	/* Set board type */
+	switch (device_id) {
+		case PCI_DEVICE_ID_PC300_TE_1:
+		case PCI_DEVICE_ID_PC300_TE_2:
+		case PCI_DEVICE_ID_PC300_TE_M_1:
+		case PCI_DEVICE_ID_PC300_TE_M_2:
+			card->hw.type = PC300_TE;
+
+			if ((device_id == PCI_DEVICE_ID_PC300_TE_M_1) ||
+			    (device_id == PCI_DEVICE_ID_PC300_TE_M_2)) {
+				card->hw.bus = PC300_PMC;
+				/* Set PLX register offsets */
+				card->hw.gpioc_reg = 0x54;
+				card->hw.intctl_reg = 0x4c;
+			} else {
+				card->hw.bus = PC300_PCI;
+				/* Set PLX register offsets */
+				card->hw.gpioc_reg = 0x50;
+				card->hw.intctl_reg = 0x4c;
+			}
+			break;
+
+		case PCI_DEVICE_ID_PC300_RX_1:
+		case PCI_DEVICE_ID_PC300_RX_2:
+		default:
+			card->hw.bus = PC300_PCI;
+			/* Set PLX register offsets */
+			card->hw.gpioc_reg = 0x50;
+			card->hw.intctl_reg = 0x4c;
+
+			if ((cpc_readl(card->hw.plxbase + card->hw.gpioc_reg) & PC300_CTYPE_MASK)) {
+				card->hw.type = PC300_X21;
+			} else {
+				card->hw.type = PC300_RSV;
+			}
+			break;
+	}
+
+	/* Allocate IRQ */
+	if (request_irq(card->hw.irq, cpc_intr, IRQF_SHARED, "Cyclades-PC300", card)) {
+		printk ("PC300 found at RAM 0x%08x, but could not allocate IRQ%d.\n",
+			 card->hw.ramphys, card->hw.irq);
+		goto err_io_unmap;
+	}
+
+	cpc_init_card(card);
+
+	if (eeprom_outdated)
+		printk("WARNING: PC300 with outdated EEPROM.\n");
+	return 0;
+
+err_io_unmap:
+	iounmap(card->hw.plxbase);
+	iounmap(card->hw.scabase);
+	iounmap(card->hw.rambase);
+	if (card->hw.type == PC300_TE) {
+		iounmap(card->hw.falcbase);
+		release_mem_region(card->hw.falcphys, card->hw.falcsize);
+	}
+	release_mem_region(card->hw.scaphys, card->hw.scasize);
+err_release_ram:
+	release_mem_region(card->hw.ramphys, card->hw.alloc_ramsize);
+err_release_plx:
+	release_mem_region(card->hw.plxphys, card->hw.plxsize);
+err_release_io:
+	release_region(card->hw.iophys, card->hw.iosize);
+	kfree(card);
+err_disable_dev:
+	pci_disable_device(pdev);
+	return err;
+}
+
+static void __devexit cpc_remove_one(struct pci_dev *pdev)
+{
+	pc300_t *card = pci_get_drvdata(pdev);
+
+	if (card->hw.rambase) {
+		int i;
+
+		/* Disable interrupts on the PCI bridge */
+		cpc_writew(card->hw.plxbase + card->hw.intctl_reg,
+			   cpc_readw(card->hw.plxbase + card->hw.intctl_reg) & ~(0x0040));
+
+		for (i = 0; i < card->hw.nchan; i++) {
+			unregister_hdlc_device(card->chan[i].d.dev);
+		}
+		iounmap(card->hw.plxbase);
+		iounmap(card->hw.scabase);
+		iounmap(card->hw.rambase);
+		release_mem_region(card->hw.plxphys, card->hw.plxsize);
+		release_mem_region(card->hw.ramphys, card->hw.alloc_ramsize);
+		release_mem_region(card->hw.scaphys, card->hw.scasize);
+		release_region(card->hw.iophys, card->hw.iosize);
+		if (card->hw.type == PC300_TE) {
+			iounmap(card->hw.falcbase);
+			release_mem_region(card->hw.falcphys, card->hw.falcsize);
+		}
+		for (i = 0; i < card->hw.nchan; i++)
+			if (card->chan[i].d.dev)
+				free_netdev(card->chan[i].d.dev);
+		if (card->hw.irq)
+			free_irq(card->hw.irq, card);
+		kfree(card);
+		pci_disable_device(pdev);
+	}
+}
+
+static struct pci_driver cpc_driver = {
+	.name           = "pc300",
+	.id_table       = cpc_pci_dev_id,
+	.probe          = cpc_init_one,
+	.remove         = __devexit_p(cpc_remove_one),
+};
+
+static int __init cpc_init(void)
+{
+	show_version();
+	return pci_register_driver(&cpc_driver);
+}
+
+static void __exit cpc_cleanup_module(void)
+{
+	pci_unregister_driver(&cpc_driver);
+}
+
+module_init(cpc_init);
+module_exit(cpc_cleanup_module);
+
+MODULE_DESCRIPTION("Cyclades-PC300 cards driver");
+MODULE_AUTHOR(  "Author: Ivan Passos <ivan@cyclades.com>\r\n"
+                "Maintainer: PC300 Maintainer <pc300@cyclades.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/pc300_tty.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/pc300_tty.c
new file mode 100644
index 0000000..4709f42
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/pc300_tty.c
@@ -0,0 +1,1079 @@
+/*
+ * pc300_tty.c	Cyclades-PC300(tm) TTY Driver.
+ *
+ * Author:	Regina Kodato <reginak@cyclades.com>
+ *
+ * Copyright:	(c) 1999-2002 Cyclades Corp.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *   
+ *  $Log: pc300_tty.c,v $
+ *  Revision 3.7  2002/03/07 14:17:09  henrique
+ *  License data fixed
+ *
+ *  Revision 3.6  2001/12/10 12:29:42  regina
+ *  Fix the MLPPP bug
+ *
+ *  Revision 3.5  2001/10/31 11:20:05  regina
+ *  automatic pppd starts
+ *
+ *  Revision 3.4  2001/08/06 12:01:51  regina
+ *  problem in DSR_DE bit
+ *
+ *  Revision 3.3  2001/07/26 22:58:41  regina
+ *  update EDA value
+ *
+ *  Revision 3.2  2001/07/12 13:11:20  regina
+ *  bug fix - DCD-OFF in pc300 tty driver
+ *
+ *	DMA transmission bug fix
+ *  
+ *  Revision 3.1  2001/06/22 13:13:02  regina
+ *  MLPPP implementation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/if.h>
+#include <linux/skbuff.h>
+/* TTY includes */
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include "pc300.h"
+
+/* defines and macros */
+/* TTY Global definitions */
+#define	CPC_TTY_NPORTS	8	/* maximum number of the sync tty connections */
+#define	CPC_TTY_MAJOR	CYCLADES_MAJOR	
+#define CPC_TTY_MINOR_START	240	/* minor of the first PC300 interface */
+
+#define CPC_TTY_MAX_MTU	2000	
+
+/* tty interface state */
+#define	CPC_TTY_ST_IDLE	0
+#define CPC_TTY_ST_INIT	1	/* configured with MLPPP and up */
+#define CPC_TTY_ST_OPEN	2	/* opened by application */
+
+#define	CPC_TTY_LOCK(card,flags)\
+	do {\
+		spin_lock_irqsave(&card->card_lock, flags);	\
+	} while (0)
+
+#define CPC_TTY_UNLOCK(card,flags)	\
+	do {\
+		spin_unlock_irqrestore(&card->card_lock, flags);	\
+	} while (0)
+
+//#define	CPC_TTY_DBG(format,a...)	printk(format,##a)
+#define	CPC_TTY_DBG(format,a...)
+
+/* data structures */
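+/*
+ * A received frame queued for delivery to the line discipline; frames are
+ * kept on a singly linked list.  data[1] is an old-style flexible array:
+ * each buffer is allocated as kmalloc(frame_size + sizeof(st_cpc_rx_buf)).
+ */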
+typedef struct _st_cpc_rx_buf {
+	struct _st_cpc_rx_buf	*next;
+	int		size;
+	unsigned char	data[1];
+} st_cpc_rx_buf;
+
+struct st_cpc_rx_list {
+	st_cpc_rx_buf	*first;
+	st_cpc_rx_buf	*last;
+};
+
+typedef	struct _st_cpc_tty_area {
+	int		state;		/* state of the TTY interface */
+	int		num_open;	
+	unsigned int 	tty_minor;	/* minor number of this interface */
+	volatile struct st_cpc_rx_list buf_rx;	/* ptr. to reception buffer */
+	unsigned char*	buf_tx;		/* ptr. to transmission buffer */
+	pc300dev_t*	pc300dev;	/* ptr. to info struct in PC300 driver */
+	unsigned char	name[20];	/* interf. name + "-tty" */
+	struct tty_struct *tty;		
+	struct work_struct tty_tx_work; /* tx work - tx interrupt */
+	struct work_struct tty_rx_work; /* rx work - rx interrupt */
+	} st_cpc_tty_area;
+
+/* TTY data structures */
+static struct tty_driver serial_drv;
+
+/* local variables */
+static st_cpc_tty_area	cpc_tty_area[CPC_TTY_NPORTS];
+
+static int cpc_tty_cnt = 0;	/* number of interfaces configured with MLPPP */
+static int cpc_tty_unreg_flag = 0;
+
+/* TTY functions prototype */
+static int cpc_tty_open(struct tty_struct *tty, struct file *flip);
+static void cpc_tty_close(struct tty_struct *tty, struct file *flip);
+static int cpc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count);
+static int cpc_tty_write_room(struct tty_struct *tty);
+static int cpc_tty_chars_in_buffer(struct tty_struct *tty);
+static void cpc_tty_flush_buffer(struct tty_struct *tty);
+static void cpc_tty_hangup(struct tty_struct *tty);
+static void cpc_tty_rx_work(struct work_struct *work);
+static void cpc_tty_tx_work(struct work_struct *work);
+static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len);
+static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
+static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
+static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char);
+
+static int pc300_tiocmset(struct tty_struct *, unsigned int, unsigned int);
+static int pc300_tiocmget(struct tty_struct *);
+
+/* functions called by PC300 driver */
+void cpc_tty_init(pc300dev_t *dev);
+void cpc_tty_unregister_service(pc300dev_t *pc300dev);
+void cpc_tty_receive(pc300dev_t *pc300dev);
+void cpc_tty_trigger_poll(pc300dev_t *pc300dev);
+
+/*
+ * PC300 TTY clear "signal"
+ */
+static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char signal)
+{
+	pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan; 
+	pc300_t *card = (pc300_t *) pc300chan->card; 
+	int ch = pc300chan->channel; 
+	unsigned long flags; 
+
+	CPC_TTY_DBG("%s-tty: Clear signal %x\n",
+		pc300dev->dev->name, signal);
+	CPC_TTY_LOCK(card, flags); 
+	cpc_writeb(card->hw.scabase + M_REG(CTL,ch), 
+		cpc_readb(card->hw.scabase+M_REG(CTL,ch))& signal);
+	CPC_TTY_UNLOCK(card,flags); 
+}
+
+/*
+ * PC300 TTY set "signal" to ON
+ */
+static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char signal)
+{
+	pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan; 
+	pc300_t *card = (pc300_t *) pc300chan->card; 
+	int ch = pc300chan->channel; 
+	unsigned long flags; 
+
+	CPC_TTY_DBG("%s-tty: Set signal %x\n",
+		pc300dev->dev->name, signal);
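+	/* the CTL signal bits are active low, so clearing a bit asserts the line */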
+	CPC_TTY_LOCK(card, flags); 
+	cpc_writeb(card->hw.scabase + M_REG(CTL,ch), 
+		cpc_readb(card->hw.scabase+M_REG(CTL,ch))& ~signal);
+	CPC_TTY_UNLOCK(card,flags); 
+}
+
+
+static const struct tty_operations pc300_ops = {
+	.open = cpc_tty_open,
+	.close = cpc_tty_close,
+	.write = cpc_tty_write,
+	.write_room = cpc_tty_write_room,
+	.chars_in_buffer = cpc_tty_chars_in_buffer,
+	.tiocmset = pc300_tiocmset,
+	.tiocmget = pc300_tiocmget,
+	.flush_buffer = cpc_tty_flush_buffer,
+	.hangup = cpc_tty_hangup,
+};
+
+
+/*
+ * PC300 TTY initialization routine
+ *
+ * This routine is called by the PC300 driver during board configuration 
+ * (ioctl=SIOCSP300CONF). At this point the adapter is completely
+ * initialized.
+ * o verify kernel version (only 2.4.x)
+ * o register TTY driver
+ * o init cpc_tty_area struct
+ */
+void cpc_tty_init(pc300dev_t *pc300dev)
+{
+	unsigned long port;
+	int aux;
+	st_cpc_tty_area * cpc_tty;
+
+	/* hdlcX - X=interface number */
+	port = pc300dev->dev->name[4] - '0';
+	if (port >= CPC_TTY_NPORTS) {
+		printk("%s-tty: invalid interface selected (0-%i): %li\n",
+			pc300dev->dev->name,
+			CPC_TTY_NPORTS-1,port);
+		return;
+	}
+
+	if (cpc_tty_cnt == 0) { /* first TTY connection -> register driver */
+		CPC_TTY_DBG("%s-tty: driver init, major:%i, minor range:%i-%i\n",
+			pc300dev->dev->name,
+			CPC_TTY_MAJOR, CPC_TTY_MINOR_START,
+			CPC_TTY_MINOR_START+CPC_TTY_NPORTS);
+		/* initialize tty driver struct */
+		memset(&serial_drv,0,sizeof(struct tty_driver));
+		serial_drv.magic = TTY_DRIVER_MAGIC;
+		serial_drv.owner = THIS_MODULE;
+		serial_drv.driver_name = "pc300_tty";
+		serial_drv.name = "ttyCP";
+		serial_drv.major = CPC_TTY_MAJOR;
+		serial_drv.minor_start = CPC_TTY_MINOR_START;
+		serial_drv.num = CPC_TTY_NPORTS;
+		serial_drv.type = TTY_DRIVER_TYPE_SERIAL;
+		serial_drv.subtype = SERIAL_TYPE_NORMAL;
+
+		serial_drv.init_termios = tty_std_termios;
+		serial_drv.init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL;
+		serial_drv.flags = TTY_DRIVER_REAL_RAW;
+
+		/* interface routines from the upper tty layer to the tty driver */
+		tty_set_operations(&serial_drv, &pc300_ops);
+
+		/* register the TTY driver */
+		if (tty_register_driver(&serial_drv)) { 
+			printk("%s-tty: Failed to register serial driver!\n",
+				pc300dev->dev->name);
+		   	return;
+		} 
+
+		memset((void *)cpc_tty_area, 0,
+								sizeof(st_cpc_tty_area) * CPC_TTY_NPORTS);
+	}
+
+	cpc_tty = &cpc_tty_area[port];
+	
+	if (cpc_tty->state != CPC_TTY_ST_IDLE) {
+		CPC_TTY_DBG("%s-tty: TTY port %i, already in use.\n",
+				pc300dev->dev->name, port);
+		return;
+	}
+
+	cpc_tty_cnt++;
+	cpc_tty->state = CPC_TTY_ST_INIT; 
+	cpc_tty->num_open= 0;
+	cpc_tty->tty_minor = port + CPC_TTY_MINOR_START;
+	cpc_tty->pc300dev = pc300dev; 
+
+	INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work);
+	INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work);
+	
+	cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL;
+
+	pc300dev->cpc_tty = (void *)cpc_tty; 
+	
+	aux = strlen(pc300dev->dev->name);
+	memcpy(cpc_tty->name, pc300dev->dev->name, aux);
+	memcpy(&cpc_tty->name[aux], "-tty", 5);
+	
+	cpc_open(pc300dev->dev);
+	cpc_tty_signal_off(pc300dev, CTL_DTR);
+
+	CPC_TTY_DBG("%s: Initializing TTY Sync Driver, tty major#%d minor#%i\n",
+			cpc_tty->name,CPC_TTY_MAJOR,cpc_tty->tty_minor); 
+	return; 
+} 
+
+/*
+ * PC300 TTY OPEN routine
+ *
+ * This routine is called by the tty driver to open the interface 
+ * o verify minor
+ * o allocate the Rx and Tx buffers
+ */
+static int cpc_tty_open(struct tty_struct *tty, struct file *flip)
+{
+	int port ;
+	st_cpc_tty_area *cpc_tty;
+
+	if (!tty) { 
+		return -ENODEV;
+	} 
+
+	port = tty->index;
+
+	if ((port < 0) || (port >= CPC_TTY_NPORTS)){ 
+		CPC_TTY_DBG("pc300_tty: open invalid port %d\n", port);
+		return -ENODEV;
+	} 
+
+	cpc_tty = &cpc_tty_area[port];
+	
+	if (cpc_tty->state == CPC_TTY_ST_IDLE){
+		CPC_TTY_DBG("%s: open - invalid interface, port=%d\n",
+					cpc_tty->name, tty->index);
+		return -ENODEV;
+	}
+
+	if (cpc_tty->num_open == 0) { /* first open of this tty */
+		if (!cpc_tty_area[port].buf_tx){
+			cpc_tty_area[port].buf_tx = kmalloc(CPC_TTY_MAX_MTU,GFP_KERNEL);
+			if (!cpc_tty_area[port].buf_tx) {
+				CPC_TTY_DBG("%s: error in memory allocation\n",cpc_tty->name);
+				return -ENOMEM;
+			}
+		} 
+
+		if (cpc_tty_area[port].buf_rx.first) {
+			unsigned char * aux;
+			while (cpc_tty_area[port].buf_rx.first) {
+				aux = (unsigned char *)cpc_tty_area[port].buf_rx.first;
+				cpc_tty_area[port].buf_rx.first = cpc_tty_area[port].buf_rx.first->next;
+				kfree(aux);
+			}
+			cpc_tty_area[port].buf_rx.first = NULL;
+			cpc_tty_area[port].buf_rx.last = NULL;
+		}
+
+		cpc_tty_area[port].state = CPC_TTY_ST_OPEN;
+		cpc_tty_area[port].tty = tty;
+		tty->driver_data = &cpc_tty_area[port];
+
+		cpc_tty_signal_on(cpc_tty->pc300dev, CTL_DTR);
+	} 
+
+	cpc_tty->num_open++;
+
+	CPC_TTY_DBG("%s: opening TTY driver\n", cpc_tty->name);
+	
+	/* notify the PC300 driver */ 
+	return 0; 
+}
+
+/*
+ * PC300 TTY CLOSE routine
+ *
+ * This routine is called by the tty driver to close the interface 
+ * o call close channel in PC300 driver (cpc_closech)
+ * o free Rx and Tx buffers
+ */
+
+static void cpc_tty_close(struct tty_struct *tty, struct file *flip)
+{
+	st_cpc_tty_area    *cpc_tty;
+	unsigned long flags;
+	int res;
+
+	if (!tty || !tty->driver_data ) {
+		CPC_TTY_DBG("hdlcX-tty: no TTY in close\n");
+		return;
+	}
+
+	cpc_tty = (st_cpc_tty_area *) tty->driver_data;
+
+	if ((cpc_tty->tty != tty)|| (cpc_tty->state != CPC_TTY_ST_OPEN)) {
+		CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name);
+		return;
+	}
+   	
+	if (!cpc_tty->num_open) {
+		CPC_TTY_DBG("%s: TTY is closed\n",cpc_tty->name);
+		return;
+	}
+
+	if (--cpc_tty->num_open > 0) {
+		CPC_TTY_DBG("%s: TTY closed\n",cpc_tty->name);
+		return;
+	}
+
+	cpc_tty_signal_off(cpc_tty->pc300dev, CTL_DTR);
+
+	CPC_TTY_LOCK(cpc_tty->pc300dev->chan->card, flags);  /* lock irq */ 
+	cpc_tty->tty = NULL;
+	cpc_tty->state = CPC_TTY_ST_INIT;
+	CPC_TTY_UNLOCK(cpc_tty->pc300dev->chan->card, flags); /* unlock irq */ 
+	
+	if (cpc_tty->buf_rx.first) {
+		unsigned char * aux;
+		while (cpc_tty->buf_rx.first) {
+			aux = (unsigned char *)cpc_tty->buf_rx.first;
+			cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next;
+			kfree(aux);
+		}
+		cpc_tty->buf_rx.first = NULL;
+		cpc_tty->buf_rx.last = NULL;
+	}
+	
+	kfree(cpc_tty->buf_tx);
+	cpc_tty->buf_tx = NULL;
+
+	CPC_TTY_DBG("%s: TTY closed\n",cpc_tty->name);
+	
+	if (!serial_drv.refcount && cpc_tty_unreg_flag) {
+		cpc_tty_unreg_flag = 0;
+		CPC_TTY_DBG("%s: unregister the tty driver\n", cpc_tty->name);
+		if ((res=tty_unregister_driver(&serial_drv))) { 
+			CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n",
+							cpc_tty->name,res);
+		}
+	}
+	return; 
+} 
+
+/*
+ * PC300 TTY WRITE routine
+ *
+ * This routine is called by the tty driver to write a series of characters
+ * to the tty device. The characters may come from user or kernel space.
+ * o verify the DCD signal
+ * o send characters to board and start the transmission
+ */
+static int cpc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	st_cpc_tty_area    *cpc_tty; 
+	pc300ch_t *pc300chan; 
+	pc300_t *card; 
+	int ch; 
+	unsigned long flags; 
+	struct net_device_stats *stats; 
+
+	if (!tty || !tty->driver_data ) { 
+		CPC_TTY_DBG("hdlcX-tty: no TTY in write\n");
+		return -ENODEV;
+	} 
+
+	cpc_tty = (st_cpc_tty_area *) tty->driver_data; 
+
+	if ((cpc_tty->tty != tty) ||  (cpc_tty->state != CPC_TTY_ST_OPEN)) { 
+		CPC_TTY_DBG("%s: TTY is not opened\n", cpc_tty->name);
+		return -ENODEV; 
+	}
+
+	if (count > CPC_TTY_MAX_MTU) { 
+		CPC_TTY_DBG("%s: count is invalid\n",cpc_tty->name);
+		return -EINVAL;        /* frame too big */ 
+	}
+
+	CPC_TTY_DBG("%s: cpc_tty_write data len=%i\n",cpc_tty->name,count);
+	
+	pc300chan = (pc300ch_t *)((pc300dev_t*)cpc_tty->pc300dev)->chan; 
+	stats = &cpc_tty->pc300dev->dev->stats;
+	card = (pc300_t *) pc300chan->card;
+	ch = pc300chan->channel; 
+
+	/* verify the DCD signal (a set ST3_DCD bit means the carrier is absent) */
+	if (cpc_readb(card->hw.scabase + M_REG(ST3,ch)) & ST3_DCD) { 
+		/* DCD is OFF */ 
+		CPC_TTY_DBG("%s : DCD is OFF\n", cpc_tty->name);
+		stats->tx_errors++;
+		stats->tx_carrier_errors++;
+		CPC_TTY_LOCK(card, flags); 
+		cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_BUF_CLR); 
+		
+		if (card->hw.type == PC300_TE) { 
+			cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, 
+				cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) & 
+				~(CPLD_REG2_FALC_LED1 << (2 *ch))); 
+		}
+
+		CPC_TTY_UNLOCK(card, flags); 
+
+		return -EINVAL; 
+	}
+
+	if (cpc_tty_send_to_card(cpc_tty->pc300dev, (void*)buf, count)) { 
+	   /* failed to send */
+	   CPC_TTY_DBG("%s: transmission error\n", cpc_tty->name);
+	   return 0;
+	}
+	return count; 
+} 
+
+/*
+ * PC300 TTY Write Room routine
+ * 
+ * This routine returns the number of characters the tty driver will accept
+ * for queuing to be written. 
+ * o return MTU
+ */
+static int cpc_tty_write_room(struct tty_struct *tty)
+{
+	st_cpc_tty_area    *cpc_tty; 
+
+	if (!tty || !tty->driver_data ) { 
+		CPC_TTY_DBG("hdlcX-tty: no TTY to write room\n");
+		return -ENODEV;
+	}
+
+	cpc_tty = (st_cpc_tty_area *) tty->driver_data; 
+
+	if ((cpc_tty->tty != tty) ||  (cpc_tty->state != CPC_TTY_ST_OPEN)) { 
+		CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name);
+		return -ENODEV; 
+	}
+   	
+	CPC_TTY_DBG("%s: write room\n",cpc_tty->name);
+	
+	return CPC_TTY_MAX_MTU;
+} 
+
+/*
+ * PC300 TTY chars in buffer routine
+ * 
+ * This routine returns the number of characters in the transmission buffer
+ * o returns 0
+ */
+static int cpc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+	st_cpc_tty_area    *cpc_tty; 
+
+	if (!tty || !tty->driver_data ) {
+		CPC_TTY_DBG("hdlcX-tty: no TTY to chars in buffer\n");
+		return -ENODEV; 
+	}
+
+	cpc_tty = (st_cpc_tty_area *) tty->driver_data; 
+
+	if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) { 
+		CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name);
+		return -ENODEV; 
+	}
+   
+	return 0;
+} 
+
+static int pc300_tiocmset(struct tty_struct *tty,
+			  unsigned int set, unsigned int clear)
+{
+	st_cpc_tty_area    *cpc_tty; 
+
+	CPC_TTY_DBG("%s: set:%x clear:%x\n", __func__, set, clear);
+
+	if (!tty || !tty->driver_data ) {
+	   	CPC_TTY_DBG("hdlcX-tty: no TTY in tiocmset\n");	
+		return -ENODEV; 
+	}
+
+	cpc_tty = (st_cpc_tty_area *) tty->driver_data; 
+
+	if (set & TIOCM_RTS)
+		cpc_tty_signal_on(cpc_tty->pc300dev, CTL_RTS);
+	if (set & TIOCM_DTR)
+		cpc_tty_signal_on(cpc_tty->pc300dev, CTL_DTR);
+
+	if (clear & TIOCM_RTS)
+		cpc_tty_signal_off(cpc_tty->pc300dev, CTL_RTS);
+	if (clear & TIOCM_DTR)
+		cpc_tty_signal_off(cpc_tty->pc300dev, CTL_DTR);
+
+	return 0;
+}
+
+static int pc300_tiocmget(struct tty_struct *tty)
+{
+	unsigned int result;
+	unsigned char status;
+	unsigned long flags;
+	st_cpc_tty_area  *cpc_tty = (st_cpc_tty_area *) tty->driver_data;
+	pc300dev_t *pc300dev = cpc_tty->pc300dev;
+	pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan;
+	pc300_t *card = (pc300_t *) pc300chan->card;
+	int ch = pc300chan->channel;
+
+	cpc_tty = (st_cpc_tty_area *) tty->driver_data;
+
+	CPC_TTY_DBG("%s-tty: tiocmget\n",
+		((struct net_device*)(pc300dev->hdlc))->name);
+
+	CPC_TTY_LOCK(card, flags);
+	status = cpc_readb(card->hw.scabase+M_REG(CTL,ch));
+	CPC_TTY_UNLOCK(card,flags);
+
+	result = ((status & CTL_DTR) ? TIOCM_DTR : 0) |
+		 ((status & CTL_RTS) ? TIOCM_RTS : 0);
+
+	return result;
+}
+
+/*
+ * PC300 TTY Flush Buffer routine
+ *
+ * This routine resets the transmission buffer 
+ */
+static void cpc_tty_flush_buffer(struct tty_struct *tty)
+{ 
+	st_cpc_tty_area    *cpc_tty; 
+	
+	if (!tty || !tty->driver_data ) {
+	   	CPC_TTY_DBG("hdlcX-tty: no TTY to flush buffer\n");	
+		return; 
+	}
+
+	cpc_tty = (st_cpc_tty_area *) tty->driver_data; 
+
+	if ((cpc_tty->tty != tty) ||  (cpc_tty->state != CPC_TTY_ST_OPEN)) { 
+		CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name);
+		return; 
+	}
+
+	CPC_TTY_DBG("%s: call wake_up_interruptible\n",cpc_tty->name);
+
+	tty_wakeup(tty);	
+	return; 
+} 
+
+/*
+ * PC300 TTY Hangup routine
+ *
+ * This routine is called by the tty driver to hangup the interface 
+ * o clear DTR signal
+ */
+
+static void cpc_tty_hangup(struct tty_struct *tty)
+{ 
+	st_cpc_tty_area    *cpc_tty; 
+	int res;
+
+	if (!tty || !tty->driver_data ) {
+		CPC_TTY_DBG("hdlcX-tty: no TTY to hangup\n");	
+		return ; 
+	}
+
+	cpc_tty = (st_cpc_tty_area *) tty->driver_data; 
+
+	if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) {
+		CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name);
+		return ;
+	}
+	if (!serial_drv.refcount && cpc_tty_unreg_flag) {
+		cpc_tty_unreg_flag = 0;
+		CPC_TTY_DBG("%s: unregister the tty driver\n", cpc_tty->name);
+		if ((res=tty_unregister_driver(&serial_drv))) { 
+			CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n",
+							cpc_tty->name,res);
+		}
+	}
+	cpc_tty_signal_off(cpc_tty->pc300dev, CTL_DTR);
+}
+
+/*
+ * PC300 TTY RX work routine
+ * This routine handles the deferred RX work:
+ * o check the receive buffer list of each port
+ * o pass queued buffers to the line discipline receive_buf
+ * o free the delivered buffers
+ */
+static void cpc_tty_rx_work(struct work_struct *work)
+{
+	st_cpc_tty_area *cpc_tty;
+	unsigned long port;
+	int i, j;
+	volatile st_cpc_rx_buf *buf;
+	char flags=0,flg_rx=1; 
+	struct tty_ldisc *ld;
+
+	if (cpc_tty_cnt == 0) return;
+	
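+	/*
+	 * Make up to four passes over all the ports, starting with the port
+	 * whose work item fired, and hand any queued RX buffers to the line
+	 * discipline; keep looping while at least one buffer was delivered.
+	 */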
+	for (i=0; (i < 4) && flg_rx ; i++) {
+		flg_rx = 0;
+
+		cpc_tty = container_of(work, st_cpc_tty_area, tty_rx_work);
+		port = cpc_tty - cpc_tty_area;
+
+		for (j=0; j < CPC_TTY_NPORTS; j++) {
+			cpc_tty = &cpc_tty_area[port];
+		
+			if ((buf=cpc_tty->buf_rx.first) != NULL) {
+				if (cpc_tty->tty) {
+					ld = tty_ldisc_ref(cpc_tty->tty);
+					if (ld) {
+						if (ld->ops->receive_buf) {
+							CPC_TTY_DBG("%s: call line disc. receive_buf\n",cpc_tty->name);
+							ld->ops->receive_buf(cpc_tty->tty, (char *)(buf->data), &flags, buf->size);
+						}
+						tty_ldisc_deref(ld);
+					}
+				}	
+				cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next;
+				kfree((void *)buf);
+				buf = cpc_tty->buf_rx.first;
+				flg_rx = 1;
+			}
+			if (++port == CPC_TTY_NPORTS) port = 0;
+		}
+	}
+} 
+
+/*
+ * PC300 TTY discard frame routine
+ *
+ * This routine discards the frame at the head of the RX DMA ring:
+ * o walk the frame's descriptors
+ * o clear their status and length fields
+ * o stop at the end-of-message descriptor
+ */
+static void cpc_tty_rx_disc_frame(pc300ch_t *pc300chan)
+{
+	volatile pcsca_bd_t __iomem * ptdescr; 
+	volatile unsigned char status; 
+	pc300_t *card = (pc300_t *)pc300chan->card; 
+	int ch = pc300chan->channel; 
+
+	/* dma buf read */ 
+	ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase + 
+				RX_BD_ADDR(ch, pc300chan->rx_first_bd)); 
+	while (pc300chan->rx_first_bd != pc300chan->rx_last_bd) { 
+		status = cpc_readb(&ptdescr->status); 
+		cpc_writeb(&ptdescr->status, 0); 
+		cpc_writeb(&ptdescr->len, 0); 
+		pc300chan->rx_first_bd = (pc300chan->rx_first_bd + 1) & 
+					(N_DMA_RX_BUF - 1); 
+		if (status & DST_EOM) { 
+			break; /* end of message */
+		}
+		ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase + cpc_readl(&ptdescr->next)); 
+	}
+}
+
+void cpc_tty_receive(pc300dev_t *pc300dev)
+{
+	st_cpc_tty_area *cpc_tty; 
+	pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan; 
+	pc300_t *card = (pc300_t *)pc300chan->card; 
+	int ch = pc300chan->channel; 
+	volatile pcsca_bd_t  __iomem * ptdescr; 
+	struct net_device_stats *stats = &pc300dev->dev->stats;
+	int rx_len, rx_aux; 
+	volatile unsigned char status; 
+	unsigned short first_bd = pc300chan->rx_first_bd;
+	st_cpc_rx_buf *new = NULL;
+	unsigned char dsr_rx;
+
+	if (pc300dev->cpc_tty == NULL) { 
+		return; 
+	}
+
+	dsr_rx = cpc_readb(card->hw.scabase + DSR_RX(ch));
+
+	cpc_tty = pc300dev->cpc_tty;
+
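+	/*
+	 * Walk the RX DMA descriptor ring: measure each complete frame, copy
+	 * it into a freshly allocated st_cpc_rx_buf, queue the buffer on the
+	 * interface RX list and schedule the RX work to push it to the tty.
+	 */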
+	while (1) { 
+		rx_len = 0;
+		ptdescr = (pcsca_bd_t  __iomem *)(card->hw.rambase + RX_BD_ADDR(ch, first_bd));
+		while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
+			rx_len += cpc_readw(&ptdescr->len);
+			first_bd = (first_bd + 1) & (N_DMA_RX_BUF - 1);
+			if (status & DST_EOM) {
+				break;
+			}
+			ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase+cpc_readl(&ptdescr->next));
+		}
+			
+		if (!rx_len) { 
+			if (dsr_rx & DSR_BOF) {
+				/* update EDA */ 
+				cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch), 
+						RX_BD_ADDR(ch, pc300chan->rx_last_bd)); 
+			}
+			kfree(new);
+			return; 
+		}
+		
+		if (rx_len > CPC_TTY_MAX_MTU) { 
+			/* Free RX descriptors */ 
+			CPC_TTY_DBG("%s: frame size is invalid.\n",cpc_tty->name);
+			stats->rx_errors++; 
+			stats->rx_frame_errors++; 
+			cpc_tty_rx_disc_frame(pc300chan);
+			continue;
+		} 
+		
+		new = kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC);
+		if (!new) {
+			cpc_tty_rx_disc_frame(pc300chan);
+			continue;
+		}
+		
+		/* dma buf read */ 
+		ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase + 
+				RX_BD_ADDR(ch, pc300chan->rx_first_bd)); 
+
+		rx_len = 0;	/* frame size counter */
+		
+		while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
+			rx_aux = cpc_readw(&ptdescr->len);
+			if ((status & (DST_OVR | DST_CRC | DST_RBIT |  DST_SHRT | DST_ABT))
+				|| (rx_aux > BD_DEF_LEN)) {
+				CPC_TTY_DBG("%s: reception error\n", cpc_tty->name);
+				stats->rx_errors++; 
+				if (status & DST_OVR) { 
+					stats->rx_fifo_errors++; 
+				}
+				if (status & DST_CRC) { 
+					stats->rx_crc_errors++; 
+				}
+				if ((status & (DST_RBIT | DST_SHRT | DST_ABT)) ||
+					(rx_aux > BD_DEF_LEN))	{ 
+					stats->rx_frame_errors++; 
+				} 
+				/* discard remaining descriptors used by the bad frame */ 
+				CPC_TTY_DBG("%s: reception error - discard descriptors\n",
+						cpc_tty->name);
+				cpc_tty_rx_disc_frame(pc300chan);
+				rx_len = 0;
+				kfree(new);
+				new = NULL;
+				break; /* read next frame - while(1) */
+			}
+
+			if (cpc_tty->state != CPC_TTY_ST_OPEN) {
+				/* Free RX descriptors */ 
+				cpc_tty_rx_disc_frame(pc300chan);
+				stats->rx_dropped++; 
+				rx_len = 0; 
+				kfree(new);
+				new = NULL;
+				break; /* read next frame - while(1) */
+			}
+
+			/* read the segment of the frame */
+			if (rx_aux != 0) {
+				memcpy_fromio((new->data + rx_len), 
+					(void __iomem *)(card->hw.rambase + 
+					 cpc_readl(&ptdescr->ptbuf)), rx_aux);
+				rx_len += rx_aux; 
+			}
+			cpc_writeb(&ptdescr->status,0); 
+			cpc_writeb(&ptdescr->len, 0); 
+			pc300chan->rx_first_bd = (pc300chan->rx_first_bd + 1) & 
+					(N_DMA_RX_BUF -1); 
+			if (status & DST_EOM)break;
+			
+			ptdescr = (pcsca_bd_t __iomem *) (card->hw.rambase + 
+					cpc_readl(&ptdescr->next)); 
+		}
+		/* update pointer */ 
+		pc300chan->rx_last_bd = (pc300chan->rx_first_bd - 1) & 
+					(N_DMA_RX_BUF - 1) ; 
+		if (!(dsr_rx & DSR_BOF)) {
+			/* update EDA */ 
+			cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch), 
+					RX_BD_ADDR(ch, pc300chan->rx_last_bd)); 
+		}
+		if (rx_len != 0) { 
+			stats->rx_bytes += rx_len; 
+		
+			if (pc300dev->trace_on) { 
+				cpc_tty_trace(pc300dev, new->data,rx_len, 'R'); 
+			} 
+			new->size = rx_len;
+			new->next = NULL;
+			if (cpc_tty->buf_rx.first == NULL) {
+				cpc_tty->buf_rx.first = new;
+				cpc_tty->buf_rx.last = new;
+			} else {
+				cpc_tty->buf_rx.last->next = new;
+				cpc_tty->buf_rx.last = new;
+			}
+			schedule_work(&(cpc_tty->tty_rx_work));
+			stats->rx_packets++;
+		}
+	} 
+} 
+
+/*
+ * PC300 TTY TX work routine
+ * 
+ * This routine handles the deferred TX work:
+ * o check that the tty is still open
+ * o call tty_wakeup so the line discipline can queue more data
+ */
+static void cpc_tty_tx_work(struct work_struct *work)
+{
+	st_cpc_tty_area *cpc_tty =
+		container_of(work, st_cpc_tty_area, tty_tx_work);
+	struct tty_struct *tty; 
+
+	CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name);
+	
+	if ((tty = cpc_tty->tty) == NULL) { 
+		CPC_TTY_DBG("%s: the interface is not opened\n",cpc_tty->name);
+		return; 
+	}
+	tty_wakeup(tty);
+}
+
+/*
+ * PC300 TTY send to card routine
+ * 
+ * This routine sends data to the card: 
+ * o clear descriptors
+ * o write data to DMA buffers
+ * o start the transmission
+ */
+static int cpc_tty_send_to_card(pc300dev_t *dev,void* buf, int len)
+{
+	pc300ch_t *chan = (pc300ch_t *)dev->chan; 
+	pc300_t *card = (pc300_t *)chan->card; 
+	int ch = chan->channel; 
+	struct net_device_stats *stats = &dev->dev->stats;
+	unsigned long flags; 
+	volatile pcsca_bd_t __iomem *ptdescr; 
+	int i, nchar;
+	int tosend = len;
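+	/* number of DMA descriptors needed: ceil(len / BD_DEF_LEN) */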
+	int nbuf = ((len - 1)/BD_DEF_LEN) + 1;
+	unsigned char *pdata=buf;
+
+	CPC_TTY_DBG("%s: cpc_tty_send_to_card len=%i\n", 
+			(st_cpc_tty_area *)dev->cpc_tty->name,len);	
+
+	if (nbuf >= card->chan[ch].nfree_tx_bd) {
+		return 1;
+	}
+	
+	/* write buffer to DMA buffers */ 
+	CPC_TTY_DBG("%s: call dma_buf_write\n",
+			(st_cpc_tty_area *)dev->cpc_tty->name);	
+	for (i = 0 ; i < nbuf ; i++) {
+		ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase + 
+			TX_BD_ADDR(ch, card->chan[ch].tx_next_bd));
+		nchar = (BD_DEF_LEN > tosend) ? tosend : BD_DEF_LEN;
+		if (cpc_readb(&ptdescr->status) & DST_OSB) {
+			memcpy_toio((void __iomem *)(card->hw.rambase + 
+				cpc_readl(&ptdescr->ptbuf)), 
+				&pdata[len - tosend], 
+				nchar);
+			card->chan[ch].nfree_tx_bd--;
+			if ((i + 1) == nbuf) {
+				/* This must be the last BD to be used */
+				cpc_writeb(&ptdescr->status, DST_EOM);
+			} else {
+				cpc_writeb(&ptdescr->status, 0);
+			}
+			cpc_writew(&ptdescr->len, nchar);
+		} else {
+			CPC_TTY_DBG("%s: error in dma_buf_write\n",
+					(st_cpc_tty_area *)dev->cpc_tty->name);	
+			stats->tx_dropped++;
+			return 1; 
+		}
+		tosend -= nchar;
+		card->chan[ch].tx_next_bd = 
+			(card->chan[ch].tx_next_bd + 1) & (N_DMA_TX_BUF - 1);
+	}
+
+	if (dev->trace_on) { 
+		cpc_tty_trace(dev, buf, len,'T'); 
+	}
+
+	/* start transmission */ 
+	CPC_TTY_DBG("%s: start transmission\n",
+		(st_cpc_tty_area *)dev->cpc_tty->name);	
+	
+	CPC_TTY_LOCK(card, flags); 
+	cpc_writeb(card->hw.scabase + DTX_REG(EDAL, ch), 
+			TX_BD_ADDR(ch, chan->tx_next_bd)); 
+	cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_ENA); 
+	cpc_writeb(card->hw.scabase + DSR_TX(ch), DSR_DE); 
+
+	if (card->hw.type == PC300_TE) { 
+		cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2, 
+			cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) |
+			(CPLD_REG2_FALC_LED1 << (2 * ch))); 
+	}
+	CPC_TTY_UNLOCK(card, flags); 
+	return 0; 
+} 
+
+/*
+ *	PC300 TTY trace routine
+ *
+ *  This routine sends a trace of the connection to the application:
+ *  o allocate an skb
+ *  o copy the interface name, the direction tag and the frame data into it
+ *  o hand it to the network stack with netif_rx
+ */
+
+static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx)
+{
+	struct sk_buff *skb; 
+
+	if ((skb = dev_alloc_skb(10 + len)) == NULL) { 
+		/* out of memory */ 
+		CPC_TTY_DBG("%s: tty_trace - out of memory\n", dev->dev->name);
+		return; 
+	}
+
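+	/*
+	 * Trace frame layout: 5 bytes of interface name, "[R]: " or "[T]: ",
+	 * then the traced data; it is passed up with protocol ETH_P_CUST so
+	 * a monitoring application can capture it.
+	 */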
+	skb_put (skb, 10 + len); 
+	skb->dev = dev->dev; 
+	skb->protocol = htons(ETH_P_CUST); 
+	skb_reset_mac_header(skb);
+	skb->pkt_type = PACKET_HOST; 
+	skb->len = 10 + len; 
+
+	skb_copy_to_linear_data(skb, dev->dev->name, 5);
+	skb->data[5] = '['; 
+	skb->data[6] = rxtx; 
+	skb->data[7] = ']'; 
+	skb->data[8] = ':'; 
+	skb->data[9] = ' '; 
+	skb_copy_to_linear_data_offset(skb, 10, buf, len);
+	netif_rx(skb); 
+} 	
+
+/*
+ *	PC300 TTY unregister service routine
+ *
+ *	This routine unregisters one interface.
+ */
+void cpc_tty_unregister_service(pc300dev_t *pc300dev)
+{
+	st_cpc_tty_area *cpc_tty; 
+	ulong flags;
+	int res;
+
+	if ((cpc_tty= (st_cpc_tty_area *) pc300dev->cpc_tty) == NULL) {
+		CPC_TTY_DBG("%s: interface is not TTY\n", pc300dev->dev->name);
+		return; 
+	}
+	CPC_TTY_DBG("%s: cpc_tty_unregister_service", cpc_tty->name);
+
+	if (cpc_tty->pc300dev != pc300dev) { 
+		CPC_TTY_DBG("%s: invalid tty ptr=%s\n", 
+		pc300dev->dev->name, cpc_tty->name);
+		return; 
+	}
+
+	if (--cpc_tty_cnt == 0) { 
+		if (serial_drv.refcount) {
+			CPC_TTY_DBG("%s: unregister is not possible, refcount=%d",
+							cpc_tty->name, serial_drv.refcount);
+			cpc_tty_cnt++;
+			cpc_tty_unreg_flag = 1;
+			return;
+		} else { 
+			CPC_TTY_DBG("%s: unregister the tty driver\n", cpc_tty->name);
+			if ((res=tty_unregister_driver(&serial_drv))) { 
+				CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n",
+								cpc_tty->name,res);
+			}
+		}
+	}
+	CPC_TTY_LOCK(pc300dev->chan->card,flags);
+	cpc_tty->tty = NULL; 
+	CPC_TTY_UNLOCK(pc300dev->chan->card, flags);
+	cpc_tty->tty_minor = 0; 
+	cpc_tty->state = CPC_TTY_ST_IDLE; 
+} 
+
+/*
+ * PC300 TTY trigger poll routine
+ * This routine is called by the PC300 driver to handle a TX interrupt.
+ */
+void cpc_tty_trigger_poll(pc300dev_t *pc300dev)
+{
+	st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *)pc300dev->cpc_tty; 
+	if (!cpc_tty) {
+		return;
+	}
+	schedule_work(&(cpc_tty->tty_tx_work)); 
+} 
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/pc300too.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/pc300too.c
new file mode 100644
index 0000000..5fe246e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/pc300too.c
@@ -0,0 +1,535 @@
+/*
+ * Cyclades PC300 synchronous serial card driver for Linux
+ *
+ * Copyright (C) 2000-2008 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
+ *
+ * Sources of information:
+ *    Hitachi HD64572 SCA-II User's Manual
+ *    Original Cyclades PC300 Linux driver
+ *
+ * This driver currently supports only PC300/RSV (V.24/V.35) and
+ * PC300/X21 cards.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/hdlc.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+#include "hd64572.h"
+
+#undef DEBUG_PKT
+#define DEBUG_RINGS
+
+#define PC300_PLX_SIZE		0x80    /* PLX control window size (128 B) */
+#define PC300_SCA_SIZE		0x400   /* SCA window size (1 KB) */
+#define MAX_TX_BUFFERS		10
+
+static int pci_clock_freq = 33000000;
+static int use_crystal_clock = 0;
+static unsigned int CLOCK_BASE;
+
+/* Masks to access the init_ctrl PLX register */
+#define PC300_CLKSEL_MASK	 (0x00000004UL)
+#define PC300_CHMEDIA_MASK(port) (0x00000020UL << ((port) * 3))
+#define PC300_CTYPE_MASK	 (0x00000800UL)
+
+
+enum { PC300_RSV = 1, PC300_X21, PC300_TE }; /* card types */
+
+/*
+ *      PLX PCI9050-1 local configuration and shared runtime registers.
+ *      This structure can be used to access 9050 registers (memory mapped).
+ */
+typedef struct {
+	u32 loc_addr_range[4];	/* 00-0Ch : Local Address Ranges */
+	u32 loc_rom_range;	/* 10h : Local ROM Range */
+	u32 loc_addr_base[4];	/* 14-20h : Local Address Base Addrs */
+	u32 loc_rom_base;	/* 24h : Local ROM Base */
+	u32 loc_bus_descr[4];	/* 28-34h : Local Bus Descriptors */
+	u32 rom_bus_descr;	/* 38h : ROM Bus Descriptor */
+	u32 cs_base[4];		/* 3C-48h : Chip Select Base Addrs */
+	u32 intr_ctrl_stat;	/* 4Ch : Interrupt Control/Status */
+	u32 init_ctrl;		/* 50h : EEPROM ctrl, Init Ctrl, etc */
+}plx9050;
+
+
+
+typedef struct port_s {
+	struct napi_struct napi;
+	struct net_device *netdev;
+	struct card_s *card;
+	spinlock_t lock;	/* TX lock */
+	sync_serial_settings settings;
+	int rxpart;		/* partial frame received, next frame invalid*/
+	unsigned short encoding;
+	unsigned short parity;
+	unsigned int iface;
+	u16 rxin;		/* rx ring buffer 'in' pointer */
+	u16 txin;		/* tx ring buffer 'in' and 'last' pointers */
+	u16 txlast;
+	u8 rxs, txs, tmc;	/* SCA registers */
+	u8 chan;		/* physical port # - 0 or 1 */
+}port_t;
+
+
+
+typedef struct card_s {
+	int type;		/* RSV, X21, etc. */
+	int n_ports;		/* 1 or 2 ports */
+	u8 __iomem *rambase;	/* buffer memory base (virtual) */
+	u8 __iomem *scabase;	/* SCA memory base (virtual) */
+	plx9050 __iomem *plxbase; /* PLX registers memory base (virtual) */
+	u32 init_ctrl_value;	/* Saved value - 9050 bug workaround */
+	u16 rx_ring_buffers;	/* number of buffers in a ring */
+	u16 tx_ring_buffers;
+	u16 buff_offset;	/* offset of first buffer of first channel */
+	u8 irq;			/* interrupt request level */
+
+	port_t ports[2];
+}card_t;
+
+
+#define get_port(card, port)	     ((port) < (card)->n_ports ? \
+					 (&(card)->ports[port]) : (NULL))
+
+#include "hd64572.c"
+
+
+static void pc300_set_iface(port_t *port)
+{
+	card_t *card = port->card;
+	u32 __iomem * init_ctrl = &card->plxbase->init_ctrl;
+	u16 msci = get_msci(port);
+	u8 rxs = port->rxs & CLK_BRG_MASK;
+	u8 txs = port->txs & CLK_BRG_MASK;
+
+	sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
+		port->card);
+	switch(port->settings.clock_type) {
+	case CLOCK_INT:
+		rxs |= CLK_BRG; /* BRG output */
+		txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
+		break;
+
+	case CLOCK_TXINT:
+		rxs |= CLK_LINE; /* RXC input */
+		txs |= CLK_PIN_OUT | CLK_BRG; /* BRG output */
+		break;
+
+	case CLOCK_TXFROMRX:
+		rxs |= CLK_LINE; /* RXC input */
+		txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
+		break;
+
+	default:		/* EXTernal clock */
+		rxs |= CLK_LINE; /* RXC input */
+		txs |= CLK_PIN_OUT | CLK_LINE; /* TXC input */
+		break;
+	}
+
+	port->rxs = rxs;
+	port->txs = txs;
+	sca_out(rxs, msci + RXS, card);
+	sca_out(txs, msci + TXS, card);
+	sca_set_port(port);
+
+	if (port->card->type == PC300_RSV) {
+		if (port->iface == IF_IFACE_V35)
+			writel(card->init_ctrl_value |
+			       PC300_CHMEDIA_MASK(port->chan), init_ctrl);
+		else
+			writel(card->init_ctrl_value &
+			       ~PC300_CHMEDIA_MASK(port->chan), init_ctrl);
+	}
+}
+
+
+
+static int pc300_open(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+
+	int result = hdlc_open(dev);
+	if (result)
+		return result;
+
+	sca_open(dev);
+	pc300_set_iface(port);
+	return 0;
+}
+
+
+
+static int pc300_close(struct net_device *dev)
+{
+	sca_close(dev);
+	hdlc_close(dev);
+	return 0;
+}
+
+
+
+static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	const size_t size = sizeof(sync_serial_settings);
+	sync_serial_settings new_line;
+	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+	int new_type;
+	port_t *port = dev_to_port(dev);
+
+#ifdef DEBUG_RINGS
+	if (cmd == SIOCDEVPRIVATE) {
+		sca_dump_rings(dev);
+		return 0;
+	}
+#endif
+	if (cmd != SIOCWANDEV)
+		return hdlc_ioctl(dev, ifr, cmd);
+
+	if (ifr->ifr_settings.type == IF_GET_IFACE) {
+		ifr->ifr_settings.type = port->iface;
+		if (ifr->ifr_settings.size < size) {
+			ifr->ifr_settings.size = size; /* data size wanted */
+			return -ENOBUFS;
+		}
+		if (copy_to_user(line, &port->settings, size))
+			return -EFAULT;
+		return 0;
+
+	}
+
+	if (port->card->type == PC300_X21 &&
+	    (ifr->ifr_settings.type == IF_IFACE_SYNC_SERIAL ||
+	     ifr->ifr_settings.type == IF_IFACE_X21))
+		new_type = IF_IFACE_X21;
+
+	else if (port->card->type == PC300_RSV &&
+		 (ifr->ifr_settings.type == IF_IFACE_SYNC_SERIAL ||
+		  ifr->ifr_settings.type == IF_IFACE_V35))
+		new_type = IF_IFACE_V35;
+
+	else if (port->card->type == PC300_RSV &&
+		 ifr->ifr_settings.type == IF_IFACE_V24)
+		new_type = IF_IFACE_V24;
+
+	else
+		return hdlc_ioctl(dev, ifr, cmd);
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (copy_from_user(&new_line, line, size))
+		return -EFAULT;
+
+	if (new_line.clock_type != CLOCK_EXT &&
+	    new_line.clock_type != CLOCK_TXFROMRX &&
+	    new_line.clock_type != CLOCK_INT &&
+	    new_line.clock_type != CLOCK_TXINT)
+		return -EINVAL;	/* No such clock setting */
+
+	if (new_line.loopback != 0 && new_line.loopback != 1)
+		return -EINVAL;
+
+	memcpy(&port->settings, &new_line, size); /* Update settings */
+	port->iface = new_type;
+	pc300_set_iface(port);
+	return 0;
+}
+
+
+
+static void pc300_pci_remove_one(struct pci_dev *pdev)
+{
+	int i;
+	card_t *card = pci_get_drvdata(pdev);
+
+	for (i = 0; i < 2; i++)
+		if (card->ports[i].card)
+			unregister_hdlc_device(card->ports[i].netdev);
+
+	if (card->irq)
+		free_irq(card->irq, card);
+
+	if (card->rambase)
+		iounmap(card->rambase);
+	if (card->scabase)
+		iounmap(card->scabase);
+	if (card->plxbase)
+		iounmap(card->plxbase);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	if (card->ports[0].netdev)
+		free_netdev(card->ports[0].netdev);
+	if (card->ports[1].netdev)
+		free_netdev(card->ports[1].netdev);
+	kfree(card);
+}
+
+static const struct net_device_ops pc300_ops = {
+	.ndo_open       = pc300_open,
+	.ndo_stop       = pc300_close,
+	.ndo_change_mtu = hdlc_change_mtu,
+	.ndo_start_xmit = hdlc_start_xmit,
+	.ndo_do_ioctl   = pc300_ioctl,
+};
+
+static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
+					const struct pci_device_id *ent)
+{
+	card_t *card;
+	u32 __iomem *p;
+	int i;
+	u32 ramsize;
+	u32 ramphys;		/* buffer memory base */
+	u32 scaphys;		/* SCA memory base */
+	u32 plxphys;		/* PLX registers memory base */
+
+	i = pci_enable_device(pdev);
+	if (i)
+		return i;
+
+	i = pci_request_regions(pdev, "PC300");
+	if (i) {
+		pci_disable_device(pdev);
+		return i;
+	}
+
+	card = kzalloc(sizeof(card_t), GFP_KERNEL);
+	if (card == NULL) {
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		return -ENOBUFS;
+	}
+	pci_set_drvdata(pdev, card);
+
+	if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE ||
+	    pci_resource_len(pdev, 2) != PC300_SCA_SIZE ||
+	    pci_resource_len(pdev, 3) < 16384) {
+		pr_err("invalid card EEPROM parameters\n");
+		pc300_pci_remove_one(pdev);
+		return -EFAULT;
+	}
+
+	plxphys = pci_resource_start(pdev, 0) & PCI_BASE_ADDRESS_MEM_MASK;
+	card->plxbase = ioremap(plxphys, PC300_PLX_SIZE);
+
+	scaphys = pci_resource_start(pdev, 2) & PCI_BASE_ADDRESS_MEM_MASK;
+	card->scabase = ioremap(scaphys, PC300_SCA_SIZE);
+
+	ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK;
+	card->rambase = pci_ioremap_bar(pdev, 3);
+
+	if (card->plxbase == NULL ||
+	    card->scabase == NULL ||
+	    card->rambase == NULL) {
+		pr_err("ioremap() failed\n");
+		pc300_pci_remove_one(pdev);
+		return -EFAULT;
+	}
+
+	/* PLX PCI 9050 workaround for local configuration register read bug */
+	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, scaphys);
+	card->init_ctrl_value = readl(&((plx9050 __iomem *)card->scabase)->init_ctrl);
+	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys);
+
+	if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
+	    pdev->device == PCI_DEVICE_ID_PC300_TE_2)
+		card->type = PC300_TE; /* not fully supported */
+	else if (card->init_ctrl_value & PC300_CTYPE_MASK)
+		card->type = PC300_X21;
+	else
+		card->type = PC300_RSV;
+
+	if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 ||
+	    pdev->device == PCI_DEVICE_ID_PC300_TE_1)
+		card->n_ports = 1;
+	else
+		card->n_ports = 2;
+
+	for (i = 0; i < card->n_ports; i++)
+		if (!(card->ports[i].netdev = alloc_hdlcdev(&card->ports[i]))) {
+			pr_err("unable to allocate memory\n");
+			pc300_pci_remove_one(pdev);
+			return -ENOMEM;
+		}
+
+	/* Reset PLX */
+	p = &card->plxbase->init_ctrl;
+	writel(card->init_ctrl_value | 0x40000000, p);
+	readl(p);		/* Flush the write - do not use sca_flush */
+	udelay(1);
+
+	writel(card->init_ctrl_value, p);
+	readl(p);		/* Flush the write - do not use sca_flush */
+	udelay(1);
+
+	/* Reload Config. Registers from EEPROM */
+	writel(card->init_ctrl_value | 0x20000000, p);
+	readl(p);		/* Flush the write - do not use sca_flush */
+	udelay(1);
+
+	writel(card->init_ctrl_value, p);
+	readl(p);		/* Flush the write - do not use sca_flush */
+	udelay(1);
+
+	ramsize = sca_detect_ram(card, card->rambase,
+				 pci_resource_len(pdev, 3));
+
+	if (use_crystal_clock)
+		card->init_ctrl_value &= ~PC300_CLKSEL_MASK;
+	else
+		card->init_ctrl_value |= PC300_CLKSEL_MASK;
+
+	writel(card->init_ctrl_value, &card->plxbase->init_ctrl);
+	/* number of TX + RX buffers for one port */
+	i = ramsize / (card->n_ports * (sizeof(pkt_desc) + HDLC_MAX_MRU));
+	card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
+	card->rx_ring_buffers = i - card->tx_ring_buffers;
+
+	card->buff_offset = card->n_ports * sizeof(pkt_desc) *
+		(card->tx_ring_buffers + card->rx_ring_buffers);
+
+	pr_info("PC300/%s, %u KB RAM at 0x%x, IRQ%u, using %u TX + %u RX packets rings\n",
+		card->type == PC300_X21 ? "X21" :
+		card->type == PC300_TE ? "TE" : "RSV",
+		ramsize / 1024, ramphys, pdev->irq,
+		card->tx_ring_buffers, card->rx_ring_buffers);
+
+	if (card->tx_ring_buffers < 1) {
+		pr_err("RAM test failed\n");
+		pc300_pci_remove_one(pdev);
+		return -EFAULT;
+	}
+
+	/* Enable interrupts on the PCI bridge, LINTi1 active low */
+	writew(0x0041, &card->plxbase->intr_ctrl_stat);
+
+	/* Allocate IRQ */
+	if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pc300", card)) {
+		pr_warn("could not allocate IRQ%d\n", pdev->irq);
+		pc300_pci_remove_one(pdev);
+		return -EBUSY;
+	}
+	card->irq = pdev->irq;
+
+	sca_init(card, 0);
+
+	// COTE not set - allows better TX DMA settings
+	// sca_out(sca_in(PCR, card) | PCR_COTE, PCR, card);
+
+	sca_out(0x10, BTCR, card);
+
+	for (i = 0; i < card->n_ports; i++) {
+		port_t *port = &card->ports[i];
+		struct net_device *dev = port->netdev;
+		hdlc_device *hdlc = dev_to_hdlc(dev);
+		port->chan = i;
+
+		spin_lock_init(&port->lock);
+		dev->irq = card->irq;
+		dev->mem_start = ramphys;
+		dev->mem_end = ramphys + ramsize - 1;
+		dev->tx_queue_len = 50;
+		dev->netdev_ops = &pc300_ops;
+		hdlc->attach = sca_attach;
+		hdlc->xmit = sca_xmit;
+		port->settings.clock_type = CLOCK_EXT;
+		port->card = card;
+		if (card->type == PC300_X21)
+			port->iface = IF_IFACE_X21;
+		else
+			port->iface = IF_IFACE_V35;
+
+		sca_init_port(port);
+		if (register_hdlc_device(dev)) {
+			pr_err("unable to register hdlc device\n");
+			port->card = NULL;
+			pc300_pci_remove_one(pdev);
+			return -ENOBUFS;
+		}
+
+		netdev_info(dev, "PC300 channel %d\n", port->chan);
+	}
+	return 0;
+}
+
+
+
+static DEFINE_PCI_DEVICE_TABLE(pc300_pci_tbl) = {
+	{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID,
+	  PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID,
+	  PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_TE_1, PCI_ANY_ID,
+	  PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_TE_2, PCI_ANY_ID,
+	  PCI_ANY_ID, 0, 0, 0 },
+	{ 0, }
+};
+
+
+static struct pci_driver pc300_pci_driver = {
+	.name =          "PC300",
+	.id_table =      pc300_pci_tbl,
+	.probe =         pc300_pci_init_one,
+	.remove =        pc300_pci_remove_one,
+};
+
+
+static int __init pc300_init_module(void)
+{
+	if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
+		pr_err("Invalid PCI clock frequency\n");
+		return -EINVAL;
+	}
+	if (use_crystal_clock != 0 && use_crystal_clock != 1) {
+		pr_err("Invalid 'use_crystal_clock' value\n");
+		return -EINVAL;
+	}
+
+	CLOCK_BASE = use_crystal_clock ? 24576000 : pci_clock_freq;
+
+	return pci_register_driver(&pc300_pci_driver);
+}
+
+
+
+static void __exit pc300_cleanup_module(void)
+{
+	pci_unregister_driver(&pc300_pci_driver);
+}
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("Cyclades PC300 serial port driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, pc300_pci_tbl);
+module_param(pci_clock_freq, int, 0444);
+MODULE_PARM_DESC(pci_clock_freq, "System PCI clock frequency in Hz");
+module_param(use_crystal_clock, int, 0444);
+MODULE_PARM_DESC(use_crystal_clock,
+		 "Use 24.576 MHz clock instead of PCI clock");
+module_init(pc300_init_module);
+module_exit(pc300_cleanup_module);
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/pci200syn.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/pci200syn.c
new file mode 100644
index 0000000..9659fca
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/pci200syn.c
@@ -0,0 +1,456 @@
+/*
+ * Goramo PCI200SYN synchronous serial card driver for Linux
+ *
+ * Copyright (C) 2002-2008 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>
+ *
+ * Sources of information:
+ *    Hitachi HD64572 SCA-II User's Manual
+ *    PLX Technology Inc. PCI9052 Data Book
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/capability.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/hdlc.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+#include "hd64572.h"
+
+#undef DEBUG_PKT
+#define DEBUG_RINGS
+
+#define PCI200SYN_PLX_SIZE	0x80	/* PLX control window size (128b) */
+#define PCI200SYN_SCA_SIZE	0x400	/* SCA window size (1Kb) */
+#define MAX_TX_BUFFERS		10
+
+static int pci_clock_freq = 33000000;
+#define CLOCK_BASE pci_clock_freq
+
+/*
+ *      PLX PCI9052 local configuration and shared runtime registers.
+ *      This structure can be used to access 9052 registers (memory mapped).
+ */
+typedef struct {
+	u32 loc_addr_range[4];	/* 00-0Ch : Local Address Ranges */
+	u32 loc_rom_range;	/* 10h : Local ROM Range */
+	u32 loc_addr_base[4];	/* 14-20h : Local Address Base Addrs */
+	u32 loc_rom_base;	/* 24h : Local ROM Base */
+	u32 loc_bus_descr[4];	/* 28-34h : Local Bus Descriptors */
+	u32 rom_bus_descr;	/* 38h : ROM Bus Descriptor */
+	u32 cs_base[4];		/* 3C-48h : Chip Select Base Addrs */
+	u32 intr_ctrl_stat;	/* 4Ch : Interrupt Control/Status */
+	u32 init_ctrl;		/* 50h : EEPROM ctrl, Init Ctrl, etc */
+}plx9052;
+
+
+
+typedef struct port_s {
+	struct napi_struct napi;
+	struct net_device *netdev;
+	struct card_s *card;
+	spinlock_t lock;	/* TX lock */
+	sync_serial_settings settings;
+	int rxpart;		/* partial frame received, next frame invalid*/
+	unsigned short encoding;
+	unsigned short parity;
+	u16 rxin;		/* rx ring buffer 'in' pointer */
+	u16 txin;		/* tx ring buffer 'in' and 'last' pointers */
+	u16 txlast;
+	u8 rxs, txs, tmc;	/* SCA registers */
+	u8 chan;		/* physical port # - 0 or 1 */
+}port_t;
+
+
+
+typedef struct card_s {
+	u8 __iomem *rambase;	/* buffer memory base (virtual) */
+	u8 __iomem *scabase;	/* SCA memory base (virtual) */
+	plx9052 __iomem *plxbase;/* PLX registers memory base (virtual) */
+	u16 rx_ring_buffers;	/* number of buffers in a ring */
+	u16 tx_ring_buffers;
+	u16 buff_offset;	/* offset of first buffer of first channel */
+	u8 irq;			/* interrupt request level */
+
+	port_t ports[2];
+}card_t;
+
+
+#define get_port(card, port)	     (&card->ports[port])
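+/* dummy SCA register read, used to flush posted PCI writes to the card */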
+#define sca_flush(card)		     (sca_in(IER0, card));
+
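+/*
+ * Copy to card memory in chunks of at most 256 bytes, reading one byte back
+ * after each chunk; the read-back is presumably there to flush posted PCI
+ * writes before the next burst.  This wrapper replaces memcpy_toio for the
+ * generic hd64572 code included below.
+ */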
+static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
+{
+	int len;
+	do {
+		len = length > 256 ? 256 : length;
+		memcpy_toio(dest, src, len);
+		dest += len;
+		src += len;
+		length -= len;
+		readb(dest);
+	} while (len);
+}
+
+#undef memcpy_toio
+#define memcpy_toio new_memcpy_toio
+
+#include "hd64572.c"
+
+
+static void pci200_set_iface(port_t *port)
+{
+	card_t *card = port->card;
+	u16 msci = get_msci(port);
+	u8 rxs = port->rxs & CLK_BRG_MASK;
+	u8 txs = port->txs & CLK_BRG_MASK;
+
+	sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
+		port->card);
+	switch(port->settings.clock_type) {
+	case CLOCK_INT:
+		rxs |= CLK_BRG; /* BRG output */
+		txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
+		break;
+
+	case CLOCK_TXINT:
+		rxs |= CLK_LINE; /* RXC input */
+		txs |= CLK_PIN_OUT | CLK_BRG; /* BRG output */
+		break;
+
+	case CLOCK_TXFROMRX:
+		rxs |= CLK_LINE; /* RXC input */
+		txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
+		break;
+
+	default:		/* EXTernal clock */
+		rxs |= CLK_LINE; /* RXC input */
+		txs |= CLK_PIN_OUT | CLK_LINE; /* TXC input */
+		break;
+	}
+
+	port->rxs = rxs;
+	port->txs = txs;
+	sca_out(rxs, msci + RXS, card);
+	sca_out(txs, msci + TXS, card);
+	sca_set_port(port);
+}
+
+
+
+static int pci200_open(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+
+	int result = hdlc_open(dev);
+	if (result)
+		return result;
+
+	sca_open(dev);
+	pci200_set_iface(port);
+	sca_flush(port->card);
+	return 0;
+}
+
+
+
+static int pci200_close(struct net_device *dev)
+{
+	sca_close(dev);
+	sca_flush(dev_to_port(dev)->card);
+	hdlc_close(dev);
+	return 0;
+}
+
+
+
+static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	const size_t size = sizeof(sync_serial_settings);
+	sync_serial_settings new_line;
+	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+	port_t *port = dev_to_port(dev);
+
+#ifdef DEBUG_RINGS
+	if (cmd == SIOCDEVPRIVATE) {
+		sca_dump_rings(dev);
+		return 0;
+	}
+#endif
+	if (cmd != SIOCWANDEV)
+		return hdlc_ioctl(dev, ifr, cmd);
+
+	switch(ifr->ifr_settings.type) {
+	case IF_GET_IFACE:
+		ifr->ifr_settings.type = IF_IFACE_V35;
+		if (ifr->ifr_settings.size < size) {
+			ifr->ifr_settings.size = size; /* data size wanted */
+			return -ENOBUFS;
+		}
+		if (copy_to_user(line, &port->settings, size))
+			return -EFAULT;
+		return 0;
+
+	case IF_IFACE_V35:
+	case IF_IFACE_SYNC_SERIAL:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&new_line, line, size))
+			return -EFAULT;
+
+		if (new_line.clock_type != CLOCK_EXT &&
+		    new_line.clock_type != CLOCK_TXFROMRX &&
+		    new_line.clock_type != CLOCK_INT &&
+		    new_line.clock_type != CLOCK_TXINT)
+			return -EINVAL;	/* No such clock setting */
+
+		if (new_line.loopback != 0 && new_line.loopback != 1)
+			return -EINVAL;
+
+		memcpy(&port->settings, &new_line, size); /* Update settings */
+		pci200_set_iface(port);
+		sca_flush(port->card);
+		return 0;
+
+	default:
+		return hdlc_ioctl(dev, ifr, cmd);
+	}
+}
+
+
+
+static void pci200_pci_remove_one(struct pci_dev *pdev)
+{
+	int i;
+	card_t *card = pci_get_drvdata(pdev);
+
+	for (i = 0; i < 2; i++)
+		if (card->ports[i].card)
+			unregister_hdlc_device(card->ports[i].netdev);
+
+	if (card->irq)
+		free_irq(card->irq, card);
+
+	if (card->rambase)
+		iounmap(card->rambase);
+	if (card->scabase)
+		iounmap(card->scabase);
+	if (card->plxbase)
+		iounmap(card->plxbase);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	if (card->ports[0].netdev)
+		free_netdev(card->ports[0].netdev);
+	if (card->ports[1].netdev)
+		free_netdev(card->ports[1].netdev);
+	kfree(card);
+}
+
+static const struct net_device_ops pci200_ops = {
+	.ndo_open       = pci200_open,
+	.ndo_stop       = pci200_close,
+	.ndo_change_mtu = hdlc_change_mtu,
+	.ndo_start_xmit = hdlc_start_xmit,
+	.ndo_do_ioctl   = pci200_ioctl,
+};
+
+static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
+					 const struct pci_device_id *ent)
+{
+	card_t *card;
+	u32 __iomem *p;
+	int i;
+	u32 ramsize;
+	u32 ramphys;		/* buffer memory base */
+	u32 scaphys;		/* SCA memory base */
+	u32 plxphys;		/* PLX registers memory base */
+
+	i = pci_enable_device(pdev);
+	if (i)
+		return i;
+
+	i = pci_request_regions(pdev, "PCI200SYN");
+	if (i) {
+		pci_disable_device(pdev);
+		return i;
+	}
+
+	card = kzalloc(sizeof(card_t), GFP_KERNEL);
+	if (card == NULL) {
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		return -ENOBUFS;
+	}
+	pci_set_drvdata(pdev, card);
+	card->ports[0].netdev = alloc_hdlcdev(&card->ports[0]);
+	card->ports[1].netdev = alloc_hdlcdev(&card->ports[1]);
+	if (!card->ports[0].netdev || !card->ports[1].netdev) {
+		pr_err("unable to allocate memory\n");
+		pci200_pci_remove_one(pdev);
+		return -ENOMEM;
+	}
+
+	if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE ||
+	    pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE ||
+	    pci_resource_len(pdev, 3) < 16384) {
+		pr_err("invalid card EEPROM parameters\n");
+		pci200_pci_remove_one(pdev);
+		return -EFAULT;
+	}
+
+	plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK;
+	card->plxbase = ioremap(plxphys, PCI200SYN_PLX_SIZE);
+
+	scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK;
+	card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE);
+
+	ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
+	card->rambase = pci_ioremap_bar(pdev, 3);
+
+	if (card->plxbase == NULL ||
+	    card->scabase == NULL ||
+	    card->rambase == NULL) {
+		pr_err("ioremap() failed\n");
+		pci200_pci_remove_one(pdev);
+		return -EFAULT;
+	}
+
+	/* Reset PLX */
+	p = &card->plxbase->init_ctrl;
+	writel(readl(p) | 0x40000000, p);
+	readl(p);		/* Flush the write - do not use sca_flush */
+	udelay(1);
+
+	writel(readl(p) & ~0x40000000, p);
+	readl(p);		/* Flush the write - do not use sca_flush */
+	udelay(1);
+
+	ramsize = sca_detect_ram(card, card->rambase,
+				 pci_resource_len(pdev, 3));
+
+	/* number of TX + RX buffers for one port - this is dual port card */
+	i = ramsize / (2 * (sizeof(pkt_desc) + HDLC_MAX_MRU));
+	card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
+	card->rx_ring_buffers = i - card->tx_ring_buffers;
+
+	card->buff_offset = 2 * sizeof(pkt_desc) * (card->tx_ring_buffers +
+						    card->rx_ring_buffers);
+
+	pr_info("%u KB RAM at 0x%x, IRQ%u, using %u TX + %u RX packets rings\n",
+		ramsize / 1024, ramphys,
+		pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers);
+
+	if (card->tx_ring_buffers < 1) {
+		pr_err("RAM test failed\n");
+		pci200_pci_remove_one(pdev);
+		return -EFAULT;
+	}
+
+	/* Enable interrupts on the PCI bridge */
+	p = &card->plxbase->intr_ctrl_stat;
+	writew(readw(p) | 0x0040, p);
+
+	/* Allocate IRQ */
+	if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pci200syn", card)) {
+		pr_warn("could not allocate IRQ%d\n", pdev->irq);
+		pci200_pci_remove_one(pdev);
+		return -EBUSY;
+	}
+	card->irq = pdev->irq;
+
+	sca_init(card, 0);
+
+	for (i = 0; i < 2; i++) {
+		port_t *port = &card->ports[i];
+		struct net_device *dev = port->netdev;
+		hdlc_device *hdlc = dev_to_hdlc(dev);
+		port->chan = i;
+
+		spin_lock_init(&port->lock);
+		dev->irq = card->irq;
+		dev->mem_start = ramphys;
+		dev->mem_end = ramphys + ramsize - 1;
+		dev->tx_queue_len = 50;
+		dev->netdev_ops = &pci200_ops;
+		hdlc->attach = sca_attach;
+		hdlc->xmit = sca_xmit;
+		port->settings.clock_type = CLOCK_EXT;
+		port->card = card;
+		sca_init_port(port);
+		if (register_hdlc_device(dev)) {
+			pr_err("unable to register hdlc device\n");
+			port->card = NULL;
+			pci200_pci_remove_one(pdev);
+			return -ENOBUFS;
+		}
+
+		netdev_info(dev, "PCI200SYN channel %d\n", port->chan);
+	}
+
+	sca_flush(card);
+	return 0;
+}
+
+
+
+static DEFINE_PCI_DEVICE_TABLE(pci200_pci_tbl) = {
+	{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
+	  PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
+	{ 0, }
+};
+
+
+static struct pci_driver pci200_pci_driver = {
+	.name		= "PCI200SYN",
+	.id_table	= pci200_pci_tbl,
+	.probe		= pci200_pci_init_one,
+	.remove		= pci200_pci_remove_one,
+};
+
+
+static int __init pci200_init_module(void)
+{
+	if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
+		pr_err("Invalid PCI clock frequency\n");
+		return -EINVAL;
+	}
+	return pci_register_driver(&pci200_pci_driver);
+}
+
+
+
+static void __exit pci200_cleanup_module(void)
+{
+	pci_unregister_driver(&pci200_pci_driver);
+}
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("Goramo PCI200SYN serial port driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, pci200_pci_tbl);
+module_param(pci_clock_freq, int, 0444);
+MODULE_PARM_DESC(pci_clock_freq, "System PCI clock frequency in Hz");
+module_init(pci200_init_module);
+module_exit(pci200_cleanup_module);
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/sbni.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/sbni.c
new file mode 100644
index 0000000..d43f4ef
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/sbni.c
@@ -0,0 +1,1714 @@
+/* sbni.c:  Granch SBNI12 leased line adapters driver for linux
+ *
+ *	Written 2001 by Denis I.Timofeev (timofeev@granch.ru)
+ *
+ *	Previous versions were written by Yaroslav Polyakov,
+ *	Alexey Zverev and Max Khon.
+ *
+ *	Driver supports SBNI12-02,-04,-05,-10,-11 cards, single and
+ *	double-channel, PCI and ISA modifications.
+ *	More information and useful utilities for working with SBNI12 cards
+ *	can be found at http://www.granch.com (English) or http://www.granch.ru (Russian)
+ *
+ *	This software may be used and distributed according to the terms
+ *	of the GNU General Public License.
+ *
+ *
+ *  5.0.1	Jun 22 2001
+ *	  - Fixed bug in probe
+ *  5.0.0	Jun 06 2001
+ *	  - Driver was completely redesigned by Denis I.Timofeev,
+ *	  - now PCI/Dual, ISA/Dual (with single interrupt line) models are
+ *	  - supported
+ *  3.3.0	Thu Feb 24 21:30:28 NOVT 2000 
+ *        - PCI cards support
+ *  3.2.0	Mon Dec 13 22:26:53 NOVT 1999
+ * 	  - Completely rebuilt all the packet storage system
+ * 	  -    to work in Ethernet-like style.
+ *  3.1.1	just fixed some bugs (5 aug 1999)
+ *  3.1.0	added balancing feature	(26 apr 1999)
+ *  3.0.1	just fixed some bugs (14 apr 1999).
+ *  3.0.0	Initial Revision, Yaroslav Polyakov (24 Feb 1999)
+ *        - added pre-calculation for CRC, fixed bug with "len-2" frames, 
+ *        - removed outbound fragmentation (MTU=1000), written CRC-calculation 
+ *        - on asm, added work with hard_headers and now we have our own cache 
+ *        - for them, optionally supported word-interchange on some chipsets,
+ * 
+ *	Known problem: this driver wasn't tested on a multiprocessor machine.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/fcntl.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <net/net_namespace.h>
+#include <net/arp.h>
+
+#include <asm/io.h>
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "sbni.h"
+
+/* device private data */
+
+struct net_local {
+	struct timer_list	watchdog;
+
+	spinlock_t	lock;
+	struct sk_buff  *rx_buf_p;		/* receive buffer ptr */
+	struct sk_buff  *tx_buf_p;		/* transmit buffer ptr */
+	
+	unsigned int	framelen;		/* current frame length */
+	unsigned int	maxframe;		/* maximum valid frame length */
+	unsigned int	state;
+	unsigned int	inppos, outpos;		/* positions in rx/tx buffers */
+
+	/* number of the frame being transmitted - counts down from the frame qty to 1 */
+	unsigned int	tx_frameno;
+
+	/* expected number of the next frame to be received */
+	unsigned int	wait_frameno;
+
+	/* count of failed attempts to send a frame - 32 attempts are made
+	   before reporting an error, while the receiver on the opposite
+	   side of the wire tunes in */
+	unsigned int	trans_errors;
+
+	/* idle time; send pong when limit exceeded */
+	unsigned int	timer_ticks;
+
+	/* fields used for receive level autoselection */
+	int	delta_rxl;
+	unsigned int	cur_rxl_index, timeout_rxl;
+	unsigned long	cur_rxl_rcvd, prev_rxl_rcvd;
+
+	struct sbni_csr1	csr1;		/* current value of CSR1 */
+	struct sbni_in_stats	in_stats; 	/* internal statistics */ 
+
+	struct net_device		*second;	/* for ISA/dual cards */
+
+#ifdef CONFIG_SBNI_MULTILINE
+	struct net_device		*master;
+	struct net_device		*link;
+#endif
+};
+
+
+static int  sbni_card_probe( unsigned long );
+static int  sbni_pci_probe( struct net_device  * );
+static struct net_device  *sbni_probe1(struct net_device *, unsigned long, int);
+static int  sbni_open( struct net_device * );
+static int  sbni_close( struct net_device * );
+static netdev_tx_t sbni_start_xmit(struct sk_buff *,
+					 struct net_device * );
+static int  sbni_ioctl( struct net_device *, struct ifreq *, int );
+static void  set_multicast_list( struct net_device * );
+
+static irqreturn_t sbni_interrupt( int, void * );
+static void  handle_channel( struct net_device * );
+static int   recv_frame( struct net_device * );
+static void  send_frame( struct net_device * );
+static int   upload_data( struct net_device *,
+			  unsigned, unsigned, unsigned, u32 );
+static void  download_data( struct net_device *, u32 * );
+static void  sbni_watchdog( unsigned long );
+static void  interpret_ack( struct net_device *, unsigned );
+static int   append_frame_to_pkt( struct net_device *, unsigned, u32 );
+static void  indicate_pkt( struct net_device * );
+static void  card_start( struct net_device * );
+static void  prepare_to_send( struct sk_buff *, struct net_device * );
+static void  drop_xmit_queue( struct net_device * );
+static void  send_frame_header( struct net_device *, u32 * );
+static int   skip_tail( unsigned int, unsigned int, u32 );
+static int   check_fhdr( u32, u32 *, u32 *, u32 *, u32 *, u32 * );
+static void  change_level( struct net_device * );
+static void  timeout_change_level( struct net_device * );
+static u32   calc_crc32( u32, u8 *, u32 );
+static struct sk_buff *  get_rx_buf( struct net_device * );
+static int  sbni_init( struct net_device * );
+
+#ifdef CONFIG_SBNI_MULTILINE
+static int  enslave( struct net_device *, struct net_device * );
+static int  emancipate( struct net_device * );
+#endif
+
+#ifdef __i386__
+#define ASM_CRC 1
+#endif
+
+static const char  version[] =
+	"Granch SBNI12 driver ver 5.0.1  Jun 22 2001  Denis I.Timofeev.\n";
+
+static bool skip_pci_probe	__initdata = false;
+static int  scandone	__initdata = 0;
+static int  num		__initdata = 0;
+
+static unsigned char  rxl_tab[];
+static u32  crc32tab[];
+
+/* A list of all installed devices, for removing the driver module. */
+static struct net_device  *sbni_cards[ SBNI_MAX_NUM_CARDS ];
+
+/* Lists of device's parameters */
+static u32	io[   SBNI_MAX_NUM_CARDS ] __initdata =
+	{ [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
+static u32	irq[  SBNI_MAX_NUM_CARDS ] __initdata;
+static u32	baud[ SBNI_MAX_NUM_CARDS ] __initdata;
+static u32	rxl[  SBNI_MAX_NUM_CARDS ] __initdata =
+	{ [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
+static u32	mac[  SBNI_MAX_NUM_CARDS ] __initdata;
+
+#ifndef MODULE
+typedef u32  iarr[];
+static iarr __initdata *dest[5] = { &io, &irq, &baud, &rxl, &mac };
+#endif
+
+/* A zero-terminated list of I/O addresses to be probed on ISA bus */
+static unsigned int  netcard_portlist[ ] __initdata = { 
+	0x210, 0x214, 0x220, 0x224, 0x230, 0x234, 0x240, 0x244, 0x250, 0x254,
+	0x260, 0x264, 0x270, 0x274, 0x280, 0x284, 0x290, 0x294, 0x2a0, 0x2a4,
+	0x2b0, 0x2b4, 0x2c0, 0x2c4, 0x2d0, 0x2d4, 0x2e0, 0x2e4, 0x2f0, 0x2f4,
+	0 };
+
+#define NET_LOCAL_LOCK(dev) (((struct net_local *)netdev_priv(dev))->lock)
+
+/*
+ * Look for an SBNI card whose address is stored in dev->base_addr, if nonzero.
+ * Otherwise, look through the PCI bus. If no PCI card is found, scan ISA.
+ */
+
+static inline int __init
+sbni_isa_probe( struct net_device  *dev )
+{
+	if( dev->base_addr > 0x1ff &&
+	    request_region( dev->base_addr, SBNI_IO_EXTENT, dev->name ) &&
+	    sbni_probe1( dev, dev->base_addr, dev->irq ) )
+
+		return  0;
+	else {
+		pr_err("base address 0x%lx is busy, or adapter is malfunctional!\n",
+		       dev->base_addr);
+		return  -ENODEV;
+	}
+}
+
+static const struct net_device_ops sbni_netdev_ops = {
+	.ndo_open		= sbni_open,
+	.ndo_stop		= sbni_close,
+	.ndo_start_xmit		= sbni_start_xmit,
+	.ndo_set_rx_mode	= set_multicast_list,
+	.ndo_do_ioctl		= sbni_ioctl,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+static void __init sbni_devsetup(struct net_device *dev)
+{
+	ether_setup( dev );
+	dev->netdev_ops = &sbni_netdev_ops;
+}
+
+int __init sbni_probe(int unit)
+{
+	struct net_device *dev;
+	int err;
+
+	dev = alloc_netdev(sizeof(struct net_local), "sbni", sbni_devsetup);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->netdev_ops = &sbni_netdev_ops;
+
+	sprintf(dev->name, "sbni%d", unit);
+	netdev_boot_setup_check(dev);
+
+	err = sbni_init(dev);
+	if (err) {
+		free_netdev(dev);
+		return err;
+	}
+
+	err = register_netdev(dev);
+	if (err) {
+		release_region( dev->base_addr, SBNI_IO_EXTENT );
+		free_netdev(dev);
+		return err;
+	}
+	pr_info_once("%s", version);
+	return 0;
+}
+
+static int __init sbni_init(struct net_device *dev)
+{
+	int  i;
+	if( dev->base_addr )
+		return  sbni_isa_probe( dev );
+	/* otherwise we have to search for our adapter */
+
+	if( io[ num ] != -1 )
+		dev->base_addr	= io[ num ],
+		dev->irq	= irq[ num ];
+	else if( scandone  ||  io[ 0 ] != -1 )
+		return  -ENODEV;
+
+	/* if io[ num ] contains a non-zero address, the adapter is on the ISA bus */
+	if( dev->base_addr )
+		return  sbni_isa_probe( dev );
+
+	/* ...otherwise - scan PCI first */
+	if( !skip_pci_probe  &&  !sbni_pci_probe( dev ) )
+		return  0;
+
+	if( io[ num ] == -1 ) {
+		/* Auto-scan will be stopped when the first ISA card is found */
+		scandone = 1;
+		if( num > 0 )
+			return  -ENODEV;
+	}
+
+	for( i = 0;  netcard_portlist[ i ];  ++i ) {
+		int  ioaddr = netcard_portlist[ i ];
+		if( request_region( ioaddr, SBNI_IO_EXTENT, dev->name ) &&
+		    sbni_probe1( dev, ioaddr, 0 ))
+			return 0;
+	}
+
+	return  -ENODEV;
+}
+
+
+static int __init
+sbni_pci_probe( struct net_device  *dev )
+{
+	struct pci_dev  *pdev = NULL;
+
+	while( (pdev = pci_get_class( PCI_CLASS_NETWORK_OTHER << 8, pdev ))
+	       != NULL ) {
+		int  pci_irq_line;
+		unsigned long  pci_ioaddr;
+
+		if( pdev->vendor != SBNI_PCI_VENDOR &&
+		    pdev->device != SBNI_PCI_DEVICE )
+			continue;
+
+		pci_ioaddr = pci_resource_start( pdev, 0 );
+		pci_irq_line = pdev->irq;
+
+		/* Skip cards already found in previous calls */
+		if( !request_region( pci_ioaddr, SBNI_IO_EXTENT, dev->name ) ) {
+			if (pdev->subsystem_device != 2)
+				continue;
+
+			/* Dual adapter is present */
+			if (!request_region(pci_ioaddr += 4, SBNI_IO_EXTENT,
+							dev->name ) )
+				continue;
+		}
+
+		if (pci_irq_line <= 0 || pci_irq_line >= nr_irqs)
+			pr_warn(
+"WARNING: The PCI BIOS assigned this PCI card to IRQ %d, which is unlikely to work!.\n"
+"You should use the PCI BIOS setup to assign a valid IRQ line.\n",
+				pci_irq_line );
+
+		/* avoid re-enabling dual adapters */
+		if( (pci_ioaddr & 7) == 0  &&  pci_enable_device( pdev ) ) {
+			release_region( pci_ioaddr, SBNI_IO_EXTENT );
+			pci_dev_put( pdev );
+			return  -EIO;
+		}
+		if( sbni_probe1( dev, pci_ioaddr, pci_irq_line ) ) {
+			SET_NETDEV_DEV(dev, &pdev->dev);
+			/* not the best thing to do, but this is all messed up 
+			   for hotplug systems anyway... */
+			pci_dev_put( pdev );
+			return  0;
+		}
+	}
+	return  -ENODEV;
+}
+
+
+static struct net_device * __init
+sbni_probe1( struct net_device  *dev,  unsigned long  ioaddr,  int  irq )
+{
+	struct net_local  *nl;
+
+	if( sbni_card_probe( ioaddr ) ) {
+		release_region( ioaddr, SBNI_IO_EXTENT );
+		return NULL;
+	}
+
+	outb( 0, ioaddr + CSR0 );
+
+	if( irq < 2 ) {
+		unsigned long irq_mask;
+
+		irq_mask = probe_irq_on();
+		outb( EN_INT | TR_REQ, ioaddr + CSR0 );
+		outb( PR_RES, ioaddr + CSR1 );
+		mdelay(50);
+		irq = probe_irq_off(irq_mask);
+		outb( 0, ioaddr + CSR0 );
+
+		if( !irq ) {
+			pr_err("%s: can't detect device irq!\n", dev->name);
+			release_region( ioaddr, SBNI_IO_EXTENT );
+			return NULL;
+		}
+	} else if( irq == 2 )
+		irq = 9;
+
+	dev->irq = irq;
+	dev->base_addr = ioaddr;
+
+	/* Fill in sbni-specific dev fields. */
+	nl = netdev_priv(dev);
+	if( !nl ) {
+		pr_err("%s: unable to get memory!\n", dev->name);
+		release_region( ioaddr, SBNI_IO_EXTENT );
+		return NULL;
+	}
+
+	memset( nl, 0, sizeof(struct net_local) );
+	spin_lock_init( &nl->lock );
+
+	/* store MAC address (generate one if it isn't known) */
+	*(__be16 *)dev->dev_addr = htons( 0x00ff );
+	*(__be32 *)(dev->dev_addr + 2) = htonl( 0x01000000 |
+		((mac[num] ?
+		mac[num] :
+		(u32)((long)netdev_priv(dev))) & 0x00ffffff));
+
+	/* store link settings (speed, receive level) */
+	nl->maxframe  = DEFAULT_FRAME_LEN;
+	nl->csr1.rate = baud[ num ];
+
+	if( (nl->cur_rxl_index = rxl[ num ]) == -1 )
+		/* autotune rxl */
+		nl->cur_rxl_index = DEF_RXL,
+		nl->delta_rxl = DEF_RXL_DELTA;
+	else
+		nl->delta_rxl = 0;
+	nl->csr1.rxl  = rxl_tab[ nl->cur_rxl_index ];
+	if( inb( ioaddr + CSR0 ) & 0x01 )
+		nl->state |= FL_SLOW_MODE;
+
+	pr_notice("%s: ioaddr %#lx, irq %d, MAC: 00:ff:01:%02x:%02x:%02x\n",
+		  dev->name, dev->base_addr, dev->irq,
+		  ((u8 *)dev->dev_addr)[3],
+		  ((u8 *)dev->dev_addr)[4],
+		  ((u8 *)dev->dev_addr)[5]);
+
+	pr_notice("%s: speed %d",
+		  dev->name,
+		  ((nl->state & FL_SLOW_MODE) ? 500000 : 2000000)
+		  / (1 << nl->csr1.rate));
+
+	if( nl->delta_rxl == 0 )
+		pr_cont(", receive level 0x%x (fixed)\n", nl->cur_rxl_index);
+	else
+		pr_cont(", receive level (auto)\n");
+
+#ifdef CONFIG_SBNI_MULTILINE
+	nl->master = dev;
+	nl->link   = NULL;
+#endif
+   
+	sbni_cards[ num++ ] = dev;
+	return  dev;
+}
+
+/* -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_SBNI_MULTILINE
+
+static netdev_tx_t
+sbni_start_xmit( struct sk_buff  *skb,  struct net_device  *dev )
+{
+	struct net_device  *p;
+
+	netif_stop_queue( dev );
+
+	/* Look for an idle device in the list */
+	for( p = dev;  p; ) {
+		struct net_local  *nl = netdev_priv(p);
+		spin_lock( &nl->lock );
+		if( nl->tx_buf_p  ||  (nl->state & FL_LINE_DOWN) ) {
+			p = nl->link;
+			spin_unlock( &nl->lock );
+		} else {
+			/* Idle dev is found */
+			prepare_to_send( skb, p );
+			spin_unlock( &nl->lock );
+			netif_start_queue( dev );
+			return NETDEV_TX_OK;
+		}
+	}
+
+	return NETDEV_TX_BUSY;
+}
+
+#else	/* CONFIG_SBNI_MULTILINE */
+
+static netdev_tx_t
+sbni_start_xmit( struct sk_buff  *skb,  struct net_device  *dev )
+{
+	struct net_local  *nl  = netdev_priv(dev);
+
+	netif_stop_queue( dev );
+	spin_lock( &nl->lock );
+
+	prepare_to_send( skb, dev );
+
+	spin_unlock( &nl->lock );
+	return NETDEV_TX_OK;
+}
+
+#endif	/* CONFIG_SBNI_MULTILINE */
+
+/* -------------------------------------------------------------------------- */
+
+/* interrupt handler */
+
+/*
+ *	SBNI12D-10, -11/ISA boards in "common interrupt" mode cannot be
+ * treated as two independent single-channel devices. Each channel appears
+ * as an Ethernet interface, but the interrupt handler must be shared. In
+ * fact, only the first channel ("master") registers the handler; its
+ * struct net_local holds a pointer to the "slave" channel's struct
+ * net_local, and the master handles the slave's interrupts too.
+ *	The dev of every successfully attached ISA SBNI board is linked
+ * into a list. When the next board is initialized, it scans this list;
+ * if it finds a dev with the same irq and an ioaddr differing by 4, it
+ * assumes that board to be the "master".
+ */
+
+static irqreturn_t
+sbni_interrupt( int  irq,  void  *dev_id )
+{
+	struct net_device	  *dev = dev_id;
+	struct net_local  *nl  = netdev_priv(dev);
+	int	repeat;
+
+	spin_lock( &nl->lock );
+	if( nl->second )
+		spin_lock(&NET_LOCAL_LOCK(nl->second));
+
+	do {
+		repeat = 0;
+		if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) )
+			handle_channel( dev ),
+			repeat = 1;
+		if( nl->second  && 	/* second channel present */
+		    (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) )
+			handle_channel( nl->second ),
+			repeat = 1;
+	} while( repeat );
+
+	if( nl->second )
+		spin_unlock(&NET_LOCAL_LOCK(nl->second));
+	spin_unlock( &nl->lock );
+	return IRQ_HANDLED;
+}
+
+
+static void
+handle_channel( struct net_device  *dev )
+{
+	struct net_local	*nl    = netdev_priv(dev);
+	unsigned long		ioaddr = dev->base_addr;
+
+	int  req_ans;
+	unsigned char  csr0;
+
+#ifdef CONFIG_SBNI_MULTILINE
+	/* Lock the master device because we are going to change its local data */
+	if( nl->state & FL_SLAVE )
+		spin_lock(&NET_LOCAL_LOCK(nl->master));
+#endif
+
+	outb( (inb( ioaddr + CSR0 ) & ~EN_INT) | TR_REQ, ioaddr + CSR0 );
+
+	nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
+	for(;;) {
+		csr0 = inb( ioaddr + CSR0 );
+		if( ( csr0 & (RC_RDY | TR_RDY) ) == 0 )
+			break;
+
+		req_ans = !(nl->state & FL_PREV_OK);
+
+		if( csr0 & RC_RDY )
+			req_ans = recv_frame( dev );
+
+		/*
+		 * TR_RDY always equals 1 here because we own the marker,
+		 * and we set TR_REQ while interrupts were disabled
+		 */
+		csr0 = inb( ioaddr + CSR0 );
+		if( !(csr0 & TR_RDY)  ||  (csr0 & RC_RDY) )
+			netdev_err(dev, "internal error!\n");
+
+		/* if state & FL_NEED_RESEND != 0 then tx_frameno != 0 */
+		if( req_ans  ||  nl->tx_frameno != 0 )
+			send_frame( dev );
+		else
+			/* send marker without any data */
+			outb( inb( ioaddr + CSR0 ) & ~TR_REQ, ioaddr + CSR0 );
+	}
+
+	outb( inb( ioaddr + CSR0 ) | EN_INT, ioaddr + CSR0 );
+
+#ifdef CONFIG_SBNI_MULTILINE
+	if( nl->state & FL_SLAVE )
+		spin_unlock(&NET_LOCAL_LOCK(nl->master));
+#endif
+}
+
+
+/*
+ * The routine returns 1 if the received frame needs to be acknowledged.
+ * An empty frame received without errors won't be acknowledged.
+ */
+
+static int
+recv_frame( struct net_device  *dev )
+{
+	struct net_local  *nl   = netdev_priv(dev);
+	unsigned long  ioaddr	= dev->base_addr;
+
+	u32  crc = CRC32_INITIAL;
+
+	unsigned  framelen = 0, frameno, ack;
+	unsigned  is_first, frame_ok = 0;
+
+	if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) {
+		frame_ok = framelen > 4
+			?  upload_data( dev, framelen, frameno, is_first, crc )
+			:  skip_tail( ioaddr, framelen, crc );
+		if( frame_ok )
+			interpret_ack( dev, ack );
+	}
+
+	outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 );
+	if( frame_ok ) {
+		nl->state |= FL_PREV_OK;
+		if( framelen > 4 )
+			nl->in_stats.all_rx_number++;
+	} else
+		nl->state &= ~FL_PREV_OK,
+		change_level( dev ),
+		nl->in_stats.all_rx_number++,
+		nl->in_stats.bad_rx_number++;
+
+	return  !frame_ok  ||  framelen > 4;
+}
+
+
+static void
+send_frame( struct net_device  *dev )
+{
+	struct net_local  *nl    = netdev_priv(dev);
+
+	u32  crc = CRC32_INITIAL;
+
+	if( nl->state & FL_NEED_RESEND ) {
+
+		/* if the frame was sent but not ACK'ed - resend it */
+		if( nl->trans_errors ) {
+			--nl->trans_errors;
+			if( nl->framelen != 0 )
+				nl->in_stats.resend_tx_number++;
+		} else {
+			/* transmit failed after too many attempts */
+#ifdef CONFIG_SBNI_MULTILINE
+			if( (nl->state & FL_SLAVE)  ||  nl->link )
+#endif
+			nl->state |= FL_LINE_DOWN;
+			drop_xmit_queue( dev );
+			goto  do_send;
+		}
+	} else
+		nl->trans_errors = TR_ERROR_COUNT;
+
+	send_frame_header( dev, &crc );
+	nl->state |= FL_NEED_RESEND;
+	/*
+	 * FL_NEED_RESEND will be cleared after an ACK, but if an empty
+	 * frame was sent it is cleared in prepare_to_send for the next frame
+	 */
+
+
+	if( nl->framelen ) {
+		download_data( dev, &crc );
+		nl->in_stats.all_tx_number++;
+		nl->state |= FL_WAIT_ACK;
+	}
+
+	outsb( dev->base_addr + DAT, (u8 *)&crc, sizeof crc );
+
+do_send:
+	outb( inb( dev->base_addr + CSR0 ) & ~TR_REQ, dev->base_addr + CSR0 );
+
+	if( nl->tx_frameno )
+		/* the next frame exists - request the card to send it */
+		outb( inb( dev->base_addr + CSR0 ) | TR_REQ,
+		      dev->base_addr + CSR0 );
+}
+
+
+/*
+ * Write the frame data into adapter's buffer memory, and calculate CRC.
+ * Do padding if necessary.
+ */
+
+static void
+download_data( struct net_device  *dev,  u32  *crc_p )
+{
+	struct net_local  *nl    = netdev_priv(dev);
+	struct sk_buff    *skb	 = nl->tx_buf_p;
+
+	unsigned  len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);
+
+	outsb( dev->base_addr + DAT, skb->data + nl->outpos, len );
+	*crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );
+
+	/* if the packet is too short, write some more bytes as padding */
+	for( len = nl->framelen - len;  len--; )
+		outb( 0, dev->base_addr + DAT ),
+		*crc_p = CRC32( 0, *crc_p );
+}
+
+
+static int
+upload_data( struct net_device  *dev,  unsigned  framelen,  unsigned  frameno,
+	     unsigned  is_first,  u32  crc )
+{
+	struct net_local  *nl = netdev_priv(dev);
+
+	int  frame_ok;
+
+	if( is_first )
+		nl->wait_frameno = frameno,
+		nl->inppos = 0;
+
+	if( nl->wait_frameno == frameno ) {
+
+		if( nl->inppos + framelen  <=  ETHER_MAX_LEN )
+			frame_ok = append_frame_to_pkt( dev, framelen, crc );
+
+		/*
+		 * if the CRC is right but framelen is incorrect, a transmitter
+		 * error occurred... drop the entire packet
+		 */
+		else if( (frame_ok = skip_tail( dev->base_addr, framelen, crc ))
+			 != 0 )
+			nl->wait_frameno = 0,
+			nl->inppos = 0,
+#ifdef CONFIG_SBNI_MULTILINE
+			nl->master->stats.rx_errors++,
+			nl->master->stats.rx_missed_errors++;
+#else
+		        dev->stats.rx_errors++,
+			dev->stats.rx_missed_errors++;
+#endif
+			/* now skip all frames until is_first != 0 */
+	} else
+		frame_ok = skip_tail( dev->base_addr, framelen, crc );
+
+	if( is_first  &&  !frame_ok )
+		/*
+		 * The frame is broken, but we have already stored
+		 * is_first... Drop the entire packet.
+		 */
+		nl->wait_frameno = 0,
+#ifdef CONFIG_SBNI_MULTILINE
+		nl->master->stats.rx_errors++,
+		nl->master->stats.rx_crc_errors++;
+#else
+		dev->stats.rx_errors++,
+		dev->stats.rx_crc_errors++;
+#endif
+
+	return  frame_ok;
+}
+
+
+static inline void
+send_complete( struct net_device *dev )
+{
+	struct net_local  *nl = netdev_priv(dev);
+
+#ifdef CONFIG_SBNI_MULTILINE
+	nl->master->stats.tx_packets++;
+	nl->master->stats.tx_bytes += nl->tx_buf_p->len;
+#else
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += nl->tx_buf_p->len;
+#endif
+	dev_kfree_skb_irq( nl->tx_buf_p );
+
+	nl->tx_buf_p = NULL;
+
+	nl->outpos = 0;
+	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
+	nl->framelen   = 0;
+}
+
+
+static void
+interpret_ack( struct net_device  *dev,  unsigned  ack )
+{
+	struct net_local  *nl = netdev_priv(dev);
+
+	if( ack == FRAME_SENT_OK ) {
+		nl->state &= ~FL_NEED_RESEND;
+
+		if( nl->state & FL_WAIT_ACK ) {
+			nl->outpos += nl->framelen;
+
+			if( --nl->tx_frameno )
+				nl->framelen = min_t(unsigned int,
+						   nl->maxframe,
+						   nl->tx_buf_p->len - nl->outpos);
+			else
+				send_complete( dev ),
+#ifdef CONFIG_SBNI_MULTILINE
+				netif_wake_queue( nl->master );
+#else
+				netif_wake_queue( dev );
+#endif
+		}
+	}
+
+	nl->state &= ~FL_WAIT_ACK;
+}
+
+
+/*
+ * Glue the received frame onto the previous fragments of the packet.
+ * Indicate the packet once the last frame has been accepted.
+ */
+
+static int
+append_frame_to_pkt( struct net_device  *dev,  unsigned  framelen,  u32  crc )
+{
+	struct net_local  *nl = netdev_priv(dev);
+
+	u8  *p;
+
+	if( nl->inppos + framelen  >  ETHER_MAX_LEN )
+		return  0;
+
+	if( !nl->rx_buf_p  &&  !(nl->rx_buf_p = get_rx_buf( dev )) )
+		return  0;
+
+	p = nl->rx_buf_p->data + nl->inppos;
+	insb( dev->base_addr + DAT, p, framelen );
+	if( calc_crc32( crc, p, framelen ) != CRC32_REMAINDER )
+		return  0;
+
+	nl->inppos += framelen - 4;
+	if( --nl->wait_frameno == 0 )		/* last frame received */
+		indicate_pkt( dev );
+
+	return  1;
+}
+
+
+/*
+ * Prepare to start output on adapter.
+ * Transmitter will be actually activated when marker is accepted.
+ */
+
+static void
+prepare_to_send( struct sk_buff  *skb,  struct net_device  *dev )
+{
+	struct net_local  *nl = netdev_priv(dev);
+
+	unsigned int  len;
+
+	/* nl->tx_buf_p == NULL here! */
+	if( nl->tx_buf_p )
+		netdev_err(dev, "memory leak!\n");
+
+	nl->outpos = 0;
+	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
+
+	len = skb->len;
+	if( len < SBNI_MIN_LEN )
+		len = SBNI_MIN_LEN;
+
+	nl->tx_buf_p	= skb;
+	nl->tx_frameno	= DIV_ROUND_UP(len, nl->maxframe);
+	nl->framelen	= len < nl->maxframe  ?  len  :  nl->maxframe;
+
+	outb( inb( dev->base_addr + CSR0 ) | TR_REQ,  dev->base_addr + CSR0 );
+#ifdef CONFIG_SBNI_MULTILINE
+	nl->master->trans_start = jiffies;
+#else
+	dev->trans_start = jiffies;
+#endif
+}
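
prepare_to_send() above splits a single skb into tx_frameno frames of at most
maxframe bytes each. A small worked sketch of that arithmetic, assuming the
default maxframe of 1012 bytes (DEFAULT_FRAME_LEN):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int len = 2500, maxframe = 1012;	/* hypothetical packet */
		unsigned int frames = DIV_ROUND_UP(len, maxframe);	/* 3 frames */
		unsigned int first  = len < maxframe ? len : maxframe;	/* 1012 bytes */

		printf("%u bytes -> %u frames, first frame %u bytes\n",
		       len, frames, first);
		return 0;
	}

So a 2500-byte packet goes out as two full 1012-byte frames plus a final
476-byte frame.
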
+
+
+static void
+drop_xmit_queue( struct net_device  *dev )
+{
+	struct net_local  *nl = netdev_priv(dev);
+
+	if( nl->tx_buf_p )
+		dev_kfree_skb_any( nl->tx_buf_p ),
+		nl->tx_buf_p = NULL,
+#ifdef CONFIG_SBNI_MULTILINE
+		nl->master->stats.tx_errors++,
+		nl->master->stats.tx_carrier_errors++;
+#else
+		dev->stats.tx_errors++,
+		dev->stats.tx_carrier_errors++;
+#endif
+
+	nl->tx_frameno	= 0;
+	nl->framelen	= 0;
+	nl->outpos	= 0;
+	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
+#ifdef CONFIG_SBNI_MULTILINE
+	netif_start_queue( nl->master );
+	nl->master->trans_start = jiffies;
+#else
+	netif_start_queue( dev );
+	dev->trans_start = jiffies;
+#endif
+}
+
+
+static void
+send_frame_header( struct net_device  *dev,  u32  *crc_p )
+{
+	struct net_local  *nl  = netdev_priv(dev);
+
+	u32  crc = *crc_p;
+	u32  len_field = nl->framelen + 6;	/* CRC + frameno + reserved */
+	u8   value;
+
+	if( nl->state & FL_NEED_RESEND )
+		len_field |= FRAME_RETRY;	/* non-first attempt... */
+
+	if( nl->outpos == 0 )
+		len_field |= FRAME_FIRST;
+
+	len_field |= (nl->state & FL_PREV_OK) ? FRAME_SENT_OK : FRAME_SENT_BAD;
+	outb( SBNI_SIG, dev->base_addr + DAT );
+
+	value = (u8) len_field;
+	outb( value, dev->base_addr + DAT );
+	crc = CRC32( value, crc );
+	value = (u8) (len_field >> 8);
+	outb( value, dev->base_addr + DAT );
+	crc = CRC32( value, crc );
+
+	outb( nl->tx_frameno, dev->base_addr + DAT );
+	crc = CRC32( nl->tx_frameno, crc );
+	outb( 0, dev->base_addr + DAT );
+	crc = CRC32( 0, crc );
+	*crc_p = crc;
+}
+
+
+/*
+ * if the frame tail is not needed (incorrect number or received twice),
+ * it won't be stored, but the CRC will still be calculated
+ */
+
+static int
+skip_tail( unsigned int  ioaddr,  unsigned int  tail_len,  u32 crc )
+{
+	while( tail_len-- )
+		crc = CRC32( inb( ioaddr + DAT ), crc );
+
+	return  crc == CRC32_REMAINDER;
+}
+
+
+/*
+ * Preliminarily checks whether the frame header is correct, calculates
+ * its CRC and splits it into simple fields
+ */
+
+static int
+check_fhdr( u32  ioaddr,  u32  *framelen,  u32  *frameno,  u32  *ack,
+	    u32  *is_first,  u32  *crc_p )
+{
+	u32  crc = *crc_p;
+	u8   value;
+
+	if( inb( ioaddr + DAT ) != SBNI_SIG )
+		return  0;
+
+	value = inb( ioaddr + DAT );
+	*framelen = (u32)value;
+	crc = CRC32( value, crc );
+	value = inb( ioaddr + DAT );
+	*framelen |= ((u32)value) << 8;
+	crc = CRC32( value, crc );
+
+	*ack = *framelen & FRAME_ACK_MASK;
+	*is_first = (*framelen & FRAME_FIRST) != 0;
+
+	if( (*framelen &= FRAME_LEN_MASK) < 6 ||
+	    *framelen > SBNI_MAX_FRAME - 3 )
+		return  0;
+
+	value = inb( ioaddr + DAT );
+	*frameno = (u32)value;
+	crc = CRC32( value, crc );
+
+	crc = CRC32( inb( ioaddr + DAT ), crc );	/* reserved byte */
+	*framelen -= 2;
+
+	*crc_p = crc;
+	return  1;
+}
+
+
+static struct sk_buff *
+get_rx_buf( struct net_device  *dev )
+{
+	/* +2 is to compensate for the alignment fixup below */
+	struct sk_buff  *skb = dev_alloc_skb( ETHER_MAX_LEN + 2 );
+	if( !skb )
+		return  NULL;
+
+	skb_reserve( skb, 2 );		/* Align IP on longword boundaries */
+	return  skb;
+}
+
+
+static void
+indicate_pkt( struct net_device  *dev )
+{
+	struct net_local  *nl  = netdev_priv(dev);
+	struct sk_buff    *skb = nl->rx_buf_p;
+
+	skb_put( skb, nl->inppos );
+
+#ifdef CONFIG_SBNI_MULTILINE
+	skb->protocol = eth_type_trans( skb, nl->master );
+	netif_rx( skb );
+	++nl->master->stats.rx_packets;
+	nl->master->stats.rx_bytes += nl->inppos;
+#else
+	skb->protocol = eth_type_trans( skb, dev );
+	netif_rx( skb );
+	++dev->stats.rx_packets;
+	dev->stats.rx_bytes += nl->inppos;
+#endif
+	nl->rx_buf_p = NULL;	/* protocol driver will clear this sk_buff */
+}
+
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * The routine periodically checks wire activity and regenerates the marker
+ * if the connection has been inactive for a long time.
+ */
+
+static void
+sbni_watchdog( unsigned long  arg )
+{
+	struct net_device  *dev = (struct net_device *) arg;
+	struct net_local   *nl  = netdev_priv(dev);
+	struct timer_list  *w   = &nl->watchdog; 
+	unsigned long	   flags;
+	unsigned char	   csr0;
+
+	spin_lock_irqsave( &nl->lock, flags );
+
+	csr0 = inb( dev->base_addr + CSR0 );
+	if( csr0 & RC_CHK ) {
+
+		if( nl->timer_ticks ) {
+			if( csr0 & (RC_RDY | BU_EMP) )
+				/* receiving not active */
+				nl->timer_ticks--;
+		} else {
+			nl->in_stats.timeout_number++;
+			if( nl->delta_rxl )
+				timeout_change_level( dev );
+
+			outb( *(u_char *)&nl->csr1 | PR_RES,
+			      dev->base_addr + CSR1 );
+			csr0 = inb( dev->base_addr + CSR0 );
+		}
+	} else
+		nl->state &= ~FL_LINE_DOWN;
+
+	outb( csr0 | RC_CHK, dev->base_addr + CSR0 ); 
+
+	init_timer( w );
+	w->expires	= jiffies + SBNI_TIMEOUT;
+	w->data		= arg;
+	w->function	= sbni_watchdog;
+	add_timer( w );
+
+	spin_unlock_irqrestore( &nl->lock, flags );
+}
+
+
+static unsigned char  rxl_tab[] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08,
+	0x0a, 0x0c, 0x0f, 0x16, 0x18, 0x1a, 0x1c, 0x1f
+};
+
+#define SIZE_OF_TIMEOUT_RXL_TAB 4
+static unsigned char  timeout_rxl_tab[] = {
+	0x03, 0x05, 0x08, 0x0b
+};
+
+/* -------------------------------------------------------------------------- */
+
+static void
+card_start( struct net_device  *dev )
+{
+	struct net_local  *nl = netdev_priv(dev);
+
+	nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
+	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
+	nl->state |= FL_PREV_OK;
+
+	nl->inppos = nl->outpos = 0;
+	nl->wait_frameno = 0;
+	nl->tx_frameno	 = 0;
+	nl->framelen	 = 0;
+
+	outb( *(u_char *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
+	outb( EN_INT, dev->base_addr + CSR0 );
+}
+
+/* -------------------------------------------------------------------------- */
+
+/* Receive level auto-selection */
+
+static void
+change_level( struct net_device  *dev )
+{
+	struct net_local  *nl = netdev_priv(dev);
+
+	if( nl->delta_rxl == 0 )	/* do not auto-negotiate RxL */
+		return;
+
+	if( nl->cur_rxl_index == 0 )
+		nl->delta_rxl = 1;
+	else if( nl->cur_rxl_index == 15 )
+		nl->delta_rxl = -1;
+	else if( nl->cur_rxl_rcvd < nl->prev_rxl_rcvd )
+		nl->delta_rxl = -nl->delta_rxl;
+
+	nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index += nl->delta_rxl ];
+	inb( dev->base_addr + CSR0 );	/* needed for PCI cards */
+	outb( *(u8 *)&nl->csr1, dev->base_addr + CSR1 );
+
+	nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
+	nl->cur_rxl_rcvd  = 0;
+}
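
change_level() above is a bounded hill climb over the 16-entry rxl_tab: each
bad frame steps cur_rxl_index by delta_rxl, the direction reverses when fewer
good frames arrived in the current period than in the previous one, and the
walk bounces off the table ends. A standalone sketch of the same search, with
the driver state reduced to a small struct (illustrative only, not driver code):

	/* one step of the receive-level search, mirroring change_level() */
	struct rxl_search {
		int index;				/* 0..15, position in rxl_tab */
		int delta;				/* current step direction */
		unsigned long cur_rcvd, prev_rcvd;	/* good frames per period */
	};

	static void rxl_step(struct rxl_search *s)
	{
		if (s->index == 0)
			s->delta = 1;
		else if (s->index == 15)
			s->delta = -1;
		else if (s->cur_rcvd < s->prev_rcvd)
			s->delta = -s->delta;	/* reception got worse: reverse */

		s->index += s->delta;
		s->prev_rcvd = s->cur_rcvd;
		s->cur_rcvd = 0;
	}
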
+
+
+static void
+timeout_change_level( struct net_device  *dev )
+{
+	struct net_local  *nl = netdev_priv(dev);
+
+	nl->cur_rxl_index = timeout_rxl_tab[ nl->timeout_rxl ];
+	if( ++nl->timeout_rxl >= 4 )
+		nl->timeout_rxl = 0;
+
+	nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
+	inb( dev->base_addr + CSR0 );
+	outb( *(unsigned char *)&nl->csr1, dev->base_addr + CSR1 );
+
+	nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
+	nl->cur_rxl_rcvd  = 0;
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ *	Open/initialize the board. 
+ */
+
+static int
+sbni_open( struct net_device  *dev )
+{
+	struct net_local	*nl = netdev_priv(dev);
+	struct timer_list	*w  = &nl->watchdog;
+
+	/*
+	 * For dual ISA adapters in "common irq" mode, we have to determine
+	 * whether the primary or the secondary channel is being initialized,
+	 * and install the irq handler only in the first case.
+	 */
+	if( dev->base_addr < 0x400 ) {		/* ISA only */
+		struct net_device  **p = sbni_cards;
+		for( ;  *p  &&  p < sbni_cards + SBNI_MAX_NUM_CARDS;  ++p )
+			if( (*p)->irq == dev->irq &&
+			    ((*p)->base_addr == dev->base_addr + 4 ||
+			     (*p)->base_addr == dev->base_addr - 4) &&
+			    (*p)->flags & IFF_UP ) {
+
+				((struct net_local *) (netdev_priv(*p)))
+					->second = dev;
+				netdev_notice(dev, "using shared irq with %s\n",
+					      (*p)->name);
+				nl->state |= FL_SECONDARY;
+				goto  handler_attached;
+			}
+	}
+
+	if( request_irq(dev->irq, sbni_interrupt, IRQF_SHARED, dev->name, dev) ) {
+		netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
+		return  -EAGAIN;
+	}
+
+handler_attached:
+
+	spin_lock( &nl->lock );
+	memset( &dev->stats, 0, sizeof(struct net_device_stats) );
+	memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
+
+	card_start( dev );
+
+	netif_start_queue( dev );
+
+	/* set timer watchdog */
+	init_timer( w );
+	w->expires	= jiffies + SBNI_TIMEOUT;
+	w->data		= (unsigned long) dev;
+	w->function	= sbni_watchdog;
+	add_timer( w );
+   
+	spin_unlock( &nl->lock );
+	return 0;
+}
+
+
+static int
+sbni_close( struct net_device  *dev )
+{
+	struct net_local  *nl = netdev_priv(dev);
+
+	if( nl->second  &&  nl->second->flags & IFF_UP ) {
+		netdev_notice(dev, "Secondary channel (%s) is active!\n",
+			      nl->second->name);
+		return  -EBUSY;
+	}
+
+#ifdef CONFIG_SBNI_MULTILINE
+	if( nl->state & FL_SLAVE )
+		emancipate( dev );
+	else
+		while( nl->link )	/* it's master device! */
+			emancipate( nl->link );
+#endif
+
+	spin_lock( &nl->lock );
+
+	nl->second = NULL;
+	drop_xmit_queue( dev );	
+	netif_stop_queue( dev );
+   
+	del_timer( &nl->watchdog );
+
+	outb( 0, dev->base_addr + CSR0 );
+
+	if( !(nl->state & FL_SECONDARY) )
+		free_irq( dev->irq, dev );
+	nl->state &= FL_SECONDARY;
+
+	spin_unlock( &nl->lock );
+	return 0;
+}
+
+
+/*
+	Valid combinations in CSR0 (for probing):
+
+	VALID_DECODER	0000,0011,1011,1010
+
+				    	; 0   ; -
+				TR_REQ	; 1   ; +
+			TR_RDY	    	; 2   ; -
+			TR_RDY	TR_REQ	; 3   ; +
+		BU_EMP		    	; 4   ; +
+		BU_EMP	     	TR_REQ	; 5   ; +
+		BU_EMP	TR_RDY	    	; 6   ; -
+		BU_EMP	TR_RDY	TR_REQ	; 7   ; +
+	RC_RDY 		     		; 8   ; +
+	RC_RDY			TR_REQ	; 9   ; +
+	RC_RDY		TR_RDY		; 10  ; -
+	RC_RDY		TR_RDY	TR_REQ	; 11  ; -
+	RC_RDY	BU_EMP			; 12  ; -
+	RC_RDY	BU_EMP		TR_REQ	; 13  ; -
+	RC_RDY	BU_EMP	TR_RDY		; 14  ; -
+	RC_RDY	BU_EMP	TR_RDY	TR_REQ	; 15  ; -
+*/
+
+#define VALID_DECODER (2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200)
+
+
+static int
+sbni_card_probe( unsigned long  ioaddr )
+{
+	unsigned char  csr0;
+
+	csr0 = inb( ioaddr + CSR0 );
+	if( csr0 != 0xff  &&  csr0 != 0x00 ) {
+		csr0 &= ~EN_INT;
+		if( csr0 & BU_EMP )
+			csr0 |= EN_INT;
+      
+		if( VALID_DECODER & (1 << (csr0 >> 4)) )
+			return  0;
+	}
+   
+	return  -ENODEV;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static int
+sbni_ioctl( struct net_device  *dev,  struct ifreq  *ifr,  int  cmd )
+{
+	struct net_local  *nl = netdev_priv(dev);
+	struct sbni_flags  flags;
+	int  error = 0;
+
+#ifdef CONFIG_SBNI_MULTILINE
+	struct net_device  *slave_dev;
+	char  slave_name[ 8 ];
+#endif
+  
+	switch( cmd ) {
+	case  SIOCDEVGETINSTATS :
+		if (copy_to_user( ifr->ifr_data, &nl->in_stats,
+					sizeof(struct sbni_in_stats) ))
+			error = -EFAULT;
+		break;
+
+	case  SIOCDEVRESINSTATS :
+		if (!capable(CAP_NET_ADMIN))
+			return  -EPERM;
+		memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
+		break;
+
+	case  SIOCDEVGHWSTATE :
+		flags.mac_addr	= *(u32 *)(dev->dev_addr + 3);
+		flags.rate	= nl->csr1.rate;
+		flags.slow_mode	= (nl->state & FL_SLOW_MODE) != 0;
+		flags.rxl	= nl->cur_rxl_index;
+		flags.fixed_rxl	= nl->delta_rxl == 0;
+
+		if (copy_to_user( ifr->ifr_data, &flags, sizeof flags ))
+			error = -EFAULT;
+		break;
+
+	case  SIOCDEVSHWSTATE :
+		if (!capable(CAP_NET_ADMIN))
+			return  -EPERM;
+
+		spin_lock( &nl->lock );
+		flags = *(struct sbni_flags*) &ifr->ifr_ifru;
+		if( flags.fixed_rxl )
+			nl->delta_rxl = 0,
+			nl->cur_rxl_index = flags.rxl;
+		else
+			nl->delta_rxl = DEF_RXL_DELTA,
+			nl->cur_rxl_index = DEF_RXL;
+
+		nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
+		nl->csr1.rate = flags.rate;
+		outb( *(u8 *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
+		spin_unlock( &nl->lock );
+		break;
+
+#ifdef CONFIG_SBNI_MULTILINE
+
+	case  SIOCDEVENSLAVE :
+		if (!capable(CAP_NET_ADMIN))
+			return  -EPERM;
+
+		if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name ))
+			return -EFAULT;
+		slave_dev = dev_get_by_name(&init_net, slave_name );
+		if( !slave_dev  ||  !(slave_dev->flags & IFF_UP) ) {
+			netdev_err(dev, "trying to enslave non-active device %s\n",
+				   slave_name);
+			return  -EPERM;
+		}
+
+		return  enslave( dev, slave_dev );
+
+	case  SIOCDEVEMANSIPATE :
+		if (!capable(CAP_NET_ADMIN))
+			return  -EPERM;
+
+		return  emancipate( dev );
+
+#endif	/* CONFIG_SBNI_MULTILINE */
+
+	default :
+		return  -EOPNOTSUPP;
+	}
+
+	return  error;
+}
+
+
+#ifdef CONFIG_SBNI_MULTILINE
+
+static int
+enslave( struct net_device  *dev,  struct net_device  *slave_dev )
+{
+	struct net_local  *nl  = netdev_priv(dev);
+	struct net_local  *snl = netdev_priv(slave_dev);
+
+	if( nl->state & FL_SLAVE )	/* This isn't master or free device */
+		return  -EBUSY;
+
+	if( snl->state & FL_SLAVE )	/* That was already enslaved */
+		return  -EBUSY;
+
+	spin_lock( &nl->lock );
+	spin_lock( &snl->lock );
+
+	/* append to list */
+	snl->link = nl->link;
+	nl->link  = slave_dev;
+	snl->master = dev;
+	snl->state |= FL_SLAVE;
+
+	/* Summary statistics of MultiLine operation will be stored
+	   in master's counters */
+	memset( &slave_dev->stats, 0, sizeof(struct net_device_stats) );
+	netif_stop_queue( slave_dev );
+	netif_wake_queue( dev );	/* Now we are able to transmit */
+
+	spin_unlock( &snl->lock );
+	spin_unlock( &nl->lock );
+	netdev_notice(dev, "slave device (%s) attached\n", slave_dev->name);
+	return  0;
+}
+
+
+static int
+emancipate( struct net_device  *dev )
+{
+	struct net_local   *snl = netdev_priv(dev);
+	struct net_device  *p   = snl->master;
+	struct net_local   *nl  = netdev_priv(p);
+
+	if( !(snl->state & FL_SLAVE) )
+		return  -EINVAL;
+
+	spin_lock( &nl->lock );
+	spin_lock( &snl->lock );
+	drop_xmit_queue( dev );
+
+	/* exclude from list */
+	for(;;) {	/* must be in list */
+		struct net_local  *t = netdev_priv(p);
+		if( t->link == dev ) {
+			t->link = snl->link;
+			break;
+		}
+		p = t->link;
+	}
+
+	snl->link = NULL;
+	snl->master = dev;
+	snl->state &= ~FL_SLAVE;
+
+	netif_start_queue( dev );
+
+	spin_unlock( &snl->lock );
+	spin_unlock( &nl->lock );
+
+	dev_put( dev );
+	return  0;
+}
+
+#endif
+
+static void
+set_multicast_list( struct net_device  *dev )
+{
+	return;		/* sbni always operates in promiscuous mode */
+}
+
+
+#ifdef MODULE
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(baud, int, NULL, 0);
+module_param_array(rxl, int, NULL, 0);
+module_param_array(mac, int, NULL, 0);
+module_param(skip_pci_probe, bool, 0);
+
+MODULE_LICENSE("GPL");
+
+
+int __init init_module( void )
+{
+	struct net_device  *dev;
+	int err;
+
+	while( num < SBNI_MAX_NUM_CARDS ) {
+		dev = alloc_netdev(sizeof(struct net_local), 
+				   "sbni%d", sbni_devsetup);
+		if( !dev)
+			break;
+
+		sprintf( dev->name, "sbni%d", num );
+
+		err = sbni_init(dev);
+		if (err) {
+			free_netdev(dev);
+			break;
+		}
+
+		if( register_netdev( dev ) ) {
+			release_region( dev->base_addr, SBNI_IO_EXTENT );
+			free_netdev( dev );
+			break;
+		}
+	}
+
+	return  *sbni_cards  ?  0  :  -ENODEV;
+}
+
+void
+cleanup_module(void)
+{
+	int i;
+
+	for (i = 0;  i < SBNI_MAX_NUM_CARDS;  ++i) {
+		struct net_device *dev = sbni_cards[i];
+		if (dev != NULL) {
+			unregister_netdev(dev);
+			release_region(dev->base_addr, SBNI_IO_EXTENT);
+			free_netdev(dev);
+		}
+	}
+}
+
+#else	/* MODULE */
+
+static int __init
+sbni_setup( char  *p )
+{
+	int  n, parm;
+
+	if( *p++ != '(' )
+		goto  bad_param;
+
+	for( n = 0, parm = 0;  *p  &&  n < 8; ) {
+		(*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 );
+		if( !*p  ||  *p == ')' )
+			return 1;
+		if( *p == ';' )
+			++p, ++n, parm = 0;
+		else if( *p++ != ',' )
+			break;
+		else
+			if( ++parm >= 5 )
+				break;
+	}
+bad_param:
+	pr_err("Error in sbni kernel parameter!\n");
+	return 0;
+}
+
+__setup( "sbni=", sbni_setup );
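
For reference, the parser above fills the per-card parameters in the dest[]
order io, irq, baud, rxl, mac, with ',' separating the parameters of one card
and ';' separating cards; simple_strtol(..., 0) also accepts hex values. A
hypothetical boot line (values are examples only):

	sbni=(0x210,5,0,0xf;0x214,5)

This would configure two ISA cards at 0x210 and 0x214, both on IRQ 5; the
first with rate index 0 and a fixed receive level of 0xf, the second with rxl
left at its default of -1, i.e. receive-level autotuning.
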
+
+#endif	/* MODULE */
+
+/* -------------------------------------------------------------------------- */
+
+#ifdef ASM_CRC
+
+static u32
+calc_crc32( u32  crc,  u8  *p,  u32  len )
+{
+	register u32  _crc;
+	_crc = crc;
+	
+	__asm__ __volatile__ (
+		"xorl	%%ebx, %%ebx\n"
+		"movl	%2, %%esi\n" 
+		"movl	%3, %%ecx\n" 
+		"movl	$crc32tab, %%edi\n"
+		"shrl	$2, %%ecx\n"
+		"jz	1f\n"
+
+		".align 4\n"
+	"0:\n"
+		"movb	%%al, %%bl\n"
+		"movl	(%%esi), %%edx\n"
+		"shrl	$8, %%eax\n"
+		"xorb	%%dl, %%bl\n"
+		"shrl	$8, %%edx\n"
+		"xorl	(%%edi,%%ebx,4), %%eax\n"
+
+		"movb	%%al, %%bl\n"
+		"shrl	$8, %%eax\n"
+		"xorb	%%dl, %%bl\n"
+		"shrl	$8, %%edx\n"
+		"xorl	(%%edi,%%ebx,4), %%eax\n"
+
+		"movb	%%al, %%bl\n"
+		"shrl	$8, %%eax\n"
+		"xorb	%%dl, %%bl\n"
+		"movb	%%dh, %%dl\n" 
+		"xorl	(%%edi,%%ebx,4), %%eax\n"
+
+		"movb	%%al, %%bl\n"
+		"shrl	$8, %%eax\n"
+		"xorb	%%dl, %%bl\n"
+		"addl	$4, %%esi\n"
+		"xorl	(%%edi,%%ebx,4), %%eax\n"
+
+		"decl	%%ecx\n"
+		"jnz	0b\n"
+
+	"1:\n"
+		"movl	%3, %%ecx\n"
+		"andl	$3, %%ecx\n"
+		"jz	2f\n"
+
+		"movb	%%al, %%bl\n"
+		"shrl	$8, %%eax\n"
+		"xorb	(%%esi), %%bl\n"
+		"xorl	(%%edi,%%ebx,4), %%eax\n"
+
+		"decl	%%ecx\n"
+		"jz	2f\n"
+
+		"movb	%%al, %%bl\n"
+		"shrl	$8, %%eax\n"
+		"xorb	1(%%esi), %%bl\n"
+		"xorl	(%%edi,%%ebx,4), %%eax\n"
+
+		"decl	%%ecx\n"
+		"jz	2f\n"
+
+		"movb	%%al, %%bl\n"
+		"shrl	$8, %%eax\n"
+		"xorb	2(%%esi), %%bl\n"
+		"xorl	(%%edi,%%ebx,4), %%eax\n"
+	"2:\n"
+		: "=a" (_crc)
+		: "0" (_crc), "g" (p), "g" (len)
+		: "bx", "cx", "dx", "si", "di"
+	);
+
+	return  _crc;
+}
+
+#else	/* ASM_CRC */
+
+static u32
+calc_crc32( u32  crc,  u8  *p,  u32  len )
+{
+	while( len-- )
+		crc = CRC32( *p++, crc );
+
+	return  crc;
+}
+
+#endif	/* ASM_CRC */
+
+
+static u32  crc32tab[] __attribute__ ((aligned(8))) = {
+	0xD202EF8D,  0xA505DF1B,  0x3C0C8EA1,  0x4B0BBE37,
+	0xD56F2B94,  0xA2681B02,  0x3B614AB8,  0x4C667A2E,
+	0xDCD967BF,  0xABDE5729,  0x32D70693,  0x45D03605,
+	0xDBB4A3A6,  0xACB39330,  0x35BAC28A,  0x42BDF21C,
+	0xCFB5FFE9,  0xB8B2CF7F,  0x21BB9EC5,  0x56BCAE53,
+	0xC8D83BF0,  0xBFDF0B66,  0x26D65ADC,  0x51D16A4A,
+	0xC16E77DB,  0xB669474D,  0x2F6016F7,  0x58672661,
+	0xC603B3C2,  0xB1048354,  0x280DD2EE,  0x5F0AE278,
+	0xE96CCF45,  0x9E6BFFD3,  0x0762AE69,  0x70659EFF,
+	0xEE010B5C,  0x99063BCA,  0x000F6A70,  0x77085AE6,
+	0xE7B74777,  0x90B077E1,  0x09B9265B,  0x7EBE16CD,
+	0xE0DA836E,  0x97DDB3F8,  0x0ED4E242,  0x79D3D2D4,
+	0xF4DBDF21,  0x83DCEFB7,  0x1AD5BE0D,  0x6DD28E9B,
+	0xF3B61B38,  0x84B12BAE,  0x1DB87A14,  0x6ABF4A82,
+	0xFA005713,  0x8D076785,  0x140E363F,  0x630906A9,
+	0xFD6D930A,  0x8A6AA39C,  0x1363F226,  0x6464C2B0,
+	0xA4DEAE1D,  0xD3D99E8B,  0x4AD0CF31,  0x3DD7FFA7,
+	0xA3B36A04,  0xD4B45A92,  0x4DBD0B28,  0x3ABA3BBE,
+	0xAA05262F,  0xDD0216B9,  0x440B4703,  0x330C7795,
+	0xAD68E236,  0xDA6FD2A0,  0x4366831A,  0x3461B38C,
+	0xB969BE79,  0xCE6E8EEF,  0x5767DF55,  0x2060EFC3,
+	0xBE047A60,  0xC9034AF6,  0x500A1B4C,  0x270D2BDA,
+	0xB7B2364B,  0xC0B506DD,  0x59BC5767,  0x2EBB67F1,
+	0xB0DFF252,  0xC7D8C2C4,  0x5ED1937E,  0x29D6A3E8,
+	0x9FB08ED5,  0xE8B7BE43,  0x71BEEFF9,  0x06B9DF6F,
+	0x98DD4ACC,  0xEFDA7A5A,  0x76D32BE0,  0x01D41B76,
+	0x916B06E7,  0xE66C3671,  0x7F6567CB,  0x0862575D,
+	0x9606C2FE,  0xE101F268,  0x7808A3D2,  0x0F0F9344,
+	0x82079EB1,  0xF500AE27,  0x6C09FF9D,  0x1B0ECF0B,
+	0x856A5AA8,  0xF26D6A3E,  0x6B643B84,  0x1C630B12,
+	0x8CDC1683,  0xFBDB2615,  0x62D277AF,  0x15D54739,
+	0x8BB1D29A,  0xFCB6E20C,  0x65BFB3B6,  0x12B88320,
+	0x3FBA6CAD,  0x48BD5C3B,  0xD1B40D81,  0xA6B33D17,
+	0x38D7A8B4,  0x4FD09822,  0xD6D9C998,  0xA1DEF90E,
+	0x3161E49F,  0x4666D409,  0xDF6F85B3,  0xA868B525,
+	0x360C2086,  0x410B1010,  0xD80241AA,  0xAF05713C,
+	0x220D7CC9,  0x550A4C5F,  0xCC031DE5,  0xBB042D73,
+	0x2560B8D0,  0x52678846,  0xCB6ED9FC,  0xBC69E96A,
+	0x2CD6F4FB,  0x5BD1C46D,  0xC2D895D7,  0xB5DFA541,
+	0x2BBB30E2,  0x5CBC0074,  0xC5B551CE,  0xB2B26158,
+	0x04D44C65,  0x73D37CF3,  0xEADA2D49,  0x9DDD1DDF,
+	0x03B9887C,  0x74BEB8EA,  0xEDB7E950,  0x9AB0D9C6,
+	0x0A0FC457,  0x7D08F4C1,  0xE401A57B,  0x930695ED,
+	0x0D62004E,  0x7A6530D8,  0xE36C6162,  0x946B51F4,
+	0x19635C01,  0x6E646C97,  0xF76D3D2D,  0x806A0DBB,
+	0x1E0E9818,  0x6909A88E,  0xF000F934,  0x8707C9A2,
+	0x17B8D433,  0x60BFE4A5,  0xF9B6B51F,  0x8EB18589,
+	0x10D5102A,  0x67D220BC,  0xFEDB7106,  0x89DC4190,
+	0x49662D3D,  0x3E611DAB,  0xA7684C11,  0xD06F7C87,
+	0x4E0BE924,  0x390CD9B2,  0xA0058808,  0xD702B89E,
+	0x47BDA50F,  0x30BA9599,  0xA9B3C423,  0xDEB4F4B5,
+	0x40D06116,  0x37D75180,  0xAEDE003A,  0xD9D930AC,
+	0x54D13D59,  0x23D60DCF,  0xBADF5C75,  0xCDD86CE3,
+	0x53BCF940,  0x24BBC9D6,  0xBDB2986C,  0xCAB5A8FA,
+	0x5A0AB56B,  0x2D0D85FD,  0xB404D447,  0xC303E4D1,
+	0x5D677172,  0x2A6041E4,  0xB369105E,  0xC46E20C8,
+	0x72080DF5,  0x050F3D63,  0x9C066CD9,  0xEB015C4F,
+	0x7565C9EC,  0x0262F97A,  0x9B6BA8C0,  0xEC6C9856,
+	0x7CD385C7,  0x0BD4B551,  0x92DDE4EB,  0xE5DAD47D,
+	0x7BBE41DE,  0x0CB97148,  0x95B020F2,  0xE2B71064,
+	0x6FBF1D91,  0x18B82D07,  0x81B17CBD,  0xF6B64C2B,
+	0x68D2D988,  0x1FD5E91E,  0x86DCB8A4,  0xF1DB8832,
+	0x616495A3,  0x1663A535,  0x8F6AF48F,  0xF86DC419,
+	0x660951BA,  0x110E612C,  0x88073096,  0xFF000000
+};
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/sbni.h b/ap/os/linux/linux-3.4.x/drivers/net/wan/sbni.h
new file mode 100644
index 0000000..8426451
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/sbni.h
@@ -0,0 +1,147 @@
+/* sbni.h:  definitions for a Granch SBNI12 driver, version 5.0.0
+ * Written 2001 Denis I.Timofeev (timofeev@granch.ru)
+ * This file is distributed under the GNU GPL
+ */
+
+#ifndef SBNI_H
+#define SBNI_H
+
+#ifdef SBNI_DEBUG
+#define DP( A ) A
+#else
+#define DP( A )
+#endif
+
+
+/* We don't have an official vendor id yet... */
+#define SBNI_PCI_VENDOR 	0x55 
+#define SBNI_PCI_DEVICE 	0x9f
+
+#define ISA_MODE 0x00
+#define PCI_MODE 0x01
+
+#define	SBNI_IO_EXTENT	4
+
+enum sbni_reg {
+	CSR0 = 0,
+	CSR1 = 1,
+	DAT  = 2
+};
+
+/* CSR0 mapping */
+enum {
+	BU_EMP = 0x02,
+	RC_CHK = 0x04,
+	CT_ZER = 0x08,
+	TR_REQ = 0x10,
+	TR_RDY = 0x20,
+	EN_INT = 0x40,
+	RC_RDY = 0x80
+};
+
+
+/* CSR1 mapping */
+#define PR_RES 0x80
+
+struct sbni_csr1 {
+#ifdef __LITTLE_ENDIAN_BITFIELD
+	u8 rxl	: 5;
+	u8 rate	: 2;
+	u8 	: 1;
+#else
+	u8 	: 1;
+	u8 rate	: 2;
+	u8 rxl	: 5;
+#endif
+};
+
+/* fields in frame header */
+#define FRAME_ACK_MASK  (unsigned short)0x7000
+#define FRAME_LEN_MASK  (unsigned short)0x03FF
+#define FRAME_FIRST     (unsigned short)0x8000
+#define FRAME_RETRY     (unsigned short)0x0800
+
+#define FRAME_SENT_BAD  (unsigned short)0x4000
+#define FRAME_SENT_OK   (unsigned short)0x3000
+
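
The masks above describe the 16-bit length/flags word that send_frame_header()
emits and check_fhdr() parses: the frame length occupies the low 10 bits and
the flags sit above it. A small decoding sketch using a hypothetical header
value (not driver code):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical header: first frame, previous frame sent OK, length 22 */
		unsigned short w = 0x8000 /* FRAME_FIRST */ |
				   0x3000 /* FRAME_SENT_OK */ |
				   22;

		unsigned short len      = w & 0x03FF;		/* FRAME_LEN_MASK */
		unsigned short ack      = w & 0x7000;		/* FRAME_ACK_MASK */
		unsigned short is_first = (w & 0x8000) != 0;	/* FRAME_FIRST */

		printf("len=%u ack=0x%04x first=%u\n", len, ack, is_first);
		return 0;
	}
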
+
+/* state flags */
+enum {
+	FL_WAIT_ACK    = 0x01,
+	FL_NEED_RESEND = 0x02,
+	FL_PREV_OK     = 0x04,
+	FL_SLOW_MODE   = 0x08,
+	FL_SECONDARY   = 0x10,
+#ifdef CONFIG_SBNI_MULTILINE
+	FL_SLAVE       = 0x20,
+#endif
+	FL_LINE_DOWN   = 0x40
+};
+
+
+enum {
+	DEFAULT_IOBASEADDR = 0x210,
+	DEFAULT_INTERRUPTNUMBER = 5,
+	DEFAULT_RATE = 0,
+	DEFAULT_FRAME_LEN = 1012
+};
+
+#define DEF_RXL_DELTA	-1
+#define DEF_RXL		0xf
+
+#define SBNI_SIG 0x5a
+
+#define	SBNI_MIN_LEN	60	/* Shortest Ethernet frame without FCS */
+#define SBNI_MAX_FRAME	1023
+#define ETHER_MAX_LEN	1518
+
+#define SBNI_TIMEOUT	(HZ/10)
+
+#define TR_ERROR_COUNT	32
+#define CHANGE_LEVEL_START_TICKS 4
+
+#define SBNI_MAX_NUM_CARDS	16
+
+/* internal SBNI-specific statistics */
+struct sbni_in_stats {
+	u32	all_rx_number;
+	u32	bad_rx_number;
+	u32	timeout_number;
+	u32	all_tx_number;
+	u32	resend_tx_number;
+};
+
+/* SBNI ioctl params */
+#define SIOCDEVGETINSTATS 	SIOCDEVPRIVATE
+#define SIOCDEVRESINSTATS 	SIOCDEVPRIVATE+1
+#define SIOCDEVGHWSTATE   	SIOCDEVPRIVATE+2
+#define SIOCDEVSHWSTATE   	SIOCDEVPRIVATE+3
+#define SIOCDEVENSLAVE  	SIOCDEVPRIVATE+4
+#define SIOCDEVEMANSIPATE  	SIOCDEVPRIVATE+5
+
+
+/* data packet for SIOCDEVGHWSTATE/SIOCDEVSHWSTATE ioctl requests */
+struct sbni_flags {
+	u32	rxl		: 4;
+	u32	rate		: 2;
+	u32	fixed_rxl	: 1;
+	u32	slow_mode	: 1;
+	u32	mac_addr	: 24;
+};
+
+/*
+ * CRC-32 stuff
+ */
+#define CRC32(c,crc) (crc32tab[((size_t)(crc) ^ (c)) & 0xff] ^ (((crc) >> 8) & 0x00FFFFFF))
+      /* CRC generator 0xEDB88320 */
+      /* CRC remainder 0x2144DF1C */
+      /* CRC initial value 0x00000000 */
+#define CRC32_REMAINDER 0x2144DF1C
+#define CRC32_INITIAL 0x00000000
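
Usage sketch: the receive path feeds the payload plus the trailing 4 CRC bytes
through CRC32() starting from CRC32_INITIAL and accepts the frame only when
the result equals CRC32_REMAINDER, which is what append_frame_to_pkt() and
skip_tail() check. Assumptions are noted in the comment below.

	/*
	 * Minimal sketch, not part of the driver: verify a received frame the
	 * same way skip_tail()/append_frame_to_pkt() do, assuming the CRC32()
	 * macro and crc32tab above are in scope.
	 */
	static int frame_crc_ok(const u8 *buf, u32 len_with_crc)
	{
		u32 crc = CRC32_INITIAL;

		while (len_with_crc--)
			crc = CRC32(*buf++, crc);

		return crc == CRC32_REMAINDER;
	}
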
+
+#ifndef __initdata
+#define __initdata
+#endif
+
+#endif
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/sdla.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/sdla.c
new file mode 100644
index 0000000..de3bbf4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/sdla.c
@@ -0,0 +1,1665 @@
+/*
+ * SDLA		An implementation of a driver for the Sangoma S502/S508 series
+ *		multi-protocol PC interface card.  Initial offering is with 
+ *		the DLCI driver, providing Frame Relay support for linux.
+ *
+ *		Global definitions for the Frame relay interface.
+ *
+ * Version:	@(#)sdla.c   0.30	12 Sep 1996
+ *
+ * Credits:	Sangoma Technologies, for the use of 2 cards for an extended
+ *			period of time.
+ *		David Mandelstam <dm@sangoma.com> for getting me started on 
+ *			this project, and incentive to complete it.
+ *		Gene Kozen <74604.152@compuserve.com> for providing me with
+ *			important information about the cards.
+ *
+ * Author:	Mike McLagan <mike.mclagan@linux.org>
+ *
+ * Changes:
+ *		0.15	Mike McLagan	Improved error handling, packet dropping
+ *		0.20	Mike McLagan	New transmit/receive flags for config
+ *					If in FR mode, don't accept packets from
+ *					non DLCI devices.
+ *		0.25	Mike McLagan	Fixed problem with rejecting packets
+ *					from non DLCI devices.
+ *		0.30	Mike McLagan	Fixed kernel panic when used with modified
+ *					ifconfig
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/if_frad.h>
+#include <linux/sdla.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+
+static const char* version = "SDLA driver v0.30, 12 Sep 1996, mike.mclagan@linux.org";
+
+static unsigned int valid_port[] = { 0x250, 0x270, 0x280, 0x300, 0x350, 0x360, 0x380, 0x390};
+
+static unsigned int valid_mem[] = {
+				    0xA0000, 0xA2000, 0xA4000, 0xA6000, 0xA8000, 0xAA000, 0xAC000, 0xAE000, 
+                                    0xB0000, 0xB2000, 0xB4000, 0xB6000, 0xB8000, 0xBA000, 0xBC000, 0xBE000,
+                                    0xC0000, 0xC2000, 0xC4000, 0xC6000, 0xC8000, 0xCA000, 0xCC000, 0xCE000,
+                                    0xD0000, 0xD2000, 0xD4000, 0xD6000, 0xD8000, 0xDA000, 0xDC000, 0xDE000,
+                                    0xE0000, 0xE2000, 0xE4000, 0xE6000, 0xE8000, 0xEA000, 0xEC000, 0xEE000}; 
+
+static DEFINE_SPINLOCK(sdla_lock);
+
+/*********************************************************
+ *
+ * these are the core routines that access the card itself 
+ *
+ *********************************************************/
+
+#define SDLA_WINDOW(dev,addr) outb((((addr) >> 13) & 0x1F), (dev)->base_addr + SDLA_REG_Z80_WINDOW)
+
+static void __sdla_read(struct net_device *dev, int addr, void *buf, short len)
+{
+	char          *temp;
+	const void    *base;
+	int           offset, bytes;
+
+	temp = buf;
+	while(len)
+	{	
+		offset = addr & SDLA_ADDR_MASK;
+		bytes = offset + len > SDLA_WINDOW_SIZE ? SDLA_WINDOW_SIZE - offset : len;
+		base = (const void *) (dev->mem_start + offset);
+
+		SDLA_WINDOW(dev, addr);
+		memcpy(temp, base, bytes);
+
+		addr += bytes;
+		temp += bytes;
+		len  -= bytes;
+	}  
+}
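
The card's memory is reachable only through an 8 KB window mapped at
dev->mem_start: SDLA_WINDOW() latches the upper address bits (addr >> 13) into
the window register, and the copy loops above walk the low bits, re-selecting
the window whenever a chunk would cross its end. A small sketch of the address
split, assuming SDLA_ADDR_MASK is 0x1FFF (the value implied by the >> 13 in
the macro):

	#include <stdio.h>

	int main(void)
	{
		unsigned int addr   = 0x3F00;			/* hypothetical card address */
		unsigned int window = (addr >> 13) & 0x1F;	/* value for SDLA_REG_Z80_WINDOW */
		unsigned int offset = addr & 0x1FFF;		/* offset within the mapped window */

		printf("addr 0x%04X -> window %u, offset 0x%04X\n",
		       addr, window, offset);
		return 0;
	}
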
+
+static void sdla_read(struct net_device *dev, int addr, void *buf, short len)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&sdla_lock, flags);
+	__sdla_read(dev, addr, buf, len);
+	spin_unlock_irqrestore(&sdla_lock, flags);
+}
+
+static void __sdla_write(struct net_device *dev, int addr, 
+			 const void *buf, short len)
+{
+	const char    *temp;
+	void 	      *base;
+	int           offset, bytes;
+
+	temp = buf;
+	while(len)
+	{
+		offset = addr & SDLA_ADDR_MASK;
+		bytes = offset + len > SDLA_WINDOW_SIZE ? SDLA_WINDOW_SIZE - offset : len;
+		base = (void *) (dev->mem_start + offset);
+
+		SDLA_WINDOW(dev, addr);
+		memcpy(base, temp, bytes);
+
+		addr += bytes;
+		temp += bytes;
+		len  -= bytes;
+	}
+}
+
+static void sdla_write(struct net_device *dev, int addr, 
+		       const void *buf, short len)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sdla_lock, flags);
+	__sdla_write(dev, addr, buf, len);
+	spin_unlock_irqrestore(&sdla_lock, flags);
+}
+
+
+static void sdla_clear(struct net_device *dev)
+{
+	unsigned long flags;
+	char          *base;
+	int           len, addr, bytes;
+
+	len = 65536;	
+	addr = 0;
+	bytes = SDLA_WINDOW_SIZE;
+	base = (void *) dev->mem_start;
+
+	spin_lock_irqsave(&sdla_lock, flags);
+	while(len)
+	{
+		SDLA_WINDOW(dev, addr);
+		memset(base, 0, bytes);
+
+		addr += bytes;
+		len  -= bytes;
+	}
+	spin_unlock_irqrestore(&sdla_lock, flags);
+
+}
+
+static char sdla_byte(struct net_device *dev, int addr)
+{
+	unsigned long flags;
+	char          byte, *temp;
+
+	temp = (void *) (dev->mem_start + (addr & SDLA_ADDR_MASK));
+
+	spin_lock_irqsave(&sdla_lock, flags);
+	SDLA_WINDOW(dev, addr);
+	byte = *temp;
+	spin_unlock_irqrestore(&sdla_lock, flags);
+
+	return byte;
+}
+
+static void sdla_stop(struct net_device *dev)
+{
+	struct frad_local *flp;
+
+	flp = netdev_priv(dev);
+	switch(flp->type)
+	{
+		case SDLA_S502A:
+			outb(SDLA_S502A_HALT, dev->base_addr + SDLA_REG_CONTROL);
+			flp->state = SDLA_HALT;
+			break;
+		case SDLA_S502E:
+			outb(SDLA_HALT, dev->base_addr + SDLA_REG_Z80_CONTROL);
+			outb(SDLA_S502E_ENABLE, dev->base_addr + SDLA_REG_CONTROL);
+			flp->state = SDLA_S502E_ENABLE;
+			break;
+		case SDLA_S507:
+			flp->state &= ~SDLA_CPUEN;
+			outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+			break;
+		case SDLA_S508:
+			flp->state &= ~SDLA_CPUEN;
+			outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+			break;
+	}
+}
+
+static void sdla_start(struct net_device *dev)
+{
+	struct frad_local *flp;
+
+	flp = netdev_priv(dev);
+	switch(flp->type)
+	{
+		case SDLA_S502A:
+			outb(SDLA_S502A_NMI, dev->base_addr + SDLA_REG_CONTROL);
+			outb(SDLA_S502A_START, dev->base_addr + SDLA_REG_CONTROL);
+			flp->state = SDLA_S502A_START;
+			break;
+		case SDLA_S502E:
+			outb(SDLA_S502E_CPUEN, dev->base_addr + SDLA_REG_Z80_CONTROL);
+			outb(0x00, dev->base_addr + SDLA_REG_CONTROL);
+			flp->state = 0;
+			break;
+		case SDLA_S507:
+			flp->state |= SDLA_CPUEN;
+			outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+			break;
+		case SDLA_S508:
+			flp->state |= SDLA_CPUEN;
+			outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+			break;
+	}
+}
+
+/****************************************************
+ *
+ * this is used for the S502A/E cards to determine
+ * the speed of the onboard CPU.  Calibration is
+ * necessary for the Frame Relay code uploaded 
+ * later.  Incorrect results cause timing problems
+ * with link checks & status messages
+ *
+ ***************************************************/
+
+static int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char resp1, char resp2)
+{
+	unsigned long start, done, now;
+	char          resp, *temp;
+
+	start = now = jiffies;
+	done = jiffies + jiffs;
+
+	temp = (void *)dev->mem_start;
+	temp += z80_addr & SDLA_ADDR_MASK;
+	
+	resp = ~resp1;
+	while (time_before(jiffies, done) && (resp != resp1) && (!resp2 || (resp != resp2)))
+	{
+		if (jiffies != now)
+		{
+			SDLA_WINDOW(dev, z80_addr);
+			now = jiffies;
+			resp = *temp;
+		}
+	}
+	return time_before(jiffies, done) ? jiffies - start : -1;
+}
+
+/* constants for Z80 CPU speed */
+#define Z80_READY 		'1'	/* Z80 is ready to begin */
+#define LOADER_READY 		'2'	/* driver is ready to begin */
+#define Z80_SCC_OK 		'3'	/* SCC is on board */
+#define Z80_SCC_BAD	 	'4'	/* SCC was not found */
+
+static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
+{
+	int  jiffs;
+	char data;
+
+	sdla_start(dev);
+	if (sdla_z80_poll(dev, 0, 3*HZ, Z80_READY, 0) < 0)
+		return -EIO;
+
+	data = LOADER_READY;
+	sdla_write(dev, 0, &data, 1);
+
+	if ((jiffs = sdla_z80_poll(dev, 0, 8*HZ, Z80_SCC_OK, Z80_SCC_BAD)) < 0)
+		return -EIO;
+
+	sdla_stop(dev);
+	sdla_read(dev, 0, &data, 1);
+
+	if (data == Z80_SCC_BAD)
+	{
+		printk("%s: SCC bad\n", dev->name);
+		return -EIO;
+	}
+
+	if (data != Z80_SCC_OK)
+		return -EINVAL;
+
+	if (jiffs < 165)
+		ifr->ifr_mtu = SDLA_CPU_16M;
+	else if (jiffs < 220)
+		ifr->ifr_mtu = SDLA_CPU_10M;
+	else if (jiffs < 258)
+		ifr->ifr_mtu = SDLA_CPU_8M;
+	else if (jiffs < 357)
+		ifr->ifr_mtu = SDLA_CPU_7M;
+	else if (jiffs < 467)
+		ifr->ifr_mtu = SDLA_CPU_5M;
+	else
+		ifr->ifr_mtu = SDLA_CPU_3M;
+ 
+	return 0;
+}
+
+/************************************************
+ *
+ *  Direct interaction with the Frame Relay code 
+ *  starts here.
+ *
+ ************************************************/
+
+struct _dlci_stat 
+{
+	short dlci;
+	char  flags;
+} __packed;
+
+struct _frad_stat 
+{
+	char    flags;
+	struct _dlci_stat dlcis[SDLA_MAX_DLCI];
+};
+
+static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int len, void *data) 
+{
+	struct _dlci_stat *pstatus;
+	short             *pdlci;
+	int               i;
+	char              *state, line[30];
+
+	switch (ret)
+	{
+		case SDLA_RET_MODEM:
+			state = data;
+			if (*state & SDLA_MODEM_DCD_LOW)
+				netdev_info(dev, "Modem DCD unexpectedly low!\n");
+			if (*state & SDLA_MODEM_CTS_LOW)
+				netdev_info(dev, "Modem CTS unexpectedly low!\n");
+			/* I should probably do something about this! */
+			break;
+
+		case SDLA_RET_CHANNEL_OFF:
+			netdev_info(dev, "Channel became inoperative!\n");
+			/* same here */
+			break;
+
+		case SDLA_RET_CHANNEL_ON:
+			netdev_info(dev, "Channel became operative!\n");
+			/* same here */
+			break;
+
+		case SDLA_RET_DLCI_STATUS:
+			netdev_info(dev, "Status change reported by Access Node\n");
+			len /= sizeof(struct _dlci_stat);
+			for(pstatus = data, i=0;i < len;i++,pstatus++)
+			{
+				if (pstatus->flags & SDLA_DLCI_NEW)
+					state = "new";
+				else if (pstatus->flags & SDLA_DLCI_DELETED)
+					state = "deleted";
+				else if (pstatus->flags & SDLA_DLCI_ACTIVE)
+					state = "active";
+				else
+				{
+					sprintf(line, "unknown status: %02X", pstatus->flags);
+					state = line;
+				}
+				netdev_info(dev, "DLCI %i: %s\n",
+					    pstatus->dlci, state);
+				/* same here */
+			}
+			break;
+
+		case SDLA_RET_DLCI_UNKNOWN:
+			netdev_info(dev, "Received unknown DLCIs:");
+			len /= sizeof(short);
+			for(pdlci = data,i=0;i < len;i++,pdlci++)
+				pr_cont(" %i", *pdlci);
+			pr_cont("\n");
+			break;
+
+		case SDLA_RET_TIMEOUT:
+			netdev_err(dev, "Command timed out!\n");
+			break;
+
+		case SDLA_RET_BUF_OVERSIZE:
+			netdev_info(dev, "Bc/CIR overflow, acceptable size is %i\n",
+				    len);
+			break;
+
+		case SDLA_RET_BUF_TOO_BIG:
+			netdev_info(dev, "Buffer size over specified max of %i\n",
+				    len);
+			break;
+
+		case SDLA_RET_CHANNEL_INACTIVE:
+		case SDLA_RET_DLCI_INACTIVE:
+		case SDLA_RET_CIR_OVERFLOW:
+		case SDLA_RET_NO_BUFS:
+			if (cmd == SDLA_INFORMATION_WRITE)
+				break;
+
+		default: 
+			netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n",
+				   cmd, ret);
+			/* Further processing could be done here */
+			break;
+	}
+}
+
+static int sdla_cmd(struct net_device *dev, int cmd, short dlci, short flags, 
+                        void *inbuf, short inlen, void *outbuf, short *outlen)
+{
+	static struct _frad_stat status;
+	struct frad_local        *flp;
+	struct sdla_cmd          *cmd_buf;
+	unsigned long            pflags;
+	unsigned long		 jiffs;
+	int                      ret, waiting, len;
+	long                     window;
+
+	flp = netdev_priv(dev);
+	window = flp->type == SDLA_S508 ? SDLA_508_CMD_BUF : SDLA_502_CMD_BUF;
+	cmd_buf = (struct sdla_cmd *)(dev->mem_start + (window & SDLA_ADDR_MASK));
+	ret = 0;
+	len = 0;
+	jiffs = jiffies + HZ;  /* 1 second is plenty */
+
+	spin_lock_irqsave(&sdla_lock, pflags);
+	SDLA_WINDOW(dev, window);
+	cmd_buf->cmd = cmd;
+	cmd_buf->dlci = dlci;
+	cmd_buf->flags = flags;
+
+	if (inbuf)
+		memcpy(cmd_buf->data, inbuf, inlen);
+
+	cmd_buf->length = inlen;
+
+	cmd_buf->opp_flag = 1;
+	spin_unlock_irqrestore(&sdla_lock, pflags);
+
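+	/* The card clears opp_flag once it has consumed the command block, so
+	 * busy-wait on it (re-selecting the window under the lock) until the
+	 * one-second deadline set above expires. */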
+	waiting = 1;
+	len = 0;
+	while (waiting && time_before_eq(jiffies, jiffs))
+	{
+		if (waiting++ % 3) 
+		{
+			spin_lock_irqsave(&sdla_lock, pflags);
+			SDLA_WINDOW(dev, window);
+			waiting = ((volatile int)(cmd_buf->opp_flag));
+			spin_unlock_irqrestore(&sdla_lock, pflags);
+		}
+	}
+	
+	if (!waiting)
+	{
+
+		spin_lock_irqsave(&sdla_lock, pflags);
+		SDLA_WINDOW(dev, window);
+		ret = cmd_buf->retval;
+		len = cmd_buf->length;
+		if (outbuf && outlen)
+		{
+			*outlen = *outlen >= len ? len : *outlen;
+
+			if (*outlen)
+				memcpy(outbuf, cmd_buf->data, *outlen);
+		}
+
+		/* This is a local copy that's used for error handling */
+		if (ret)
+			memcpy(&status, cmd_buf->data, len > sizeof(status) ? sizeof(status) : len);
+
+		spin_unlock_irqrestore(&sdla_lock, pflags);
+	}
+	else
+		ret = SDLA_RET_TIMEOUT;
+
+	if (ret != SDLA_RET_OK)
+		sdla_errors(dev, cmd, dlci, ret, len, &status);
+
+	return ret;
+}
+
+/***********************************************
+ *
+ * these functions are called by the DLCI driver 
+ *
+ ***********************************************/
+
+static int sdla_reconfig(struct net_device *dev);
+
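+/* The sign of flp->dlci[i] tracks activation state: a negative entry is a
+ * configured but inactive DLCI, a positive one is active. */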
+static int sdla_activate(struct net_device *slave, struct net_device *master)
+{
+	struct frad_local *flp;
+	int i;
+
+	flp = netdev_priv(slave);
+
+	for(i=0;i<CONFIG_DLCI_MAX;i++)
+		if (flp->master[i] == master)
+			break;
+
+	if (i == CONFIG_DLCI_MAX)
+		return -ENODEV;
+
+	flp->dlci[i] = abs(flp->dlci[i]);
+
+	if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
+		sdla_cmd(slave, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
+
+	return 0;
+}
+
+static int sdla_deactivate(struct net_device *slave, struct net_device *master)
+{
+	struct frad_local *flp;
+	int               i;
+
+	flp = netdev_priv(slave);
+
+	for(i=0;i<CONFIG_DLCI_MAX;i++)
+		if (flp->master[i] == master)
+			break;
+
+	if (i == CONFIG_DLCI_MAX)
+		return -ENODEV;
+
+	flp->dlci[i] = -abs(flp->dlci[i]);
+
+	if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
+		sdla_cmd(slave, SDLA_DEACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
+
+	return 0;
+}
+
+static int sdla_assoc(struct net_device *slave, struct net_device *master)
+{
+	struct frad_local *flp;
+	int               i;
+
+	if (master->type != ARPHRD_DLCI)
+		return -EINVAL;
+
+	flp = netdev_priv(slave);
+
+	for(i=0;i<CONFIG_DLCI_MAX;i++)
+	{
+		if (!flp->master[i])
+			break;
+		if (abs(flp->dlci[i]) == *(short *)(master->dev_addr))
+			return -EADDRINUSE;
+	} 
+
+	if (i == CONFIG_DLCI_MAX)
+		return -EMLINK;  /* #### Alan: Comments on this ?? */
+
+
+	flp->master[i] = master;
+	flp->dlci[i] = -*(short *)(master->dev_addr);
+	master->mtu = slave->mtu;
+
+	if (netif_running(slave)) {
+		if (flp->config.station == FRAD_STATION_CPE)
+			sdla_reconfig(slave);
+		else
+			sdla_cmd(slave, SDLA_ADD_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
+	}
+
+	return 0;
+}
+
+static int sdla_deassoc(struct net_device *slave, struct net_device *master)
+{
+	struct frad_local *flp;
+	int               i;
+
+	flp = netdev_priv(slave);
+
+	for(i=0;i<CONFIG_DLCI_MAX;i++)
+		if (flp->master[i] == master)
+			break;
+
+	if (i == CONFIG_DLCI_MAX)
+		return -ENODEV;
+
+	flp->master[i] = NULL;
+	flp->dlci[i] = 0;
+
+
+	if (netif_running(slave)) {
+		if (flp->config.station == FRAD_STATION_CPE)
+			sdla_reconfig(slave);
+		else
+			sdla_cmd(slave, SDLA_DELETE_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
+	}
+
+	return 0;
+}
+
+static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get)
+{
+	struct frad_local *flp;
+	struct dlci_local *dlp;
+	int               i;
+	short             len, ret;
+
+	flp = netdev_priv(slave);
+
+	for(i=0;i<CONFIG_DLCI_MAX;i++)
+		if (flp->master[i] == master)
+			break;
+
+	if (i == CONFIG_DLCI_MAX)
+		return -ENODEV;
+
+	dlp = netdev_priv(master);
+
+	ret = SDLA_RET_OK;
+	len = sizeof(struct dlci_conf);
+	if (netif_running(slave)) {
+		if (get)
+			ret = sdla_cmd(slave, SDLA_READ_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0,  
+			            NULL, 0, &dlp->config, &len);
+		else
+			ret = sdla_cmd(slave, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0,  
+			            &dlp->config, sizeof(struct dlci_conf) - 4 * sizeof(short), NULL, NULL);
+	}
+
+	return ret == SDLA_RET_OK ? 0 : -EIO;
+}
+
+/**************************
+ *
+ * now for the Linux driver 
+ *
+ **************************/
+
+/* NOTE: the DLCI driver deals with freeing the SKB!! */
+static netdev_tx_t sdla_transmit(struct sk_buff *skb,
+				 struct net_device *dev)
+{
+	struct frad_local *flp;
+	int               ret, addr, accept, i;
+	short             size;
+	unsigned long     flags;
+	struct buf_entry  *pbuf;
+
+	flp = netdev_priv(dev);
+	ret = 0;
+	accept = 1;
+
+	netif_stop_queue(dev);
+
+	/*
+	 * stupid GateD insists on setting up the multicast router through us
+	 * and we're ill-equipped to handle a non-Frame Relay packet at this
+	 * time!
+	 */
+
+	accept = 1;
+	switch (dev->type)
+	{
+		case ARPHRD_FRAD:
+			if (skb->dev->type != ARPHRD_DLCI)
+			{
+				netdev_warn(dev, "Non DLCI device, type %i, tried to send on FRAD module\n",
+					    skb->dev->type);
+				accept = 0;
+			}
+			break;
+		default:
+			netdev_warn(dev, "unknown firmware type 0x%04X\n",
+				    dev->type);
+			accept = 0;
+			break;
+	}
+	if (accept)
+	{
+		/* this is frame specific, but till there's a PPP module, it's the default */
+		switch (flp->type)
+		{
+			case SDLA_S502A:
+			case SDLA_S502E:
+				ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, skb->data, skb->len, NULL, NULL);
+				break;
+			case SDLA_S508:
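+				/* S508 transmit is a two-step handshake: the
+				 * INFORMATION_WRITE command returns the address of an
+				 * on-board buffer, then the frame is copied there and
+				 * opp_flag hands the buffer to the firmware. */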
+				size = sizeof(addr);
+				ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, NULL, skb->len, &addr, &size);
+				if (ret == SDLA_RET_OK)
+				{
+
+					spin_lock_irqsave(&sdla_lock, flags);
+					SDLA_WINDOW(dev, addr);
+					pbuf = (void *)(((int) dev->mem_start) + (addr & SDLA_ADDR_MASK));
+					__sdla_write(dev, pbuf->buf_addr, skb->data, skb->len);
+					SDLA_WINDOW(dev, addr);
+					pbuf->opp_flag = 1;
+					spin_unlock_irqrestore(&sdla_lock, flags);
+				}
+				break;
+		}
+
+		switch (ret)
+		{
+			case SDLA_RET_OK:
+				dev->stats.tx_packets++;
+				break;
+
+			case SDLA_RET_CIR_OVERFLOW:
+			case SDLA_RET_BUF_OVERSIZE:
+			case SDLA_RET_NO_BUFS:
+				dev->stats.tx_dropped++;
+				break;
+
+			default:
+				dev->stats.tx_errors++;
+				break;
+		}
+	}
+	netif_wake_queue(dev);
+	for(i=0;i<CONFIG_DLCI_MAX;i++)
+	{
+		if(flp->master[i]!=NULL)
+			netif_wake_queue(flp->master[i]);
+	}		
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+static void sdla_receive(struct net_device *dev)
+{
+	struct net_device	  *master;
+	struct frad_local *flp;
+	struct dlci_local *dlp;
+	struct sk_buff	 *skb;
+
+	struct sdla_cmd	*cmd;
+	struct buf_info	*pbufi;
+	struct buf_entry  *pbuf;
+
+	unsigned long	  flags;
+	int               i=0, received, success, addr, buf_base, buf_top;
+	short             dlci, len, len2, split;
+
+	flp = netdev_priv(dev);
+	success = 1;
+	received = addr = buf_top = buf_base = 0;
+	len = dlci = 0;
+	skb = NULL;
+	master = NULL;
+	cmd = NULL;
+	pbufi = NULL;
+	pbuf = NULL;
+
+	spin_lock_irqsave(&sdla_lock, flags);
+
+	switch (flp->type)
+	{
+		case SDLA_S502A:
+		case SDLA_S502E:
+			cmd = (void *) (dev->mem_start + (SDLA_502_RCV_BUF & SDLA_ADDR_MASK));
+			SDLA_WINDOW(dev, SDLA_502_RCV_BUF);
+			success = cmd->opp_flag;
+			if (!success)
+				break;
+
+			dlci = cmd->dlci;
+			len = cmd->length;
+			break;
+
+		case SDLA_S508:
+			pbufi = (void *) (dev->mem_start + (SDLA_508_RXBUF_INFO & SDLA_ADDR_MASK));
+			SDLA_WINDOW(dev, SDLA_508_RXBUF_INFO);
+			pbuf = (void *) (dev->mem_start + ((pbufi->rse_base + flp->buffer * sizeof(struct buf_entry)) & SDLA_ADDR_MASK));
+			success = pbuf->opp_flag;
+			if (!success)
+				break;
+
+			buf_top = pbufi->buf_top;
+			buf_base = pbufi->buf_base;
+			dlci = pbuf->dlci;
+			len = pbuf->length;
+			addr = pbuf->buf_addr;
+			break;
+	}
+
+	/* common code, find the DLCI and get the SKB */
+	if (success)
+	{
+		for (i=0;i<CONFIG_DLCI_MAX;i++)
+			if (flp->dlci[i] == dlci)
+				break;
+
+		if (i == CONFIG_DLCI_MAX)
+		{
+			netdev_notice(dev, "Received packet from invalid DLCI %i, ignoring\n",
+				      dlci);
+			dev->stats.rx_errors++;
+			success = 0;
+		}
+	}
+
+	if (success)
+	{
+		master = flp->master[i];
+		skb = dev_alloc_skb(len + sizeof(struct frhdr));
+		if (skb == NULL) 
+		{
+			netdev_notice(dev, "Memory squeeze, dropping packet\n");
+			dev->stats.rx_dropped++;
+			success = 0;
+		}
+		else
+			skb_reserve(skb, sizeof(struct frhdr));
+	}
+
+	/* pick up the data */
+	switch (flp->type)
+	{
+		case SDLA_S502A:
+		case SDLA_S502E:
+			if (success)
+				__sdla_read(dev, SDLA_502_RCV_BUF + SDLA_502_DATA_OFS, skb_put(skb,len), len);
+
+			SDLA_WINDOW(dev, SDLA_502_RCV_BUF);
+			cmd->opp_flag = 0;
+			break;
+
+		case SDLA_S508:
+			if (success)
+			{
+				/* is this buffer split off the end of the internal ring buffer */
+				split = addr + len > buf_top + 1 ? len - (buf_top - addr + 1) : 0;
+				len2 = len - split;
+
+				__sdla_read(dev, addr, skb_put(skb, len2), len2);
+				if (split)
+					__sdla_read(dev, buf_base, skb_put(skb, split), split);
+			}
+
+			/* increment the buffer we're looking at */
+			SDLA_WINDOW(dev, SDLA_508_RXBUF_INFO);
+			flp->buffer = (flp->buffer + 1) % pbufi->rse_num;
+			pbuf->opp_flag = 0;
+			break;
+	}
+
+	if (success)
+	{
+		dev->stats.rx_packets++;
+		dlp = netdev_priv(master);
+		(*dlp->receive)(skb, master);
+	}
+
+	spin_unlock_irqrestore(&sdla_lock, flags);
+}
+
+static irqreturn_t sdla_isr(int dummy, void *dev_id)
+{
+	struct net_device     *dev;
+	struct frad_local *flp;
+	char              byte;
+
+	dev = dev_id;
+
+	flp = netdev_priv(dev);
+
+	if (!flp->initialized)
+	{
+		netdev_warn(dev, "irq %d for uninitialized device\n", dev->irq);
+		return IRQ_NONE;
+	}
+
+	byte = sdla_byte(dev, flp->type == SDLA_S508 ? SDLA_508_IRQ_INTERFACE : SDLA_502_IRQ_INTERFACE);
+	switch (byte)
+	{
+		case SDLA_INTR_RX:
+			sdla_receive(dev);
+			break;
+
+		/* the command will get an error return, which is processed above */
+		case SDLA_INTR_MODEM:
+		case SDLA_INTR_STATUS:
+			sdla_cmd(dev, SDLA_READ_DLC_STATUS, 0, 0, NULL, 0, NULL, NULL);
+			break;
+
+		case SDLA_INTR_TX:
+		case SDLA_INTR_COMPLETE:
+		case SDLA_INTR_TIMER:
+			netdev_warn(dev, "invalid irq flag 0x%02X\n", byte);
+			break;
+	}
+
+	/* the S502E requires a manual acknowledgement of the interrupt */ 
+	if (flp->type == SDLA_S502E)
+	{
+		flp->state &= ~SDLA_S502E_INTACK;
+		outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+		flp->state |= SDLA_S502E_INTACK;
+		outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+	}
+
+	/* this clears the byte, informing the Z80 we're done */
+	byte = 0;
+	sdla_write(dev, flp->type == SDLA_S508 ? SDLA_508_IRQ_INTERFACE : SDLA_502_IRQ_INTERFACE, &byte, sizeof(byte));
+	return IRQ_HANDLED;
+}
+
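+/* On the S502A the receive path is polled from a one-tick timer rather than
+ * being interrupt driven; sdla_open() arms this timer for that card type. */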
+static void sdla_poll(unsigned long device)
+{
+	struct net_device	  *dev;
+	struct frad_local *flp;
+
+	dev = (struct net_device *) device;
+	flp = netdev_priv(dev);
+
+	if (sdla_byte(dev, SDLA_502_RCV_BUF))
+		sdla_receive(dev);
+
+	flp->timer.expires = 1;
+	add_timer(&flp->timer);
+}
+
+static int sdla_close(struct net_device *dev)
+{
+	struct frad_local *flp;
+	struct intr_info  intr;
+	int               len, i;
+	short             dlcis[CONFIG_DLCI_MAX];
+
+	flp = netdev_priv(dev);
+
+	len = 0;
+	for(i=0;i<CONFIG_DLCI_MAX;i++)
+		if (flp->dlci[i])
+			dlcis[len++] = abs(flp->dlci[i]);
+	len *= 2;
+
+	if (flp->config.station == FRAD_STATION_NODE)
+	{
+		for(i=0;i<CONFIG_DLCI_MAX;i++)
+			if (flp->dlci[i] > 0) 
+				sdla_cmd(dev, SDLA_DEACTIVATE_DLCI, 0, 0, dlcis, len, NULL, NULL);
+		sdla_cmd(dev, SDLA_DELETE_DLCI, 0, 0, dlcis, len, NULL, NULL);
+	}
+
+	memset(&intr, 0, sizeof(intr));
+	/* stop the receive path and disable the card's interrupts */
+	switch(flp->type)
+	{
+		case SDLA_S502A:
+			del_timer(&flp->timer); 
+			break;
+
+		case SDLA_S502E:
+			sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(char) + sizeof(short), NULL, NULL);
+			flp->state &= ~SDLA_S502E_INTACK;
+			outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+			break;
+
+		case SDLA_S507:
+			break;
+
+		case SDLA_S508:
+			sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(struct intr_info), NULL, NULL);
+			flp->state &= ~SDLA_S508_INTEN;
+			outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+			break;
+	}
+
+	sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
+
+	netif_stop_queue(dev);
+	
+	return 0;
+}
+
+struct conf_data {
+	struct frad_conf config;
+	short            dlci[CONFIG_DLCI_MAX];
+};
+
+static int sdla_open(struct net_device *dev)
+{
+	struct frad_local *flp;
+	struct dlci_local *dlp;
+	struct conf_data  data;
+	struct intr_info  intr;
+	int               len, i;
+	char              byte;
+
+	flp = netdev_priv(dev);
+
+	if (!flp->initialized)
+		return -EPERM;
+
+	if (!flp->configured)
+		return -EPERM;
+
+	/* time to send in the configuration */
+	len = 0;
+	for(i=0;i<CONFIG_DLCI_MAX;i++)
+		if (flp->dlci[i])
+			data.dlci[len++] = abs(flp->dlci[i]);
+	len *= 2;
+
+	memcpy(&data.config, &flp->config, sizeof(struct frad_conf));
+	len += sizeof(struct frad_conf);
+
+	sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
+	sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL);
+
+	if (flp->type == SDLA_S508)
+		flp->buffer = 0;
+
+	sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
+
+	/* let's start up the reception */
+	memset(&intr, 0, sizeof(intr));
+	switch(flp->type)
+	{
+		case SDLA_S502A:
+			flp->timer.expires = 1;
+			add_timer(&flp->timer);
+			break;
+
+		case SDLA_S502E:
+			flp->state |= SDLA_S502E_ENABLE;
+			outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+			flp->state |= SDLA_S502E_INTACK;
+			outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+			byte = 0;
+			sdla_write(dev, SDLA_502_IRQ_INTERFACE, &byte, sizeof(byte));
+			intr.flags = SDLA_INTR_RX | SDLA_INTR_STATUS | SDLA_INTR_MODEM;
+			sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(char) + sizeof(short), NULL, NULL);
+			break;
+
+		case SDLA_S507:
+			break;
+
+		case SDLA_S508:
+			flp->state |= SDLA_S508_INTEN;
+			outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
+			byte = 0;
+			sdla_write(dev, SDLA_508_IRQ_INTERFACE, &byte, sizeof(byte));
+			intr.flags = SDLA_INTR_RX | SDLA_INTR_STATUS | SDLA_INTR_MODEM;
+			intr.irq = dev->irq;
+			sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(struct intr_info), NULL, NULL);
+			break;
+	}
+
+	if (flp->config.station == FRAD_STATION_CPE)
+	{
+		byte = SDLA_ICS_STATUS_ENQ;
+		sdla_cmd(dev, SDLA_ISSUE_IN_CHANNEL_SIGNAL, 0, 0, &byte, sizeof(byte), NULL, NULL);
+	}
+	else
+	{
+		sdla_cmd(dev, SDLA_ADD_DLCI, 0, 0, data.dlci, len - sizeof(struct frad_conf), NULL, NULL);
+		for(i=0;i<CONFIG_DLCI_MAX;i++)
+			if (flp->dlci[i] > 0)
+				sdla_cmd(dev, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], 2*sizeof(flp->dlci[i]), NULL, NULL);
+	}
+
+	/* configure any specific DLCI settings */
+	for(i=0;i<CONFIG_DLCI_MAX;i++)
+		if (flp->dlci[i])
+		{
+			dlp = netdev_priv(flp->master[i]);
+			if (dlp->configured)
+				sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0, &dlp->config, sizeof(struct dlci_conf), NULL, NULL);
+		}
+
+	netif_start_queue(dev);
+	
+	return 0;
+}
+
+static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, int get)
+{
+	struct frad_local *flp;
+	struct conf_data  data;
+	int               i;
+	short             size;
+
+	if (dev->type == 0xFFFF)
+		return -EUNATCH;
+
+	flp = netdev_priv(dev);
+
+	if (!get)
+	{
+		if (netif_running(dev))
+			return -EBUSY;
+
+		if(copy_from_user(&data.config, conf, sizeof(struct frad_conf)))
+			return -EFAULT;
+
+		if (data.config.station & ~FRAD_STATION_NODE)
+			return -EINVAL;
+
+		if (data.config.flags & ~FRAD_VALID_FLAGS)
+			return -EINVAL;
+
+		if ((data.config.kbaud < 0) || 
+			 ((data.config.kbaud > 128) && (flp->type != SDLA_S508)))
+			return -EINVAL;
+
+		if (data.config.clocking & ~(FRAD_CLOCK_INT | SDLA_S508_PORT_RS232))
+			return -EINVAL;
+
+		if ((data.config.mtu < 0) || (data.config.mtu > SDLA_MAX_MTU))
+			return -EINVAL;
+
+		if ((data.config.T391 < 5) || (data.config.T391 > 30))
+			return -EINVAL;
+
+		if ((data.config.T392 < 5) || (data.config.T392 > 30))
+			return -EINVAL;
+
+		if ((data.config.N391 < 1) || (data.config.N391 > 255))
+			return -EINVAL;
+
+		if ((data.config.N392 < 1) || (data.config.N392 > 10))
+			return -EINVAL;
+
+		if ((data.config.N393 < 1) || (data.config.N393 > 10))
+			return -EINVAL;
+
+		memcpy(&flp->config, &data.config, sizeof(struct frad_conf));
+		flp->config.flags |= SDLA_DIRECT_RECV;
+
+		if (flp->type == SDLA_S508)
+			flp->config.flags |= SDLA_TX70_RX30;
+
+		if (dev->mtu != flp->config.mtu)
+		{
+			/* this is required to change the MTU */
+			dev->mtu = flp->config.mtu;
+			for(i=0;i<CONFIG_DLCI_MAX;i++)
+				if (flp->master[i])
+					flp->master[i]->mtu = flp->config.mtu;
+		}
+
+		flp->config.mtu += sizeof(struct frhdr);
+
+		/* off to the races! */
+		if (!flp->configured)
+			sdla_start(dev);
+
+		flp->configured = 1;
+	}
+	else
+	{
+		/* no sense reading if the CPU isn't started */
+		if (netif_running(dev))
+		{
+			size = sizeof(data);
+			if (sdla_cmd(dev, SDLA_READ_DLCI_CONFIGURATION, 0, 0, NULL, 0, &data, &size) != SDLA_RET_OK)
+				return -EIO;
+		}
+		else
+			if (flp->configured)
+				memcpy(&data.config, &flp->config, sizeof(struct frad_conf));
+			else
+				memset(&data.config, 0, sizeof(struct frad_conf));
+
+		memcpy(&flp->config, &data.config, sizeof(struct frad_conf));
+		data.config.flags &= FRAD_VALID_FLAGS;
+		data.config.mtu -= data.config.mtu > sizeof(struct frhdr) ? sizeof(struct frhdr) : data.config.mtu;
+		return copy_to_user(conf, &data.config, sizeof(struct frad_conf))?-EFAULT:0;
+	}
+
+	return 0;
+}
+
+static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int read)
+{
+	struct sdla_mem mem;
+	char	*temp;
+
+	if(copy_from_user(&mem, info, sizeof(mem)))
+		return -EFAULT;
+		
+	if (read)
+	{	
+		temp = kzalloc(mem.len, GFP_KERNEL);
+		if (!temp)
+			return -ENOMEM;
+		sdla_read(dev, mem.addr, temp, mem.len);
+		if(copy_to_user(mem.data, temp, mem.len))
+		{
+			kfree(temp);
+			return -EFAULT;
+		}
+		kfree(temp);
+	}
+	else
+	{
+		temp = memdup_user(mem.data, mem.len);
+		if (IS_ERR(temp))
+			return PTR_ERR(temp);
+		sdla_write(dev, mem.addr, temp, mem.len);
+		kfree(temp);
+	}
+	return 0;
+}
+
+static int sdla_reconfig(struct net_device *dev)
+{
+	struct frad_local *flp;
+	struct conf_data  data;
+	int               i, len;
+
+	flp = netdev_priv(dev);
+
+	len = 0;
+	for(i=0;i<CONFIG_DLCI_MAX;i++)
+		if (flp->dlci[i])
+			data.dlci[len++] = flp->dlci[i];
+	len *= 2;
+
+	memcpy(&data, &flp->config, sizeof(struct frad_conf));
+	len += sizeof(struct frad_conf);
+
+	sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
+	sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL);
+	sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
+
+	return 0;
+}
+
+static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct frad_local *flp;
+
+	if(!capable(CAP_NET_ADMIN))
+		return -EPERM;
+		
+	flp = netdev_priv(dev);
+
+	if (!flp->initialized)
+		return -EINVAL;
+
+	switch (cmd)
+	{
+		case FRAD_GET_CONF:
+		case FRAD_SET_CONF:
+			return sdla_config(dev, ifr->ifr_data, cmd == FRAD_GET_CONF);
+
+		case SDLA_IDENTIFY:
+			ifr->ifr_flags = flp->type;
+			break;
+
+		case SDLA_CPUSPEED:
+			return sdla_cpuspeed(dev, ifr);
+
+/* ==========================================================
+NOTE:  This is rather a useless action right now, as the
+       current driver does not support protocols other than
+       FR.  However, Sangoma has modules for a number of
+       other protocols in the works.
+============================================================*/
+		case SDLA_PROTOCOL:
+			if (flp->configured)
+				return -EALREADY;
+
+			switch (ifr->ifr_flags)
+			{
+				case ARPHRD_FRAD:
+					dev->type = ifr->ifr_flags;
+					break;
+				default:
+					return -ENOPROTOOPT;
+			}
+			break;
+
+		case SDLA_CLEARMEM:
+			sdla_clear(dev);
+			break;
+
+		case SDLA_WRITEMEM:
+		case SDLA_READMEM:
+			if(!capable(CAP_SYS_RAWIO))
+				return -EPERM;
+			return sdla_xfer(dev, ifr->ifr_data, cmd == SDLA_READMEM);
+
+		case SDLA_START:
+			sdla_start(dev);
+			break;
+
+		case SDLA_STOP:
+			sdla_stop(dev);
+			break;
+
+		default:
+			return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static int sdla_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct frad_local *flp;
+
+	flp = netdev_priv(dev);
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	/* for now, you can't change the MTU! */
+	return -EOPNOTSUPP;
+}
+
+static int sdla_set_config(struct net_device *dev, struct ifmap *map)
+{
+	struct frad_local *flp;
+	int               i;
+	char              byte;
+	unsigned base;
+	int err = -EINVAL;
+
+	flp = netdev_priv(dev);
+
+	if (flp->initialized)
+		return -EINVAL;
+
+	for(i=0; i < ARRAY_SIZE(valid_port); i++)
+		if (valid_port[i] == map->base_addr)
+			break;   
+
+	if (i == ARRAY_SIZE(valid_port))
+		return -EINVAL;
+
+	if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
+		pr_warn("io-port 0x%04lx in use\n", dev->base_addr);
+		return -EINVAL;
+	}
+	base = map->base_addr;
+
+	/* test for card types, S502A, S502E, S507, S508                 */
+	/* these tests shut down the card completely, so clear the state */
+	flp->type = SDLA_UNKNOWN;
+	flp->state = 0;
+   
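+	/* First probe: if every other port in the I/O region reads back 0xFF,
+	 * try the S502E status/control handshake. */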
+	for(i=1;i<SDLA_IO_EXTENTS;i++)
+		if (inb(base + i) != 0xFF)
+			break;
+
+	if (i == SDLA_IO_EXTENTS) {   
+		outb(SDLA_HALT, base + SDLA_REG_Z80_CONTROL);
+		if ((inb(base + SDLA_S502_STS) & 0x0F) == 0x08) {
+			outb(SDLA_S502E_INTACK, base + SDLA_REG_CONTROL);
+			if ((inb(base + SDLA_S502_STS) & 0x0F) == 0x0C) {
+				outb(SDLA_HALT, base + SDLA_REG_CONTROL);
+				flp->type = SDLA_S502E;
+				goto got_type;
+			}
+		}
+	}
+
+	for(byte=inb(base),i=0;i<SDLA_IO_EXTENTS;i++)
+		if (inb(base + i) != byte)
+			break;
+
+	if (i == SDLA_IO_EXTENTS) {
+		outb(SDLA_HALT, base + SDLA_REG_CONTROL);
+		if ((inb(base + SDLA_S502_STS) & 0x7E) == 0x30) {
+			outb(SDLA_S507_ENABLE, base + SDLA_REG_CONTROL);
+			if ((inb(base + SDLA_S502_STS) & 0x7E) == 0x32) {
+				outb(SDLA_HALT, base + SDLA_REG_CONTROL);
+				flp->type = SDLA_S507;
+				goto got_type;
+			}
+		}
+	}
+
+	outb(SDLA_HALT, base + SDLA_REG_CONTROL);
+	if ((inb(base + SDLA_S508_STS) & 0x3F) == 0x00) {
+		outb(SDLA_S508_INTEN, base + SDLA_REG_CONTROL);
+		if ((inb(base + SDLA_S508_STS) & 0x3F) == 0x10) {
+			outb(SDLA_HALT, base + SDLA_REG_CONTROL);
+			flp->type = SDLA_S508;
+			goto got_type;
+		}
+	}
+
+	outb(SDLA_S502A_HALT, base + SDLA_REG_CONTROL);
+	if (inb(base + SDLA_S502_STS) == 0x40) {
+		outb(SDLA_S502A_START, base + SDLA_REG_CONTROL);
+		if (inb(base + SDLA_S502_STS) == 0x40) {
+			outb(SDLA_S502A_INTEN, base + SDLA_REG_CONTROL);
+			if (inb(base + SDLA_S502_STS) == 0x44) {
+				outb(SDLA_S502A_START, base + SDLA_REG_CONTROL);
+				flp->type = SDLA_S502A;
+				goto got_type;
+			}
+		}
+	}
+
+	netdev_notice(dev, "Unknown card type\n");
+	err = -ENODEV;
+	goto fail;
+
+got_type:
+	switch(base) {
+		case 0x270:
+		case 0x280:
+		case 0x380: 
+		case 0x390:
+			if (flp->type != SDLA_S508 && flp->type != SDLA_S507)
+				goto fail;
+	}
+
+	switch (map->irq) {
+		case 2:
+			if (flp->type != SDLA_S502E)
+				goto fail;
+			break;
+
+		case 10:
+		case 11:
+		case 12:
+		case 15:
+		case 4:
+			if (flp->type != SDLA_S508 && flp->type != SDLA_S507)
+				goto fail;
+			break;
+		case 3:
+		case 5:
+		case 7:
+			if (flp->type == SDLA_S502A)
+				goto fail;
+			break;
+
+		default:
+			goto fail;
+	}
+
+	err = -EAGAIN;
+	if (request_irq(dev->irq, sdla_isr, 0, dev->name, dev)) 
+		goto fail;
+
+	if (flp->type == SDLA_S507) {
+		switch(dev->irq) {
+			case 3:
+				flp->state = SDLA_S507_IRQ3;
+				break;
+			case 4:
+				flp->state = SDLA_S507_IRQ4;
+				break;
+			case 5:
+				flp->state = SDLA_S507_IRQ5;
+				break;
+			case 7:
+				flp->state = SDLA_S507_IRQ7;
+				break;
+			case 10:
+				flp->state = SDLA_S507_IRQ10;
+				break;
+			case 11:
+				flp->state = SDLA_S507_IRQ11;
+				break;
+			case 12:
+				flp->state = SDLA_S507_IRQ12;
+				break;
+			case 15:
+				flp->state = SDLA_S507_IRQ15;
+				break;
+		}
+	}
+
+	for(i=0; i < ARRAY_SIZE(valid_mem); i++)
+		if (valid_mem[i] == map->mem_start)
+			break;   
+
+	err = -EINVAL;
+	if (i == ARRAY_SIZE(valid_mem))
+		goto fail2;
+
+	if (flp->type == SDLA_S502A && (map->mem_start & 0xF000) >> 12 == 0x0E)
+		goto fail2;
+
+	if (flp->type != SDLA_S507 && map->mem_start >> 16 == 0x0B)
+		goto fail2;
+
+	if (flp->type == SDLA_S507 && map->mem_start >> 16 == 0x0D)
+		goto fail2;
+
+	byte = flp->type != SDLA_S508 ? SDLA_8K_WINDOW : 0;
+	byte |= (map->mem_start & 0xF000) >> (12 + (flp->type == SDLA_S508 ? 1 : 0));
+	switch(flp->type) {
+		case SDLA_S502A:
+		case SDLA_S502E:
+			switch (map->mem_start >> 16) {
+				case 0x0A:
+					byte |= SDLA_S502_SEG_A;
+					break;
+				case 0x0C:
+					byte |= SDLA_S502_SEG_C;
+					break;
+				case 0x0D:
+					byte |= SDLA_S502_SEG_D;
+					break;
+				case 0x0E:
+					byte |= SDLA_S502_SEG_E;
+					break;
+			}
+			break;
+		case SDLA_S507:
+			switch (map->mem_start >> 16) {
+				case 0x0A:
+					byte |= SDLA_S507_SEG_A;
+					break;
+				case 0x0B:
+					byte |= SDLA_S507_SEG_B;
+					break;
+				case 0x0C:
+					byte |= SDLA_S507_SEG_C;
+					break;
+				case 0x0E:
+					byte |= SDLA_S507_SEG_E;
+					break;
+			}
+			break;
+		case SDLA_S508:
+			switch (map->mem_start >> 16) {
+				case 0x0A:
+					byte |= SDLA_S508_SEG_A;
+					break;
+				case 0x0C:
+					byte |= SDLA_S508_SEG_C;
+					break;
+				case 0x0D:
+					byte |= SDLA_S508_SEG_D;
+					break;
+				case 0x0E:
+					byte |= SDLA_S508_SEG_E;
+					break;
+			}
+			break;
+	}
+
+	/* set the memory bits, and enable access */
+	outb(byte, base + SDLA_REG_PC_WINDOW);
+
+	switch(flp->type)
+	{
+		case SDLA_S502E:
+			flp->state = SDLA_S502E_ENABLE;
+			break;
+		case SDLA_S507:
+			flp->state |= SDLA_MEMEN;
+			break;
+		case SDLA_S508:
+			flp->state = SDLA_MEMEN;
+			break;
+	}
+	outb(flp->state, base + SDLA_REG_CONTROL);
+
+	dev->irq = map->irq;
+	dev->base_addr = base;
+	dev->mem_start = map->mem_start;
+	dev->mem_end = dev->mem_start + 0x2000;
+	flp->initialized = 1;
+	return 0;
+
+fail2:
+	free_irq(map->irq, dev);
+fail:
+	release_region(base, SDLA_IO_EXTENTS);
+	return err;
+}
+ 
+static const struct net_device_ops sdla_netdev_ops = {
+	.ndo_open	= sdla_open,
+	.ndo_stop	= sdla_close,
+	.ndo_do_ioctl	= sdla_ioctl,
+	.ndo_set_config	= sdla_set_config,
+	.ndo_start_xmit	= sdla_transmit,
+	.ndo_change_mtu	= sdla_change_mtu,
+};
+
+static void setup_sdla(struct net_device *dev)
+{
+	struct frad_local *flp = netdev_priv(dev);
+
+	netdev_boot_setup_check(dev);
+
+	dev->netdev_ops		= &sdla_netdev_ops;
+	dev->flags		= 0;
+	dev->type		= 0xFFFF;
+	dev->hard_header_len	= 0;
+	dev->addr_len		= 0;
+	dev->mtu		= SDLA_MAX_MTU;
+
+	flp->activate		= sdla_activate;
+	flp->deactivate		= sdla_deactivate;
+	flp->assoc		= sdla_assoc;
+	flp->deassoc		= sdla_deassoc;
+	flp->dlci_conf		= sdla_dlci_conf;
+
+	init_timer(&flp->timer);
+	flp->timer.expires	= 1;
+	flp->timer.data		= (unsigned long) dev;
+	flp->timer.function	= sdla_poll;
+}
+
+static struct net_device *sdla;
+
+static int __init init_sdla(void)
+{
+	int err;
+
+	printk("%s.\n", version);
+
+	sdla = alloc_netdev(sizeof(struct frad_local), "sdla0", setup_sdla);
+	if (!sdla) 
+		return -ENOMEM;
+
+	err = register_netdev(sdla);
+	if (err) 
+		free_netdev(sdla);
+
+	return err;
+}
+
+static void __exit exit_sdla(void)
+{
+	struct frad_local *flp = netdev_priv(sdla);
+
+	unregister_netdev(sdla);
+	if (flp->initialized) {
+		free_irq(sdla->irq, sdla);
+		release_region(sdla->base_addr, SDLA_IO_EXTENTS);
+	}
+	del_timer_sync(&flp->timer);
+	free_netdev(sdla);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_sdla);
+module_exit(exit_sdla);
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/sealevel.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/sealevel.c
new file mode 100644
index 0000000..4f77484
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/sealevel.c
@@ -0,0 +1,398 @@
+/*
+ *	Sealevel Systems 4021 driver.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	(c) Copyright 1999, 2001 Alan Cox
+ *	(c) Copyright 2001 Red Hat Inc.
+ *	Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/delay.h>
+#include <linux/hdlc.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <net/arp.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+#include "z85230.h"
+
+
+struct slvl_device
+{
+	struct z8530_channel *chan;
+	int channel;
+};
+
+
+struct slvl_board
+{
+	struct slvl_device dev[2];
+	struct z8530_dev board;
+	int iobase;
+};
+
+/*
+ *	Network driver support routines
+ */
+
+static inline struct slvl_device* dev_to_chan(struct net_device *dev)
+{
+	return (struct slvl_device *)dev_to_hdlc(dev)->priv;
+}
+
+/*
+ *	Frame receive. Simple for our card as we do HDLC and there
+ *	is no funny garbage involved
+ */
+
+static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
+{
+	/* Drop the CRC - it's not a good idea to try and negotiate it ;) */
+	skb_trim(skb, skb->len - 2);
+	skb->protocol = hdlc_type_trans(skb, c->netdevice);
+	skb_reset_mac_header(skb);
+	skb->dev = c->netdevice;
+	netif_rx(skb);
+}
+
+/*
+ *	We've been placed in the UP state
+ */
+
+static int sealevel_open(struct net_device *d)
+{
+	struct slvl_device *slvl = dev_to_chan(d);
+	int err = -1;
+	int unit = slvl->channel;
+
+	/*
+	 *	Link layer up.
+	 */
+
+	switch (unit) {
+		case 0:
+			err = z8530_sync_dma_open(d, slvl->chan);
+			break;
+		case 1:
+			err = z8530_sync_open(d, slvl->chan);
+			break;
+	}
+
+	if (err)
+		return err;
+
+	err = hdlc_open(d);
+	if (err) {
+		switch (unit) {
+			case 0:
+				z8530_sync_dma_close(d, slvl->chan);
+				break;
+			case 1:
+				z8530_sync_close(d, slvl->chan);
+				break;
+		}
+		return err;
+	}
+
+	slvl->chan->rx_function = sealevel_input;
+
+	/*
+	 *	Go go go
+	 */
+	netif_start_queue(d);
+	return 0;
+}
+
+static int sealevel_close(struct net_device *d)
+{
+	struct slvl_device *slvl = dev_to_chan(d);
+	int unit = slvl->channel;
+
+	/*
+	 *	Discard new frames
+	 */
+
+	slvl->chan->rx_function = z8530_null_rx;
+
+	hdlc_close(d);
+	netif_stop_queue(d);
+
+	switch (unit) {
+		case 0:
+			z8530_sync_dma_close(d, slvl->chan);
+			break;
+		case 1:
+			z8530_sync_close(d, slvl->chan);
+			break;
+	}
+	return 0;
+}
+
+static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
+{
+	/* struct slvl_device *slvl=dev_to_chan(d);
+	   z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */
+	return hdlc_ioctl(d, ifr, cmd);
+}
+
+/*
+ *	Passed network frames, fire them downwind.
+ */
+
+static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
+					     struct net_device *d)
+{
+	return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
+}
+
+static int sealevel_attach(struct net_device *dev, unsigned short encoding,
+			   unsigned short parity)
+{
+	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
+		return 0;
+	return -EINVAL;
+}
+
+static const struct net_device_ops sealevel_ops = {
+	.ndo_open       = sealevel_open,
+	.ndo_stop       = sealevel_close,
+	.ndo_change_mtu = hdlc_change_mtu,
+	.ndo_start_xmit = hdlc_start_xmit,
+	.ndo_do_ioctl   = sealevel_ioctl,
+};
+
+static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
+{
+	struct net_device *dev = alloc_hdlcdev(sv);
+	if (!dev)
+		return -1;
+
+	dev_to_hdlc(dev)->attach = sealevel_attach;
+	dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
+	dev->netdev_ops = &sealevel_ops;
+	dev->base_addr = iobase;
+	dev->irq = irq;
+
+	if (register_hdlc_device(dev)) {
+		pr_err("unable to register HDLC device\n");
+		free_netdev(dev);
+		return -1;
+	}
+
+	sv->chan->netdevice = dev;
+	return 0;
+}
+
+
+/*
+ *	Allocate and setup Sealevel board.
+ */
+
+static __init struct slvl_board *slvl_init(int iobase, int irq,
+					   int txdma, int rxdma, int slow)
+{
+	struct z8530_dev *dev;
+	struct slvl_board *b;
+
+	/*
+	 *	Get the needed I/O space
+	 */
+
+	if (!request_region(iobase, 8, "Sealevel 4021")) {
+		pr_warn("I/O 0x%X already in use\n", iobase);
+		return NULL;
+	}
+
+	b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
+	if (!b)
+		goto err_kzalloc;
+
+	b->dev[0].chan = &b->board.chanA;
+	b->dev[0].channel = 0;
+
+	b->dev[1].chan = &b->board.chanB;
+	b->dev[1].channel = 1;
+
+	dev = &b->board;
+
+	/*
+	 *	Stuff in the I/O addressing
+	 */
+
+	dev->active = 0;
+
+	b->iobase = iobase;
+
+	/*
+	 *	Select 8530 delays for the old board
+	 */
+
+	if (slow)
+		iobase |= Z8530_PORT_SLEEP;
+
+	dev->chanA.ctrlio = iobase + 1;
+	dev->chanA.dataio = iobase;
+	dev->chanB.ctrlio = iobase + 3;
+	dev->chanB.dataio = iobase + 2;
+
+	dev->chanA.irqs = &z8530_nop;
+	dev->chanB.irqs = &z8530_nop;
+
+	/*
+	 *	Assert DTR enable DMA
+	 */
+
+	outb(3 | (1 << 7), b->iobase + 4);
+
+
+	/* We want a fast IRQ for this device. Actually we'd like an even faster
+	   IRQ ;) - This is one driver RtLinux is made for */
+
+	if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
+			"SeaLevel", dev) < 0) {
+		pr_warn("IRQ %d already in use\n", irq);
+		goto err_request_irq;
+	}
+
+	dev->irq = irq;
+	dev->chanA.private = &b->dev[0];
+	dev->chanB.private = &b->dev[1];
+	dev->chanA.dev = dev;
+	dev->chanB.dev = dev;
+
+	dev->chanA.txdma = 3;
+	dev->chanA.rxdma = 1;
+	if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))
+		goto err_dma_tx;
+
+	if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))
+		goto err_dma_rx;
+
+	disable_irq(irq);
+
+	/*
+	 *	Begin normal initialise
+	 */
+
+	if (z8530_init(dev) != 0) {
+		pr_err("Z8530 series device not found\n");
+		enable_irq(irq);
+		goto free_hw;
+	}
+	if (dev->type == Z85C30) {
+		z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
+		z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
+	} else {
+		z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
+		z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
+	}
+
+	/*
+	 *	Now we can take the IRQ
+	 */
+
+	enable_irq(irq);
+
+	if (slvl_setup(&b->dev[0], iobase, irq))
+		goto free_hw;
+	if (slvl_setup(&b->dev[1], iobase, irq))
+		goto free_netdev0;
+
+	z8530_describe(dev, "I/O", iobase);
+	dev->active = 1;
+	return b;
+
+free_netdev0:
+	unregister_hdlc_device(b->dev[0].chan->netdevice);
+	free_netdev(b->dev[0].chan->netdevice);
+free_hw:
+	free_dma(dev->chanA.rxdma);
+err_dma_rx:
+	free_dma(dev->chanA.txdma);
+err_dma_tx:
+	free_irq(irq, dev);
+err_request_irq:
+	kfree(b);
+err_kzalloc:
+	release_region(iobase, 8);
+	return NULL;
+}
+
+static void __exit slvl_shutdown(struct slvl_board *b)
+{
+	int u;
+
+	z8530_shutdown(&b->board);
+
+	for (u = 0; u < 2; u++) {
+		struct net_device *d = b->dev[u].chan->netdevice;
+		unregister_hdlc_device(d);
+		free_netdev(d);
+	}
+
+	free_irq(b->board.irq, &b->board);
+	free_dma(b->board.chanA.rxdma);
+	free_dma(b->board.chanA.txdma);
+	/* DMA off on the card, drop DTR */
+	outb(0, b->iobase);
+	release_region(b->iobase, 8);
+	kfree(b);
+}
+
+
+static int io=0x238;
+static int txdma=1;
+static int rxdma=3;
+static int irq=5;
+static bool slow=false;
+
+module_param(io, int, 0);
+MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
+module_param(txdma, int, 0);
+MODULE_PARM_DESC(txdma, "Transmit DMA channel");
+module_param(rxdma, int, 0);
+MODULE_PARM_DESC(rxdma, "Receive DMA channel");
+module_param(irq, int, 0);
+MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card");
+module_param(slow, bool, 0);
+MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012");
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021");
+
+static struct slvl_board *slvl_unit;
+
+static int __init slvl_init_module(void)
+{
+	slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);
+
+	return slvl_unit ? 0 : -ENODEV;
+}
+
+static void __exit slvl_cleanup_module(void)
+{
+	if (slvl_unit)
+		slvl_shutdown(slvl_unit);
+}
+
+module_init(slvl_init_module);
+module_exit(slvl_cleanup_module);
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/wanxl.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/wanxl.c
new file mode 100644
index 0000000..ccd496b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/wanxl.c
@@ -0,0 +1,850 @@
+/*
+ * wanXL serial card driver for Linux
+ * host part
+ *
+ * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Status:
+ *   - Only DTE (external clock) support with NRZ and NRZI encodings
+ *   - wanXL100 will require minor driver modifications, no access to hw
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/hdlc.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+#include "wanxl.h"
+
+static const char* version = "wanXL serial card driver version: 0.48";
+
+#define PLX_CTL_RESET   0x40000000 /* adapter reset */
+
+#undef DEBUG_PKT
+#undef DEBUG_PCI
+
+/* MAILBOX #1 - PUTS COMMANDS */
+#define MBX1_CMD_ABORTJ 0x85000000 /* Abort and Jump */
+#ifdef __LITTLE_ENDIAN
+#define MBX1_CMD_BSWAP  0x8C000001 /* little-endian Byte Swap Mode */
+#else
+#define MBX1_CMD_BSWAP  0x8C000000 /* big-endian Byte Swap Mode */
+#endif
+
+/* MAILBOX #2 - DRAM SIZE */
+#define MBX2_MEMSZ_MASK 0xFFFF0000 /* PUTS Memory Size Register mask */
+
+
+typedef struct {
+	struct net_device *dev;
+	struct card_t *card;
+	spinlock_t lock;	/* for wanxl_xmit */
+        int node;		/* physical port #0 - 3 */
+	unsigned int clock_type;
+	int tx_in, tx_out;
+	struct sk_buff *tx_skbs[TX_BUFFERS];
+}port_t;
+
+
+typedef struct {
+	desc_t rx_descs[RX_QUEUE_LENGTH];
+	port_status_t port_status[4];
+}card_status_t;
+
+
+typedef struct card_t {
+	int n_ports;		/* 1, 2 or 4 ports */
+	u8 irq;
+
+	u8 __iomem *plx;	/* PLX PCI9060 virtual base address */
+	struct pci_dev *pdev;	/* for pci_name(pdev) */
+	int rx_in;
+	struct sk_buff *rx_skbs[RX_QUEUE_LENGTH];
+	card_status_t *status;	/* shared between host and card */
+	dma_addr_t status_address;
+	port_t ports[0];	/* 1 - 4 port_t structures follow */
+}card_t;
+
+
+
+static inline port_t* dev_to_port(struct net_device *dev)
+{
+        return (port_t *)dev_to_hdlc(dev)->priv;
+}
+
+
+static inline port_status_t* get_status(port_t *port)
+{
+	return &port->card->status->port_status[port->node];
+}
+
+
+#ifdef DEBUG_PCI
+static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr,
+					      size_t size, int direction)
+{
+	dma_addr_t addr = pci_map_single(pdev, ptr, size, direction);
+	if (addr + size > 0x100000000LL)
+		pr_crit("%s: pci_map_single() returned memory at 0x%llx!\n",
+			pci_name(pdev), (unsigned long long)addr);
+	return addr;
+}
+
+#undef pci_map_single
+#define pci_map_single pci_map_single_debug
+#endif
+
+
+/* Cable and/or personality module change interrupt service */
+static inline void wanxl_cable_intr(port_t *port)
+{
+	u32 value = get_status(port)->cable;
+	int valid = 1;
+	const char *cable, *pm, *dte = "", *dsr = "", *dcd = "";
+
+	switch(value & 0x7) {
+	case STATUS_CABLE_V35: cable = "V.35"; break;
+	case STATUS_CABLE_X21: cable = "X.21"; break;
+	case STATUS_CABLE_V24: cable = "V.24"; break;
+	case STATUS_CABLE_EIA530: cable = "EIA530"; break;
+	case STATUS_CABLE_NONE: cable = "no"; break;
+	default: cable = "invalid";
+	}
+
+	switch((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
+	case STATUS_CABLE_V35: pm = "V.35"; break;
+	case STATUS_CABLE_X21: pm = "X.21"; break;
+	case STATUS_CABLE_V24: pm = "V.24"; break;
+	case STATUS_CABLE_EIA530: pm = "EIA530"; break;
+	case STATUS_CABLE_NONE: pm = "no personality"; valid = 0; break;
+	default: pm = "invalid personality"; valid = 0;
+	}
+
+	if (valid) {
+		if ((value & 7) == ((value >> STATUS_CABLE_PM_SHIFT) & 7)) {
+			dsr = (value & STATUS_CABLE_DSR) ? ", DSR ON" :
+				", DSR off";
+			dcd = (value & STATUS_CABLE_DCD) ? ", carrier ON" :
+				", carrier off";
+		}
+		dte = (value & STATUS_CABLE_DCE) ? " DCE" : " DTE";
+	}
+	netdev_info(port->dev, "%s%s module, %s cable%s%s\n",
+		    pm, dte, cable, dsr, dcd);
+
+	if (value & STATUS_CABLE_DCD)
+		netif_carrier_on(port->dev);
+	else
+		netif_carrier_off(port->dev);
+}
+
+
+
+/* Transmit complete interrupt service */
+static inline void wanxl_tx_intr(port_t *port)
+{
+	struct net_device *dev = port->dev;
+	while (1) {
+                desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
+		struct sk_buff *skb = port->tx_skbs[port->tx_in];
+
+		switch (desc->stat) {
+		case PACKET_FULL:
+		case PACKET_EMPTY:
+			netif_wake_queue(dev);
+			return;
+
+		case PACKET_UNDERRUN:
+			dev->stats.tx_errors++;
+			dev->stats.tx_fifo_errors++;
+			break;
+
+		default:
+			dev->stats.tx_packets++;
+			dev->stats.tx_bytes += skb->len;
+		}
+                desc->stat = PACKET_EMPTY; /* Free descriptor */
+		pci_unmap_single(port->card->pdev, desc->address, skb->len,
+				 PCI_DMA_TODEVICE);
+		dev_kfree_skb_irq(skb);
+                port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
+        }
+}
+
+
+
+/* Receive complete interrupt service */
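+/* Drains the shared RX descriptor ring: each completed buffer is passed up
+ * the stack and the slot is refilled with a freshly mapped skb (when
+ * allocation succeeds) before the descriptor is handed back to the card. */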
+static inline void wanxl_rx_intr(card_t *card)
+{
+	desc_t *desc;
+	while (desc = &card->status->rx_descs[card->rx_in],
+	       desc->stat != PACKET_EMPTY) {
+		if ((desc->stat & PACKET_PORT_MASK) > card->n_ports)
+			pr_crit("%s: received packet for nonexistent port\n",
+				pci_name(card->pdev));
+		else {
+			struct sk_buff *skb = card->rx_skbs[card->rx_in];
+			port_t *port = &card->ports[desc->stat &
+						    PACKET_PORT_MASK];
+			struct net_device *dev = port->dev;
+
+			if (!skb)
+				dev->stats.rx_dropped++;
+			else {
+				pci_unmap_single(card->pdev, desc->address,
+						 BUFFER_LENGTH,
+						 PCI_DMA_FROMDEVICE);
+				skb_put(skb, desc->length);
+
+#ifdef DEBUG_PKT
+				printk(KERN_DEBUG "%s RX(%i):", dev->name,
+				       skb->len);
+				debug_frame(skb);
+#endif
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += skb->len;
+				skb->protocol = hdlc_type_trans(skb, dev);
+				netif_rx(skb);
+				skb = NULL;
+			}
+
+			if (!skb) {
+				skb = dev_alloc_skb(BUFFER_LENGTH);
+				desc->address = skb ?
+					pci_map_single(card->pdev, skb->data,
+						       BUFFER_LENGTH,
+						       PCI_DMA_FROMDEVICE) : 0;
+				card->rx_skbs[card->rx_in] = skb;
+			}
+		}
+		desc->stat = PACKET_EMPTY; /* Free descriptor */
+		card->rx_in = (card->rx_in + 1) % RX_QUEUE_LENGTH;
+	}
+}
+
+
+
+static irqreturn_t wanxl_intr(int irq, void* dev_id)
+{
+        card_t *card = dev_id;
+        int i;
+        u32 stat;
+        int handled = 0;
+
+
+        while((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
+                handled = 1;
+		writel(stat, card->plx + PLX_DOORBELL_FROM_CARD);
+
+                for (i = 0; i < card->n_ports; i++) {
+			if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i)))
+				wanxl_tx_intr(&card->ports[i]);
+			if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i)))
+				wanxl_cable_intr(&card->ports[i]);
+		}
+		if (stat & (1 << DOORBELL_FROM_CARD_RX))
+			wanxl_rx_intr(card);
+        }
+
+        return IRQ_RETVAL(handled);
+}
+
+
+
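+/* Queue one frame on the port's TX descriptor ring (shared with the card's
+ * firmware) and ring the doorbell; the queue is stopped whenever the next
+ * descriptor is still owned by the card. */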
+static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+        port_t *port = dev_to_port(dev);
+	desc_t *desc;
+
+        spin_lock(&port->lock);
+
+	desc = &get_status(port)->tx_descs[port->tx_out];
+        if (desc->stat != PACKET_EMPTY) {
+                /* should never happen - previous xmit should stop queue */
+#ifdef DEBUG_PKT
+                printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
+#endif
+		netif_stop_queue(dev);
+		spin_unlock(&port->lock);
+		return NETDEV_TX_BUSY;       /* request packet to be queued */
+	}
+
+#ifdef DEBUG_PKT
+	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
+	debug_frame(skb);
+#endif
+
+	port->tx_skbs[port->tx_out] = skb;
+	desc->address = pci_map_single(port->card->pdev, skb->data, skb->len,
+				       PCI_DMA_TODEVICE);
+	desc->length = skb->len;
+	desc->stat = PACKET_FULL;
+	writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
+	       port->card->plx + PLX_DOORBELL_TO_CARD);
+
+	port->tx_out = (port->tx_out + 1) % TX_BUFFERS;
+
+	if (get_status(port)->tx_descs[port->tx_out].stat != PACKET_EMPTY) {
+		netif_stop_queue(dev);
+#ifdef DEBUG_PKT
+		printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
+#endif
+	}
+
+	spin_unlock(&port->lock);
+	return NETDEV_TX_OK;
+}
+
+
+
+static int wanxl_attach(struct net_device *dev, unsigned short encoding,
+			unsigned short parity)
+{
+	port_t *port = dev_to_port(dev);
+
+	if (encoding != ENCODING_NRZ &&
+	    encoding != ENCODING_NRZI)
+		return -EINVAL;
+
+	if (parity != PARITY_NONE &&
+	    parity != PARITY_CRC32_PR1_CCITT &&
+	    parity != PARITY_CRC16_PR1_CCITT &&
+	    parity != PARITY_CRC32_PR0_CCITT &&
+	    parity != PARITY_CRC16_PR0_CCITT)
+		return -EINVAL;
+
+	get_status(port)->encoding = encoding;
+	get_status(port)->parity = parity;
+	return 0;
+}
+
+
+
+static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	const size_t size = sizeof(sync_serial_settings);
+	sync_serial_settings line;
+	port_t *port = dev_to_port(dev);
+
+	if (cmd != SIOCWANDEV)
+		return hdlc_ioctl(dev, ifr, cmd);
+
+	switch (ifr->ifr_settings.type) {
+	case IF_GET_IFACE:
+		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
+		if (ifr->ifr_settings.size < size) {
+			ifr->ifr_settings.size = size; /* data size wanted */
+			return -ENOBUFS;
+		}
+		memset(&line, 0, sizeof(line));
+		line.clock_type = get_status(port)->clocking;
+		line.clock_rate = 0;
+		line.loopback = 0;
+
+		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
+			return -EFAULT;
+		return 0;
+
+	case IF_IFACE_SYNC_SERIAL:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (dev->flags & IFF_UP)
+			return -EBUSY;
+
+		if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync,
+				   size))
+			return -EFAULT;
+
+		if (line.clock_type != CLOCK_EXT &&
+		    line.clock_type != CLOCK_TXFROMRX)
+			return -EINVAL; /* No such clock setting */
+
+		if (line.loopback != 0)
+			return -EINVAL;
+
+		get_status(port)->clocking = line.clock_type;
+		return 0;
+
+	default:
+		return hdlc_ioctl(dev, ifr, cmd);
+        }
+}
+
+
+
+static int wanxl_open(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+	u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD;
+	unsigned long timeout;
+	int i;
+
+	if (get_status(port)->open) {
+		netdev_err(dev, "port already open\n");
+		return -EIO;
+	}
+	if ((i = hdlc_open(dev)) != 0)
+		return i;
+
+	port->tx_in = port->tx_out = 0;
+	for (i = 0; i < TX_BUFFERS; i++)
+		get_status(port)->tx_descs[i].stat = PACKET_EMPTY;
+	/* signal the card */
+	writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr);
+
+	timeout = jiffies + HZ;
+	do {
+		if (get_status(port)->open) {
+			netif_start_queue(dev);
+			return 0;
+		}
+	} while (time_after(timeout, jiffies));
+
+	netdev_err(dev, "unable to open port\n");
+	/* ask the card to close the port, should it be still alive */
+	writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr);
+	return -EFAULT;
+}
+
+
+
+static int wanxl_close(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+	unsigned long timeout;
+	int i;
+
+	hdlc_close(dev);
+	/* signal the card */
+	writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node),
+	       port->card->plx + PLX_DOORBELL_TO_CARD);
+
+	timeout = jiffies + HZ;
+	do {
+		if (!get_status(port)->open)
+			break;
+	} while (time_after(timeout, jiffies));
+
+	if (get_status(port)->open)
+		netdev_err(dev, "unable to close port\n");
+
+	netif_stop_queue(dev);
+
+	for (i = 0; i < TX_BUFFERS; i++) {
+		desc_t *desc = &get_status(port)->tx_descs[i];
+
+		if (desc->stat != PACKET_EMPTY) {
+			desc->stat = PACKET_EMPTY;
+			pci_unmap_single(port->card->pdev, desc->address,
+					 port->tx_skbs[i]->len,
+					 PCI_DMA_TODEVICE);
+			dev_kfree_skb(port->tx_skbs[i]);
+		}
+	}
+	return 0;
+}
+
+
+
+static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
+{
+	port_t *port = dev_to_port(dev);
+
+	dev->stats.rx_over_errors = get_status(port)->rx_overruns;
+	dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors;
+	dev->stats.rx_errors = dev->stats.rx_over_errors +
+		dev->stats.rx_frame_errors;
+	return &dev->stats;
+}
+
+
+
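+/* Post a PUTS command in mailbox 1 and wait up to five seconds for the
+ * card to acknowledge it by clearing the mailbox; returns 0 on success,
+ * -1 on timeout. */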
+static int wanxl_puts_command(card_t *card, u32 cmd)
+{
+	unsigned long timeout = jiffies + 5 * HZ;
+
+	writel(cmd, card->plx + PLX_MAILBOX_1);
+	do {
+		if (readl(card->plx + PLX_MAILBOX_1) == 0)
+			return 0;
+
+		schedule();
+	}while (time_after(timeout, jiffies));
+
+	return -1;
+}
+
+
+
+static void wanxl_reset(card_t *card)
+{
+	u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET;
+
+	writel(0x80, card->plx + PLX_MAILBOX_0);
+	writel(old_value | PLX_CTL_RESET, card->plx + PLX_CONTROL);
+	readl(card->plx + PLX_CONTROL); /* wait for posted write */
+	udelay(1);
+	writel(old_value, card->plx + PLX_CONTROL);
+	readl(card->plx + PLX_CONTROL); /* wait for posted write */
+}
+
+
+
+static void wanxl_pci_remove_one(struct pci_dev *pdev)
+{
+	card_t *card = pci_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < card->n_ports; i++) {
+		unregister_hdlc_device(card->ports[i].dev);
+		free_netdev(card->ports[i].dev);
+	}
+
+	/* unregister and free all host resources */
+	if (card->irq)
+		free_irq(card->irq, card);
+
+	wanxl_reset(card);
+
+	for (i = 0; i < RX_QUEUE_LENGTH; i++)
+		if (card->rx_skbs[i]) {
+			pci_unmap_single(card->pdev,
+					 card->status->rx_descs[i].address,
+					 BUFFER_LENGTH, PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(card->rx_skbs[i]);
+		}
+
+	if (card->plx)
+		iounmap(card->plx);
+
+	if (card->status)
+		pci_free_consistent(pdev, sizeof(card_status_t),
+				    card->status, card->status_address);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	kfree(card);
+}
+
+
+#include "wanxlfw.inc"
+
+static const struct net_device_ops wanxl_ops = {
+	.ndo_open       = wanxl_open,
+	.ndo_stop       = wanxl_close,
+	.ndo_change_mtu = hdlc_change_mtu,
+	.ndo_start_xmit = hdlc_start_xmit,
+	.ndo_do_ioctl   = wanxl_ioctl,
+	.ndo_get_stats  = wanxl_get_stats,
+};
+
+static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
+					const struct pci_device_id *ent)
+{
+	card_t *card;
+	u32 ramsize, stat;
+	unsigned long timeout;
+	u32 plx_phy;		/* PLX PCI base address */
+	u32 mem_phy;		/* memory PCI base addr */
+	u8 __iomem *mem;	/* memory virtual base addr */
+	int i, ports, alloc_size;
+
+#ifndef MODULE
+	pr_info_once("%s\n", version);
+#endif
+
+	i = pci_enable_device(pdev);
+	if (i)
+		return i;
+
+	/* QUICC can only access first 256 MB of host RAM directly,
+	   but PLX9060 DMA does 32-bits for actual packet data transfers */
+
+	/* FIXME when PCI/DMA subsystems are fixed.
+	   We set both dma_mask and consistent_dma_mask to 28 bits
+	   and pray pci_alloc_consistent() will use this info. It should
+	   work on most platforms */
+	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(28)) ||
+	    pci_set_dma_mask(pdev, DMA_BIT_MASK(28))) {
+		pr_err("No usable DMA configuration\n");
+		return -EIO;
+	}
+
+	i = pci_request_regions(pdev, "wanXL");
+	if (i) {
+		pci_disable_device(pdev);
+		return i;
+	}
+
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_SBE_WANXL100: ports = 1; break;
+	case PCI_DEVICE_ID_SBE_WANXL200: ports = 2; break;
+	default: ports = 4;
+	}
+
+	alloc_size = sizeof(card_t) + ports * sizeof(port_t);
+	card = kzalloc(alloc_size, GFP_KERNEL);
+	if (card == NULL) {
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		return -ENOBUFS;
+	}
+
+	pci_set_drvdata(pdev, card);
+	card->pdev = pdev;
+
+	card->status = pci_alloc_consistent(pdev, sizeof(card_status_t),
+					    &card->status_address);
+	if (card->status == NULL) {
+		wanxl_pci_remove_one(pdev);
+		return -ENOBUFS;
+	}
+
+#ifdef DEBUG_PCI
+	printk(KERN_DEBUG "wanXL %s: pci_alloc_consistent() returned memory"
+	       " at 0x%LX\n", pci_name(pdev),
+	       (unsigned long long)card->status_address);
+#endif
+
+	/* FIXME when PCI/DMA subsystems are fixed.
+	   We set both dma_mask and consistent_dma_mask back to 32 bits
+	   to indicate the card can do 32-bit DMA addressing */
+	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) ||
+	    pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+		pr_err("No usable DMA configuration\n");
+		wanxl_pci_remove_one(pdev);
+		return -EIO;
+	}
+
+	/* set up PLX mapping */
+	plx_phy = pci_resource_start(pdev, 0);
+
+	card->plx = ioremap_nocache(plx_phy, 0x70);
+	if (!card->plx) {
+		pr_err("ioremap() failed\n");
+		wanxl_pci_remove_one(pdev);
+		return -EFAULT;
+	}
+
+#if RESET_WHILE_LOADING
+	wanxl_reset(card);
+#endif
+
+	timeout = jiffies + 20 * HZ;
+	while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) {
+		if (time_before(timeout, jiffies)) {
+			pr_warn("%s: timeout waiting for PUTS to complete\n",
+				pci_name(pdev));
+			wanxl_pci_remove_one(pdev);
+			return -ENODEV;
+		}
+
+		switch (stat & 0xC0) {
+		case 0x00:	/* hmm - PUTS completed with non-zero code? */
+		case 0x80:	/* PUTS still testing the hardware */
+			break;
+
+		default:
+			pr_warn("%s: PUTS test 0x%X failed\n",
+				pci_name(pdev), stat & 0x30);
+			wanxl_pci_remove_one(pdev);
+			return -ENODEV;
+		}
+
+		schedule();
+	}
+
+	/* get on-board memory size (PUTS detects no more than 4 MB) */
+	ramsize = readl(card->plx + PLX_MAILBOX_2) & MBX2_MEMSZ_MASK;
+
+	/* set up on-board RAM mapping */
+	mem_phy = pci_resource_start(pdev, 2);
+
+
+	/* sanity check the board's reported memory size */
+	if (ramsize < BUFFERS_ADDR +
+	    (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) {
+		pr_warn("%s: not enough on-board RAM (%u bytes detected, %u bytes required)\n",
+			pci_name(pdev), ramsize,
+			BUFFERS_ADDR +
+			(TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports);
+		wanxl_pci_remove_one(pdev);
+		return -ENODEV;
+	}
+
+	if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) {
+		pr_warn("%s: unable to Set Byte Swap Mode\n", pci_name(pdev));
+		wanxl_pci_remove_one(pdev);
+		return -ENODEV;
+	}
+
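+	/* pre-allocate and DMA-map the host RX ring - one skb per
+	   RX_QUEUE_LENGTH slot, to be filled by the card */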
+	for (i = 0; i < RX_QUEUE_LENGTH; i++) {
+		struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH);
+		card->rx_skbs[i] = skb;
+		if (skb)
+			card->status->rx_descs[i].address =
+				pci_map_single(card->pdev, skb->data,
+					       BUFFER_LENGTH,
+					       PCI_DMA_FROMDEVICE);
+	}
+
+	mem = ioremap_nocache(mem_phy, PDM_OFFSET + sizeof(firmware));
+	if (!mem) {
+		pr_err("ioremap() failed\n");
+		wanxl_pci_remove_one(pdev);
+		return -EFAULT;
+	}
+
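+	/* copy the firmware image into on-board RAM at PDM_OFFSET,
+	   one 32-bit word at a time */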
+	for (i = 0; i < sizeof(firmware); i += 4)
+		writel(ntohl(*(__be32*)(firmware + i)), mem + PDM_OFFSET + i);
+
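+	/* pass the PCI addresses of the per-port status blocks and of the
+	   RX descriptor ring to the firmware (ch_status_addr and
+	   rx_descs_addr in wanxlfw.S) */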
+	for (i = 0; i < ports; i++)
+		writel(card->status_address +
+		       (void *)&card->status->port_status[i] -
+		       (void *)card->status, mem + PDM_OFFSET + 4 + i * 4);
+	writel(card->status_address, mem + PDM_OFFSET + 20);
+	writel(PDM_OFFSET, mem);
+	iounmap(mem);
+
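+	/* clear MAILBOX_5; the firmware writes a non-zero value there (the
+	   detected RAM size when DETECT_RAM is set) once it has finished
+	   initializing */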
+	writel(0, card->plx + PLX_MAILBOX_5);
+
+	if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) {
+		pr_warn("%s: unable to Abort and Jump\n", pci_name(pdev));
+		wanxl_pci_remove_one(pdev);
+		return -ENODEV;
+	}
+
+	stat = 0;
+	timeout = jiffies + 5 * HZ;
+	do {
+		if ((stat = readl(card->plx + PLX_MAILBOX_5)) != 0)
+			break;
+		schedule();
+	} while (time_after(timeout, jiffies));
+
+	if (!stat) {
+		pr_warn("%s: timeout while initializing card firmware\n",
+			pci_name(pdev));
+		wanxl_pci_remove_one(pdev);
+		return -ENODEV;
+	}
+
+#if DETECT_RAM
+	ramsize = stat;
+#endif
+
+	pr_info("%s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n",
+		pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);
+
+	/* Allocate IRQ */
+	if (request_irq(pdev->irq, wanxl_intr, IRQF_SHARED, "wanXL", card)) {
+		pr_warn("%s: could not allocate IRQ%i\n",
+			pci_name(pdev), pdev->irq);
+		wanxl_pci_remove_one(pdev);
+		return -EBUSY;
+	}
+	card->irq = pdev->irq;
+
+	for (i = 0; i < ports; i++) {
+		hdlc_device *hdlc;
+		port_t *port = &card->ports[i];
+		struct net_device *dev = alloc_hdlcdev(port);
+		if (!dev) {
+			pr_err("%s: unable to allocate memory\n",
+			       pci_name(pdev));
+			wanxl_pci_remove_one(pdev);
+			return -ENOMEM;
+		}
+
+		port->dev = dev;
+		hdlc = dev_to_hdlc(dev);
+		spin_lock_init(&port->lock);
+		dev->tx_queue_len = 50;
+		dev->netdev_ops = &wanxl_ops;
+		hdlc->attach = wanxl_attach;
+		hdlc->xmit = wanxl_xmit;
+		port->card = card;
+		port->node = i;
+		get_status(port)->clocking = CLOCK_EXT;
+		if (register_hdlc_device(dev)) {
+			pr_err("%s: unable to register hdlc device\n",
+			       pci_name(pdev));
+			free_netdev(dev);
+			wanxl_pci_remove_one(pdev);
+			return -ENOBUFS;
+		}
+		card->n_ports++;
+	}
+
+	pr_info("%s: port", pci_name(pdev));
+	for (i = 0; i < ports; i++)
+		pr_cont("%s #%i: %s",
+			i ? "," : "", i, card->ports[i].dev->name);
+	pr_cont("\n");
+
+	for (i = 0; i < ports; i++)
+		wanxl_cable_intr(&card->ports[i]); /* get carrier status etc.*/
+
+	return 0;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(wanxl_pci_tbl) = {
+	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
+	  PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
+	  PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL400, PCI_ANY_ID,
+	  PCI_ANY_ID, 0, 0, 0 },
+	{ 0, }
+};
+
+
+static struct pci_driver wanxl_pci_driver = {
+	.name		= "wanXL",
+	.id_table	= wanxl_pci_tbl,
+	.probe		= wanxl_pci_init_one,
+	.remove		= wanxl_pci_remove_one,
+};
+
+
+static int __init wanxl_init_module(void)
+{
+#ifdef MODULE
+	pr_info("%s\n", version);
+#endif
+	return pci_register_driver(&wanxl_pci_driver);
+}
+
+static void __exit wanxl_cleanup_module(void)
+{
+	pci_unregister_driver(&wanxl_pci_driver);
+}
+
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, wanxl_pci_tbl);
+
+module_init(wanxl_init_module);
+module_exit(wanxl_cleanup_module);
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/wanxl.h b/ap/os/linux/linux-3.4.x/drivers/net/wan/wanxl.h
new file mode 100644
index 0000000..3f86558
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/wanxl.h
@@ -0,0 +1,152 @@
+/*
+ * wanXL serial card driver for Linux
+ * definitions common to host driver and card firmware
+ *
+ * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#define RESET_WHILE_LOADING 0
+
+/* you must rebuild the firmware if any of the following is changed */
+#define DETECT_RAM 0		/* needed for > 4MB RAM, 16 MB maximum */
+#define QUICC_MEMCPY_USES_PLX 1	/* must be used if the host has > 256 MB RAM */
+
+
+#define STATUS_CABLE_V35	2
+#define STATUS_CABLE_X21	3
+#define STATUS_CABLE_V24	4
+#define STATUS_CABLE_EIA530	5
+#define STATUS_CABLE_INVALID	6
+#define STATUS_CABLE_NONE	7
+
+#define STATUS_CABLE_DCE	0x8000
+#define STATUS_CABLE_DSR	0x0010
+#define STATUS_CABLE_DCD	0x0008
+#define STATUS_CABLE_PM_SHIFT	5
+
+#define PDM_OFFSET 0x1000
+
+#define TX_BUFFERS 10		/* per port */
+#define RX_BUFFERS 30
+#define RX_QUEUE_LENGTH 40	/* card->host queue length - per card */
+
+#define PACKET_EMPTY		0x00
+#define PACKET_FULL		0x10
+#define PACKET_SENT		0x20 /* TX only */
+#define PACKET_UNDERRUN		0x30 /* TX only */
+#define PACKET_PORT_MASK	0x03 /* RX only */
+
+/* bit numbers in PLX9060 doorbell registers */
+#define DOORBELL_FROM_CARD_TX_0		0 /* packet sent by the card */
+#define DOORBELL_FROM_CARD_TX_1		1
+#define DOORBELL_FROM_CARD_TX_2		2
+#define DOORBELL_FROM_CARD_TX_3		3
+#define DOORBELL_FROM_CARD_RX		4
+#define DOORBELL_FROM_CARD_CABLE_0	5 /* cable/PM/etc. changed */
+#define DOORBELL_FROM_CARD_CABLE_1	6
+#define DOORBELL_FROM_CARD_CABLE_2	7
+#define DOORBELL_FROM_CARD_CABLE_3	8
+
+#define DOORBELL_TO_CARD_OPEN_0		0
+#define DOORBELL_TO_CARD_OPEN_1		1
+#define DOORBELL_TO_CARD_OPEN_2		2
+#define DOORBELL_TO_CARD_OPEN_3		3
+#define DOORBELL_TO_CARD_CLOSE_0	4
+#define DOORBELL_TO_CARD_CLOSE_1	5
+#define DOORBELL_TO_CARD_CLOSE_2	6
+#define DOORBELL_TO_CARD_CLOSE_3	7
+#define DOORBELL_TO_CARD_TX_0		8 /* outbound packet queued */
+#define DOORBELL_TO_CARD_TX_1		9
+#define DOORBELL_TO_CARD_TX_2		10
+#define DOORBELL_TO_CARD_TX_3		11
+
+/* firmware-only status bits, starting from last DOORBELL_TO_CARD + 1 */
+#define TASK_SCC_0			12
+#define TASK_SCC_1			13
+#define TASK_SCC_2			14
+#define TASK_SCC_3			15
+
+#define ALIGN32(x) (((x) + 3) & 0xFFFFFFFC)
+#define BUFFER_LENGTH	ALIGN32(HDLC_MAX_MRU + 4) /* 4 bytes for 32-bit CRC */
+
+/* Address of TX and RX buffers in 68360 address space */
+#define BUFFERS_ADDR	0x4000	/* 16 KB */
+
+#ifndef __ASSEMBLER__
+#define PLX_OFFSET		0
+#else
+#define PLX_OFFSET		PLX + 0x80
+#endif
+
+#define PLX_MAILBOX_0		(PLX_OFFSET + 0x40)
+#define PLX_MAILBOX_1		(PLX_OFFSET + 0x44)
+#define PLX_MAILBOX_2		(PLX_OFFSET + 0x48)
+#define PLX_MAILBOX_3		(PLX_OFFSET + 0x4C)
+#define PLX_MAILBOX_4		(PLX_OFFSET + 0x50)
+#define PLX_MAILBOX_5		(PLX_OFFSET + 0x54)
+#define PLX_MAILBOX_6		(PLX_OFFSET + 0x58)
+#define PLX_MAILBOX_7		(PLX_OFFSET + 0x5C)
+#define PLX_DOORBELL_TO_CARD	(PLX_OFFSET + 0x60)
+#define PLX_DOORBELL_FROM_CARD	(PLX_OFFSET + 0x64)
+#define PLX_INTERRUPT_CS	(PLX_OFFSET + 0x68)
+#define PLX_CONTROL		(PLX_OFFSET + 0x6C)
+
+#ifdef __ASSEMBLER__
+#define PLX_DMA_0_MODE		(PLX + 0x100)
+#define PLX_DMA_0_PCI		(PLX + 0x104)
+#define PLX_DMA_0_LOCAL		(PLX + 0x108)
+#define PLX_DMA_0_LENGTH	(PLX + 0x10C)
+#define PLX_DMA_0_DESC		(PLX + 0x110)
+#define PLX_DMA_1_MODE		(PLX + 0x114)
+#define PLX_DMA_1_PCI		(PLX + 0x118)
+#define PLX_DMA_1_LOCAL		(PLX + 0x11C)
+#define PLX_DMA_1_LENGTH	(PLX + 0x120)
+#define PLX_DMA_1_DESC		(PLX + 0x124)
+#define PLX_DMA_CMD_STS		(PLX + 0x128)
+#define PLX_DMA_ARBITR_0	(PLX + 0x12C)
+#define PLX_DMA_ARBITR_1	(PLX + 0x130)
+#endif
+
+#define DESC_LENGTH 12
+
+/* offsets from start of status_t */
+/* card to host */
+#define STATUS_OPEN		0
+#define STATUS_CABLE		(STATUS_OPEN + 4)
+#define STATUS_RX_OVERRUNS	(STATUS_CABLE + 4)
+#define STATUS_RX_FRAME_ERRORS	(STATUS_RX_OVERRUNS + 4)
+
+/* host to card */
+#define STATUS_PARITY		(STATUS_RX_FRAME_ERRORS + 4)
+#define STATUS_ENCODING		(STATUS_PARITY + 4)
+#define STATUS_CLOCKING		(STATUS_ENCODING + 4)
+#define STATUS_TX_DESCS		(STATUS_CLOCKING + 4)
+
+#ifndef __ASSEMBLER__
+
+typedef struct {
+	volatile u32 stat;
+	u32 address;		/* PCI address */
+	volatile u32 length;
+} desc_t;
+
+
+typedef struct {
+// Card to host
+	volatile u32 open;
+	volatile u32 cable;
+	volatile u32 rx_overruns;
+	volatile u32 rx_frame_errors;
+
+// Host to card
+	u32 parity;
+	u32 encoding;
+	u32 clocking;
+	desc_t tx_descs[TX_BUFFERS];
+}port_status_t;
+
+#endif /* __ASSEMBLER__ */
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/wanxlfw.S b/ap/os/linux/linux-3.4.x/drivers/net/wan/wanxlfw.S
new file mode 100644
index 0000000..73aae2b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/wanxlfw.S
@@ -0,0 +1,895 @@
+.psize 0
+/*
+  wanXL serial card driver for Linux
+  card firmware part
+
+  Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License
+  as published by the Free Software Foundation.
+
+
+
+
+	DPRAM BDs:
+	0x000 - 0x050 TX#0	0x050 - 0x140 RX#0
+	0x140 - 0x190 TX#1	0x190 - 0x280 RX#1
+	0x280 - 0x2D0 TX#2	0x2D0 - 0x3C0 RX#2
+	0x3C0 - 0x410 TX#3	0x410 - 0x500 RX#3
+
+
+	000 5FF 1536 Bytes Dual-Port RAM User Data / BDs
+	600 6FF 256 Bytes Dual-Port RAM User Data / BDs
+	700 7FF 256 Bytes Dual-Port RAM User Data / BDs
+	C00 CBF 192 Bytes Dual-Port RAM Parameter RAM Page 1
+	D00 DBF 192 Bytes Dual-Port RAM Parameter RAM Page 2
+	E00 EBF 192 Bytes Dual-Port RAM Parameter RAM Page 3
+	F00 FBF 192 Bytes Dual-Port RAM Parameter RAM Page 4
+
+	local interrupts		    level
+	NMI					7
+	PIT timer, CPM (RX/TX complete)		4
+	PCI9060	DMA and PCI doorbells		3
+	Cable - not used			1
+*/
+
+#include <linux/hdlc.h>
+#include "wanxl.h"
+
+/* memory addresses and offsets */
+
+MAX_RAM_SIZE	= 16 * 1024 * 1024	// max RAM supported by hardware
+
+PCI9060_VECTOR	= 0x0000006C
+CPM_IRQ_BASE	= 0x40
+ERROR_VECTOR	= CPM_IRQ_BASE * 4
+SCC1_VECTOR	= (CPM_IRQ_BASE + 0x1E) * 4
+SCC2_VECTOR	= (CPM_IRQ_BASE + 0x1D) * 4
+SCC3_VECTOR	= (CPM_IRQ_BASE + 0x1C) * 4
+SCC4_VECTOR	= (CPM_IRQ_BASE + 0x1B) * 4
+CPM_IRQ_LEVEL	= 4
+TIMER_IRQ	= 128
+TIMER_IRQ_LEVEL = 4
+PITR_CONST	= 0x100 + 16		// 1 Hz timer
+
+MBAR		= 0x0003FF00
+
+VALUE_WINDOW	= 0x40000000
+ORDER_WINDOW	= 0xC0000000
+
+PLX		= 0xFFF90000
+
+CSRA		= 0xFFFB0000
+CSRB		= 0xFFFB0002
+CSRC		= 0xFFFB0004
+CSRD		= 0xFFFB0006
+STATUS_CABLE_LL		= 0x2000
+STATUS_CABLE_DTR	= 0x1000
+
+DPRBASE		= 0xFFFC0000
+
+SCC1_BASE	= DPRBASE + 0xC00
+MISC_BASE	= DPRBASE + 0xCB0
+SCC2_BASE	= DPRBASE + 0xD00
+SCC3_BASE	= DPRBASE + 0xE00
+SCC4_BASE	= DPRBASE + 0xF00
+
+// offset from SCCx_BASE
+// SCC_xBASE contain offsets from DPRBASE and must be divisible by 8
+SCC_RBASE	= 0		// 16-bit RxBD base address
+SCC_TBASE	= 2		// 16-bit TxBD base address
+SCC_RFCR	= 4		// 8-bit Rx function code
+SCC_TFCR	= 5		// 8-bit Tx function code
+SCC_MRBLR	= 6		// 16-bit maximum Rx buffer length
+SCC_C_MASK	= 0x34		// 32-bit CRC constant
+SCC_C_PRES	= 0x38		// 32-bit CRC preset
+SCC_MFLR	= 0x46		// 16-bit max Rx frame length (without flags)
+
+REGBASE		= DPRBASE + 0x1000
+PICR		= REGBASE + 0x026	// 16-bit periodic irq control
+PITR		= REGBASE + 0x02A	// 16-bit periodic irq timing
+OR1		= REGBASE + 0x064	// 32-bit RAM bank #1 options
+CICR		= REGBASE + 0x540	// 32(24)-bit CP interrupt config
+CIMR		= REGBASE + 0x548	// 32-bit CP interrupt mask
+CISR		= REGBASE + 0x54C	// 32-bit CP interrupts in-service
+PADIR		= REGBASE + 0x550	// 16-bit PortA data direction bitmap
+PAPAR		= REGBASE + 0x552	// 16-bit PortA pin assignment bitmap
+PAODR		= REGBASE + 0x554	// 16-bit PortA open drain bitmap
+PADAT		= REGBASE + 0x556	// 16-bit PortA data register
+
+PCDIR		= REGBASE + 0x560	// 16-bit PortC data direction bitmap
+PCPAR		= REGBASE + 0x562	// 16-bit PortC pin assignment bitmap
+PCSO		= REGBASE + 0x564	// 16-bit PortC special options
+PCDAT		= REGBASE + 0x566	// 16-bit PortC data register
+PCINT		= REGBASE + 0x568	// 16-bit PortC interrupt control
+CR		= REGBASE + 0x5C0	// 16-bit Command register
+
+SCC1_REGS	= REGBASE + 0x600
+SCC2_REGS	= REGBASE + 0x620
+SCC3_REGS	= REGBASE + 0x640
+SCC4_REGS	= REGBASE + 0x660
+SICR		= REGBASE + 0x6EC	// 32-bit SI clock route
+
+// offset from SCCx_REGS
+SCC_GSMR_L	= 0x00	// 32 bits
+SCC_GSMR_H	= 0x04	// 32 bits
+SCC_PSMR	= 0x08	// 16 bits
+SCC_TODR	= 0x0C	// 16 bits
+SCC_DSR		= 0x0E	// 16 bits
+SCC_SCCE	= 0x10	// 16 bits
+SCC_SCCM	= 0x14	// 16 bits
+SCC_SCCS	= 0x17	// 8 bits
+
+#if QUICC_MEMCPY_USES_PLX
+	.macro memcpy_from_pci src, dest, len // len must be < 8 MB
+	addl #3, \len
+	andl #0xFFFFFFFC, \len		// always copy n * 4 bytes
+	movel \src, PLX_DMA_0_PCI
+	movel \dest, PLX_DMA_0_LOCAL
+	movel \len, PLX_DMA_0_LENGTH
+	movel #0x0103, PLX_DMA_CMD_STS	// start channel 0 transfer
+	bsr memcpy_from_pci_run
+	.endm
+
+	.macro memcpy_to_pci src, dest, len
+	addl #3, \len
+	andl #0xFFFFFFFC, \len		// always copy n * 4 bytes
+	movel \src, PLX_DMA_1_LOCAL
+	movel \dest, PLX_DMA_1_PCI
+	movel \len, PLX_DMA_1_LENGTH
+	movel #0x0301, PLX_DMA_CMD_STS	// start channel 1 transfer
+	bsr memcpy_to_pci_run
+	.endm
+
+#else
+
+	.macro memcpy src, dest, len	// len must be < 65536 bytes
+	movel %d7, -(%sp)		// src and dest must be < 256 MB
+	movel \len, %d7			// bits 0 and 1
+	lsrl #2, \len
+	andl \len, \len
+	beq 99f				// only 0 - 3 bytes
+	subl #1, \len			// for dbf
+98:	movel (\src)+, (\dest)+
+	dbfw \len, 98b
+99:	movel %d7, \len
+	btstl #1, \len
+	beq 99f
+	movew (\src)+, (\dest)+
+99:	btstl #0, \len
+	beq 99f
+	moveb (\src)+, (\dest)+
+99:
+	movel (%sp)+, %d7
+	.endm
+
+	.macro memcpy_from_pci src, dest, len
+	addl #VALUE_WINDOW, \src
+	memcpy \src, \dest, \len
+	.endm
+
+	.macro memcpy_to_pci src, dest, len
+	addl #VALUE_WINDOW, \dest
+	memcpy \src, \dest, \len
+	.endm
+#endif
+
+
+	.macro wait_for_command
+99:	btstl #0, CR
+	bne 99b
+	.endm
+
+
+
+
+/****************************** card initialization *******************/
+	.text
+	.global _start
+_start:	bra init
+
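+// the host driver fills in these PCI addresses (at _start+4 .. _start+20)
+// before it issues the Abort-and-Jump command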
+	.org _start + 4
+ch_status_addr:	.long 0, 0, 0, 0
+rx_descs_addr:	.long 0
+
+init:
+#if DETECT_RAM
+	movel OR1, %d0
+	andl #0xF00007FF, %d0		// mask AMxx bits
+	orl #0xFFFF800 & ~(MAX_RAM_SIZE - 1), %d0 // update RAM bank size
+	movel %d0, OR1
+#endif
+
+	addl #VALUE_WINDOW, rx_descs_addr // PCI addresses of shared data
+	clrl %d0			// D0 = 4 * port
+init_1:	tstl ch_status_addr(%d0)
+	beq init_2
+	addl #VALUE_WINDOW, ch_status_addr(%d0)
+init_2:	addl #4, %d0
+	cmpl #4 * 4, %d0
+	bne init_1
+
+	movel #pci9060_interrupt, PCI9060_VECTOR
+	movel #error_interrupt, ERROR_VECTOR
+	movel #port_interrupt_1, SCC1_VECTOR
+	movel #port_interrupt_2, SCC2_VECTOR
+	movel #port_interrupt_3, SCC3_VECTOR
+	movel #port_interrupt_4, SCC4_VECTOR
+	movel #timer_interrupt, TIMER_IRQ * 4
+
+	movel #0x78000000, CIMR		// only SCCx IRQs from CPM
+	movew #(TIMER_IRQ_LEVEL << 8) + TIMER_IRQ, PICR	// interrupt from PIT
+	movew #PITR_CONST, PITR
+
+	// SCC1=SCCa SCC2=SCCb SCC3=SCCc SCC4=SCCd prio=4 HP=-1 IRQ=64-79
+	movel #0xD41F40 + (CPM_IRQ_LEVEL << 13), CICR
+	movel #0x543, PLX_DMA_0_MODE	// 32-bit, Ready, Burst, IRQ
+	movel #0x543, PLX_DMA_1_MODE
+	movel #0x0, PLX_DMA_0_DESC	// from PCI to local
+	movel #0x8, PLX_DMA_1_DESC	// from local to PCI
+	movel #0x101, PLX_DMA_CMD_STS	// enable both DMA channels
+	// enable local IRQ, DMA, doorbells and PCI IRQ
+	orl #0x000F0300, PLX_INTERRUPT_CS
+
+#if DETECT_RAM
+	bsr ram_test
+#else
+	movel #1, PLX_MAILBOX_5		// non-zero value = init complete
+#endif
+	bsr check_csr
+
+	movew #0xFFFF, PAPAR		// all pins are clocks/data
+	clrw PADIR			// first function
+	clrw PCSO			// CD and CTS always active
+
+
+/****************************** main loop *****************************/
+
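+// D7 accumulates the doorbell and SCC IRQ status bits; it is shifted right
+// by one after each port so the _0 bit positions always refer to the port
+// currently being serviced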
+main:	movel channel_stats, %d7	// D7 = doorbell + irq status
+	clrl channel_stats
+
+	tstl %d7
+	bne main_1
+	// nothing to do - wait for next event
+	stop #0x2200			// supervisor + IRQ level 2
+	movew #0x2700, %sr		// disable IRQs again
+	bra main
+
+main_1:	clrl %d0			// D0 = 4 * port
+	clrl %d6			// D6 = doorbell to host value
+
+main_l: btstl #DOORBELL_TO_CARD_CLOSE_0, %d7
+	beq main_op
+	bclrl #DOORBELL_TO_CARD_OPEN_0, %d7 // in case both bits are set
+	bsr close_port
+main_op:
+	btstl #DOORBELL_TO_CARD_OPEN_0, %d7
+	beq main_cl
+	bsr open_port
+main_cl:
+	btstl #DOORBELL_TO_CARD_TX_0, %d7
+	beq main_txend
+	bsr tx
+main_txend:
+	btstl #TASK_SCC_0, %d7
+	beq main_next
+	bsr tx_end
+	bsr rx
+
+main_next:
+	lsrl #1, %d7			// port status for next port
+	addl #4, %d0			// D0 = 4 * next port
+	cmpl #4 * 4, %d0
+	bne main_l
+	movel %d6, PLX_DOORBELL_FROM_CARD // signal the host
+	bra main
+
+
+/****************************** open port *****************************/
+
+open_port:				// D0 = 4 * port, D6 = doorbell to host
+	movel ch_status_addr(%d0), %a0	// A0 = port status address
+	tstl STATUS_OPEN(%a0)
+	bne open_port_ret		// port already open
+	movel #1, STATUS_OPEN(%a0)	// confirm the port is open
+// setup BDs
+	clrl tx_in(%d0)
+	clrl tx_out(%d0)
+	clrl tx_count(%d0)
+	clrl rx_in(%d0)
+
+	movel SICR, %d1			// D1 = clock settings in SICR
+	andl clocking_mask(%d0), %d1
+	cmpl #CLOCK_TXFROMRX, STATUS_CLOCKING(%a0)
+	bne open_port_clock_ext
+	orl clocking_txfromrx(%d0), %d1
+	bra open_port_set_clock
+
+open_port_clock_ext:
+	orl clocking_ext(%d0), %d1
+open_port_set_clock:
+	movel %d1, SICR			// update clock settings in SICR
+
+	orw #STATUS_CABLE_DTR, csr_output(%d0)	// DTR on
+	bsr check_csr			// call with disabled timer interrupt
+
+// Setup TX descriptors
+	movel first_buffer(%d0), %d1	// D1 = starting buffer address
+	movel tx_first_bd(%d0), %a1	// A1 = starting TX BD address
+	movel #TX_BUFFERS - 2, %d2	// D2 = TX_BUFFERS - 1 counter
+	movel #0x18000000, %d3		// D3 = initial TX BD flags: Int + Last
+	cmpl #PARITY_NONE, STATUS_PARITY(%a0)
+	beq open_port_tx_loop
+	bsetl #26, %d3			// TX BD flag: Transmit CRC
+open_port_tx_loop:
+	movel %d3, (%a1)+		// TX flags + length
+	movel %d1, (%a1)+		// buffer address
+	addl #BUFFER_LENGTH, %d1
+	dbfw %d2, open_port_tx_loop
+
+	bsetl #29, %d3			// TX BD flag: Wrap (last BD)
+	movel %d3, (%a1)+		// Final TX flags + length
+	movel %d1, (%a1)+		// buffer address
+
+// Setup RX descriptors			// A1 = starting RX BD address
+	movel #RX_BUFFERS - 2, %d2	// D2 = RX_BUFFERS - 1 counter
+open_port_rx_loop:
+	movel #0x90000000, (%a1)+	// RX flags + length
+	movel %d1, (%a1)+		// buffer address
+	addl #BUFFER_LENGTH, %d1
+	dbfw %d2, open_port_rx_loop
+
+	movel #0xB0000000, (%a1)+	// Final RX flags + length
+	movel %d1, (%a1)+		// buffer address
+
+// Setup port parameters
+	movel scc_base_addr(%d0), %a1	// A1 = SCC_BASE address
+	movel scc_reg_addr(%d0), %a2	// A2 = SCC_REGS address
+
+	movel #0xFFFF, SCC_SCCE(%a2)	// clear status bits
+	movel #0x0000, SCC_SCCM(%a2)	// interrupt mask
+
+	movel tx_first_bd(%d0), %d1
+	movew %d1, SCC_TBASE(%a1)	// D1 = offset of first TxBD
+	addl #TX_BUFFERS * 8, %d1
+	movew %d1, SCC_RBASE(%a1)	// D1 = offset of first RxBD
+	moveb #0x8, SCC_RFCR(%a1)	// Intel mode, 1000
+	moveb #0x8, SCC_TFCR(%a1)
+
+// Parity settings
+	cmpl #PARITY_CRC16_PR1_CCITT, STATUS_PARITY(%a0)
+	bne open_port_parity_1
+	clrw SCC_PSMR(%a2)		// CRC16-CCITT
+	movel #0xF0B8, SCC_C_MASK(%a1)
+	movel #0xFFFF, SCC_C_PRES(%a1)
+	movew #HDLC_MAX_MRU + 2, SCC_MFLR(%a1) // 2 bytes for CRC
+	movew #2, parity_bytes(%d0)
+	bra open_port_2
+
+open_port_parity_1:
+	cmpl #PARITY_CRC32_PR1_CCITT, STATUS_PARITY(%a0)
+	bne open_port_parity_2
+	movew #0x0800, SCC_PSMR(%a2)	// CRC32-CCITT
+	movel #0xDEBB20E3, SCC_C_MASK(%a1)
+	movel #0xFFFFFFFF, SCC_C_PRES(%a1)
+	movew #HDLC_MAX_MRU + 4, SCC_MFLR(%a1) // 4 bytes for CRC
+	movew #4, parity_bytes(%d0)
+	bra open_port_2
+
+open_port_parity_2:
+	cmpl #PARITY_CRC16_PR0_CCITT, STATUS_PARITY(%a0)
+	bne open_port_parity_3
+	clrw SCC_PSMR(%a2)		// CRC16-CCITT preset 0
+	movel #0xF0B8, SCC_C_MASK(%a1)
+	clrl SCC_C_PRES(%a1)
+	movew #HDLC_MAX_MRU + 2, SCC_MFLR(%a1) // 2 bytes for CRC
+	movew #2, parity_bytes(%d0)
+	bra open_port_2
+
+open_port_parity_3:
+	cmpl #PARITY_CRC32_PR0_CCITT, STATUS_PARITY(%a0)
+	bne open_port_parity_4
+	movew #0x0800, SCC_PSMR(%a2)	// CRC32-CCITT preset 0
+	movel #0xDEBB20E3, SCC_C_MASK(%a1)
+	clrl SCC_C_PRES(%a1)
+	movew #HDLC_MAX_MRU + 4, SCC_MFLR(%a1) // 4 bytes for CRC
+	movew #4, parity_bytes(%d0)
+	bra open_port_2
+
+open_port_parity_4:
+	clrw SCC_PSMR(%a2)		// no parity
+	movel #0xF0B8, SCC_C_MASK(%a1)
+	movel #0xFFFF, SCC_C_PRES(%a1)
+	movew #HDLC_MAX_MRU, SCC_MFLR(%a1) // 0 bytes for CRC
+	clrw parity_bytes(%d0)
+
+open_port_2:
+	movel #0x00000003, SCC_GSMR_H(%a2) // RTSM
+	cmpl #ENCODING_NRZI, STATUS_ENCODING(%a0)
+	bne open_port_nrz
+	movel #0x10040900, SCC_GSMR_L(%a2) // NRZI: TCI Tend RECN+TENC=1
+	bra open_port_3
+
+open_port_nrz:
+	movel #0x10040000, SCC_GSMR_L(%a2) // NRZ: TCI Tend RECN+TENC=0
+open_port_3:
+	movew #BUFFER_LENGTH, SCC_MRBLR(%a1)
+	movel %d0, %d1
+	lsll #4, %d1			// D1 bits 7 and 6 = port
+	orl #1, %d1
+	movew %d1, CR			// Init SCC RX and TX params
+	wait_for_command
+
+	// TCI Tend ENR ENT
+	movew #0x001F, SCC_SCCM(%a2)	// TXE RXF BSY TXB RXB interrupts
+	orl #0x00000030, SCC_GSMR_L(%a2) // enable SCC
+open_port_ret:
+	rts
+
+
+/****************************** close port ****************************/
+
+close_port:				// D0 = 4 * port, D6 = doorbell to host
+	movel scc_reg_addr(%d0), %a0	// A0 = SCC_REGS address
+	clrw SCC_SCCM(%a0)		// no SCC interrupts
+	andl #0xFFFFFFCF, SCC_GSMR_L(%a0) // Disable ENT and ENR
+
+	andw #~STATUS_CABLE_DTR, csr_output(%d0) // DTR off
+	bsr check_csr			// call with disabled timer interrupt
+
+	movel ch_status_addr(%d0), %d1
+	clrl STATUS_OPEN(%d1)		// confirm the port is closed
+	rts
+
+
+/****************************** transmit packet ***********************/
+// queue packets for transmission
+tx:					// D0 = 4 * port, D6 = doorbell to host
+	cmpl #TX_BUFFERS, tx_count(%d0)
+	beq tx_ret			// all TX buffer descriptors in use
+
+	movel tx_out(%d0), %d1
+	movel %d1, %d2			// D1 = D2 = tx_out BD# = desc#
+	mulul #DESC_LENGTH, %d2		// D2 = TX desc offset
+	addl ch_status_addr(%d0), %d2
+	addl #STATUS_TX_DESCS, %d2	// D2 = TX desc address
+	cmpl #PACKET_FULL, (%d2)	// desc status
+	bne tx_ret
+
+// queue it
+	movel 4(%d2), %a0		// PCI address
+	lsll #3, %d1			// BD is 8-bytes long
+	addl tx_first_bd(%d0), %d1	// D1 = current tx_out BD addr
+
+	movel 4(%d1), %a1		// A1 = dest address
+	movel 8(%d2), %d2		// D2 = length
+	movew %d2, 2(%d1)		// length into BD
+	memcpy_from_pci %a0, %a1, %d2
+	bsetl #31, (%d1)		// CP go ahead
+
+// update tx_out and tx_count
+	movel tx_out(%d0), %d1
+	addl #1, %d1
+	cmpl #TX_BUFFERS, %d1
+	bne tx_1
+	clrl %d1
+tx_1:	movel %d1, tx_out(%d0)
+
+	addl #1, tx_count(%d0)
+	bra tx
+
+tx_ret: rts
+
+
+/****************************** packet received ***********************/
+
+// Service receive buffers		// D0 = 4 * port, D6 = doorbell to host
+rx:	movel rx_in(%d0), %d1		// D1 = rx_in BD#
+	lsll #3, %d1			// BD is 8-bytes long
+	addl rx_first_bd(%d0), %d1	// D1 = current rx_in BD address
+	movew (%d1), %d2		// D2 = RX BD flags
+	btstl #15, %d2
+	bne rx_ret			// BD still empty
+
+	btstl #1, %d2
+	bne rx_overrun
+
+	tstw parity_bytes(%d0)
+	bne rx_parity
+	bclrl #2, %d2			// do not test for CRC errors
+rx_parity:
+	andw #0x0CBC, %d2		// mask status bits
+	cmpw #0x0C00, %d2		// correct frame
+	bne rx_bad_frame
+	clrl %d3
+	movew 2(%d1), %d3
+	subw parity_bytes(%d0), %d3	// D3 = packet length
+	cmpw #HDLC_MAX_MRU, %d3
+	bgt rx_bad_frame
+
+rx_good_frame:
+	movel rx_out, %d2
+	mulul #DESC_LENGTH, %d2
+	addl rx_descs_addr, %d2		// D2 = RX desc address
+	cmpl #PACKET_EMPTY, (%d2)	// desc stat
+	bne rx_overrun
+
+	movel %d3, 8(%d2)
+	movel 4(%d1), %a0		// A0 = source address
+	movel 4(%d2), %a1
+	tstl %a1
+	beq rx_ignore_data
+	memcpy_to_pci %a0, %a1, %d3
+rx_ignore_data:
+	movel packet_full(%d0), (%d2)	// update desc stat
+
+// update D6 and rx_out
+	bsetl #DOORBELL_FROM_CARD_RX, %d6 // signal host that RX completed
+	movel rx_out, %d2
+	addl #1, %d2
+	cmpl #RX_QUEUE_LENGTH, %d2
+	bne rx_1
+	clrl %d2
+rx_1:	movel %d2, rx_out
+
+rx_free_bd:
+	andw #0xF000, (%d1)		// clear CM and error bits
+	bsetl #31, (%d1)		// free BD
+// update rx_in
+	movel rx_in(%d0), %d1
+	addl #1, %d1
+	cmpl #RX_BUFFERS, %d1
+	bne rx_2
+	clrl %d1
+rx_2:	movel %d1, rx_in(%d0)
+	bra rx
+
+rx_overrun:
+	movel ch_status_addr(%d0), %d2
+	addl #1, STATUS_RX_OVERRUNS(%d2)
+	bra rx_free_bd
+
+rx_bad_frame:
+	movel ch_status_addr(%d0), %d2
+	addl #1, STATUS_RX_FRAME_ERRORS(%d2)
+	bra rx_free_bd
+
+rx_ret: rts
+
+
+/****************************** packet transmitted ********************/
+
+// Service transmit buffers		// D0 = 4 * port, D6 = doorbell to host
+tx_end:	tstl tx_count(%d0)
+	beq tx_end_ret			// TX buffers already empty
+
+	movel tx_in(%d0), %d1
+	movel %d1, %d2			// D1 = D2 = tx_in BD# = desc#
+	lsll #3, %d1			// BD is 8-bytes long
+	addl tx_first_bd(%d0), %d1	// D1 = current tx_in BD address
+	movew (%d1), %d3		// D3 = TX BD flags
+	btstl #15, %d3
+	bne tx_end_ret			// BD still being transmitted
+
+// update D6, tx_in and tx_count
+	orl bell_tx(%d0), %d6		// signal host that TX desc freed
+	subl #1, tx_count(%d0)
+	movel tx_in(%d0), %d1
+	addl #1, %d1
+	cmpl #TX_BUFFERS, %d1
+	bne tx_end_1
+	clrl %d1
+tx_end_1:
+	movel %d1, tx_in(%d0)
+
+// free host's descriptor
+	mulul #DESC_LENGTH, %d2		// D2 = TX desc offset
+	addl ch_status_addr(%d0), %d2
+	addl #STATUS_TX_DESCS, %d2	// D2 = TX desc address
+	btstl #1, %d3
+	bne tx_end_underrun
+	movel #PACKET_SENT, (%d2)
+	bra tx_end
+
+tx_end_underrun:
+	movel #PACKET_UNDERRUN, (%d2)
+	bra tx_end
+
+tx_end_ret: rts
+
+
+/****************************** PLX PCI9060 DMA memcpy ****************/
+
+#if QUICC_MEMCPY_USES_PLX
+// called with interrupts disabled
+memcpy_from_pci_run:
+	movel %d0, -(%sp)
+	movew %sr, -(%sp)
+memcpy_1:
+	movel PLX_DMA_CMD_STS, %d0	// do not btst PLX register directly
+	btstl #4, %d0			// transfer done?
+	bne memcpy_end
+	stop #0x2200			// enable PCI9060 interrupts
+	movew #0x2700, %sr		// disable interrupts again
+	bra memcpy_1
+
+memcpy_to_pci_run:
+	movel %d0, -(%sp)
+	movew %sr, -(%sp)
+memcpy_2:
+	movel PLX_DMA_CMD_STS, %d0	// do not btst PLX register directly
+	btstl #12, %d0			// transfer done?
+	bne memcpy_end
+	stop #0x2200			// enable PCI9060 interrupts
+	movew #0x2700, %sr		// disable interrupts again
+	bra memcpy_2
+
+memcpy_end:
+	movew (%sp)+, %sr
+	movel (%sp)+, %d0
+	rts
+#endif
+
+
+
+
+
+
+/****************************** PLX PCI9060 interrupt *****************/
+
+pci9060_interrupt:
+	movel %d0, -(%sp)
+
+	movel PLX_DOORBELL_TO_CARD, %d0
+	movel %d0, PLX_DOORBELL_TO_CARD	// confirm all requests
+	orl %d0, channel_stats
+
+	movel #0x0909, PLX_DMA_CMD_STS	// clear DMA ch #0 and #1 interrupts
+
+	movel (%sp)+, %d0
+	rte
+
+/****************************** SCC interrupts ************************/
+
+port_interrupt_1:
+	orl #0, SCC1_REGS + SCC_SCCE; // confirm SCC events
+	orl #1 << TASK_SCC_0, channel_stats
+	movel #0x40000000, CISR
+	rte
+
+port_interrupt_2:
+	orl #0, SCC2_REGS + SCC_SCCE; // confirm SCC events
+	orl #1 << TASK_SCC_1, channel_stats
+	movel #0x20000000, CISR
+	rte
+
+port_interrupt_3:
+	orl #0, SCC3_REGS + SCC_SCCE; // confirm SCC events
+	orl #1 << TASK_SCC_2, channel_stats
+	movel #0x10000000, CISR
+	rte
+
+port_interrupt_4:
+	orl #0, SCC4_REGS + SCC_SCCE; // confirm SCC events
+	orl #1 << TASK_SCC_3, channel_stats
+	movel #0x08000000, CISR
+	rte
+
+error_interrupt:
+	rte
+
+
+/****************************** cable and PM routine ******************/
+// modified registers: none
+check_csr:
+	movel %d0, -(%sp)
+	movel %d1, -(%sp)
+	movel %d2, -(%sp)
+	movel %a0, -(%sp)
+	movel %a1, -(%sp)
+
+	clrl %d0			// D0 = 4 * port
+	movel #CSRA, %a0		// A0 = CSR address
+
+check_csr_loop:
+	movew (%a0), %d1		// D1 = CSR input bits
+	andl #0xE7, %d1			// PM and cable sense bits (no DCE bit)
+	cmpw #STATUS_CABLE_V35 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
+	bne check_csr_1
+	movew #0x0E08, %d1
+	bra check_csr_valid
+
+check_csr_1:
+	cmpw #STATUS_CABLE_X21 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
+	bne check_csr_2
+	movew #0x0408, %d1
+	bra check_csr_valid
+
+check_csr_2:
+	cmpw #STATUS_CABLE_V24 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
+	bne check_csr_3
+	movew #0x0208, %d1
+	bra check_csr_valid
+
+check_csr_3:
+	cmpw #STATUS_CABLE_EIA530 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
+	bne check_csr_disable
+	movew #0x0D08, %d1
+	bra check_csr_valid
+
+check_csr_disable:
+	movew #0x0008, %d1		// D1 = disable everything
+	movew #0x80E7, %d2		// D2 = input mask: ignore DSR
+	bra check_csr_write
+
+check_csr_valid:			// D1 = mode and IRQ bits
+	movew csr_output(%d0), %d2
+	andw #0x3000, %d2		// D2 = requested LL and DTR bits
+	orw %d2, %d1			// D1 = all requested output bits
+	movew #0x80FF, %d2		// D2 = input mask: include DSR
+
+check_csr_write:
+	cmpw old_csr_output(%d0), %d1
+	beq check_csr_input
+	movew %d1, old_csr_output(%d0)
+	movew %d1, (%a0)		// Write CSR output bits
+
+check_csr_input:
+	movew (PCDAT), %d1
+	andw dcd_mask(%d0), %d1
+	beq check_csr_dcd_on		// DCD and CTS signals are negated
+	movew (%a0), %d1		// D1 = CSR input bits
+	andw #~STATUS_CABLE_DCD, %d1	// DCD off
+	bra check_csr_previous
+
+check_csr_dcd_on:
+	movew (%a0), %d1		// D1 = CSR input bits
+	orw #STATUS_CABLE_DCD, %d1	// DCD on
+check_csr_previous:
+	andw %d2, %d1			// input mask
+	movel ch_status_addr(%d0), %a1
+	cmpl STATUS_CABLE(%a1), %d1	// check for change
+	beq check_csr_next
+	movel %d1, STATUS_CABLE(%a1)	// update status
+	movel bell_cable(%d0), PLX_DOORBELL_FROM_CARD	// signal the host
+
+check_csr_next:
+	addl #2, %a0			// next CSR register
+	addl #4, %d0			// D0 = 4 * next port
+	cmpl #4 * 4, %d0
+	bne check_csr_loop
+
+	movel (%sp)+, %a1
+	movel (%sp)+, %a0
+	movel (%sp)+, %d2
+	movel (%sp)+, %d1
+	movel (%sp)+, %d0
+	rts
+
+
+/****************************** timer interrupt ***********************/
+
+timer_interrupt:
+	bsr check_csr
+	rte
+
+
+/****************************** RAM sizing and test *******************/
+#if DETECT_RAM
+ram_test:
+	movel #0x12345678, %d1		// D1 = test value
+	movel %d1, (128 * 1024 - 4)
+	movel #128 * 1024, %d0		// D0 = RAM size tested
+ram_test_size:
+	cmpl #MAX_RAM_SIZE, %d0
+	beq ram_test_size_found
+	movel %d0, %a0
+	addl #128 * 1024 - 4, %a0
+	cmpl (%a0), %d1
+	beq ram_test_size_check
+ram_test_next_size:
+	lsll #1, %d0
+	bra ram_test_size
+
+ram_test_size_check:
+	eorl #0xFFFFFFFF, %d1
+	movel %d1, (128 * 1024 - 4)
+	cmpl (%a0), %d1
+	bne ram_test_next_size
+
+ram_test_size_found:			// D0 = RAM size
+	movel %d0, %a0			// A0 = fill ptr
+	subl #firmware_end + 4, %d0
+	lsrl #2, %d0
+	movel %d0, %d1			// D1 = DBf counter
+ram_test_fill:
+	movel %a0, -(%a0)
+	dbfw %d1, ram_test_fill
+	subl #0x10000, %d1
+	cmpl #0xFFFFFFFF, %d1
+	bne ram_test_fill
+
+ram_test_loop:				// D0 = DBf counter
+	cmpl (%a0)+, %a0
+	dbnew %d0, ram_test_loop
+	bne ram_test_found_bad
+	subl #0x10000, %d0
+	cmpl #0xFFFFFFFF, %d0
+	bne ram_test_loop
+	bra ram_test_all_ok
+
+ram_test_found_bad:
+	subl #4, %a0
+ram_test_all_ok:
+	movel %a0, PLX_MAILBOX_5
+	rts
+#endif
+
+
+/****************************** constants *****************************/
+
+scc_reg_addr:
+	.long SCC1_REGS, SCC2_REGS, SCC3_REGS, SCC4_REGS
+scc_base_addr:
+	.long SCC1_BASE, SCC2_BASE, SCC3_BASE, SCC4_BASE
+
+tx_first_bd:
+	.long DPRBASE
+	.long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8
+	.long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8 * 2
+	.long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8 * 3
+
+rx_first_bd:
+	.long DPRBASE + TX_BUFFERS * 8
+	.long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8
+	.long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8 * 2
+	.long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8 * 3
+
+first_buffer:
+	.long BUFFERS_ADDR
+	.long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH
+	.long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * 2
+	.long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * 3
+
+bell_tx:
+	.long 1 << DOORBELL_FROM_CARD_TX_0, 1 << DOORBELL_FROM_CARD_TX_1
+	.long 1 << DOORBELL_FROM_CARD_TX_2, 1 << DOORBELL_FROM_CARD_TX_3
+
+bell_cable:
+	.long 1 << DOORBELL_FROM_CARD_CABLE_0, 1 << DOORBELL_FROM_CARD_CABLE_1
+	.long 1 << DOORBELL_FROM_CARD_CABLE_2, 1 << DOORBELL_FROM_CARD_CABLE_3
+
+packet_full:
+	.long PACKET_FULL, PACKET_FULL + 1, PACKET_FULL + 2, PACKET_FULL + 3
+
+clocking_ext:
+	.long 0x0000002C, 0x00003E00, 0x002C0000, 0x3E000000
+clocking_txfromrx:
+	.long 0x0000002D, 0x00003F00, 0x002D0000, 0x3F000000
+clocking_mask:
+	.long 0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
+dcd_mask:
+	.word 0x020, 0, 0x080, 0, 0x200, 0, 0x800
+
+	.ascii "wanXL firmware\n"
+	.asciz "Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>\n"
+
+
+/****************************** variables *****************************/
+
+		.align 4
+channel_stats:	.long 0
+
+tx_in:		.long 0, 0, 0, 0	// transmitted
+tx_out:		.long 0, 0, 0, 0	// received from host for transmission
+tx_count:	.long 0, 0, 0, 0	// currently in transmit queue
+
+rx_in:		.long 0, 0, 0, 0	// received from port
+rx_out:		.long 0			// transmitted to host
+parity_bytes:	.word 0, 0, 0, 0, 0, 0, 0 // only 4 words are used
+
+csr_output:	.word 0
+old_csr_output:	.word 0, 0, 0, 0, 0, 0, 0
+		.align 4
+firmware_end:				// must be dword-aligned
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/wanxlfw.inc_shipped b/ap/os/linux/linux-3.4.x/drivers/net/wan/wanxlfw.inc_shipped
new file mode 100644
index 0000000..73da688
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/wanxlfw.inc_shipped
@@ -0,0 +1,158 @@
+static u8 firmware[]={
+0x60,0x00,0x00,0x16,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x06,0xB9,0x40,0x00,0x00,0x00,0x00,0x00,
+0x10,0x14,0x42,0x80,0x4A,0xB0,0x09,0xB0,0x00,0x00,0x10,0x04,0x67,0x00,0x00,0x0E,
+0x06,0xB0,0x40,0x00,0x00,0x00,0x09,0xB0,0x00,0x00,0x10,0x04,0x58,0x80,0x0C,0x80,
+0x00,0x00,0x00,0x10,0x66,0x00,0xFF,0xDE,0x21,0xFC,0x00,0x00,0x16,0xBC,0x00,0x6C,
+0x21,0xFC,0x00,0x00,0x17,0x5E,0x01,0x00,0x21,0xFC,0x00,0x00,0x16,0xDE,0x01,0x78,
+0x21,0xFC,0x00,0x00,0x16,0xFE,0x01,0x74,0x21,0xFC,0x00,0x00,0x17,0x1E,0x01,0x70,
+0x21,0xFC,0x00,0x00,0x17,0x3E,0x01,0x6C,0x21,0xFC,0x00,0x00,0x18,0x4C,0x02,0x00,
+0x23,0xFC,0x78,0x00,0x00,0x00,0xFF,0xFC,0x15,0x48,0x33,0xFC,0x04,0x80,0xFF,0xFC,
+0x10,0x26,0x33,0xFC,0x01,0x10,0xFF,0xFC,0x10,0x2A,0x23,0xFC,0x00,0xD4,0x9F,0x40,
+0xFF,0xFC,0x15,0x40,0x23,0xFC,0x00,0x00,0x05,0x43,0xFF,0xF9,0x01,0x00,0x23,0xFC,
+0x00,0x00,0x05,0x43,0xFF,0xF9,0x01,0x14,0x23,0xFC,0x00,0x00,0x00,0x00,0xFF,0xF9,
+0x01,0x10,0x23,0xFC,0x00,0x00,0x00,0x08,0xFF,0xF9,0x01,0x24,0x23,0xFC,0x00,0x00,
+0x01,0x01,0xFF,0xF9,0x01,0x28,0x00,0xB9,0x00,0x0F,0x03,0x00,0xFF,0xF9,0x00,0xE8,
+0x23,0xFC,0x00,0x00,0x00,0x01,0xFF,0xF9,0x00,0xD4,0x61,0x00,0x06,0x74,0x33,0xFC,
+0xFF,0xFF,0xFF,0xFC,0x15,0x52,0x42,0x79,0xFF,0xFC,0x15,0x50,0x42,0x79,0xFF,0xFC,
+0x15,0x64,0x2E,0x3A,0x08,0x50,0x42,0xB9,0x00,0x00,0x19,0x54,0x4A,0x87,0x66,0x00,
+0x00,0x0E,0x4E,0x72,0x22,0x00,0x46,0xFC,0x27,0x00,0x60,0x00,0xFF,0xE6,0x42,0x80,
+0x42,0x86,0x08,0x07,0x00,0x04,0x67,0x00,0x00,0x0A,0x08,0x87,0x00,0x00,0x61,0x00,
+0x02,0xA0,0x08,0x07,0x00,0x00,0x67,0x00,0x00,0x06,0x61,0x00,0x00,0x36,0x08,0x07,
+0x00,0x08,0x67,0x00,0x00,0x06,0x61,0x00,0x02,0xB8,0x08,0x07,0x00,0x0C,0x67,0x00,
+0x00,0x0A,0x61,0x00,0x04,0x94,0x61,0x00,0x03,0x60,0xE2,0x8F,0x58,0x80,0x0C,0x80,
+0x00,0x00,0x00,0x10,0x66,0x00,0xFF,0xBC,0x23,0xC6,0xFF,0xF9,0x00,0xE4,0x60,0x00,
+0xFF,0x92,0x20,0x70,0x09,0xB0,0x00,0x00,0x10,0x04,0x4A,0xA8,0x00,0x00,0x66,0x00,
+0x02,0x4E,0x21,0x7C,0x00,0x00,0x00,0x01,0x00,0x00,0x42,0xB0,0x09,0xB0,0x00,0x00,
+0x19,0x58,0x42,0xB0,0x09,0xB0,0x00,0x00,0x19,0x68,0x42,0xB0,0x09,0xB0,0x00,0x00,
+0x19,0x78,0x42,0xB0,0x09,0xB0,0x00,0x00,0x19,0x88,0x22,0x39,0xFF,0xFC,0x16,0xEC,
+0xC2,0xB0,0x09,0xB0,0x00,0x00,0x18,0xF2,0x0C,0xA8,0x00,0x00,0x00,0x04,0x00,0x18,
+0x66,0x00,0x00,0x0E,0x82,0xB0,0x09,0xB0,0x00,0x00,0x18,0xE2,0x60,0x00,0x00,0x0A,
+0x82,0xB0,0x09,0xB0,0x00,0x00,0x18,0xD2,0x23,0xC1,0xFF,0xFC,0x16,0xEC,0x00,0x70,
+0x10,0x00,0x09,0xB0,0x00,0x00,0x19,0xAA,0x61,0x00,0x05,0x76,0x22,0x30,0x09,0xB0,
+0x00,0x00,0x18,0x92,0x22,0x70,0x09,0xB0,0x00,0x00,0x18,0x72,0x74,0x08,0x26,0x3C,
+0x18,0x00,0x00,0x00,0x0C,0xA8,0x00,0x00,0x00,0x01,0x00,0x10,0x67,0x00,0x00,0x06,
+0x08,0xC3,0x00,0x1A,0x22,0xC3,0x22,0xC1,0x06,0x81,0x00,0x00,0x05,0xFC,0x51,0xCA,
+0xFF,0xF4,0x08,0xC3,0x00,0x1D,0x22,0xC3,0x22,0xC1,0x74,0x1C,0x22,0xFC,0x90,0x00,
+0x00,0x00,0x22,0xC1,0x06,0x81,0x00,0x00,0x05,0xFC,0x51,0xCA,0xFF,0xF0,0x22,0xFC,
+0xB0,0x00,0x00,0x00,0x22,0xC1,0x22,0x70,0x09,0xB0,0x00,0x00,0x18,0x62,0x24,0x70,
+0x09,0xB0,0x00,0x00,0x18,0x52,0x25,0x7C,0x00,0x00,0xFF,0xFF,0x00,0x10,0x25,0x7C,
+0x00,0x00,0x00,0x00,0x00,0x14,0x22,0x30,0x09,0xB0,0x00,0x00,0x18,0x72,0x33,0x41,
+0x00,0x02,0x06,0x81,0x00,0x00,0x00,0x50,0x33,0x41,0x00,0x00,0x13,0x7C,0x00,0x08,
+0x00,0x04,0x13,0x7C,0x00,0x08,0x00,0x05,0x0C,0xA8,0x00,0x00,0x00,0x05,0x00,0x10,
+0x66,0x00,0x00,0x2A,0x42,0x6A,0x00,0x08,0x23,0x7C,0x00,0x00,0xF0,0xB8,0x00,0x34,
+0x23,0x7C,0x00,0x00,0xFF,0xFF,0x00,0x38,0x33,0x7C,0x05,0xFA,0x00,0x46,0x31,0xBC,
+0x00,0x02,0x09,0xB0,0x00,0x00,0x19,0x9C,0x60,0x00,0x00,0xBC,0x0C,0xA8,0x00,0x00,
+0x00,0x07,0x00,0x10,0x66,0x00,0x00,0x2C,0x35,0x7C,0x08,0x00,0x00,0x08,0x23,0x7C,
+0xDE,0xBB,0x20,0xE3,0x00,0x34,0x23,0x7C,0xFF,0xFF,0xFF,0xFF,0x00,0x38,0x33,0x7C,
+0x05,0xFC,0x00,0x46,0x31,0xBC,0x00,0x04,0x09,0xB0,0x00,0x00,0x19,0x9C,0x60,0x00,
+0x00,0x86,0x0C,0xA8,0x00,0x00,0x00,0x04,0x00,0x10,0x66,0x00,0x00,0x26,0x42,0x6A,
+0x00,0x08,0x23,0x7C,0x00,0x00,0xF0,0xB8,0x00,0x34,0x42,0xA9,0x00,0x38,0x33,0x7C,
+0x05,0xFA,0x00,0x46,0x31,0xBC,0x00,0x02,0x09,0xB0,0x00,0x00,0x19,0x9C,0x60,0x00,
+0x00,0x56,0x0C,0xA8,0x00,0x00,0x00,0x06,0x00,0x10,0x66,0x00,0x00,0x28,0x35,0x7C,
+0x08,0x00,0x00,0x08,0x23,0x7C,0xDE,0xBB,0x20,0xE3,0x00,0x34,0x42,0xA9,0x00,0x38,
+0x33,0x7C,0x05,0xFC,0x00,0x46,0x31,0xBC,0x00,0x04,0x09,0xB0,0x00,0x00,0x19,0x9C,
+0x60,0x00,0x00,0x24,0x42,0x6A,0x00,0x08,0x23,0x7C,0x00,0x00,0xF0,0xB8,0x00,0x34,
+0x23,0x7C,0x00,0x00,0xFF,0xFF,0x00,0x38,0x33,0x7C,0x05,0xF8,0x00,0x46,0x42,0x70,
+0x09,0xB0,0x00,0x00,0x19,0x9C,0x25,0x7C,0x00,0x00,0x00,0x03,0x00,0x04,0x0C,0xA8,
+0x00,0x00,0x00,0x02,0x00,0x14,0x66,0x00,0x00,0x0E,0x25,0x7C,0x10,0x04,0x09,0x00,
+0x00,0x00,0x60,0x00,0x00,0x0A,0x25,0x7C,0x10,0x04,0x00,0x00,0x00,0x00,0x33,0x7C,
+0x05,0xFC,0x00,0x06,0x22,0x00,0xE9,0x89,0x00,0x81,0x00,0x00,0x00,0x01,0x33,0xC1,
+0xFF,0xFC,0x15,0xC0,0x08,0x39,0x00,0x00,0xFF,0xFC,0x15,0xC0,0x66,0x00,0xFF,0xF6,
+0x35,0x7C,0x00,0x1F,0x00,0x14,0x00,0xAA,0x00,0x00,0x00,0x30,0x00,0x00,0x4E,0x75,
+0x20,0x70,0x09,0xB0,0x00,0x00,0x18,0x52,0x42,0x68,0x00,0x14,0x02,0xA8,0xFF,0xFF,
+0xFF,0xCF,0x00,0x00,0x02,0x70,0xEF,0xFF,0x09,0xB0,0x00,0x00,0x19,0xAA,0x61,0x00,
+0x03,0x70,0x22,0x30,0x09,0xB0,0x00,0x00,0x10,0x04,0x42,0xB0,0x19,0x90,0x4E,0x75,
+0x0C,0xB0,0x00,0x00,0x00,0x0A,0x09,0xB0,0x00,0x00,0x19,0x78,0x67,0x00,0x00,0xA8,
+0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x68,0x24,0x01,0x4C,0x3C,0x20,0x00,0x00,0x00,
+0x00,0x0C,0xD4,0xB0,0x09,0xB0,0x00,0x00,0x10,0x04,0x06,0x82,0x00,0x00,0x00,0x1C,
+0x0C,0xB0,0x00,0x00,0x00,0x10,0x29,0x90,0x66,0x00,0x00,0x7C,0x20,0x70,0x29,0xA0,
+0x00,0x04,0xE7,0x89,0xD2,0xB0,0x09,0xB0,0x00,0x00,0x18,0x72,0x22,0x70,0x19,0xA0,
+0x00,0x04,0x24,0x30,0x29,0xA0,0x00,0x08,0x31,0x82,0x19,0xA0,0x00,0x02,0x56,0x82,
+0x02,0x82,0xFF,0xFF,0xFF,0xFC,0x23,0xC8,0xFF,0xF9,0x01,0x04,0x23,0xC9,0xFF,0xF9,
+0x01,0x08,0x23,0xC2,0xFF,0xF9,0x01,0x0C,0x23,0xFC,0x00,0x00,0x01,0x03,0xFF,0xF9,
+0x01,0x28,0x61,0x00,0x01,0xF6,0x08,0xF0,0x00,0x1F,0x19,0x90,0x22,0x30,0x09,0xB0,
+0x00,0x00,0x19,0x68,0x52,0x81,0x0C,0x81,0x00,0x00,0x00,0x0A,0x66,0x00,0x00,0x04,
+0x42,0x81,0x21,0x81,0x09,0xB0,0x00,0x00,0x19,0x68,0x52,0xB0,0x09,0xB0,0x00,0x00,
+0x19,0x78,0x60,0x00,0xFF,0x4C,0x4E,0x75,0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x88,
+0xE7,0x89,0xD2,0xB0,0x09,0xB0,0x00,0x00,0x18,0x82,0x34,0x30,0x19,0x90,0x08,0x02,
+0x00,0x0F,0x66,0x00,0x01,0x12,0x08,0x02,0x00,0x01,0x66,0x00,0x00,0xE6,0x4A,0x70,
+0x09,0xB0,0x00,0x00,0x19,0x9C,0x66,0x00,0x00,0x06,0x08,0x82,0x00,0x02,0x02,0x42,
+0x0C,0xBC,0x0C,0x42,0x0C,0x00,0x66,0x00,0x00,0xDC,0x42,0x83,0x36,0x30,0x19,0xA0,
+0x00,0x02,0x96,0x70,0x09,0xB0,0x00,0x00,0x19,0x9C,0x0C,0x43,0x05,0xF8,0x6E,0x00,
+0x00,0xC4,0x24,0x3A,0x04,0x84,0x4C,0x3C,0x20,0x00,0x00,0x00,0x00,0x0C,0xD4,0xBA,
+0xFA,0xF4,0x0C,0xB0,0x00,0x00,0x00,0x00,0x29,0x90,0x66,0x00,0x00,0x96,0x21,0x83,
+0x29,0xA0,0x00,0x08,0x20,0x70,0x19,0xA0,0x00,0x04,0x22,0x70,0x29,0xA0,0x00,0x04,
+0x4A,0x89,0x67,0x00,0x00,0x2A,0x56,0x83,0x02,0x83,0xFF,0xFF,0xFF,0xFC,0x23,0xC8,
+0xFF,0xF9,0x01,0x1C,0x23,0xC9,0xFF,0xF9,0x01,0x18,0x23,0xC3,0xFF,0xF9,0x01,0x20,
+0x23,0xFC,0x00,0x00,0x03,0x01,0xFF,0xF9,0x01,0x28,0x61,0x00,0x01,0x2C,0x21,0xB0,
+0x09,0xB0,0x00,0x00,0x18,0xC2,0x29,0x90,0x08,0xC6,0x00,0x04,0x24,0x3A,0x04,0x1A,
+0x52,0x82,0x0C,0x82,0x00,0x00,0x00,0x28,0x66,0x00,0x00,0x04,0x42,0x82,0x23,0xC2,
+0x00,0x00,0x19,0x98,0x02,0x70,0xF0,0x00,0x19,0x90,0x08,0xF0,0x00,0x1F,0x19,0x90,
+0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x88,0x52,0x81,0x0C,0x81,0x00,0x00,0x00,0x1E,
+0x66,0x00,0x00,0x04,0x42,0x81,0x21,0x81,0x09,0xB0,0x00,0x00,0x19,0x88,0x60,0x00,
+0xFE,0xF8,0x24,0x30,0x09,0xB0,0x00,0x00,0x10,0x04,0x52,0xB0,0x29,0xA0,0x00,0x08,
+0x60,0x00,0xFF,0xC2,0x24,0x30,0x09,0xB0,0x00,0x00,0x10,0x04,0x52,0xB0,0x29,0xA0,
+0x00,0x0C,0x60,0x00,0xFF,0xB0,0x4E,0x75,0x4A,0xB0,0x09,0xB0,0x00,0x00,0x19,0x78,
+0x67,0x00,0x00,0x86,0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x58,0x24,0x01,0xE7,0x89,
+0xD2,0xB0,0x09,0xB0,0x00,0x00,0x18,0x72,0x36,0x30,0x19,0x90,0x08,0x03,0x00,0x0F,
+0x66,0x00,0x00,0x66,0x8C,0xB0,0x09,0xB0,0x00,0x00,0x18,0xA2,0x53,0xB0,0x09,0xB0,
+0x00,0x00,0x19,0x78,0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x58,0x52,0x81,0x0C,0x81,
+0x00,0x00,0x00,0x0A,0x66,0x00,0x00,0x04,0x42,0x81,0x21,0x81,0x09,0xB0,0x00,0x00,
+0x19,0x58,0x4C,0x3C,0x20,0x00,0x00,0x00,0x00,0x0C,0xD4,0xB0,0x09,0xB0,0x00,0x00,
+0x10,0x04,0x06,0x82,0x00,0x00,0x00,0x1C,0x08,0x03,0x00,0x01,0x66,0x00,0x00,0x0E,
+0x21,0xBC,0x00,0x00,0x00,0x20,0x29,0x90,0x60,0x00,0xFF,0x7E,0x21,0xBC,0x00,0x00,
+0x00,0x30,0x29,0x90,0x60,0x00,0xFF,0x72,0x4E,0x75,0x2F,0x00,0x40,0xE7,0x20,0x39,
+0xFF,0xF9,0x01,0x28,0x08,0x00,0x00,0x04,0x66,0x00,0x00,0x2C,0x4E,0x72,0x22,0x00,
+0x46,0xFC,0x27,0x00,0x60,0x00,0xFF,0xE8,0x2F,0x00,0x40,0xE7,0x20,0x39,0xFF,0xF9,
+0x01,0x28,0x08,0x00,0x00,0x0C,0x66,0x00,0x00,0x0E,0x4E,0x72,0x22,0x00,0x46,0xFC,
+0x27,0x00,0x60,0x00,0xFF,0xE8,0x46,0xDF,0x20,0x1F,0x4E,0x75,0x2F,0x00,0x20,0x39,
+0xFF,0xF9,0x00,0xE0,0x23,0xC0,0xFF,0xF9,0x00,0xE0,0x81,0xB9,0x00,0x00,0x19,0x54,
+0x23,0xFC,0x00,0x00,0x09,0x09,0xFF,0xF9,0x01,0x28,0x20,0x1F,0x4E,0x73,0x00,0xB9,
+0x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x10,0x00,0xB9,0x00,0x00,0x10,0x00,0x00,0x00,
+0x19,0x54,0x23,0xFC,0x40,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x00,0xB9,
+0x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x30,0x00,0xB9,0x00,0x00,0x20,0x00,0x00,0x00,
+0x19,0x54,0x23,0xFC,0x20,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x00,0xB9,
+0x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x50,0x00,0xB9,0x00,0x00,0x40,0x00,0x00,0x00,
+0x19,0x54,0x23,0xFC,0x10,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x00,0xB9,
+0x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x70,0x00,0xB9,0x00,0x00,0x80,0x00,0x00,0x00,
+0x19,0x54,0x23,0xFC,0x08,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x4E,0x73,
+0x2F,0x00,0x2F,0x01,0x2F,0x02,0x2F,0x08,0x2F,0x09,0x42,0x80,0x20,0x7C,0xFF,0xFB,
+0x00,0x00,0x32,0x10,0x02,0x81,0x00,0x00,0x00,0xE7,0x0C,0x41,0x00,0x42,0x66,0x00,
+0x00,0x0A,0x32,0x3C,0x0E,0x08,0x60,0x00,0x00,0x3E,0x0C,0x41,0x00,0x63,0x66,0x00,
+0x00,0x0A,0x32,0x3C,0x04,0x08,0x60,0x00,0x00,0x2E,0x0C,0x41,0x00,0x84,0x66,0x00,
+0x00,0x0A,0x32,0x3C,0x02,0x08,0x60,0x00,0x00,0x1E,0x0C,0x41,0x00,0xA5,0x66,0x00,
+0x00,0x0A,0x32,0x3C,0x0D,0x08,0x60,0x00,0x00,0x0E,0x32,0x3C,0x00,0x08,0x34,0x3C,
+0x80,0xE7,0x60,0x00,0x00,0x14,0x34,0x30,0x09,0xB0,0x00,0x00,0x19,0xAA,0x02,0x42,
+0x30,0x00,0x82,0x42,0x34,0x3C,0x80,0xFF,0xB2,0x70,0x09,0xB0,0x00,0x00,0x19,0xAC,
+0x67,0x00,0x00,0x0C,0x31,0x81,0x09,0xB0,0x00,0x00,0x19,0xAC,0x30,0x81,0x32,0x39,
+0xFF,0xFC,0x15,0x66,0xC2,0x70,0x09,0xB0,0x00,0x00,0x19,0x02,0x67,0x00,0x00,0x0C,
+0x32,0x10,0x02,0x41,0xFF,0xF7,0x60,0x00,0x00,0x08,0x32,0x10,0x00,0x41,0x00,0x08,
+0xC2,0x42,0x22,0x70,0x09,0xB0,0x00,0x00,0x10,0x04,0xB2,0xA9,0x00,0x04,0x67,0x00,
+0x00,0x12,0x23,0x41,0x00,0x04,0x23,0xF0,0x09,0xB0,0x00,0x00,0x18,0xB2,0xFF,0xF9,
+0x00,0xE4,0x54,0x88,0x58,0x80,0x0C,0x80,0x00,0x00,0x00,0x10,0x66,0x00,0xFF,0x34,
+0x22,0x5F,0x20,0x5F,0x24,0x1F,0x22,0x1F,0x20,0x1F,0x4E,0x75,0x61,0x00,0xFF,0x12,
+0x4E,0x73,0xFF,0xFC,0x16,0x00,0xFF,0xFC,0x16,0x20,0xFF,0xFC,0x16,0x40,0xFF,0xFC,
+0x16,0x60,0xFF,0xFC,0x0C,0x00,0xFF,0xFC,0x0D,0x00,0xFF,0xFC,0x0E,0x00,0xFF,0xFC,
+0x0F,0x00,0xFF,0xFC,0x00,0x00,0xFF,0xFC,0x01,0x40,0xFF,0xFC,0x02,0x80,0xFF,0xFC,
+0x03,0xC0,0xFF,0xFC,0x00,0x50,0xFF,0xFC,0x01,0x90,0xFF,0xFC,0x02,0xD0,0xFF,0xFC,
+0x04,0x10,0x00,0x00,0x40,0x00,0x00,0x01,0x2F,0x60,0x00,0x02,0x1E,0xC0,0x00,0x03,
+0x0E,0x20,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x04,0x00,0x00,
+0x00,0x08,0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x80,0x00,0x00,
+0x01,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x11,0x00,0x00,0x00,0x12,0x00,0x00,
+0x00,0x13,0x00,0x00,0x00,0x2C,0x00,0x00,0x3E,0x00,0x00,0x2C,0x00,0x00,0x3E,0x00,
+0x00,0x00,0x00,0x00,0x00,0x2D,0x00,0x00,0x3F,0x00,0x00,0x2D,0x00,0x00,0x3F,0x00,
+0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0xFF,0x00,0x00,0xFF,0x00,0x00,0xFF,0x00,
+0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x80,0x00,0x00,0x02,0x00,0x00,0x00,0x08,0x00,
+0x77,0x61,0x6E,0x58,0x4C,0x20,0x66,0x69,0x72,0x6D,0x77,0x61,0x72,0x65,0x0A,0x43,
+0x6F,0x70,0x79,0x72,0x69,0x67,0x68,0x74,0x20,0x28,0x43,0x29,0x20,0x32,0x30,0x30,
+0x33,0x20,0x4B,0x72,0x7A,0x79,0x73,0x7A,0x74,0x6F,0x66,0x20,0x48,0x61,0x6C,0x61,
+0x73,0x61,0x20,0x3C,0x6B,0x68,0x63,0x40,0x70,0x6D,0x2E,0x77,0x61,0x77,0x2E,0x70,
+0x6C,0x3E,0x0A,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+};
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/x25_asy.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/x25_asy.c
new file mode 100644
index 0000000..d7a65e1
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/x25_asy.c
@@ -0,0 +1,824 @@
+/*
+ *	Things to sort out:
+ *
+ *	o	tbusy handling
+ *	o	allow users to set the parameters
+ *	o	sync/async switching ?
+ *
+ *	Note: This does _not_ implement CCITT X.25 asynchronous framing
+ *	recommendations. It's primarily for testing purposes. If you wanted
+ *	to do CCITT then in theory all you need is to nick the HDLC async
+ *	checksum routines from ppp.c
+ *      Changes:
+ *
+ *	2000-10-29	Henner Eisen	lapb_data_indication() return status.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/lapb.h>
+#include <linux/init.h>
+#include <linux/rtnetlink.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <net/x25device.h>
+#include "x25_asy.h"
+
+static struct net_device **x25_asy_devs;
+static int x25_asy_maxdev = SL_NRUNIT;
+
+module_param(x25_asy_maxdev, int, 0);
+MODULE_LICENSE("GPL");
+
+static int x25_asy_esc(unsigned char *p, unsigned char *d, int len);
+static void x25_asy_unesc(struct x25_asy *sl, unsigned char c);
+static void x25_asy_setup(struct net_device *dev);
+
+/* Find a free X.25 channel, and link in this `tty' line. */
+static struct x25_asy *x25_asy_alloc(void)
+{
+	struct net_device *dev = NULL;
+	struct x25_asy *sl;
+	int i;
+
+	if (x25_asy_devs == NULL)
+		return NULL;	/* Master array missing ! */
+
+	for (i = 0; i < x25_asy_maxdev; i++) {
+		dev = x25_asy_devs[i];
+
+		/* Not allocated ? */
+		if (dev == NULL)
+			break;
+
+		sl = netdev_priv(dev);
+		/* Not in use ? */
+		if (!test_and_set_bit(SLF_INUSE, &sl->flags))
+			return sl;
+	}
+
+
+	/* Sorry, too many, all slots in use */
+	if (i >= x25_asy_maxdev)
+		return NULL;
+
+	/* The slot is free but has no device yet - allocate and register one */
+	if (!dev) {
+		char name[IFNAMSIZ];
+		sprintf(name, "x25asy%d", i);
+
+		dev = alloc_netdev(sizeof(struct x25_asy),
+				   name, x25_asy_setup);
+		if (!dev)
+			return NULL;
+
+		/* Initialize channel control data */
+		sl = netdev_priv(dev);
+		dev->base_addr    = i;
+
+		/* register device so that it can be ifconfig'ed       */
+		if (register_netdev(dev) == 0) {
+			/* (Re-)Set the INUSE bit.   Very Important! */
+			set_bit(SLF_INUSE, &sl->flags);
+			x25_asy_devs[i] = dev;
+			return sl;
+		} else {
+			pr_warn("%s(): register_netdev() failure\n", __func__);
+			free_netdev(dev);
+		}
+	}
+	return NULL;
+}
+
+
+/* Free an X.25 channel. */
+static void x25_asy_free(struct x25_asy *sl)
+{
+	/* Free all X.25 frame buffers. */
+	kfree(sl->rbuff);
+	sl->rbuff = NULL;
+	kfree(sl->xbuff);
+	sl->xbuff = NULL;
+
+	if (!test_and_clear_bit(SLF_INUSE, &sl->flags))
+		netdev_err(sl->dev, "x25_asy_free for already free unit\n");
+}
+
+static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
+{
+	struct x25_asy *sl = netdev_priv(dev);
+	unsigned char *xbuff, *rbuff;
+	int len = 2 * newmtu;
+
+	xbuff = kmalloc(len + 4, GFP_ATOMIC);
+	rbuff = kmalloc(len + 4, GFP_ATOMIC);
+
+	if (xbuff == NULL || rbuff == NULL) {
+		netdev_warn(dev, "unable to grow X.25 buffers, MTU change cancelled\n");
+		kfree(xbuff);
+		kfree(rbuff);
+		return -ENOMEM;
+	}
+
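+	/* swap in the new buffers, preserving any data still pending in
+	   the old ones */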
+	spin_lock_bh(&sl->lock);
+	xbuff    = xchg(&sl->xbuff, xbuff);
+	if (sl->xleft)  {
+		if (sl->xleft <= len)  {
+			memcpy(sl->xbuff, sl->xhead, sl->xleft);
+		} else  {
+			sl->xleft = 0;
+			dev->stats.tx_dropped++;
+		}
+	}
+	sl->xhead = sl->xbuff;
+
+	rbuff	 = xchg(&sl->rbuff, rbuff);
+	if (sl->rcount)  {
+		if (sl->rcount <= len) {
+			memcpy(sl->rbuff, rbuff, sl->rcount);
+		} else  {
+			sl->rcount = 0;
+			dev->stats.rx_over_errors++;
+			set_bit(SLF_ERROR, &sl->flags);
+		}
+	}
+
+	dev->mtu    = newmtu;
+	sl->buffsize = len;
+
+	spin_unlock_bh(&sl->lock);
+
+	kfree(xbuff);
+	kfree(rbuff);
+	return 0;
+}
+
+
+/* Set the "sending" flag: stop the device's transmit queue. */
+
+static inline void x25_asy_lock(struct x25_asy *sl)
+{
+	netif_stop_queue(sl->dev);
+}
+
+
+/* Clear the "sending" flag: wake the device's transmit queue. */
+
+static inline void x25_asy_unlock(struct x25_asy *sl)
+{
+	netif_wake_queue(sl->dev);
+}
+
+/* Send one completely decapsulated IP datagram to the IP layer. */
+
+static void x25_asy_bump(struct x25_asy *sl)
+{
+	struct net_device *dev = sl->dev;
+	struct sk_buff *skb;
+	int count;
+	int err;
+
+	count = sl->rcount;
+	dev->stats.rx_bytes += count;
+
+	skb = dev_alloc_skb(count+1);
+	if (skb == NULL) {
+		netdev_warn(sl->dev, "memory squeeze, dropping packet\n");
+		dev->stats.rx_dropped++;
+		return;
+	}
+	skb_push(skb, 1);	/* LAPB internal control */
+	memcpy(skb_put(skb, count), sl->rbuff, count);
+	skb->protocol = x25_type_trans(skb, sl->dev);
+	err = lapb_data_received(skb->dev, skb);
+	if (err != LAPB_OK) {
+		kfree_skb(skb);
+		printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
+	} else {
+		netif_rx(skb);
+		dev->stats.rx_packets++;
+	}
+}
+
+/* Encapsulate one IP datagram and stuff into a TTY queue. */
+static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
+{
+	unsigned char *p;
+	int actual, count, mtu = sl->dev->mtu;
+
+	if (len > mtu) {
+		/* Sigh, shouldn't occur BUT ... */
+		len = mtu;
+		printk(KERN_DEBUG "%s: truncating oversized transmit packet!\n",
+					sl->dev->name);
+		sl->dev->stats.tx_dropped++;
+		x25_asy_unlock(sl);
+		return;
+	}
+
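+	/* byte-stuff the frame into the transmit buffer, then push as much
+	   as the tty driver will accept */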
+	p = icp;
+	count = x25_asy_esc(p, (unsigned char *) sl->xbuff, len);
+
+	/* The order of the next two lines is *very* important.
+	 * When we are sending a small amount of data, the transfer may
+	 * complete inside the driver's write() routine, because it runs
+	 * with interrupts enabled.  In that case we would *never* get a
+	 * WRITE_WAKEUP event if we did not request it before the write.
+	 *       14 Oct 1994  Dmitry Gorodchanin.
+	 */
+	set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+	actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
+	sl->xleft = count - actual;
+	sl->xhead = sl->xbuff + actual;
+	/* VSV */
+	clear_bit(SLF_OUTWAIT, &sl->flags);	/* reset outfill flag */
+}
+
+/*
+ * Called by the driver when there's room for more data.  If we have
+ * more packets to send, we send them here.
+ */
+static void x25_asy_write_wakeup(struct tty_struct *tty)
+{
+	int actual;
+	struct x25_asy *sl = tty->disc_data;
+
+	/* First make sure we're connected. */
+	if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
+		return;
+
+	if (sl->xleft <= 0) {
+		/* The serial buffer is now almost free, so we can start
+		 * transmitting another packet */
+		sl->dev->stats.tx_packets++;
+		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+		x25_asy_unlock(sl);
+		return;
+	}
+
+	actual = tty->ops->write(tty, sl->xhead, sl->xleft);
+	sl->xleft -= actual;
+	sl->xhead += actual;
+}
+
+static void x25_asy_timeout(struct net_device *dev)
+{
+	struct x25_asy *sl = netdev_priv(dev);
+
+	spin_lock(&sl->lock);
+	if (netif_queue_stopped(dev)) {
+		/* Maybe we should check for a transmitter timeout here?
+		 *      14 Oct 1994 Dmitry Gorodchanin.
+		 */
+		netdev_warn(dev, "transmit timed out, %s?\n",
+			    (tty_chars_in_buffer(sl->tty) || sl->xleft) ?
+			    "bad line quality" : "driver error");
+		sl->xleft = 0;
+		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+		x25_asy_unlock(sl);
+	}
+	spin_unlock(&sl->lock);
+}
+
+/* Encapsulate an IP datagram and kick it into a TTY queue. */
+
+static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
+				      struct net_device *dev)
+{
+	struct x25_asy *sl = netdev_priv(dev);
+	int err;
+
+	if (!netif_running(sl->dev)) {
+		netdev_err(dev, "xmit call when iface is down\n");
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	switch (skb->data[0]) {
+	case X25_IFACE_DATA:
+		break;
+	case X25_IFACE_CONNECT: /* Connection request .. do nothing */
+		err = lapb_connect_request(dev);
+		if (err != LAPB_OK)
+			netdev_err(dev, "lapb_connect_request error: %d\n",
+				   err);
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	case X25_IFACE_DISCONNECT: /* do nothing - hang up ?? */
+		err = lapb_disconnect_request(dev);
+		if (err != LAPB_OK)
+			netdev_err(dev, "lapb_disconnect_request error: %d\n",
+				   err);
+	default:
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	skb_pull(skb, 1);	/* Remove control byte */
+	/*
+	 * If we are busy already- too bad.  We ought to be able
+	 * to queue things at this point, to allow for a little
+	 * frame buffer.  Oh well...
+	 * -----------------------------------------------------
+	 * I hate queues in the X.25 driver. Maybe they are efficient,
+	 * but for me latency is more important. ;)
+	 * So, no queues !
+	 *        14 Oct 1994  Dmitry Gorodchanin.
+	 */
+
+	err = lapb_data_request(dev, skb);
+	if (err != LAPB_OK) {
+		netdev_err(dev, "lapb_data_request error: %d\n", err);
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	return NETDEV_TX_OK;
+}
+
+
+/*
+ *	LAPB interface boilerplate
+ */
+
+/*
+ *	Called when I frame data arrives. We did the work above - throw it
+ *	at the net layer.
+ */
+
+static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
+{
+	return netif_rx(skb);
+}
+
+/*
+ *	Data has emerged from the LAPB protocol machine. We don't handle
+ *	busy cases too well. It's tricky to see how to do this nicely -
+ *	perhaps lapb should allow us to bounce this ?
+ */
+
+static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb)
+{
+	struct x25_asy *sl = netdev_priv(dev);
+
+	spin_lock(&sl->lock);
+	if (netif_queue_stopped(sl->dev) || sl->tty == NULL) {
+		spin_unlock(&sl->lock);
+		netdev_err(dev, "tbusy drop\n");
+		kfree_skb(skb);
+		return;
+	}
+	/* We were not busy, so we are now... :-) */
+	if (skb != NULL) {
+		x25_asy_lock(sl);
+		dev->stats.tx_bytes += skb->len;
+		x25_asy_encaps(sl, skb->data, skb->len);
+		dev_kfree_skb(skb);
+	}
+	spin_unlock(&sl->lock);
+}
+
+/*
+ *	LAPB connection establish/down information.
+ */
+
+static void x25_asy_connected(struct net_device *dev, int reason)
+{
+	struct x25_asy *sl = netdev_priv(dev);
+	struct sk_buff *skb;
+	unsigned char *ptr;
+
+	skb = dev_alloc_skb(1);
+	if (skb == NULL) {
+		netdev_err(dev, "out of memory\n");
+		return;
+	}
+
+	ptr  = skb_put(skb, 1);
+	*ptr = X25_IFACE_CONNECT;
+
+	skb->protocol = x25_type_trans(skb, sl->dev);
+	netif_rx(skb);
+}
+
+static void x25_asy_disconnected(struct net_device *dev, int reason)
+{
+	struct x25_asy *sl = netdev_priv(dev);
+	struct sk_buff *skb;
+	unsigned char *ptr;
+
+	skb = dev_alloc_skb(1);
+	if (skb == NULL) {
+		netdev_err(dev, "out of memory\n");
+		return;
+	}
+
+	ptr  = skb_put(skb, 1);
+	*ptr = X25_IFACE_DISCONNECT;
+
+	skb->protocol = x25_type_trans(skb, sl->dev);
+	netif_rx(skb);
+}
+
+static const struct lapb_register_struct x25_asy_callbacks = {
+	.connect_confirmation = x25_asy_connected,
+	.connect_indication = x25_asy_connected,
+	.disconnect_confirmation = x25_asy_disconnected,
+	.disconnect_indication = x25_asy_disconnected,
+	.data_indication = x25_asy_data_indication,
+	.data_transmit = x25_asy_data_transmit,
+};
+
+
+/* Open the low-level part of the X.25 channel. Easy! */
+static int x25_asy_open(struct net_device *dev)
+{
+	struct x25_asy *sl = netdev_priv(dev);
+	unsigned long len;
+	int err;
+
+	if (sl->tty == NULL)
+		return -ENODEV;
+
+	/*
+	 * Allocate the X.25 frame buffers:
+	 *
+	 * rbuff	Receive buffer.
+	 * xbuff	Transmit buffer.
+	 */
+
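+	/* Worst-case byte stuffing can double a frame, hence 2 * MTU
+	 * (plus a little slack for the framing bytes). */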
+	len = dev->mtu * 2;
+
+	sl->rbuff = kmalloc(len + 4, GFP_KERNEL);
+	if (sl->rbuff == NULL)
+		goto norbuff;
+	sl->xbuff = kmalloc(len + 4, GFP_KERNEL);
+	if (sl->xbuff == NULL)
+		goto noxbuff;
+
+	sl->buffsize = len;
+	sl->rcount   = 0;
+	sl->xleft    = 0;
+	sl->flags   &= (1 << SLF_INUSE);      /* Clear ESCAPE & ERROR flags */
+
+	netif_start_queue(dev);
+
+	/*
+	 *	Now attach LAPB
+	 */
+	err = lapb_register(dev, &x25_asy_callbacks);
+	if (err == LAPB_OK)
+		return 0;
+
+	/* Cleanup */
+	kfree(sl->xbuff);
+noxbuff:
+	kfree(sl->rbuff);
+norbuff:
+	return -ENOMEM;
+}
+
+
+/* Close the low-level part of the X.25 channel. Easy! */
+static int x25_asy_close(struct net_device *dev)
+{
+	struct x25_asy *sl = netdev_priv(dev);
+
+	spin_lock(&sl->lock);
+	if (sl->tty)
+		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+
+	netif_stop_queue(dev);
+	sl->rcount = 0;
+	sl->xleft  = 0;
+	spin_unlock(&sl->lock);
+	return 0;
+}
+
+/*
+ * Handle the 'receiver data ready' interrupt.
+ * This function is called by the 'tty_io' module in the kernel when
+ * a block of X.25 data has been received, which can now be decapsulated
+ * and sent on to some IP layer for further processing.
+ */
+
+static void x25_asy_receive_buf(struct tty_struct *tty,
+				const unsigned char *cp, char *fp, int count)
+{
+	struct x25_asy *sl = tty->disc_data;
+
+	if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
+		return;
+
+
+	/* Read the characters out of the buffer */
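+	/* A non-zero flag byte in fp marks a tty-reported error or break
+	 * condition for the corresponding data byte in cp. */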
+	while (count--) {
+		if (fp && *fp++) {
+			if (!test_and_set_bit(SLF_ERROR, &sl->flags))
+				sl->dev->stats.rx_errors++;
+			cp++;
+			continue;
+		}
+		x25_asy_unesc(sl, *cp++);
+	}
+}
+
+/*
+ * Open the high-level part of the X.25 channel.
+ * This function is called by the TTY module when the
+ * X.25 line discipline is called for.  Because we are
+ * sure the tty line exists, we only have to link it to
+ * a free X.25 channel...
+ */
+
+static int x25_asy_open_tty(struct tty_struct *tty)
+{
+	struct x25_asy *sl = tty->disc_data;
+	int err;
+
+	if (tty->ops->write == NULL)
+		return -EOPNOTSUPP;
+
+	/* First make sure we're not already connected. */
+	if (sl && sl->magic == X25_ASY_MAGIC)
+		return -EEXIST;
+
+	/* OK.  Find a free X.25 channel to use. */
+	sl = x25_asy_alloc();
+	if (sl == NULL)
+		return -ENFILE;
+
+	sl->tty = tty;
+	tty->disc_data = sl;
+	tty->receive_room = 65536;
+	tty_driver_flush_buffer(tty);
+	tty_ldisc_flush(tty);
+
+	/* Restore default settings */
+	sl->dev->type = ARPHRD_X25;
+
+	/* Perform the low-level X.25 async init */
+	err = x25_asy_open(sl->dev);
+	if (err)
+		return err;
+	/* Done.  We have linked the TTY line to a channel. */
+	return 0;
+}
+
+
+/*
+ * Close down an X.25 channel.
+ * This means flushing out any pending queues, and then restoring the
+ * TTY line discipline to what it was before it got hooked to X.25
+ * (which usually is TTY again).
+ */
+static void x25_asy_close_tty(struct tty_struct *tty)
+{
+	struct x25_asy *sl = tty->disc_data;
+	int err;
+
+	/* First make sure we're connected. */
+	if (!sl || sl->magic != X25_ASY_MAGIC)
+		return;
+
+	rtnl_lock();
+	if (sl->dev->flags & IFF_UP)
+		dev_close(sl->dev);
+	rtnl_unlock();
+
+	err = lapb_unregister(sl->dev);
+	if (err != LAPB_OK)
+		pr_err("x25_asy_close: lapb_unregister error: %d\n",
+		       err);
+
+	tty->disc_data = NULL;
+	sl->tty = NULL;
+	x25_asy_free(sl);
+}
+
+ /************************************************************************
+  *			STANDARD X.25 ENCAPSULATION		  	 *
+  ************************************************************************/
+
+static int x25_asy_esc(unsigned char *s, unsigned char *d, int len)
+{
+	unsigned char *ptr = d;
+	unsigned char c;
+
+	/*
+	 * Send an initial END character to flush out any
+	 * data that may have accumulated in the receiver
+	 * due to line noise.
+	 */
+
+	*ptr++ = X25_END;	/* Send 01111110 bit seq */
+
+	/*
+	 * For each byte in the packet, send the appropriate
+	 * character sequence, according to the X.25 protocol.
+	 */
+
+	while (len-- > 0) {
+		switch (c = *s++) {
+		case X25_END:
+			*ptr++ = X25_ESC;
+			*ptr++ = X25_ESCAPE(X25_END);
+			break;
+		case X25_ESC:
+			*ptr++ = X25_ESC;
+			*ptr++ = X25_ESCAPE(X25_ESC);
+			break;
+		default:
+			*ptr++ = c;
+			break;
+		}
+	}
+	*ptr++ = X25_END;
+	return ptr - d;
+}
+
+static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
+{
+
+	switch (s) {
+	case X25_END:
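+		/* End of frame: hand the buffer up unless an error was
+		 * flagged or the frame is too short to be valid. */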
+		if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+		    sl->rcount > 2)
+			x25_asy_bump(sl);
+		clear_bit(SLF_ESCAPE, &sl->flags);
+		sl->rcount = 0;
+		return;
+	case X25_ESC:
+		set_bit(SLF_ESCAPE, &sl->flags);
+		return;
+	case X25_ESCAPE(X25_ESC):
+	case X25_ESCAPE(X25_END):
+		if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
+			s = X25_UNESCAPE(s);
+		break;
+	}
+	if (!test_bit(SLF_ERROR, &sl->flags)) {
+		if (sl->rcount < sl->buffsize) {
+			sl->rbuff[sl->rcount++] = s;
+			return;
+		}
+		sl->dev->stats.rx_over_errors++;
+		set_bit(SLF_ERROR, &sl->flags);
+	}
+}
+
+
+/* Perform I/O control on an active X.25 channel. */
+static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
+			 unsigned int cmd,  unsigned long arg)
+{
+	struct x25_asy *sl = tty->disc_data;
+
+	/* First make sure we're connected. */
+	if (!sl || sl->magic != X25_ASY_MAGIC)
+		return -EINVAL;
+
+	switch (cmd) {
+	case SIOCGIFNAME:
+		if (copy_to_user((void __user *)arg, sl->dev->name,
+					strlen(sl->dev->name) + 1))
+			return -EFAULT;
+		return 0;
+	case SIOCSIFHWADDR:
+		return -EINVAL;
+	default:
+		return tty_mode_ioctl(tty, file, cmd, arg);
+	}
+}
+
+#ifdef CONFIG_COMPAT
+static long x25_asy_compat_ioctl(struct tty_struct *tty, struct file *file,
+			 unsigned int cmd,  unsigned long arg)
+{
+	switch (cmd) {
+	case SIOCGIFNAME:
+	case SIOCSIFHWADDR:
+		return x25_asy_ioctl(tty, file, cmd,
+				     (unsigned long)compat_ptr(arg));
+	}
+
+	return -ENOIOCTLCMD;
+}
+#endif
+
+static int x25_asy_open_dev(struct net_device *dev)
+{
+	struct x25_asy *sl = netdev_priv(dev);
+	if (sl->tty == NULL)
+		return -ENODEV;
+	return 0;
+}
+
+static const struct net_device_ops x25_asy_netdev_ops = {
+	.ndo_open	= x25_asy_open_dev,
+	.ndo_stop	= x25_asy_close,
+	.ndo_start_xmit	= x25_asy_xmit,
+	.ndo_tx_timeout	= x25_asy_timeout,
+	.ndo_change_mtu	= x25_asy_change_mtu,
+};
+
+/* Initialise the X.25 driver.  Called by the device init code */
+static void x25_asy_setup(struct net_device *dev)
+{
+	struct x25_asy *sl = netdev_priv(dev);
+
+	sl->magic  = X25_ASY_MAGIC;
+	sl->dev	   = dev;
+	spin_lock_init(&sl->lock);
+	set_bit(SLF_INUSE, &sl->flags);
+
+	/*
+	 *	Finish setting up the DEVICE info.
+	 */
+
+	dev->mtu		= SL_MTU;
+	dev->netdev_ops		= &x25_asy_netdev_ops;
+	dev->watchdog_timeo	= HZ*20;
+	dev->hard_header_len	= 0;
+	dev->addr_len		= 0;
+	dev->type		= ARPHRD_X25;
+	dev->tx_queue_len	= 10;
+
+	/* New-style flags. */
+	dev->flags		= IFF_NOARP;
+}
+
+static struct tty_ldisc_ops x25_ldisc = {
+	.owner		= THIS_MODULE,
+	.magic		= TTY_LDISC_MAGIC,
+	.name		= "X.25",
+	.open		= x25_asy_open_tty,
+	.close		= x25_asy_close_tty,
+	.ioctl		= x25_asy_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= x25_asy_compat_ioctl,
+#endif
+	.receive_buf	= x25_asy_receive_buf,
+	.write_wakeup	= x25_asy_write_wakeup,
+};
+
+static int __init init_x25_asy(void)
+{
+	if (x25_asy_maxdev < 4)
+		x25_asy_maxdev = 4; /* Sanity */
+
+	pr_info("X.25 async: version 0.00 ALPHA (dynamic channels, max=%d)\n",
+		x25_asy_maxdev);
+
+	x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device *),
+				GFP_KERNEL);
+	if (!x25_asy_devs)
+		return -ENOMEM;
+
+	return tty_register_ldisc(N_X25, &x25_ldisc);
+}
+
+
+static void __exit exit_x25_asy(void)
+{
+	struct net_device *dev;
+	int i;
+
+	for (i = 0; i < x25_asy_maxdev; i++) {
+		dev = x25_asy_devs[i];
+		if (dev) {
+			struct x25_asy *sl = netdev_priv(dev);
+
+			spin_lock_bh(&sl->lock);
+			if (sl->tty)
+				tty_hangup(sl->tty);
+
+			spin_unlock_bh(&sl->lock);
+			/*
+			 * VSV = if dev->start==0, then device
+			 * unregistered while close proc.
+			 */
+			unregister_netdev(dev);
+			free_netdev(dev);
+		}
+	}
+
+	kfree(x25_asy_devs);
+	tty_unregister_ldisc(N_X25);
+}
+
+module_init(init_x25_asy);
+module_exit(exit_x25_asy);
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/x25_asy.h b/ap/os/linux/linux-3.4.x/drivers/net/wan/x25_asy.h
new file mode 100644
index 0000000..8f0fc2e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/x25_asy.h
@@ -0,0 +1,46 @@
+#ifndef _LINUX_X25_ASY_H
+#define _LINUX_X25_ASY_H
+
+/* X.25 asy configuration. */
+#define SL_NRUNIT	256		/* MAX number of X.25 channels;
+					   This can be overridden with
+					   insmod -ox25_asy_maxdev=nnn	*/
+#define SL_MTU		256	
+
+/* X25 async protocol characters. */
+#define X25_END         0x7E		/* indicates end of frame	*/
+#define X25_ESC         0x7D		/* indicates byte stuffing	*/
+#define X25_ESCAPE(x)	((x)^0x20)
+#define X25_UNESCAPE(x)	((x)^0x20)
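+/*
+ * An in-band X25_END or X25_ESC byte is sent on the wire as X25_ESC followed
+ * by the value XORed with 0x20; the receiver applies X25_UNESCAPE to the byte
+ * after an escape to recover the original value.
+ */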
+
+
+struct x25_asy {
+  int			magic;
+
+  /* Various fields. */
+  spinlock_t		lock;
+  struct tty_struct	*tty;		/* ptr to TTY structure		*/
+  struct net_device	*dev;		/* easy for intr handling	*/
+
+  /* These are pointers to the malloc()ed frame buffers. */
+  unsigned char		*rbuff;		/* receiver buffer		*/
+  int                   rcount;         /* received chars counter       */
+  unsigned char		*xbuff;		/* transmitter buffer		*/
+  unsigned char         *xhead;         /* pointer to next byte to XMIT */
+  int                   xleft;          /* bytes left in XMIT queue     */
+  int                   buffsize;       /* Max buffers sizes            */
+
+  unsigned long		flags;		/* Flag values/ mode etc	*/
+#define SLF_INUSE	0		/* Channel in use               */
+#define SLF_ESCAPE	1               /* ESC received                 */
+#define SLF_ERROR	2               /* Parity, etc. error           */
+#define SLF_OUTWAIT	4		/* Waiting for output		*/
+};
+
+
+
+#define X25_ASY_MAGIC 0x5303
+
+extern int x25_asy_init(struct net_device *dev);
+
+#endif	/* _LINUX_X25_ASY_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/z85230.c b/ap/os/linux/linux-3.4.x/drivers/net/wan/z85230.c
new file mode 100644
index 0000000..0e57690
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/z85230.c
@@ -0,0 +1,1795 @@
+/*
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	(c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *	(c) Copyright 2000, 2001 Red Hat Inc
+ *
+ *	Development of this driver was funded by Equiinet Ltd
+ *			http://www.equiinet.com
+ *
+ *	ChangeLog:
+ *
+ *	Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
+ *	unification of all the Z85x30 asynchronous drivers for real.
+ *
+ *	DMA now uses get_free_page as kmalloc buffers may span a 64K 
+ *	boundary.
+ *
+ *	Modified for SMP safety and SMP locking by Alan Cox
+ *					<alan@lxorguk.ukuu.org.uk>
+ *
+ *	Performance
+ *
+ *	Z85230:
+ *	Without DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
+ *	X.25 is not unrealistic on all machines. DMA mode can in theory
+ *	handle T1/E1 quite nicely. In practice the limit seems to be about
+ *	512Kbit->1Mbit depending on motherboard.
+ *
+ *	Z85C30:
+ *	64K will take DMA, 9600 baud X.25 should be ok.
+ *
+ *	Z8530:
+ *	Synchronous mode without DMA is unlikely to pass about 2400 baud.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/delay.h>
+#include <linux/hdlc.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#define RT_LOCK
+#define RT_UNLOCK
+#include <linux/spinlock.h>
+
+#include "z85230.h"
+
+
+/**
+ *	z8530_read_port - Architecture specific interface function
+ *	@p: port to read
+ *
+ *	Provided port access methods. The Comtrol SV11 requires no delays
+ *	between accesses and uses PC I/O. Some drivers may need a 5uS delay
+ *	
+ *	In the longer term this should become an architecture specific
+ *	section so that this can become a generic driver interface for all
+ *	platforms. For now we only handle PC I/O ports with or without the
+ *	dread 5uS sanity delay.
+ *
+ *	The caller must hold sufficient locks to avoid violating the horrible
+ *	5uS delay rule.
+ */
+
+static inline int z8530_read_port(unsigned long p)
+{
+	u8 r=inb(Z8530_PORT_OF(p));
+	if(p&Z8530_PORT_SLEEP)	/* gcc should figure this out efficiently ! */
+		udelay(5);
+	return r;
+}
+
+/**
+ *	z8530_write_port - Architecture specific interface function
+ *	@p: port to write
+ *	@d: value to write
+ *
+ *	Write a value to a port with delays if need be. Note that the
+ *	caller must hold locks to avoid read/writes from other contexts
+ *	violating the 5uS rule
+ *
+ *	In the longer term this should become an architecture specific
+ *	section so that this can become a generic driver interface for all
+ *	platforms. For now we only handle PC I/O ports with or without the
+ *	dread 5uS sanity delay.
+ */
+
+
+static inline void z8530_write_port(unsigned long p, u8 d)
+{
+	outb(d,Z8530_PORT_OF(p));
+	if(p&Z8530_PORT_SLEEP)
+		udelay(5);
+}
+
+
+
+static void z8530_rx_done(struct z8530_channel *c);
+static void z8530_tx_done(struct z8530_channel *c);
+
+
+/**
+ *	read_zsreg - Read a register from a Z85230 
+ *	@c: Z8530 channel to read from (2 per chip)
+ *	@reg: Register to read
+ *	FIXME: Use a spinlock.
+ *	
+ *	Most of the Z8530 registers are indexed off the control registers.
+ *	A read is done by writing to the control register and reading the
+ *	register back.  The caller must hold the lock
+ */
+ 
+static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
+{
+	if(reg)
+		z8530_write_port(c->ctrlio, reg);
+	return z8530_read_port(c->ctrlio);
+}
+
+/**
+ *	read_zsdata - Read the data port of a Z8530 channel
+ *	@c: The Z8530 channel to read the data port from
+ *
+ *	The data port provides fast access to some things. We still
+ *	have all the 5uS delays to worry about.
+ */
+
+static inline u8 read_zsdata(struct z8530_channel *c)
+{
+	u8 r;
+	r=z8530_read_port(c->dataio);
+	return r;
+}
+
+/**
+ *	write_zsreg - Write to a Z8530 channel register
+ *	@c: The Z8530 channel
+ *	@reg: Register number
+ *	@val: Value to write
+ *
+ *	Write a value to an indexed register. The caller must hold the lock
+ *	to honour the irritating delay rules. We know about register 0
+ *	being fast to access.
+ *
+ *      Assumes c->lock is held.
+ */
+static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
+{
+	if(reg)
+		z8530_write_port(c->ctrlio, reg);
+	z8530_write_port(c->ctrlio, val);
+
+}
+
+/**
+ *	write_zsctrl - Write to a Z8530 control register
+ *	@c: The Z8530 channel
+ *	@val: Value to write
+ *
+ *	Write directly to the control register on the Z8530
+ */
+
+static inline void write_zsctrl(struct z8530_channel *c, u8 val)
+{
+	z8530_write_port(c->ctrlio, val);
+}
+
+/**
+ *	write_zsdata - Write to a Z8530 data register
+ *	@c: The Z8530 channel
+ *	@val: Value to write
+ *
+ *	Write directly to the data register on the Z8530
+ */
+
+
+static inline void write_zsdata(struct z8530_channel *c, u8 val)
+{
+	z8530_write_port(c->dataio, val);
+}
+
+/*
+ *	Register loading parameters for a dead port
+ */
+ 
+u8 z8530_dead_port[]=
+{
+	255
+};
+
+EXPORT_SYMBOL(z8530_dead_port);
+
+/*
+ *	Register loading parameters for currently supported circuit types
+ */
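+/*
+ *	These tables are flat lists of (register, value) pairs terminated by
+ *	255; z8530_channel_load() walks them and programs the channel.
+ */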
+
+
+/*
+ *	Data clocked by telco end. This is the correct data for the UK
+ *	"kilostream" service, and most other similar services.
+ */
+ 
+u8 z8530_hdlc_kilostream[]=
+{
+	4,	SYNC_ENAB|SDLC|X1CLK,
+	2,	0,	/* No vector */
+	1,	0,
+	3,	ENT_HM|RxCRC_ENAB|Rx8,
+	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
+	9,	0,		/* Disable interrupts */
+	6,	0xFF,
+	7,	FLAG,
+	10,	ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/
+	11,	TCTRxCP,
+	14,	DISDPLL,
+	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
+	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
+	9,	NV|MIE|NORESET,
+	255
+};
+
+EXPORT_SYMBOL(z8530_hdlc_kilostream);
+
+/*
+ *	As above but for enhanced chips.
+ */
+ 
+u8 z8530_hdlc_kilostream_85230[]=
+{
+	4,	SYNC_ENAB|SDLC|X1CLK,
+	2,	0,	/* No vector */
+	1,	0,
+	3,	ENT_HM|RxCRC_ENAB|Rx8,
+	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
+	9,	0,		/* Disable interrupts */
+	6,	0xFF,
+	7,	FLAG,
+	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE?? */
+	11,	TCTRxCP,
+	14,	DISDPLL,
+	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
+	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
+	9,	NV|MIE|NORESET,
+	23,	3,		/* Extended mode AUTO TX and EOM*/
+	
+	255
+};
+
+EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
+
+/**
+ *	z8530_flush_fifo - Flush on chip RX FIFO
+ *	@c: Channel to flush
+ *
+ *	Flush the receive FIFO. There is no specific option for this, we 
+ *	blindly read bytes and discard them. Reading when there is no data
+ *	is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
+ *	
+ *	All locking is handled for the caller. On return data may still be
+ *	present if it arrived during the flush.
+ */
+ 
+static void z8530_flush_fifo(struct z8530_channel *c)
+{
+	read_zsreg(c, R1);
+	read_zsreg(c, R1);
+	read_zsreg(c, R1);
+	read_zsreg(c, R1);
+	if(c->dev->type==Z85230)
+	{
+		read_zsreg(c, R1);
+		read_zsreg(c, R1);
+		read_zsreg(c, R1);
+		read_zsreg(c, R1);
+	}
+}	
+
+/**
+ *	z8530_rtsdtr - Control the outgoing DTR/RTS lines
+ *	@c: The Z8530 channel to control
+ *	@set: 1 to set, 0 to clear
+ *
+ *	Sets or clears DTR/RTS on the requested line. All locking is handled
+ *	by the caller. For now we assume all boards use the actual RTS/DTR
+ *	on the chip. Apparently one or two don't. We'll scream about them
+ *	later.
+ */
+
+static void z8530_rtsdtr(struct z8530_channel *c, int set)
+{
+	if (set)
+		c->regs[5] |= (RTS | DTR);
+	else
+		c->regs[5] &= ~(RTS | DTR);
+	write_zsreg(c, R5, c->regs[5]);
+}
+
+/**
+ *	z8530_rx - Handle a PIO receive event
+ *	@c: Z8530 channel to process
+ *
+ *	Receive handler for receiving in PIO mode. This is much like the 
+ *	async one but not quite the same or as complex
+ *
+ *	Note: It's intended that this handler can easily be separated from
+ *	the main code to run realtime. That'll be needed for some machines
+ *	(eg to ever clock 64kbits on a sparc ;)).
+ *
+ *	The RT_LOCK macros don't do anything now. Keep the code covered
+ *	by them as short as possible in all circumstances - clocks cost
+ *	baud. The interrupt handler is assumed to be atomic w.r.t. to
+ *	other code - this is true in the RT case too.
+ *
+ *	We only cover the sync cases for this. If you want 2Mbit async
+ *	do it yourself but consider medical assistance first. This non DMA 
+ *	synchronous mode is portable code. The DMA mode assumes PCI like 
+ *	ISA DMA
+ *
+ *	Called with the device lock held
+ */
+ 
+static void z8530_rx(struct z8530_channel *c)
+{
+	u8 ch,stat;
+
+	while(1)
+	{
+		/* FIFO empty ? */
+		if(!(read_zsreg(c, R0)&1))
+			break;
+		ch=read_zsdata(c);
+		stat=read_zsreg(c, R1);
+	
+		/*
+		 *	Overrun ?
+		 */
+		if(c->count < c->max)
+		{
+			*c->dptr++=ch;
+			c->count++;
+		}
+
+		if(stat&END_FR)
+		{
+		
+			/*
+			 *	Error ?
+			 */
+			if(stat&(Rx_OVR|CRC_ERR))
+			{
+				/* Rewind the buffer and return */
+				if(c->skb)
+					c->dptr=c->skb->data;
+				c->count=0;
+				if(stat&Rx_OVR)
+				{
+					pr_warn("%s: overrun\n", c->dev->name);
+					c->rx_overrun++;
+				}
+				if(stat&CRC_ERR)
+				{
+					c->rx_crc_err++;
+					/* printk("crc error\n"); */
+				}
+				/* Shove the frame upstream */
+			}
+			else
+			{
+				/*
+				 *	Drop the lock for RX processing, or
+				 *	there are deadlocks
+				 */
+				z8530_rx_done(c);
+				write_zsctrl(c, RES_Rx_CRC);
+			}
+		}
+	}
+	/*
+	 *	Clear irq
+	 */
+	write_zsctrl(c, ERR_RES);
+	write_zsctrl(c, RES_H_IUS);
+}
+
+
+/**
+ *	z8530_tx - Handle a PIO transmit event
+ *	@c: Z8530 channel to process
+ *
+ *	Z8530 transmit interrupt handler for the PIO mode. The basic
+ *	idea is to attempt to keep the FIFO fed. We fill in as many bytes
+ *	as possible; it's quite possible that we won't keep up with the
+ *	data rate otherwise.
+ */
+ 
+static void z8530_tx(struct z8530_channel *c)
+{
+	while(c->txcount) {
+		/* FIFO full ? */
+		if(!(read_zsreg(c, R0)&4))
+			return;
+		c->txcount--;
+		/*
+		 *	Shovel out the byte
+		 */
+		write_zsreg(c, R8, *c->tx_ptr++);
+		write_zsctrl(c, RES_H_IUS);
+		/* We are about to underflow */
+		if(c->txcount==0)
+		{
+			write_zsctrl(c, RES_EOM_L);
+			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
+		}
+	}
+
+	
+	/*
+	 *	End of frame TX - fire another one
+	 */
+	 
+	write_zsctrl(c, RES_Tx_P);
+
+	z8530_tx_done(c);	 
+	write_zsctrl(c, RES_H_IUS);
+}
+
+/**
+ *	z8530_status - Handle a PIO status exception
+ *	@chan: Z8530 channel to process
+ *
+ *	A status event occurred in PIO synchronous mode. There are several
+ *	reasons the chip will bother us here. A transmit underrun means we
+ *	failed to feed the chip fast enough and just broke a packet. A DCD
+ *	change is a line up or down.
+ */
+
+static void z8530_status(struct z8530_channel *chan)
+{
+	u8 status, altered;
+
+	status = read_zsreg(chan, R0);
+	altered = chan->status ^ status;
+
+	chan->status = status;
+
+	if (status & TxEOM) {
+/*		printk("%s: Tx underrun.\n", chan->dev->name); */
+		chan->netdevice->stats.tx_fifo_errors++;
+		write_zsctrl(chan, ERR_RES);
+		z8530_tx_done(chan);
+	}
+
+	if (altered & chan->dcdcheck)
+	{
+		if (status & chan->dcdcheck) {
+			pr_info("%s: DCD raised\n", chan->dev->name);
+			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
+			if (chan->netdevice)
+				netif_carrier_on(chan->netdevice);
+		} else {
+			pr_info("%s: DCD lost\n", chan->dev->name);
+			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
+			z8530_flush_fifo(chan);
+			if (chan->netdevice)
+				netif_carrier_off(chan->netdevice);
+		}
+
+	}
+	write_zsctrl(chan, RES_EXT_INT);
+	write_zsctrl(chan, RES_H_IUS);
+}
+
+struct z8530_irqhandler z8530_sync =
+{
+	z8530_rx,
+	z8530_tx,
+	z8530_status
+};
+
+EXPORT_SYMBOL(z8530_sync);
+
+/**
+ *	z8530_dma_rx - Handle a DMA RX event
+ *	@chan: Channel to handle
+ *
+ *	Non bus mastering DMA interfaces for the Z8x30 devices. This
+ *	is really pretty PC specific. The DMA mode means that most receive
+ *	events are handled by the DMA hardware. We get a kick here only if
+ *	a frame ended.
+ */
+ 
+static void z8530_dma_rx(struct z8530_channel *chan)
+{
+	if(chan->rxdma_on)
+	{
+		/* Special condition check only */
+		u8 status;
+	
+		read_zsreg(chan, R7);
+		read_zsreg(chan, R6);
+		
+		status=read_zsreg(chan, R1);
+	
+		if(status&END_FR)
+		{
+			z8530_rx_done(chan);	/* Fire up the next one */
+		}		
+		write_zsctrl(chan, ERR_RES);
+		write_zsctrl(chan, RES_H_IUS);
+	}
+	else
+	{
+		/* DMA is off right now, drain the slow way */
+		z8530_rx(chan);
+	}	
+}
+
+/**
+ *	z8530_dma_tx - Handle a DMA TX event
+ *	@chan:	The Z8530 channel to handle
+ *
+ *	We have received an interrupt while doing DMA transmissions. It
+ *	shouldn't happen. Scream loudly if it does.
+ */
+ 
+static void z8530_dma_tx(struct z8530_channel *chan)
+{
+	if(!chan->dma_tx)
+	{
+		pr_warn("Hey who turned the DMA off?\n");
+		z8530_tx(chan);
+		return;
+	}
+	/* This shouldn't occur in DMA mode */
+	pr_err("DMA tx - bogus event!\n");
+	z8530_tx(chan);
+}
+
+/**
+ *	z8530_dma_status - Handle a DMA status exception
+ *	@chan: Z8530 channel to process
+ *	
+ *	A status event occurred on the Z8530. We receive these for two reasons
+ *	when in DMA mode. Firstly if we finished a packet transfer we get one
+ *	and kick the next packet out. Secondly we may see a DCD change.
+ *
+ */
+ 
+static void z8530_dma_status(struct z8530_channel *chan)
+{
+	u8 status, altered;
+
+	status=read_zsreg(chan, R0);
+	altered=chan->status^status;
+	
+	chan->status=status;
+
+
+	if(chan->dma_tx)
+	{
+		if(status&TxEOM)
+		{
+			unsigned long flags;
+	
+			flags=claim_dma_lock();
+			disable_dma(chan->txdma);
+			clear_dma_ff(chan->txdma);	
+			chan->txdma_on=0;
+			release_dma_lock(flags);
+			z8530_tx_done(chan);
+		}
+	}
+
+	if (altered & chan->dcdcheck)
+	{
+		if (status & chan->dcdcheck) {
+			pr_info("%s: DCD raised\n", chan->dev->name);
+			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
+			if (chan->netdevice)
+				netif_carrier_on(chan->netdevice);
+		} else {
+			pr_info("%s: DCD lost\n", chan->dev->name);
+			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
+			z8530_flush_fifo(chan);
+			if (chan->netdevice)
+				netif_carrier_off(chan->netdevice);
+		}
+	}
+
+	write_zsctrl(chan, RES_EXT_INT);
+	write_zsctrl(chan, RES_H_IUS);
+}
+
+static struct z8530_irqhandler z8530_dma_sync = {
+	z8530_dma_rx,
+	z8530_dma_tx,
+	z8530_dma_status
+};
+
+static struct z8530_irqhandler z8530_txdma_sync = {
+	z8530_rx,
+	z8530_dma_tx,
+	z8530_dma_status
+};
+
+/**
+ *	z8530_rx_clear - Handle RX events from a stopped chip
+ *	@c: Z8530 channel to shut up
+ *
+ *	Receive interrupt vectors for a Z8530 that is in 'parked' mode.
+ *	For machines with PCI Z85x30 cards, or level triggered interrupts
+ *	(eg the MacII) we must clear the interrupt cause or die.
+ */
+
+
+static void z8530_rx_clear(struct z8530_channel *c)
+{
+	/*
+	 *	Data and status bytes
+	 */
+	u8 stat;
+
+	read_zsdata(c);
+	stat=read_zsreg(c, R1);
+	
+	if(stat&END_FR)
+		write_zsctrl(c, RES_Rx_CRC);
+	/*
+	 *	Clear irq
+	 */
+	write_zsctrl(c, ERR_RES);
+	write_zsctrl(c, RES_H_IUS);
+}
+
+/**
+ *	z8530_tx_clear - Handle TX events from a stopped chip
+ *	@c: Z8530 channel to shut up
+ *
+ *	Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
+ *	For machines with PCI Z85x30 cards, or level triggered interrupts
+ *	(eg the MacII) we must clear the interrupt cause or die.
+ */
+
+static void z8530_tx_clear(struct z8530_channel *c)
+{
+	write_zsctrl(c, RES_Tx_P);
+	write_zsctrl(c, RES_H_IUS);
+}
+
+/**
+ *	z8530_status_clear - Handle status events from a stopped chip
+ *	@chan: Z8530 channel to shut up
+ *
+ *	Status interrupt vectors for a Z8530 that is in 'parked' mode.
+ *	For machines with PCI Z85x30 cards, or level triggered interrupts
+ *	(eg the MacII) we must clear the interrupt cause or die.
+ */
+
+static void z8530_status_clear(struct z8530_channel *chan)
+{
+	u8 status=read_zsreg(chan, R0);
+	if(status&TxEOM)
+		write_zsctrl(chan, ERR_RES);
+	write_zsctrl(chan, RES_EXT_INT);
+	write_zsctrl(chan, RES_H_IUS);
+}
+
+struct z8530_irqhandler z8530_nop=
+{
+	z8530_rx_clear,
+	z8530_tx_clear,
+	z8530_status_clear
+};
+
+
+EXPORT_SYMBOL(z8530_nop);
+
+/**
+ *	z8530_interrupt - Handle an interrupt from a Z8530
+ *	@irq: 	Interrupt number
+ *	@dev_id: The Z8530 device that is interrupting.
+ *
+ *	A Z85[2]30 device has stuck its hand in the air for attention.
+ *	We scan both the channels on the chip for events and then call
+ *	the channel specific call backs for each channel that has events.
+ *	We have to use callback functions because the two channels can be
+ *	in different modes.
+ *
+ *	Locking is done for the handlers. Note that locking is done
+ *	at the chip level (the 5uS delay issue is per chip not per
+ *	channel). c->lock for both channels points to dev->lock
+ */
+
+irqreturn_t z8530_interrupt(int irq, void *dev_id)
+{
+	struct z8530_dev *dev=dev_id;
+	u8 uninitialized_var(intr);
+	static volatile int locker=0;
+	int work=0;
+	struct z8530_irqhandler *irqs;
+	
+	if(locker)
+	{
+		pr_err("IRQ re-enter\n");
+		return IRQ_NONE;
+	}
+	locker=1;
+
+	spin_lock(&dev->lock);
+
+	while(++work<5000)
+	{
+
+		intr = read_zsreg(&dev->chanA, R3);
+		if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
+			break;
+	
+		/* This holds the IRQ status. On the 8530 you must read it from chan 
+		   A even though it applies to the whole chip */
+		
+		/* Now walk the chip and see what it is wanting - it may be
+		   an IRQ for someone else remember */
+		   
+		irqs=dev->chanA.irqs;
+
+		if(intr & (CHARxIP|CHATxIP|CHAEXT))
+		{
+			if(intr&CHARxIP)
+				irqs->rx(&dev->chanA);
+			if(intr&CHATxIP)
+				irqs->tx(&dev->chanA);
+			if(intr&CHAEXT)
+				irqs->status(&dev->chanA);
+		}
+
+		irqs=dev->chanB.irqs;
+
+		if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
+		{
+			if(intr&CHBRxIP)
+				irqs->rx(&dev->chanB);
+			if(intr&CHBTxIP)
+				irqs->tx(&dev->chanB);
+			if(intr&CHBEXT)
+				irqs->status(&dev->chanB);
+		}
+	}
+	spin_unlock(&dev->lock);
+	if(work==5000)
+		pr_err("%s: interrupt jammed - abort(0x%X)!\n",
+		       dev->name, intr);
+	/* Ok all done */
+	locker=0;
+	return IRQ_HANDLED;
+}
+
+EXPORT_SYMBOL(z8530_interrupt);
+
+static const u8 reg_init[16]=
+{
+	0,0,0,0,
+	0,0,0,0,
+	0,0,0,0,
+	0x55,0,0,0
+};
+
+
+/**
+ *	z8530_sync_open - Open a Z8530 channel for PIO
+ *	@dev:	The network interface we are using
+ *	@c:	The Z8530 channel to open in synchronous PIO mode
+ *
+ *	Switch a Z8530 into synchronous mode without DMA assist. We
+ *	raise the RTS/DTR and commence network operation.
+ */
+ 
+int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(c->lock, flags);
+
+	c->sync = 1;
+	c->mtu = dev->mtu+64;
+	c->count = 0;
+	c->skb = NULL;
+	c->skb2 = NULL;
+	c->irqs = &z8530_sync;
+
+	/* This loads the double buffer up */
+	z8530_rx_done(c);	/* Load the frame ring */
+	z8530_rx_done(c);	/* Load the backup frame */
+	z8530_rtsdtr(c,1);
+	c->dma_tx = 0;
+	c->regs[R1]|=TxINT_ENAB;
+	write_zsreg(c, R1, c->regs[R1]);
+	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+
+	spin_unlock_irqrestore(c->lock, flags);
+	return 0;
+}
+
+
+EXPORT_SYMBOL(z8530_sync_open);
+
+/**
+ *	z8530_sync_close - Close a PIO Z8530 channel
+ *	@dev: Network device to close
+ *	@c: Z8530 channel to disassociate and move to idle
+ *
+ *	Close down a Z8530 interface and switch its interrupt handlers
+ *	to discard future events.
+ */
+ 
+int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
+{
+	u8 chk;
+	unsigned long flags;
+	
+	spin_lock_irqsave(c->lock, flags);
+	c->irqs = &z8530_nop;
+	c->max = 0;
+	c->sync = 0;
+	
+	chk=read_zsreg(c,R0);
+	write_zsreg(c, R3, c->regs[R3]);
+	z8530_rtsdtr(c,0);
+
+	spin_unlock_irqrestore(c->lock, flags);
+	return 0;
+}
+
+EXPORT_SYMBOL(z8530_sync_close);
+
+/**
+ *	z8530_sync_dma_open - Open a Z8530 for DMA I/O
+ *	@dev: The network device to attach
+ *	@c: The Z8530 channel to configure in sync DMA mode.
+ *
+ *	Set up a Z85x30 device for synchronous DMA in both directions. Two
+ *	ISA DMA channels must be available for this to work. We assume ISA
+ *	DMA driven I/O and PC limits on access.
+ */
+ 
+int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
+{
+	unsigned long cflags, dflags;
+	
+	c->sync = 1;
+	c->mtu = dev->mtu+64;
+	c->count = 0;
+	c->skb = NULL;
+	c->skb2 = NULL;
+	/*
+	 *	Load the DMA interfaces up
+	 */
+	c->rxdma_on = 0;
+	c->txdma_on = 0;
+	
+	/*
+	 *	Allocate the DMA flip buffers. Limit by page size.
+	 *	Everyone runs 1500 mtu or less on wan links so this
+	 *	should be fine.
+	 */
+	 
+	if(c->mtu  > PAGE_SIZE/2)
+		return -EMSGSIZE;
+	 
+	c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
+	if(c->rx_buf[0]==NULL)
+		return -ENOBUFS;
+	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
+	
+	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
+	if(c->tx_dma_buf[0]==NULL)
+	{
+		free_page((unsigned long)c->rx_buf[0]);
+		c->rx_buf[0]=NULL;
+		return -ENOBUFS;
+	}
+	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;
+
+	c->tx_dma_used=0;
+	c->dma_tx = 1;
+	c->dma_num=0;
+	c->dma_ready=1;
+	
+	/*
+	 *	Enable DMA control mode
+	 */
+
+	spin_lock_irqsave(c->lock, cflags);
+	 
+	/*
+	 *	TX DMA via DIR/REQ
+	 */
+	 
+	c->regs[R14]|= DTRREQ;
+	write_zsreg(c, R14, c->regs[R14]);     
+
+	c->regs[R1]&= ~TxINT_ENAB;
+	write_zsreg(c, R1, c->regs[R1]);
+	
+	/*
+	 *	RX DMA via W/Req
+	 */	 
+
+	c->regs[R1]|= WT_FN_RDYFN;
+	c->regs[R1]|= WT_RDY_RT;
+	c->regs[R1]|= INT_ERR_Rx;
+	c->regs[R1]&= ~TxINT_ENAB;
+	write_zsreg(c, R1, c->regs[R1]);
+	c->regs[R1]|= WT_RDY_ENAB;
+	write_zsreg(c, R1, c->regs[R1]);            
+	
+	/*
+	 *	DMA interrupts
+	 */
+	 
+	/*
+	 *	Set up the DMA configuration
+	 */	
+	 
+	dflags=claim_dma_lock();
+	 
+	disable_dma(c->rxdma);
+	clear_dma_ff(c->rxdma);
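+	/* 0x10 ORed into the mode is the 8237 auto-initialise bit */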
+	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
+	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
+	set_dma_count(c->rxdma, c->mtu);
+	enable_dma(c->rxdma);
+
+	disable_dma(c->txdma);
+	clear_dma_ff(c->txdma);
+	set_dma_mode(c->txdma, DMA_MODE_WRITE);
+	disable_dma(c->txdma);
+	
+	release_dma_lock(dflags);
+	
+	/*
+	 *	Select the DMA interrupt handlers
+	 */
+
+	c->rxdma_on = 1;
+	c->txdma_on = 1;
+	c->tx_dma_used = 1;
+	 
+	c->irqs = &z8530_dma_sync;
+	z8530_rtsdtr(c,1);
+	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+
+	spin_unlock_irqrestore(c->lock, cflags);
+	
+	return 0;
+}
+
+EXPORT_SYMBOL(z8530_sync_dma_open);
+
+/**
+ *	z8530_sync_dma_close - Close down DMA I/O
+ *	@dev: Network device to detach
+ *	@c: Z8530 channel to move into discard mode
+ *
+ *	Shut down a DMA mode synchronous interface. Halt the DMA, and
+ *	free the buffers.
+ */
+ 
+int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
+{
+	u8 chk;
+	unsigned long flags;
+	
+	c->irqs = &z8530_nop;
+	c->max = 0;
+	c->sync = 0;
+	
+	/*
+	 *	Disable the PC DMA channels
+	 */
+	
+	flags=claim_dma_lock(); 
+	disable_dma(c->rxdma);
+	clear_dma_ff(c->rxdma);
+	
+	c->rxdma_on = 0;
+	
+	disable_dma(c->txdma);
+	clear_dma_ff(c->txdma);
+	release_dma_lock(flags);
+	
+	c->txdma_on = 0;
+	c->tx_dma_used = 0;
+
+	spin_lock_irqsave(c->lock, flags);
+
+	/*
+	 *	Disable DMA control mode
+	 */
+	 
+	c->regs[R1]&= ~WT_RDY_ENAB;
+	write_zsreg(c, R1, c->regs[R1]);            
+	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
+	c->regs[R1]|= INT_ALL_Rx;
+	write_zsreg(c, R1, c->regs[R1]);
+	c->regs[R14]&= ~DTRREQ;
+	write_zsreg(c, R14, c->regs[R14]);   
+	
+	if(c->rx_buf[0])
+	{
+		free_page((unsigned long)c->rx_buf[0]);
+		c->rx_buf[0]=NULL;
+	}
+	if(c->tx_dma_buf[0])
+	{
+		free_page((unsigned  long)c->tx_dma_buf[0]);
+		c->tx_dma_buf[0]=NULL;
+	}
+	chk=read_zsreg(c,R0);
+	write_zsreg(c, R3, c->regs[R3]);
+	z8530_rtsdtr(c,0);
+
+	spin_unlock_irqrestore(c->lock, flags);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(z8530_sync_dma_close);
+
+/**
+ *	z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
+ *	@dev: The network device to attach
+ *	@c: The Z8530 channel to configure in sync DMA mode.
+ *
+ *	Set up a Z85x30 device for synchronous DMA transmission. One
+ *	ISA DMA channel must be available for this to work. The receive
+ *	side is run in PIO mode, but then it has the bigger FIFO.
+ */
+
+int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
+{
+	unsigned long cflags, dflags;
+
+	printk("Opening sync interface for TX-DMA\n");
+	c->sync = 1;
+	c->mtu = dev->mtu+64;
+	c->count = 0;
+	c->skb = NULL;
+	c->skb2 = NULL;
+	
+	/*
+	 *	Allocate the DMA flip buffers. Limit by page size.
+	 *	Everyone runs 1500 mtu or less on wan links so this
+	 *	should be fine.
+	 */
+	 
+	if(c->mtu  > PAGE_SIZE/2)
+		return -EMSGSIZE;
+	 
+	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
+	if(c->tx_dma_buf[0]==NULL)
+		return -ENOBUFS;
+
+	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
+
+
+	spin_lock_irqsave(c->lock, cflags);
+
+	/*
+	 *	Load the PIO receive ring
+	 */
+
+	z8530_rx_done(c);
+	z8530_rx_done(c);
+
+ 	/*
+	 *	Load the DMA interfaces up
+	 */
+
+	c->rxdma_on = 0;
+	c->txdma_on = 0;
+	
+	c->tx_dma_used=0;
+	c->dma_num=0;
+	c->dma_ready=1;
+	c->dma_tx = 1;
+
+ 	/*
+	 *	Enable DMA control mode
+	 */
+
+ 	/*
+	 *	TX DMA via DIR/REQ
+ 	 */
+	c->regs[R14]|= DTRREQ;
+	write_zsreg(c, R14, c->regs[R14]);     
+	
+	c->regs[R1]&= ~TxINT_ENAB;
+	write_zsreg(c, R1, c->regs[R1]);
+	
+	/*
+	 *	Set up the DMA configuration
+	 */	
+	 
+	dflags = claim_dma_lock();
+
+	disable_dma(c->txdma);
+	clear_dma_ff(c->txdma);
+	set_dma_mode(c->txdma, DMA_MODE_WRITE);
+	disable_dma(c->txdma);
+
+	release_dma_lock(dflags);
+	
+	/*
+	 *	Select the DMA interrupt handlers
+	 */
+
+	c->rxdma_on = 0;
+	c->txdma_on = 1;
+	c->tx_dma_used = 1;
+	 
+	c->irqs = &z8530_txdma_sync;
+	z8530_rtsdtr(c,1);
+	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+	spin_unlock_irqrestore(c->lock, cflags);
+	
+	return 0;
+}
+
+EXPORT_SYMBOL(z8530_sync_txdma_open);
+
+/**
+ *	z8530_sync_txdma_close - Close down a TX driven DMA channel
+ *	@dev: Network device to detach
+ *	@c: Z8530 channel to move into discard mode
+ *
+ *	Shut down a DMA/PIO split mode synchronous interface. Halt the DMA, 
+ *	and  free the buffers.
+ */
+
+int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
+{
+	unsigned long dflags, cflags;
+	u8 chk;
+
+	
+	spin_lock_irqsave(c->lock, cflags);
+	
+	c->irqs = &z8530_nop;
+	c->max = 0;
+	c->sync = 0;
+	
+	/*
+	 *	Disable the PC DMA channels
+	 */
+	 
+	dflags = claim_dma_lock();
+
+	disable_dma(c->txdma);
+	clear_dma_ff(c->txdma);
+	c->txdma_on = 0;
+	c->tx_dma_used = 0;
+
+	release_dma_lock(dflags);
+
+	/*
+	 *	Disable DMA control mode
+	 */
+	 
+	c->regs[R1]&= ~WT_RDY_ENAB;
+	write_zsreg(c, R1, c->regs[R1]);            
+	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
+	c->regs[R1]|= INT_ALL_Rx;
+	write_zsreg(c, R1, c->regs[R1]);
+	c->regs[R14]&= ~DTRREQ;
+	write_zsreg(c, R14, c->regs[R14]);   
+	
+	if(c->tx_dma_buf[0])
+	{
+		free_page((unsigned long)c->tx_dma_buf[0]);
+		c->tx_dma_buf[0]=NULL;
+	}
+	chk=read_zsreg(c,R0);
+	write_zsreg(c, R3, c->regs[R3]);
+	z8530_rtsdtr(c,0);
+
+	spin_unlock_irqrestore(c->lock, cflags);
+	return 0;
+}
+
+
+EXPORT_SYMBOL(z8530_sync_txdma_close);
+
+
+/*
+ *	Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
+ *	it exists...
+ */
+ 
+static const char *z8530_type_name[]={
+	"Z8530",
+	"Z85C30",
+	"Z85230"
+};
+
+/**
+ *	z8530_describe - Uniformly describe a Z8530 port
+ *	@dev: Z8530 device to describe
+ *	@mapping: string holding mapping type (eg "I/O" or "Mem")
+ *	@io: the port value in question
+ *
+ *	Describe a Z8530 in a standard format. We must pass the I/O as
+ *	the port offset isn't predictable. The main reason for this function
+ *	is to try and get a common format of report.
+ */
+
+void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
+{
+	pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
+		dev->name, 
+		z8530_type_name[dev->type],
+		mapping,
+		Z8530_PORT_OF(io),
+		dev->irq);
+}
+
+EXPORT_SYMBOL(z8530_describe);
+
+/*
+ *	Locked operation part of the z8530 init code
+ */
+ 
+static inline int do_z8530_init(struct z8530_dev *dev)
+{
+	/* NOP the interrupt handlers first - we might get a
+	   floating IRQ transition when we reset the chip */
+	dev->chanA.irqs=&z8530_nop;
+	dev->chanB.irqs=&z8530_nop;
+	dev->chanA.dcdcheck=DCD;
+	dev->chanB.dcdcheck=DCD;
+
+	/* Reset the chip */
+	write_zsreg(&dev->chanA, R9, 0xC0);
+	udelay(200);
+	/* Now check its valid */
+	write_zsreg(&dev->chanA, R12, 0xAA);
+	if(read_zsreg(&dev->chanA, R12)!=0xAA)
+		return -ENODEV;
+	write_zsreg(&dev->chanA, R12, 0x55);
+	if(read_zsreg(&dev->chanA, R12)!=0x55)
+		return -ENODEV;
+		
+	dev->type=Z8530;
+	
+	/*
+	 *	See the application note.
+	 */
+	 
+	write_zsreg(&dev->chanA, R15, 0x01);
+	
+	/*
+	 *	If we can set the low bit of R15 then
+	 *	the chip is enhanced.
+	 */
+	 
+	if(read_zsreg(&dev->chanA, R15)==0x01)
+	{
+		/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
+		/* Put a char in the fifo */
+		write_zsreg(&dev->chanA, R8, 0);
+		if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
+			dev->type = Z85230;	/* Has a FIFO */
+		else
+			dev->type = Z85C30;	/* Z85C30, 1 byte FIFO */
+	}
+		
+	/*
+	 *	The code assumes R7' and friends are
+	 *	off. Use write_zsext() for these and keep
+	 *	this bit clear.
+	 */
+	 
+	write_zsreg(&dev->chanA, R15, 0);
+		
+	/*
+	 *	At this point it looks like the chip is behaving
+	 */
+	 
+	memcpy(dev->chanA.regs, reg_init, 16);
+	memcpy(dev->chanB.regs, reg_init ,16);
+	
+	return 0;
+}
+
+/**
+ *	z8530_init - Initialise a Z8530 device
+ *	@dev: Z8530 device to initialise.
+ *
+ *	Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
+ *	is present, identify the type and then program it to hopefully
+ *	keep quiet and behave. This matters a lot; a Z8530 in the wrong
+ *	state will sometimes get into stupid modes generating 10kHz
+ *	interrupt streams and the like.
+ *
+ *	We set the interrupt handler up to discard any events, in case
+ *	we get them during reset or setup.
+ *
+ *	Return 0 for success, or a negative value indicating the problem
+ *	in errno form.
+ */
+
+int z8530_init(struct z8530_dev *dev)
+{
+	unsigned long flags;
+	int ret;
+
+	/* Set up the chip level lock */
+	spin_lock_init(&dev->lock);
+	dev->chanA.lock = &dev->lock;
+	dev->chanB.lock = &dev->lock;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ret = do_z8530_init(dev);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return ret;
+}
+
+
+EXPORT_SYMBOL(z8530_init);
+
+/**
+ *	z8530_shutdown - Shutdown a Z8530 device
+ *	@dev: The Z8530 chip to shutdown
+ *
+ *	We set the interrupt handlers to silence any interrupts. We then 
+ *	reset the chip and wait 100uS to be sure the reset completed. Just
+ *	in case the caller then tries to do stuff.
+ *
+ *	This is called without the lock held
+ */
+ 
+int z8530_shutdown(struct z8530_dev *dev)
+{
+	unsigned long flags;
+	/* Reset the chip */
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->chanA.irqs=&z8530_nop;
+	dev->chanB.irqs=&z8530_nop;
+	write_zsreg(&dev->chanA, R9, 0xC0);
+	/* We must lock the udelay, the chip is offlimits here */
+	udelay(100);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return 0;
+}
+
+EXPORT_SYMBOL(z8530_shutdown);
+
+/**
+ *	z8530_channel_load - Load channel data
+ *	@c: Z8530 channel to configure
+ *	@rtable: table of register, value pairs
+ *	FIXME: ioctl to allow user uploaded tables
+ *
+ *	Load a Z8530 channel up from the system data. We use +16 to 
+ *	indicate the "prime" registers. The value 255 terminates the
+ *	table.
+ */
+
+int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(c->lock, flags);
+
+	while(*rtable!=255)
+	{
+		int reg=*rtable++;
+		if(reg>0x0F)
+			write_zsreg(c, R15, c->regs[15]|1);
+		write_zsreg(c, reg&0x0F, *rtable);
+		if(reg>0x0F)
+			write_zsreg(c, R15, c->regs[15]&~1);
+		c->regs[reg]=*rtable++;
+	}
+	c->rx_function=z8530_null_rx;
+	c->skb=NULL;
+	c->tx_skb=NULL;
+	c->tx_next_skb=NULL;
+	c->mtu=1500;
+	c->max=0;
+	c->count=0;
+	c->status=read_zsreg(c, R0);
+	c->sync=1;
+	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+
+	spin_unlock_irqrestore(c->lock, flags);
+	return 0;
+}
+
+EXPORT_SYMBOL(z8530_channel_load);
+
+
+/**
+ *	z8530_tx_begin - Begin packet transmission
+ *	@c: The Z8530 channel to kick
+ *
+ *	This is the speed sensitive side of transmission. If we are called
+ *	and no buffer is being transmitted we commence the next buffer. If
+ *	nothing is queued we idle the sync. 
+ *
+ *	Note: We are handling this code path in the interrupt path, keep it
+ *	fast or bad things will happen.
+ *
+ *	Called with the lock held.
+ */
+
+static void z8530_tx_begin(struct z8530_channel *c)
+{
+	unsigned long flags;
+	if(c->tx_skb)
+		return;
+		
+	c->tx_skb=c->tx_next_skb;
+	c->tx_next_skb=NULL;
+	c->tx_ptr=c->tx_next_ptr;
+	
+	if(c->tx_skb==NULL)
+	{
+		/* Idle on */
+		if(c->dma_tx)
+		{
+			flags=claim_dma_lock();
+			disable_dma(c->txdma);
+			/*
+			 *	Check if we crapped out.
+			 */
+			if (get_dma_residue(c->txdma))
+			{
+				c->netdevice->stats.tx_dropped++;
+				c->netdevice->stats.tx_fifo_errors++;
+			}
+			release_dma_lock(flags);
+		}
+		c->txcount=0;
+	}
+	else
+	{
+		c->txcount=c->tx_skb->len;
+		
+		
+		if(c->dma_tx)
+		{
+			/*
+			 *	FIXME. DMA is broken for the original 8530,
+			 *	on the older parts we need to set a flag and
+			 *	wait for a further TX interrupt to fire this
+			 *	stage off	
+			 */
+			 
+			flags=claim_dma_lock();
+			disable_dma(c->txdma);
+
+			/*
+			 *	These two are needed by the 8530/85C30
+			 *	and must be issued when idling.
+			 */
+			 
+			if(c->dev->type!=Z85230)
+			{
+				write_zsctrl(c, RES_Tx_CRC);
+				write_zsctrl(c, RES_EOM_L);
+			}	
+			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
+			clear_dma_ff(c->txdma);
+			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
+			set_dma_count(c->txdma, c->txcount);
+			enable_dma(c->txdma);
+			release_dma_lock(flags);
+			write_zsctrl(c, RES_EOM_L);
+			write_zsreg(c, R5, c->regs[R5]|TxENAB);
+		}
+		else
+		{
+
+			/* ABUNDER off */
+			write_zsreg(c, R10, c->regs[10]);
+			write_zsctrl(c, RES_Tx_CRC);
+	
+			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
+			{		
+				write_zsreg(c, R8, *c->tx_ptr++);
+				c->txcount--;
+			}
+
+		}
+	}
+	/*
+	 *	Since we emptied tx_skb we can ask for more
+	 */
+	netif_wake_queue(c->netdevice);
+}
+
+/**
+ *	z8530_tx_done - TX complete callback
+ *	@c: The channel that completed a transmit.
+ *
+ *	This is called when we complete a packet send. We wake the queue,
+ *	start the next packet going and then free the buffer of the existing
+ *	packet. This code is fairly timing sensitive.
+ *
+ *	Called with the register lock held.
+ */
+
+static void z8530_tx_done(struct z8530_channel *c)
+{
+	struct sk_buff *skb;
+
+	/* Actually this can happen.*/
+	if (c->tx_skb == NULL)
+		return;
+
+	skb = c->tx_skb;
+	c->tx_skb = NULL;
+	z8530_tx_begin(c);
+	c->netdevice->stats.tx_packets++;
+	c->netdevice->stats.tx_bytes += skb->len;
+	dev_kfree_skb_irq(skb);
+}
+
+/**
+ *	z8530_null_rx - Discard a packet
+ *	@c: The channel the packet arrived on
+ *	@skb: The buffer
+ *
+ *	We point the receive handler at this function when idle. Instead
+ *	of processing the frames we get to throw them away.
+ */
+ 
+void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
+{
+	dev_kfree_skb_any(skb);
+}
+
+EXPORT_SYMBOL(z8530_null_rx);
+
+/**
+ *	z8530_rx_done - Receive completion callback
+ *	@c: The channel that completed a receive
+ *
+ *	A new packet is complete. Our goal here is to get back into receive
+ *	mode as fast as possible. On the Z85230 we could change to using
+ *	ESCC mode, but on the older chips we have no choice. We flip to the
+ *	new buffer immediately in DMA mode so that the DMA of the next
+ *	frame can occur while we are copying the previous buffer to an sk_buff
+ *
+ *	Called with the lock held
+ */
+ 
+static void z8530_rx_done(struct z8530_channel *c)
+{
+	struct sk_buff *skb;
+	int ct;
+	
+	/*
+	 *	Is our receive engine in DMA mode
+	 */
+	 
+	if(c->rxdma_on)
+	{
+		/*
+		 *	Save the ready state and the buffer currently
+		 *	being used as the DMA target
+		 */
+		 
+		int ready=c->dma_ready;
+		unsigned char *rxb=c->rx_buf[c->dma_num];
+		unsigned long flags;
+		
+		/*
+		 *	Complete this DMA. Necessary to find the length
+		 */		
+		 
+		flags=claim_dma_lock();
+		
+		disable_dma(c->rxdma);
+		clear_dma_ff(c->rxdma);
+		c->rxdma_on=0;
+		ct=c->mtu-get_dma_residue(c->rxdma);
+		if(ct<0)
+			ct=2;	/* Shit happens.. */
+		c->dma_ready=0;
+		
+		/*
+		 *	Normal case: the other slot is free, start the next DMA
+		 *	into it immediately.
+		 */
+		 
+		if(ready)
+		{
+			c->dma_num^=1;
+			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
+			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
+			set_dma_count(c->rxdma, c->mtu);
+			c->rxdma_on = 1;
+			enable_dma(c->rxdma);
+			/* Stop any frames that we missed the head of 
+			   from passing */
+			write_zsreg(c, R0, RES_Rx_CRC);
+		}
+		else
+			/* Can't occur as we don't re-enable the DMA irq until
+			   after the flip is done */
+			netdev_warn(c->netdevice, "DMA flip overrun!\n");
+
+		release_dma_lock(flags);
+
+		/*
+		 *	Shove the old buffer into an sk_buff. We can't DMA
+		 *	directly into one on a PC - it might be above the 16Mb
+		 *	boundary. Optimisation - we could check to see if we
+		 *	can avoid the copy. Optimisation 2 - make the memcpy
+		 *	a copychecksum.
+		 */
+
+		skb = dev_alloc_skb(ct);
+		if (skb == NULL) {
+			c->netdevice->stats.rx_dropped++;
+			netdev_warn(c->netdevice, "Memory squeeze\n");
+		} else {
+			skb_put(skb, ct);
+			skb_copy_to_linear_data(skb, rxb, ct);
+			c->netdevice->stats.rx_packets++;
+			c->netdevice->stats.rx_bytes += ct;
+		}
+		c->dma_ready = 1;
+	} else {
+		RT_LOCK;
+		skb = c->skb;
+
+		/*
+		 *	The game we play for non DMA is similar. We want to
+		 *	get the controller set up for the next packet as fast
+		 *	as possible. We potentially only have one byte + the
+		 *	fifo length for this. Thus we want to flip to the new
+		 *	buffer and then mess around copying and allocating
+		 *	things. For the current case it doesn't matter but
+		 *	if you build a system where the sync irq isn't blocked
+		 *	by the kernel IRQ disable then you need only block the
+		 *	sync IRQ for the RT_LOCK area.
+		 *
+		 */
+		ct=c->count;
+
+		c->skb = c->skb2;
+		c->count = 0;
+		c->max = c->mtu;
+		if (c->skb) {
+			c->dptr = c->skb->data;
+			c->max = c->mtu;
+		} else {
+			c->count = 0;
+			c->max = 0;
+		}
+		RT_UNLOCK;
+
+		c->skb2 = dev_alloc_skb(c->mtu);
+		if (c->skb2 == NULL)
+			netdev_warn(c->netdevice, "memory squeeze\n");
+		else
+			skb_put(c->skb2, c->mtu);
+		c->netdevice->stats.rx_packets++;
+		c->netdevice->stats.rx_bytes += ct;
+	}
+	/*
+	 *	If we received a frame we must now process it.
+	 */
+	if (skb) {
+		skb_trim(skb, ct);
+		c->rx_function(c, skb);
+	} else {
+		c->netdevice->stats.rx_dropped++;
+		netdev_err(c->netdevice, "Lost a frame\n");
+	}
+}
+
+/**
+ *	spans_boundary - Check a packet can be ISA DMA'd
+ *	@skb: The buffer to check
+ *
+ *	Returns true if the buffer crosses a DMA boundary on a PC. The poor
+ *	thing can only DMA within a 64K block not across the edges of it.
+ */
+
+static inline int spans_boundary(struct sk_buff *skb)
+{
+	unsigned long a=(unsigned long)skb->data;
+	a^=(a+skb->len);
+	if(a&0x00010000)	/* If the 64K bit is different.. */
+		return 1;
+	return 0;
+}
+
+/**
+ *	z8530_queue_xmit - Queue a packet
+ *	@c: The channel to use
+ *	@skb: The packet to kick down the channel
+ *
+ *	Queue a packet for transmission. Because we have rather
+ *	hard to hit interrupt latencies for the Z85230 per packet 
+ *	even in DMA mode we do the flip to DMA buffer if needed here
+ *	not in the IRQ.
+ *
+ *	Called from the network code. The lock is not held at this 
+ *	point.
+ */
+
+netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
+{
+	unsigned long flags;
+	
+	netif_stop_queue(c->netdevice);
+	if(c->tx_next_skb)
+		return NETDEV_TX_BUSY;
+
+	
+	/* PC SPECIFIC - DMA limits */
+	
+	/*
+	 *	If we will DMA the transmit and it has gone over the ISA bus
+	 *	limit, then copy to the flip buffer
+	 */
+	 
+	if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
+	{
+		/* 
+		 *	Send the flip buffer, and flip the flippy bit.
+		 *	We don't care which is used when just so long as
+		 *	we never use the same buffer twice in a row. Since
+		 *	only one buffer can be going out at a time the other
+		 *	has to be safe.
+		 */
+		c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
+		c->tx_dma_used^=1;	/* Flip temp buffer */
+		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
+	}
+	else
+		c->tx_next_ptr=skb->data;	
+	RT_LOCK;
+	c->tx_next_skb=skb;
+	RT_UNLOCK;
+	
+	spin_lock_irqsave(c->lock, flags);
+	z8530_tx_begin(c);
+	spin_unlock_irqrestore(c->lock, flags);
+	
+	return NETDEV_TX_OK;
+}
+
+EXPORT_SYMBOL(z8530_queue_xmit);
+
+/*
+ *	Module support
+ */
+static const char banner[] __initdata =
+	KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
+
+static int __init z85230_init_driver(void)
+{
+	printk(banner);
+	return 0;
+}
+module_init(z85230_init_driver);
+
+static void __exit z85230_cleanup_driver(void)
+{
+}
+module_exit(z85230_cleanup_driver);
+
+MODULE_AUTHOR("Red Hat Inc.");
+MODULE_DESCRIPTION("Z85x30 synchronous driver core");
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/drivers/net/wan/z85230.h b/ap/os/linux/linux-3.4.x/drivers/net/wan/z85230.h
new file mode 100644
index 0000000..f29d554
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/net/wan/z85230.h
@@ -0,0 +1,448 @@
+/*
+ *	Description of Z8530 Z85C30 and Z85230 communications chips
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
+ */
+
+#ifndef _Z8530_H
+#define _Z8530_H
+
+#include <linux/tty.h>
+#include <linux/interrupt.h>
+
+/* Conversion routines to/from brg time constants from/to bits
+ * per second.
+ */
+#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
+#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)
+
+/* The Zilog register set */
+
+#define	FLAG	0x7e
+
+/* Write Register 0 */
+#define	R0	0		/* Register selects */
+#define	R1	1
+#define	R2	2
+#define	R3	3
+#define	R4	4
+#define	R5	5
+#define	R6	6
+#define	R7	7
+#define	R8	8
+#define	R9	9
+#define	R10	10
+#define	R11	11
+#define	R12	12
+#define	R13	13
+#define	R14	14
+#define	R15	15
+
+#define RPRIME	16		/* Indicate a prime register access on 230 */
+
+#define	NULLCODE	0	/* Null Code */
+#define	POINT_HIGH	0x8	/* Select upper half of registers */
+#define	RES_EXT_INT	0x10	/* Reset Ext. Status Interrupts */
+#define	SEND_ABORT	0x18	/* HDLC Abort */
+#define	RES_RxINT_FC	0x20	/* Reset RxINT on First Character */
+#define	RES_Tx_P	0x28	/* Reset TxINT Pending */
+#define	ERR_RES		0x30	/* Error Reset */
+#define	RES_H_IUS	0x38	/* Reset highest IUS */
+
+#define	RES_Rx_CRC	0x40	/* Reset Rx CRC Checker */
+#define	RES_Tx_CRC	0x80	/* Reset Tx CRC Checker */
+#define	RES_EOM_L	0xC0	/* Reset EOM latch */
+
+/* Write Register 1 */
+
+#define	EXT_INT_ENAB	0x1	/* Ext Int Enable */
+#define	TxINT_ENAB	0x2	/* Tx Int Enable */
+#define	PAR_SPEC	0x4	/* Parity is special condition */
+
+#define	RxINT_DISAB	0	/* Rx Int Disable */
+#define	RxINT_FCERR	0x8	/* Rx Int on First Character Only or Error */
+#define	INT_ALL_Rx	0x10	/* Int on all Rx Characters or error */
+#define	INT_ERR_Rx	0x18	/* Int on error only */
+
+#define	WT_RDY_RT	0x20	/* Wait/Ready on R/T */
+#define	WT_FN_RDYFN	0x40	/* Wait/FN/Ready FN */
+#define	WT_RDY_ENAB	0x80	/* Wait/Ready Enable */
+
+/* Write Register #2 (Interrupt Vector) */
+
+/* Write Register 3 */
+
+#define	RxENABLE	0x1	/* Rx Enable */
+#define	SYNC_L_INH	0x2	/* Sync Character Load Inhibit */
+#define	ADD_SM		0x4	/* Address Search Mode (SDLC) */
+#define	RxCRC_ENAB	0x8	/* Rx CRC Enable */
+#define	ENT_HM		0x10	/* Enter Hunt Mode */
+#define	AUTO_ENAB	0x20	/* Auto Enables */
+#define	Rx5		0x0	/* Rx 5 Bits/Character */
+#define	Rx7		0x40	/* Rx 7 Bits/Character */
+#define	Rx6		0x80	/* Rx 6 Bits/Character */
+#define	Rx8		0xc0	/* Rx 8 Bits/Character */
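+
+/* Illustrative example (not a requirement of the chip): a typical WR3
+ * value for 8-bit synchronous receive with CRC checking enabled would
+ * be Rx8 | RxCRC_ENAB | RxENABLE.
+ */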
+
+/* Write Register 4 */
+
+#define	PAR_ENA		0x1	/* Parity Enable */
+#define	PAR_EVEN	0x2	/* Parity Even/Odd* */
+
+#define	SYNC_ENAB	0	/* Sync Modes Enable */
+#define	SB1		0x4	/* 1 stop bit/char */
+#define	SB15		0x8	/* 1.5 stop bits/char */
+#define	SB2		0xc	/* 2 stop bits/char */
+
+#define	MONSYNC		0	/* 8 Bit Sync character */
+#define	BISYNC		0x10	/* 16 bit sync character */
+#define	SDLC		0x20	/* SDLC Mode (01111110 Sync Flag) */
+#define	EXTSYNC		0x30	/* External Sync Mode */
+
+#define	X1CLK		0x0	/* x1 clock mode */
+#define	X16CLK		0x40	/* x16 clock mode */
+#define	X32CLK		0x80	/* x32 clock mode */
+#define	X64CLK		0xC0	/* x64 clock mode */
+
+/* Write Register 5 */
+
+#define	TxCRC_ENAB	0x1	/* Tx CRC Enable */
+#define	RTS		0x2	/* RTS */
+#define	SDLC_CRC	0x4	/* SDLC/CRC-16 */
+#define	TxENAB		0x8	/* Tx Enable */
+#define	SND_BRK		0x10	/* Send Break */
+#define	Tx5		0x0	/* Tx 5 bits (or less)/character */
+#define	Tx7		0x20	/* Tx 7 bits/character */
+#define	Tx6		0x40	/* Tx 6 bits/character */
+#define	Tx8		0x60	/* Tx 8 bits/character */
+#define	DTR		0x80	/* DTR */
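+
+/* Illustrative example: a WR5 value for 8-bit synchronous transmit with
+ * CRC generation and the modem control lines raised might be
+ * Tx8 | TxCRC_ENAB | TxENAB | RTS | DTR.
+ */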
+
+/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
+
+/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
+
+/* Write Register 8 (transmit buffer) */
+
+/* Write Register 9 (Master interrupt control) */
+#define	VIS	1	/* Vector Includes Status */
+#define	NV	2	/* No Vector */
+#define	DLC	4	/* Disable Lower Chain */
+#define	MIE	8	/* Master Interrupt Enable */
+#define	STATHI	0x10	/* Status high */
+#define	NORESET	0	/* No reset on write to R9 */
+#define	CHRB	0x40	/* Reset channel B */
+#define	CHRA	0x80	/* Reset channel A */
+#define	FHWRES	0xc0	/* Force hardware reset */
+
+/* Write Register 10 (misc control bits) */
+#define	BIT6	1	/* 6 bit/8bit sync */
+#define	LOOPMODE 2	/* SDLC Loop mode */
+#define	ABUNDER	4	/* Abort/flag on SDLC xmit underrun */
+#define	MARKIDLE 8	/* Mark/flag on idle */
+#define	GAOP	0x10	/* Go active on poll */
+#define	NRZ	0	/* NRZ mode */
+#define	NRZI	0x20	/* NRZI mode */
+#define	FM1	0x40	/* FM1 (transition = 1) */
+#define	FM0	0x60	/* FM0 (transition = 0) */
+#define	CRCPS	0x80	/* CRC Preset I/O */
+
+/* Write Register 11 (Clock Mode control) */
+#define	TRxCXT	0	/* TRxC = Xtal output */
+#define	TRxCTC	1	/* TRxC = Transmit clock */
+#define	TRxCBR	2	/* TRxC = BR Generator Output */
+#define	TRxCDP	3	/* TRxC = DPLL output */
+#define	TRxCOI	4	/* TRxC O/I */
+#define	TCRTxCP	0	/* Transmit clock = RTxC pin */
+#define	TCTRxCP	8	/* Transmit clock = TRxC pin */
+#define	TCBR	0x10	/* Transmit clock = BR Generator output */
+#define	TCDPLL	0x18	/* Transmit clock = DPLL output */
+#define	RCRTxCP	0	/* Receive clock = RTxC pin */
+#define	RCTRxCP	0x20	/* Receive clock = TRxC pin */
+#define	RCBR	0x40	/* Receive clock = BR Generator output */
+#define	RCDPLL	0x60	/* Receive clock = DPLL output */
+#define	RTxCX	0x80	/* RTxC Xtal/No Xtal */
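+
+/* Illustrative example: to clock both directions from the on-chip baud
+ * rate generator and drive that clock out on the TRxC pin, WR11 could
+ * be programmed with RCBR | TCBR | TRxCBR | TRxCOI.
+ */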
+
+/* Write Register 12 (lower byte of baud rate generator time constant) */
+
+/* Write Register 13 (upper byte of baud rate generator time constant) */
+
+/* Write Register 14 (Misc control bits) */
+#define	BRENABL	1	/* Baud rate generator enable */
+#define	BRSRC	2	/* Baud rate generator source */
+#define	DTRREQ	4	/* DTR/Request function */
+#define	AUTOECHO 8	/* Auto Echo */
+#define	LOOPBAK	0x10	/* Local loopback */
+#define	SEARCH	0x20	/* Enter search mode */
+#define	RMC	0x40	/* Reset missing clock */
+#define	DISDPLL	0x60	/* Disable DPLL */
+#define	SSBR	0x80	/* Set DPLL source = BR generator */
+#define	SSRTxC	0xa0	/* Set DPLL source = RTxC */
+#define	SFMM	0xc0	/* Set FM mode */
+#define	SNRZI	0xe0	/* Set NRZI mode */
+
+/* Write Register 15 (external/status interrupt control) */
+#define PRIME	1	/* R5' etc register access (Z85C30/230 only) */
+#define	ZCIE	2	/* Zero count IE */
+#define FIFOE	4	/* Z85230 only */
+#define	DCDIE	8	/* DCD IE */
+#define	SYNCIE	0x10	/* Sync/hunt IE */
+#define	CTSIE	0x20	/* CTS IE */
+#define	TxUIE	0x40	/* Tx Underrun/EOM IE */
+#define	BRKIE	0x80	/* Break/Abort IE */
+
+
+/* Read Register 0 */
+#define	Rx_CH_AV	0x1	/* Rx Character Available */
+#define	ZCOUNT		0x2	/* Zero count */
+#define	Tx_BUF_EMP	0x4	/* Tx Buffer empty */
+#define	DCD		0x8	/* DCD */
+#define	SYNC_HUNT	0x10	/* Sync/hunt */
+#define	CTS		0x20	/* CTS */
+#define	TxEOM		0x40	/* Tx underrun */
+#define	BRK_ABRT	0x80	/* Break/Abort */
+
+/* Read Register 1 */
+#define	ALL_SNT		0x1	/* All sent */
+/* Residue Data for 8 Rx bits/char programmed */
+#define	RES3		0x8	/* 0/3 */
+#define	RES4		0x4	/* 0/4 */
+#define	RES5		0xc	/* 0/5 */
+#define	RES6		0x2	/* 0/6 */
+#define	RES7		0xa	/* 0/7 */
+#define	RES8		0x6	/* 0/8 */
+#define	RES18		0xe	/* 1/8 */
+#define	RES28		0x0	/* 2/8 */
+/* Special Rx Condition Interrupts */
+#define	PAR_ERR		0x10	/* Parity error */
+#define	Rx_OVR		0x20	/* Rx Overrun Error */
+#define	CRC_ERR		0x40	/* CRC/Framing Error */
+#define	END_FR		0x80	/* End of Frame (SDLC) */
+
+/* Read Register 2 (channel b only) - Interrupt vector */
+
+/* Read Register 3 (interrupt pending register) ch a only */
+#define	CHBEXT	0x1		/* Channel B Ext/Stat IP */
+#define	CHBTxIP	0x2		/* Channel B Tx IP */
+#define	CHBRxIP	0x4		/* Channel B Rx IP */
+#define	CHAEXT	0x8		/* Channel A Ext/Stat IP */
+#define	CHATxIP	0x10		/* Channel A Tx IP */
+#define	CHARxIP	0x20		/* Channel A Rx IP */
+
+/* Read Register 8 (receive data register) */
+
+/* Read Register 10  (misc status bits) */
+#define	ONLOOP	2		/* On loop */
+#define	LOOPSEND 0x10		/* Loop sending */
+#define	CLK2MIS	0x40		/* Two clocks missing */
+#define	CLK1MIS	0x80		/* One clock missing */
+
+/* Read Register 12 (lower byte of baud rate generator constant) */
+
+/* Read Register 13 (upper byte of baud rate generator constant) */
+
+/* Read Register 15 (value of WR 15) */
+
+
+/*
+ *	Interrupt handling functions for this SCC
+ */
+
+struct z8530_channel;
+ 
+struct z8530_irqhandler
+{
+	void (*rx)(struct z8530_channel *);
+	void (*tx)(struct z8530_channel *);
+	void (*status)(struct z8530_channel *);
+};
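+
+/*
+ *	Illustrative sketch: a board driver may install its own handler set
+ *	on a channel, although the z8530_sync, z8530_async and z8530_nop
+ *	vectors declared at the end of this header cover the common cases.
+ *	The handler functions named below are hypothetical.
+ *
+ *	static struct z8530_irqhandler example_handlers = {
+ *		.rx	= example_rx_ready,
+ *		.tx	= example_tx_done,
+ *		.status	= example_status_change,
+ *	};
+ *
+ *	chan->irqs = &example_handlers;
+ */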
+
+/*
+ *	A channel of the Z8530
+ */
+
+struct z8530_channel
+{
+	struct		z8530_irqhandler *irqs;		/* IRQ handlers */
+	/*
+	 *	Synchronous
+	 */
+	u16		count;		/* Bytes received */
+	u16		max;		/* Most we can receive this frame */
+	u16		mtu;		/* MTU of the device */
+	u8		*dptr;		/* Pointer into rx buffer */
+	struct sk_buff	*skb;		/* Buffer dptr points into */
+	struct sk_buff	*skb2;		/* Pending buffer */
+	u8		status;		/* Current DCD */
+	u8		dcdcheck;	/* which bit to check for line */
+	u8		sync;		/* Set if in sync mode */
+
+	u8		regs[32];	/* Register map for the chip */
+	u8		pendregs[32];	/* Pending register values */
+	
+	struct sk_buff 	*tx_skb;	/* Buffer being transmitted */
+	struct sk_buff  *tx_next_skb;	/* Next transmit buffer */
+	u8		*tx_ptr;	/* Byte pointer into the buffer */
+	u8		*tx_next_ptr;	/* Next pointer to use */
+	u8		*tx_dma_buf[2];	/* TX flip buffers for DMA */
+	u8		tx_dma_used;	/* Flip buffer usage toggler */
+	u16		txcount;	/* Count of bytes to transmit */
+	
+	void		(*rx_function)(struct z8530_channel *, struct sk_buff *);
+	
+	/*
+	 *	Sync DMA
+	 */
+	
+	u8		rxdma;		/* DMA channels */
+	u8		txdma;		
+	u8		rxdma_on;	/* DMA active if flag set */
+	u8		txdma_on;
+	u8		dma_num;	/* Buffer we are DMAing into */
+	u8		dma_ready;	/* Is the other buffer free */
+	u8		dma_tx;		/* TX is to use DMA */
+	u8		*rx_buf[2];	/* The flip buffers */
+	
+	/*
+	 *	System
+	 */
+	 
+	struct z8530_dev *dev;		/* Z85230 chip instance we are from */
+	unsigned long	ctrlio;		/* I/O ports */
+	unsigned long	dataio;
+
+	/*
+	 *	For PC we encode this way.
+	 */	
+#define Z8530_PORT_SLEEP	0x80000000
+#define Z8530_PORT_OF(x)	((x)&0xFFFF)
+
+	u32		rx_overrun;		/* Overruns - not done yet */
+	u32		rx_crc_err;
+
+	/*
+	 *	Bound device pointers
+	 */
+
+	void		*private;	/* For our owner */
+	struct net_device	*netdevice;	/* Network layer device */
+
+	/*
+	 *	Async features
+	 */
+
+	struct tty_struct 	*tty;		/* Attached terminal */
+	int			line;		/* Minor number */
+	wait_queue_head_t	open_wait;	/* Tasks waiting to open */
+	wait_queue_head_t	close_wait;	/* and for close to end */
+	unsigned long		event;		/* Pending events */
+	int			fdcount;    	/* # of fd on device */
+	int			blocked_open;	/* # of blocked opens */
+	int			x_char;		/* XON/XOFF char */
+	unsigned char 		*xmit_buf;	/* Transmit pointer */
+	int			xmit_head;	/* Transmit ring */
+	int			xmit_tail;
+	int			xmit_cnt;
+	int			flags;	
+	int			timeout;
+	int			xmit_fifo_size;	/* Transmit FIFO info */
+
+	int			close_delay;	/* Do we wait for drain on close? */
+	unsigned short		closing_wait;
+
+	/* We need to know the current clock divisor
+	 * to read the bps rate the chip has currently
+	 * loaded.
+	 */
+
+	unsigned char		clk_divisor;  /* May be 1, 16, 32, or 64 */
+	int			zs_baud;
+
+	int			magic;
+	int			baud_base;		/* Baud parameters */
+	int			custom_divisor;
+
+
+	unsigned char		tx_active; /* character is being xmitted */
+	unsigned char		tx_stopped; /* output is suspended */
+
+	spinlock_t		*lock;	  /* Device lock */
+};
+
+/*
+ *	Each Z853x0 device.
+ */
+
+struct z8530_dev
+{
+	char *name;	/* Device instance name */
+	struct z8530_channel chanA;	/* SCC channel A */
+	struct z8530_channel chanB;	/* SCC channel B */
+	int type;
+#define Z8530	0	/* NMOS dinosaur */	
+#define Z85C30	1	/* CMOS - better */
+#define Z85230	2	/* CMOS with real FIFO */
+	int irq;	/* Interrupt for the device */
+	int active;	/* Soft interrupt enable - the Mac doesn't 
+			   always have a hard disable on its 8530s... */
+	spinlock_t lock;
+};
+
+
+/*
+ *	Functions
+ */
+ 
+extern u8 z8530_dead_port[];
+extern u8 z8530_hdlc_kilostream_85230[];
+extern u8 z8530_hdlc_kilostream[];
+extern irqreturn_t z8530_interrupt(int, void *);
+extern void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
+extern int z8530_init(struct z8530_dev *);
+extern int z8530_shutdown(struct z8530_dev *);
+extern int z8530_sync_open(struct net_device *, struct z8530_channel *);
+extern int z8530_sync_close(struct net_device *, struct z8530_channel *);
+extern int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
+extern int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
+extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
+extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
+extern int z8530_channel_load(struct z8530_channel *, u8 *);
+extern netdev_tx_t z8530_queue_xmit(struct z8530_channel *c,
+					  struct sk_buff *skb);
+extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
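+
+/*
+ *	Bring-up sketch for one synchronous channel, roughly as a board
+ *	driver might do it (illustrative only: error handling is omitted,
+ *	the register offsets are board specific, and "dev", "netdev", "io"
+ *	and "irq" are assumed to have been set up by the caller):
+ *
+ *	dev->chanA.ctrlio = io + 1;
+ *	dev->chanA.dataio = io + 3;
+ *	dev->irq = irq;
+ *	request_irq(irq, z8530_interrupt, 0, "example", dev);
+ *	z8530_init(dev);
+ *	z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
+ *	z8530_sync_open(netdev, &dev->chanA);
+ *	z8530_describe(dev, "I/O", io);
+ */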
+
+
+/*
+ *	Standard interrupt vector sets
+ */
+ 
+extern struct z8530_irqhandler z8530_sync, z8530_async, z8530_nop;
+
+/*
+ *	Asynchronous Interfacing
+ */
+
+#define SERIAL_MAGIC 0x5301
+
+/*
+ * The size of the serial xmit buffer is 1 page, or 4096 bytes
+ */
+
+#define SERIAL_XMIT_SIZE 4096
+#define WAKEUP_CHARS	256
+
+/*
+ * Events are used to schedule things to happen at timer-interrupt
+ * time, instead of at rs interrupt time.
+ */
+#define RS_EVENT_WRITE_WAKEUP	0
+
+/* Internal flags used only by kernel/chr_drv/serial.c */
+#define ZILOG_INITIALIZED	0x80000000 /* Serial port was initialized */
+#define ZILOG_CALLOUT_ACTIVE	0x40000000 /* Call out device is active */
+#define ZILOG_NORMAL_ACTIVE	0x20000000 /* Normal device is active */
+#define ZILOG_BOOT_AUTOCONF	0x10000000 /* Autoconfigure port on bootup */
+#define ZILOG_CLOSING		0x08000000 /* Serial port is closing */
+#define ZILOG_CTS_FLOW		0x04000000 /* Do CTS flow control */
+#define ZILOG_CHECK_CD		0x02000000 /* i.e., CLOCAL */
+
+#endif /* !(_Z8530_H) */