[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/arch/parisc/Kconfig b/src/kernel/linux/v4.14/arch/parisc/Kconfig
new file mode 100644
index 0000000..89e684f
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/Kconfig
@@ -0,0 +1,384 @@
+# SPDX-License-Identifier: GPL-2.0
+config PARISC
+	def_bool y
+	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select HAVE_IDE
+	select HAVE_OPROFILE
+	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_SYSCALL_TRACEPOINTS
+	select ARCH_WANT_FRAME_POINTERS
+	select ARCH_HAS_ELF_RANDOMIZE
+	select ARCH_HAS_STRICT_KERNEL_RWX
+	select ARCH_HAS_UBSAN_SANITIZE_ALL
+	select ARCH_WANTS_UBSAN_NO_NULL
+	select ARCH_SUPPORTS_MEMORY_FAILURE
+	select RTC_CLASS
+	select RTC_DRV_GENERIC
+	select INIT_ALL_POSSIBLE
+	select HAVE_MEMBLOCK
+	select NO_BOOTMEM
+	select BUG
+	select BUILDTIME_EXTABLE_SORT
+	select HAVE_PERF_EVENTS
+	select HAVE_KERNEL_BZIP2
+	select HAVE_KERNEL_GZIP
+	select HAVE_KERNEL_LZ4
+	select HAVE_KERNEL_LZMA
+	select HAVE_KERNEL_LZO
+	select HAVE_KERNEL_XZ
+	select GENERIC_ATOMIC64 if !64BIT
+	select GENERIC_IRQ_PROBE
+	select GENERIC_PCI_IOMAP
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
+	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_STRNCPY_FROM_USER
+	select SYSCTL_ARCH_UNALIGN_ALLOW
+	select SYSCTL_EXCEPTION_TRACE
+	select HAVE_MOD_ARCH_SPECIFIC
+	select VIRT_TO_BUS
+	select MODULES_USE_ELF_RELA
+	select CLONE_BACKWARDS
+	select TTY # Needed for pdc_cons.c
+	select HAVE_DEBUG_STACKOVERFLOW
+	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_HASH
+	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_TRACEHOOK
+	select GENERIC_SCHED_CLOCK
+	select HAVE_UNSTABLE_SCHED_CLOCK if SMP
+	select GENERIC_CLOCKEVENTS
+	select ARCH_NO_COHERENT_DMA_MMAP
+	select CPU_NO_EFFICIENT_FFS
+
+	help
+	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
+	  in many of their workstations & servers (HP9000 700 and 800 series,
+	  and later HP3000 series).  The PA-RISC Linux project home page is
+	  at <http://www.parisc-linux.org/>.
+
+config CPU_BIG_ENDIAN
+	def_bool y
+
+# NOTE(review): a second, byte-identical "config CPU_BIG_ENDIAN" block was
+# present here in the baseline import; dropped as redundant (Kconfig merges
+# duplicate entries, so this is cosmetic only).
+config MMU
+	def_bool y
+
+config STACK_GROWSUP
+	def_bool y
+
+config GENERIC_LOCKBREAK
+	bool
+	default y
+	depends on SMP && PREEMPT
+
+config RWSEM_GENERIC_SPINLOCK
+	def_bool y
+
+config RWSEM_XCHGADD_ALGORITHM
+	bool
+
+config ARCH_HAS_ILOG2_U32
+	bool
+	default n
+
+config ARCH_HAS_ILOG2_U64
+	bool
+	default n
+
+config GENERIC_BUG
+	bool
+	default y
+	depends on BUG
+
+config GENERIC_HWEIGHT
+	bool
+	default y
+
+config GENERIC_CALIBRATE_DELAY
+	bool
+	default y
+
+config TIME_LOW_RES
+	bool
+	depends on SMP
+	default y
+
+# unless you want to implement ACPI on PA-RISC ... ;-)
+config PM
+	bool
+
+config STACKTRACE_SUPPORT
+	def_bool y
+
+config NEED_DMA_MAP_STATE
+	def_bool y
+
+config NEED_SG_DMA_LENGTH
+	def_bool y
+
+config ISA_DMA_API
+	bool
+
+config ARCH_MAY_HAVE_PC_FDC
+	bool
+	depends on BROKEN
+	default y
+
+config PGTABLE_LEVELS
+	int
+	default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
+	default 2
+
+config SYS_SUPPORTS_HUGETLBFS
+	def_bool y if PA20
+
+source "init/Kconfig"
+
+source "kernel/Kconfig.freezer"
+
+
+menu "Processor type and features"
+
+choice
+	prompt "Processor type"
+	default PA7000
+
+config PA7000
+	bool "PA7000/PA7100"
+	---help---
+	  This is the processor type of your CPU.  This information is
+	  used for optimizing purposes.  In order to compile a kernel
+	  that can run on all 32-bit PA CPUs (albeit not optimally fast),
+	  you can specify "PA7000" here.
+
+	  Specifying "PA8000" here will allow you to select a 64-bit kernel
+	  which is required on some machines.
+
+config PA7100LC
+	bool "PA7100LC"
+	help
+	  Select this option for the PCX-L processor, as used in the
+	  712, 715/64, 715/80, 715/100, 715/100XC, 725/100, 743, 748,
+	  D200, D210, D300, D310 and E-class
+
+config PA7200
+	bool "PA7200"
+	help
+	  Select this option for the PCX-T' processor, as used in the
+	  C100, C110, J100, J110, J210XC, D250, D260, D350, D360,
+	  K100, K200, K210, K220, K400, K410 and K420
+
+config PA7300LC
+	bool "PA7300LC"
+	help
+	  Select this option for the PCX-L2 processor, as used in the
+	  744, A180, B132L, B160L, B180L, C132L, C160L, C180L,
+	  D220, D230, D320 and D330.
+
+config PA8X00
+	bool "PA8000 and up"
+	help
+	  Select this option for PCX-U to PCX-W2 processors.
+
+endchoice
+
+# Define implied options from the CPU selection here
+
+config PA20
+	def_bool y
+	depends on PA8X00
+
+config PA11
+	def_bool y
+	depends on PA7000 || PA7100LC || PA7200 || PA7300LC
+
+config PREFETCH
+	def_bool y
+	depends on PA8X00 || PA7200
+
+config MLONGCALLS
+	bool "Enable the -mlong-calls compiler option for big kernels"
+	default y
+	depends on PA8X00
+	help
+	  If you configure the kernel to include many drivers built-in instead
+	  as modules, the kernel executable may become too big, so that the
+	  linker will not be able to resolve some long branches and fails to link
+	  your vmlinux kernel. In that case enabling this option will help you
+	  to overcome this limit by using the -mlong-calls compiler option.
+
+	  Usually you want to say N here, unless you e.g. want to build
+	  a kernel which includes all necessary drivers built-in and which can
+	  be used for TFTP booting without the need to have an initrd ramdisk.
+
+	  Enabling this option will probably slow down your kernel.
+
+config 64BIT
+	bool "64-bit kernel"
+	depends on PA8X00
+	help
+	  Enable this if you want to support 64bit kernel on PA-RISC platform.
+
+	  At the moment, only people willing to use more than 2GB of RAM,
+	  or having a 64bit-only capable PA-RISC machine should say Y here.
+
+	  Since there is no 64bit userland on PA-RISC, there is no point to
+	  enable this option otherwise. The 64bit kernel is significantly bigger
+	  and slower than the 32bit one.
+
+choice
+	prompt "Kernel page size"
+	default PARISC_PAGE_SIZE_4KB
+
+config PARISC_PAGE_SIZE_4KB
+	bool "4KB"
+	help
+	  This lets you select the page size of the kernel.  For best
+	  performance, a page size of 16KB is recommended.  For best
+	  compatibility with 32bit applications, a page size of 4KB should be
+	  selected (the vast majority of 32bit binaries work perfectly fine
+	  with a larger page size).
+
+	  4KB                For best 32bit compatibility
+	  16KB               For best performance
+	  64KB               For best performance, might give more overhead.
+
+	  If you don't know what to do, choose 4KB.
+
+config PARISC_PAGE_SIZE_16KB
+	bool "16KB"
+	depends on PA8X00
+
+config PARISC_PAGE_SIZE_64KB
+	bool "64KB"
+	depends on PA8X00
+
+endchoice
+
+config PARISC_SELF_EXTRACT
+	bool "Build kernel as self-extracting executable"
+	default y
+	help
+	  Say Y if you want to build the parisc kernel as a kind of
+	  self-extracting executable.
+
+	  If you say N here, the kernel will be compressed with gzip
+	  which can be loaded by the palo bootloader directly too.
+
+	  If you don't know what to do here, say Y.
+
+config SMP
+	bool "Symmetric multi-processing support"
+	---help---
+	  This enables support for systems with more than one CPU. If you have
+	  a system with only one CPU, say N. If you have a system with more
+	  than one CPU, say Y.
+
+	  If you say N here, the kernel will run on uni- and multiprocessor
+	  machines, but will use only one CPU of a multiprocessor machine. If
+	  you say Y here, the kernel will run on many, but not all,
+	  uniprocessor machines. On a uniprocessor machine, the kernel
+	  will run faster if you say N here.
+
+	  See also <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO
+	  available at <http://www.tldp.org/docs.html#howto>.
+
+	  If you don't know what to do here, say N.
+
+config IRQSTACKS
+	bool "Use separate kernel stacks when processing interrupts"
+	default y
+	help
+	  If you say Y here the kernel will use separate kernel stacks
+	  for handling hard and soft interrupts.  This can help avoid
+	  overflowing the process kernel stacks.
+
+config HOTPLUG_CPU
+	bool
+	default y if SMP
+
+config ARCH_SELECT_MEMORY_MODEL
+	def_bool y
+	depends on 64BIT
+
+config ARCH_DISCONTIGMEM_ENABLE
+	def_bool y
+	depends on 64BIT
+
+config ARCH_FLATMEM_ENABLE
+	def_bool y
+
+config ARCH_DISCONTIGMEM_DEFAULT
+	def_bool y
+	depends on ARCH_DISCONTIGMEM_ENABLE
+
+config NODES_SHIFT
+	int
+	default "3"
+	depends on NEED_MULTIPLE_NODES
+
+source "kernel/Kconfig.preempt"
+source "kernel/Kconfig.hz"
+source "mm/Kconfig"
+
+config COMPAT
+	def_bool y
+	depends on 64BIT
+
+config SYSVIPC_COMPAT
+	def_bool y
+	depends on COMPAT && SYSVIPC
+
+config AUDIT_ARCH
+	def_bool y
+
+config NR_CPUS
+	int "Maximum number of CPUs (2-32)"
+	range 2 32
+	depends on SMP
+	default "32"
+
+endmenu
+
+
+source "drivers/parisc/Kconfig"
+
+
+menu "Executable file formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "arch/parisc/Kconfig.debug"
+
+config SECCOMP
+	def_bool y
+	prompt "Enable seccomp to safely compute untrusted bytecode"
+	---help---
+	  This kernel feature is useful for number crunching applications
+	  that may need to compute untrusted bytecode during their
+	  execution. By using pipes or other transports made available to
+	  the process as file descriptors supporting the read/write
+	  syscalls, it's possible to isolate those applications in
+	  their own address space using seccomp. Once seccomp is
+	  enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
+	  and the task is only allowed to execute a few safe syscalls
+	  defined by each seccomp mode.
+
+	  If unsure, say Y. Only embedded should say N here.
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
diff --git a/src/kernel/linux/v4.14/arch/parisc/Kconfig.debug b/src/kernel/linux/v4.14/arch/parisc/Kconfig.debug
new file mode 100644
index 0000000..fb3507f
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/Kconfig.debug
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config TRACE_IRQFLAGS_SUPPORT
+	def_bool y
+
+endmenu
diff --git a/src/kernel/linux/v4.14/arch/parisc/Makefile b/src/kernel/linux/v4.14/arch/parisc/Makefile
new file mode 100644
index 0000000..01946eb
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/Makefile
@@ -0,0 +1,168 @@
+#
+# parisc/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to provide actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+# Portions Copyright (C) 1999 The Puffin Group
+#
+# Modified for PA-RISC Linux by Paul Lahaie, Alex deVries, 
+# Mike Shaver, Helge Deller and Martin K. Petersen
+#
+
+KBUILD_IMAGE := vmlinuz
+
+KBUILD_DEFCONFIG := default_defconfig
+
+NM		= sh $(srctree)/arch/parisc/nm
+CHECKFLAGS	+= -D__hppa__=1
+LIBGCC		= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+export LIBGCC
+
+ifdef CONFIG_64BIT
+UTS_MACHINE	:= parisc64
+CHECKFLAGS	+= -D__LP64__=1 -m64
+CC_ARCHES	= hppa64
+LD_BFD		:= elf64-hppa-linux
+else # 32-bit
+CC_ARCHES	= hppa hppa2.0 hppa1.1
+LD_BFD		:= elf32-hppa-linux
+endif
+
+export LD_BFD
+
+ifneq ($(SUBARCH),$(UTS_MACHINE))
+	ifeq ($(CROSS_COMPILE),)
+		CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
+		CROSS_COMPILE := $(call cc-cross-prefix, \
+			$(foreach a,$(CC_ARCHES), \
+			$(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
+	endif
+endif
+
+OBJCOPY_FLAGS =-O binary -R .note -R .comment -S
+
+cflags-y	:= -pipe
+
+# These flags should be implied by an hppa-linux configuration, but they
+# are not in gcc 3.2.
+cflags-y	+= -mno-space-regs
+
+# -mfast-indirect-calls is only relevant for 32-bit kernels.
+ifndef CONFIG_64BIT
+cflags-y	+= -mfast-indirect-calls
+endif
+
+# Currently we save and restore fpregs on all kernel entry/interruption paths.
+# If that gets optimized, we might need to disable the use of fpregs in the
+# kernel.
+cflags-y	+= -mdisable-fpregs
+
+# Without this, "ld -r" results in .text sections that are too big
+# (> 0x40000) for branches to reach stubs.
+cflags-y	+= -ffunction-sections
+
+# Use long jumps instead of long branches (needed if your linker fails to
+# link a too big vmlinux executable). Not enabled for building modules.
+ifdef CONFIG_MLONGCALLS
+KBUILD_CFLAGS_KERNEL += -mlong-calls
+endif
+
+# select which processor to optimise for
+cflags-$(CONFIG_PA7000)		+= -march=1.1 -mschedule=7100
+cflags-$(CONFIG_PA7200)		+= -march=1.1 -mschedule=7200
+cflags-$(CONFIG_PA7100LC)	+= -march=1.1 -mschedule=7100LC
+cflags-$(CONFIG_PA7300LC)	+= -march=1.1 -mschedule=7300
+cflags-$(CONFIG_PA8X00)		+= -march=2.0 -mschedule=8000
+
+head-y			:= arch/parisc/kernel/head.o 
+
+KBUILD_CFLAGS	+= $(cflags-y)
+
+kernel-y			:= mm/ kernel/ math-emu/
+
+core-y	+= $(addprefix arch/parisc/, $(kernel-y))
+libs-y	+= arch/parisc/lib/ $(LIBGCC)
+
+drivers-$(CONFIG_OPROFILE)		+= arch/parisc/oprofile/
+
+boot	:= arch/parisc/boot
+
+PALO := $(shell if (which palo 2>&1); then : ; \
+	elif [ -x /sbin/palo ]; then echo /sbin/palo; \
+	fi)
+
+PALOCONF := $(shell if [ -f $(src)/palo.conf ]; then echo $(src)/palo.conf; \
+	else echo $(obj)/palo.conf; \
+	fi)
+
+palo lifimage: vmlinuz
+	@if test ! -x "$(PALO)"; then \
+		echo 'ERROR: Please install palo first (apt-get install palo)';\
+		echo 'or build it from source and install it somewhere in your $$PATH';\
+		false; \
+	fi
+	@if test ! -f "$(PALOCONF)"; then \
+		cp $(src)/arch/parisc/defpalo.conf $(obj)/palo.conf; \
+		echo 'A generic palo config file ($(obj)/palo.conf) has been created for you.'; \
+		echo 'You should check it and re-run "make palo".'; \
+		echo 'WARNING: the "lifimage" file is now placed in this directory by default!'; \
+		false; \
+	fi
+	$(PALO) -f $(PALOCONF)
+
+BOOT_TARGETS    = zImage Image palo lifimage
+INSTALL_TARGETS = zinstall install
+
+PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+
+zImage: vmlinuz
+Image: vmlinux
+
+bzImage: vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+ifdef CONFIG_PARISC_SELF_EXTRACT
+vmlinuz: bzImage
+	$(OBJCOPY) $(boot)/bzImage $@
+else
+vmlinuz: vmlinux
+	@gzip -cf -9 $< > $@
+endif
+
+install:
+	$(CONFIG_SHELL) $(src)/arch/parisc/install.sh \
+			$(KERNELRELEASE) vmlinux System.map "$(INSTALL_PATH)"
+zinstall:
+	$(CONFIG_SHELL) $(src)/arch/parisc/install.sh \
+			$(KERNELRELEASE) vmlinuz System.map "$(INSTALL_PATH)"
+
+CLEAN_FILES	+= lifimage
+MRPROPER_FILES	+= palo.conf
+
+define archhelp
+	@echo  '* vmlinux	- Uncompressed kernel image (./vmlinux)'
+	@echo  '  vmlinuz	- Compressed kernel image (./vmlinuz)'
+	@echo  '  palo		- Bootable image (./lifimage)'
+	@echo  '  install	- Install uncompressed vmlinux kernel using'
+	@echo  '		  (your) ~/bin/$(INSTALLKERNEL) or'
+	@echo  '		  (distribution) /sbin/$(INSTALLKERNEL) or'
+	@echo  '		  copy to $$(INSTALL_PATH)'
+	@echo  '  zinstall	- Install compressed vmlinuz kernel'
+endef
+
+# we require gcc 3.3 or above to compile the kernel
+archprepare: checkbin
+checkbin:
+	@if test "$(cc-version)" -lt "0303"; then \
+		echo -n "Sorry, GCC v3.3 or above is required to build " ; \
+		echo "the kernel." ; \
+		false ; \
+	fi
diff --git a/src/kernel/linux/v4.14/arch/parisc/boot/.gitignore b/src/kernel/linux/v4.14/arch/parisc/boot/.gitignore
new file mode 100644
index 0000000..017d591
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/boot/.gitignore
@@ -0,0 +1,2 @@
+image
+bzImage
diff --git a/src/kernel/linux/v4.14/arch/parisc/boot/Makefile b/src/kernel/linux/v4.14/arch/parisc/boot/Makefile
new file mode 100644
index 0000000..cad68a5
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/boot/Makefile
@@ -0,0 +1,26 @@
+#
+# Makefile for the linux parisc-specific parts of the boot image creator.
+#
+
+COMPILE_VERSION := __linux_compile_version_id__`hostname |  \
+			tr -c '[0-9A-Za-z]' '_'`__`date | \
+			tr -c '[0-9A-Za-z]' '_'`_t
+
+ccflags-y  := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I.
+
+targets := image
+targets += bzImage
+subdir- := compressed
+
+$(obj)/image: vmlinux FORCE
+	$(call if_changed,objcopy)
+
+$(obj)/bzImage: $(obj)/compressed/vmlinux FORCE
+	$(call if_changed,objcopy)
+
+$(obj)/compressed/vmlinux: FORCE
+	$(Q)$(MAKE) $(build)=$(obj)/compressed $@
+
+install: $(CONFIGURE) $(obj)/bzImage
+	sh -x  $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
+	      System.map "$(INSTALL_PATH)"
diff --git a/src/kernel/linux/v4.14/arch/parisc/boot/compressed/.gitignore b/src/kernel/linux/v4.14/arch/parisc/boot/compressed/.gitignore
new file mode 100644
index 0000000..ae06b9b
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/boot/compressed/.gitignore
@@ -0,0 +1,3 @@
+sizes.h
+vmlinux
+vmlinux.lds
diff --git a/src/kernel/linux/v4.14/arch/parisc/boot/compressed/Makefile b/src/kernel/linux/v4.14/arch/parisc/boot/compressed/Makefile
new file mode 100644
index 0000000..7d7e594
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/boot/compressed/Makefile
@@ -0,0 +1,86 @@
+#
+# linux/arch/parisc/boot/compressed/Makefile
+#
+# create a compressed self-extracting vmlinux image from the original vmlinux
+#
+
+KCOV_INSTRUMENT := n
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
+
+targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
+targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
+targets += misc.o piggy.o sizes.h head.o real2.o firmware.o
+
+KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
+KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os
+ifndef CONFIG_64BIT
+KBUILD_CFLAGS += -mfast-indirect-calls
+endif
+
+OBJECTS += $(obj)/head.o $(obj)/real2.o $(obj)/firmware.o $(obj)/misc.o $(obj)/piggy.o
+
+# LDFLAGS_vmlinux := -X --whole-archive -e startup -T
+LDFLAGS_vmlinux := -X -e startup --as-needed -T
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) $(LIBGCC)
+	$(call if_changed,ld)
+
+sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\|parisc_kernel_start\)$$/\#define SZ\2 0x\1/p'
+
+quiet_cmd_sizes = GEN $@
+      cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@
+
+$(obj)/sizes.h: vmlinux
+	$(call if_changed,sizes)
+
+AFLAGS_head.o += -I$(objtree)/$(obj) -DBOOTLOADER
+$(obj)/head.o: $(obj)/sizes.h
+
+CFLAGS_misc.o += -I$(objtree)/$(obj)
+$(obj)/misc.o: $(obj)/sizes.h
+
+$(obj)/firmware.o: $(obj)/firmware.c
+$(obj)/firmware.c: $(srctree)/arch/$(SRCARCH)/kernel/firmware.c
+	$(call cmd,shipped)
+
+AFLAGS_real2.o += -DBOOTLOADER
+$(obj)/real2.o: $(obj)/real2.S
+$(obj)/real2.S: $(srctree)/arch/$(SRCARCH)/kernel/real2.S
+	$(call cmd,shipped)
+
+$(obj)/misc.o: $(obj)/sizes.h
+
+CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER
+$(obj)/vmlinux.lds: $(obj)/sizes.h
+
+OBJCOPYFLAGS_vmlinux.bin := -O binary -R .comment -S
+$(obj)/vmlinux.bin: vmlinux
+	$(call if_changed,objcopy)
+
+vmlinux.bin.all-y := $(obj)/vmlinux.bin
+
+suffix-$(CONFIG_KERNEL_GZIP)  := gz
+suffix-$(CONFIG_KERNEL_BZIP2) := bz2
+suffix-$(CONFIG_KERNEL_LZ4)  := lz4
+suffix-$(CONFIG_KERNEL_LZMA)  := lzma
+suffix-$(CONFIG_KERNEL_LZO)  := lzo
+suffix-$(CONFIG_KERNEL_XZ)  := xz
+
+$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
+	$(call if_changed,gzip)
+$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
+	$(call if_changed,bzip2)
+$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
+	$(call if_changed,lz4)
+$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
+	$(call if_changed,lzma)
+$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
+	$(call if_changed,lzo)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
+	$(call if_changed,xzkern)
+
+LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
+$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y)
+	$(call if_changed,ld)
diff --git a/src/kernel/linux/v4.14/arch/parisc/boot/compressed/head.S b/src/kernel/linux/v4.14/arch/parisc/boot/compressed/head.S
new file mode 100644
index 0000000..e8b798f
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/boot/compressed/head.S
@@ -0,0 +1,85 @@
+/*
+ * Startup glue code to uncompress the kernel
+ *
+ *   (C) 2017 Helge Deller <deller@gmx.de>
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/psw.h>
+#include <asm/pdc.h>
+#include <asm/assembly.h>
+#include "sizes.h"
+
+#define BOOTADDR(x)	(x)
+
+#ifndef CONFIG_64BIT
+	.import	$global$		/* forward declaration */
+#endif /*!CONFIG_64BIT*/
+
+	__HEAD
+
+ENTRY(startup)
+	 .level PA_ASM_LEVEL
+
+#define PSW_W_SM	0x200
+#define PSW_W_BIT       36
+
+	;! nuke the W bit, saving original value
+	.level 2.0
+	rsm	PSW_W_SM, %r1
+
+	.level 1.1
+	extrw,u	%r1, PSW_W_BIT-32, 1, %r1
+	copy	%r1, %arg0
+
+	/* Make sure sr4-sr7 are set to zero for the kernel address space */
+	mtsp    %r0,%sr4
+	mtsp    %r0,%sr5
+	mtsp    %r0,%sr6
+	mtsp    %r0,%sr7
+
+	/* Clear BSS */
+
+	.import _bss,data
+	.import _ebss,data
+
+	load32	BOOTADDR(_bss),%r3
+	load32	BOOTADDR(_ebss),%r4
+	ldo	FRAME_SIZE(%r4),%sp	/* stack at end of bss */
+$bss_loop:
+	cmpb,<<,n %r3,%r4,$bss_loop
+	stw,ma	%r0,4(%r3)
+
+	/* Initialize the global data pointer */
+	loadgp
+
+	/* arg0..arg4 were set by palo. */
+	copy	%arg1, %r6		/* command line */
+	copy	%arg2, %r7		/* rd-start */
+	copy	%arg3, %r8		/* rd-end */
+	load32	BOOTADDR(decompress_kernel),%r3
+
+#ifdef CONFIG_64BIT
+	.level PA_ASM_LEVEL
+	ssm	PSW_W_SM, %r0		/* set W-bit */
+	depdi	0, 31, 32, %r3
+#endif
+	load32	BOOTADDR(startup_continue), %r2
+	bv,n	0(%r3)
+
+startup_continue:
+#ifdef CONFIG_64BIT
+	.level PA_ASM_LEVEL
+	rsm	PSW_W_SM, %r0		/* clear W-bit */
+#endif
+
+	load32	KERNEL_BINARY_TEXT_START, %arg0 /* free mem */
+	copy	%r6, %arg1		/* command line */
+	copy	%r7, %arg2		/* rd-start */
+	copy	%r8, %arg3		/* rd-end */
+
+	bv,n	0(%ret0)
+END(startup)
diff --git a/src/kernel/linux/v4.14/arch/parisc/boot/compressed/misc.c b/src/kernel/linux/v4.14/arch/parisc/boot/compressed/misc.c
new file mode 100644
index 0000000..f57118e
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/boot/compressed/misc.c
@@ -0,0 +1,302 @@
+/*
+ * Definitions and wrapper functions for kernel decompressor
+ *
+ *   (C) 2017 Helge Deller <deller@gmx.de>
+ */
+
+#include <linux/uaccess.h>
+#include <asm/unaligned.h>
+#include <asm/page.h>
+#include "sizes.h"
+
+/*
+ * gzip declarations
+ */
+#define STATIC static
+
+#undef memmove
+#define memmove memmove
+#define memzero(s, n) memset((s), 0, (n))
+
+#define malloc	malloc_gzip
+#define free	free_gzip
+
+/* Symbols defined by linker scripts */
+extern char input_data[];
+extern int input_len;
+/* output_len is inserted by the linker possibly at an unaligned address */
+extern __le32 output_len __aligned(1);
+extern char _text, _end;
+extern char _bss, _ebss;
+extern char _startcode_end;
+extern void startup_continue(void *entry, unsigned long cmdline,
+	unsigned long rd_start, unsigned long rd_end) __noreturn;
+
+void error(char *m) __noreturn;
+
+static unsigned long free_mem_ptr;
+static unsigned long free_mem_end_ptr;
+
+#ifdef CONFIG_KERNEL_GZIP
+#include "../../../../lib/decompress_inflate.c"
+#endif
+
+#ifdef CONFIG_KERNEL_BZIP2
+#include "../../../../lib/decompress_bunzip2.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZ4
+#include "../../../../lib/decompress_unlz4.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZMA
+#include "../../../../lib/decompress_unlzma.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
+
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
+void *memmove(void *dest, const void *src, size_t n)
+{
+	const char *s = src;
+	char *d = dest;
+
+	if (d <= s) {
+		while (n--)
+			*d++ = *s++;
+	} else {
+		d += n;
+		s += n;
+		while (n--)
+			*--d = *--s;
+	}
+	return dest;
+}
+
+void *memset(void *s, int c, size_t count)
+{
+	char *xs = (char *)s;
+
+	while (count--)
+		*xs++ = c;
+	return s;
+}
+
+void *memcpy(void *d, const void *s, size_t len)
+{
+	char *dest = (char *)d;
+	const char *source = (const char *)s;
+
+	while (len--)
+		*dest++ = *source++;
+	return d;
+}
+
+size_t strlen(const char *s)
+{
+	const char *sc;
+
+	for (sc = s; *sc != '\0'; ++sc)
+		;
+	return sc - s;
+}
+
+char *strchr(const char *s, int c)
+{
+	while (*s) {
+		if (*s == (char)c)
+			return (char *)s;
+		++s;
+	}
+	return NULL;
+}
+
+int puts(const char *s)
+{
+	const char *nuline = s;
+
+	while ((nuline = strchr(s, '\n')) != NULL) {
+		if (nuline != s)
+			pdc_iodc_print(s, nuline - s);
+		pdc_iodc_print("\r\n", 2);
+		s = nuline + 1;
+	}
+	if (*s != '\0')
+		pdc_iodc_print(s, strlen(s));
+
+	return 0;
+}
+
+static int putchar(int c)
+{
+	char buf[2];
+
+	buf[0] = c;
+	buf[1] = '\0';
+	puts(buf);
+	return c;
+}
+
+void __noreturn error(char *x)
+{
+	puts("\n\n");
+	puts(x);
+	puts("\n\n -- System halted");
+	while (1)	/* wait forever */
+		;
+}
+
+static int print_hex(unsigned long num)
+{
+	const char hex[] = "0123456789abcdef";
+	char str[40];
+	int i = sizeof(str)-1;
+
+	str[i--] = '\0';
+	do {
+		str[i--] = hex[num & 0x0f];
+		num >>= 4;
+	} while (num);
+
+	str[i--] = 'x';
+	str[i] = '0';
+	puts(&str[i]);
+
+	return 0;
+}
+
+int printf(const char *fmt, ...)
+{
+	va_list args;
+	int i = 0;
+
+	va_start(args, fmt);
+
+	while (fmt[i]) {
+		if (fmt[i] != '%') {
+put:
+			putchar(fmt[i++]);
+			continue;
+		}
+
+		if (fmt[++i] == '%')
+			goto put;
+		++i;
+		print_hex(va_arg(args, unsigned long));
+	}
+
+	va_end(args);
+	return 0;
+}
+
+/* helper functions for libgcc */
+void abort(void)
+{
+	error("aborted.");
+}
+
+#undef malloc
+void *malloc(size_t size)
+{
+	return malloc_gzip(size);
+}
+
+#undef free
+void free(void *ptr)
+{
+	return free_gzip(ptr);
+}
+
+
+static void flush_data_cache(char *start, unsigned long length)
+{
+	char *end = start + length;
+
+	do {
+		asm volatile("fdc 0(%0)" : : "r" (start));
+		asm volatile("fic 0(%%sr0,%0)" : : "r" (start));
+		start += 16;
+	} while (start < end);
+	asm volatile("fdc 0(%0)" : : "r" (end));
+
+	asm ("sync");
+}
+
+unsigned long decompress_kernel(unsigned int started_wide,
+		unsigned int command_line,
+		const unsigned int rd_start,
+		const unsigned int rd_end)
+{
+	char *output;
+	unsigned long len, len_all;
+
+#ifdef CONFIG_64BIT
+	parisc_narrow_firmware = 0;
+#endif
+
+	set_firmware_width_unlocked();
+
+	putchar('U');	/* if you get this U and no more, string storage */
+			/* in $GLOBAL$ is wrong or %dp is wrong */
+	puts("ncompressing ...\n");
+
+	output = (char *) KERNEL_BINARY_TEXT_START;
+	len_all = __pa(SZ_end) - __pa(SZparisc_kernel_start);
+
+	if ((unsigned long) &_startcode_end > (unsigned long) output)
+		error("Bootcode overlaps kernel code");
+
+	len = get_unaligned_le32(&output_len);
+	if (len > len_all)
+		error("Output len too big.");
+	else
+		memset(&output[len], 0, len_all - len);
+
+	/*
+	 * Initialize free_mem_ptr and free_mem_end_ptr.
+	 */
+	free_mem_ptr = (unsigned long) &_ebss;
+	free_mem_ptr += 2*1024*1024;	/* leave 2 MB for stack */
+
+	/* Limit memory for bootloader to 1GB */
+	#define ARTIFICIAL_LIMIT (1*1024*1024*1024)
+	free_mem_end_ptr = PAGE0->imm_max_mem;
+	if (free_mem_end_ptr > ARTIFICIAL_LIMIT)
+		free_mem_end_ptr = ARTIFICIAL_LIMIT;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	/* if we have ramdisk this is at end of memory */
+	if (rd_start && rd_start < free_mem_end_ptr)
+		free_mem_end_ptr = rd_start;
+#endif
+
+#ifdef DEBUG
+	printf("startcode_end = %x\n", &_startcode_end);
+	printf("commandline   = %x\n", command_line);
+	printf("rd_start      = %x\n", rd_start);
+	printf("rd_end        = %x\n", rd_end);
+
+	printf("free_ptr      = %x\n", free_mem_ptr);
+	printf("free_ptr_end  = %x\n", free_mem_end_ptr);
+
+	printf("input_data    = %x\n", input_data);
+	printf("input_len     = %x\n", input_len);
+	printf("output        = %x\n", output);
+	printf("output_len    = %x\n", len);
+	printf("output_max    = %x\n", len_all);
+#endif
+
+	__decompress(input_data, input_len, NULL, NULL,
+			output, 0, NULL, error);
+
+	flush_data_cache(output, len);
+
+	printf("Booting kernel ...\n\n");
+
+	return (unsigned long) output;
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/boot/compressed/vmlinux.lds.S b/src/kernel/linux/v4.14/arch/parisc/boot/compressed/vmlinux.lds.S
new file mode 100644
index 0000000..b658f77
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/boot/compressed/vmlinux.lds.S
@@ -0,0 +1,101 @@
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/page.h>
+#include "sizes.h"
+
+#ifndef CONFIG_64BIT
+OUTPUT_FORMAT("elf32-hppa-linux")
+OUTPUT_ARCH(hppa)
+#else
+OUTPUT_FORMAT("elf64-hppa-linux")
+OUTPUT_ARCH(hppa:hppa2.0w)
+#endif
+
+ENTRY(startup)
+
+SECTIONS
+{
+	/* palo loads at 0x60000 */
+	/* loaded kernel will move to 0x10000 */
+	. = 0xe0000;    /* should not overwrite palo code */
+
+	.head.text : {
+		_head = . ;
+		HEAD_TEXT
+		_ehead = . ;
+	}
+
+	/* keep __gp below 0x1000000 */
+#ifdef CONFIG_64BIT
+	. = ALIGN(16);
+	/* Linkage tables */
+	.opd : {
+		*(.opd)
+	} PROVIDE (__gp = .);
+	.plt : {
+		*(.plt)
+	}
+	.dlt : {
+		*(.dlt)
+	}
+#endif
+	_startcode_end = .;
+
+	/* bootloader code and data starts at least behind area of extracted kernel */
+	. = MAX(ABSOLUTE(.), (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START));
+
+	/* align on next page boundary */
+	. = ALIGN(4096);
+	.text :	{
+		_text = .;	/* Text */
+		*(.text)
+		*(.text.*)
+		_etext = . ;
+	}
+	. = ALIGN(8);
+	.data :	{
+		_data = . ;
+		*(.data)
+		*(.data.*)
+		_edata = . ;
+	}
+	. = ALIGN(8);
+	.rodata : {
+		_rodata = . ;
+		*(.rodata)	 /* read-only data */
+		*(.rodata.*)
+		_erodata = . ;
+	}
+	. = ALIGN(8);
+	.rodata.compressed : {
+		*(.rodata.compressed)
+	}
+	. = ALIGN(8);
+	.bss : {
+		_bss = . ;
+		*(.bss)
+		*(.bss.*)
+		*(COMMON)
+		. = ALIGN(4096);
+		_ebss = .;
+	}
+
+	STABS_DEBUG
+	.note 0 : { *(.note) }
+
+	/* Sections to be discarded */
+	DISCARDS
+	/DISCARD/ : {
+#ifdef CONFIG_64BIT
+		/* temporary hack until binutils is fixed to not emit these
+		 * for static binaries
+		 */
+		*(.PARISC.unwind)	/* no unwind data */
+		*(.interp)
+		*(.dynsym)
+		*(.dynstr)
+		*(.dynamic)
+		*(.hash)
+		*(.gnu.hash)
+#endif
+	}
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/boot/compressed/vmlinux.scr b/src/kernel/linux/v4.14/arch/parisc/boot/compressed/vmlinux.scr
new file mode 100644
index 0000000..dac2d14
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/boot/compressed/vmlinux.scr
@@ -0,0 +1,10 @@
+SECTIONS
+{
+  .rodata.compressed : {
+	input_len = .;
+	LONG(input_data_end - input_data) input_data = .;
+	*(.data)
+	output_len = . - 4; /* can be at unaligned address */
+	input_data_end = .;
+	}
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/boot/install.sh b/src/kernel/linux/v4.14/arch/parisc/boot/install.sh
new file mode 100644
index 0000000..8f7c365
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/boot/install.sh
@@ -0,0 +1,65 @@
+#!/bin/sh
+#
+# arch/parisc/install.sh, derived from arch/i386/boot/install.sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+#
+# "make install" script for parisc architecture
+#
+# Arguments:
+#   $1 - kernel version
+#   $2 - kernel image file
+#   $3 - kernel map file
+#   $4 - default install path (blank if root directory)
+#
+
+verify () {
+	if [ ! -f "$1" ]; then
+		echo ""                                                   1>&2
+		echo " *** Missing file: $1"                              1>&2
+		echo ' *** You need to run "make" before "make install".' 1>&2
+		echo ""                                                   1>&2
+		exit 1
+	fi
+}
+
+# Make sure the files actually exist
+
+verify "$2"
+verify "$3"
+
+# User may have a custom install script
+
+if [ -n "${INSTALLKERNEL}" ]; then
+  if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+  if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+fi
+
+# Default install
+
+if [ "$(basename $2)" = "zImage" ]; then
+# Compressed install
+  echo "Installing compressed kernel"
+  base=vmlinuz
+else
+# Normal install
+  echo "Installing normal kernel"
+  base=vmlinux
+fi
+
+if [ -f $4/$base-$1 ]; then
+  mv $4/$base-$1 $4/$base-$1.old
+fi
+cat $2 > $4/$base-$1
+
+# Install system map file
+if [ -f $4/System.map-$1 ]; then
+  mv $4/System.map-$1 $4/System.map-$1.old
+fi
+cp $3 $4/System.map-$1
diff --git a/src/kernel/linux/v4.14/arch/parisc/configs/712_defconfig b/src/kernel/linux/v4.14/arch/parisc/configs/712_defconfig
new file mode 100644
index 0000000..ccc1097
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/configs/712_defconfig
@@ -0,0 +1,182 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PA7100LC=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_GSC_LASI=y
+# CONFIG_PDC_CHASSIS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_DIAG=m
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+CONFIG_LLC2=m
+CONFIG_NET_PKTGEN=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=m
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_ATA_OVER_ETH=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_LASI700=y
+CONFIG_SCSI_DEBUG=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_TUN=m
+CONFIG_LASI_82596=y
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_KEYBOARD_HIL_OLD is not set
+CONFIG_MOUSE_SERIAL=m
+CONFIG_LEGACY_PTY_COUNT=64
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=17
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_MUX is not set
+CONFIG_PDC_CONSOLE=y
+CONFIG_PRINTER=m
+CONFIG_PPDEV=m
+# CONFIG_HW_RANDOM is not set
+CONFIG_RAW_DRIVER=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=128
+CONFIG_DUMMY_CONSOLE_ROWS=48
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_HARMONY=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V4=y
+CONFIG_CIFS=m
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_HW is not set
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
diff --git a/src/kernel/linux/v4.14/arch/parisc/configs/a500_defconfig b/src/kernel/linux/v4.14/arch/parisc/configs/a500_defconfig
new file mode 100644
index 0000000..5acb93d
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/configs/a500_defconfig
@@ -0,0 +1,177 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PA8X00=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=8
+# CONFIG_GSC is not set
+CONFIG_PCI=y
+CONFIG_PCI_LBA=y
+CONFIG_PCCARD=m
+# CONFIG_PCMCIA_LOAD_CIS is not set
+CONFIG_YENTA=m
+CONFIG_PD6729=m
+CONFIG_I82092=m
+# CONFIG_SUPERIO is not set
+# CONFIG_CHASSIS_LCD_LED is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP_DCCP=m
+# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_LLC2=m
+CONFIG_NET_PKTGEN=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_QLOGIC_1280=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=m
+CONFIG_FUSION_CTL=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_TUN=m
+CONFIG_PCMCIA_3C574=m
+CONFIG_PCMCIA_3C589=m
+CONFIG_VORTEX=m
+CONFIG_TYPHOON=m
+CONFIG_ACENIC=m
+CONFIG_ACENIC_OMIT_TIGON_I=y
+CONFIG_PCNET32=m
+CONFIG_TIGON3=m
+CONFIG_NET_TULIP=y
+CONFIG_DE2104X=m
+CONFIG_TULIP=y
+CONFIG_TULIP_MMIO=y
+CONFIG_PCMCIA_XIRCOM=m
+CONFIG_HP100=m
+CONFIG_E100=m
+CONFIG_E1000=m
+CONFIG_PCMCIA_SMC91C92=m
+CONFIG_PCMCIA_XIRC2PS=m
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_CS=m
+CONFIG_SERIAL_8250_NR_UARTS=17
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_PDC_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_RAW_DRIVER=y
+# CONFIG_HWMON is not set
+CONFIG_AGP=y
+CONFIG_AGP_PARISC=y
+# CONFIG_STI_CONSOLE is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_JFS_FS=m
+CONFIG_XFS_FS=m
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V4=m
+CONFIG_NFSD=m
+CONFIG_NFSD_V4=y
+CONFIG_CIFS=m
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_DEBUG_FS=y
+CONFIG_HEADERS_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_BLOWFISH=m
+# CONFIG_CRYPTO_HW is not set
diff --git a/src/kernel/linux/v4.14/arch/parisc/configs/b180_defconfig b/src/kernel/linux/v4.14/arch/parisc/configs/b180_defconfig
new file mode 100644
index 0000000..83ffd16
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/configs/b180_defconfig
@@ -0,0 +1,97 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODVERSIONS=y
+CONFIG_PA7100LC=y
+CONFIG_HPPB=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_EISA=y
+CONFIG_ISA=y
+CONFIG_PCI=y
+CONFIG_GSC_DINO=y
+# CONFIG_PDC_CHASSIS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_LASI700=y
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
+CONFIG_SCSI_ZALON=y
+CONFIG_SCSI_NCR53C8XX_SYNC=40
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+CONFIG_LASI_82596=y
+CONFIG_PPP=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_HIL_OLD is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=13
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_PRINTER=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_HARMONY=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_HEADERS_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_SECURITY=y
diff --git a/src/kernel/linux/v4.14/arch/parisc/configs/c3000_defconfig b/src/kernel/linux/v4.14/arch/parisc/configs/c3000_defconfig
new file mode 100644
index 0000000..8d41a73
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/configs/c3000_defconfig
@@ -0,0 +1,151 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PA8X00=y
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_GSC is not set
+CONFIG_PCI=y
+CONFIG_PCI_LBA=y
+# CONFIG_PDC_CHASSIS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_DIAG is not set
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NETFILTER=y
+CONFIG_NET_PKTGEN=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_NS87415=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
+CONFIG_SCSI_DEBUG=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=m
+CONFIG_FUSION_CTL=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_TUN=m
+CONFIG_ACENIC=m
+CONFIG_TIGON3=m
+CONFIG_NET_TULIP=y
+CONFIG_DE2104X=m
+CONFIG_TULIP=y
+CONFIG_TULIP_MMIO=y
+CONFIG_E100=m
+CONFIG_E1000=m
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_SERIO=m
+CONFIG_SERIO_LIBPS2=m
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=13
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_RAW_DRIVER=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_AD1889=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_PRINTER=m
+CONFIG_USB_STORAGE=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_XFS_FS=m
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_DEBUG_FS=y
+CONFIG_HEADERS_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_HW is not set
diff --git a/src/kernel/linux/v4.14/arch/parisc/configs/c8000_defconfig b/src/kernel/linux/v4.14/arch/parisc/configs/c8000_defconfig
new file mode 100644
index 0000000..088ab94
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/configs/c8000_defconfig
@@ -0,0 +1,237 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_PA8X00=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+CONFIG_PREEMPT=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_PCI=y
+CONFIG_PCI_LBA=y
+# CONFIG_SUPERIO is not set
+# CONFIG_CHASSIS_LCD_LED is not set
+# CONFIG_PDC_CHASSIS is not set
+# CONFIG_PDC_CHASSIS_WARN is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+# CONFIG_IPV6 is not set
+CONFIG_IP_DCCP=m
+# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_TIPC=m
+CONFIG_LLC2=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+CONFIG_PARPORT_PC_FIFO=y
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_SX8=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_WCACHE=y
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_PLATFORM=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+CONFIG_ATA=y
+CONFIG_PATA_SIL680=y
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=y
+CONFIG_FUSION_SAS=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=m
+CONFIG_TUN=y
+CONFIG_E1000=y
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_HIL_OLD is not set
+# CONFIG_KEYBOARD_HIL is not set
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_INPUT_MISC=y
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_PARKBD=m
+CONFIG_SERIO_GSCPS2=m
+# CONFIG_HP_SDC is not set
+CONFIG_SERIO_PCIPS2=m
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_RUNTIME_UARTS=8
+CONFIG_SERIAL_8250_EXTENDED=y
+# CONFIG_SERIAL_MUX is not set
+CONFIG_SERIAL_JSM=m
+CONFIG_PRINTER=y
+CONFIG_HW_RANDOM=y
+CONFIG_RAW_DRIVER=m
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_SSB=m
+CONFIG_SSB_DRIVER_PCICORE=y
+CONFIG_AGP=y
+CONFIG_AGP_PARISC=y
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_FOREIGN_ENDIAN=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+# CONFIG_FB_STI is not set
+# CONFIG_LCD_CLASS_DEVICE is not set
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_STI_CONSOLE is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=m
+CONFIG_SND=m
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_AD1889=m
+# CONFIG_SND_USB is not set
+# CONFIG_SND_GSC is not set
+CONFIG_USB=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_NFS_FS=m
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_SLAB=y
+CONFIG_DEBUG_SLAB_LEAK=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_BLOCK_EXT_DEVT=y
+CONFIG_LATENCYTOP=y
+CONFIG_KEYS=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_FONTS=y
diff --git a/src/kernel/linux/v4.14/arch/parisc/configs/default_defconfig b/src/kernel/linux/v4.14/arch/parisc/configs/default_defconfig
new file mode 100644
index 0000000..52c9050
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/configs/default_defconfig
@@ -0,0 +1,206 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PA7100LC=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_EISA=y
+CONFIG_PCI=y
+CONFIG_GSC_DINO=y
+CONFIG_PCI_LBA=y
+CONFIG_PCCARD=y
+CONFIG_YENTA=y
+CONFIG_PD6729=y
+CONFIG_I82092=y
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_DIAG=m
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_LLC2=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_PC_PCMCIA=m
+CONFIG_PARPORT_1284=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECS=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_NS87415=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_LASI700=y
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_ZALON=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_MD_RAID10=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_TUN=m
+CONFIG_ACENIC=y
+CONFIG_TIGON3=y
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+CONFIG_LASI_82596=y
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_KEYBOARD_HIL_OLD is not set
+CONFIG_MOUSE_SERIAL=y
+CONFIG_LEGACY_PTY_COUNT=64
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_CS=y
+CONFIG_SERIAL_8250_NR_UARTS=17
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_PRINTER=m
+CONFIG_PPDEV=m
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=128
+CONFIG_DUMMY_CONSOLE_ROWS=48
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_AD1889=y
+CONFIG_SND_HARMONY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_USB=y
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V4=y
+CONFIG_CIFS=m
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
+CONFIG_DEBUG_FS=y
+CONFIG_HEADERS_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_KEYS=y
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_HW is not set
+CONFIG_LIBCRC32C=m
+CONFIG_FONTS=y
diff --git a/src/kernel/linux/v4.14/arch/parisc/configs/generic-32bit_defconfig b/src/kernel/linux/v4.14/arch/parisc/configs/generic-32bit_defconfig
new file mode 100644
index 0000000..37ae4b5
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/configs/generic-32bit_defconfig
@@ -0,0 +1,307 @@
+CONFIG_LOCALVERSION="-32bit"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PA7100LC=y
+CONFIG_SMP=y
+CONFIG_HZ_100=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_EISA=y
+CONFIG_PCI=y
+CONFIG_GSC_DINO=y
+CONFIG_PCI_LBA=y
+CONFIG_PCCARD=m
+CONFIG_YENTA=m
+# CONFIG_PDC_CHASSIS is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_DIAG=m
+CONFIG_LLC2=m
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_1284=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_NS87415=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_LASI700=y
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_ZALON=y
+CONFIG_SCSI_DH=y
+CONFIG_ATA=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_TUN=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+CONFIG_LASI_82596=y
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPPOE=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_POLLDEV=y
+CONFIG_KEYBOARD_HIL_OLD=m
+CONFIG_KEYBOARD_HIL=m
+CONFIG_MOUSE_SERIAL=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+CONFIG_LEGACY_PTY_COUNT=64
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_PRINTER=m
+CONFIG_PPDEV=m
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+# CONFIG_HWMON is not set
+CONFIG_AGP=y
+CONFIG_FB=y
+CONFIG_FB_FOREIGN_ENDIAN=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_MATROX=m
+CONFIG_FB_MATROX_G=y
+CONFIG_FB_VOODOO1=m
+CONFIG_DUMMY_CONSOLE_COLUMNS=128
+CONFIG_DUMMY_CONSOLE_ROWS=48
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=m
+CONFIG_SND=m
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_AD1889=m
+CONFIG_SND_HARMONY=m
+CONFIG_HIDRAW=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KYE=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_DMADEVICES=y
+CONFIG_AUXDISPLAY=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_NFS_FS=m
+# CONFIG_NFS_V2 is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_LATENCYTOP=y
+CONFIG_LKDTM=m
+CONFIG_KEYS=y
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC_T10DIF=y
+CONFIG_FONTS=y
diff --git a/src/kernel/linux/v4.14/arch/parisc/configs/generic-64bit_defconfig b/src/kernel/linux/v4.14/arch/parisc/configs/generic-64bit_defconfig
new file mode 100644
index 0000000..d39e7f8
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/configs/generic-64bit_defconfig
@@ -0,0 +1,294 @@
+CONFIG_LOCALVERSION="-64bit"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CPUSETS=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_INTEGRITY=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_PA8X00=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+# CONFIG_COMPACTION is not set
+CONFIG_HPPB=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_PCI=y
+CONFIG_PCI_STUB=m
+CONFIG_PCI_IOV=y
+CONFIG_GSC_DINO=y
+CONFIG_PCI_LBA=y
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_ADVANCED is not set
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_DCB=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_IDE=y
+CONFIG_IDE_GD=m
+CONFIG_IDE_GD_ATAPI=y
+CONFIG_BLK_DEV_IDECD=m
+CONFIG_BLK_DEV_NS87415=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_SCSI_ISCSI_ATTRS=y
+CONFIG_SCSI_SRP_ATTRS=y
+CONFIG_ISCSI_BOOT_SYSFS=y
+CONFIG_SCSI_MPT2SAS=y
+CONFIG_SCSI_LASI700=m
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_ZALON=y
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_SCSI_DH=y
+CONFIG_ATA=y
+CONFIG_PATA_SIL680=y
+CONFIG_ATA_GENERIC=y
+CONFIG_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_RAID=m
+CONFIG_DM_UEVENT=y
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=y
+CONFIG_FUSION_SAS=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+CONFIG_HP100=m
+CONFIG_E1000=y
+CONFIG_LASI_82596=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+CONFIG_QLA3XXX=m
+CONFIG_QLCNIC=m
+CONFIG_QLGE=m
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MDIO_BITBANG=m
+CONFIG_PHYLIB=y
+CONFIG_BROADCOM_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_NATIONAL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_STE10XP=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_HIL_OLD is not set
+# CONFIG_KEYBOARD_HIL is not set
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_INPUT_MISC=y
+CONFIG_SERIO_SERPORT=m
+# CONFIG_HP_SDC is not set
+CONFIG_SERIO_RAW=m
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_NOZOMI=m
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_RUNTIME_UARTS=8
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_JSM=m
+CONFIG_IPMI_HANDLER=y
+CONFIG_IPMI_DEVICE_INTERFACE=y
+CONFIG_IPMI_SI=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_TCG_TPM=m
+CONFIG_TCG_ATMEL=m
+CONFIG_PTP_1588_CLOCK=m
+CONFIG_SENSORS_I5K_AMB=m
+CONFIG_SENSORS_F71882FG=m
+CONFIG_SENSORS_PC87427=m
+CONFIG_SENSORS_VT1211=m
+CONFIG_SENSORS_VT8231=m
+CONFIG_SENSORS_W83627EHF=m
+CONFIG_WATCHDOG=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_SSB=m
+CONFIG_SSB_DRIVER_PCICORE=y
+CONFIG_HTC_PASIC3=m
+CONFIG_LPC_SCH=m
+CONFIG_MFD_SM501=m
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=m
+CONFIG_REGULATOR_USERSPACE_CONSUMER=m
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_AGP=y
+CONFIG_AGP_PARISC=y
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+CONFIG_HIDRAW=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DYNAMIC_MINORS=y
+CONFIG_USB_MON=m
+CONFIG_USB_WUSB_CBAF=m
+CONFIG_USB_XHCI_HCD=m
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_AEC=m
+CONFIG_UIO_SERCOS3=m
+CONFIG_UIO_PCI_GENERIC=m
+CONFIG_STAGING=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_ISO9660_FS=y
+CONFIG_UDF_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_SYSV_FS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V4=m
+CONFIG_NFS_V4_1=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V4=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_UTF8=m
+CONFIG_PRINTK_TIME=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_CCITT=m
+CONFIG_LIBCRC32C=y
diff --git a/src/kernel/linux/v4.14/arch/parisc/defpalo.conf b/src/kernel/linux/v4.14/arch/parisc/defpalo.conf
new file mode 100644
index 0000000..208ff3b
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/defpalo.conf
@@ -0,0 +1,21 @@
+# This is a generic Palo configuration file.  For more information about how
+# it works try 'palo -?'.
+#
+# Most people using 'make palo' want a bootable file, usable for
+# network or tape booting for example.
+--init-tape=lifimage
+--recoverykernel=vmlinuz
+
+########## Pick your ROOT here! ##########
+# You need at least one 'root='!
+#
+# If you want a root ramdisk, use the next 2 lines
+#   (Edit the ramdisk image name!!!!)
+--ramdisk=ram-disk-image-file
+--commandline=0/vmlinuz HOME=/ root=/dev/ram initrd=0/ramdisk panic_timeout=60 panic=-1
+
+# If you want NFS root, use the following command line (Edit the HOSTNAME!!!)
+#--commandline=0/vmlinuz HOME=/ root=/dev/nfs nfsroot=HOSTNAME ip=bootp
+
+# If you have root on a disk partition, use this (Edit the partition name!!!)
+#--commandline=0/vmlinuz HOME=/ root=/dev/sda1
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/Kbuild b/src/kernel/linux/v4.14/arch/parisc/include/asm/Kbuild
new file mode 100644
index 0000000..a411395
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/Kbuild
@@ -0,0 +1,26 @@
+generic-y += barrier.h
+generic-y += clkdev.h
+generic-y += current.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += exec.h
+generic-y += hw_irq.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += kprobes.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += seccomp.h
+generic-y += segment.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += word-at-a-time.h
+generic-y += xor.h
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/agp.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/agp.h
new file mode 100644
index 0000000..cb04470
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/agp.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_AGP_H
+#define _ASM_PARISC_AGP_H
+
+/*
+ * PARISC specific AGP definitions.
+ * Copyright (c) 2006 Kyle McMartin <kyle@parisc-linux.org>
+ *
+ */
+
+#define map_page_into_agp(page)		/* nothing */
+#define unmap_page_from_agp(page)	/* nothing */
+#define flush_agp_cache()		mb()
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order)		\
+	((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order)	\
+	free_pages((unsigned long)(table), (order))
+
+#endif /* _ASM_PARISC_AGP_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/asm-offsets.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/asm-offsets.h
new file mode 100644
index 0000000..d370ee3
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/asmregs.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/asmregs.h
new file mode 100644
index 0000000..d93c646
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/asmregs.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2, or (at your option)
+ *	any later version.
+ *
+ *	This program is distributed in the hope that it will be useful,
+ *	but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *	GNU General Public License for more details.
+ *
+ *	You should have received a copy of the GNU General Public License
+ *	along with this program; if not, write to the Free Software
+ *	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _PARISC_ASMREGS_H
+#define _PARISC_ASMREGS_H
+
+;! General Registers
+
+rp:	.reg	%r2
+arg3:	.reg	%r23
+arg2:	.reg	%r24
+arg1:	.reg	%r25
+arg0:	.reg	%r26
+dp:	.reg	%r27
+ret0:	.reg	%r28
+ret1:	.reg	%r29
+sl:	.reg	%r29
+sp:	.reg	%r30
+
+#if 0
+/* PA20_REVISIT */
+arg7:	.reg	r19
+arg6:	.reg	r20
+arg5:	.reg	r21
+arg4:	.reg	r22
+gp:	.reg	r27
+ap:	.reg	r29
+#endif
+
+
+r0:	.reg	%r0
+r1:	.reg	%r1
+r2:	.reg	%r2
+r3:	.reg	%r3
+r4:	.reg	%r4
+r5:	.reg	%r5
+r6:	.reg	%r6
+r7:	.reg	%r7
+r8:	.reg	%r8
+r9:	.reg	%r9
+r10:	.reg	%r10
+r11:	.reg	%r11
+r12:	.reg	%r12
+r13:	.reg	%r13
+r14:	.reg	%r14
+r15:	.reg	%r15
+r16:	.reg	%r16
+r17:	.reg	%r17
+r18:	.reg	%r18
+r19:	.reg	%r19
+r20:	.reg	%r20
+r21:	.reg	%r21
+r22:	.reg	%r22
+r23:	.reg	%r23
+r24:	.reg	%r24
+r25:	.reg	%r25
+r26:	.reg	%r26
+r27:	.reg	%r27
+r28:	.reg	%r28
+r29:	.reg	%r29
+r30:	.reg	%r30
+r31:	.reg	%r31
+
+
+;! Space Registers
+
+sr0:	.reg	%sr0
+sr1:	.reg	%sr1
+sr2:	.reg	%sr2
+sr3:	.reg	%sr3
+sr4:	.reg	%sr4
+sr5:	.reg	%sr5
+sr6:	.reg	%sr6
+sr7:	.reg	%sr7
+
+
+;! Floating Point Registers
+
+fr0:	.reg	%fr0
+fr1:	.reg	%fr1
+fr2:	.reg	%fr2
+fr3:	.reg	%fr3
+fr4:	.reg	%fr4
+fr5:	.reg	%fr5
+fr6:	.reg	%fr6
+fr7:	.reg	%fr7
+fr8:	.reg	%fr8
+fr9:	.reg	%fr9
+fr10:	.reg	%fr10
+fr11:	.reg	%fr11
+fr12:	.reg	%fr12
+fr13:	.reg	%fr13
+fr14:	.reg	%fr14
+fr15:	.reg	%fr15
+fr16:	.reg	%fr16
+fr17:	.reg	%fr17
+fr18:	.reg	%fr18
+fr19:	.reg	%fr19
+fr20:	.reg	%fr20
+fr21:	.reg	%fr21
+fr22:	.reg	%fr22
+fr23:	.reg	%fr23
+fr24:	.reg	%fr24
+fr25:	.reg	%fr25
+fr26:	.reg	%fr26
+fr27:	.reg	%fr27
+fr28:	.reg	%fr28
+fr29:	.reg	%fr29
+fr30:	.reg	%fr30
+fr31:	.reg	%fr31
+
+
+;! Control Registers
+
+rctr:	.reg	%cr0
+pidr1:	.reg	%cr8
+pidr2:	.reg	%cr9
+ccr:	.reg	%cr10
+sar:	.reg	%cr11
+pidr3:	.reg	%cr12
+pidr4:	.reg	%cr13
+iva:	.reg	%cr14
+eiem:	.reg	%cr15
+itmr:	.reg	%cr16
+pcsq:	.reg	%cr17
+pcoq:	.reg	%cr18
+iir:	.reg	%cr19
+isr:	.reg	%cr20
+ior:	.reg	%cr21
+ipsw:	.reg	%cr22
+eirr:	.reg	%cr23
+tr0:	.reg	%cr24
+tr1:	.reg	%cr25
+tr2:	.reg	%cr26
+tr3:	.reg	%cr27
+tr4:	.reg	%cr28
+tr5:	.reg	%cr29
+tr6:	.reg	%cr30
+tr7:	.reg	%cr31
+
+
+cr0:	.reg	%cr0
+cr8:	.reg	%cr8
+cr9:	.reg	%cr9
+cr10:	.reg	%cr10
+cr11:	.reg	%cr11
+cr12:	.reg	%cr12
+cr13:	.reg	%cr13
+cr14:	.reg	%cr14
+cr15:	.reg	%cr15
+cr16:	.reg	%cr16
+cr17:	.reg	%cr17
+cr18:	.reg	%cr18
+cr19:	.reg	%cr19
+cr20:	.reg	%cr20
+cr21:	.reg	%cr21
+cr22:	.reg	%cr22
+cr23:	.reg	%cr23
+cr24:	.reg	%cr24
+cr25:	.reg	%cr25
+cr26:	.reg	%cr26
+cr27:	.reg	%cr27
+cr28:	.reg	%cr28
+cr29:	.reg	%cr29
+cr30:	.reg	%cr30
+cr31:	.reg	%cr31
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/assembly.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/assembly.h
new file mode 100644
index 0000000..eb83d65
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/assembly.h
@@ -0,0 +1,531 @@
+/*
+ * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
+ * Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 1999 SuSE GmbH
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _PARISC_ASSEMBLY_H
+#define _PARISC_ASSEMBLY_H
+
+#define CALLEE_FLOAT_FRAME_SIZE	80
+
+#ifdef CONFIG_64BIT
+#define LDREG	ldd
+#define STREG	std
+#define LDREGX  ldd,s
+#define LDREGM	ldd,mb
+#define STREGM	std,ma
+#define SHRREG	shrd
+#define SHLREG	shld
+#define ANDCM   andcm,*
+#define	COND(x)	* ## x
+#define RP_OFFSET	16
+#define FRAME_SIZE	128
+#define CALLEE_REG_FRAME_SIZE	144
+#define ASM_ULONG_INSN	.dword
+#else	/* CONFIG_64BIT */
+#define LDREG	ldw
+#define STREG	stw
+#define LDREGX  ldwx,s
+#define LDREGM	ldwm
+#define STREGM	stwm
+#define SHRREG	shr
+#define SHLREG	shlw
+#define ANDCM   andcm
+#define COND(x)	x
+#define RP_OFFSET	20
+#define FRAME_SIZE	64
+#define CALLEE_REG_FRAME_SIZE	128
+#define ASM_ULONG_INSN	.word
+#endif
+
+#define CALLEE_SAVE_FRAME_SIZE (CALLEE_REG_FRAME_SIZE + CALLEE_FLOAT_FRAME_SIZE)
+
+#ifdef CONFIG_PA20
+#define LDCW		ldcw,co
+#define BL		b,l
+# ifdef CONFIG_64BIT
+#  define PA_ASM_LEVEL	2.0w
+# else
+#  define PA_ASM_LEVEL	2.0
+# endif
+#else
+#define LDCW		ldcw
+#define BL		bl
+#define PA_ASM_LEVEL	1.1
+#endif
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_64BIT
+/* the 64-bit pa gnu assembler unfortunately defaults to .level 1.1 or 2.0 so
+ * work around that for now... */
+	.level 2.0w
+#endif
+
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/types.h>
+
+#include <asm/asmregs.h>
+
+	sp	=	30
+	gp	=	27
+	ipsw	=	22
+
+	/*
+	 * We provide two versions of each macro to convert from physical
+	 * to virtual and vice versa. The "_r1" versions take one argument
+	 * register, but trashes r1 to do the conversion. The other
+	 * version takes two arguments: a src and destination register.
+	 * However, the source and destination registers can not be
+	 * the same register.
+	 */
+
+	.macro  tophys  grvirt, grphys
+	ldil    L%(__PAGE_OFFSET), \grphys
+	sub     \grvirt, \grphys, \grphys
+	.endm
+	
+	.macro  tovirt  grphys, grvirt
+	ldil    L%(__PAGE_OFFSET), \grvirt
+	add     \grphys, \grvirt, \grvirt
+	.endm
+
+	.macro  tophys_r1  gr
+	ldil    L%(__PAGE_OFFSET), %r1
+	sub     \gr, %r1, \gr
+	.endm
+	
+	.macro  tovirt_r1  gr
+	ldil    L%(__PAGE_OFFSET), %r1
+	add     \gr, %r1, \gr
+	.endm
+
+	.macro delay value
+	ldil	L%\value, 1
+	ldo	R%\value(1), 1
+	addib,UV,n -1,1,.
+	addib,NUV,n -1,1,.+8
+	nop
+	.endm
+
+	.macro	debug value
+	.endm
+
+
+	/* Shift Left - note the r and t can NOT be the same! */
+	.macro shl r, sa, t
+	dep,z	\r, 31-(\sa), 32-(\sa), \t
+	.endm
+
+	/* The PA 2.0 shift left */
+	.macro shlw r, sa, t
+	depw,z	\r, 31-(\sa), 32-(\sa), \t
+	.endm
+
+	/* And the PA 2.0W shift left */
+	.macro shld r, sa, t
+	depd,z	\r, 63-(\sa), 64-(\sa), \t
+	.endm
+
+	/* Shift Right - note the r and t can NOT be the same! */
+	.macro shr r, sa, t
+	extru \r, 31-(\sa), 32-(\sa), \t
+	.endm
+
+	/* pa20w version of shift right */
+	.macro shrd r, sa, t
+	extrd,u \r, 63-(\sa), 64-(\sa), \t
+	.endm
+
+	/* load 32-bit 'value' into 'reg' compensating for the ldil
+	 * sign-extension when running in wide mode.
+	 * WARNING!! neither 'value' nor 'reg' can be expressions
+	 * containing '.'!!!! */
+	.macro	load32 value, reg
+	ldil	L%\value, \reg
+	ldo	R%\value(\reg), \reg
+	.endm
+
+	.macro loadgp
+#ifdef CONFIG_64BIT
+	ldil		L%__gp, %r27
+	ldo		R%__gp(%r27), %r27
+#else
+	ldil		L%$global$, %r27
+	ldo		R%$global$(%r27), %r27
+#endif
+	.endm
+
+#define SAVE_SP(r, where) mfsp r, %r1 ! STREG %r1, where
+#define REST_SP(r, where) LDREG where, %r1 ! mtsp %r1, r
+#define SAVE_CR(r, where) mfctl r, %r1 ! STREG %r1, where
+#define REST_CR(r, where) LDREG where, %r1 ! mtctl %r1, r
+
+	.macro	save_general	regs
+	STREG %r1, PT_GR1 (\regs)
+	STREG %r2, PT_GR2 (\regs)
+	STREG %r3, PT_GR3 (\regs)
+	STREG %r4, PT_GR4 (\regs)
+	STREG %r5, PT_GR5 (\regs)
+	STREG %r6, PT_GR6 (\regs)
+	STREG %r7, PT_GR7 (\regs)
+	STREG %r8, PT_GR8 (\regs)
+	STREG %r9, PT_GR9 (\regs)
+	STREG %r10, PT_GR10(\regs)
+	STREG %r11, PT_GR11(\regs)
+	STREG %r12, PT_GR12(\regs)
+	STREG %r13, PT_GR13(\regs)
+	STREG %r14, PT_GR14(\regs)
+	STREG %r15, PT_GR15(\regs)
+	STREG %r16, PT_GR16(\regs)
+	STREG %r17, PT_GR17(\regs)
+	STREG %r18, PT_GR18(\regs)
+	STREG %r19, PT_GR19(\regs)
+	STREG %r20, PT_GR20(\regs)
+	STREG %r21, PT_GR21(\regs)
+	STREG %r22, PT_GR22(\regs)
+	STREG %r23, PT_GR23(\regs)
+	STREG %r24, PT_GR24(\regs)
+	STREG %r25, PT_GR25(\regs)
+	/* r26 is saved in get_stack and used to preserve a value across virt_map */
+	STREG %r27, PT_GR27(\regs)
+	STREG %r28, PT_GR28(\regs)
+	/* r29 is saved in get_stack and used to point to saved registers */
+	/* r30 stack pointer saved in get_stack */
+	STREG %r31, PT_GR31(\regs)
+	.endm
+
+	.macro	rest_general	regs
+	/* r1 used as a temp in rest_stack and is restored there */
+	LDREG PT_GR2 (\regs), %r2
+	LDREG PT_GR3 (\regs), %r3
+	LDREG PT_GR4 (\regs), %r4
+	LDREG PT_GR5 (\regs), %r5
+	LDREG PT_GR6 (\regs), %r6
+	LDREG PT_GR7 (\regs), %r7
+	LDREG PT_GR8 (\regs), %r8
+	LDREG PT_GR9 (\regs), %r9
+	LDREG PT_GR10(\regs), %r10
+	LDREG PT_GR11(\regs), %r11
+	LDREG PT_GR12(\regs), %r12
+	LDREG PT_GR13(\regs), %r13
+	LDREG PT_GR14(\regs), %r14
+	LDREG PT_GR15(\regs), %r15
+	LDREG PT_GR16(\regs), %r16
+	LDREG PT_GR17(\regs), %r17
+	LDREG PT_GR18(\regs), %r18
+	LDREG PT_GR19(\regs), %r19
+	LDREG PT_GR20(\regs), %r20
+	LDREG PT_GR21(\regs), %r21
+	LDREG PT_GR22(\regs), %r22
+	LDREG PT_GR23(\regs), %r23
+	LDREG PT_GR24(\regs), %r24
+	LDREG PT_GR25(\regs), %r25
+	LDREG PT_GR26(\regs), %r26
+	LDREG PT_GR27(\regs), %r27
+	LDREG PT_GR28(\regs), %r28
+	/* r29 points to register save area, and is restored in rest_stack */
+	/* r30 stack pointer restored in rest_stack */
+	LDREG PT_GR31(\regs), %r31
+	.endm
+
+	.macro	save_fp 	regs
+	fstd,ma  %fr0, 8(\regs)
+	fstd,ma	 %fr1, 8(\regs)
+	fstd,ma	 %fr2, 8(\regs)
+	fstd,ma	 %fr3, 8(\regs)
+	fstd,ma	 %fr4, 8(\regs)
+	fstd,ma	 %fr5, 8(\regs)
+	fstd,ma	 %fr6, 8(\regs)
+	fstd,ma	 %fr7, 8(\regs)
+	fstd,ma	 %fr8, 8(\regs)
+	fstd,ma	 %fr9, 8(\regs)
+	fstd,ma	%fr10, 8(\regs)
+	fstd,ma	%fr11, 8(\regs)
+	fstd,ma	%fr12, 8(\regs)
+	fstd,ma	%fr13, 8(\regs)
+	fstd,ma	%fr14, 8(\regs)
+	fstd,ma	%fr15, 8(\regs)
+	fstd,ma	%fr16, 8(\regs)
+	fstd,ma	%fr17, 8(\regs)
+	fstd,ma	%fr18, 8(\regs)
+	fstd,ma	%fr19, 8(\regs)
+	fstd,ma	%fr20, 8(\regs)
+	fstd,ma	%fr21, 8(\regs)
+	fstd,ma	%fr22, 8(\regs)
+	fstd,ma	%fr23, 8(\regs)
+	fstd,ma	%fr24, 8(\regs)
+	fstd,ma	%fr25, 8(\regs)
+	fstd,ma	%fr26, 8(\regs)
+	fstd,ma	%fr27, 8(\regs)
+	fstd,ma	%fr28, 8(\regs)
+	fstd,ma	%fr29, 8(\regs)
+	fstd,ma	%fr30, 8(\regs)
+	fstd	%fr31, 0(\regs)
+	.endm
+
+	.macro	rest_fp 	regs
+	fldd	0(\regs),	 %fr31
+	fldd,mb	-8(\regs),       %fr30
+	fldd,mb	-8(\regs),       %fr29
+	fldd,mb	-8(\regs),       %fr28
+	fldd,mb	-8(\regs),       %fr27
+	fldd,mb	-8(\regs),       %fr26
+	fldd,mb	-8(\regs),       %fr25
+	fldd,mb	-8(\regs),       %fr24
+	fldd,mb	-8(\regs),       %fr23
+	fldd,mb	-8(\regs),       %fr22
+	fldd,mb	-8(\regs),       %fr21
+	fldd,mb	-8(\regs),       %fr20
+	fldd,mb	-8(\regs),       %fr19
+	fldd,mb	-8(\regs),       %fr18
+	fldd,mb	-8(\regs),       %fr17
+	fldd,mb	-8(\regs),       %fr16
+	fldd,mb	-8(\regs),       %fr15
+	fldd,mb	-8(\regs),       %fr14
+	fldd,mb	-8(\regs),       %fr13
+	fldd,mb	-8(\regs),       %fr12
+	fldd,mb	-8(\regs),       %fr11
+	fldd,mb	-8(\regs),       %fr10
+	fldd,mb	-8(\regs),       %fr9
+	fldd,mb	-8(\regs),       %fr8
+	fldd,mb	-8(\regs),       %fr7
+	fldd,mb	-8(\regs),       %fr6
+	fldd,mb	-8(\regs),       %fr5
+	fldd,mb	-8(\regs),       %fr4
+	fldd,mb	-8(\regs),       %fr3
+	fldd,mb	-8(\regs),       %fr2
+	fldd,mb	-8(\regs),       %fr1
+	fldd,mb	-8(\regs),       %fr0
+	.endm
+
+	.macro	callee_save_float
+	fstd,ma	 %fr12,	8(%r30)
+	fstd,ma	 %fr13,	8(%r30)
+	fstd,ma	 %fr14,	8(%r30)
+	fstd,ma	 %fr15,	8(%r30)
+	fstd,ma	 %fr16,	8(%r30)
+	fstd,ma	 %fr17,	8(%r30)
+	fstd,ma	 %fr18,	8(%r30)
+	fstd,ma	 %fr19,	8(%r30)
+	fstd,ma	 %fr20,	8(%r30)
+	fstd,ma	 %fr21,	8(%r30)
+	.endm
+
+	.macro	callee_rest_float
+	fldd,mb	-8(%r30),   %fr21
+	fldd,mb	-8(%r30),   %fr20
+	fldd,mb	-8(%r30),   %fr19
+	fldd,mb	-8(%r30),   %fr18
+	fldd,mb	-8(%r30),   %fr17
+	fldd,mb	-8(%r30),   %fr16
+	fldd,mb	-8(%r30),   %fr15
+	fldd,mb	-8(%r30),   %fr14
+	fldd,mb	-8(%r30),   %fr13
+	fldd,mb	-8(%r30),   %fr12
+	.endm
+
+#ifdef CONFIG_64BIT
+	.macro	callee_save
+	std,ma	  %r3,	 CALLEE_REG_FRAME_SIZE(%r30)
+	mfctl	  %cr27, %r3
+	std	  %r4,	-136(%r30)
+	std	  %r5,	-128(%r30)
+	std	  %r6,	-120(%r30)
+	std	  %r7,	-112(%r30)
+	std	  %r8,	-104(%r30)
+	std	  %r9,	 -96(%r30)
+	std	 %r10,	 -88(%r30)
+	std	 %r11,	 -80(%r30)
+	std	 %r12,	 -72(%r30)
+	std	 %r13,	 -64(%r30)
+	std	 %r14,	 -56(%r30)
+	std	 %r15,	 -48(%r30)
+	std	 %r16,	 -40(%r30)
+	std	 %r17,	 -32(%r30)
+	std	 %r18,	 -24(%r30)
+	std	  %r3,	 -16(%r30)
+	.endm
+
+	.macro	callee_rest
+	ldd	 -16(%r30),    %r3
+	ldd	 -24(%r30),   %r18
+	ldd	 -32(%r30),   %r17
+	ldd	 -40(%r30),   %r16
+	ldd	 -48(%r30),   %r15
+	ldd	 -56(%r30),   %r14
+	ldd	 -64(%r30),   %r13
+	ldd	 -72(%r30),   %r12
+	ldd	 -80(%r30),   %r11
+	ldd	 -88(%r30),   %r10
+	ldd	 -96(%r30),    %r9
+	ldd	-104(%r30),    %r8
+	ldd	-112(%r30),    %r7
+	ldd	-120(%r30),    %r6
+	ldd	-128(%r30),    %r5
+	ldd	-136(%r30),    %r4
+	mtctl	%r3, %cr27
+	ldd,mb	-CALLEE_REG_FRAME_SIZE(%r30),    %r3
+	.endm
+
+#else /* ! CONFIG_64BIT */
+
+	.macro	callee_save
+	stw,ma	 %r3,	CALLEE_REG_FRAME_SIZE(%r30)
+	mfctl	 %cr27, %r3
+	stw	 %r4,	-124(%r30)
+	stw	 %r5,	-120(%r30)
+	stw	 %r6,	-116(%r30)
+	stw	 %r7,	-112(%r30)
+	stw	 %r8,	-108(%r30)
+	stw	 %r9,	-104(%r30)
+	stw	 %r10,	-100(%r30)
+	stw	 %r11,	 -96(%r30)
+	stw	 %r12,	 -92(%r30)
+	stw	 %r13,	 -88(%r30)
+	stw	 %r14,	 -84(%r30)
+	stw	 %r15,	 -80(%r30)
+	stw	 %r16,	 -76(%r30)
+	stw	 %r17,	 -72(%r30)
+	stw	 %r18,	 -68(%r30)
+	stw	  %r3,	 -64(%r30)
+	.endm
+
+	.macro	callee_rest
+	ldw	 -64(%r30),    %r3
+	ldw	 -68(%r30),   %r18
+	ldw	 -72(%r30),   %r17
+	ldw	 -76(%r30),   %r16
+	ldw	 -80(%r30),   %r15
+	ldw	 -84(%r30),   %r14
+	ldw	 -88(%r30),   %r13
+	ldw	 -92(%r30),   %r12
+	ldw	 -96(%r30),   %r11
+	ldw	-100(%r30),   %r10
+	ldw	-104(%r30),   %r9
+	ldw	-108(%r30),   %r8
+	ldw	-112(%r30),   %r7
+	ldw	-116(%r30),   %r6
+	ldw	-120(%r30),   %r5
+	ldw	-124(%r30),   %r4
+	mtctl	%r3, %cr27
+	ldw,mb	-CALLEE_REG_FRAME_SIZE(%r30),   %r3
+	.endm
+#endif /* ! CONFIG_64BIT */
+
+	.macro	save_specials	regs
+
+	SAVE_SP  (%sr0, PT_SR0 (\regs))
+	SAVE_SP  (%sr1, PT_SR1 (\regs))
+	SAVE_SP  (%sr2, PT_SR2 (\regs))
+	SAVE_SP  (%sr3, PT_SR3 (\regs))
+	SAVE_SP  (%sr4, PT_SR4 (\regs))
+	SAVE_SP  (%sr5, PT_SR5 (\regs))
+	SAVE_SP  (%sr6, PT_SR6 (\regs))
+
+	SAVE_CR  (%cr17, PT_IASQ0(\regs))
+	mtctl	 %r0,	%cr17
+	SAVE_CR  (%cr17, PT_IASQ1(\regs))
+
+	SAVE_CR  (%cr18, PT_IAOQ0(\regs))
+	mtctl	 %r0,	%cr18
+	SAVE_CR  (%cr18, PT_IAOQ1(\regs))
+
+#ifdef CONFIG_64BIT
+	/* cr11 (sar) is a funny one.  5 bits on PA1.1 and 6 bit on PA2.0
+	 * For PA2.0 mtsar or mtctl always write 6 bits, but mfctl only
+	 * reads 5 bits.  Use mfctl,w to read all six bits.  Otherwise
+	 * we lose the 6th bit on a save/restore over interrupt.
+	 */
+	mfctl,w  %cr11, %r1
+	STREG    %r1, PT_SAR (\regs)
+#else
+	SAVE_CR  (%cr11, PT_SAR  (\regs))
+#endif
+	SAVE_CR  (%cr19, PT_IIR  (\regs))
+
+	/*
+	 * Code immediately following this macro (in intr_save) relies
+	 * on r8 containing ipsw.
+	 */
+	mfctl    %cr22, %r8
+	STREG    %r8,   PT_PSW(\regs)
+	.endm
+
+	.macro	rest_specials	regs
+
+	REST_SP  (%sr0, PT_SR0 (\regs))
+	REST_SP  (%sr1, PT_SR1 (\regs))
+	REST_SP  (%sr2, PT_SR2 (\regs))
+	REST_SP  (%sr3, PT_SR3 (\regs))
+	REST_SP  (%sr4, PT_SR4 (\regs))
+	REST_SP  (%sr5, PT_SR5 (\regs))
+	REST_SP  (%sr6, PT_SR6 (\regs))
+	REST_SP  (%sr7, PT_SR7 (\regs))
+
+	REST_CR	(%cr17, PT_IASQ0(\regs))
+	REST_CR	(%cr17, PT_IASQ1(\regs))
+
+	REST_CR	(%cr18, PT_IAOQ0(\regs))
+	REST_CR	(%cr18, PT_IAOQ1(\regs))
+
+	REST_CR (%cr11, PT_SAR	(\regs))
+
+	REST_CR	(%cr22, PT_PSW	(\regs))
+	.endm
+
+
+	/* First step to create a "relied upon translation"
+	 * See PA 2.0 Arch. page F-4 and F-5.
+	 *
+	 * The ssm was originally necessary due to a "PCxT bug".
+	 * But someone decided it needed to be added to the architecture
+	 * and this "feature" went into rev3 of PA-RISC 1.1 Arch Manual.
+	 * It's been carried forward into PA 2.0 Arch as well. :^(
+	 *
+	 * "ssm 0,%r0" is a NOP with side effects (prefetch barrier).
+	 * rsm/ssm prevents the ifetch unit from speculatively fetching
+	 * instructions past this line in the code stream.
+	 * PA 2.0 processor will single step all insn in the same QUAD (4 insn).
+	 */
+	.macro	pcxt_ssm_bug
+	rsm	PSW_SM_I,%r0
+	nop	/* 1 */
+	nop	/* 2 */
+	nop	/* 3 */
+	nop	/* 4 */
+	nop	/* 5 */
+	nop	/* 6 */
+	nop	/* 7 */
+	.endm
+
+	/*
+	 * ASM_EXCEPTIONTABLE_ENTRY
+	 *
+	 * Creates an exception table entry.
+	 * Do not convert to an assembler macro. This won't work.
+	 */
+#define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr)	\
+	.section __ex_table,"aw"			!	\
+	.word (fault_addr - .), (except_addr - .)	!	\
+	.previous
+
+
+#endif /* __ASSEMBLY__ */
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/atomic.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/atomic.h
new file mode 100644
index 0000000..614bcc7
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/atomic.h
@@ -0,0 +1,338 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
+ */
+
+#ifndef _ASM_PARISC_ATOMIC_H_
+#define _ASM_PARISC_ATOMIC_H_
+
+#include <linux/types.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc..
+ *
+ * And probably incredibly slow on parisc.  OTOH, we don't
+ * have to write any serious assembly.   prumpf
+ */
+
+#ifdef CONFIG_SMP
+#include <asm/spinlock.h>
+#include <asm/cache.h>		/* we use L1_CACHE_BYTES */
+
+/* Use an array of spinlocks for our atomic_ts.
+ * Hash function to index into a different SPINLOCK.
+ * Since "a" is usually an address, use one spinlock per cacheline.
+ */
+#  define ATOMIC_HASH_SIZE 4
+#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
+
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+
+/* Can't use raw_spin_lock_irq because of #include problems, so
+ * this is the substitute */
+/* Take the hashed lock covering address @l.  Local interrupts are
+ * disabled before spinning so an IRQ on this CPU cannot deadlock
+ * against the held lock. */
+#define _atomic_spin_lock_irqsave(l,f) do {	\
+	arch_spinlock_t *s = ATOMIC_HASH(l);		\
+	local_irq_save(f);			\
+	arch_spin_lock(s);			\
+} while(0)
+
+/* Release in reverse order: drop the lock, then restore IRQ state. */
+#define _atomic_spin_unlock_irqrestore(l,f) do {	\
+	arch_spinlock_t *s = ATOMIC_HASH(l);			\
+	arch_spin_unlock(s);				\
+	local_irq_restore(f);				\
+} while(0)
+
+
+#else
+/* UP: masking local interrupts alone makes the RMW sequence atomic. */
+#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
+#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
+#endif
+
+/*
+ * Note that we need not lock read accesses - aligned word writes/reads
+ * are atomic, so a reader never sees inconsistent values.
+ */
+
+/* Plain store, but taken under the same hashed spinlock the RMW ops
+ * use, so a set cannot land in the middle of a locked RMW sequence. */
+static __inline__ void atomic_set(atomic_t *v, int i)
+{
+	unsigned long flags;
+	_atomic_spin_lock_irqsave(v, flags);
+
+	v->counter = i;
+
+	_atomic_spin_unlock_irqrestore(v, flags);
+}
+
+#define atomic_set_release(v, i)	atomic_set((v), (i))
+
+/* Lock-free read: an aligned word read never sees a torn value (see
+ * the comment above). */
+static __inline__ int atomic_read(const atomic_t *v)
+{
+	return READ_ONCE((v)->counter);
+}
+
+/* exported interface */
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v.
+ */
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int cur, prev;
+
+	/* cmpxchg loop: retry until either v equals @u (no add is done)
+	 * or the compare-and-swap lands.  Either way the value observed
+	 * before the (possible) add is returned, per the contract above. */
+	cur = atomic_read(v);
+	while (cur != u) {
+		prev = atomic_cmpxchg(v, cur, cur + a);
+		if (prev == cur)
+			break;
+		cur = prev;
+	}
+	return cur;
+}
+
+/* Spinlocked read-modify-write generators:
+ *   ATOMIC_OP(op)        -> void atomic_<op>(i, v)
+ *   ATOMIC_OP_RETURN(op) -> atomic_<op>_return(), yields the NEW value
+ *   ATOMIC_FETCH_OP(op)  -> atomic_fetch_<op>(), yields the OLD value
+ * "v->counter c_op i" is evaluated under the hashed spinlock. */
+#define ATOMIC_OP(op, c_op)						\
+static __inline__ void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long flags;						\
+									\
+	_atomic_spin_lock_irqsave(v, flags);				\
+	v->counter c_op i;						\
+	_atomic_spin_unlock_irqrestore(v, flags);			\
+}									\
+
+#define ATOMIC_OP_RETURN(op, c_op)					\
+static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned long flags;						\
+	int ret;							\
+									\
+	_atomic_spin_lock_irqsave(v, flags);				\
+	ret = (v->counter c_op i);					\
+	_atomic_spin_unlock_irqrestore(v, flags);			\
+									\
+	return ret;							\
+}
+
+#define ATOMIC_FETCH_OP(op, c_op)					\
+static __inline__ int atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	unsigned long flags;						\
+	int ret;							\
+									\
+	_atomic_spin_lock_irqsave(v, flags);				\
+	ret = v->counter;						\
+	v->counter c_op i;						\
+	_atomic_spin_unlock_irqrestore(v, flags);			\
+									\
+	return ret;							\
+}
+
+#define ATOMIC_OPS(op, c_op)						\
+	ATOMIC_OP(op, c_op)						\
+	ATOMIC_OP_RETURN(op, c_op)					\
+	ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(add, +=)
+ATOMIC_OPS(sub, -=)
+
+/* Bitwise ops have no *_return form, only *_fetch_*. */
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op)						\
+	ATOMIC_OP(op, c_op)						\
+	ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(and, &=)
+ATOMIC_OPS(or, |=)
+ATOMIC_OPS(xor, ^=)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+/* inc/dec expressed through add; the *_and_test helpers compare the
+ * post-operation value returned by atomic_add_return(). */
+#define atomic_inc(v)	(atomic_add(   1,(v)))
+#define atomic_dec(v)	(atomic_add(  -1,(v)))
+
+#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
+#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))
+
+/* True when the result of the add is negative. */
+#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)
+
+#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+#ifdef CONFIG_64BIT
+
+#define ATOMIC64_INIT(i) { (i) }
+
+/* 64-bit mirror of the ATOMIC_OP generators above, using s64 and the
+ * same hashed spinlock scheme. */
+#define ATOMIC64_OP(op, c_op)						\
+static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
+{									\
+	unsigned long flags;						\
+									\
+	_atomic_spin_lock_irqsave(v, flags);				\
+	v->counter c_op i;						\
+	_atomic_spin_unlock_irqrestore(v, flags);			\
+}									\
+
+#define ATOMIC64_OP_RETURN(op, c_op)					\
+static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
+{									\
+	unsigned long flags;						\
+	s64 ret;							\
+									\
+	_atomic_spin_lock_irqsave(v, flags);				\
+	ret = (v->counter c_op i);					\
+	_atomic_spin_unlock_irqrestore(v, flags);			\
+									\
+	return ret;							\
+}
+
+#define ATOMIC64_FETCH_OP(op, c_op)					\
+static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v)		\
+{									\
+	unsigned long flags;						\
+	s64 ret;							\
+									\
+	_atomic_spin_lock_irqsave(v, flags);				\
+	ret = v->counter;						\
+	v->counter c_op i;						\
+	_atomic_spin_unlock_irqrestore(v, flags);			\
+									\
+	return ret;							\
+}
+
+#define ATOMIC64_OPS(op, c_op)						\
+	ATOMIC64_OP(op, c_op)						\
+	ATOMIC64_OP_RETURN(op, c_op)					\
+	ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+/* As above: bitwise ops get only the fetch_* form. */
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op)						\
+	ATOMIC64_OP(op, c_op)						\
+	ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &=)
+ATOMIC64_OPS(or, |=)
+ATOMIC64_OPS(xor, ^=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+/* Plain 64-bit store under the hashed lock, mirroring atomic_set(). */
+static __inline__ void
+atomic64_set(atomic64_t *v, s64 i)
+{
+	unsigned long flags;
+	_atomic_spin_lock_irqsave(v, flags);
+
+	v->counter = i;
+
+	_atomic_spin_unlock_irqrestore(v, flags);
+}
+
+#define atomic64_set_release(v, i)	atomic64_set((v), (i))
+
+/* Lock-free 64-bit read (aligned dword reads are atomic on 64-bit). */
+static __inline__ s64
+atomic64_read(const atomic64_t *v)
+{
+	/* Use READ_ONCE() like atomic_read() above; ACCESS_ONCE() is the
+	 * deprecated spelling and was removed from later kernels. */
+	return READ_ONCE((v)->counter);
+}
+
+/* Same derived helpers as the 32-bit versions above. */
+#define atomic64_inc(v)		(atomic64_add(   1,(v)))
+#define atomic64_dec(v)		(atomic64_add(  -1,(v)))
+
+#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
+#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+
+#define atomic64_inc_and_test(v) 	(atomic64_inc_return(v) == 0)
+#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
+#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)
+
+/* exported interface */
+#define atomic64_cmpxchg(v, o, n) \
+	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if the add was performed (@v was not @u), zero
+ * otherwise.  Note: unlike __atomic_add_unless() above, this does NOT
+ * return the old value.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+	long c, old;
+	c = atomic64_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic64_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);	/* true iff the cmpxchg loop did the add */
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long seen, swapped, next;
+
+	/* cmpxchg loop: give up (without storing) once the decremented
+	 * value would go negative.  Per the comment above, old - 1 is
+	 * returned either way. */
+	seen = atomic64_read(v);
+	for (;;) {
+		next = seen - 1;
+		if (unlikely(next < 0))
+			break;
+		swapped = atomic64_cmpxchg(v, seen, next);
+		if (likely(swapped == seen))
+			break;
+		seen = swapped;
+	}
+	return next;
+}
+
+#endif /* CONFIG_64BIT */
+
+
+#endif /* _ASM_PARISC_ATOMIC_H_ */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/barrier.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/barrier.h
new file mode 100644
index 0000000..640d46e
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/barrier.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* The synchronize caches instruction executes as a nop on systems in
+   which all memory references are performed in order. */
+#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
+
+#if defined(CONFIG_SMP)
+/* SMP: full barriers are the "sync" instruction; the "memory" clobber
+ * doubles as the compiler barrier. */
+#define mb()		do { synchronize_caches(); } while (0)
+#define rmb()		mb()
+#define wmb()		mb()
+#define dma_rmb()	mb()
+#define dma_wmb()	mb()
+#else
+/* UP: a compiler barrier is sufficient. */
+#define mb()		barrier()
+#define rmb()		barrier()
+#define wmb()		barrier()
+#define dma_rmb()	barrier()
+#define dma_wmb()	barrier()
+#endif
+
+/* All SMP-conditional barriers map to the full barrier here. */
+#define __smp_mb()	mb()
+#define __smp_rmb()	mb()
+#define __smp_wmb()	mb()
+
+/* Store-release: an inline-asm store sized by sizeof(*p).
+ * NOTE(review): the ordering guarantee apparently comes from the ",ma"
+ * completer with a zero displacement (the address is unmodified) --
+ * confirm against the PA 2.0 memory-ordering rules.  A size-8 store on
+ * a 32-bit kernel compiles to nothing (the switch arm is discarded). */
+#define __smp_store_release(p, v)					\
+do {									\
+	typeof(p) __p = (p);						\
+        union { typeof(*p) __val; char __c[1]; } __u =			\
+                { .__val = (__force typeof(*p)) (v) };			\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 1:								\
+		asm volatile("stb,ma %0,0(%1)"				\
+				: : "r"(*(__u8 *)__u.__c), "r"(__p)	\
+				: "memory");				\
+		break;							\
+	case 2:								\
+		asm volatile("sth,ma %0,0(%1)"				\
+				: : "r"(*(__u16 *)__u.__c), "r"(__p)	\
+				: "memory");				\
+		break;							\
+	case 4:								\
+		asm volatile("stw,ma %0,0(%1)"				\
+				: : "r"(*(__u32 *)__u.__c), "r"(__p)	\
+				: "memory");				\
+		break;							\
+	case 8:								\
+		if (IS_ENABLED(CONFIG_64BIT))				\
+			asm volatile("std,ma %0,0(%1)"			\
+				: : "r"(*(__u64 *)__u.__c), "r"(__p)	\
+				: "memory");				\
+		break;							\
+	}								\
+} while (0)
+
+/* Load-acquire counterpart of __smp_store_release() above.
+ * NOTE(review): for size 8 on a 32-bit kernel the asm is compiled out
+ * and __u is returned uninitialized -- presumably such accesses are
+ * never generated there (compiletime_assert_atomic_type); verify. */
+#define __smp_load_acquire(p)						\
+({									\
+	union { typeof(*p) __val; char __c[1]; } __u;			\
+	typeof(p) __p = (p);						\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 1:								\
+		asm volatile("ldb,ma 0(%1),%0"				\
+				: "=r"(*(__u8 *)__u.__c) : "r"(__p)	\
+				: "memory");				\
+		break;							\
+	case 2:								\
+		asm volatile("ldh,ma 0(%1),%0"				\
+				: "=r"(*(__u16 *)__u.__c) : "r"(__p)	\
+				: "memory");				\
+		break;							\
+	case 4:								\
+		asm volatile("ldw,ma 0(%1),%0"				\
+				: "=r"(*(__u32 *)__u.__c) : "r"(__p)	\
+				: "memory");				\
+		break;							\
+	case 8:								\
+		if (IS_ENABLED(CONFIG_64BIT))				\
+			asm volatile("ldd,ma 0(%1),%0"			\
+				: "=r"(*(__u64 *)__u.__c) : "r"(__p)	\
+				: "memory");				\
+		break;							\
+	}								\
+	__u.__val;							\
+})
+#include <asm-generic/barrier.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_BARRIER_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/bitops.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/bitops.h
new file mode 100644
index 0000000..53252d4
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/bitops.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_BITOPS_H
+#define _PARISC_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <asm/barrier.h>
+#include <linux/atomic.h>
+
+/*
+ * HP-PARISC specific bit operations
+ * for a detailed description of the functions please refer
+ * to include/asm-i386/bitops.h or kerneldoc
+ */
+
+#if __BITS_PER_LONG == 64
+#define SHIFT_PER_LONG 6
+#else
+#define SHIFT_PER_LONG 5
+#endif
+
+#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
+
+
+/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
+ * on use of volatile and __*_bit() (set/clear/change):
+ *	*_bit() want use of volatile.
+ *	__*_bit() are "relaxed" and don't use spinlock or volatile.
+ */
+
+/* Atomic bit ops: advance @addr to the long word containing bit @nr,
+ * then do the read-modify-write under the hashed spinlock (atomic.h). */
+static __inline__ void set_bit(int nr, volatile unsigned long * addr)
+{
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long flags;
+
+	addr += (nr >> SHIFT_PER_LONG);
+	_atomic_spin_lock_irqsave(addr, flags);
+	*addr |= mask;
+	_atomic_spin_unlock_irqrestore(addr, flags);
+}
+
+static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
+{
+	/* Note: mask is already inverted here, unlike in set_bit(). */
+	unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
+	unsigned long flags;
+
+	addr += (nr >> SHIFT_PER_LONG);
+	_atomic_spin_lock_irqsave(addr, flags);
+	*addr &= mask;
+	_atomic_spin_unlock_irqrestore(addr, flags);
+}
+
+static __inline__ void change_bit(int nr, volatile unsigned long * addr)
+{
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long flags;
+
+	addr += (nr >> SHIFT_PER_LONG);
+	_atomic_spin_lock_irqsave(addr, flags);
+	*addr ^= mask;
+	_atomic_spin_unlock_irqrestore(addr, flags);
+}
+
+/* Atomically set bit @nr and return its previous value (0 or 1).
+ * The store is skipped when the bit was already set. */
+static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
+{
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long prev;
+	unsigned long flags;
+
+	addr += (nr >> SHIFT_PER_LONG);
+	_atomic_spin_lock_irqsave(addr, flags);
+	prev = *addr;
+	if (!(prev & mask))
+		*addr = prev | mask;
+	_atomic_spin_unlock_irqrestore(addr, flags);
+
+	return (prev & mask) != 0;
+}
+
+/* Atomically clear bit @nr and return its previous value (0 or 1).
+ * The store is skipped when the bit was already clear. */
+static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
+{
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long prev;
+	unsigned long flags;
+
+	addr += (nr >> SHIFT_PER_LONG);
+	_atomic_spin_lock_irqsave(addr, flags);
+	prev = *addr;
+	if (prev & mask)
+		*addr = prev & ~mask;
+	_atomic_spin_unlock_irqrestore(addr, flags);
+
+	return (prev & mask) != 0;
+}
+
+/* Atomically flip bit @nr and return its previous value.  Unlike the
+ * set/clear variants, the store here is unconditional. */
+static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
+{
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long oldbit;
+	unsigned long flags;
+
+	addr += (nr >> SHIFT_PER_LONG);
+	_atomic_spin_lock_irqsave(addr, flags);
+	oldbit = *addr;
+	*addr = oldbit ^ mask;
+	_atomic_spin_unlock_irqrestore(addr, flags);
+
+	return (oldbit & mask) ? 1 : 0;
+}
+
+#include <asm-generic/bitops/non-atomic.h>
+
+#ifdef __KERNEL__
+
+/**
+ * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1".
+ * @word: The word to search
+ *
+ * __ffs() return is undefined if no bit is set.
+ *
+ * 32-bit fast __ffs by LaMont Jones "lamont At hp com".
+ * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org".
+ * (with help from willy/jejb to get the semantics right)
+ *
+ * This algorithm avoids branches by making use of nullification.
+ * One side effect of "extr" instructions is it sets PSW[N] bit.
+ * How PSW[N] (nullify next insn) gets set is determined by the 
+ * "condition" field (eg "<>" or "TR" below) in the extr* insn.
+ * Only the 1st and one of either the 2cd or 3rd insn will get executed.
+ * Each set of 3 insn will get executed in 2 cycles on PA8x00 vs 16 or so
+ * cycles for each mispredicted branch.
+ */
+
+/* Branch-free binary search for the lowest set bit: each extr* either
+ * nullifies the following insn or not (see the comment above). */
+static __inline__ unsigned long __ffs(unsigned long x)
+{
+	unsigned long ret;
+
+	__asm__(
+#ifdef CONFIG_64BIT
+		" ldi       63,%1\n"
+		" extrd,u,*<>  %0,63,32,%%r0\n"
+		" extrd,u,*TR  %0,31,32,%0\n"	/* move top 32-bits down */
+		" addi    -32,%1,%1\n"
+#else
+		" ldi       31,%1\n"
+#endif
+		" extru,<>  %0,31,16,%%r0\n"
+		" extru,TR  %0,15,16,%0\n"	/* xxxx0000 -> 0000xxxx */
+		" addi    -16,%1,%1\n"
+		" extru,<>  %0,31,8,%%r0\n"
+		" extru,TR  %0,23,8,%0\n"	/* 0000xx00 -> 000000xx */
+		" addi    -8,%1,%1\n"
+		" extru,<>  %0,31,4,%%r0\n"
+		" extru,TR  %0,27,4,%0\n"	/* 000000x0 -> 0000000x */
+		" addi    -4,%1,%1\n"
+		" extru,<>  %0,31,2,%%r0\n"
+		" extru,TR  %0,29,2,%0\n"	/* 0000000y, 1100b -> 0011b */
+		" addi    -2,%1,%1\n"
+		" extru,=  %0,31,1,%%r0\n"	/* check last bit */
+		" addi    -1,%1,%1\n"
+			: "+r" (x), "=r" (ret) );
+	return ret;
+}
+
+#include <asm-generic/bitops/ffz.h>
+
+/*
+ * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
+ * This is defined the same way as the libc and compiler builtin
+ * ffs routines, therefore differs in spirit from the above ffz (man ffs).
+ */
+/* 1-based find-first-set matching the libc/compiler builtin;
+ * ffs(0) == 0 (see the comment above). */
+static __inline__ int ffs(int x)
+{
+	if (!x)
+		return 0;
+	return __ffs((unsigned long)x) + 1;
+}
+
+/*
+ * fls: find last (most significant) bit set.
+ * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+/* Highest set bit, 1-based; fls(0) == 0.  Binary search from the top:
+ * zdep shifts the value left while %1 accumulates the bit position --
+ * the same nullification trick as __ffs() above. */
+static __inline__ int fls(int x)
+{
+	int ret;
+	if (!x)
+		return 0;
+
+	__asm__(
+	"	ldi		1,%1\n"
+	"	extru,<>	%0,15,16,%%r0\n"
+	"	zdep,TR		%0,15,16,%0\n"		/* xxxx0000 */
+	"	addi		16,%1,%1\n"
+	"	extru,<>	%0,7,8,%%r0\n"
+	"	zdep,TR		%0,23,24,%0\n"		/* xx000000 */
+	"	addi		8,%1,%1\n"
+	"	extru,<>	%0,3,4,%%r0\n"
+	"	zdep,TR		%0,27,28,%0\n"		/* x0000000 */
+	"	addi		4,%1,%1\n"
+	"	extru,<>	%0,1,2,%%r0\n"
+	"	zdep,TR		%0,29,30,%0\n"		/* y0000000 (y&3 = 0) */
+	"	addi		2,%1,%1\n"
+	"	extru,=		%0,0,1,%%r0\n"
+	"	addi		1,%1,%1\n"		/* if y & 8, add 1 */
+		: "+r" (x), "=r" (ret) );
+
+	return ret;
+}
+
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/sched.h>
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/bitops/find.h>
+
+#ifdef __KERNEL__
+
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#endif	/* __KERNEL__ */
+
+#endif /* _PARISC_BITOPS_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/bug.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/bug.h
new file mode 100644
index 0000000..4b6d60b
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/bug.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_BUG_H
+#define _PARISC_BUG_H
+
+#include <linux/kernel.h>	/* for BUGFLAG_TAINT */
+
+/*
+ * Tell the user there is some problem.
+ * The offending file and line are encoded in the __bug_table section.
+ */
+
+#ifdef CONFIG_BUG
+#define HAVE_ARCH_BUG
+#define HAVE_ARCH_WARN_ON
+
+/* the break instruction is used as BUG() marker.  */
+#define	PARISC_BUG_BREAK_ASM	"break 0x1f, 0x1fff"
+#define	PARISC_BUG_BREAK_INSN	0x03ffe01f  /* PARISC_BUG_BREAK_ASM */
+
+#if defined(CONFIG_64BIT)
+#define ASM_WORD_INSN		".dword\t"
+#else
+#define ASM_WORD_INSN		".word\t"
+#endif
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+/* Verbose BUG(): plant the trapping break insn and record its address,
+ * file, line, flags (0) and the entry size in __bug_table. */
+#define BUG()								\
+	do {								\
+		asm volatile("\n"					\
+			     "1:\t" PARISC_BUG_BREAK_ASM "\n"		\
+			     "\t.pushsection __bug_table,\"aw\"\n"	\
+			     "2:\t" ASM_WORD_INSN "1b, %c0\n"		\
+			     "\t.short %c1, %c2\n"			\
+			     "\t.org 2b+%c3\n"				\
+			     "\t.popsection"				\
+			     : : "i" (__FILE__), "i" (__LINE__),	\
+			     "i" (0), "i" (sizeof(struct bug_entry)) ); \
+		unreachable();						\
+	} while(0)
+
+#else
+/* Terse BUG(): just the trapping break, no table entry. */
+#define BUG()								\
+	do {								\
+		asm volatile(PARISC_BUG_BREAK_ASM : : );		\
+		unreachable();						\
+	} while(0)
+#endif
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+/* WARN: same break insn, but flagged BUGFLAG_WARNING so the trap
+ * handler can tell it apart from BUG(). */
+#define __WARN_FLAGS(flags)						\
+	do {								\
+		asm volatile("\n"					\
+			     "1:\t" PARISC_BUG_BREAK_ASM "\n"		\
+			     "\t.pushsection __bug_table,\"aw\"\n"	\
+			     "2:\t" ASM_WORD_INSN "1b, %c0\n"		\
+			     "\t.short %c1, %c2\n"			\
+			     "\t.org 2b+%c3\n"				\
+			     "\t.popsection"				\
+			     : : "i" (__FILE__), "i" (__LINE__),	\
+			     "i" (BUGFLAG_WARNING|(flags)),		\
+			     "i" (sizeof(struct bug_entry)) );		\
+	} while(0)
+#else
+#define __WARN_FLAGS(flags)						\
+	do {								\
+		asm volatile("\n"					\
+			     "1:\t" PARISC_BUG_BREAK_ASM "\n"		\
+			     "\t.pushsection __bug_table,\"aw\"\n"	\
+			     "2:\t" ASM_WORD_INSN "1b\n"		\
+			     "\t.short %c0\n"				\
+			     "\t.org 2b+%c1\n"				\
+			     "\t.popsection"				\
+			     : : "i" (BUGFLAG_WARNING|(flags)),		\
+			     "i" (sizeof(struct bug_entry)) );		\
+	} while(0)
+#endif
+
+
+/* Evaluate @x exactly once; the constant-folded branch lets the
+ * compiler drop the trap for compile-time-false conditions. */
+#define WARN_ON(x) ({						\
+	int __ret_warn_on = !!(x);				\
+	if (__builtin_constant_p(__ret_warn_on)) {		\
+		if (__ret_warn_on)				\
+			__WARN();				\
+	} else {						\
+		if (unlikely(__ret_warn_on))			\
+			__WARN();				\
+	}							\
+	unlikely(__ret_warn_on);				\
+})
+
+#endif
+
+#include <asm-generic/bug.h>
+#endif
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/bugs.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/bugs.h
new file mode 100644
index 0000000..0a7f9db
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/bugs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  include/asm-parisc/bugs.h
+ *
+ *  Copyright (C) 1999	Mike Shaver
+ */
+
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ *	void check_bugs(void);
+ */
+
+#include <asm/processor.h>
+
+/* Called from init/main.c; nothing to check on parisc yet. */
+static inline void check_bugs(void)
+{
+//	identify_cpu(&boot_cpu_data);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/cache.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/cache.h
new file mode 100644
index 0000000..150b7f3
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/cache.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/asm-parisc/cache.h
+ */
+
+#ifndef __ARCH_PARISC_CACHE_H
+#define __ARCH_PARISC_CACHE_H
+
+
+/*
+ * PA 2.0 processors have 64 and 128-byte L2 cachelines; PA 1.1 processors
+ * have 32-byte cachelines.  The L1 length appears to be 16 bytes but this
+ * is not clearly documented.
+ */
+#define L1_CACHE_BYTES 16
+#define L1_CACHE_SHIFT 4
+
+#ifndef __ASSEMBLY__
+
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+/* Read-only memory is marked before mark_rodata_ro() is called. */
+#define __ro_after_init	__read_mostly
+
+void parisc_cache_init(void);	/* initializes cache-flushing */
+void disable_sr_hashing_asm(int); /* low level support for above */
+void disable_sr_hashing(void);   /* turns off space register hashing */
+void free_sid(unsigned long);
+unsigned long alloc_sid(void);
+
+struct seq_file;
+extern void show_cache_info(struct seq_file *m);
+
+extern int split_tlb;
+extern int dcache_stride;
+extern int icache_stride;
+extern struct pdc_cache_info cache_info;
+void parisc_setup_cache_timing(void);
+
+/* Purge a single D-/I-TLB entry; pdtlb/pitlb translate @addr through
+ * space register %sr1, pdtlb_kernel uses the implicit space.
+ * NOTE(review): the trailing ';' inside these macros yields a double
+ * semicolon at call sites -- harmless in statement context. */
+#define pdtlb(addr)         asm volatile("pdtlb 0(%%sr1,%0)" : : "r" (addr));
+#define pitlb(addr)         asm volatile("pitlb 0(%%sr1,%0)" : : "r" (addr));
+#define pdtlb_kernel(addr)  asm volatile("pdtlb 0(%0)" : : "r" (addr));
+
+#endif /* ! __ASSEMBLY__ */
+
+/* Classes of processor wrt: disabling space register hashing */
+
+#define SRHASH_PCXST    0   /* pcxs, pcxt, pcxt_ */
+#define SRHASH_PCXL     1   /* pcxl */
+#define SRHASH_PA20     2   /* pcxu, pcxu_, pcxw, pcxw_ */
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/cacheflush.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/cacheflush.h
new file mode 100644
index 0000000..bd5ce31
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/cacheflush.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_CACHEFLUSH_H
+#define _PARISC_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <asm/tlbflush.h>
+
+/* The usual comment is "Caches aren't brain-dead on the <architecture>".
+ * Unfortunately, that doesn't apply to PA-RISC. */
+
+/* Internal implementation */
+void flush_data_cache_local(void *);  /* flushes local data-cache only */
+void flush_instruction_cache_local(void *); /* flushes local code-cache only */
+#ifdef CONFIG_SMP
+void flush_data_cache(void); /* flushes data-cache only (all processors) */
+void flush_instruction_cache(void); /* flushes i-cache only (all processors) */
+#else
+#define flush_data_cache() flush_data_cache_local(NULL)
+#define flush_instruction_cache() flush_instruction_cache_local(NULL)
+#endif
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+void flush_user_icache_range_asm(unsigned long, unsigned long);
+void flush_kernel_icache_range_asm(unsigned long, unsigned long);
+void flush_user_dcache_range_asm(unsigned long, unsigned long);
+void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
+void flush_kernel_dcache_page_asm(void *);
+void flush_kernel_icache_page(void *);
+
+/* Cache flush operations */
+
+void flush_cache_all_local(void);
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+void flush_kernel_dcache_page_addr(void *addr);
+/* Flush the kernel-virtual mapping of @page from the D-cache. */
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+	flush_kernel_dcache_page_addr(page_address(page));
+}
+
+/* NOTE(review): trailing ';' inside this macro produces a double
+ * semicolon at call sites -- harmless in statement context. */
+#define flush_kernel_dcache_range(start,size) \
+	flush_kernel_dcache_range_asm((start), (start)+(size));
+
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);
+
+/* vmap/vunmap ranges are not tracked individually: flush everything. */
+#define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vunmap(start, end)		flush_cache_all()
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+extern void flush_dcache_page(struct page *page);
+
+#define flush_dcache_mmap_lock(mapping) \
+	spin_lock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_unlock(mapping) \
+	spin_unlock_irq(&(mapping)->tree_lock)
+
+/* I-cache flush goes through the kernel mapping, so push the D-cache
+ * first, then the I-cache. */
+#define flush_icache_page(vma,page)	do { 		\
+	flush_kernel_dcache_page(page);			\
+	flush_kernel_icache_page(page_address(page)); 	\
+} while (0)
+
+#define flush_icache_range(s,e)		do { 		\
+	flush_kernel_dcache_range_asm(s,e); 		\
+	flush_kernel_icache_range_asm(s,e); 		\
+} while (0)
+
+/* Flush the user mapping, copy, then flush the kernel D-cache over the
+ * destination range. */
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
+	memcpy(dst, src, len); \
+	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
+	memcpy(dst, src, len); \
+} while (0)
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
+void flush_cache_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end);
+
+/* defined in pacache.S exported in cache.c used by flush_anon_page */
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+
+#define ARCH_HAS_FLUSH_ANON_PAGE
+/* Flush an anonymous page: purge its TLB entry, then flush the D-cache
+ * by physical address.  The preempt_disable() around the asm suggests
+ * it uses a per-CPU temporary mapping -- confirm in pacache.S. */
+static inline void
+flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+{
+	if (PageAnon(page)) {
+		flush_tlb_page(vma, vmaddr);
+		preempt_disable();
+		flush_dcache_page_asm(page_to_phys(page), vmaddr);
+		preempt_enable();
+	}
+}
+
+#include <asm/kmap_types.h>
+
+#define ARCH_HAS_KMAP
+
+/* No highmem on parisc: kmap()/kmap_atomic() just return the page's
+ * linear address.  The unmap side flushes the kernel D-cache mapping
+ * (presumably for cache-alias coherency with user mappings). */
+static inline void *kmap(struct page *page)
+{
+	might_sleep();
+	return page_address(page);
+}
+
+static inline void kunmap(struct page *page)
+{
+	flush_kernel_dcache_page_addr(page_address(page));
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+	preempt_disable();
+	pagefault_disable();
+	return page_address(page);
+}
+
+static inline void __kunmap_atomic(void *addr)
+{
+	flush_kernel_dcache_page_addr(addr);
+	pagefault_enable();
+	preempt_enable();
+}
+
+#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
+#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
+
+#endif /* _PARISC_CACHEFLUSH_H */
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/checksum.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/checksum.h
new file mode 100644
index 0000000..3cbf1f1
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/checksum.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_CHECKSUM_H
+#define _PARISC_CHECKSUM_H
+
+#include <linux/in6.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern __wsum csum_partial(const void *, int, __wsum);
+
+/*
+ * The same as csum_partial, but copies from src while it checksums.
+ *
+ * Here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum);
+
+/*
+ * this is a new version of the above that records errors it finds in *errp,
+ * but continues and zeros the rest of the buffer.
+ */
+extern __wsum csum_partial_copy_from_user(const void __user *src,
+		void *dst, int len, __wsum sum, int *errp);
+
+/*
+ *	Optimized for IP headers, which always checksum on 4 octet boundaries.
+ *
+ *	Written by Randolph Chung <tausq@debian.org>, and then mucked with by
+ *	LaMont Jones <lamont@debian.org>
+ */
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	unsigned int sum;
+
+	/* Sum 32-bit words with carry (four unrolled, then a loop for
+	 * the rest), fold to 16 bits and complement.  @ihl counts
+	 * 32-bit words; ihl <= 4 takes the early exit to label 2. */
+	__asm__ __volatile__ (
+"	ldws,ma		4(%1), %0\n"
+"	addib,<=	-4, %2, 2f\n"
+"\n"
+"	ldws		4(%1), %%r20\n"
+"	ldws		8(%1), %%r21\n"
+"	add		%0, %%r20, %0\n"
+"	ldws,ma		12(%1), %%r19\n"
+"	addc		%0, %%r21, %0\n"
+"	addc		%0, %%r19, %0\n"
+"1:	ldws,ma		4(%1), %%r19\n"
+"	addib,<		0, %2, 1b\n"
+"	addc		%0, %%r19, %0\n"
+"\n"
+"	extru		%0, 31, 16, %%r20\n"
+"	extru		%0, 15, 16, %%r21\n"
+"	addc		%%r20, %%r21, %0\n"
+"	extru		%0, 15, 16, %%r21\n"
+"	add		%0, %%r21, %0\n"
+"	subi		-1, %0, %0\n"
+"2:\n"
+	: "=r" (sum), "=r" (iph), "=r" (ihl)
+	: "1" (iph), "2" (ihl)
+	: "r19", "r20", "r21", "memory");
+
+	return (__force __sum16)sum;
+}
+
+/*
+ *	Fold a partial checksum
+ */
+static inline __sum16 csum_fold(__wsum csum)
+{
+	u32 tmp = (__force u32)csum;
+
+	/* Adding the halfword-swapped copy pushes any carry out of the
+	 * low half into the high half, so the high half ends up holding
+	 * the fully folded 16-bit sum.  (The two shifted terms have no
+	 * overlapping bits, so '|' and '+' are interchangeable here.) */
+	tmp += (tmp << 16) | (tmp >> 16);
+	return (__force __sum16)(~tmp >> 16);
+}
+ 
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+					__u32 len, __u8 proto,
+					__wsum sum)
+{
+	__asm__(
+	"	add  %1, %0, %0\n"
+	"	addc %2, %0, %0\n"
+	"	addc %3, %0, %0\n"
+	"	addc %%r0, %0, %0\n"
+		: "=r" (sum)
+		: "r" (daddr), "r"(saddr), "r"(proto+len), "0"(sum));
+	return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+					__u32 len, __u8 proto,
+					__wsum sum)
+{
+	/* Pseudo-header sum, then fold+complement to 16 bits. */
+	__wsum partial = csum_tcpudp_nofold(saddr, daddr, len, proto, sum);
+
+	return csum_fold(partial);
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+/* Checksum an arbitrary buffer and fold -- the ICMP-style helper. */
+static inline __sum16 ip_compute_csum(const void *buf, int len)
+{
+	return csum_fold(csum_partial(buf, len, 0));
+}
+
+
+#define _HAVE_ARCH_IPV6_CSUM
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+					  const struct in6_addr *daddr,
+					  __u32 len, __u8 proto,
+					  __wsum sum)
+{
+	/* Sum the two 16-byte addresses plus proto+len with explicit
+	 * carry-propagating adds (dword loads on 64-bit, word loads on
+	 * 32-bit), then fold with csum_fold(). */
+	__asm__ __volatile__ (
+
+#if BITS_PER_LONG > 32
+
+	/*
+	** We can execute two loads and two adds per cycle on PA 8000.
+	** But add insn's get serialized waiting for the carry bit.
+	** Try to keep 4 registers with "live" values ahead of the ALU.
+	*/
+
+"	ldd,ma		8(%1), %%r19\n"	/* get 1st saddr word */
+"	ldd,ma		8(%2), %%r20\n"	/* get 1st daddr word */
+"	add		%8, %3, %3\n"/* add 16-bit proto + len */
+"	add		%%r19, %0, %0\n"
+"	ldd,ma		8(%1), %%r21\n"	/* 2cd saddr */
+"	ldd,ma		8(%2), %%r22\n"	/* 2cd daddr */
+"	add,dc		%%r20, %0, %0\n"
+"	add,dc		%%r21, %0, %0\n"
+"	add,dc		%%r22, %0, %0\n"
+"	add,dc		%3, %0, %0\n"  /* fold in proto+len | carry bit */
+"	extrd,u		%0, 31, 32, %%r19\n"	/* copy upper half down */
+"	depdi		0, 31, 32, %0\n"	/* clear upper half */
+"	add		%%r19, %0, %0\n"	/* fold into 32-bits */
+"	addc		0, %0, %0\n"		/* add carry */
+
+#else
+
+	/*
+	** For PA 1.x, the insn order doesn't matter as much.
+	** Insn stream is serialized on the carry bit here too.
+	** result from the previous operation (eg r0 + x)
+	*/
+
+"	ldw,ma		4(%1), %%r19\n"	/* get 1st saddr word */
+"	ldw,ma		4(%2), %%r20\n"	/* get 1st daddr word */
+"	add		%8, %3, %3\n"	/* add 16-bit proto + len */
+"	add		%%r19, %0, %0\n"
+"	ldw,ma		4(%1), %%r21\n"	/* 2cd saddr */
+"	addc		%%r20, %0, %0\n"
+"	ldw,ma		4(%2), %%r22\n"	/* 2cd daddr */
+"	addc		%%r21, %0, %0\n"
+"	ldw,ma		4(%1), %%r19\n"	/* 3rd saddr */
+"	addc		%%r22, %0, %0\n"
+"	ldw,ma		4(%2), %%r20\n"	/* 3rd daddr */
+"	addc		%%r19, %0, %0\n"
+"	ldw,ma		4(%1), %%r21\n"	/* 4th saddr */
+"	addc		%%r20, %0, %0\n"
+"	ldw,ma		4(%2), %%r22\n"	/* 4th daddr */
+"	addc		%%r21, %0, %0\n"
+"	addc		%%r22, %0, %0\n"
+"	addc		%3, %0, %0\n"	/* fold in proto+len, catch carry */
+
+#endif
+	: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
+	: "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
+	: "r19", "r20", "r21", "r22", "memory");
+	return csum_fold(sum);
+}
+
+/* 
+ *	Copy and checksum to user
+ */
+#define HAVE_CSUM_COPY_USER
+/* Checksum @src (a kernel buffer), then copy it out to user space.
+ * On a faulting copy, report -EFAULT through *err_ptr and return the
+ * unusable checksum -1. */
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+						      void __user *dst,
+						      int len, __wsum sum,
+						      int *err_ptr)
+{
+	/* code stolen from include/asm-mips64 */
+	sum = csum_partial(src, len, sum);
+	 
+	if (copy_to_user(dst, src, len)) {
+		*err_ptr = -EFAULT;
+		return (__force __wsum)-1;
+	}
+
+	return sum;
+}
+
+#endif
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/cmpxchg.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/cmpxchg.h
new file mode 100644
index 0000000..0689585
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/cmpxchg.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * forked from parisc asm/atomic.h which was:
+ *	Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ *	Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
+ */
+
+#ifndef _ASM_PARISC_CMPXCHG_H_
+#define _ASM_PARISC_CMPXCHG_H_
+
+/* This should get optimized out since it's never called.
+** Or get a link error if xchg is used "wrong".
+*/
+extern void __xchg_called_with_bad_pointer(void);
+
+/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
+extern unsigned long __xchg8(char, char *);
+extern unsigned long __xchg32(int, int *);
+#ifdef CONFIG_64BIT
+extern unsigned long __xchg64(unsigned long, unsigned long *);
+#endif
+
+/* optimizer better get rid of switch since size is a constant */
+static inline unsigned long
+__xchg(unsigned long x, __volatile__ void *ptr, int size)
+{
+	/*
+	 * Dispatch on the byte width of *ptr; the helpers live in
+	 * arch/parisc/lib/bitops.c.  There is no 2-byte case, so a
+	 * 16-bit xchg() resolves to the undefined
+	 * __xchg_called_with_bad_pointer() and fails at link time.
+	 */
+	switch (size) {
+#ifdef CONFIG_64BIT
+	case 8: return __xchg64(x, (unsigned long *) ptr);
+#endif
+	case 4: return __xchg32((int) x, (int *) ptr);
+	case 1: return __xchg8((char) x, (char *) ptr);
+	}
+	__xchg_called_with_bad_pointer();
+	return x;	/* not reached for supported sizes */
+}
+
+/*
+** REVISIT - Abandoned use of LDCW in xchg() for now:
+** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
+** o and while we are at it, could CONFIG_64BIT code use LDCD too?
+**
+**	if (__builtin_constant_p(x) && (x == NULL))
+**		if (((unsigned long)p & 0xf) == 0)
+**			return __ldcw(p);
+*/
+/*
+ * xchg(ptr, x) - atomically exchange *ptr with x, returning the previous
+ * value of *ptr.  sizeof(*(ptr)) selects the correct-width helper at
+ * compile time; unsupported widths are caught at link time.
+ */
+#define xchg(ptr, x)							\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	__typeof__(*(ptr)) _x_ = (x);					\
+	__ret = (__typeof__(*(ptr)))					\
+		__xchg((unsigned long)_x_, (ptr), sizeof(*(ptr)));	\
+	__ret;								\
+})
+
+/* bug catcher for when unsupported size is used - won't link */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
+extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
+				   unsigned int new_);
+extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
+extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
+
+/* don't worry...optimizer will get rid of most of this */
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
+{
+	/*
+	 * Width dispatch; helpers are in arch/parisc/lib/bitops.c.
+	 * No 2-byte case: a 16-bit cmpxchg() resolves to the undefined
+	 * __cmpxchg_called_with_bad_pointer() and fails at link time.
+	 */
+	switch (size) {
+#ifdef CONFIG_64BIT
+	case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
+#endif
+	case 4: return __cmpxchg_u32((unsigned int *)ptr,
+				     (unsigned int)old, (unsigned int)new_);
+	case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;	/* not reached for supported sizes */
+}
+
+/*
+ * cmpxchg(ptr, o, n) - atomic compare-and-exchange: if *ptr == o, store
+ * n into *ptr.  The value found in *ptr beforehand is returned in the
+ * type of *ptr.
+ */
+#define cmpxchg(ptr, o, n)						 \
+({									 \
+	__typeof__(*(ptr)) _o_ = (o);					 \
+	__typeof__(*(ptr)) _n_ = (n);					 \
+	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	 \
+				    (unsigned long)_n_, sizeof(*(ptr))); \
+})
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+				      unsigned long old,
+				      unsigned long new_, int size)
+{
+	/*
+	 * cmpxchg guaranteed only against other accesses on this CPU.
+	 * The 4- and 8-byte cases reuse the SMP-safe helpers; any other
+	 * size falls back to the generic implementation pulled in from
+	 * asm-generic/cmpxchg-local.h above.
+	 */
+	switch (size) {
+#ifdef CONFIG_64BIT
+	case 8:	return __cmpxchg_u64((u64 *)ptr, old, new_);
+#endif
+	case 4:	return __cmpxchg_u32(ptr, old, new_);
+	default:
+		return __cmpxchg_local_generic(ptr, old, new_, size);
+	}
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
+			(unsigned long)(n), sizeof(*(ptr))))
+#ifdef CONFIG_64BIT
+#define cmpxchg64_local(ptr, o, n)					\
+({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
+	cmpxchg_local((ptr), (o), (n));					\
+})
+#else
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
+#define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
+
+#endif /* _ASM_PARISC_CMPXCHG_H_ */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/compat.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/compat.h
new file mode 100644
index 0000000..acf8aa0
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/compat.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_COMPAT_H
+#define _ASM_PARISC_COMPAT_H
+/*
+ * Architecture specific compatibility types
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+
+#define COMPAT_USER_HZ 		100
+#define COMPAT_UTS_MACHINE	"parisc\0\0"
+
+typedef u32	compat_size_t;
+typedef s32	compat_ssize_t;
+typedef s32	compat_time_t;
+typedef s32	compat_clock_t;
+typedef s32	compat_pid_t;
+typedef u32	__compat_uid_t;
+typedef u32	__compat_gid_t;
+typedef u32	__compat_uid32_t;
+typedef u32	__compat_gid32_t;
+typedef u16	compat_mode_t;
+typedef u32	compat_ino_t;
+typedef u32	compat_dev_t;
+typedef s32	compat_off_t;
+typedef s64	compat_loff_t;
+typedef u16	compat_nlink_t;
+typedef u16	compat_ipc_pid_t;
+typedef s32	compat_daddr_t;
+typedef u32	compat_caddr_t;
+typedef s32	compat_key_t;
+typedef s32	compat_timer_t;
+
+typedef s32	compat_int_t;
+typedef s32	compat_long_t;
+typedef s64	compat_s64;
+typedef u32	compat_uint_t;
+typedef u32	compat_ulong_t;
+typedef u64	compat_u64;
+typedef u32	compat_uptr_t;
+
+struct compat_timespec {
+	compat_time_t		tv_sec;
+	s32			tv_nsec;
+};
+
+struct compat_timeval {
+	compat_time_t		tv_sec;
+	s32			tv_usec;
+};
+
+struct compat_stat {
+	compat_dev_t		st_dev;	/* dev_t is 32 bits on parisc */
+	compat_ino_t		st_ino;	/* 32 bits */
+	compat_mode_t		st_mode;	/* 16 bits */
+	compat_nlink_t  	st_nlink;	/* 16 bits */
+	u16			st_reserved1;	/* old st_uid */
+	u16			st_reserved2;	/* old st_gid */
+	compat_dev_t		st_rdev;
+	compat_off_t		st_size;
+	compat_time_t		st_atime;
+	u32			st_atime_nsec;
+	compat_time_t		st_mtime;
+	u32			st_mtime_nsec;
+	compat_time_t		st_ctime;
+	u32			st_ctime_nsec;
+	s32			st_blksize;
+	s32			st_blocks;
+	u32			__unused1;	/* ACL stuff */
+	compat_dev_t		__unused2;	/* network */
+	compat_ino_t		__unused3;	/* network */
+	u32			__unused4;	/* cnodes */
+	u16			__unused5;	/* netsite */
+	short			st_fstype;
+	compat_dev_t		st_realdev;
+	u16			st_basemode;
+	u16			st_spareshort;
+	__compat_uid32_t	st_uid;
+	__compat_gid32_t	st_gid;
+	u32			st_spare4[3];
+};
+
+struct compat_flock {
+	short			l_type;
+	short			l_whence;
+	compat_off_t		l_start;
+	compat_off_t		l_len;
+	compat_pid_t		l_pid;
+};
+
+struct compat_flock64 {
+	short			l_type;
+	short			l_whence;
+	compat_loff_t		l_start;
+	compat_loff_t		l_len;
+	compat_pid_t		l_pid;
+};
+
+struct compat_statfs {
+	s32		f_type;
+	s32		f_bsize;
+	s32		f_blocks;
+	s32		f_bfree;
+	s32		f_bavail;
+	s32		f_files;
+	s32		f_ffree;
+	__kernel_fsid_t	f_fsid;
+	s32		f_namelen;
+	s32		f_frsize;
+	s32		f_flags;
+	s32		f_spare[4];
+};
+
+struct compat_sigcontext {
+	compat_int_t sc_flags;
+	compat_int_t sc_gr[32]; /* PSW in sc_gr[0] */
+	u64 sc_fr[32];
+	compat_int_t sc_iasq[2];
+	compat_int_t sc_iaoq[2];
+	compat_int_t sc_sar; /* cr11 */
+};
+
+#define COMPAT_RLIM_INFINITY 0xffffffff
+
+typedef u32		compat_old_sigset_t;	/* at least 32 bits */
+
+#define _COMPAT_NSIG		64
+#define _COMPAT_NSIG_BPW	32
+
+typedef u32		compat_sigset_word;
+
+typedef union compat_sigval {
+	compat_int_t	sival_int;
+	compat_uptr_t	sival_ptr;
+} compat_sigval_t;
+
+typedef struct compat_siginfo {
+	int si_signo;
+	int si_errno;
+	int si_code;
+
+	union {
+		int _pad[128/sizeof(int) - 3];
+
+		/* kill() */
+		struct {
+			unsigned int _pid;      /* sender's pid */
+			unsigned int _uid;      /* sender's uid */
+		} _kill;
+
+		/* POSIX.1b timers */
+		struct {
+			compat_timer_t _tid;            /* timer id */
+			int _overrun;           /* overrun count */
+			char _pad[sizeof(unsigned int) - sizeof(int)];
+			compat_sigval_t _sigval;        /* same as below */
+			int _sys_private;       /* not to be passed to user */
+		} _timer;
+
+		/* POSIX.1b signals */
+		struct {
+			unsigned int _pid;      /* sender's pid */
+			unsigned int _uid;      /* sender's uid */
+			compat_sigval_t _sigval;
+		} _rt;
+
+		/* SIGCHLD */
+		struct {
+			unsigned int _pid;      /* which child */
+			unsigned int _uid;      /* sender's uid */
+			int _status;            /* exit code */
+			compat_clock_t _utime;
+			compat_clock_t _stime;
+		} _sigchld;
+
+		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+		struct {
+			unsigned int _addr;     /* faulting insn/memory ref. */
+		} _sigfault;
+
+		/* SIGPOLL */
+		struct {
+			int _band;      /* POLL_IN, POLL_OUT, POLL_MSG */
+			int _fd;
+		} _sigpoll;
+
+		/* SIGSYS */
+		struct {
+			compat_uptr_t _call_addr; /* calling user insn */
+			int _syscall;	/* triggering system call number */
+			compat_uint_t _arch;	/* AUDIT_ARCH_* of syscall */
+		} _sigsys;
+	} _sifields;
+} compat_siginfo_t;
+
+#define COMPAT_OFF_T_MAX	0x7fffffff
+
+struct compat_ipc64_perm {
+	compat_key_t key;
+	__compat_uid_t uid;
+	__compat_gid_t gid;
+	__compat_uid_t cuid;
+	__compat_gid_t cgid;
+	unsigned short int __pad1;
+	compat_mode_t mode;
+	unsigned short int __pad2;
+	unsigned short int seq;
+	unsigned int __pad3;
+	unsigned long __unused1;	/* yes they really are 64bit pads */
+	unsigned long __unused2;
+};
+
+struct compat_semid64_ds {
+	struct compat_ipc64_perm sem_perm;
+	unsigned int __unused1;
+	compat_time_t sem_otime;
+	unsigned int __unused2;
+	compat_time_t sem_ctime;
+	compat_ulong_t sem_nsems;
+	compat_ulong_t __unused3;
+	compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+	struct compat_ipc64_perm msg_perm;
+	unsigned int __unused1;
+	compat_time_t msg_stime;
+	unsigned int __unused2;
+	compat_time_t msg_rtime;
+	unsigned int __unused3;
+	compat_time_t msg_ctime;
+	compat_ulong_t msg_cbytes;
+	compat_ulong_t msg_qnum;
+	compat_ulong_t msg_qbytes;
+	compat_pid_t msg_lspid;
+	compat_pid_t msg_lrpid;
+	compat_ulong_t __unused4;
+	compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+	struct compat_ipc64_perm shm_perm;
+	unsigned int __unused1;
+	compat_time_t shm_atime;
+	unsigned int __unused2;
+	compat_time_t shm_dtime;
+	unsigned int __unused3;
+	compat_time_t shm_ctime;
+	unsigned int __unused4;
+	compat_size_t shm_segsz;
+	compat_pid_t shm_cpid;
+	compat_pid_t shm_lpid;
+	compat_ulong_t shm_nattch;
+	compat_ulong_t __unused5;
+	compat_ulong_t __unused6;
+};
+
+/*
+ * A pointer passed in from user mode. This should not
+ * be used for syscall parameters, just declare them
+ * as pointers because the syscall entry code will have
+ * appropriately converted them already.
+ */
+
+/* Widen a 32-bit compat user-pointer value into a full __user pointer. */
+static inline void __user *compat_ptr(compat_uptr_t uptr)
+{
+	return (void __user *)(unsigned long)uptr;
+}
+
+/* Narrow a user-space pointer to its 32-bit compat representation. */
+static inline compat_uptr_t ptr_to_compat(void __user *uptr)
+{
+	return (u32)(unsigned long)uptr;
+}
+
+/*
+ * Return a user-stack location for compat glue code: gr[30] is taken
+ * from the task's saved register state.  NOTE(review): `len` is unused
+ * and no space is reserved here -- presumably callers adjust relative
+ * to the returned stack pointer themselves; confirm against callers.
+ */
+static __inline__ void __user *arch_compat_alloc_user_space(long len)
+{
+	struct pt_regs *regs = &current->thread.regs;
+	return (void __user *)regs->gr[30];
+}
+
+/* True when task @t runs with the 32-bit personality (TIF_32BIT set). */
+static inline int __is_compat_task(struct task_struct *t)
+{
+	return test_ti_thread_flag(task_thread_info(t), TIF_32BIT);
+}
+
+/* True when the current task is a 32-bit (compat) process. */
+static inline int is_compat_task(void)
+{
+	return __is_compat_task(current);
+}
+
+#endif /* _ASM_PARISC_COMPAT_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/compat_ucontext.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/compat_ucontext.h
new file mode 100644
index 0000000..c606f1b
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/compat_ucontext.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_COMPAT_UCONTEXT_H
+#define _ASM_PARISC_COMPAT_UCONTEXT_H
+
+#include <linux/compat.h>
+
+/* 32-bit ucontext as seen from an 64-bit kernel */
+struct compat_ucontext {
+	compat_uint_t uc_flags;
+	compat_uptr_t uc_link;
+	compat_stack_t uc_stack;	/* struct compat_sigaltstack (12 bytes)*/	
+	/* FIXME: Pad out to get uc_mcontext to start at an 8-byte aligned boundary */
+	compat_uint_t pad[1];
+	struct compat_sigcontext uc_mcontext;
+	compat_sigset_t uc_sigmask;	/* mask last for extensibility */
+};
+
+#endif /* !_ASM_PARISC_COMPAT_UCONTEXT_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/delay.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/delay.h
new file mode 100644
index 0000000..841b506
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/delay.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_DELAY_H
+#define _ASM_PARISC_DELAY_H
+
+/*
+ * Spin for `loops` iterations of a two-instruction decrement-and-branch
+ * loop.  The .balignl aligns the loop to a 64-byte boundary so it does
+ * not straddle a cache line (pad word 0x34000034 -- TODO confirm this
+ * encodes a harmless instruction).
+ */
+static __inline__ void __delay(unsigned long loops) {
+	asm volatile(
+	"	.balignl	64,0x34000034\n"
+	"	addib,UV -1,%0,.\n"
+	"	nop\n"
+		: "=r" (loops) : "0" (loops));
+}
+
+extern void __udelay(unsigned long usecs);
+extern void __udelay_bad(unsigned long usecs);
+
+static inline void udelay(unsigned long usecs)
+{
+	/* Route compile-time-constant delays above 20 ms through
+	 * __udelay_bad() -- presumably to flag callers that should use
+	 * mdelay()/msleep() instead; confirm in arch/parisc/lib.
+	 * NOTE(review): __udelay() still runs afterwards (no else), so
+	 * the requested delay is always performed. */
+	if (__builtin_constant_p(usecs) && (usecs) > 20000)
+		__udelay_bad(usecs);
+	__udelay(usecs);
+}
+
+#endif /* _ASM_PARISC_DELAY_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/dma-mapping.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/dma-mapping.h
new file mode 100644
index 0000000..7af4a00
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/dma-mapping.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_DMA_MAPPING_H
+#define _PARISC_DMA_MAPPING_H
+
+#include <asm/cacheflush.h>
+
+/*
+** We need to support 4 different coherent dma models with one binary:
+**
+**     I/O MMU        consistent method           dma_sync behavior
+**  =============   ======================       =======================
+**  a) PA-7x00LC    uncachable host memory          flush/purge
+**  b) U2/Uturn      cachable host memory              NOP
+**  c) Ike/Astro     cachable host memory              NOP
+**  d) EPIC/SAGA     memory on EPIC/SAGA         flush/reset DMA channel
+**
+** PA-7[13]00LC processors have a GSC bus interface and no I/O MMU.
+**
+** Systems (eg PCX-T workstations) that don't fall into the above
+** categories will need to modify the needed drivers to perform
+** flush/purge and allocate "regular" cacheable pages for everything.
+*/
+
+#ifdef CONFIG_PA11
+extern const struct dma_map_ops pcxl_dma_ops;
+extern const struct dma_map_ops pcx_dma_ops;
+#endif
+
+extern const struct dma_map_ops *hppa_dma_ops;
+
+/* All buses share the globally selected hppa_dma_ops; @bus is ignored. */
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+	return hppa_dma_ops;
+}
+
+/*
+ * Make a kernel-virtual range consistent for DMA.  Only platforms whose
+ * dma_map_ops implement sync_single_for_cpu (the flush/purge models in
+ * the table above) need the dcache flush; coherent models are a NOP.
+ */
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	       enum dma_data_direction direction)
+{
+	if (hppa_dma_ops->sync_single_for_cpu)
+		flush_kernel_dcache_range((unsigned long)vaddr, size);
+}
+
+/*
+ * Find the platform data (HBA private data) for @dev, walking up the
+ * parent chain on a cache miss and memoizing the answer in
+ * dev->platform_data.  Returns NULL when no ancestor carries any.
+ */
+static inline void *
+parisc_walk_tree(struct device *dev)
+{
+	struct device *otherdev;
+	/* Fast path: answer already cached on this device. */
+	if(likely(dev->platform_data != NULL))
+		return dev->platform_data;
+	/* OK, just traverse the bus to find it */
+	for(otherdev = dev->parent; otherdev;
+	    otherdev = otherdev->parent) {
+		if(otherdev->platform_data) {
+			/* Cache for the next lookup. */
+			dev->platform_data = otherdev->platform_data;
+			break;
+		}
+	}
+	return dev->platform_data;
+}
+
+#define GET_IOC(dev) ({					\
+	void *__pdata = parisc_walk_tree(dev);		\
+	__pdata ? HBA_DATA(__pdata)->iommu : NULL;	\
+})
+
+#ifdef CONFIG_IOMMU_CCIO
+struct parisc_device;
+struct ioc;
+void * ccio_get_iommu(const struct parisc_device *dev);
+int ccio_request_resource(const struct parisc_device *dev,
+		struct resource *res);
+int ccio_allocate_resource(const struct parisc_device *dev,
+		struct resource *res, unsigned long size,
+		unsigned long min, unsigned long max, unsigned long align);
+#else /* !CONFIG_IOMMU_CCIO */
+#define ccio_get_iommu(dev) NULL
+#define ccio_request_resource(dev, res) insert_resource(&iomem_resource, res)
+#define ccio_allocate_resource(dev, res, size, min, max, align) \
+		allocate_resource(&iomem_resource, res, size, min, max, \
+				align, NULL, NULL)
+#endif /* !CONFIG_IOMMU_CCIO */
+
+#ifdef CONFIG_IOMMU_SBA
+struct parisc_device;
+void * sba_get_iommu(struct parisc_device *dev);
+#endif
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/dma.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/dma.h
new file mode 100644
index 0000000..eea80ed
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/dma.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* asm/dma.h: Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ * (c) Copyright 2000, Grant Grundler
+ */
+
+#ifndef _ASM_DMA_H
+#define _ASM_DMA_H
+
+#include <asm/io.h>		/* need byte IO */
+
+#define dma_outb	outb
+#define dma_inb		inb
+
+/*
+** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
+** (or rather not merge) DMAs into manageable chunks.
+** On parisc, this is more of the software/tuning constraint
+** rather than the HW. I/O MMU allocation algorithms can be
+** faster with smaller sizes (to some degree).
+*/
+#define DMA_CHUNK_SIZE	(BITS_PER_LONG*PAGE_SIZE)
+
+/* The maximum address that we can perform a DMA transfer to on this platform
+** New dynamic DMA interfaces should obsolete this....
+*/
+#define MAX_DMA_ADDRESS (~0UL)
+
+/*
+** We don't have DMA channels... well V-class does but the
+** Dynamic DMA Mapping interface will support them... right? :^)
+** Note: this is not relevant right now for PA-RISC, but we cannot 
+** leave this as undefined because some things (e.g. sound)
+** won't compile :-(
+*/
+#define MAX_DMA_CHANNELS 8
+#define DMA_MODE_READ	0x44	/* I/O to memory, no autoinit, increment, single mode */
+#define DMA_MODE_WRITE	0x48	/* memory to I/O, no autoinit, increment, single mode */
+#define DMA_MODE_CASCADE 0xC0	/* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+#define DMA_AUTOINIT	0x10
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE	0xC0	/* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG		0x08	/* command register (w) */
+#define DMA1_STAT_REG		0x08	/* status register (r) */
+#define DMA1_REQ_REG            0x09    /* request register (w) */
+#define DMA1_MASK_REG		0x0A	/* single-channel mask (w) */
+#define DMA1_MODE_REG		0x0B	/* mode register (w) */
+#define DMA1_CLEAR_FF_REG	0x0C	/* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG           0x0D    /* Temporary Register (r) */
+#define DMA1_RESET_REG		0x0D	/* Master Clear (w) */
+#define DMA1_CLR_MASK_REG       0x0E    /* Clear Mask */
+#define DMA1_MASK_ALL_REG       0x0F    /* all-channels mask (w) */
+#define DMA1_EXT_MODE_REG	(0x400 | DMA1_MODE_REG)
+
+#define DMA2_CMD_REG		0xD0	/* command register (w) */
+#define DMA2_STAT_REG		0xD0	/* status register (r) */
+#define DMA2_REQ_REG            0xD2    /* request register (w) */
+#define DMA2_MASK_REG		0xD4	/* single-channel mask (w) */
+#define DMA2_MODE_REG		0xD6	/* mode register (w) */
+#define DMA2_CLEAR_FF_REG	0xD8	/* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG           0xDA    /* Temporary Register (r) */
+#define DMA2_RESET_REG		0xDA	/* Master Clear (w) */
+#define DMA2_CLR_MASK_REG       0xDC    /* Clear Mask */
+#define DMA2_MASK_ALL_REG       0xDE    /* all-channels mask (w) */
+#define DMA2_EXT_MODE_REG	(0x400 | DMA2_MODE_REG)
+
+/*
+ * The generic ISA DMA API brackets controller access with
+ * claim_dma_lock()/release_dma_lock().  No locking is needed here, so
+ * both are stubs and the returned "flags" value is meaningless.
+ */
+static __inline__ unsigned long claim_dma_lock(void)
+{
+	return 0;
+}
+
+static __inline__ void release_dma_lock(unsigned long flags)
+{
+}
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+	/* Count-register port: DMA1 channels 0-3 sit at odd ports
+	 * 0x01..0x07; DMA2 channels 4-7 at 0xC2..0xCE (stride 4). */
+	unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
+					 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
+
+	/* using short to get 16-bit wrap around */
+	unsigned short count;
+
+	/* The 8237 holds count-1; read low then high byte (flip-flop). */
+	count = 1 + dma_inb(io_port);
+	count += dma_inb(io_port) << 8;
+	
+	/* DMA2 channels count 16-bit words, so double to get bytes. */
+	return (dmanr<=3)? count : (count<<1);
+}
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+#ifdef CONFIG_SUPERIO
+	/* Only SuperIO-equipped machines have the 8237-style controller;
+	 * writing the channel number with the mask bit (4) clear unmasks
+	 * (enables) the channel. */
+	if (dmanr<=3)
+		dma_outb(dmanr,  DMA1_MASK_REG);
+	else
+		dma_outb(dmanr & 3,  DMA2_MASK_REG);
+#endif
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+#ifdef CONFIG_SUPERIO
+	/* Setting the mask bit (4) masks (disables) the channel. */
+	if (dmanr<=3)
+		dma_outb(dmanr | 4,  DMA1_MASK_REG);
+	else
+		dma_outb((dmanr & 3) | 4,  DMA2_MASK_REG);
+#endif
+}
+
+/* reserve a DMA channel */
+#define request_dma(dmanr, device_id)	(0)
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while holding the DMA lock ! ---
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+	/* No-op: present only to satisfy the generic ISA DMA API. */
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+	/* No-op: present only to satisfy the generic ISA DMA API. */
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register, but a 64k boundary
+ * may have been crossed.
+ */
+static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
+{
+	/* No-op: present only to satisfy the generic ISA DMA API. */
+}
+
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+	/* No-op: present only to satisfy the generic ISA DMA API. */
+}
+
+
+/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+	/* No-op: present only to satisfy the generic ISA DMA API. */
+}
+
+
+#define free_dma(dmanr)
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy 	(0)
+#endif
+
+#endif /* _ASM_DMA_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/dwarf.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/dwarf.h
new file mode 100644
index 0000000..8fe7d6b
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/dwarf.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 Helge Deller <deller@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_PARISC_DWARF_H
+#define _ASM_PARISC_DWARF_H
+
+#ifdef __ASSEMBLY__
+
+#define CFI_STARTPROC	.cfi_startproc
+#define CFI_ENDPROC	.cfi_endproc
+#define CFI_DEF_CFA	.cfi_def_cfa
+#define CFI_REGISTER	.cfi_register
+#define CFI_REL_OFFSET	.cfi_rel_offset
+#define CFI_UNDEFINED	.cfi_undefined
+
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* _ASM_PARISC_DWARF_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/eisa_bus.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/eisa_bus.h
new file mode 100644
index 0000000..201085f
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/eisa_bus.h
@@ -0,0 +1,23 @@
+/*
+ * eisa_bus.h interface between the eisa BA driver and the bus enumerator
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (c) 2002 Daniel Engstrom <5116@telia.com>
+ *
+ */
+
+#ifndef ASM_EISA_H
+#define ASM_EISA_H
+
+extern void eisa_make_irq_level(int num);
+extern void eisa_make_irq_edge(int num);
+extern int eisa_enumerator(unsigned long eeprom_addr,
+			   struct resource *io_parent, 
+			   struct resource *mem_parent);
+extern int eisa_eeprom_init(unsigned long addr);
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/eisa_eeprom.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/eisa_eeprom.h
new file mode 100644
index 0000000..5637ac9
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/eisa_eeprom.h
@@ -0,0 +1,153 @@
+/*
+ * eisa_eeprom.h - provide support for EISA adapters in PA-RISC machines
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (c) 2001, 2002 Daniel Engstrom <5116@telia.com>
+ *
+ */
+
+#ifndef ASM_EISA_EEPROM_H
+#define ASM_EISA_EEPROM_H
+
+extern void __iomem *eisa_eeprom_addr;
+
+#define HPEE_MAX_LENGTH       0x2000	/* maximum eeprom length */
+
+#define HPEE_SLOT_INFO(slot) (20+(48*slot))
+
+struct eeprom_header 
+{
+   
+	u_int32_t num_writes;       /* number of writes */
+ 	u_int8_t  flags;            /* flags, usage? */
+	u_int8_t  ver_maj;
+	u_int8_t  ver_min;
+	u_int8_t  num_slots;        /* number of EISA slots in system */
+	u_int16_t csum;             /* checksum, I don't know how to calculate this */
+	u_int8_t  pad[10];
+} __attribute__ ((packed));
+
+
+struct eeprom_eisa_slot_info
+{
+	u_int32_t eisa_slot_id;
+	u_int32_t config_data_offset;
+	u_int32_t num_writes;
+	u_int16_t csum;
+	u_int16_t num_functions;
+	u_int16_t config_data_length;
+	
+	/* bits 0..3 are the duplicate slot id */ 
+#define HPEE_SLOT_INFO_EMBEDDED  0x10
+#define HPEE_SLOT_INFO_VIRTUAL   0x20
+#define HPEE_SLOT_INFO_NO_READID 0x40
+#define HPEE_SLOT_INFO_DUPLICATE 0x80
+	u_int8_t slot_info;
+	
+#define HPEE_SLOT_FEATURES_ENABLE         0x01
+#define HPEE_SLOT_FEATURES_IOCHK          0x02
+#define HPEE_SLOT_FEATURES_CFG_INCOMPLETE 0x80
+	u_int8_t slot_features;
+	
+	u_int8_t  ver_min;
+	u_int8_t  ver_maj;
+	
+#define HPEE_FUNCTION_INFO_HAVE_TYPE      0x01
+#define HPEE_FUNCTION_INFO_HAVE_MEMORY    0x02
+#define HPEE_FUNCTION_INFO_HAVE_IRQ       0x04
+#define HPEE_FUNCTION_INFO_HAVE_DMA       0x08
+#define HPEE_FUNCTION_INFO_HAVE_PORT      0x10
+#define HPEE_FUNCTION_INFO_HAVE_PORT_INIT 0x20
+/* I think there are two slighty different 
+ * versions of the function_info field 
+ * one int the fixed header and one optional 
+ * in the parsed slot data area */
+#define HPEE_FUNCTION_INFO_HAVE_FUNCTION  0x01
+#define HPEE_FUNCTION_INFO_F_DISABLED     0x80
+#define HPEE_FUNCTION_INFO_CFG_FREE_FORM  0x40
+	u_int8_t  function_info;
+
+#define HPEE_FLAG_BOARD_IS_ISA		  0x01 /* flag and minor version for isa board */
+	u_int8_t  flags;
+	u_int8_t  pad[24];
+} __attribute__ ((packed));
+
+
+#define HPEE_MEMORY_MAX_ENT   9
+/* memory descriptor: byte 0 */
+#define HPEE_MEMORY_WRITABLE  0x01
+#define HPEE_MEMORY_CACHABLE  0x02
+#define HPEE_MEMORY_TYPE_MASK 0x18
+#define HPEE_MEMORY_TYPE_SYS  0x00
+#define HPEE_MEMORY_TYPE_EXP  0x08
+#define HPEE_MEMORY_TYPE_VIR  0x10
+#define HPEE_MEMORY_TYPE_OTH  0x18
+#define HPEE_MEMORY_SHARED    0x20
+#define HPEE_MEMORY_MORE      0x80
+
+/* memory descriptor: byte 1 */
+#define HPEE_MEMORY_WIDTH_MASK 0x03
+#define HPEE_MEMORY_WIDTH_BYTE 0x00
+#define HPEE_MEMORY_WIDTH_WORD 0x01
+#define HPEE_MEMORY_WIDTH_DWORD 0x02
+#define HPEE_MEMORY_DECODE_MASK 0x0c
+#define HPEE_MEMORY_DECODE_20BITS 0x00
+#define HPEE_MEMORY_DECODE_24BITS 0x04
+#define HPEE_MEMORY_DECODE_32BITS 0x08
+/* byte 2 and 3 are a 16bit LE value
+ * containing the memory size in kilobytes */
+/* byte 4,5,6 are a 24bit LE value
+ * containing the memory base address */
+
+
+#define HPEE_IRQ_MAX_ENT      7
+/* Interrupt entry: byte 0 */
+#define HPEE_IRQ_CHANNEL_MASK 0xf
+#define HPEE_IRQ_TRIG_LEVEL   0x20
+#define HPEE_IRQ_MORE         0x80
+/* byte 1 seems to be unused */
+
+#define HPEE_DMA_MAX_ENT     4
+
+/* dma entry: byte 0 */
+#define HPEE_DMA_CHANNEL_MASK 7
+#define HPEE_DMA_SIZE_MASK	0xc
+#define HPEE_DMA_SIZE_BYTE	0x0
+#define HPEE_DMA_SIZE_WORD	0x4
+#define HPEE_DMA_SIZE_DWORD	0x8
+#define HPEE_DMA_SHARED      0x40
+#define HPEE_DMA_MORE        0x80
+
+/* dma entry: byte 1 */
+#define HPEE_DMA_TIMING_MASK 0x30
+#define HPEE_DMA_TIMING_ISA	0x0
+#define HPEE_DMA_TIMING_TYPEA 0x10
+#define HPEE_DMA_TIMING_TYPEB 0x20
+#define HPEE_DMA_TIMING_TYPEC 0x30
+
+#define HPEE_PORT_MAX_ENT 20
+/* port entry byte 0 */
+#define HPEE_PORT_SIZE_MASK 0x1f
+#define HPEE_PORT_SHARED    0x40
+#define HPEE_PORT_MORE      0x80
+/* byte 1 and 2 is a 16bit LE value
+ * containing the start port number */
+
+#define HPEE_PORT_INIT_MAX_LEN     60 /* in bytes here */
+/* port init entry byte 0 */
+#define HPEE_PORT_INIT_WIDTH_MASK  0x3
+#define HPEE_PORT_INIT_WIDTH_BYTE  0x0
+#define HPEE_PORT_INIT_WIDTH_WORD  0x1
+#define HPEE_PORT_INIT_WIDTH_DWORD 0x2
+#define HPEE_PORT_INIT_MASK        0x4
+#define HPEE_PORT_INIT_MORE        0x80
+
+#define HPEE_SELECTION_MAX_ENT 26
+
+#define HPEE_TYPE_MAX_LEN    80
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/elf.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/elf.h
new file mode 100644
index 0000000..382d75a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/elf.h
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASMPARISC_ELF_H
+#define __ASMPARISC_ELF_H
+
+/*
+ * ELF register definitions..
+ */
+
+#include <asm/ptrace.h>
+
+#define EM_PARISC 15
+
+/* HPPA specific definitions.  */
+
+/* Legal values for e_flags field of Elf32_Ehdr.  */
+
+#define EF_PARISC_TRAPNIL	0x00010000 /* Trap nil pointer dereference.  */
+#define EF_PARISC_EXT		0x00020000 /* Program uses arch. extensions. */
+#define EF_PARISC_LSB		0x00040000 /* Program expects little endian. */
+#define EF_PARISC_WIDE		0x00080000 /* Program expects wide mode.  */
+#define EF_PARISC_NO_KABP	0x00100000 /* No kernel assisted branch
+					      prediction.  */
+#define EF_PARISC_LAZYSWAP	0x00400000 /* Allow lazy swapping.  */
+#define EF_PARISC_ARCH		0x0000ffff /* Architecture version.  */
+
+/* Defined values for `e_flags & EF_PARISC_ARCH' are:  */
+
+#define EFA_PARISC_1_0		    0x020b /* PA-RISC 1.0 big-endian.  */
+#define EFA_PARISC_1_1		    0x0210 /* PA-RISC 1.1 big-endian.  */
+#define EFA_PARISC_2_0		    0x0214 /* PA-RISC 2.0 big-endian.  */
+
+/* Additional section indices.  */
+
+#define SHN_PARISC_ANSI_COMMON	0xff00	   /* Section for tentatively declared
+					      symbols in ANSI C.  */
+#define SHN_PARISC_HUGE_COMMON	0xff01	   /* Common blocks in huge model.  */
+
+/* Legal values for sh_type field of Elf32_Shdr.  */
+
+#define SHT_PARISC_EXT		0x70000000 /* Contains product specific ext. */
+#define SHT_PARISC_UNWIND	0x70000001 /* Unwind information.  */
+#define SHT_PARISC_DOC		0x70000002 /* Debug info for optimized code. */
+
+/* Legal values for sh_flags field of Elf32_Shdr.  */
+
+#define SHF_PARISC_SHORT	0x20000000 /* Section with short addressing. */
+#define SHF_PARISC_HUGE		0x40000000 /* Section far from gp.  */
+#define SHF_PARISC_SBP		0x80000000 /* Static branch prediction code. */
+
+/* Legal values for ST_TYPE subfield of st_info (symbol type).  */
+
+#define STT_PARISC_MILLICODE	13	/* Millicode function entry point.  */
+
+#define STT_HP_OPAQUE		(STT_LOOS + 0x1)
+#define STT_HP_STUB		(STT_LOOS + 0x2)
+
+/* HPPA relocs.  */
+
+#define R_PARISC_NONE		0	/* No reloc.  */
+#define R_PARISC_DIR32		1	/* Direct 32-bit reference.  */
+#define R_PARISC_DIR21L		2	/* Left 21 bits of eff. address.  */
+#define R_PARISC_DIR17R		3	/* Right 17 bits of eff. address.  */
+#define R_PARISC_DIR17F		4	/* 17 bits of eff. address.  */
+#define R_PARISC_DIR14R		6	/* Right 14 bits of eff. address.  */
+#define R_PARISC_PCREL32	9	/* 32-bit rel. address.  */
+#define R_PARISC_PCREL21L	10	/* Left 21 bits of rel. address.  */
+#define R_PARISC_PCREL17R	11	/* Right 17 bits of rel. address.  */
+#define R_PARISC_PCREL17F	12	/* 17 bits of rel. address.  */
+#define R_PARISC_PCREL14R	14	/* Right 14 bits of rel. address.  */
+#define R_PARISC_DPREL21L	18	/* Left 21 bits of rel. address.  */
+#define R_PARISC_DPREL14R	22	/* Right 14 bits of rel. address.  */
+#define R_PARISC_GPREL21L	26	/* GP-relative, left 21 bits.  */
+#define R_PARISC_GPREL14R	30	/* GP-relative, right 14 bits.  */
+#define R_PARISC_LTOFF21L	34	/* LT-relative, left 21 bits.  */
+#define R_PARISC_LTOFF14R	38	/* LT-relative, right 14 bits.  */
+#define R_PARISC_SECREL32	41	/* 32 bits section rel. address.  */
+#define R_PARISC_SEGBASE	48	/* No relocation, set segment base.  */
+#define R_PARISC_SEGREL32	49	/* 32 bits segment rel. address.  */
+#define R_PARISC_PLTOFF21L	50	/* PLT rel. address, left 21 bits.  */
+#define R_PARISC_PLTOFF14R	54	/* PLT rel. address, right 14 bits.  */
+#define R_PARISC_LTOFF_FPTR32	57	/* 32 bits LT-rel. function pointer. */
+#define R_PARISC_LTOFF_FPTR21L	58	/* LT-rel. fct ptr, left 21 bits. */
+#define R_PARISC_LTOFF_FPTR14R	62	/* LT-rel. fct ptr, right 14 bits. */
+#define R_PARISC_FPTR64		64	/* 64 bits function address.  */
+#define R_PARISC_PLABEL32	65	/* 32 bits function address.  */
+#define R_PARISC_PCREL64	72	/* 64 bits PC-rel. address.  */
+#define R_PARISC_PCREL22F	74	/* 22 bits PC-rel. address.  */
+#define R_PARISC_PCREL14WR	75	/* PC-rel. address, right 14 bits.  */
+#define R_PARISC_PCREL14DR	76	/* PC rel. address, right 14 bits.  */
+#define R_PARISC_PCREL16F	77	/* 16 bits PC-rel. address.  */
+#define R_PARISC_PCREL16WF	78	/* 16 bits PC-rel. address.  */
+#define R_PARISC_PCREL16DF	79	/* 16 bits PC-rel. address.  */
+#define R_PARISC_DIR64		80	/* 64 bits of eff. address.  */
+#define R_PARISC_DIR14WR	83	/* 14 bits of eff. address.  */
+#define R_PARISC_DIR14DR	84	/* 14 bits of eff. address.  */
+#define R_PARISC_DIR16F		85	/* 16 bits of eff. address.  */
+#define R_PARISC_DIR16WF	86	/* 16 bits of eff. address.  */
+#define R_PARISC_DIR16DF	87	/* 16 bits of eff. address.  */
+#define R_PARISC_GPREL64	88	/* 64 bits of GP-rel. address.  */
+#define R_PARISC_GPREL14WR	91	/* GP-rel. address, right 14 bits.  */
+#define R_PARISC_GPREL14DR	92	/* GP-rel. address, right 14 bits.  */
+#define R_PARISC_GPREL16F	93	/* 16 bits GP-rel. address.  */
+#define R_PARISC_GPREL16WF	94	/* 16 bits GP-rel. address.  */
+#define R_PARISC_GPREL16DF	95	/* 16 bits GP-rel. address.  */
+#define R_PARISC_LTOFF64	96	/* 64 bits LT-rel. address.  */
+#define R_PARISC_LTOFF14WR	99	/* LT-rel. address, right 14 bits.  */
+#define R_PARISC_LTOFF14DR	100	/* LT-rel. address, right 14 bits.  */
+#define R_PARISC_LTOFF16F	101	/* 16 bits LT-rel. address.  */
+#define R_PARISC_LTOFF16WF	102	/* 16 bits LT-rel. address.  */
+#define R_PARISC_LTOFF16DF	103	/* 16 bits LT-rel. address.  */
+#define R_PARISC_SECREL64	104	/* 64 bits section rel. address.  */
+#define R_PARISC_SEGREL64	112	/* 64 bits segment rel. address.  */
+#define R_PARISC_PLTOFF14WR	115	/* PLT-rel. address, right 14 bits.  */
+#define R_PARISC_PLTOFF14DR	116	/* PLT-rel. address, right 14 bits.  */
+#define R_PARISC_PLTOFF16F	117	/* 16 bits LT-rel. address.  */
+#define R_PARISC_PLTOFF16WF	118	/* 16 bits PLT-rel. address.  */
+#define R_PARISC_PLTOFF16DF	119	/* 16 bits PLT-rel. address.  */
+#define R_PARISC_LTOFF_FPTR64	120	/* 64 bits LT-rel. function ptr.  */
+#define R_PARISC_LTOFF_FPTR14WR	123	/* LT-rel. fct. ptr., right 14 bits. */
+#define R_PARISC_LTOFF_FPTR14DR	124	/* LT-rel. fct. ptr., right 14 bits. */
+#define R_PARISC_LTOFF_FPTR16F	125	/* 16 bits LT-rel. function ptr.  */
+#define R_PARISC_LTOFF_FPTR16WF	126	/* 16 bits LT-rel. function ptr.  */
+#define R_PARISC_LTOFF_FPTR16DF	127	/* 16 bits LT-rel. function ptr.  */
+#define R_PARISC_LORESERVE	128
+#define R_PARISC_COPY		128	/* Copy relocation.  */
+#define R_PARISC_IPLT		129	/* Dynamic reloc, imported PLT */
+#define R_PARISC_EPLT		130	/* Dynamic reloc, exported PLT */
+#define R_PARISC_TPREL32	153	/* 32 bits TP-rel. address.  */
+#define R_PARISC_TPREL21L	154	/* TP-rel. address, left 21 bits.  */
+#define R_PARISC_TPREL14R	158	/* TP-rel. address, right 14 bits.  */
+#define R_PARISC_LTOFF_TP21L	162	/* LT-TP-rel. address, left 21 bits. */
+#define R_PARISC_LTOFF_TP14R	166	/* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP14F	167	/* 14 bits LT-TP-rel. address.  */
+#define R_PARISC_TPREL64	216	/* 64 bits TP-rel. address.  */
+#define R_PARISC_TPREL14WR	219	/* TP-rel. address, right 14 bits.  */
+#define R_PARISC_TPREL14DR	220	/* TP-rel. address, right 14 bits.  */
+#define R_PARISC_TPREL16F	221	/* 16 bits TP-rel. address.  */
+#define R_PARISC_TPREL16WF	222	/* 16 bits TP-rel. address.  */
+#define R_PARISC_TPREL16DF	223	/* 16 bits TP-rel. address.  */
+#define R_PARISC_LTOFF_TP64	224	/* 64 bits LT-TP-rel. address.  */
+#define R_PARISC_LTOFF_TP14WR	227	/* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP14DR	228	/* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP16F	229	/* 16 bits LT-TP-rel. address.  */
+#define R_PARISC_LTOFF_TP16WF	230	/* 16 bits LT-TP-rel. address.  */
+#define R_PARISC_LTOFF_TP16DF	231	/* 16 bits LT-TP-rel. address.  */
+#define R_PARISC_HIRESERVE	255
+
+#define PA_PLABEL_FDESC		0x02	/* bit set if PLABEL points to
+					 * a function descriptor, not
+					 * an address */
+
+/* The following are PA function descriptors 
+ *
+ * addr:	the absolute address of the function
+ * gp:		either the data pointer (r27) for non-PIC code or
+ *		the PLT pointer (r19) for PIC code */
+
+/* Format for the Elf32 Function descriptor */
+typedef struct elf32_fdesc {
+	__u32	addr;	/* absolute address of the function (see note above) */
+	__u32	gp;	/* data pointer (r27) or PLT pointer (r19) */
+} Elf32_Fdesc;
+
+/* Format for the Elf64 Function descriptor */
+typedef struct elf64_fdesc {
+	__u64	dummy[2]; /* FIXME: nothing uses these, why waste
+			   * the space */
+	__u64	addr;	/* absolute address of the function */
+	__u64	gp;	/* data pointer (r27) or PLT pointer (r19) */
+} Elf64_Fdesc;
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_64BIT
+#define Elf_Fdesc	Elf64_Fdesc
+#else
+#define Elf_Fdesc	Elf32_Fdesc
+#endif /*CONFIG_64BIT*/
+
+#endif /*__KERNEL__*/
+
+/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr.  */
+
+#define PT_HP_TLS		(PT_LOOS + 0x0)
+#define PT_HP_CORE_NONE		(PT_LOOS + 0x1)
+#define PT_HP_CORE_VERSION	(PT_LOOS + 0x2)
+#define PT_HP_CORE_KERNEL	(PT_LOOS + 0x3)
+#define PT_HP_CORE_COMM		(PT_LOOS + 0x4)
+#define PT_HP_CORE_PROC		(PT_LOOS + 0x5)
+#define PT_HP_CORE_LOADABLE	(PT_LOOS + 0x6)
+#define PT_HP_CORE_STACK	(PT_LOOS + 0x7)
+#define PT_HP_CORE_SHM		(PT_LOOS + 0x8)
+#define PT_HP_CORE_MMF		(PT_LOOS + 0x9)
+#define PT_HP_PARALLEL		(PT_LOOS + 0x10)
+#define PT_HP_FASTBIND		(PT_LOOS + 0x11)
+#define PT_HP_OPT_ANNOT		(PT_LOOS + 0x12)
+#define PT_HP_HSL_ANNOT		(PT_LOOS + 0x13)
+#define PT_HP_STACK		(PT_LOOS + 0x14)
+
+#define PT_PARISC_ARCHEXT	0x70000000
+#define PT_PARISC_UNWIND	0x70000001
+
+/* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr.  */
+
+#define PF_PARISC_SBP		0x08000000
+
+#define PF_HP_PAGE_SIZE		0x00100000
+#define PF_HP_FAR_SHARED	0x00200000
+#define PF_HP_NEAR_SHARED	0x00400000
+#define PF_HP_CODE		0x01000000
+#define PF_HP_MODIFY		0x02000000
+#define PF_HP_LAZYSWAP		0x04000000
+#define PF_HP_SBP		0x08000000
+
+/*
+ * The following definitions are those for 32-bit ELF binaries on a 32-bit
+ * kernel and for 64-bit binaries on a 64-bit kernel.  To run 32-bit binaries
+ * on a 64-bit kernel, arch/parisc/kernel/binfmt_elf32.c defines these
+ * macros appropriately and then #includes binfmt_elf.c, which then includes
+ * this file.
+ */
+#ifndef ELF_CLASS
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ *
+ * Note that this header file is used by default in fs/binfmt_elf.c. So
+ * the following macros are for the default case. However, for the 64
+ * bit kernel we also support 32 bit parisc binaries. To do that
+ * arch/parisc/kernel/binfmt_elf32.c defines its own set of these
+ * macros, and then it includes fs/binfmt_elf.c to provide an alternate
+ * elf binary handler for 32 bit binaries (on the 64 bit kernel).
+ */
+#ifdef CONFIG_64BIT
+#define ELF_CLASS   ELFCLASS64
+#else
+#define ELF_CLASS	ELFCLASS32
+#endif
+
+typedef unsigned long elf_greg_t;
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization.  This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+
+#define ELF_PLATFORM  ("PARISC\0")
+
+#define SET_PERSONALITY(ex) \
+	set_personality((current->personality & ~PER_MASK) | PER_LINUX); \
+	current->thread.map_base = DEFAULT_MAP_BASE; \
+	current->thread.task_size = DEFAULT_TASK_SIZE \
+
+/*
+ * Fill in general registers in a core dump.  This saves pretty
+ * much the same registers as hp-ux, although in a different order.
+ * Registers marked # below are not currently saved in pt_regs, so
+ * we use their current values here.
+ *
+ * 	gr0..gr31
+ * 	sr0..sr7
+ * 	iaoq0..iaoq1
+ * 	iasq0..iasq1
+ * 	cr11 (sar)
+ * 	cr19 (iir)
+ * 	cr20 (isr)
+ * 	cr21 (ior)
+ *  #	cr22 (ipsw)
+ *  #	cr0 (recovery counter)
+ *  #	cr24..cr31 (temporary registers)
+ *  #	cr8,9,12,13 (protection IDs)
+ *  #	cr10 (scr/ccr)
+ *  #	cr15 (ext int enable mask)
+ *
+ */
+
+#define ELF_CORE_COPY_REGS(dst, pt)	\
+	memset(dst, 0, sizeof(dst));	/* don't leak any "random" bits */ \
+	memcpy(dst + 0, pt->gr, 32 * sizeof(elf_greg_t)); \
+	memcpy(dst + 32, pt->sr, 8 * sizeof(elf_greg_t)); \
+	memcpy(dst + 40, pt->iaoq, 2 * sizeof(elf_greg_t)); \
+	memcpy(dst + 42, pt->iasq, 2 * sizeof(elf_greg_t)); \
+	dst[44] = pt->sar;   dst[45] = pt->iir; \
+	dst[46] = pt->isr;   dst[47] = pt->ior; \
+	dst[48] = mfctl(22); dst[49] = mfctl(0); \
+	dst[50] = mfctl(24); dst[51] = mfctl(25); \
+	dst[52] = mfctl(26); dst[53] = mfctl(27); \
+	dst[54] = mfctl(28); dst[55] = mfctl(29); \
+	dst[56] = mfctl(30); dst[57] = mfctl(31); \
+	dst[58] = mfctl( 8); dst[59] = mfctl( 9); \
+	dst[60] = mfctl(12); dst[61] = mfctl(13); \
+	dst[62] = mfctl(10); dst[63] = mfctl(15);
+
+#endif /* ! ELF_CLASS */
+
+#define ELF_NGREG 80	/* We only need 64 at present, but leave space
+			   for expansion. */
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+#define ELF_NFPREG 32
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+struct task_struct;
+
+extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
+#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
+
+struct pt_regs;	/* forward declaration... */
+
+
+#define elf_check_arch(x) ((x)->e_machine == EM_PARISC && (x)->e_ident[EI_CLASS] == ELF_CLASS)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_DATA	ELFDATA2MSB
+#define ELF_ARCH	EM_PARISC
+#define ELF_OSABI 	ELFOSABI_LINUX
+
+/* %r23 is set by ld.so to a pointer to a function which might be 
+   registered using atexit.  This provides a means for the dynamic
+   linker to call DT_FINI functions for shared libraries that have
+   been loaded before the code runs.
+
+   So that we can use the same startup file with static executables,
+   we start programs with a value of 0 to indicate that there is no
+   such function.  */
+#define ELF_PLAT_INIT(_r, load_addr)       _r->gr[23] = 0
+
+#define ELF_EXEC_PAGESIZE	4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk.
+
+   (2 * TASK_SIZE / 3) turns into something undefined when run through a
+   32 bit preprocessor and in some cases results in the kernel trying to map
+   ld.so to the kernel virtual base. Use a sane value instead. /Jes 
+  */
+
+#define ELF_ET_DYN_BASE         (TASK_UNMAPPED_BASE + 0x01000000)
+
+/* This yields a mask that user programs can use to figure out what
+   instruction set this CPU supports.  This could be done in user space,
+   but it's not easy, and we've already done it here.  */
+
+#define ELF_HWCAP	0
+
+/* Masks for stack and mmap randomization */
+#define BRK_RND_MASK	(is_32bit_task() ? 0x07ffUL : 0x3ffffUL)
+#define MMAP_RND_MASK	(is_32bit_task() ? 0x1fffUL : 0x3ffffUL)
+#define STACK_RND_MASK	MMAP_RND_MASK
+
+struct mm_struct;
+extern unsigned long arch_randomize_brk(struct mm_struct *);
+#define arch_randomize_brk arch_randomize_brk
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/fb.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/fb.h
new file mode 100644
index 0000000..c4cd636
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/fb.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{	/* Make the framebuffer mmap uncacheable on parisc. */
+	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{	/* No "primary" display notion on parisc: always report 0. */
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/fixmap.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/fixmap.h
new file mode 100644
index 0000000..f7c3a09
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/fixmap.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+/*
+ * This file defines the locations of the fixed mappings on parisc.
+ *
+ * All of the values in this file are machine virtual addresses.
+ *
+ * All of the values in this file must be <4GB (because of assembly
+ * loading restrictions).  If you place this region anywhere above
+ * __PAGE_OFFSET, you must adjust the memory map accordingly */
+
+/* The alias region is used in kernel space to do copy/clear to or
+ * from areas congruently mapped with user space.  It is 8MB large
+ * and must be 16MB aligned */
+#define TMPALIAS_MAP_START	((__PAGE_OFFSET) - 16*1024*1024)
+/* This is the kernel area for all maps (vmalloc, dma etc.)  most
+ * usually, it extends up to TMPALIAS_MAP_START.  Virtual addresses
+ * 0..GATEWAY_PAGE_SIZE are reserved for the gateway page */
+#define KERNEL_MAP_START	(GATEWAY_PAGE_SIZE)
+#define KERNEL_MAP_END		(TMPALIAS_MAP_START)
+
+#ifndef __ASSEMBLY__
+extern void *parisc_vmalloc_start;
+#define PCXL_DMA_MAP_SIZE	(8*1024*1024)
+#define VMALLOC_START		((unsigned long)parisc_vmalloc_start)
+#define VMALLOC_END		(KERNEL_MAP_END)
+#endif /*__ASSEMBLY__*/
+
+#endif /*_ASM_FIXMAP_H*/
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/floppy.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/floppy.h
new file mode 100644
index 0000000..6d8276c
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/floppy.h
@@ -0,0 +1,271 @@
+/*    Architecture specific parts of the Floppy driver
+ *
+ *    Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *    Copyright (C) 2000 Matthew Wilcox (willy a debian . org)
+ *    Copyright (C) 2000 Dave Kennedy
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ASM_PARISC_FLOPPY_H
+#define __ASM_PARISC_FLOPPY_H
+
+#include <linux/vmalloc.h>
+
+
+/*
+ * The DMA channel used by the floppy controller cannot access data at
+ * addresses >= 16MB
+ *
+ * Went back to the 1MB limit, as some people had problems with the floppy
+ * driver otherwise. It doesn't matter much for performance anyway, as most
+ * floppy accesses go through the track buffer.
+ */
+#define _CROSS_64KB(a,s,vdma) \
+(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
+
+#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
+
+
+#define SW fd_routine[use_virtual_dma&1]
+#define CSW fd_routine[can_use_virtual_dma & 1]
+
+
+#define fd_inb(port)			readb(port)
+#define fd_outb(value, port)		writeb(value, port)
+
+#define fd_request_dma()        CSW._request_dma(FLOPPY_DMA,"floppy")
+#define fd_free_dma()           CSW._free_dma(FLOPPY_DMA)
+#define fd_enable_irq()         enable_irq(FLOPPY_IRQ)
+#define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
+#define fd_free_irq()		free_irq(FLOPPY_IRQ, NULL)
+#define fd_get_dma_residue()    SW._get_dma_residue(FLOPPY_DMA)
+#define fd_dma_mem_alloc(size)	SW._dma_mem_alloc(size)
+#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
+
+#define FLOPPY_CAN_FALLBACK_ON_NODMA
+
+static int virtual_dma_count=0;
+static int virtual_dma_residue=0;
+static char *virtual_dma_addr=0;
+static int virtual_dma_mode=0;
+static int doing_pdma=0;
+
+static void floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
+{	/* Floppy IRQ handler: performs PIO pseudo-DMA when doing_pdma is set. */
+	register unsigned char st;
+
+#undef TRACE_FLPY_INT
+
+#ifdef TRACE_FLPY_INT
+	static int calls=0;
+	static int bytes=0;
+	static int dma_wait=0;
+#endif
+	if (!doing_pdma) {	/* real DMA in use: just forward the interrupt */
+		floppy_interrupt(irq, dev_id, regs);
+		return;
+	}
+
+#ifdef TRACE_FLPY_INT
+	if(!calls)
+		bytes = virtual_dma_count;
+#endif
+
+	{
+		register int lcount;
+		register char *lptr = virtual_dma_addr;
+
+		for (lcount = virtual_dma_count; lcount; lcount--) {	/* move bytes while the FDC reports data-ready (status bits 0xa0) */
+			st = fd_inb(virtual_dma_port+4) & 0xa0 ;
+			if (st != 0xa0) 
+				break;
+			if (virtual_dma_mode) {	/* nonzero: CPU writes out to the FDC */
+				fd_outb(*lptr, virtual_dma_port+5);
+			} else {
+				*lptr = fd_inb(virtual_dma_port+5);
+			}
+			lptr++;
+		}
+		virtual_dma_count = lcount;	/* record remaining bytes and resume point */
+		virtual_dma_addr = lptr;
+		st = fd_inb(virtual_dma_port+4);
+	}
+
+#ifdef TRACE_FLPY_INT
+	calls++;
+#endif
+	if (st == 0x20)	/* controller still busy: wait for the next interrupt */
+		return;
+	if (!(st & 0x20)) {	/* transfer finished: account residue, hand off to the driver */
+		virtual_dma_residue += virtual_dma_count;
+		virtual_dma_count = 0;
+#ifdef TRACE_FLPY_INT
+		printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", 
+		       virtual_dma_count, virtual_dma_residue, calls, bytes,
+		       dma_wait);
+		calls = 0;
+		dma_wait=0;
+#endif
+		doing_pdma = 0;
+		floppy_interrupt(irq, dev_id, regs);
+		return;
+	}
+#ifdef TRACE_FLPY_INT
+	if (!virtual_dma_count)
+		dma_wait++;
+#endif
+}
+
+static void fd_disable_dma(void)
+{	/* Stop the current transfer; disable hw DMA unless in pure virtual-DMA mode. */
+	if(! (can_use_virtual_dma & 1))
+		disable_dma(FLOPPY_DMA);
+	doing_pdma = 0;
+	virtual_dma_residue += virtual_dma_count;	/* remember untransferred bytes */
+	virtual_dma_count=0;
+}
+
+static int vdma_request_dma(unsigned int dmanr, const char * device_id)
+{	/* Virtual DMA needs no channel reservation; always succeeds. */
+	return 0;
+}
+
+static void vdma_nop(unsigned int dummy)
+{	/* No-op placeholder for the _free_dma slot in the virtual-DMA table. */
+}
+
+
+static int vdma_get_dma_residue(unsigned int dummy)
+{	/* Bytes not yet transferred by the pseudo-DMA handler. */
+	return virtual_dma_count + virtual_dma_residue;
+}
+
+
+static int fd_request_irq(void)
+{	/* Install the pseudo-DMA-aware handler when virtual DMA may be used. */
+	if(can_use_virtual_dma)
+		return request_irq(FLOPPY_IRQ, floppy_hardint,
+				   0, "floppy", NULL);
+	else
+		return request_irq(FLOPPY_IRQ, floppy_interrupt,
+				   0, "floppy", NULL);
+}
+
+static unsigned long dma_mem_alloc(unsigned long size)
+{	/* Allocate a buffer from the DMA zone for the hardware DMA path. */
+	return __get_dma_pages(GFP_KERNEL, get_order(size));
+}
+
+
+static unsigned long vdma_mem_alloc(unsigned long size)
+{	/* Virtual DMA copies by CPU, so plain vmalloc memory suffices. */
+	return (unsigned long) vmalloc(size);
+
+}
+
+#define nodma_mem_alloc(size) vdma_mem_alloc(size)
+
+static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
+{	/* Free a buffer from either allocator: vfree if above high_memory, else free_pages. */
+	if((unsigned int) addr >= (unsigned int) high_memory)	/* NOTE(review): casts truncate to 32 bits -- looks suspect on 64-bit; confirm */
+		return vfree((void *)addr);
+	else
+		free_pages(addr, get_order(size));		
+}
+
+#define fd_dma_mem_free(addr, size)  _fd_dma_mem_free(addr, size) 
+
+static void _fd_chose_dma_mode(char *addr, unsigned long size)
+{	/* Auto mode (==2): pick virtual DMA when the buffer is vmalloc'd, above 16MB, or crosses 64K. */
+	if(can_use_virtual_dma == 2) {
+		if((unsigned int) addr >= (unsigned int) high_memory ||
+		   virt_to_bus(addr) >= 0x1000000 ||
+		   _CROSS_64KB(addr, size, 0))
+			use_virtual_dma = 1;
+		else
+			use_virtual_dma = 0;
+	} else {	/* fixed mode: follow can_use_virtual_dma bit 0 */
+		use_virtual_dma = can_use_virtual_dma & 1;
+	}
+}
+
+#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
+
+
+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
+{	/* Arm the pseudo-DMA state consumed by floppy_hardint(). */
+	doing_pdma = 1;
+	virtual_dma_port = io;
+	virtual_dma_mode = (mode  == DMA_MODE_WRITE);	/* nonzero => CPU writes to the FDC */
+	virtual_dma_addr = addr;
+	virtual_dma_count = size;
+	virtual_dma_residue = 0;
+	return 0;
+}
+
+static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
+{	/* Program the real DMA controller for the transfer; returns 0, or -1 on a 64K-crossing buffer. */
+#ifdef FLOPPY_SANITY_CHECK
+	if (CROSS_64KB(addr, size)) {	/* hardware DMA must not cross a 64K boundary */
+		printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
+		return -1;
+	}
+#endif
+	/* actual, physical DMA */
+	doing_pdma = 0;
+	clear_dma_ff(FLOPPY_DMA);
+	set_dma_mode(FLOPPY_DMA,mode);
+	set_dma_addr(FLOPPY_DMA,virt_to_bus(addr));
+	set_dma_count(FLOPPY_DMA,size);
+	enable_dma(FLOPPY_DMA);
+	return 0;
+}
+
+static struct fd_routine_l {
+	int (*_request_dma)(unsigned int dmanr, const char * device_id);
+	void (*_free_dma)(unsigned int dmanr);
+	int (*_get_dma_residue)(unsigned int dummy);
+	unsigned long (*_dma_mem_alloc) (unsigned long size);
+	int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
+} fd_routine[] = {	/* [0] = real hardware DMA, [1] = virtual (PIO) DMA; indexed via the SW/CSW macros */
+	{
+		request_dma,
+		free_dma,
+		get_dma_residue,
+		dma_mem_alloc,
+		hard_dma_setup
+	},
+	{
+		vdma_request_dma,
+		vdma_nop,
+		vdma_get_dma_residue,
+		vdma_mem_alloc,
+		vdma_dma_setup
+	}
+};
+
+
+static int FDC1 = 0x3f0; /* Lies.  Floppy controller is memory mapped, not io mapped */
+static int FDC2 = -1;
+
+#define FLOPPY0_TYPE	0
+#define FLOPPY1_TYPE	0
+
+#define N_FDC 1
+#define N_DRIVE 8
+
+#define EXTRA_FLOPPY_PARAMS
+
+#endif /* __ASM_PARISC_FLOPPY_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/ftrace.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/ftrace.h
new file mode 100644
index 0000000..42b2c75
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/ftrace.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_FTRACE_H
+#define _ASM_PARISC_FTRACE_H
+
+#ifndef __ASSEMBLY__
+extern void mcount(void);
+
+#define MCOUNT_INSN_SIZE 4
+
+extern unsigned long sys_call_table[];
+
+extern unsigned long return_address(unsigned int);
+
+#define ftrace_return_address(n) return_address(n)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PARISC_FTRACE_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/futex.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/futex.h
new file mode 100644
index 0000000..cf7ba05
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/futex.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_FUTEX_H
+#define _ASM_PARISC_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/atomic.h>
+#include <asm/errno.h>
+
+/* The following has to match the LWS code in syscall.S.  We have
+   sixteen four-word locks. */
+
+static inline void
+_futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags)
+{	/* Take the hashed LWS lock for this futex word with IRQs disabled. */
+	extern u32 lws_lock_start[];
+	long index = ((long)uaddr & 0xf0) >> 2;	/* hash into sixteen four-word locks (must match syscall.S) */
+	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
+	local_irq_save(*flags);
+	arch_spin_lock(s);
+}
+
+static inline void
+_futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
+{	/* Release the hashed LWS lock taken by _futex_spin_lock_irqsave(). */
+	extern u32 lws_lock_start[];
+	long index = ((long)uaddr & 0xf0) >> 2;	/* same hash as the lock side */
+	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
+	arch_spin_unlock(s);
+	local_irq_restore(*flags);
+}
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{	/* Apply op/oparg to the user futex word under the hashed lock; old value returned in *oval. */
+	unsigned long int flags;
+	int oldval, ret;
+	u32 tmp;
+
+	_futex_spin_lock_irqsave(uaddr, &flags);
+	pagefault_disable();	/* a fault must not sleep while the lock is held */
+
+	ret = -EFAULT;
+	if (unlikely(get_user(oldval, uaddr) != 0))
+		goto out_pagefault_enable;
+
+	ret = 0;
+	tmp = oldval;
+
+	switch (op) {	/* compute the new value from the futex opcode */
+	case FUTEX_OP_SET:
+		tmp = oparg;
+		break;
+	case FUTEX_OP_ADD:
+		tmp += oparg;
+		break;
+	case FUTEX_OP_OR:
+		tmp |= oparg;
+		break;
+	case FUTEX_OP_ANDN:
+		tmp &= ~oparg;
+		break;
+	case FUTEX_OP_XOR:
+		tmp ^= oparg;
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
+		ret = -EFAULT;
+
+out_pagefault_enable:
+	pagefault_enable();
+	_futex_spin_unlock_irqrestore(uaddr, &flags);
+
+	if (!ret)	/* report the pre-op value only on success */
+		*oval = oldval;
+
+	return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
+{	/* Lock-emulated cmpxchg: store newval iff *uaddr == oldval; observed value in *uval. */
+	u32 val;
+	unsigned long flags;
+
+	/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
+	 * our gateway page, and causes no end of trouble...
+	 */
+	if (uaccess_kernel() && !uaddr)
+		return -EFAULT;
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+		return -EFAULT;
+
+	/* HPPA has no cmpxchg in hardware and therefore the
+	 * best we can do here is use an array of locks. The
+	 * lock selected is based on a hash of the userspace
+	 * address. This should scale to a couple of CPUs.
+	 */
+
+	_futex_spin_lock_irqsave(uaddr, &flags);
+	if (unlikely(get_user(val, uaddr) != 0)) {	/* read failed: drop the lock before bailing */
+		_futex_spin_unlock_irqrestore(uaddr, &flags);
+		return -EFAULT;
+	}
+
+	if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {	/* comparison matched but the store faulted */
+		_futex_spin_unlock_irqrestore(uaddr, &flags);
+		return -EFAULT;
+	}
+
+	*uval = val;
+	_futex_spin_unlock_irqrestore(uaddr, &flags);
+
+	return 0;
+}
+
+#endif /*__KERNEL__*/
+#endif /*_ASM_PARISC_FUTEX_H*/
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/grfioctl.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/grfioctl.h
new file mode 100644
index 0000000..671e060
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/grfioctl.h
@@ -0,0 +1,113 @@
+/*  Architecture specific parts of HP's STI (framebuffer) driver.
+ *  Structures are HP-UX compatible for XFree86 usage.
+ * 
+ *    Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *    Copyright (C) 2001 Helge Deller (deller a parisc-linux org)
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ASM_PARISC_GRFIOCTL_H
+#define __ASM_PARISC_GRFIOCTL_H
+
+/* upper 32 bits of graphics id (HP/UX identifier) */
+
+#define GRFGATOR		8
+#define S9000_ID_S300		9
+#define GRFBOBCAT		9
+#define	GRFCATSEYE		9
+#define S9000_ID_98720		10
+#define GRFRBOX			10
+#define S9000_ID_98550		11
+#define GRFFIREEYE		11
+#define S9000_ID_A1096A		12
+#define GRFHYPERION		12
+#define S9000_ID_FRI		13
+#define S9000_ID_98730		14
+#define GRFDAVINCI		14
+#define S9000_ID_98705		0x26C08070	/* Tigershark */
+#define S9000_ID_98736		0x26D148AB
+#define S9000_ID_A1659A		0x26D1482A	/* CRX 8 plane color (=ELK) */
+#define S9000_ID_ELK		S9000_ID_A1659A
+#define S9000_ID_A1439A		0x26D148EE	/* CRX24 = CRX+ (24-plane color) */
+#define S9000_ID_A1924A		0x26D1488C	/* GRX gray-scale */
+#define S9000_ID_ELM		S9000_ID_A1924A
+#define S9000_ID_98765		0x27480DEF
+#define S9000_ID_ELK_768	0x27482101
+#define S9000_ID_STINGER	0x27A4A402
+#define S9000_ID_TIMBER		0x27F12392	/* Bushmaster (710) Graphics */
+#define S9000_ID_TOMCAT		0x27FCCB6D	/* dual-headed ELK (Dual CRX) */
+#define S9000_ID_ARTIST		0x2B4DED6D	/* Artist (Gecko/712 & 715) onboard Graphics */
+#define S9000_ID_HCRX		0x2BCB015A	/* Hyperdrive/Hyperbowl (A4071A) Graphics */
+#define CRX24_OVERLAY_PLANES	0x920825AA	/* Overlay planes on CRX24 */
+
+#define CRT_ID_ELK_1024		S9000_ID_ELK_768 /* Elk 1024x768  CRX */
+#define CRT_ID_ELK_1280		S9000_ID_A1659A	/* Elk 1280x1024 CRX */
+#define CRT_ID_ELK_1024DB	0x27849CA5      /* Elk 1024x768 double buffer */
+#define CRT_ID_ELK_GS		S9000_ID_A1924A	/* Elk 1280x1024 GreyScale    */
+#define CRT_ID_CRX24		S9000_ID_A1439A	/* Piranha */
+#define CRT_ID_VISUALIZE_EG	0x2D08C0A7      /* Graffiti, A4450A (built-in B132+/B160L) */
+#define CRT_ID_THUNDER		0x2F23E5FC      /* Thunder 1 VISUALIZE 48*/
+#define CRT_ID_THUNDER2		0x2F8D570E      /* Thunder 2 VISUALIZE 48 XP*/
+#define CRT_ID_HCRX		S9000_ID_HCRX	/* Hyperdrive HCRX */
+#define CRT_ID_CRX48Z		S9000_ID_STINGER /* Stinger */
+#define CRT_ID_DUAL_CRX		S9000_ID_TOMCAT	/* Tomcat */
+#define CRT_ID_PVRX		S9000_ID_98705	/* Tigershark */
+#define CRT_ID_TIMBER		S9000_ID_TIMBER	/* Timber (710 builtin) */
+#define CRT_ID_TVRX		S9000_ID_98765	/* TVRX (gto/falcon) */
+#define CRT_ID_ARTIST		S9000_ID_ARTIST	/* Artist */
+#define CRT_ID_SUMMIT		0x2FC1066B      /* Summit FX2, FX4, FX6 ... */
+#define CRT_ID_LEGO		0x35ACDA30	/* Lego FX5, FX10 ... */
+#define CRT_ID_PINNACLE		0x35ACDA16	/* Pinnacle FXe */ 
+
+/* structure for ioctl(GCDESCRIBE) */
+
+#define gaddr_t unsigned long	/* FIXME: PA2.0 (64bit) portable ? */
+
+struct	grf_fbinfo {	/* filled in for ioctl(GCDESCRIBE); layout is HP-UX compatible */
+	unsigned int	id;		/* upper 32 bits of graphics id */
+	unsigned int	mapsize;	/* mapped size of framebuffer */
+	unsigned int	dwidth, dlength;/* x and y sizes */
+	unsigned int	width, length;	/* total x and total y size */
+	unsigned int	xlen;		/* x pitch size */
+	unsigned int	bpp, bppu;	/* bits per pixel and used bpp */
+	unsigned int	npl, nplbytes;	/* # of planes and bytes per plane */
+	char		name[32];	/* name of the device (from ROM) */
+	unsigned int	attr;		/* attributes */
+	gaddr_t 	fbbase, regbase;/* framebuffer and register base addr */
+	gaddr_t		regions[6];	/* region bases */
+};
+
+#define	GCID		_IOR('G', 0, int)
+#define	GCON		_IO('G', 1)
+#define	GCOFF		_IO('G', 2)
+#define	GCAON		_IO('G', 3)
+#define	GCAOFF		_IO('G', 4)
+#define	GCMAP		_IOWR('G', 5, int)
+#define	GCUNMAP		_IOWR('G', 6, int)
+#define	GCMAP_HPUX	_IO('G', 5)
+#define	GCUNMAP_HPUX	_IO('G', 6)
+#define	GCLOCK		_IO('G', 7)
+#define	GCUNLOCK	_IO('G', 8)
+#define	GCLOCK_MINIMUM	_IO('G', 9)
+#define	GCUNLOCK_MINIMUM _IO('G', 10)
+#define	GCSTATIC_CMAP	_IO('G', 11)
+#define	GCVARIABLE_CMAP _IO('G', 12)
+#define GCTERM		_IOWR('G',20,int)	/* multi-headed Tomcat */ 
+#define GCDESCRIBE	_IOR('G', 21, struct grf_fbinfo)
+#define GCFASTLOCK	_IO('G', 26)
+
+#endif /* __ASM_PARISC_GRFIOCTL_H */
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/hardirq.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/hardirq.h
new file mode 100644
index 0000000..0778151
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/hardirq.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* hardirq.h: PA-RISC hard IRQ support.
+ *
+ * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
+ * Copyright (C) 2013 Helge Deller <deller@gmx.de>
+ */
+
+#ifndef _PARISC_HARDIRQ_H
+#define _PARISC_HARDIRQ_H
+
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+#ifdef CONFIG_IRQSTACKS
+#define __ARCH_HAS_DO_SOFTIRQ
+#endif
+
+/*
+ * Per-CPU interrupt statistics.  One cacheline-aligned instance exists
+ * per CPU (see irq_stat below); fields are updated through the
+ * inc_irq_stat()/__inc_irq_stat() helpers.
+ */
+typedef struct {
+	unsigned int __softirq_pending;
+	unsigned int kernel_stack_usage;
+	unsigned int irq_stack_usage;
+#ifdef CONFIG_SMP
+	unsigned int irq_resched_count;
+#endif
+	unsigned int irq_unaligned_count;
+	unsigned int irq_fpassist_count;
+	unsigned int irq_tlb_count;
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+
+/* Tell the generic IRQ code we provide our own irq_stat accounting. */
+#define __ARCH_IRQ_STAT
+#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
+#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)
+#define __inc_irq_stat(member)	__this_cpu_inc(irq_stat.member)
+#define local_softirq_pending()	this_cpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x)	\
+		this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x)	this_cpu_or(irq_stat.__softirq_pending, (x))
+
+#define ack_bad_irq(irq) WARN(1, "unexpected IRQ trap at vector %02x\n", irq)
+
+#endif /* _PARISC_HARDIRQ_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/hardware.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/hardware.h
new file mode 100644
index 0000000..d6e1ed1
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/hardware.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_HARDWARE_H
+#define _PARISC_HARDWARE_H
+
+#include <linux/mod_devicetable.h>
+
+#define HWTYPE_ANY_ID		PA_HWTYPE_ANY_ID
+#define HVERSION_ANY_ID		PA_HVERSION_ANY_ID
+#define HVERSION_REV_ANY_ID	PA_HVERSION_REV_ANY_ID
+#define SVERSION_ANY_ID		PA_SVERSION_ANY_ID
+
+/* Static description of one known piece of HP hardware. */
+struct hp_hardware {
+	unsigned short	hw_type:5;	/* HPHW_xxx */
+	unsigned short	hversion;
+	unsigned long	sversion:28;
+	unsigned short	opt;
+	const char	name[80];	/* The hardware description */
+};
+
+struct parisc_device;
+
+enum cpu_type {
+	pcx	= 0, /* pa7000		pa 1.0  */
+	pcxs	= 1, /* pa7000		pa 1.1a */
+	pcxt	= 2, /* pa7100		pa 1.1b */
+	pcxt_	= 3, /* pa7200	(t')	pa 1.1c */
+	pcxl	= 4, /* pa7100lc	pa 1.1d */
+	pcxl2	= 5, /* pa7300lc	pa 1.1e */
+	pcxu	= 6, /* pa8000		pa 2.0  */
+	pcxu_	= 7, /* pa8200	(u+)	pa 2.0  */
+	pcxw	= 8, /* pa8500		pa 2.0  */
+	pcxw_	= 9, /* pa8600	(w+)	pa 2.0  */
+	pcxw2	= 10, /* pa8700		pa 2.0  */
+	mako	= 11, /* pa8800		pa 2.0  */
+	mako2	= 12  /* pa8900		pa 2.0  */
+};
+
+extern const char * const cpu_name_version[][2]; /* mapping from enum cpu_type to strings */
+
+struct parisc_driver;
+
+/* Register layout of an I/O module (all registers are 32-bit MMIO). */
+struct io_module {
+        volatile uint32_t nothing;		/* reg 0 */
+        volatile uint32_t io_eim;
+        volatile uint32_t io_dc_adata;
+        volatile uint32_t io_ii_cdata;
+        volatile uint32_t io_dma_link;		/* reg 4 */
+        volatile uint32_t io_dma_command;
+        volatile uint32_t io_dma_address;
+        volatile uint32_t io_dma_count;
+        volatile uint32_t io_flex;		/* reg 8 */
+        volatile uint32_t io_spa_address;
+        volatile uint32_t reserved1[2];
+        volatile uint32_t io_command;		/* reg 12 */
+        volatile uint32_t io_status;
+        volatile uint32_t io_control;
+        volatile uint32_t io_data;
+        volatile uint32_t reserved2;		/* reg 16 */
+        volatile uint32_t chain_addr;
+        volatile uint32_t sub_mask_clr;
+        volatile uint32_t reserved3[13];
+        volatile uint32_t undefined[480];
+        volatile uint32_t unpriv[512];
+};
+
+/* Register layout of a bus converter module. */
+struct bc_module {
+        volatile uint32_t unused1[12];
+        volatile uint32_t io_command;
+        volatile uint32_t io_status;
+        volatile uint32_t io_control;
+        volatile uint32_t unused2[1];
+        volatile uint32_t io_err_resp;
+        volatile uint32_t io_err_info;
+        volatile uint32_t io_err_req;
+        volatile uint32_t unused3[11];
+        volatile uint32_t io_io_low;
+        volatile uint32_t io_io_high;
+};
+
+/* Hardware type codes for struct hp_hardware::hw_type (5-bit field). */
+#define HPHW_NPROC     0 
+#define HPHW_MEMORY    1       
+#define HPHW_B_DMA     2
+#define HPHW_OBSOLETE  3
+#define HPHW_A_DMA     4
+#define HPHW_A_DIRECT  5
+#define HPHW_OTHER     6
+#define HPHW_BCPORT    7
+#define HPHW_CIO       8
+#define HPHW_CONSOLE   9
+#define HPHW_FIO       10
+#define HPHW_BA        11
+#define HPHW_IOA       12
+#define HPHW_BRIDGE    13
+#define HPHW_FABRIC    14
+#define HPHW_MC	       15
+#define HPHW_FAULTY    31
+
+struct parisc_device_id;
+
+/* hardware.c: */
+extern const char *parisc_hardware_description(struct parisc_device_id *id);
+extern enum cpu_type parisc_get_cpu_type(unsigned long hversion);
+
+struct pci_dev;
+struct hardware_path;
+
+/* drivers.c: */
+extern struct parisc_device *alloc_pa_dev(unsigned long hpa,
+		struct hardware_path *path);
+extern int register_parisc_device(struct parisc_device *dev);
+extern int register_parisc_driver(struct parisc_driver *driver);
+extern int count_parisc_driver(struct parisc_driver *driver);
+extern int unregister_parisc_driver(struct parisc_driver *driver);
+extern void walk_central_bus(void);
+extern const struct parisc_device *find_pa_parent_type(const struct parisc_device *, int);
+extern void print_parisc_devices(void);
+extern char *print_pa_hwpath(struct parisc_device *dev, char *path);
+extern char *print_pci_hwpath(struct pci_dev *dev, char *path);
+extern void get_pci_node_path(struct pci_dev *dev, struct hardware_path *path);
+extern void init_parisc_bus(void);
+extern struct device *hwpath_to_device(struct hardware_path *modpath);
+extern void device_to_hwpath(struct device *dev, struct hardware_path *path);
+
+
+/* inventory.c: */
+extern void do_memory_inventory(void);
+extern void do_device_inventory(void);
+
+#endif /* _PARISC_HARDWARE_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/hash.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/hash.h
new file mode 100644
index 0000000..525950e
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/hash.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_HASH_H
+#define _ASM_HASH_H
+
+/*
+ * HP-PA only implements integer multiply in the FPU.  However, for
+ * integer multiplies by constant, it has a number of shift-and-add
+ * (but no shift-and-subtract, sigh!) instructions that a compiler
+ * can synthesize a code sequence with.
+ *
+ * Unfortunately, GCC isn't very efficient at using them.  For example
+ * it uses three instructions for "x *= 21" when only two are needed.
+ * But we can find a sequence manually.
+ */
+
+#define HAVE_ARCH__HASH_32 1
+
+/*
+ * This is a multiply by GOLDEN_RATIO_32 = 0x61C88647 optimized for the
+ * PA7100 pairing rules.  This is an in-order 2-way superscalar processor.
+ * Only one instruction in a pair may be a shift (by more than 3 bits),
+ * but other than that, simple ALU ops (including shift-and-add by up
+ * to 3 bits) may be paired arbitrarily.
+ *
+ * PA8xxx processors also dual-issue ALU instructions, although with
+ * fewer constraints, so this schedule is good for them, too.
+ *
+ * This 6-step sequence was found by Yevgen Voronenko's implementation
+ * of the Hcub algorithm at http://spiral.ece.cmu.edu/mcm/gen.html.
+ */
+static inline u32 __attribute_const__ __hash_32(u32 x)
+{
+	u32 a, b, c;
+
+	/*
+	 * Phase 1: Compute  a = (x << 19) + x,
+	 * b = (x << 9) + a, c = (x << 23) + b.
+	 */
+	a = x << 19;		/* Two shifts can't be paired */
+	b = x << 9;	a += x;
+	c = x << 23;	b += a;
+			c += b;
+	/* Phase 2: Return (b<<11) + (c<<6) + (a<<3) - c */
+	b <<= 11;
+	a += c << 3;	b -= c;
+	return (a << 3) + b;
+}
+
+#if BITS_PER_LONG == 64
+
+#define HAVE_ARCH_HASH_64 1
+
+/*
+ * Finding a good shift-and-add chain for GOLDEN_RATIO_64 is tricky,
+ * because available software for the purpose chokes on constants this
+ * large.  (It's mostly designed for compiling FIR filter coefficients
+ * into FPGAs.)
+ *
+ * However, Jason Thong pointed out a work-around.  The Hcub software
+ * (http://spiral.ece.cmu.edu/mcm/gen.html) is designed for *multiple*
+ * constant multiplication, and is good at finding shift-and-add chains
+ * which share common terms.
+ *
+ * Looking at 0x61C8864680B583EB in binary:
+ * 0110000111001000100001100100011010000000101101011000001111101011
+ *  \______________/    \__________/       \_______/     \________/
+ *   \____________________________/         \____________________/
+ * you can see the non-zero bits are divided into several well-separated
+ * blocks.  Hcub can find algorithms for those terms separately, which
+ * can then be shifted and added together.
+ *
+ * Dividing the input into 2, 3 or 4 blocks, Hcub can find solutions
+ * with 10, 9 or 8 adds, respectively, making a total of 11 for the
+ * whole number.
+ *
+ * Using just two large blocks, 0xC3910C8D << 31 in the high bits,
+ * and 0xB583EB in the low bits, produces as good an algorithm as any,
+ * and with one more small shift than alternatives.
+ *
+ * The high bits are a larger number and more work to compute, as well
+ * as needing one extra cycle to shift left 31 bits before the final
+ * addition, so they are the critical path for scheduling.  The low bits
+ * can fit into the scheduling slots left over.
+ */
+
+
+/*
+ * This _ASSIGN(dst, src) macro performs "dst = src", but prevents GCC
+ * from inferring anything about the value assigned to "dest".
+ *
+ * This prevents it from mis-optimizing certain sequences.
+ * In particular, gcc is annoyingly eager to combine consecutive shifts.
+ * Given "x <<= 19; y += x; z += x << 1;", GCC will turn this into
+ * "y += x << 19; z += x << 20;" even though the latter sequence needs
+ * an additional instruction and temporary register.
+ *
+ * Because no actual assembly code is generated, this construct is
+ * usefully portable across all GCC platforms, and so can be test-compiled
+ * on non-PA systems.
+ *
+ * In two places, additional unused input dependencies are added.  This
+ * forces GCC's scheduling so it does not rearrange instructions too much.
+ * Because the PA-8xxx is out of order, I'm not sure how much this matters,
+ * but why make it more difficult for the processor than necessary?
+ */
+#define _ASSIGN(dst, src, ...) asm("" : "=r" (dst) : "0" (src), ##__VA_ARGS__)
+
+/*
+ * Multiply by GOLDEN_RATIO_64 = 0x61C8864680B583EB using a heavily
+ * optimized shift-and-add sequence.
+ *
+ * Without the final shift, the multiply proper is 19 instructions,
+ * 10 cycles and uses only 4 temporaries.  Whew!
+ *
+ * You are not expected to understand this.
+ */
+static __always_inline u32 __attribute_const__
+hash_64(u64 a, unsigned int bits)
+{
+	u64 b, c, d;
+
+	/*
+	 * Encourage GCC to move a dynamic shift to %sar early,
+	 * thereby freeing up an additional temporary register.
+	 */
+	if (!__builtin_constant_p(bits))
+		asm("" : "=q" (bits) : "0" (64 - bits));
+	else
+		bits = 64 - bits;
+
+	_ASSIGN(b, a*5);	c = a << 13;
+	b = (b << 2) + a;	_ASSIGN(d, a << 17);
+	a = b + (a << 1);	c += d;
+	d = a << 10;		_ASSIGN(a, a << 19);
+	d = a - d;		_ASSIGN(a, a << 4, "X" (d));
+	c += b;			a += b;
+	d -= c;			c += a << 1;
+	a += c << 3;		_ASSIGN(b, b << (7+31), "X" (c), "X" (d));
+	a <<= 31;		b += d;
+	a += b;
+	return a >> bits;
+}
+#undef _ASSIGN	/* We're a widely-used header file, so don't litter! */
+
+#endif /* BITS_PER_LONG == 64 */
+
+#endif /* _ASM_HASH_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/hugetlb.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/hugetlb.h
new file mode 100644
index 0000000..58e0f46
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/hugetlb.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC64_HUGETLB_H
+#define _ASM_PARISC64_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
+
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t pte);
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep);
+
+/* PA-RISC has no address ranges reserved exclusively for huge pages. */
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+					 unsigned long addr,
+					 unsigned long len) {
+	return 0;
+}
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(struct file *file,
+			unsigned long addr, unsigned long len)
+{
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
+		return -EINVAL;
+	return 0;
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+					  unsigned long addr, unsigned long end,
+					  unsigned long floor,
+					  unsigned long ceiling)
+{
+	free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+/* Intentionally empty on this architecture. */
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep);
+
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty);
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
+/* No per-page hugepage flags to clear on this architecture. */
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
+#endif /* _ASM_PARISC64_HUGETLB_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/ide.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/ide.h
new file mode 100644
index 0000000..34cdac0
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/ide.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  linux/include/asm-parisc/ide.h
+ *
+ *  Copyright (C) 1994-1996  Linus Torvalds & authors
+ */
+
+/*
+ *  This file contains the PARISC architecture specific IDE code.
+ */
+
+#ifndef __ASM_PARISC_IDE_H
+#define __ASM_PARISC_IDE_H
+
+#ifdef __KERNEL__
+
+/* Generic I/O and MEMIO string operations.  */
+
+#define __ide_insw	insw
+#define __ide_insl	insl
+#define __ide_outsw	outsw
+#define __ide_outsl	outsl
+
+/*
+ * MMIO string operations: repeatedly read/write a single memory-mapped
+ * register via the non-byteswapping __raw_* accessors, stepping through
+ * the in-memory buffer at "addr".
+ */
+static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
+{
+	while (count--) {
+		*(u16 *)addr = __raw_readw(port);
+		addr += 2;
+	}
+}
+
+static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
+{
+	while (count--) {
+		*(u32 *)addr = __raw_readl(port);
+		addr += 4;
+	}
+}
+
+static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
+{
+	while (count--) {
+		__raw_writew(*(u16 *)addr, port);
+		addr += 2;
+	}
+}
+
+static __inline__ void __ide_mm_outsl(void __iomem *port, void *addr, u32 count)
+{
+	while (count--) {
+		__raw_writel(*(u32 *)addr, port);
+		addr += 4;
+	}
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_PARISC_IDE_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/io.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/io.h
new file mode 100644
index 0000000..27c62ba
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/io.h
@@ -0,0 +1,327 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#include <linux/types.h>
+#include <asm/pgtable.h>
+
+#define virt_to_phys(a) ((unsigned long)__pa(a))
+#define phys_to_virt(a) __va(a)
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/* There is no ISA bus on PA-RISC; any caller of these is a bug. */
+static inline unsigned long isa_bus_to_virt(unsigned long addr) {
+	BUG();
+	return 0;
+}
+
+static inline unsigned long isa_virt_to_bus(void *addr) {
+	BUG();
+	return 0;
+}
+
+/*
+ * Memory mapped I/O
+ *
+ * readX()/writeX() do byteswapping and take an ioremapped address
+ * __raw_readX()/__raw_writeX() don't byteswap and take an ioremapped address.
+ * gsc_*() don't byteswap and operate on physical addresses;
+ *   eg dev->hpa or 0xfee00000.
+ */
+
+/*
+ * The byte and halfword variants bracket the access with rsm/mtsm on
+ * PSW_SM_D, i.e. they clear that PSW bit for the duration of the load
+ * or store and then restore the previous PSW value.
+ */
+static inline unsigned char gsc_readb(unsigned long addr)
+{
+	long flags;
+	unsigned char ret;
+
+	__asm__ __volatile__(
+	"	rsm	%3,%0\n"
+	"	ldbx	0(%2),%1\n"
+	"	mtsm	%0\n"
+	: "=&r" (flags), "=r" (ret) : "r" (addr), "i" (PSW_SM_D) );
+
+	return ret;
+}
+
+static inline unsigned short gsc_readw(unsigned long addr)
+{
+	long flags;
+	unsigned short ret;
+
+	__asm__ __volatile__(
+	"	rsm	%3,%0\n"
+	"	ldhx	0(%2),%1\n"
+	"	mtsm	%0\n"
+	: "=&r" (flags), "=r" (ret) : "r" (addr), "i" (PSW_SM_D) );
+
+	return ret;
+}
+
+static inline unsigned int gsc_readl(unsigned long addr)
+{
+	u32 ret;
+
+	__asm__ __volatile__(
+	"	ldwax	0(%1),%0\n"
+	: "=r" (ret) : "r" (addr) );
+
+	return ret;
+}
+
+static inline unsigned long long gsc_readq(unsigned long addr)
+{
+	unsigned long long ret;
+
+#ifdef CONFIG_64BIT
+	__asm__ __volatile__(
+	"	ldda	0(%1),%0\n"
+	:  "=r" (ret) : "r" (addr) );
+#else
+	/* two reads may have side effects.. */
+	ret = ((u64) gsc_readl(addr)) << 32;
+	ret |= gsc_readl(addr+4);
+#endif
+	return ret;
+}
+
+static inline void gsc_writeb(unsigned char val, unsigned long addr)
+{
+	long flags;
+	__asm__ __volatile__(
+	"	rsm	%3,%0\n"
+	"	stbs	%1,0(%2)\n"
+	"	mtsm	%0\n"
+	: "=&r" (flags) :  "r" (val), "r" (addr), "i" (PSW_SM_D) );
+}
+
+static inline void gsc_writew(unsigned short val, unsigned long addr)
+{
+	long flags;
+	__asm__ __volatile__(
+	"	rsm	%3,%0\n"
+	"	sths	%1,0(%2)\n"
+	"	mtsm	%0\n"
+	: "=&r" (flags) :  "r" (val), "r" (addr), "i" (PSW_SM_D) );
+}
+
+static inline void gsc_writel(unsigned int val, unsigned long addr)
+{
+	__asm__ __volatile__(
+	"	stwas	%0,0(%1)\n"
+	: :  "r" (val), "r" (addr) );
+}
+
+static inline void gsc_writeq(unsigned long long val, unsigned long addr)
+{
+#ifdef CONFIG_64BIT
+	__asm__ __volatile__(
+	"	stda	%0,0(%1)\n"
+	: :  "r" (val), "r" (addr) );
+#else
+	/* two writes may have side effects.. */
+	gsc_writel(val >> 32, addr);
+	gsc_writel(val, addr+4);
+#endif
+}
+
+/*
+ * The standard PCI ioremap interfaces
+ */
+
+extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+
+/* Most machines react poorly to I/O-space being cacheable... Instead let's
+ * define ioremap() in terms of ioremap_nocache().
+ */
+static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
+{
+	return __ioremap(offset, size, _PAGE_NO_CACHE);
+}
+/* All ioremap variants map uncached on this architecture. */
+#define ioremap_nocache(off, sz)	ioremap((off), (sz))
+#define ioremap_wc			ioremap_nocache
+#define ioremap_uc			ioremap_nocache
+
+extern void iounmap(const volatile void __iomem *addr);
+
+/* Raw accessors: direct volatile loads/stores, no byteswapping. */
+static inline unsigned char __raw_readb(const volatile void __iomem *addr)
+{
+	return (*(volatile unsigned char __force *) (addr));
+}
+static inline unsigned short __raw_readw(const volatile void __iomem *addr)
+{
+	return *(volatile unsigned short __force *) addr;
+}
+static inline unsigned int __raw_readl(const volatile void __iomem *addr)
+{
+	return *(volatile unsigned int __force *) addr;
+}
+static inline unsigned long long __raw_readq(const volatile void __iomem *addr)
+{
+	return *(volatile unsigned long long __force *) addr;
+}
+
+static inline void __raw_writeb(unsigned char b, volatile void __iomem *addr)
+{
+	*(volatile unsigned char __force *) addr = b;
+}
+static inline void __raw_writew(unsigned short b, volatile void __iomem *addr)
+{
+	*(volatile unsigned short __force *) addr = b;
+}
+static inline void __raw_writel(unsigned int b, volatile void __iomem *addr)
+{
+	*(volatile unsigned int __force *) addr = b;
+}
+static inline void __raw_writeq(unsigned long long b, volatile void __iomem *addr)
+{
+	*(volatile unsigned long long __force *) addr = b;
+}
+
+/*
+ * readX()/writeX(): little-endian accessors built on the raw ops above
+ * (the bus data is little-endian; the CPU is big-endian, hence the
+ * le*_to_cpu/cpu_to_le* conversions for all widths except byte).
+ */
+static inline unsigned char readb(const volatile void __iomem *addr)
+{
+	return __raw_readb(addr);
+}
+static inline unsigned short readw(const volatile void __iomem *addr)
+{
+	return le16_to_cpu(__raw_readw(addr));
+}
+static inline unsigned int readl(const volatile void __iomem *addr)
+{
+	return le32_to_cpu(__raw_readl(addr));
+}
+static inline unsigned long long readq(const volatile void __iomem *addr)
+{
+	return le64_to_cpu(__raw_readq(addr));
+}
+
+static inline void writeb(unsigned char b, volatile void __iomem *addr)
+{
+	__raw_writeb(b, addr);
+}
+static inline void writew(unsigned short w, volatile void __iomem *addr)
+{
+	__raw_writew(cpu_to_le16(w), addr);
+}
+static inline void writel(unsigned int l, volatile void __iomem *addr)
+{
+	__raw_writel(cpu_to_le32(l), addr);
+}
+static inline void writeq(unsigned long long q, volatile void __iomem *addr)
+{
+	__raw_writeq(cpu_to_le64(q), addr);
+}
+
+#define	readb	readb
+#define	readw	readw
+#define	readl	readl
+#define readq	readq
+#define writeb	writeb
+#define writew	writew
+#define writel	writel
+#define writeq	writeq
+
+/* The relaxed variants provide no extra ordering relaxation here. */
+#define readb_relaxed(addr)	readb(addr)
+#define readw_relaxed(addr)	readw(addr)
+#define readl_relaxed(addr)	readl(addr)
+#define readq_relaxed(addr)	readq(addr)
+#define writeb_relaxed(b, addr)	writeb(b, addr)
+#define writew_relaxed(w, addr)	writew(w, addr)
+#define writel_relaxed(l, addr)	writel(l, addr)
+#define writeq_relaxed(q, addr)	writeq(q, addr)
+
+#define mmiowb() do { } while (0)
+
+void memset_io(volatile void __iomem *addr, unsigned char val, int count);
+void memcpy_fromio(void *dst, const volatile void __iomem *src, int count);
+void memcpy_toio(volatile void __iomem *dst, const void *src, int count);
+
+/* Port-space IO */
+
+#define inb_p inb
+#define inw_p inw
+#define inl_p inl
+#define outb_p outb
+#define outw_p outw
+#define outl_p outl
+
+extern unsigned char eisa_in8(unsigned short port);
+extern unsigned short eisa_in16(unsigned short port);
+extern unsigned int eisa_in32(unsigned short port);
+extern void eisa_out8(unsigned char data, unsigned short port);
+extern void eisa_out16(unsigned short data, unsigned short port);
+extern void eisa_out32(unsigned int data, unsigned short port);
+
+/*
+ * Port I/O selection: real implementations with PCI, the EISA helpers
+ * with EISA only, and BUG() stubs when neither bus is configured.
+ */
+#if defined(CONFIG_PCI)
+extern unsigned char inb(int addr);
+extern unsigned short inw(int addr);
+extern unsigned int inl(int addr);
+
+extern void outb(unsigned char b, int addr);
+extern void outw(unsigned short b, int addr);
+extern void outl(unsigned int b, int addr);
+#elif defined(CONFIG_EISA)
+#define inb eisa_in8
+#define inw eisa_in16
+#define inl eisa_in32
+#define outb eisa_out8
+#define outw eisa_out16
+#define outl eisa_out32
+#else
+static inline char inb(unsigned long addr)
+{
+	BUG();
+	return -1;
+}
+
+static inline short inw(unsigned long addr)
+{
+	BUG();
+	return -1;
+}
+
+static inline int inl(unsigned long addr)
+{
+	BUG();
+	return -1;
+}
+
+#define outb(x, y)	BUG()
+#define outw(x, y)	BUG()
+#define outl(x, y)	BUG()
+#endif
+
+/*
+ * String versions of in/out ops:
+ */
+extern void insb (unsigned long port, void *dst, unsigned long count);
+extern void insw (unsigned long port, void *dst, unsigned long count);
+extern void insl (unsigned long port, void *dst, unsigned long count);
+extern void outsb (unsigned long port, const void *src, unsigned long count);
+extern void outsw (unsigned long port, const void *src, unsigned long count);
+extern void outsl (unsigned long port, const void *src, unsigned long count);
+
+
+/* IO Port space is :      BBiiii   where BB is HBA number. */
+#define IO_SPACE_LIMIT 0x00ffffff
+
+/* PA machines have an MM I/O space from 0xf0000000-0xffffffff in 32
+ * bit mode and from 0xfffffffff0000000-0xfffffffffffffff in 64 bit
+ * mode (essentially just sign extending.  This macro takes in a 32
+ * bit I/O address (still with the leading f) and outputs the correct
+ * value for either 32 or 64 bit mode */
+#define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL)))
+
+#include <asm-generic/iomap.h>
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p)	__va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p)	p
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/irq.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/irq.h
new file mode 100644
index 0000000..959e79c
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/irq.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/asm-parisc/irq.h
+ *
+ * Copyright 2005 Matthew Wilcox <matthew@wil.cx>
+ */
+
+#ifndef _ASM_PARISC_IRQ_H
+#define _ASM_PARISC_IRQ_H
+
+#include <linux/cpumask.h>
+#include <asm/types.h>
+
+#define NO_IRQ		(-1)
+
+/* With GSC, IRQs 16-63 belong to the GSC bus and CPU IRQs start at 64. */
+#ifdef CONFIG_GSC
+#define GSC_IRQ_BASE	16
+#define GSC_IRQ_MAX	63
+#define CPU_IRQ_BASE	64
+#else
+#define CPU_IRQ_BASE	16
+#endif
+
+#define TIMER_IRQ	(CPU_IRQ_BASE + 0)
+#define	IPI_IRQ		(CPU_IRQ_BASE + 1)
+#define CPU_IRQ_MAX	(CPU_IRQ_BASE + (BITS_PER_LONG - 1))
+
+#define NR_IRQS		(CPU_IRQ_MAX + 1)
+
+/* Map legacy IRQ 2 (cascade) to 9; all other IRQs pass through unchanged. */
+static __inline__ int irq_canonicalize(int irq)
+{
+	return (irq == 2) ? 9 : irq;
+}
+
+struct irq_chip;
+struct irq_data;
+
+void cpu_ack_irq(struct irq_data *d);
+void cpu_eoi_irq(struct irq_data *d);
+
+extern int txn_alloc_irq(unsigned int nbits);
+extern int txn_claim_irq(int);
+extern unsigned int txn_alloc_data(unsigned int);
+extern unsigned long txn_alloc_addr(unsigned int);
+extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
+
+extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
+extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest);
+
+/* soft power switch support (power.c) */
+extern struct tasklet_struct power_tasklet;
+
+#endif	/* _ASM_PARISC_IRQ_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/irqflags.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/irqflags.h
new file mode 100644
index 0000000..38a19c0
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/irqflags.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PARISC_IRQFLAGS_H
+#define __PARISC_IRQFLAGS_H
+
+#include <linux/types.h>
+#include <asm/psw.h>
+
+/* Read the PSW without modifying it ("ssm 0" sets no bits). */
+static inline unsigned long arch_local_save_flags(void)
+{
+	unsigned long flags;
+	asm volatile("ssm 0, %0" : "=r" (flags) : : "memory");
+	return flags;
+}
+
+/* Clear the PSW interrupt-enable bit (PSW_I). */
+static inline void arch_local_irq_disable(void)
+{
+	asm volatile("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory");
+}
+
+/* Set the PSW interrupt-enable bit (PSW_I). */
+static inline void arch_local_irq_enable(void)
+{
+	asm volatile("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory");
+}
+
+/* Clear PSW_I and return the previous PSW value. */
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+	asm volatile("rsm %1,%0" : "=r" (flags) : "i" (PSW_I) : "memory");
+	return flags;
+}
+
+/* Restore a PSW value previously returned by arch_local_irq_save(). */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	asm volatile("mtsm %0" : : "r" (flags) : "memory");
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+	return (flags & PSW_I) == 0;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#endif /* __PARISC_IRQFLAGS_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/kbdleds.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/kbdleds.h
new file mode 100644
index 0000000..50fcce8
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/kbdleds.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_KBDLEDS_H
+#define _ASM_PARISC_KBDLEDS_H
+
+/*
+ * On HIL keyboards of PARISC machines there is no NumLock key and
+ * everyone expects the keypad to be used for numbers. That's why
+ * we can safely turn on the NUMLOCK bit.
+ */
+
+/* Default keyboard LED state: NumLock on for HIL keyboards, off otherwise. */
+static inline int kbd_defleds(void)
+{
+#if defined(CONFIG_KEYBOARD_HIL) || defined(CONFIG_KEYBOARD_HIL_OLD)
+	return 1 << VC_NUMLOCK;
+#else
+	return 0;
+#endif
+}
+
+#endif /* _ASM_PARISC_KBDLEDS_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/kmap_types.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/kmap_types.h
new file mode 100644
index 0000000..3e70b5c
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/kmap_types.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+/* Let the generic header add debug fencing between kmap types. */
+#ifdef CONFIG_DEBUG_HIGHMEM
+#define  __WITH_KM_FENCE
+#endif
+
+#include <asm-generic/kmap_types.h>
+
+#undef __WITH_KM_FENCE
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/ldcw.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/ldcw.h
new file mode 100644
index 0000000..3eb4bfc
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/ldcw.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PARISC_LDCW_H
+#define __PARISC_LDCW_H
+
+#ifndef CONFIG_PA20
+/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
+   and GCC only guarantees 8-byte alignment for stack locals, we can't
+   be assured of 16-byte alignment for atomic lock data even if we
+   specify "__attribute ((aligned(16)))" in the type declaration.  So,
+   we use a struct containing an array of four ints for the atomic lock
+   type and dynamically select the 16-byte aligned int from the array
+   for the semaphore.  */
+
+#define __PA_LDCW_ALIGNMENT	16
+#define __PA_LDCW_ALIGN_ORDER	4
+/* Round the lock array's address up to the next 16-byte boundary. */
+#define __ldcw_align(a) ({					\
+	unsigned long __ret = (unsigned long) &(a)->lock[0];	\
+	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
+		& ~(__PA_LDCW_ALIGNMENT - 1);			\
+	(volatile unsigned int *) __ret;			\
+})
+#define __LDCW	"ldcw"
+
+#else /*CONFIG_PA20*/
+/* From: "Jim Hull" <jim.hull of hp.com>
+   I've attached a summary of the change, but basically, for PA 2.0, as
+   long as the ",CO" (coherent operation) completer is specified, then the
+   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+   they only require "natural" alignment (4-byte for ldcw, 8-byte for
+   ldcd). */
+
+#define __PA_LDCW_ALIGNMENT	4
+#define __PA_LDCW_ALIGN_ORDER	2
+#define __ldcw_align(a) (&(a)->slock)
+#define __LDCW	"ldcw,co"
+
+#endif /*!CONFIG_PA20*/
+
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
+   We don't explicitly expose that "*a" may be written as reload
+   fails to find a register in class R1_REGS when "a" needs to be
+   reloaded when generating 64-bit PIC code.  Instead, we clobber
+   memory to indicate to the compiler that the assembly code reads
+   or writes to items other than those listed in the input and output
+   operands.  This may pessimize the code somewhat but __ldcw is
+   usually used within code blocks surrounded by memory barriers.  */
+#define __ldcw(a) ({						\
+	unsigned __ret;						\
+	__asm__ __volatile__(__LDCW " 0(%1),%0"			\
+		: "=r" (__ret) : "r" (a) : "memory");		\
+	__ret;							\
+})
+
+#ifdef CONFIG_SMP
+# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
+#endif
+
+#endif /* __PARISC_LDCW_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/led.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/led.h
new file mode 100644
index 0000000..6de13d0
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/led.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LED_H
+#define LED_H
+
+/* Bit masks for the individual front-panel LEDs. */
+#define	LED7		0x80		/* top (or furthest right) LED */
+#define	LED6		0x40
+#define	LED5		0x20
+#define	LED4		0x10
+#define	LED3		0x08
+#define	LED2		0x04
+#define	LED1		0x02
+#define	LED0		0x01		/* bottom (or furthest left) LED */
+
+/* Conventional meanings assigned to the LEDs above. */
+#define	LED_LAN_TX	LED0		/* for LAN transmit activity */
+#define	LED_LAN_RCV	LED1		/* for LAN receive activity */
+#define	LED_DISK_IO	LED2		/* for disk activity */
+#define	LED_HEARTBEAT	LED3		/* heartbeat */
+
+/* values for pdc_chassis_lcd_info_ret_block.model: */
+#define DISPLAY_MODEL_LCD  0		/* KittyHawk LED or LCD */
+#define DISPLAY_MODEL_NONE 1		/* no LED or LCD */
+#define DISPLAY_MODEL_LASI 2		/* LASI style 8 bit LED */
+#define DISPLAY_MODEL_OLD_ASP 0x7F	/* faked: ASP style 8 x 1 bit LED (only very old ASP versions) */
+
+#define LED_CMD_REG_NONE 0		/* NULL == no addr for the cmd register */
+
+/* register_led_driver() */
+int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long data_reg);
+
+/* registers the LED regions for procfs */
+void __init register_led_regions(void);
+
+#ifdef CONFIG_CHASSIS_LCD_LED
+/* writes a string to the LCD display (if possible on this h/w) */
+int lcd_print(const char *str);
+#else
+#define lcd_print(str)
+#endif
+
+/* main LED initialization function (uses PDC) */ 
+int __init led_init(void);
+
+#endif /* LED_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/linkage.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/linkage.h
new file mode 100644
index 0000000..9a69bf6
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/linkage.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_PARISC_LINKAGE_H
+#define __ASM_PARISC_LINKAGE_H
+
+#include <asm/dwarf.h>
+
+#ifndef __ALIGN
+#define __ALIGN         .align 4
+#define __ALIGN_STR     ".align 4"
+#endif
+
+/*
+ * In parisc assembly a semicolon marks a comment while a
+ * exclamation mark is used to separate independent lines.
+ */
+#define ASM_NL	!
+
+#ifdef __ASSEMBLY__
+
+#define ENTRY(name) \
+	.export name !\
+	ALIGN !\
+name:
+
+#ifdef CONFIG_64BIT
+#define ENDPROC(name) \
+	END(name)
+#else
+#define ENDPROC(name) \
+	.type name, @function !\
+	END(name)
+#endif
+
+#define ENTRY_CFI(name) \
+	ENTRY(name)	ASM_NL\
+	CFI_STARTPROC
+
+#define ENDPROC_CFI(name) \
+	ENDPROC(name)	ASM_NL\
+	CFI_ENDPROC
+
+#endif /* __ASSEMBLY__ */
+
+#endif  /* __ASM_PARISC_LINKAGE_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/machdep.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/machdep.h
new file mode 100644
index 0000000..215d2c4
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/machdep.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_MACHDEP_H
+#define _PARISC_MACHDEP_H
+
+#include <linux/notifier.h>
+
+#define	MACH_RESTART	1
+#define	MACH_HALT	2
+#define MACH_POWER_ON	3
+#define	MACH_POWER_OFF	4
+
+extern struct notifier_block *mach_notifier;
+extern void pa7300lc_init(void);
+
+extern void (*cpu_lpmc)(int, struct pt_regs *);
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/mckinley.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/mckinley.h
new file mode 100644
index 0000000..eb84dbe
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/mckinley.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_PARISC_MCKINLEY_H
+#define ASM_PARISC_MCKINLEY_H
+#ifdef __KERNEL__
+
+/* declared in arch/parisc/kernel/setup.c */
+extern struct proc_dir_entry * proc_mckinley_root;
+
+#endif /*__KERNEL__*/
+#endif /*ASM_PARISC_MCKINLEY_H*/
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/mmu.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/mmu.h
new file mode 100644
index 0000000..3fb70a6
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/mmu.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_MMU_H_
+#define _PARISC_MMU_H_
+
+/* On parisc, we store the space id here */
+typedef unsigned long mm_context_t;
+
+#endif /* _PARISC_MMU_H_ */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/mmu_context.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/mmu_context.h
new file mode 100644
index 0000000..697a906
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/mmu_context.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PARISC_MMU_CONTEXT_H
+#define __PARISC_MMU_CONTEXT_H
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm-generic/mm_hooks.h>
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) /* no-op hook: nothing to do on parisc when entering lazy-TLB mode */
+{
+}
+
+/* on PA-RISC, we actually have enough contexts to justify an allocator
+ * for them.  prumpf */
+
+extern unsigned long alloc_sid(void);
+extern void free_sid(unsigned long);
+
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm) /* give a brand-new mm its own space id */
+{
+	BUG_ON(atomic_read(&mm->mm_users) != 1); /* only legal while the new mm is still unshared */
+
+	mm->context = alloc_sid(); /* hand out a fresh space id for this address space */
+	return 0; /* cannot fail */
+}
+
+static inline void
+destroy_context(struct mm_struct *mm) /* release the mm's space id on teardown */
+{
+	free_sid(mm->context); /* return the space id to the allocator */
+	mm->context = 0; /* 0 == "no space id allocated" (see activate_mm) — prevents stale reuse */
+}
+
+static inline unsigned long __space_to_prot(mm_context_t context) /* map a space id to the protection-id value written to cr8 (see load_context) */
+{
+#if SPACEID_SHIFT == 0
+	return context << 1;
+#else
+	return context >> (SPACEID_SHIFT - 1); /* scaling depends on how space ids are packed (SPACEID_SHIFT) */
+#endif
+}
+
+static inline void load_context(mm_context_t context) /* install an mm's space id into the CPU */
+{
+	mtsp(context, 3); /* space register 3 <- space id */
+	mtctl(__space_to_prot(context), 8); /* control register 8 <- matching protection id */
+}
+
+static inline void switch_mm_irqs_off(struct mm_struct *prev, /* caller must have IRQs disabled (see switch_mm) */
+		struct mm_struct *next, struct task_struct *tsk)
+{
+	if (prev != next) {
+		mtctl(__pa(next->pgd), 25); /* control register 25 <- physical address of the new page directory */
+		load_context(next->context); /* then install its space/protection ids */
+	}
+}
+
+static inline void switch_mm(struct mm_struct *prev, /* switch the CPU to a different address space */
+		struct mm_struct *next, struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	if (prev == next)
+		return; /* same address space: nothing to reload */
+
+	local_irq_save(flags); /* perform the pgd/context reload with IRQs disabled */
+	switch_mm_irqs_off(prev, next, tsk);
+	local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+#define deactivate_mm(tsk,mm)	do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+	/*
+	 * Activate_mm is our one chance to allocate a space id
+	 * for a new mm created in the exec path. There's also
+	 * some lazy tlb stuff, which is currently dead code, but
+	 * we only allocate a space id if one hasn't been allocated
+	 * already, so we should be OK.
+	 */
+
+	BUG_ON(next == &init_mm); /* Should never happen */
+
+	if (next->context == 0)
+	    next->context = alloc_sid(); /* 0 == no space id allocated yet */
+
+	switch_mm(prev,next,current);
+}
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/mmzone.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/mmzone.h
new file mode 100644
index 0000000..fafa389
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/mmzone.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_MMZONE_H
+#define _PARISC_MMZONE_H
+
+#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
+
+#ifdef CONFIG_DISCONTIGMEM
+
+extern int npmem_ranges;
+
+struct node_map_data {
+    pg_data_t pg_data;
+};
+
+extern struct node_map_data node_data[];
+
+#define NODE_DATA(nid)          (&node_data[nid].pg_data)
+
+/* We have these possible memory map layouts:
+ * Astro: 0-3.75, 67.75-68, 4-64
+ * zx1: 0-1, 257-260, 4-256
+ * Stretch (N-class): 0-2, 4-32, 34-xxx
+ */
+
+/* Since each 1GB can only belong to one region (node), we can create
+ * an index table for pfn to nid lookup; each entry in pfnnid_map 
+ * represents 1GB, and contains the node that the memory belongs to. */
+
+#define PFNNID_SHIFT (30 - PAGE_SHIFT)
+#define PFNNID_MAP_MAX  512     /* support 512GB */
+extern signed char pfnnid_map[PFNNID_MAP_MAX];
+
+#ifndef CONFIG_64BIT
+#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
+#else
+/* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */
+#define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT))
+#endif
+
+static inline int pfn_to_nid(unsigned long pfn) /* look up which node owns a page frame, via the 1GB-granular pfnnid_map */
+{
+	unsigned int i;
+
+	if (unlikely(pfn_is_io(pfn)))
+		return 0; /* I/O-space pfns are charged to node 0 */
+
+	i = pfn >> PFNNID_SHIFT; /* index of the 1GB chunk this pfn lies in */
+	BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
+
+	return pfnnid_map[i]; /* map entries are signed chars; presumably negative for holes (pfn_valid treats <0 as invalid) */
+}
+
+static inline int pfn_valid(int pfn) /* NOTE(review): 'int pfn' narrows callers' unsigned long pfns — confirm intended */
+{
+	int nid = pfn_to_nid(pfn);
+
+	if (nid >= 0)
+		return (pfn < node_end_pfn(nid)); /* valid only while inside that node's span */
+	return 0; /* negative nid: pfn falls in a hole belonging to no node */
+}
+
+#endif
+#endif /* _PARISC_MMZONE_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/module.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/module.h
new file mode 100644
index 0000000..c8c131a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/module.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_MODULE_H
+#define _ASM_PARISC_MODULE_H
+
+#include <asm-generic/module.h>
+
+/*
+ * This file contains the parisc architecture specific module code.
+ */
+
+struct unwind_table;
+
+struct mod_arch_specific
+{
+	unsigned long got_offset, got_count, got_max; /* presumably the module's global offset table region — confirm against module.c */
+	unsigned long fdesc_offset, fdesc_count, fdesc_max; /* presumably function-descriptor bookkeeping — confirm against module.c */
+	struct {
+		unsigned long stub_offset;
+		unsigned int stub_entries;
+		} *section; /* per-ELF-section stub accounting */
+	int unwind_section;
+	struct unwind_table *unwind; /* opaque handle; see struct unwind_table fwd-decl above */
+};
+
+#endif /* _ASM_PARISC_MODULE_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/page.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/page.h
new file mode 100644
index 0000000..af00fe9
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/page.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_PAGE_H
+#define _PARISC_PAGE_H
+
+#include <linux/const.h>
+
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define PAGE_SHIFT	12
+#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
+# define PAGE_SHIFT	14
+#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
+# define PAGE_SHIFT	16
+#else
+# error "unknown default kernel page size"
+#endif
+#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_MASK	(~(PAGE_SIZE-1))
+
+
+#ifndef __ASSEMBLY__
+
+#include <asm/types.h>
+#include <asm/cache.h>
+
+#define clear_page(page)	clear_page_asm((void *)(page))
+#define copy_page(to, from)	copy_page_asm((void *)(to), (void *)(from))
+
+struct page;
+
+void clear_page_asm(void *page);
+void copy_page_asm(void *to, void *from);
+#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+			struct page *pg);
+
+/*
+ * These are used to make use of C type-checking..
+ */
+#define STRICT_MM_TYPECHECKS
+#ifdef STRICT_MM_TYPECHECKS
+typedef struct { unsigned long pte; } pte_t; /* either 32 or 64bit */
+
+/* NOTE: even on 64 bits, these entries are __u32 because we allocate
+ * the pmd and pgd in ZONE_DMA (i.e. under 4GB) */
+typedef struct { __u32 pmd; } pmd_t;
+typedef struct { __u32 pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x)	((x).pte)
+/* These do not work lvalues, so make sure we don't use them as such. */
+#define pmd_val(x)	((x).pmd + 0)
+#define pgd_val(x)	((x).pgd + 0)
+#define pgprot_val(x)	((x).pgprot)
+
+#define __pte(x)	((pte_t) { (x) } )
+#define __pmd(x)	((pmd_t) { (x) } )
+#define __pgd(x)	((pgd_t) { (x) } )
+#define __pgprot(x)	((pgprot_t) { (x) } )
+
+#define __pmd_val_set(x,n) (x).pmd = (n)
+#define __pgd_val_set(x,n) (x).pgd = (n)
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef         __u32 pmd_t;
+typedef         __u32 pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x)      (x)
+#define pmd_val(x)      (x)
+#define pgd_val(x)      (x)
+#define pgprot_val(x)   (x)
+
+#define __pte(x)        (x)
+#define __pmd(x)	(x)
+#define __pgd(x)        (x)
+#define __pgprot(x)     (x)
+
+#define __pmd_val_set(x,n) (x) = (n)
+#define __pgd_val_set(x,n) (x) = (n)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+typedef struct page *pgtable_t;
+
+typedef struct __physmem_range {
+	unsigned long start_pfn;
+	unsigned long pages;       /* PAGE_SIZE pages */
+} physmem_range_t;
+
+extern physmem_range_t pmem_ranges[];
+extern int npmem_ranges;
+
+#endif /* !__ASSEMBLY__ */
+
+/* WARNING: The definitions below must match exactly to sizeof(pte_t)
+ * etc
+ */
+#ifdef CONFIG_64BIT
+#define BITS_PER_PTE_ENTRY	3
+#define BITS_PER_PMD_ENTRY	2
+#define BITS_PER_PGD_ENTRY	2
+#else
+#define BITS_PER_PTE_ENTRY	2
+#define BITS_PER_PMD_ENTRY	2
+#define BITS_PER_PGD_ENTRY	BITS_PER_PMD_ENTRY
+#endif
+#define PGD_ENTRY_SIZE	(1UL << BITS_PER_PGD_ENTRY)
+#define PMD_ENTRY_SIZE	(1UL << BITS_PER_PMD_ENTRY)
+#define PTE_ENTRY_SIZE	(1UL << BITS_PER_PTE_ENTRY)
+
+#define LINUX_GATEWAY_SPACE     0
+
+/* This governs the relationship between virtual and physical addresses.
+ * If you alter it, make sure to take care of our various fixed mapping
+ * segments in fixmap.h */
+#if defined(BOOTLOADER)
+#define __PAGE_OFFSET	(0)		/* bootloader uses physical addresses */
+#else
+#ifdef CONFIG_64BIT
+#define __PAGE_OFFSET	(0x40000000)	/* 1GB */
+#else
+#define __PAGE_OFFSET	(0x10000000)	/* 256MB */
+#endif
+#endif /* BOOTLOADER */
+
+#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
+
+/* The size of the gateway page (we leave lots of room for expansion) */
+#define GATEWAY_PAGE_SIZE	0x4000
+
+/* The start of the actual kernel binary---used in vmlinux.lds.S
+ * Leave some space after __PAGE_OFFSET for detecting kernel null
+ * ptr derefs */
+#define KERNEL_BINARY_TEXT_START	(__PAGE_OFFSET + 0x100000)
+
+/* These macros don't work for 64-bit C code -- don't allow in C at all */
+#ifdef __ASSEMBLY__
+#   define PA(x)	((x)-__PAGE_OFFSET)
+#   define VA(x)	((x)+__PAGE_OFFSET)
+#endif
+#define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
+
+#ifndef CONFIG_DISCONTIGMEM
+#define pfn_valid(pfn)		((pfn) < max_mapnr)
+#endif /* CONFIG_DISCONTIGMEM */
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SHIFT		PMD_SHIFT /* fixed for transparent huge pages */
+#define HPAGE_SIZE      	((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define REAL_HPAGE_SHIFT	20 /* 20 = 1MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_1M
+#elif !defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define REAL_HPAGE_SHIFT	22 /* 22 = 4MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4M
+#else
+# define REAL_HPAGE_SHIFT	24 /* 24 = 16MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16M
+#endif
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
+#define virt_to_page(kaddr)     pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
+				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+#include <asm/pdc.h>
+
+#define PAGE0   ((struct zeropage *)__PAGE_OFFSET)
+
+/* DEFINITION OF THE ZERO-PAGE (PAG0) */
+/* based on work by Jason Eckhardt (jason@equator.com) */
+
+#endif /* _PARISC_PAGE_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/parisc-device.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/parisc-device.h
new file mode 100644
index 0000000..d02d144
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/parisc-device.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_PARISC_DEVICE_H_
+#define _ASM_PARISC_PARISC_DEVICE_H_
+
+#include <linux/device.h>
+
+struct parisc_device {
+	struct resource hpa;		/* Hard Physical Address */
+	struct parisc_device_id id;
+	struct parisc_driver *driver;	/* Driver for this device */
+	char		name[80];	/* The hardware description */
+	int		irq;
+	int		aux_irq;	/* Some devices have a second IRQ */
+
+	char		hw_path;        /* The module number on this bus */
+	unsigned int	num_addrs;	/* some devices have additional address ranges. */
+	unsigned long	*addr;          /* which will be stored here */
+ 
+#ifdef CONFIG_64BIT
+	/* parms for pdc_pat_cell_module() call */
+	unsigned long	pcell_loc;	/* Physical Cell location */
+	unsigned long	mod_index;	/* PAT specific - Misc Module info */
+
+	/* generic info returned from pdc_pat_cell_module() */
+	unsigned long	mod_info;	/* PAT specific - Misc Module info */
+	unsigned long	pmod_loc;	/* physical Module location */
+	unsigned long	mod0;
+#endif
+	u64		dma_mask;	/* DMA mask for I/O */
+	struct device 	dev;
+};
+
+struct parisc_driver {
+	struct parisc_driver *next;
+	char *name; 
+	const struct parisc_device_id *id_table;
+	int (*probe) (struct parisc_device *dev); /* New device discovered */
+	int (*remove) (struct parisc_device *dev);
+	struct device_driver drv;
+};
+
+
+#define to_parisc_device(d)	container_of(d, struct parisc_device, dev)
+#define to_parisc_driver(d)	container_of(d, struct parisc_driver, drv)
+#define parisc_parent(d)	to_parisc_device(d->dev.parent)
+
+static inline const char *parisc_pathname(struct parisc_device *d) /* the device's registered name — presumably its hardware path; confirm */
+{
+	return dev_name(&d->dev);
+}
+
+static inline void
+parisc_set_drvdata(struct parisc_device *d, void *p) /* thin wrapper: stash driver-private data on the device */
+{
+	dev_set_drvdata(&d->dev, p);
+}
+
+static inline void *
+parisc_get_drvdata(struct parisc_device *d) /* thin wrapper: fetch what parisc_set_drvdata stored */
+{
+	return dev_get_drvdata(&d->dev);
+}
+
+extern struct bus_type parisc_bus_type;
+
+int iosapic_serial_irq(struct parisc_device *dev);
+
+#endif /*_ASM_PARISC_PARISC_DEVICE_H_*/
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/parport.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/parport.h
new file mode 100644
index 0000000..2c8e232
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/parport.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* 
+ *
+ * parport.h: ia32-compatible parport initialisation
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+#ifndef _ASM_PARPORT_H
+#define _ASM_PARPORT_H 1
+
+
+static int parport_pc_find_nonpci_ports (int autoirq, int autodma)
+{
+	/* no non-PCI (ISA-style) parallel ports exist on this platform; nothing to probe */
+	return 0;
+}
+
+
+#endif /* !(_ASM_PARPORT_H) */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/pci.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/pci.h
new file mode 100644
index 0000000..6108e9d
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/pci.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_PARISC_PCI_H
+#define __ASM_PARISC_PCI_H
+
+#include <linux/scatterlist.h>
+
+
+
+/*
+** HP PCI platforms generally support multiple bus adapters.
+**    (workstations 1-~4, servers 2-~32)
+**
+** Newer platforms number the busses across PCI bus adapters *sparsely*.
+** E.g. 0, 8, 16, ...
+**
+** Under a PCI bus, most HP platforms support PPBs up to two or three
+** levels deep. See "Bit3" product line. 
+*/
+#define PCI_MAX_BUSSES	256
+
+
+/* To be used as: mdelay(pci_post_reset_delay);
+ *
+ * post_reset is the time the kernel should stall to prevent anyone from
+ * accessing the PCI bus once #RESET is de-asserted. 
+ * PCI spec somewhere says 1 second but with multi-PCI bus systems,
+ * this makes the boot time much longer than necessary.
+ * 20ms seems to work for all the HP PCI implementations to date.
+ */
+#define pci_post_reset_delay 50
+
+
+/*
+** pci_hba_data (aka H2P_OBJECT in HP/UX)
+**
+** This is the "common" or "base" data structure which HBA drivers
+** (eg Dino or LBA) are required to place at the top of their own
+** platform_data structure.  I've heard this called "C inheritance" too.
+**
+** Data needed by pcibios layer belongs here.
+*/
+struct pci_hba_data {
+	void __iomem   *base_addr;	/* aka Host Physical Address */
+	const struct parisc_device *dev; /* device from PA bus walk */
+	struct pci_bus *hba_bus;	/* primary PCI bus below HBA */
+	int		hba_num;	/* I/O port space access "key" */
+	struct resource bus_num;	/* PCI bus numbers */
+	struct resource io_space;	/* PIOP */
+	struct resource lmmio_space;	/* bus addresses < 4Gb */
+	struct resource elmmio_space;	/* additional bus addresses < 4Gb */
+	struct resource gmmio_space;	/* bus addresses > 4Gb */
+
+	/* NOTE: Dino code assumes it can use *all* of the lmmio_space,
+	 * elmmio_space and gmmio_space as a contiguous array of
+	 * resources.  This #define represents the array size */
+	#define DINO_MAX_LMMIO_RESOURCES	3
+
+	unsigned long   lmmio_space_offset;  /* CPU view - PCI view */
+	void *          iommu;          /* IOMMU this device is under */
+	/* REVISIT - spinlock to protect resources? */
+
+	#define HBA_NAME_SIZE 16
+	char io_name[HBA_NAME_SIZE];
+	char lmmio_name[HBA_NAME_SIZE];
+	char elmmio_name[HBA_NAME_SIZE];
+	char gmmio_name[HBA_NAME_SIZE];
+};
+
+#define HBA_DATA(d)		((struct pci_hba_data *) (d))
+
+/* 
+** We support 2^16 I/O ports per HBA.  These are set up in the form
+** 0xbbxxxx, where bb is the bus number and xxxx is the I/O port
+** space address.
+*/
+#define HBA_PORT_SPACE_BITS	16
+
+#define HBA_PORT_BASE(h)	((h) << HBA_PORT_SPACE_BITS)
+#define HBA_PORT_SPACE_SIZE	(1UL << HBA_PORT_SPACE_BITS)
+
+#define PCI_PORT_HBA(a)		((a) >> HBA_PORT_SPACE_BITS)
+#define PCI_PORT_ADDR(a)	((a) & (HBA_PORT_SPACE_SIZE - 1))
+
+#ifdef CONFIG_64BIT
+#define PCI_F_EXTEND		0xffffffff00000000UL
+#else	/* !CONFIG_64BIT */
+#define PCI_F_EXTEND		0UL
+#endif /* !CONFIG_64BIT */
+
+/*
+** KLUGE: linux/pci.h include asm/pci.h BEFORE declaring struct pci_bus
+** (This eliminates some of the warnings).
+*/
+struct pci_bus;
+struct pci_dev;
+
+/*
+ * If the PCI device's view of memory is the same as the CPU's view of memory,
+ * PCI_DMA_BUS_IS_PHYS is true.  The networking and block device layers use
+ * this boolean for bounce buffer decisions.
+ */
+#ifdef CONFIG_PA20
+/* All PA-2.0 machines have an IOMMU. */
+#define PCI_DMA_BUS_IS_PHYS	0
+#define parisc_has_iommu()	do { } while (0)
+#else
+
+#if defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA)
+extern int parisc_bus_is_phys; 	/* in arch/parisc/kernel/setup.c */
+#define PCI_DMA_BUS_IS_PHYS	parisc_bus_is_phys
+#define parisc_has_iommu()	do { parisc_bus_is_phys = 0; } while (0)
+#else
+#define PCI_DMA_BUS_IS_PHYS	1
+#define parisc_has_iommu()	do { } while (0)
+#endif
+
+#endif	/* !CONFIG_PA20 */
+
+
+/*
+** Most PCI devices (eg Tulip, NCR720) also export the same registers
+** to both MMIO and I/O port space.  Due to poor performance of I/O Port
+** access under HP PCI bus adapters, strongly recommend the use of MMIO
+** address space.
+**
+** While I'm at it more PA programming notes:
+**
+** 1) MMIO stores (writes) are posted operations. This means the processor
+**    gets an "ACK" before the write actually gets to the device. A read
+**    to the same device (or typically the bus adapter above it) will
+**    force in-flight write transaction(s) out to the targeted device
+**    before the read can complete.
+**
+** 2) The Programmed I/O (PIO) data may not always be strongly ordered with
+**    respect to DMA on all platforms. Ie PIO data can reach the processor
+**    before in-flight DMA reaches memory. Since most SMP PA platforms
+**    are I/O coherent, it generally doesn't matter...but sometimes
+**    it does.
+**
+** I've helped device driver writers debug both types of problems.
+*/
+struct pci_port_ops {
+	  u8 (*inb)  (struct pci_hba_data *hba, u16 port);
+	 u16 (*inw)  (struct pci_hba_data *hba, u16 port);
+	 u32 (*inl)  (struct pci_hba_data *hba, u16 port);
+	void (*outb) (struct pci_hba_data *hba, u16 port,  u8 data);
+	void (*outw) (struct pci_hba_data *hba, u16 port, u16 data);
+	void (*outl) (struct pci_hba_data *hba, u16 port, u32 data);
+};
+
+
+struct pci_bios_ops {
+	void (*init)(void);
+	void (*fixup_bus)(struct pci_bus *bus);
+};
+
+/*
+** Stuff declared in arch/parisc/kernel/pci.c
+*/
+extern struct pci_port_ops *pci_port;
+extern struct pci_bios_ops *pci_bios;
+
+#ifdef CONFIG_PCI
+extern void pcibios_register_hba(struct pci_hba_data *);
+extern void pcibios_set_master(struct pci_dev *);
+#else
+static inline void pcibios_register_hba(struct pci_hba_data *x)
+{
+}
+#endif
+extern void pcibios_init_bridge(struct pci_dev *);
+
+/*
+ * pcibios_assign_all_busses() is used in drivers/pci/pci.c:pci_do_scan_bus()
+ *   0 == check if bridge is numbered before re-numbering.
+ *   1 == pci_do_scan_bus() should automatically number all PCI-PCI bridges.
+ *
+ *   We *should* set this to zero for "legacy" platforms and one
+ *   for PAT platforms.
+ *
+ *   But legacy platforms also need to renumber the busses below a Host
+ *   Bus controller.  Adding a 4-port Tulip card on the first PCI root
+ *   bus of a C200 resulted in the secondary bus being numbered as 1.
+ *   The second PCI host bus controller's root bus had already been
+ *   assigned bus number 1 by firmware and sysfs complained.
+ *
+ *   Firmware isn't doing anything wrong here since each controller
+ *   is its own PCI domain.  It's simpler and easier for us to renumber
+ *   the busses rather than treat each Dino as a separate PCI domain.
+ *   Eventually, we may want to introduce PCI domains for Superdome or
+ *   rp7420/8420 boxes and then revisit this issue.
+ */
+#define pcibios_assign_all_busses()     (1)
+
+#define PCIBIOS_MIN_IO          0x10
+#define PCIBIOS_MIN_MEM         0x1000 /* NBPG - but pci/setup-res.c dies */
+
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) /* dev unused: IRQs are fixed */
+{
+	return channel ? 15 : 14; /* classic legacy IDE IRQs: primary channel 14, secondary 15 */
+}
+
+#define HAVE_PCI_MMAP
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+
+#endif /* __ASM_PARISC_PCI_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/pdc.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/pdc.h
new file mode 100644
index 0000000..efee44a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/pdc.h
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_PDC_H
+#define _PARISC_PDC_H
+
+#include <uapi/asm/pdc.h>
+
+#if !defined(__ASSEMBLY__)
+
+extern int parisc_narrow_firmware;
+
+extern int pdc_type;
+extern unsigned long parisc_cell_num; /* cell number the CPU runs on (PAT) */
+extern unsigned long parisc_cell_loc; /* cell location of CPU (PAT)	   */
+
+/* Values for pdc_type */
+#define PDC_TYPE_ILLEGAL	-1
+#define PDC_TYPE_PAT		 0 /* 64-bit PAT-PDC */
+#define PDC_TYPE_SYSTEM_MAP	 1 /* 32-bit, but supports PDC_SYSTEM_MAP */
+#define PDC_TYPE_SNAKE		 2 /* Doesn't support SYSTEM_MAP */
+
+struct pdc_chassis_info {       /* for PDC_CHASSIS_INFO */
+	unsigned long actcnt;   /* actual number of bytes returned */
+	unsigned long maxcnt;   /* maximum number of bytes that could be returned */
+};
+
+struct pdc_coproc_cfg {         /* for PDC_COPROC_CFG */
+        unsigned long ccr_functional;
+        unsigned long ccr_present;
+        unsigned long revision;
+        unsigned long model;
+};
+
+struct pdc_model {		/* for PDC_MODEL */
+	unsigned long hversion;
+	unsigned long sversion;
+	unsigned long hw_id;
+	unsigned long boot_id;
+	unsigned long sw_id;
+	unsigned long sw_cap;
+	unsigned long arch_rev;
+	unsigned long pot_key;
+	unsigned long curr_key;
+};
+
+struct pdc_cache_cf {		/* for PDC_CACHE  (I/D-caches) */
+    unsigned long
+#ifdef CONFIG_64BIT
+		cc_padW:32,
+#endif
+		cc_alias: 4,	/* alias boundaries for virtual addresses   */
+		cc_block: 4,	/* to determine most efficient stride */
+		cc_line	: 3,	/* maximum amount written back as a result of store (multiple of 16 bytes) */
+		cc_shift: 2,	/* how much to shift cc_block left */
+		cc_wt	: 1,	/* 0 = WT-Dcache, 1 = WB-Dcache */
+		cc_sh	: 2,	/* 0 = separate I/D-cache, else shared I/D-cache */
+		cc_cst  : 3,	/* 0 = incoherent D-cache, 1=coherent D-cache */
+		cc_pad1 : 10,	/* reserved */
+		cc_hv   : 3;	/* hversion dependent */
+};
+
+struct pdc_tlb_cf {		/* for PDC_CACHE (I/D-TLB's) */
+    unsigned long tc_pad0:12,	/* reserved */
+#ifdef CONFIG_64BIT
+		tc_padW:32,
+#endif
+		tc_sh	: 2,	/* 0 = separate I/D-TLB, else shared I/D-TLB */
+		tc_hv   : 1,	/* HV */
+		tc_page : 1,	/* 0 = 2K page-size-machine, 1 = 4k page size */
+		tc_cst  : 3,	/* 0 = incoherent operations, else coherent operations */
+		tc_aid  : 5,	/* ITLB: width of access ids of processor (encoded!) */
+		tc_sr   : 8;	/* ITLB: width of space-registers (encoded) */
+};
+
+struct pdc_cache_info {		/* main-PDC_CACHE-structure (caches & TLB's) */
+	/* I-cache */
+	unsigned long	ic_size;	/* size in bytes */
+	struct pdc_cache_cf ic_conf;	/* configuration */
+	unsigned long	ic_base;	/* base-addr */
+	unsigned long	ic_stride;
+	unsigned long	ic_count;
+	unsigned long	ic_loop;
+	/* D-cache */
+	unsigned long	dc_size;	/* size in bytes */
+	struct pdc_cache_cf dc_conf;	/* configuration */
+	unsigned long	dc_base;	/* base-addr */
+	unsigned long	dc_stride;
+	unsigned long	dc_count;
+	unsigned long	dc_loop;
+	/* Instruction-TLB */
+	unsigned long	it_size;	/* number of entries in I-TLB */
+	struct pdc_tlb_cf it_conf;	/* I-TLB-configuration */
+	unsigned long	it_sp_base;
+	unsigned long	it_sp_stride;
+	unsigned long	it_sp_count;
+	unsigned long	it_off_base;
+	unsigned long	it_off_stride;
+	unsigned long	it_off_count;
+	unsigned long	it_loop;
+	/* data-TLB */
+	unsigned long	dt_size;	/* number of entries in D-TLB */
+	struct pdc_tlb_cf dt_conf;	/* D-TLB-configuration */
+	unsigned long	dt_sp_base;
+	unsigned long	dt_sp_stride;
+	unsigned long	dt_sp_count;
+	unsigned long	dt_off_base;
+	unsigned long	dt_off_stride;
+	unsigned long	dt_off_count;
+	unsigned long	dt_loop;
+};
+
+#if 0
+/* If you start using the next struct, you'll have to adjust it to
+ * work with 64-bit firmware I think -PB
+ */
+struct pdc_iodc {     /* PDC_IODC */
+	unsigned char   hversion_model;
+	unsigned char 	hversion;
+	unsigned char 	spa;
+	unsigned char 	type;
+	unsigned int	sversion_rev:4;
+	unsigned int	sversion_model:19;
+	unsigned int	sversion_opt:8;
+	unsigned char	rev;
+	unsigned char	dep;
+	unsigned char	features;
+	unsigned char	pad1;
+	unsigned int	checksum:16;
+	unsigned int	length:16;
+	unsigned int    pad[15];
+} __attribute__((aligned(8))) ;
+#endif
+
+#ifndef CONFIG_PA20
+/* no BLTBs in pa2.0 processors */
+struct pdc_btlb_info_range {
+	__u8 res00;
+	__u8 num_i;
+	__u8 num_d;
+	__u8 num_comb;
+};
+
+struct pdc_btlb_info {	/* PDC_BLOCK_TLB, return of PDC_BTLB_INFO */
+	unsigned int min_size;	/* minimum size of BTLB in pages */
+	unsigned int max_size;	/* maximum size of BTLB in pages */
+	struct pdc_btlb_info_range fixed_range_info;
+	struct pdc_btlb_info_range variable_range_info;
+};
+
+#endif /* !CONFIG_PA20 */
+
+struct pdc_mem_retinfo { /* PDC_MEM/PDC_MEM_MEMINFO (return info) */
+	unsigned long pdt_size;
+	unsigned long pdt_entries;
+	unsigned long pdt_status;
+	unsigned long first_dbe_loc;
+	unsigned long good_mem;
+};
+
+struct pdc_mem_read_pdt { /* PDC_MEM/PDC_MEM_READ_PDT (return info) */
+	unsigned long pdt_entries;
+};
+
+#ifdef CONFIG_64BIT
+struct pdc_memory_table_raddr { /* PDC_MEM/PDC_MEM_TABLE (return info) */
+	unsigned long entries_returned;
+	unsigned long entries_total;
+};
+
+struct pdc_memory_table {       /* PDC_MEM/PDC_MEM_TABLE (arguments) */
+	unsigned long paddr;
+	unsigned int  pages;
+	unsigned int  reserved;
+};
+#endif /* CONFIG_64BIT */
+
+struct pdc_system_map_mod_info { /* PDC_SYSTEM_MAP/FIND_MODULE */
+	unsigned long mod_addr;
+	unsigned long mod_pgs;
+	unsigned long add_addrs;
+};
+
+struct pdc_system_map_addr_info { /* PDC_SYSTEM_MAP/FIND_ADDRESS */
+	unsigned long mod_addr;
+	unsigned long mod_pgs;
+};
+
+struct pdc_initiator { /* PDC_INITIATOR */
+	int host_id;
+	int factor;
+	int width;
+	int mode;
+};
+
+struct hardware_path {
+	char  flags;	/* see bit definitions below */
+	char  bc[6];	/* Bus Converter routing info to a specific */
+			/* I/O adaptor (< 0 means none, > 63 resvd) */
+	char  mod;	/* fixed field of specified module */
+};
+
+/*
+ * Device path specifications used by PDC.
+ */
+struct pdc_module_path {
+	struct hardware_path path;
+	unsigned int layers[6]; /* device-specific info (ctlr #, unit # ...) */
+};
+
+#ifndef CONFIG_PA20
+/* Only used on some pre-PA2.0 boxes */
+struct pdc_memory_map {		/* PDC_MEMORY_MAP */
+	unsigned long hpa;	/* mod's register set address */
+	unsigned long more_pgs;	/* number of additional I/O pgs */
+};
+#endif
+
+struct pdc_tod {
+	unsigned long tod_sec; 
+	unsigned long tod_usec;
+};
+
+/* architected results from PDC_PIM/transfer hpmc on a PA1.1 machine */
+
+struct pdc_hpmc_pim_11 { /* PDC_PIM */
+	__u32 gr[32];
+	__u32 cr[32];
+	__u32 sr[8];
+	__u32 iasq_back;
+	__u32 iaoq_back;
+	__u32 check_type;
+	__u32 cpu_state;
+	__u32 rsvd1;
+	__u32 cache_check;
+	__u32 tlb_check;
+	__u32 bus_check;
+	__u32 assists_check;
+	__u32 rsvd2;
+	__u32 assist_state;
+	__u32 responder_addr;
+	__u32 requestor_addr;
+	__u32 path_info;
+	__u64 fr[32];
+};
+
+/*
+ * architected results from PDC_PIM/transfer hpmc on a PA2.0 machine
+ *
+ * Note that PDC_PIM doesn't care whether or not wide mode was enabled
+ * so the results are different on  PA1.1 vs. PA2.0 when in narrow mode.
+ *
+ * Note also that there are unarchitected results available, which
+ * are hversion dependent. Do a "ser pim 0 hpmc" after rebooting, since
+ * the firmware is probably the best way of printing hversion dependent
+ * data.
+ */
+
+struct pdc_hpmc_pim_20 { /* PDC_PIM */
+	__u64 gr[32];
+	__u64 cr[32];
+	__u64 sr[8];
+	__u64 iasq_back;
+	__u64 iaoq_back;
+	__u32 check_type;
+	__u32 cpu_state;
+	__u32 cache_check;
+	__u32 tlb_check;
+	__u32 bus_check;
+	__u32 assists_check;
+	__u32 assist_state;
+	__u32 path_info;
+	__u64 responder_addr;
+	__u64 requestor_addr;
+	__u64 fr[32];
+};
+
+void pdc_console_init(void);	/* in pdc_console.c */
+void pdc_console_restart(void);
+
+void setup_pdc(void);		/* in inventory.c */
+
+/* wrapper-functions from pdc.c */
+
+int pdc_add_valid(unsigned long address);
+int pdc_instr(unsigned int *instr);
+int pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len);
+int pdc_chassis_disp(unsigned long disp);
+int pdc_chassis_warn(unsigned long *warn);
+int pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info);
+int pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info);
+int pdc_iodc_read(unsigned long *actcnt, unsigned long hpa, unsigned int index,
+		  void *iodc_data, unsigned int iodc_data_size);
+int pdc_system_map_find_mods(struct pdc_system_map_mod_info *pdc_mod_info,
+			     struct pdc_module_path *mod_path, long mod_index);
+int pdc_system_map_find_addrs(struct pdc_system_map_addr_info *pdc_addr_info,
+			      long mod_index, long addr_index);
+int pdc_model_info(struct pdc_model *model);
+int pdc_model_sysmodel(char *name);
+int pdc_model_cpuid(unsigned long *cpu_id);
+int pdc_model_versions(unsigned long *versions, int id);
+int pdc_model_capabilities(unsigned long *capabilities);
+int pdc_cache_info(struct pdc_cache_info *cache);
+int pdc_spaceid_bits(unsigned long *space_bits);
+#ifndef CONFIG_PA20
+int pdc_btlb_info(struct pdc_btlb_info *btlb);
+int pdc_mem_map_hpa(struct pdc_memory_map *r_addr, struct pdc_module_path *mod_path);
+#endif /* !CONFIG_PA20 */
+int pdc_lan_station_id(char *lan_addr, unsigned long net_hpa);
+
+int pdc_stable_read(unsigned long staddr, void *memaddr, unsigned long count);
+int pdc_stable_write(unsigned long staddr, void *memaddr, unsigned long count);
+int pdc_stable_get_size(unsigned long *size);
+int pdc_stable_verify_contents(void);
+int pdc_stable_initialize(void);
+
+int pdc_pci_irt_size(unsigned long *num_entries, unsigned long hpa);
+int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl);
+
+int pdc_get_initiator(struct hardware_path *, struct pdc_initiator *);
+int pdc_tod_read(struct pdc_tod *tod);
+int pdc_tod_set(unsigned long sec, unsigned long usec);
+
+void pdc_pdt_init(void);	/* in pdt.c */
+int pdc_mem_pdt_info(struct pdc_mem_retinfo *rinfo);
+int pdc_mem_pdt_read_entries(struct pdc_mem_read_pdt *rpdt_read,
+		unsigned long *pdt_entries_ptr);
+#ifdef CONFIG_64BIT
+int pdc_mem_mem_table(struct pdc_memory_table_raddr *r_addr,
+		struct pdc_memory_table *tbl, unsigned long entries);
+#endif
+
+void set_firmware_width(void);
+void set_firmware_width_unlocked(void);
+int pdc_do_firm_test_reset(unsigned long ftc_bitmap);
+int pdc_do_reset(void);
+int pdc_soft_power_info(unsigned long *power_reg);
+int pdc_soft_power_button(int sw_control);
+void pdc_io_reset(void);
+void pdc_io_reset_devices(void);
+int pdc_iodc_getc(void);
+int pdc_iodc_print(const unsigned char *str, unsigned count);
+
+void pdc_emergency_unlock(void);
+int pdc_sti_call(unsigned long func, unsigned long flags,
+                 unsigned long inptr, unsigned long outputr,
+                 unsigned long glob_cfg);
+
+static inline char * os_id_to_string(u16 os_id) { /* human-readable OS name for a PDC os_id; NOTE(review): returns string literals — a const char * return would be safer, but would ripple to callers */
+	switch(os_id) {
+	case OS_ID_NONE:	return "No OS";
+	case OS_ID_HPUX:	return "HP-UX";
+	case OS_ID_MPEXL:	return "MPE-iX";
+	case OS_ID_OSF:		return "OSF";
+	case OS_ID_HPRT:	return "HP-RT";
+	case OS_ID_NOVEL:	return "Novell Netware";
+	case OS_ID_LINUX:	return "Linux";
+	default:	return "Unknown";
+	}
+}
+
+#endif /* !defined(__ASSEMBLY__) */
+#endif /* _PARISC_PDC_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/pdc_chassis.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/pdc_chassis.h
new file mode 100644
index 0000000..a609273
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/pdc_chassis.h
@@ -0,0 +1,381 @@
+/*
+ *	include/asm-parisc/pdc_chassis.h
+ *
+ *	Copyright (C) 2002 Laurent Canet <canetl@esiee.fr>
+ *	Copyright (C) 2002 Thibaut Varene <varenet@parisc-linux.org>
+ *
+ *
+ *      This program is free software; you can redistribute it and/or modify
+ *      it under the terms of the GNU General Public License, version 2, as
+ *      published by the Free Software Foundation.
+ *      
+ *      This program is distributed in the hope that it will be useful,
+ *      but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *      GNU General Public License for more details.
+ *      
+ *      You should have received a copy of the GNU General Public License
+ *      along with this program; if not, write to the Free Software
+ *      Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *      TODO:	- handle processor number on SMP systems (Reporting Entity ID)
+ *      	- handle message ID
+ *      	- handle timestamps
+ */
+ 
+
+#ifndef _PARISC_PDC_CHASSIS_H
+#define _PARISC_PDC_CHASSIS_H
+
+/*
+ * ----------
+ * Prototypes
+ * ----------
+ */
+
+int pdc_chassis_send_status(int message);
+void parisc_pdc_chassis_init(void);
+
+
+/*
+ * -----------------
+ * Direct call names
+ * -----------------
+ * They setup everything for you, the Log message and the corresponding LED state
+ */
+
+#define PDC_CHASSIS_DIRECT_BSTART	0
+#define PDC_CHASSIS_DIRECT_BCOMPLETE	1
+#define PDC_CHASSIS_DIRECT_SHUTDOWN	2
+#define PDC_CHASSIS_DIRECT_PANIC	3
+#define PDC_CHASSIS_DIRECT_HPMC		4
+#define PDC_CHASSIS_DIRECT_LPMC		5
+#define PDC_CHASSIS_DIRECT_DUMP		6	/* not yet implemented */
+#define PDC_CHASSIS_DIRECT_OOPS		7	/* not yet implemented */
+
+
+/*
+ * ------------
+ * LEDs control
+ * ------------
+ * Set the three LEDs -- Run, Attn, and Fault.
+ */
+
+/* Old PDC LED control */
+#define PDC_CHASSIS_DISP_DATA(v)	((unsigned long)(v) << 17)
+
+/* 
+ * Available PDC PAT LED states
+ */
+
+#define PDC_CHASSIS_LED_RUN_OFF		(0ULL << 4)
+#define PDC_CHASSIS_LED_RUN_FLASH	(1ULL << 4)
+#define PDC_CHASSIS_LED_RUN_ON		(2ULL << 4)
+#define PDC_CHASSIS_LED_RUN_NC		(3ULL << 4)
+#define PDC_CHASSIS_LED_ATTN_OFF	(0ULL << 6)
+#define PDC_CHASSIS_LED_ATTN_FLASH	(1ULL << 6)
+#define PDC_CHASSIS_LED_ATTN_NC		(3ULL << 6)	/* ATTN ON is invalid */
+#define PDC_CHASSIS_LED_FAULT_OFF	(0ULL << 8)
+#define PDC_CHASSIS_LED_FAULT_FLASH	(1ULL << 8)
+#define PDC_CHASSIS_LED_FAULT_ON	(2ULL << 8)
+#define PDC_CHASSIS_LED_FAULT_NC	(3ULL << 8)
+#define PDC_CHASSIS_LED_VALID		(1ULL << 10)
+
+/* 
+ * Valid PDC PAT LED states combinations
+ */
+
+/* System running normally */
+#define PDC_CHASSIS_LSTATE_RUN_NORMAL	(PDC_CHASSIS_LED_RUN_ON		| \
+					 PDC_CHASSIS_LED_ATTN_OFF	| \
+					 PDC_CHASSIS_LED_FAULT_OFF	| \
+					 PDC_CHASSIS_LED_VALID		)
+/* System crashed and rebooted itself successfully */
+#define PDC_CHASSIS_LSTATE_RUN_CRASHREC	(PDC_CHASSIS_LED_RUN_ON		| \
+					 PDC_CHASSIS_LED_ATTN_OFF	| \
+					 PDC_CHASSIS_LED_FAULT_FLASH	| \
+					 PDC_CHASSIS_LED_VALID		)
+/* There was a system interruption that did not take the system down */
+#define PDC_CHASSIS_LSTATE_RUN_SYSINT	(PDC_CHASSIS_LED_RUN_ON		| \
+					 PDC_CHASSIS_LED_ATTN_FLASH	| \
+					 PDC_CHASSIS_LED_FAULT_OFF	| \
+					 PDC_CHASSIS_LED_VALID		)
+/* System running and unexpected reboot or non-critical error detected */
+#define PDC_CHASSIS_LSTATE_RUN_NCRIT	(PDC_CHASSIS_LED_RUN_ON		| \
+					 PDC_CHASSIS_LED_ATTN_FLASH	| \
+					 PDC_CHASSIS_LED_FAULT_FLASH	| \
+					 PDC_CHASSIS_LED_VALID		)
+/* Executing non-OS code */
+#define PDC_CHASSIS_LSTATE_NONOS	(PDC_CHASSIS_LED_RUN_FLASH	| \
+					 PDC_CHASSIS_LED_ATTN_OFF	| \
+					 PDC_CHASSIS_LED_FAULT_OFF	| \
+					 PDC_CHASSIS_LED_VALID		)
+/* Boot failed - Executing non-OS code */
+#define PDC_CHASSIS_LSTATE_NONOS_BFAIL	(PDC_CHASSIS_LED_RUN_FLASH	| \
+					 PDC_CHASSIS_LED_ATTN_OFF	| \
+					 PDC_CHASSIS_LED_FAULT_ON	| \
+					 PDC_CHASSIS_LED_VALID		)
+/* Unexpected reboot occurred - Executing non-OS code */
+#define PDC_CHASSIS_LSTATE_NONOS_UNEXP	(PDC_CHASSIS_LED_RUN_FLASH	| \
+					 PDC_CHASSIS_LED_ATTN_OFF	| \
+					 PDC_CHASSIS_LED_FAULT_FLASH	| \
+					 PDC_CHASSIS_LED_VALID		)
+/* Executing non-OS code - Non-critical error detected */
+#define PDC_CHASSIS_LSTATE_NONOS_NCRIT	(PDC_CHASSIS_LED_RUN_FLASH	| \
+					 PDC_CHASSIS_LED_ATTN_FLASH	| \
+					 PDC_CHASSIS_LED_FAULT_OFF	| \
+					 PDC_CHASSIS_LED_VALID		)
+/* Boot failed - Executing non-OS code - Non-critical error detected */
+#define PDC_CHASSIS_LSTATE_BFAIL_NCRIT	(PDC_CHASSIS_LED_RUN_FLASH	| \
+					 PDC_CHASSIS_LED_ATTN_FLASH	| \
+					 PDC_CHASSIS_LED_FAULT_ON	| \
+					 PDC_CHASSIS_LED_VALID		)
+/* Unexpected reboot/recovering - Executing non-OS code - Non-critical error detected */
+#define PDC_CHASSIS_LSTATE_UNEXP_NCRIT	(PDC_CHASSIS_LED_RUN_FLASH	| \
+					 PDC_CHASSIS_LED_ATTN_FLASH	| \
+					 PDC_CHASSIS_LED_FAULT_FLASH	| \
+					 PDC_CHASSIS_LED_VALID		)
+/* Cannot execute PDC */
+#define PDC_CHASSIS_LSTATE_CANNOT_PDC	(PDC_CHASSIS_LED_RUN_OFF	| \
+					 PDC_CHASSIS_LED_ATTN_OFF	| \
+					 PDC_CHASSIS_LED_FAULT_OFF	| \
+					 PDC_CHASSIS_LED_VALID		)
+/* Boot failed - OS not up - PDC has detected a failure that prevents boot */
+#define PDC_CHASSIS_LSTATE_FATAL_BFAIL	(PDC_CHASSIS_LED_RUN_OFF	| \
+					 PDC_CHASSIS_LED_ATTN_OFF	| \
+					 PDC_CHASSIS_LED_FAULT_ON	| \
+					 PDC_CHASSIS_LED_VALID		)
+/* No code running - Non-critical error detected (double fault situation) */
+#define PDC_CHASSIS_LSTATE_NOCODE_NCRIT	(PDC_CHASSIS_LED_RUN_OFF	| \
+					 PDC_CHASSIS_LED_ATTN_FLASH	| \
+					 PDC_CHASSIS_LED_FAULT_OFF	| \
+					 PDC_CHASSIS_LED_VALID		)
+/* Boot failed - OS not up - Fatal failure detected - Non-critical error detected */
+#define PDC_CHASSIS_LSTATE_FATAL_NCRIT	(PDC_CHASSIS_LED_RUN_OFF	| \
+					 PDC_CHASSIS_LED_ATTN_FLASH	| \
+					 PDC_CHASSIS_LED_FAULT_ON	| \
+					 PDC_CHASSIS_LED_VALID		)
+/* All other states are invalid */
+
+
+/*
+ * --------------
+ * PDC Log events
+ * --------------
+ * Here follows bits needed to fill up the log event sent to PDC_CHASSIS
+ * The log message contains: Alert level, Source, Source detail,
+ * Source ID, Problem detail, Caller activity, Activity status, 
+ * Caller subactivity, Reporting entity type, Reporting entity ID,
+ * Data type, Unique message ID and EOM. 
+ */
+
+/* Alert level */
+#define PDC_CHASSIS_ALERT_FORWARD	(0ULL << 36)	/* no failure detected */
+#define PDC_CHASSIS_ALERT_SERPROC	(1ULL << 36)	/* service proc - no failure */
+#define PDC_CHASSIS_ALERT_NURGENT	(2ULL << 36)	/* non-urgent operator attn */
+#define PDC_CHASSIS_ALERT_BLOCKED	(3ULL << 36)	/* system blocked */
+#define PDC_CHASSIS_ALERT_CONF_CHG	(4ULL << 36)	/* unexpected configuration change */
+#define PDC_CHASSIS_ALERT_ENV_PB	(5ULL << 36)	/* boot possible, environmental pb */
+#define PDC_CHASSIS_ALERT_PENDING	(6ULL << 36)	/* boot possible, pending failure */
+#define PDC_CHASSIS_ALERT_PERF_IMP	(8ULL << 36)	/* boot possible, performance impaired */
+#define PDC_CHASSIS_ALERT_FUNC_IMP	(10ULL << 36)	/* boot possible, functionality impaired */
+#define PDC_CHASSIS_ALERT_SOFT_FAIL	(12ULL << 36)	/* software failure */
+#define PDC_CHASSIS_ALERT_HANG		(13ULL << 36)	/* system hang */
+#define PDC_CHASSIS_ALERT_ENV_FATAL	(14ULL << 36)	/* fatal power or environmental pb */
+#define PDC_CHASSIS_ALERT_HW_FATAL	(15ULL << 36)	/* fatal hardware problem */
+
+/* Source */
+#define PDC_CHASSIS_SRC_NONE		(0ULL << 28)	/* unknown, no source stated */
+#define PDC_CHASSIS_SRC_PROC		(1ULL << 28)	/* processor */
+/* For later use ? */
+#define PDC_CHASSIS_SRC_PROC_CACHE	(2ULL << 28)	/* processor cache*/
+#define PDC_CHASSIS_SRC_PDH		(3ULL << 28)	/* processor dependent hardware */
+#define PDC_CHASSIS_SRC_PWR		(4ULL << 28)	/* power */
+#define PDC_CHASSIS_SRC_FAB		(5ULL << 28)	/* fabric connector */
+#define PDC_CHASSIS_SRC_PLATi		(6ULL << 28)	/* platform */
+#define PDC_CHASSIS_SRC_MEM		(7ULL << 28)	/* memory */
+#define PDC_CHASSIS_SRC_IO		(8ULL << 28)	/* I/O */
+#define PDC_CHASSIS_SRC_CELL		(9ULL << 28)	/* cell */
+#define PDC_CHASSIS_SRC_PD		(10ULL << 28)	/* protected domain */
+
+/* Source detail field */
+#define PDC_CHASSIS_SRC_D_PROC		(1ULL << 24)	/* processor general */
+
+/* Source ID - platform dependent */
+#define PDC_CHASSIS_SRC_ID_UNSPEC	(0ULL << 16)
+
+/* Problem detail - problem source dependent */
+#define PDC_CHASSIS_PB_D_PROC_NONE	(0ULL << 32)	/* no problem detail */
+#define PDC_CHASSIS_PB_D_PROC_TIMEOUT	(4ULL << 32)	/* timeout */
+
+/* Caller activity */
+#define PDC_CHASSIS_CALL_ACT_HPUX_BL	(7ULL << 12)	/* Boot Loader */
+#define PDC_CHASSIS_CALL_ACT_HPUX_PD	(8ULL << 12)	/* SAL_PD activities */
+#define PDC_CHASSIS_CALL_ACT_HPUX_EVENT	(9ULL << 12)	/* SAL_EVENTS activities */
+#define PDC_CHASSIS_CALL_ACT_HPUX_IO	(10ULL << 12)	/* SAL_IO activities */
+#define PDC_CHASSIS_CALL_ACT_HPUX_PANIC	(11ULL << 12)	/* System panic */
+#define PDC_CHASSIS_CALL_ACT_HPUX_INIT	(12ULL << 12)	/* System initialization */
+#define PDC_CHASSIS_CALL_ACT_HPUX_SHUT	(13ULL << 12)	/* System shutdown */
+#define PDC_CHASSIS_CALL_ACT_HPUX_WARN	(14ULL << 12)	/* System warning */
+#define PDC_CHASSIS_CALL_ACT_HPUX_DU	(15ULL << 12)	/* Display_Activity() update */
+
+/* Activity status - implementation dependent */
+#define PDC_CHASSIS_ACT_STATUS_UNSPEC	(0ULL << 0)
+
+/* Caller subactivity - implementation dependent */
+/* FIXME: other subactivities ? */
+#define PDC_CHASSIS_CALL_SACT_UNSPEC	(0ULL << 4)	/* implementation dependent */
+
+/* Reporting entity type */
+#define PDC_CHASSIS_RET_GENERICOS	(12ULL << 52)	/* generic OSes */
+#define PDC_CHASSIS_RET_IA64_NT		(13ULL << 52)	/* IA-64 NT */
+#define PDC_CHASSIS_RET_HPUX		(14ULL << 52)	/* HP-UX */
+#define PDC_CHASSIS_RET_DIAG		(15ULL << 52)	/* offline diagnostics & utilities */
+
+/* Reporting entity ID */
+#define PDC_CHASSIS_REID_UNSPEC		(0ULL << 44)
+
+/* Data type */
+#define PDC_CHASSIS_DT_NONE		(0ULL << 59)	/* data field unused */
+/* For later use ? Do we need these ? */
+#define PDC_CHASSIS_DT_PHYS_ADDR	(1ULL << 59)	/* physical address */
+#define PDC_CHASSIS_DT_DATA_EXPECT	(2ULL << 59)	/* expected data */
+#define PDC_CHASSIS_DT_ACTUAL		(3ULL << 59)	/* actual data */
+#define PDC_CHASSIS_DT_PHYS_LOC		(4ULL << 59)	/* physical location */
+#define PDC_CHASSIS_DT_PHYS_LOC_EXT	(5ULL << 59)	/* physical location extension */
+#define PDC_CHASSIS_DT_TAG		(6ULL << 59)	/* tag */
+#define PDC_CHASSIS_DT_SYNDROME		(7ULL << 59)	/* syndrome */
+#define PDC_CHASSIS_DT_CODE_ADDR	(8ULL << 59)	/* code address */
+#define PDC_CHASSIS_DT_ASCII_MSG	(9ULL << 59)	/* ascii message */
+#define PDC_CHASSIS_DT_POST		(10ULL << 59)	/* POST code */
+#define PDC_CHASSIS_DT_TIMESTAMP	(11ULL << 59)	/* timestamp */
+#define PDC_CHASSIS_DT_DEV_STAT		(12ULL << 59)	/* device status */
+#define PDC_CHASSIS_DT_DEV_TYPE		(13ULL << 59)	/* device type */
+#define PDC_CHASSIS_DT_PB_DET		(14ULL << 59)	/* problem detail */
+#define PDC_CHASSIS_DT_ACT_LEV		(15ULL << 59)	/* activity level/timeout */
+#define PDC_CHASSIS_DT_SER_NUM		(16ULL << 59)	/* serial number */
+#define PDC_CHASSIS_DT_REV_NUM		(17ULL << 59)	/* revision number */
+#define PDC_CHASSIS_DT_INTERRUPT	(18ULL << 59)	/* interruption information */
+#define PDC_CHASSIS_DT_TEST_NUM		(19ULL << 59)	/* test number */
+#define PDC_CHASSIS_DT_STATE_CHG	(20ULL << 59)	/* major changes in system state */
+#define PDC_CHASSIS_DT_PROC_DEALLOC	(21ULL << 59)	/* processor deallocate */
+#define PDC_CHASSIS_DT_RESET		(30ULL << 59)	/* reset type and cause */
+#define PDC_CHASSIS_DT_PA_LEGACY	(31ULL << 59)	/* legacy PA hex chassis code */
+
+/* System states - part of major changes in system state data field */
+#define PDC_CHASSIS_SYSTATE_BSTART	(0ULL << 0)	/* boot start */
+#define PDC_CHASSIS_SYSTATE_BCOMP	(1ULL << 0)	/* boot complete */
+#define PDC_CHASSIS_SYSTATE_CHANGE	(2ULL << 0)	/* major change */
+#define PDC_CHASSIS_SYSTATE_LED		(3ULL << 0)	/* LED change */
+#define PDC_CHASSIS_SYSTATE_PANIC	(9ULL << 0)	/* OS Panic */
+#define PDC_CHASSIS_SYSTATE_DUMP	(10ULL << 0)	/* memory dump */
+#define PDC_CHASSIS_SYSTATE_HPMC	(11ULL << 0)	/* processing HPMC */
+#define PDC_CHASSIS_SYSTATE_HALT	(15ULL << 0)	/* system halted */
+
+/* Message ID */
+#define PDC_CHASSIS_MSG_ID		(0ULL << 40)	/* we do not handle msg IDs atm */
+
+/* EOM - separates log entries */
+#define PDC_CHASSIS_EOM_CLEAR		(0ULL << 43)
+#define PDC_CHASSIS_EOM_SET		(1ULL << 43)
+
+/*
+ * Preformatted well-known messages
+ */
+
+/* Boot started */
+#define PDC_CHASSIS_PMSG_BSTART		(PDC_CHASSIS_ALERT_SERPROC	| \
+					 PDC_CHASSIS_SRC_PROC		| \
+					 PDC_CHASSIS_SRC_D_PROC		| \
+					 PDC_CHASSIS_SRC_ID_UNSPEC	| \
+					 PDC_CHASSIS_PB_D_PROC_NONE	| \
+					 PDC_CHASSIS_CALL_ACT_HPUX_INIT	| \
+					 PDC_CHASSIS_ACT_STATUS_UNSPEC	| \
+					 PDC_CHASSIS_CALL_SACT_UNSPEC	| \
+					 PDC_CHASSIS_RET_HPUX		| \
+					 PDC_CHASSIS_REID_UNSPEC	| \
+					 PDC_CHASSIS_DT_STATE_CHG	| \
+					 PDC_CHASSIS_SYSTATE_BSTART	| \
+					 PDC_CHASSIS_MSG_ID		| \
+					 PDC_CHASSIS_EOM_SET		)
+
+/* Boot complete */
+#define PDC_CHASSIS_PMSG_BCOMPLETE	(PDC_CHASSIS_ALERT_SERPROC	| \
+					 PDC_CHASSIS_SRC_PROC		| \
+					 PDC_CHASSIS_SRC_D_PROC		| \
+					 PDC_CHASSIS_SRC_ID_UNSPEC	| \
+					 PDC_CHASSIS_PB_D_PROC_NONE	| \
+					 PDC_CHASSIS_CALL_ACT_HPUX_INIT	| \
+					 PDC_CHASSIS_ACT_STATUS_UNSPEC	| \
+					 PDC_CHASSIS_CALL_SACT_UNSPEC	| \
+					 PDC_CHASSIS_RET_HPUX		| \
+					 PDC_CHASSIS_REID_UNSPEC	| \
+					 PDC_CHASSIS_DT_STATE_CHG	| \
+					 PDC_CHASSIS_SYSTATE_BCOMP	| \
+					 PDC_CHASSIS_MSG_ID		| \
+					 PDC_CHASSIS_EOM_SET		)
+
+/* Shutdown */
+#define PDC_CHASSIS_PMSG_SHUTDOWN	(PDC_CHASSIS_ALERT_SERPROC	| \
+					 PDC_CHASSIS_SRC_PROC		| \
+					 PDC_CHASSIS_SRC_D_PROC		| \
+					 PDC_CHASSIS_SRC_ID_UNSPEC	| \
+					 PDC_CHASSIS_PB_D_PROC_NONE	| \
+					 PDC_CHASSIS_CALL_ACT_HPUX_SHUT	| \
+					 PDC_CHASSIS_ACT_STATUS_UNSPEC	| \
+					 PDC_CHASSIS_CALL_SACT_UNSPEC	| \
+					 PDC_CHASSIS_RET_HPUX		| \
+					 PDC_CHASSIS_REID_UNSPEC	| \
+					 PDC_CHASSIS_DT_STATE_CHG	| \
+					 PDC_CHASSIS_SYSTATE_HALT	| \
+					 PDC_CHASSIS_MSG_ID		| \
+					 PDC_CHASSIS_EOM_SET		)
+
+/* Panic */
+#define PDC_CHASSIS_PMSG_PANIC		(PDC_CHASSIS_ALERT_SOFT_FAIL	| \
+					 PDC_CHASSIS_SRC_PROC		| \
+					 PDC_CHASSIS_SRC_D_PROC		| \
+					 PDC_CHASSIS_SRC_ID_UNSPEC	| \
+					 PDC_CHASSIS_PB_D_PROC_NONE	| \
+					 PDC_CHASSIS_CALL_ACT_HPUX_PANIC| \
+					 PDC_CHASSIS_ACT_STATUS_UNSPEC	| \
+					 PDC_CHASSIS_CALL_SACT_UNSPEC	| \
+					 PDC_CHASSIS_RET_HPUX		| \
+					 PDC_CHASSIS_REID_UNSPEC	| \
+					 PDC_CHASSIS_DT_STATE_CHG	| \
+					 PDC_CHASSIS_SYSTATE_PANIC	| \
+					 PDC_CHASSIS_MSG_ID		| \
+					 PDC_CHASSIS_EOM_SET		)
+
+// FIXME: extrapolated data
+/* HPMC */
+#define PDC_CHASSIS_PMSG_HPMC		(PDC_CHASSIS_ALERT_CONF_CHG /*?*/	| \
+					 PDC_CHASSIS_SRC_PROC		| \
+					 PDC_CHASSIS_SRC_D_PROC		| \
+					 PDC_CHASSIS_SRC_ID_UNSPEC	| \
+					 PDC_CHASSIS_PB_D_PROC_NONE	| \
+					 PDC_CHASSIS_CALL_ACT_HPUX_WARN	| \
+					 PDC_CHASSIS_RET_HPUX		| \
+					 PDC_CHASSIS_DT_STATE_CHG	| \
+					 PDC_CHASSIS_SYSTATE_HPMC	| \
+					 PDC_CHASSIS_MSG_ID		| \
+					 PDC_CHASSIS_EOM_SET		)
+
+/* LPMC */
+#define PDC_CHASSIS_PMSG_LPMC		(PDC_CHASSIS_ALERT_BLOCKED /*?*/| \
+					 PDC_CHASSIS_SRC_PROC		| \
+					 PDC_CHASSIS_SRC_D_PROC		| \
+					 PDC_CHASSIS_SRC_ID_UNSPEC	| \
+					 PDC_CHASSIS_PB_D_PROC_NONE	| \
+					 PDC_CHASSIS_CALL_ACT_HPUX_WARN	| \
+					 PDC_CHASSIS_ACT_STATUS_UNSPEC	| \
+					 PDC_CHASSIS_CALL_SACT_UNSPEC	| \
+					 PDC_CHASSIS_RET_HPUX		| \
+					 PDC_CHASSIS_REID_UNSPEC	| \
+					 PDC_CHASSIS_DT_STATE_CHG	| \
+					 PDC_CHASSIS_SYSTATE_CHANGE	| \
+					 PDC_CHASSIS_MSG_ID		| \
+					 PDC_CHASSIS_EOM_SET		)
+
+#endif /* _PARISC_PDC_CHASSIS_H */
+/* vim: set ts=8 */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/pdcpat.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/pdcpat.h
new file mode 100644
index 0000000..a468a17
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/pdcpat.h
@@ -0,0 +1,353 @@
+#ifndef __PARISC_PATPDC_H
+#define __PARISC_PATPDC_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright 2000 (c) Hewlett Packard (Paul Bame <bame()spam.parisc-linux.org>)
+ * Copyright 2000,2004 (c) Grant Grundler <grundler()nahspam.parisc-linux.org>
+ */
+
+
+#define PDC_PAT_CELL           	64L   /* Interface for gaining and 
+                                         * manipulating cell state within PD */
+#define PDC_PAT_CELL_GET_NUMBER    0L   /* Return Cell number */
+#define PDC_PAT_CELL_GET_INFO      1L   /* Returns info about Cell */
+#define PDC_PAT_CELL_MODULE        2L   /* Returns info about Module */
+#define PDC_PAT_CELL_SET_ATTENTION 9L   /* Set Cell Attention indicator */
+#define PDC_PAT_CELL_NUMBER_TO_LOC 10L   /* Cell Number -> Location */
+#define PDC_PAT_CELL_WALK_FABRIC   11L   /* Walk the Fabric */
+#define PDC_PAT_CELL_GET_RDT_SIZE  12L   /* Return Route Distance Table Sizes */
+#define PDC_PAT_CELL_GET_RDT       13L   /* Return Route Distance Tables */
+#define PDC_PAT_CELL_GET_LOCAL_PDH_SZ 14L /* Read Local PDH Buffer Size */
+#define PDC_PAT_CELL_SET_LOCAL_PDH    15L  /* Write Local PDH Buffer */
+#define PDC_PAT_CELL_GET_REMOTE_PDH_SZ 16L /* Return Remote PDH Buffer Size */
+#define PDC_PAT_CELL_GET_REMOTE_PDH 17L /* Read Remote PDH Buffer */
+#define PDC_PAT_CELL_GET_DBG_INFO   128L  /* Return DBG Buffer Info */
+#define PDC_PAT_CELL_CHANGE_ALIAS   129L  /* Change Non-Equivalent Alias Checking */
+
+
+/*
+** Arg to PDC_PAT_CELL_MODULE memaddr[4]
+**
+** Addresses on the Merced Bus != all Runway Bus addresses.
+** This is intended for programming SBA/LBA chips range registers.
+*/
+#define IO_VIEW      0UL
+#define PA_VIEW      1UL
+
+/* PDC_PAT_CELL_MODULE entity type values */
+#define	PAT_ENTITY_CA	0	/* central agent */
+#define	PAT_ENTITY_PROC	1	/* processor */
+#define	PAT_ENTITY_MEM	2	/* memory controller */
+#define	PAT_ENTITY_SBA	3	/* system bus adapter */
+#define	PAT_ENTITY_LBA	4	/* local bus adapter */
+#define	PAT_ENTITY_PBC	5	/* processor bus converter */
+#define	PAT_ENTITY_XBC	6	/* crossbar fabric connect */
+#define	PAT_ENTITY_RC	7	/* fabric interconnect */
+
+/* PDC_PAT_CELL_MODULE address range type values */
+#define PAT_PBNUM           0         /* PCI Bus Number */
+#define PAT_LMMIO           1         /* < 4G MMIO Space */
+#define PAT_GMMIO           2         /* > 4G MMIO Space */
+#define PAT_NPIOP           3         /* Non Postable I/O Port Space */
+#define PAT_PIOP            4         /* Postable I/O Port Space */
+#define PAT_AHPA            5         /* Additional HPA Space */
+#define PAT_UFO             6         /* HPA Space (UFO for Mariposa) */
+#define PAT_GNIP            7         /* GNI Reserved Space */
+
+
+
+/* PDC PAT CHASSIS LOG -- Platform logging & forward progress functions */
+
+#define PDC_PAT_CHASSIS_LOG		65L
+#define PDC_PAT_CHASSIS_WRITE_LOG    	0L /* Write Log Entry */
+#define PDC_PAT_CHASSIS_READ_LOG     	1L /* Read  Log Entry */
+
+
+/* PDC PAT CPU  -- CPU configuration within the protection domain */
+
+#define PDC_PAT_CPU                	67L
+#define PDC_PAT_CPU_INFO            	0L /* Return CPU config info */
+#define PDC_PAT_CPU_DELETE          	1L /* Delete CPU */
+#define PDC_PAT_CPU_ADD             	2L /* Add    CPU */
+#define PDC_PAT_CPU_GET_NUMBER      	3L /* Return CPU Number */
+#define PDC_PAT_CPU_GET_HPA         	4L /* Return CPU HPA */
+#define PDC_PAT_CPU_STOP            	5L /* Stop   CPU */
+#define PDC_PAT_CPU_RENDEZVOUS      	6L /* Rendezvous CPU */
+#define PDC_PAT_CPU_GET_CLOCK_INFO  	7L /* Return CPU Clock info */
+#define PDC_PAT_CPU_GET_RENDEZVOUS_STATE 8L /* Return Rendezvous State */
+#define PDC_PAT_CPU_PLUNGE_FABRIC 	128L /* Plunge Fabric */
+#define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache 
+                                                 * Cleansing Mode */
+/*  PDC PAT EVENT -- Platform Events */
+
+#define PDC_PAT_EVENT              	68L
+#define PDC_PAT_EVENT_GET_CAPS     	0L /* Get Capabilities */
+#define PDC_PAT_EVENT_SET_MODE     	1L /* Set Notification Mode */
+#define PDC_PAT_EVENT_SCAN         	2L /* Scan Event */
+#define PDC_PAT_EVENT_HANDLE       	3L /* Handle Event */
+#define PDC_PAT_EVENT_GET_NB_CALL  	4L /* Get Non-Blocking call Args */
+
+/*  PDC PAT HPMC -- Cause processor to go into spin loop, and wait
+ *  			for wake up from Monarch Processor.
+ */
+
+#define PDC_PAT_HPMC               70L
+#define PDC_PAT_HPMC_RENDEZ_CPU     0L /* go into spin loop */
+#define PDC_PAT_HPMC_SET_PARAMS     1L /* Allows OS to specify intr which PDC 
+                                        * will use to interrupt OS during
+                                        * machine check rendezvous */
+
+/* parameters for PDC_PAT_HPMC_SET_PARAMS: */
+#define HPMC_SET_PARAMS_INTR 	    1L /* Rendezvous Interrupt */
+#define HPMC_SET_PARAMS_WAKE 	    2L /* Wake up processor */
+
+
+/*  PDC PAT IO  -- On-line services for I/O modules */
+
+#define PDC_PAT_IO                  71L
+#define PDC_PAT_IO_GET_SLOT_STATUS   	5L /* Get Slot Status Info*/
+#define PDC_PAT_IO_GET_LOC_FROM_HARDWARE 6L /* Get Physical Location from */
+                                            /* Hardware Path */
+#define PDC_PAT_IO_GET_HARDWARE_FROM_LOC 7L /* Get Hardware Path from 
+                                             * Physical Location */
+#define PDC_PAT_IO_GET_PCI_CONFIG_FROM_HW 11L /* Get PCI Configuration
+                                               * Address from Hardware Path */
+#define PDC_PAT_IO_GET_HW_FROM_PCI_CONFIG 12L /* Get Hardware Path 
+                                               * from PCI Configuration Address */
+#define PDC_PAT_IO_READ_HOST_BRIDGE_INFO 13L  /* Read Host Bridge State Info */
+#define PDC_PAT_IO_CLEAR_HOST_BRIDGE_INFO 14L /* Clear Host Bridge State Info*/
+#define PDC_PAT_IO_GET_PCI_ROUTING_TABLE_SIZE 15L /* Get PCI INT Routing Table 
+                                                   * Size */
+#define PDC_PAT_IO_GET_PCI_ROUTING_TABLE  16L /* Get PCI INT Routing Table */
+#define PDC_PAT_IO_GET_HINT_TABLE_SIZE 	17L /* Get Hint Table Size */
+#define PDC_PAT_IO_GET_HINT_TABLE   	18L /* Get Hint Table */
+#define PDC_PAT_IO_PCI_CONFIG_READ  	19L /* PCI Config Read */
+#define PDC_PAT_IO_PCI_CONFIG_WRITE 	20L /* PCI Config Write */
+#define PDC_PAT_IO_GET_NUM_IO_SLOTS 	21L /* Get Number of I/O Bay Slots in 
+                                       		  * Cabinet */
+#define PDC_PAT_IO_GET_LOC_IO_SLOTS 	22L /* Get Physical Location of I/O */
+                                   		     /* Bay Slots in Cabinet */
+#define PDC_PAT_IO_BAY_STATUS_INFO  	28L /* Get I/O Bay Slot Status Info */
+#define PDC_PAT_IO_GET_PROC_VIEW        29L /* Get Processor view of IO address */
+#define PDC_PAT_IO_PROG_SBA_DIR_RANGE   30L /* Program directed range */
+
+
+/* PDC PAT MEM  -- Manage memory page deallocation */
+
+#define PDC_PAT_MEM            72L
+#define PDC_PAT_MEM_PD_INFO     	0L /* Return PDT info for PD       */
+#define PDC_PAT_MEM_PD_CLEAR    	1L /* Clear PDT for PD             */
+#define PDC_PAT_MEM_PD_READ     	2L /* Read PDT entries for PD      */
+#define PDC_PAT_MEM_PD_RESET    	3L /* Reset clear bit for PD       */
+#define PDC_PAT_MEM_CELL_INFO   	5L /* Return PDT info For Cell     */
+#define PDC_PAT_MEM_CELL_CLEAR  	6L /* Clear PDT For Cell           */
+#define PDC_PAT_MEM_CELL_READ   	7L /* Read PDT entries For Cell    */
+#define PDC_PAT_MEM_CELL_RESET  	8L /* Reset clear bit For Cell     */
+#define PDC_PAT_MEM_SETGM		9L /* Set Good Memory value        */
+#define PDC_PAT_MEM_ADD_PAGE		10L /* ADDs a page to the cell      */
+#define PDC_PAT_MEM_ADDRESS		11L /* Get Physical Location From   */
+					    /* Memory Address               */
+#define PDC_PAT_MEM_GET_TXT_SIZE   	12L /* Get Formatted Text Size   */
+#define PDC_PAT_MEM_GET_PD_TXT     	13L /* Get PD Formatted Text     */
+#define PDC_PAT_MEM_GET_CELL_TXT   	14L /* Get Cell Formatted Text   */
+#define PDC_PAT_MEM_RD_STATE_INFO  	15L /* Read Mem Module State Info*/
+#define PDC_PAT_MEM_CLR_STATE_INFO 	16L /*Clear Mem Module State Info*/
+#define PDC_PAT_MEM_CLEAN_RANGE    	128L /*Clean Mem in specific range*/
+#define PDC_PAT_MEM_GET_TBL_SIZE   	131L /* Get Memory Table Size     */
+#define PDC_PAT_MEM_GET_TBL        	132L /* Get Memory Table          */
+
+
+/* PDC PAT NVOLATILE  --  Access Non-Volatile Memory */
+
+#define PDC_PAT_NVOLATILE	73L
+#define PDC_PAT_NVOLATILE_READ		0L /* Read Non-Volatile Memory   */
+#define PDC_PAT_NVOLATILE_WRITE		1L /* Write Non-Volatile Memory  */
+#define PDC_PAT_NVOLATILE_GET_SIZE	2L /* Return size of NVM         */
+#define PDC_PAT_NVOLATILE_VERIFY	3L /* Verify contents of NVM     */
+#define PDC_PAT_NVOLATILE_INIT		4L /* Initialize NVM             */
+
+/* PDC PAT PD */
+#define PDC_PAT_PD		74L         /* Protection Domain Info   */
+#define PDC_PAT_PD_GET_ADDR_MAP		0L  /* Get Address Map          */
+
+/* PDC_PAT_PD_GET_ADDR_MAP entry types */
+#define PAT_MEMORY_DESCRIPTOR		1   
+
+/* PDC_PAT_PD_GET_ADDR_MAP memory types */
+#define PAT_MEMTYPE_MEMORY		0
+#define PAT_MEMTYPE_FIRMWARE		4
+
+/* PDC_PAT_PD_GET_ADDR_MAP memory usage */
+#define PAT_MEMUSE_GENERAL		0
+#define PAT_MEMUSE_GI			128
+#define PAT_MEMUSE_GNI			129
+
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+#ifdef CONFIG_64BIT
+#define is_pdc_pat()	(PDC_TYPE_PAT == pdc_type)
+extern int pdc_pat_get_irt_size(unsigned long *num_entries, unsigned long cell_num);
+extern int pdc_pat_get_irt(void *r_addr, unsigned long cell_num);
+#else	/* ! CONFIG_64BIT */
+/* No PAT support for 32-bit kernels...sorry */
+#define is_pdc_pat()	(0)
+#define pdc_pat_get_irt_size(num_entries, cell_numn)	PDC_BAD_PROC
+#define pdc_pat_get_irt(r_addr, cell_num)		PDC_BAD_PROC
+#endif	/* ! CONFIG_64BIT */
+
+
+/* Return buffer for PDC_PAT_CELL_GET_NUMBER (see pdc_pat_cell_get_number). */
+struct pdc_pat_cell_num {
+	unsigned long cell_num;	/* cell number */
+	unsigned long cell_loc;	/* cell location */
+};
+
+/* Return buffer for PDC_PAT_CPU_GET_NUMBER (see pdc_pat_cpu_get_number). */
+struct pdc_pat_cpu_num {
+	unsigned long cpu_num;	/* CPU number */
+	unsigned long cpu_loc;	/* CPU location */
+};
+
+/* PDT here refers to the firmware's table of deallocated memory pages
+ * (PDC_PAT_MEM "Manage memory page deallocation" group above). */
+struct pdc_pat_mem_retinfo { /* PDC_PAT_MEM/PDC_PAT_MEM_PD_INFO (return info) */
+	unsigned int ke;	/* bit 0: memory inside good memory? */
+	unsigned int current_pdt_entries:16;
+	unsigned int max_pdt_entries:16;
+	unsigned long Cs_bitmap;	/* NOTE(review): semantics not shown here -- see firmware spec */
+	unsigned long Ic_bitmap;	/* NOTE(review): semantics not shown here -- see firmware spec */
+	unsigned long good_mem;
+	unsigned long first_dbe_loc; /* first location of double bit error */
+	unsigned long clear_time; /* last PDT clear time (since Jan 1970) */
+};
+
+/* Per-cell PDT info, returned by pdc_pat_mem_pdt_cell_info(). */
+struct pdc_pat_mem_cell_pdt_retinfo { /* PDC_PAT_MEM/PDC_PAT_MEM_CELL_INFO */
+	u64 reserved:32;
+	u64 cs:1;		/* clear status: cleared since the last call? */
+	u64 current_pdt_entries:15;
+	u64 ic:1;		/* interleaving had to be changed ? */
+	u64 max_pdt_entries:15;
+	unsigned long good_mem;
+	unsigned long first_dbe_loc; /* first location of double bit error */
+	unsigned long clear_time; /* last PDT clear time (since Jan 1970) */
+};
+
+
+/* Result of a PDT read; filled by pdc_pat_mem_read_pd_pdt() and
+ * pdc_pat_mem_read_cell_pdt() alongside the caller's entry buffer. */
+struct pdc_pat_mem_read_pd_retinfo { /* PDC_PAT_MEM/PDC_PAT_MEM_PD_READ */
+	unsigned long actual_count_bytes;	/* bytes actually returned */
+	unsigned long pdt_entries;		/* number of PDT entries returned */
+};
+
+/* Physical location (cabinet/cell/DIMM) of a memory address; filled by
+ * pdc_pat_mem_get_dimm_phys_location() for PDC_PAT_MEM_ADDRESS. */
+struct pdc_pat_mem_phys_mem_location { /* PDC_PAT_MEM/PDC_PAT_MEM_ADDRESS */
+	u64 cabinet:8;
+	u64 ign1:8;
+	u64 ign2:8;
+	u64 cell_slot:8;
+	u64 ign3:8;
+	u64 dimm_slot:8; /* DIMM slot, e.g. 0x1A, 0x2B, show user hex value! */
+	u64 ign4:8;
+	u64 source:4; /* for mem: always 0x07 */
+	u64 source_detail:4; /* for mem: always 0x04 (SIMM or DIMM) */
+};
+
+/* One entry of the protection-domain address map returned by
+ * pdc_pat_pd_get_addr_map() (PDC_PAT_PD/PDC_PAT_PD_GET_ADDR_MAP). */
+struct pdc_pat_pd_addr_map_entry {
+	unsigned char entry_type;       /* 1 = Memory Descriptor Entry Type */
+	unsigned char reserve1[5];
+	unsigned char memory_type;	/* PAT_MEMTYPE_* above */
+	unsigned char memory_usage;	/* PAT_MEMUSE_* above */
+	unsigned long paddr;		/* physical start address of the range */
+	unsigned int  pages;            /* Length in 4K pages */
+	unsigned int  reserve2;
+	unsigned long cell_map;
+};
+
+/********************************************************************
+* PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr
+* ----------------------------------------------------------
+* Bit  0 to 51 - conf_base_addr
+* Bit 52 to 62 - reserved
+* Bit       63 - endianness bit
+********************************************************************/
+#define PAT_GET_CBA(value) ((value) & 0xfffffffffffff000UL)
+
+/********************************************************************
+* PDC_PAT_CELL[Return Cell Module] memaddr[1] mod_info
+* ----------------------------------------------------
+* Bit  0 to  7 - entity type
+*    0 = central agent,            1 = processor,
+*    2 = memory controller,        3 = system bus adapter,
+*    4 = local bus adapter,        5 = processor bus converter,
+*    6 = crossbar fabric connect,  7 = fabric interconnect,
+*    8 to 254 reserved,            255 = unknown.
+* Bit  8 to 15 - DVI
+* Bit 16 to 23 - IOC functions
+* Bit 24 to 39 - reserved
+* Bit 40 to 63 - mod_pages
+*    number of 4K pages a module occupies starting at conf_base_addr
+********************************************************************/
+#define PAT_GET_ENTITY(value)	(((value) >> 56) & 0xffUL)
+#define PAT_GET_DVI(value)	(((value) >> 48) & 0xffUL)
+#define PAT_GET_IOC(value)	(((value) >> 40) & 0xffUL)
+#define PAT_GET_MOD_PAGES(value) ((value) & 0xffffffUL)
+
+
+/*
+** PDC_PAT_CELL_GET_INFO return block
+*/
+/* Return block for PDC_PAT_CELL/PDC_PAT_CELL_GET_INFO ("info about Cell"). */
+typedef struct pdc_pat_cell_info_rtn_block {
+	unsigned long cpu_info;
+	unsigned long cell_info;
+	unsigned long cell_location;
+	unsigned long reo_location;
+	unsigned long mem_size;
+	unsigned long dimm_status;
+	unsigned long pdc_rev;
+	unsigned long fabric_info0;
+	unsigned long fabric_info1;
+	unsigned long fabric_info2;
+	unsigned long fabric_info3;
+	unsigned long reserved[21];
+} pdc_pat_cell_info_rtn_block_t;
+
+
+/* FIXME: mod[508] should really be a union of the various mod components */
+/* Module address block filled by pdc_pat_cell_module() for
+ * PDC_PAT_CELL/PDC_PAT_CELL_MODULE ("info about Module"). */
+/* FIXME: mod[508] should really be a union of the various mod components */
+struct pdc_pat_cell_mod_maddr_block {	/* PDC_PAT_CELL_MODULE */
+	unsigned long cba;		/* func 0 cfg space address */
+	unsigned long mod_info;		/* module information */
+	unsigned long mod_location;	/* physical location of the module */
+	struct hardware_path mod_path;	/* module path (device path - layers) */
+	unsigned long mod[508];		/* PAT cell module components */
+} __attribute__((aligned(8))) ;
+
+typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t;
+
+
+extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data);
+extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
+extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod, unsigned long view_type, void *mem_addr);
+extern int pdc_pat_cell_num_to_loc(void *, unsigned long);
+
+extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, unsigned long hpa);
+
+extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset);
+
+extern int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *val); 
+extern int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val); 
+
+extern int pdc_pat_mem_pdt_info(struct pdc_pat_mem_retinfo *rinfo);
+extern int pdc_pat_mem_pdt_cell_info(struct pdc_pat_mem_cell_pdt_retinfo *rinfo,
+		unsigned long cell);
+extern int pdc_pat_mem_read_cell_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
+		unsigned long *pdt_entries_ptr, unsigned long max_entries);
+extern int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
+		unsigned long *pdt_entries_ptr, unsigned long count,
+		unsigned long offset);
+extern int pdc_pat_mem_get_dimm_phys_location(
+                struct pdc_pat_mem_phys_mem_location *pret,
+                unsigned long phys_addr);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* ! __PARISC_PATPDC_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/perf.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/perf.h
new file mode 100644
index 0000000..2a5a60a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/perf.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PERF_H_
+#define _ASM_PERF_H_
+
+/* ioctls */
+#define PA_PERF_ON	_IO('p', 1)
+#define PA_PERF_OFF	_IOR('p', 2, unsigned int)
+#define PA_PERF_VERSION	_IOR('p', 3, int)
+
+#define PA_PERF_DEV	"perf"
+#define PA_PERF_MINOR	146
+
+/* Interface types */
+#define UNKNOWN_INTF    255
+#define ONYX_INTF         0
+#define CUDA_INTF         1
+
+/* Common Onyx and Cuda images */
+#define CPI                 0
+#define BUSUTIL             1
+#define TLBMISS             2
+#define TLBHANDMISS         3
+#define PTKN                4
+#define PNTKN               5
+#define IMISS               6
+#define DMISS               7
+#define DMISS_ACCESS        8 
+#define BIG_CPI 	    9
+#define BIG_LS		   10  
+#define BR_ABORT	   11
+#define ISNT		   12 
+#define QUADRANT           13
+#define RW_PDFET           14
+#define RW_WDFET           15
+#define SHLIB_CPI          16
+
+/* Cuda only Images */
+#define FLOPS              17
+#define CACHEMISS          18 
+#define BRANCHES           19             
+#define CRSTACK            20 
+#define I_CACHE_SPEC       21 
+#define MAX_CUDA_IMAGES    22 
+
+/* Onyx only Images */
+#define ADDR_INV_ABORT_ALU 17
+#define BRAD_STALL	   18 
+#define CNTL_IN_PIPEL	   19 
+#define DSNT_XFH	   20 
+#define FET_SIG1	   21 
+#define FET_SIG2	   22 
+#define G7_1		   23 
+#define G7_2		   24 
+#define G7_3 		   25
+#define G7_4		   26
+#define MPB_LABORT         27
+#define PANIC              28
+#define RARE_INST          29 
+#define RW_DFET            30 
+#define RW_IFET            31 
+#define RW_SDFET           32 
+#define SPEC_IFET          33 
+#define ST_COND0           34 
+#define ST_COND1           35 
+#define ST_COND2           36
+#define ST_COND3           37
+#define ST_COND4           38
+#define ST_UNPRED0         39 
+#define ST_UNPRED1         40 
+#define UNPRED             41 
+#define GO_STORE           42
+#define SHLIB_CALL         43
+#define MAX_ONYX_IMAGES    44
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/perf_event.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/perf_event.h
new file mode 100644
index 0000000..1e0fd8b
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/perf_event.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_PARISC_PERF_EVENT_H
+#define __ASM_PARISC_PERF_EVENT_H
+
+/* Empty, just to avoid compiling error */
+
+#endif /* __ASM_PARISC_PERF_EVENT_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/pgalloc.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/pgalloc.h
new file mode 100644
index 0000000..cf13275
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/pgalloc.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PGALLOC_H
+#define _ASM_PGALLOC_H
+
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/threads.h>
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+
+#include <asm/cache.h>
+
+/* Allocate the top level pgd (page directory)
+ *
+ * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
+ * allocate the first pmd adjacent to the pgd.  This means that we can
+ * subtract a constant offset to get to it.  The pmd and pgd sizes are
+ * arranged so that a single pmd covers 4GB (giving a full 64-bit
+ * process access to 8TB) so our lookups are effectively L2 for the
+ * first 4GB of the kernel (i.e. for all ILP32 processes and all the
+ * kernel for machines with under 4GB of memory) */
+/* Allocate a pgd (PGD_ALLOC_ORDER pages, zeroed).  With 3 page-table
+ * levels the first pmd lives at the front of this same allocation, so
+ * the pointer handed back to callers is PTRS_PER_PGD entries past the
+ * start of the allocation; pgd_free() undoes that offset.  Returns
+ * NULL on allocation failure. */
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
+					       PGD_ALLOC_ORDER);
+	pgd_t *actual_pgd = pgd;
+
+	if (likely(pgd != NULL)) {
+		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
+#if CONFIG_PGTABLE_LEVELS == 3
+		actual_pgd += PTRS_PER_PGD;
+		/* Populate first pmd with allocated memory.  We mark it
+		 * with PxD_FLAG_ATTACHED as a signal to the system that this
+		 * pmd entry may not be cleared. */
+		__pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT | 
+				        PxD_FLAG_VALID | 
+					PxD_FLAG_ATTACHED) 
+			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
+		/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
+		 * a signal that this pmd may not be freed */
+		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
+#endif
+	}
+	return actual_pgd;
+}
+
+/* Free a pgd from pgd_alloc().  With 3 levels the caller's pointer is
+ * PTRS_PER_PGD entries past the true start of the allocation (the
+ * embedded first pmd sits in front of it), so step back first. */
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+#if CONFIG_PGTABLE_LEVELS == 3
+	pgd -= PTRS_PER_PGD;
+#endif
+	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
+}
+
+#if CONFIG_PGTABLE_LEVELS == 3
+
+/* Three Level Page Table Support for pmd's */
+
+/* Install @pmd into the pgd slot: present/valid flag bits plus the
+ * pmd's physical address shifted into the PxD value field. */
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+	__pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
+		        (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
+}
+
+/* Allocate and zero PMD_ORDER pages for a pmd table; returns NULL on
+ * allocation failure. */
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
+	if (pmd)
+		memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
+	return pmd;
+}
+
+/* Free a pmd table — unless it is the permanent pmd embedded in the
+ * pgd allocation (marked PxD_FLAG_ATTACHED by pgd_alloc), which is
+ * owned by the pgd and must never be freed on its own. */
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
+		/*
+		 * This is the permanent pmd attached to the pgd;
+		 * cannot free it.
+		 * Increment the counter to compensate for the decrement
+		 * done by generic mm code.
+		 */
+		mm_inc_nr_pmds(mm);
+		return;
+	}
+	free_pages((unsigned long)pmd, PMD_ORDER);
+}
+
+#else
+
+/* Two Level Page Table Support for pmd's */
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+
+#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
+#define pmd_free(mm, x)			do { } while (0)
+#define pgd_populate(mm, pmd, pte)	BUG()
+
+#endif
+
+/* Point the pmd entry at the pte table @pte (physical address shifted
+ * by PxD_VALUE_SHIFT plus present/valid flags), preserving the
+ * PxD_FLAG_ATTACHED marker when this slot belongs to the permanent
+ * pmd set up by pgd_alloc(). */
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+#if CONFIG_PGTABLE_LEVELS == 3
+	/* preserve the gateway marker if this is the beginning of
+	 * the permanent pmd */
+	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
+				 PxD_FLAG_VALID |
+				 PxD_FLAG_ATTACHED) 
+			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
+	else
+#endif
+		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) 
+			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
+}
+
+#define pmd_populate(mm, pmd, pte_page) \
+	pmd_populate_kernel(mm, pmd, page_address(pte_page))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+/* Allocate one zeroed user pte page and run the page-table
+ * constructor on it.  Returns NULL if either the allocation or the
+ * ctor fails (the page is released on ctor failure). */
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+	if (!page)
+		return NULL;
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
+	return page;
+}
+
+/* Allocate one zeroed pte page for kernel mappings (no page-table
+ * ctor, unlike pte_alloc_one).  Returns NULL on failure. */
+static inline pte_t *
+pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
+{
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+	return pte;
+}
+
+/* Free a kernel pte page allocated by pte_alloc_one_kernel(). */
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+/* Undo the page-table ctor state, then free the user pte page
+ * (counterpart of pte_alloc_one). */
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+	pgtable_page_dtor(pte);
+	pte_free_kernel(mm, page_address(pte));
+}
+
+#define check_pgt_cache()	do { } while (0)
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/pgtable.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/pgtable.h
new file mode 100644
index 0000000..fa6b7c7
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/pgtable.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_PGTABLE_H
+#define _PARISC_PGTABLE_H
+
+#include <asm-generic/4level-fixup.h>
+
+#include <asm/fixmap.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * we simulate an x86-style page table for the linux mm code
+ */
+
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/mm_types.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+
+extern spinlock_t pa_tlb_lock;
+
+/*
+ * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+ * memory.  For the return value to be meaningful, ADDR must be >=
+ * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
+ * require a hash-, or multi-level tree-lookup or something of that
+ * sort) but it guarantees to return TRUE only if accessing the page
+ * at that address does not cause an error.  Note that there may be
+ * addresses for which kern_addr_valid() returns FALSE even though an
+ * access would not cause an error (e.g., this is typically true for
+ * memory mapped I/O regions.
+ *
+ * XXX Need to implement this for parisc.
+ */
+#define kern_addr_valid(addr)	(1)
+
+/* Purge data and instruction TLB entries.  Must be called holding
+ * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
+ * machines since the purge must be broadcast to all CPUs.
+ */
+
+/* Purge the TLB entry for @addr in @mm's address space.  Caller must
+ * hold pa_tlb_lock (see the comment block above). */
+static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+{
+	mtsp(mm->context, 1);		/* select the mm's space id in sr1 */
+	pdtlb(addr);			/* purge the data TLB entry */
+	if (unlikely(split_tlb))
+		pitlb(addr);		/* split I/D TLB: purge the I-TLB too */
+}
+
+/* Certain architectures need to do special things when PTEs
+ * within a page table are directly modified.  Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval)                                 \
+        do{                                                     \
+                *(pteptr) = (pteval);                           \
+        } while(0)
+
+#define pte_inserted(x)						\
+	((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED))		\
+	 == (_PAGE_PRESENT|_PAGE_ACCESSED))
+
+#define set_pte_at(mm, addr, ptep, pteval)			\
+	do {							\
+		pte_t old_pte;					\
+		unsigned long flags;				\
+		spin_lock_irqsave(&pa_tlb_lock, flags);		\
+		old_pte = *ptep;				\
+		if (pte_inserted(old_pte))			\
+			purge_tlb_entries(mm, addr);		\
+		set_pte(ptep, pteval);				\
+		spin_unlock_irqrestore(&pa_tlb_lock, flags);	\
+	} while (0)
+
+#endif /* !__ASSEMBLY__ */
+
+#include <asm/page.h>
+
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
+
+/* This is the size of the initially mapped kernel memory */
+#if defined(CONFIG_64BIT)
+#define KERNEL_INITIAL_ORDER	26	/* 1<<26 = 64MB */
+#else
+#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
+#endif
+#define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)
+
+#if CONFIG_PGTABLE_LEVELS == 3
+#define PGD_ORDER	1 /* Number of pages per pgd */
+#define PMD_ORDER	1 /* Number of pages per pmd */
+#define PGD_ALLOC_ORDER	2 /* first pgd contains pmd */
+#else
+#define PGD_ORDER	1 /* Number of pages per pgd */
+#define PGD_ALLOC_ORDER	PGD_ORDER
+#endif
+
+/* Definitions for 3rd level (we use PLD here for Page Lower directory
+ * because PTE_SHIFT is used lower down to mean shift that has to be
+ * done to get usable bits out of the PTE) */
+#define PLD_SHIFT	PAGE_SHIFT
+#define PLD_SIZE	PAGE_SIZE
+#define BITS_PER_PTE	(PAGE_SHIFT - BITS_PER_PTE_ENTRY)
+#define PTRS_PER_PTE    (1UL << BITS_PER_PTE)
+
+/* Definitions for 2nd level */
+#define pgtable_cache_init()	do { } while (0)
+
+#define PMD_SHIFT       (PLD_SHIFT + BITS_PER_PTE)
+#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE-1))
+#if CONFIG_PGTABLE_LEVELS == 3
+#define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
+#else
+#define __PAGETABLE_PMD_FOLDED
+#define BITS_PER_PMD	0
+#endif
+#define PTRS_PER_PMD    (1UL << BITS_PER_PMD)
+
+/* Definitions for 1st level */
+#define PGDIR_SHIFT	(PMD_SHIFT + BITS_PER_PMD)
+#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
+#define BITS_PER_PGD	(BITS_PER_LONG - PGDIR_SHIFT)
+#else
+#define BITS_PER_PGD	(PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
+#endif
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+#define PTRS_PER_PGD    (1UL << BITS_PER_PGD)
+#define USER_PTRS_PER_PGD       PTRS_PER_PGD
+
+#ifdef CONFIG_64BIT
+#define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
+#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
+#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)
+#else
+#define MAX_ADDRBITS	(BITS_PER_LONG)
+#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
+#define SPACEID_SHIFT	0
+#endif
+
+/* This calculates the number of initial pages we need for the initial
+ * page tables */
+#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
+# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
+#else
+# define PT_INITIAL	(1)  /* all initial PTEs fit into one page */
+#endif
+
+/*
+ * pgd entries used up by user/kernel:
+ */
+
+#define FIRST_USER_ADDRESS	0UL
+
+/* NB: The tlb miss handlers make certain assumptions about the order */
+/*     of the following bits, so be careful (One example, bits 25-31  */
+/*     are moved together in one instruction).                        */
+
+#define _PAGE_READ_BIT     31   /* (0x001) read access allowed */
+#define _PAGE_WRITE_BIT    30   /* (0x002) write access allowed */
+#define _PAGE_EXEC_BIT     29   /* (0x004) execute access allowed */
+#define _PAGE_GATEWAY_BIT  28   /* (0x008) privilege promotion allowed */
+#define _PAGE_DMB_BIT      27   /* (0x010) Data Memory Break enable (B bit) */
+#define _PAGE_DIRTY_BIT    26   /* (0x020) Page Dirty (D bit) */
+#define _PAGE_REFTRAP_BIT  25   /* (0x040) Page Ref. Trap enable (T bit) */
+#define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
+#define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
+#define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
+#define _PAGE_HPAGE_BIT    21   /* (0x400) Software: Huge Page */
+#define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */
+
+/* N.B. The bits are defined in terms of a 32 bit word above, so the */
+/*      following macro is ok for both 32 and 64 bit.                */
+
+#define xlate_pabit(x) (31 - x)
+
+/* this defines the shift to the usable bits in the PTE it is set so
+ * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
+ * to zero */
+#define PTE_SHIFT	   	xlate_pabit(_PAGE_USER_BIT)
+
+/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
+#define PFN_PTE_SHIFT		12
+
+#define _PAGE_READ     (1 << xlate_pabit(_PAGE_READ_BIT))
+#define _PAGE_WRITE    (1 << xlate_pabit(_PAGE_WRITE_BIT))
+#define _PAGE_RW       (_PAGE_READ | _PAGE_WRITE)
+#define _PAGE_EXEC     (1 << xlate_pabit(_PAGE_EXEC_BIT))
+#define _PAGE_GATEWAY  (1 << xlate_pabit(_PAGE_GATEWAY_BIT))
+#define _PAGE_DMB      (1 << xlate_pabit(_PAGE_DMB_BIT))
+#define _PAGE_DIRTY    (1 << xlate_pabit(_PAGE_DIRTY_BIT))
+#define _PAGE_REFTRAP  (1 << xlate_pabit(_PAGE_REFTRAP_BIT))
+#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
+#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
+#define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
+#define _PAGE_HUGE     (1 << xlate_pabit(_PAGE_HPAGE_BIT))
+#define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))
+
+#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |  _PAGE_DIRTY | _PAGE_ACCESSED)
+#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXEC)
+#define _PAGE_KERNEL_RWX	(_PAGE_KERNEL_EXEC | _PAGE_WRITE)
+#define _PAGE_KERNEL		(_PAGE_KERNEL_RO | _PAGE_WRITE)
+
+/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
+ * are page-aligned, we don't care about the PAGE_OFFSET bits, except
+ * for a few meta-information bits, so we shift the address to be
+ * able to effectively address 40/42/44-bits of physical address space
+ * depending on 4k/16k/64k PAGE_SIZE */
+#define _PxD_PRESENT_BIT   31
+#define _PxD_ATTACHED_BIT  30
+#define _PxD_VALID_BIT     29
+
+#define PxD_FLAG_PRESENT  (1 << xlate_pabit(_PxD_PRESENT_BIT))
+#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
+#define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
+#define PxD_FLAG_MASK     (0xf)
+#define PxD_FLAG_SHIFT    (4)
+#define PxD_VALUE_SHIFT   (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
+/* Others seem to make this executable, I don't know if that's correct
+   or not.  The stack is mapped this way though so this is necessary
+   in the short term - dhd@linuxcare.com, 2000-08-08 */
+#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
+#define PAGE_WRITEONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)
+#define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+#define PAGE_COPY       PAGE_EXECREAD
+#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
+#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
+#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
+#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+#define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
+
+
+/*
+ * We could have an execute only page using "gateway - promote to priv
+ * level 3", but that is kind of silly. So, the way things are defined
+ * now, we must always have read permission for pages with execute
+ * permission. For the fun of it we'll go ahead and support write only
+ * pages.
+ */
+
+	 /*xwr*/
+#define __P000  PAGE_NONE
+#define __P001  PAGE_READONLY
+#define __P010  __P000 /* copy on write */
+#define __P011  __P001 /* copy on write */
+#define __P100  PAGE_EXECREAD
+#define __P101  PAGE_EXECREAD
+#define __P110  __P100 /* copy on write */
+#define __P111  __P101 /* copy on write */
+
+#define __S000  PAGE_NONE
+#define __S001  PAGE_READONLY
+#define __S010  PAGE_WRITEONLY
+#define __S011  PAGE_SHARED
+#define __S100  PAGE_EXECREAD
+#define __S101  PAGE_EXECREAD
+#define __S110  PAGE_RWX
+#define __S111  PAGE_RWX
+
+
+extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */
+
+/* initial page tables for 0-8MB for kernel */
+
+extern pte_t pg0[];
+
+/* zero page used for uninitialized stuff */
+
+extern unsigned long *empty_zero_page;
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#define pte_none(x)     (pte_val(x) == 0)
+#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, xp)  set_pte_at(mm, addr, xp, __pte(0))
+
+#define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
+#define pmd_address(x)	((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
+#define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
+#define pgd_address(x)	((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
+
+#if CONFIG_PGTABLE_LEVELS == 3
+/* The first entry of the permanent pmd is not there if it contains
+ * the gateway marker */
+#define pmd_none(x)	(!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
+#else
+#define pmd_none(x)	(!pmd_val(x))
+#endif
+#define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
+#define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
+/* Clear a pmd entry.  The entry pointing at the permanent pmd
+ * (PxD_FLAG_ATTACHED, set up by pgd_alloc) keeps its marker instead
+ * of being zeroed, so the attachment survives. */
+static inline void pmd_clear(pmd_t *pmd) {
+#if CONFIG_PGTABLE_LEVELS == 3
+	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+		/* This is the entry pointing to the permanent pmd
+		 * attached to the pgd; cannot clear it */
+		__pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
+	else
+#endif
+		__pmd_val_set(*pmd,  0);
+}
+
+
+
+#if CONFIG_PGTABLE_LEVELS == 3
+#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
+#define pgd_page(pgd)	virt_to_page((void *)pgd_page_vaddr(pgd))
+
+/* For 64 bit we have three level tables */
+
+#define pgd_none(x)     (!pgd_val(x))
+#define pgd_bad(x)      (!(pgd_flag(x) & PxD_FLAG_VALID))
+#define pgd_present(x)  (pgd_flag(x) & PxD_FLAG_PRESENT)
+/* Clear a pgd entry, except the one holding the permanent attached
+ * pmd, which is left untouched.
+ * NOTE(review): the inner #if below is always true here — this whole
+ * function is already inside the CONFIG_PGTABLE_LEVELS == 3 branch
+ * opened above; kept as-is to match upstream. */
+static inline void pgd_clear(pgd_t *pgd) {
+#if CONFIG_PGTABLE_LEVELS == 3
+	if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
+		/* This is the permanent pmd attached to the pgd; cannot
+		 * free it */
+		return;
+#endif
+	__pgd_val_set(*pgd, 0);
+}
+#else
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd)		{ return 0; }
+static inline int pgd_bad(pgd_t pgd)		{ return 0; }
+static inline int pgd_present(pgd_t pgd)	{ return 1; }
+static inline void pgd_clear(pgd_t * pgdp)	{ }
+#endif
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+/* Tests on the software-defined pte bits; pte_special is unsupported
+ * and always false on parisc. */
+static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_special(pte_t pte)	{ return 0; }
+
+/* Modifiers: each returns a copy of @pte with the bit cleared/set;
+ * the pte itself is passed and returned by value. */
+static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
+
+/*
+ * Huge pte definitions.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+#define pte_huge(pte)           (pte_val(pte) & _PAGE_HUGE)
+#define pte_mkhuge(pte)         (__pte(pte_val(pte) | \
+				 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
+#else
+#define pte_huge(pte)           (0)
+#define pte_mkhuge(pte)         (pte)
+#endif
+
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define __mk_pte(addr,pgprot) \
+({									\
+	pte_t __pte;							\
+									\
+	pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));	\
+									\
+	__pte;								\
+})
+
+#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
+
+/* Build a pte from a page frame number and protection bits. */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+	pte_t pte;
+	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
+	return pte;
+}
+
+/* Replace the protection bits of @pte with @newprot, keeping the pfn
+ * plus the accessed/dirty bits (_PAGE_CHG_MASK). */
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
+
+/* Permanent address of a page.  On parisc we don't have highmem. */
+
+#define pte_pfn(x)		(pte_val(x) >> PFN_PTE_SHIFT)
+
+#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
+
+#define pmd_page_vaddr(pmd)	((unsigned long) __va(pmd_address(pmd)))
+
+#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
+#define pmd_page(pmd)	virt_to_page((void *)__pmd_page(pmd))
+
+#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+
+/* to find an entry in a page-table-directory */
+#define pgd_offset(mm, address) \
+((mm)->pgd + ((address) >> PGDIR_SHIFT))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* Find an entry in the second-level page table.. */
+
+#if CONFIG_PGTABLE_LEVELS == 3
+#define pmd_index(addr)         (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+#define pmd_offset(dir,address) \
+((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
+#else
+#define pmd_offset(dir,addr) ((pmd_t *) dir)
+#endif
+
+/* Find an entry in the third-level page table.. */ 
+#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
+#define pte_offset_kernel(pmd, address) \
+	((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
+#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
+#define pte_unmap(pte) do { } while (0)
+
+#define pte_unmap(pte)			do { } while (0)
+#define pte_unmap_nested(pte)		do { } while (0)
+
+extern void paging_init (void);
+
+/* Used for deferring calls to flush_dcache_page() */
+
+#define PG_dcache_dirty         PG_arch_1
+
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
+
+/* Encode and de-code a swap entry */
+
+#define __swp_type(x)                     ((x).val & 0x1f)
+#define __swp_offset(x)                   ( (((x).val >> 6) &  0x7) | \
+					  (((x).val >> 8) & ~0x7) )
+#define __swp_entry(type, offset)         ((swp_entry_t) { (type) | \
+					    ((offset &  0x7) << 6) | \
+					    ((offset & ~0x7) << 8) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
+
+/* Test and clear the software accessed ("young") bit of *@ptep.
+ * Fast path: if the pte is already old, return 0 without taking the
+ * lock; otherwise re-check under pa_tlb_lock (the bit may have been
+ * cleared in between), purge the TLB entry and age the pte.
+ * Returns 1 iff the bit was set and has been cleared. */
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+	pte_t pte;
+	unsigned long flags;
+
+	if (!pte_young(*ptep))
+		return 0;
+
+	spin_lock_irqsave(&pa_tlb_lock, flags);
+	pte = *ptep;
+	if (!pte_young(pte)) {
+		spin_unlock_irqrestore(&pa_tlb_lock, flags);
+		return 0;
+	}
+	purge_tlb_entries(vma->vm_mm, addr);
+	set_pte(ptep, pte_mkold(pte));
+	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+	return 1;
+}
+
+struct mm_struct;
+/* Under pa_tlb_lock: read the old pte, purge its TLB entry if it was
+ * ever inserted (present + accessed, see pte_inserted), then clear
+ * the pte.  Returns the old pte value. */
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	pte_t old_pte;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pa_tlb_lock, flags);
+	old_pte = *ptep;
+	if (pte_inserted(old_pte))
+		purge_tlb_entries(mm, addr);
+	set_pte(ptep, __pte(0));
+	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+
+	return old_pte;
+}
+
+/* Remove write permission from *@ptep.  Done under pa_tlb_lock with
+ * an unconditional TLB purge so no stale writable translation
+ * survives. */
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&pa_tlb_lock, flags);
+	purge_tlb_entries(mm, addr);
+	set_pte(ptep, pte_wrprotect(*ptep));
+	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+}
+
+#define pte_same(A,B)	(pte_val(A) == pte_val(B))
+
+struct seq_file;
+extern void arch_report_meminfo(struct seq_file *m);
+
+#endif /* !__ASSEMBLY__ */
+
+
+/* TLB page size encoding - see table 3-1 in parisc20.pdf */
+#define _PAGE_SIZE_ENCODING_4K		0
+#define _PAGE_SIZE_ENCODING_16K		1
+#define _PAGE_SIZE_ENCODING_64K		2
+#define _PAGE_SIZE_ENCODING_256K	3
+#define _PAGE_SIZE_ENCODING_1M		4
+#define _PAGE_SIZE_ENCODING_4M		5
+#define _PAGE_SIZE_ENCODING_16M		6
+#define _PAGE_SIZE_ENCODING_64M		7
+
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
+#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
+#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
+#endif
+
+
+#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
+
+/* We provide our own get_unmapped_area to provide cache coherency */
+
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTE_SAME
+#include <asm-generic/pgtable.h>
+
+#endif /* _PARISC_PGTABLE_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/prefetch.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/prefetch.h
new file mode 100644
index 0000000..6e63f72
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/prefetch.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/asm-parisc/prefetch.h
+ *
+ * PA 2.0 defines data prefetch instructions on page 6-11 of the Kane book.
+ * In addition, many implementations do hardware prefetching of both
+ * instructions and data.
+ *
+ * PA7300LC (page 14-4 of the ERS) also implements prefetching by a load
+ * to gr0 but not in a way that Linux can use.  If the load would cause an
+ * interruption (eg due to prefetching 0), it is suppressed on PA2.0
+ * processors, but not on 7300LC.
+ *
+ */
+
+#ifndef __ASM_PARISC_PREFETCH_H
+#define __ASM_PARISC_PREFETCH_H
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_PREFETCH
+
+#define ARCH_HAS_PREFETCH
+/* Prefetch @addr by loading it into %r0 (the hardwired zero
+ * register), so the load has no architectural effect beyond warming
+ * the cache.  Per the file header, a faulting prefetch is suppressed
+ * on PA2.0 but not on the 7300LC, hence the extra NULL guard. */
+static inline void prefetch(const void *addr)
+{
+	__asm__(
+#ifndef CONFIG_PA20
+		/* Need to avoid prefetch of NULL on PA7300LC */
+		"	extrw,u,= %0,31,32,%%r0\n"
+#endif
+		"	ldw 0(%0), %%r0" : : "r" (addr));
+}
+
+/* LDD is a PA2.0 addition. */
+#ifdef CONFIG_PA20
+#define ARCH_HAS_PREFETCHW
+/* Prefetch for write: a doubleword load to %r0.  LDD is PA2.0-only,
+ * which is why this is gated on CONFIG_PA20 above. */
+static inline void prefetchw(const void *addr)
+{
+	__asm__("ldd 0(%0), %%r0" : : "r" (addr));
+}
+#endif /* CONFIG_PA20 */
+
+#endif /* CONFIG_PREFETCH */
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_PARISC_PREFETCH_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/processor.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/processor.h
new file mode 100644
index 0000000..2dbe558
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/processor.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/asm-parisc/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ * Copyright (C) 2001 Grant Grundler
+ */
+
+#ifndef __ASM_PARISC_PROCESSOR_H
+#define __ASM_PARISC_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+#include <linux/threads.h>
+
+#include <asm/prefetch.h>
+#include <asm/hardware.h>
+#include <asm/pdc.h>
+#include <asm/ptrace.h>
+#include <asm/types.h>
+#include <asm/percpu.h>
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#ifdef CONFIG_PA20
+#define current_ia(x)	__asm__("mfia %0" : "=r"(x))
+#else /* mfia added in pa2.0 */
+#define current_ia(x)	__asm__("blr 0,%0\n\tnop" : "=r"(x))
+#endif
+#define current_text_addr() ({ void *pc; current_ia(pc); pc; })
+
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+
+#define TASK_SIZE_OF(tsk)       ((tsk)->thread.task_size)
+#define TASK_SIZE	        TASK_SIZE_OF(current)
+#define TASK_UNMAPPED_BASE      (current->thread.map_base)
+
+#define DEFAULT_TASK_SIZE32	(0xFFF00000UL)
+#define DEFAULT_MAP_BASE32	(0x40000000UL)
+
+#ifdef CONFIG_64BIT
+#define DEFAULT_TASK_SIZE       (MAX_ADDRESS-0xf000000)
+#define DEFAULT_MAP_BASE        (0x200000000UL)
+#else
+#define DEFAULT_TASK_SIZE	DEFAULT_TASK_SIZE32
+#define DEFAULT_MAP_BASE	DEFAULT_MAP_BASE32
+#endif
+
+#ifdef __KERNEL__
+
+/* XXX: STACK_TOP actually should be STACK_BOTTOM for parisc.
+ * prumpf */
+
+#define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	DEFAULT_TASK_SIZE
+
+/* Allow bigger stacks for 64-bit processes */
+#define STACK_SIZE_MAX	(USER_WIDE_MODE					\
+			 ? (1 << 30)	/* 1 GB */			\
+			 : (CONFIG_MAX_STACK_SIZE_MB*1024*1024))
+
+#endif
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Data detected about CPUs at boot time which is the same for all CPU's.
+ * HP boxes are SMP - ie identical processors.
+ *
+ * FIXME: some CPU rev info may be processor specific...
+ */
+struct system_cpuinfo_parisc {
+	unsigned int	cpu_count;
+	unsigned int	cpu_hz;
+	unsigned int	hversion;
+	unsigned int	sversion;
+	enum cpu_type	cpu_type;
+
+	struct {
+		struct pdc_model model;
+		unsigned long versions;
+		unsigned long cpuid;
+		unsigned long capabilities;
+		char   sys_model_name[81]; /* PDC-ROM returns this model name */
+	} pdc;
+
+	const char	*cpu_name;	/* e.g. "PA7300LC (PCX-L2)" */
+	const char	*family_name;	/* e.g. "1.1e" */
+};
+
+
+/* Per CPU data structure - ie varies per CPU.  */
+struct cpuinfo_parisc {
+	unsigned long it_value;     /* Interval Timer at last timer Intr */
+	unsigned long irq_count;    /* number of IRQ's since boot */
+	unsigned long cpuid;        /* aka slot_number or set to NO_PROC_ID */
+	unsigned long hpa;          /* Host Physical address */
+	unsigned long txn_addr;     /* MMIO addr of EIR or id_eid */
+#ifdef CONFIG_SMP
+	unsigned long pending_ipi;  /* bitmap of type ipi_message_type */
+#endif
+	unsigned long bh_count;     /* number of times bh was invoked */
+	unsigned long fp_rev;
+	unsigned long fp_model;
+	unsigned long cpu_num;      /* CPU number from PAT firmware */
+	unsigned long cpu_loc;      /* CPU location from PAT firmware */
+	unsigned int state;
+	struct parisc_device *dev;
+	unsigned long loops_per_jiffy;
+};
+
+extern struct system_cpuinfo_parisc boot_cpu_data;
+DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data);
+
+#define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
+
+typedef struct {
+	int seg;  
+} mm_segment_t;
+
+#define ARCH_MIN_TASKALIGN	8
+
+struct thread_struct {
+	struct pt_regs regs;
+	unsigned long  task_size;
+	unsigned long  map_base;
+	unsigned long  flags;
+}; 
+
+#define task_pt_regs(tsk) ((struct pt_regs *)&((tsk)->thread.regs))
+
+/* Thread struct flags. */
+#define PARISC_UAC_NOPRINT	(1UL << 0)	/* see prctl and unaligned.c */
+#define PARISC_UAC_SIGBUS	(1UL << 1)
+#define PARISC_KERNEL_DEATH	(1UL << 31)	/* see die_if_kernel()... */
+
+#define PARISC_UAC_SHIFT	0
+#define PARISC_UAC_MASK		(PARISC_UAC_NOPRINT|PARISC_UAC_SIGBUS)
+
+#define SET_UNALIGN_CTL(task,value)                                       \
+        ({                                                                \
+        (task)->thread.flags = (((task)->thread.flags & ~PARISC_UAC_MASK) \
+                                | (((value) << PARISC_UAC_SHIFT) &        \
+                                   PARISC_UAC_MASK));                     \
+        0;                                                                \
+        })
+
+#define GET_UNALIGN_CTL(task,addr)                                        \
+        ({                                                                \
+        put_user(((task)->thread.flags & PARISC_UAC_MASK)                 \
+                 >> PARISC_UAC_SHIFT, (int __user *) (addr));             \
+        })
+
+#define INIT_THREAD { \
+	.regs = {	.gr	= { 0, }, \
+			.fr	= { 0, }, \
+			.sr	= { 0, }, \
+			.iasq	= { 0, }, \
+			.iaoq	= { 0, }, \
+			.cr27	= 0, \
+		}, \
+	.task_size	= DEFAULT_TASK_SIZE, \
+	.map_base	= DEFAULT_MAP_BASE, \
+	.flags		= 0 \
+	}
+
+struct task_struct;
+void show_trace(struct task_struct *task, unsigned long *stack);
+
+/*
+ * Start user thread in another space.
+ *
+ * Note that we set both the iaoq and r31 to the new pc. When
+ * the kernel initially calls execve it will return through an
+ * rfi path that will use the values in the iaoq. The execve
+ * syscall path will return through the gateway page, and
+ * that uses r31 to branch to.
+ *
+ * For ELF we clear r23, because the dynamic linker uses it to pass
+ * the address of the finalizer function.
+ *
+ * We also initialize sr3 to an illegal value (illegal for our
+ * implementation, not for the architecture).
+ */
+typedef unsigned int elf_caddr_t;
+
+/* The ELF abi wants things done a "wee bit" differently than
+ * som does.  Supporting this behavior here avoids
+ * having our own version of create_elf_tables.
+ *
+ * Oh, and yes, that is not a typo, we are really passing argc in r25
+ * and argv in r24 (rather than r26 and r25).  This is because that's
+ * where __libc_start_main wants them.
+ *
+ * Duplicated from dl-machine.h for the benefit of readers:
+ *
+ *  Our initial stack layout is rather different from everyone else's
+ *  due to the unique PA-RISC ABI.  As far as I know it looks like
+ *  this:
+
+   -----------------------------------  (user startup code creates this frame)
+   |         32 bytes of magic       |
+   |---------------------------------|
+   | 32 bytes argument/sp save area  |
+   |---------------------------------| (bprm->p)
+   |	    ELF auxiliary info	     |
+   |         (up to 28 words)        |
+   |---------------------------------|
+   |		   NULL		     |
+   |---------------------------------|
+   |	   Environment pointers	     |
+   |---------------------------------|
+   |		   NULL		     |
+   |---------------------------------|
+   |        Argument pointers        |
+   |---------------------------------| <- argv
+   |          argc (1 word)          |
+   |---------------------------------| <- bprm->exec (HACK!)
+   |         N bytes of slack        |
+   |---------------------------------|
+   |	filename passed to execve    |
+   |---------------------------------| (mm->env_end)
+   |           env strings           |
+   |---------------------------------| (mm->env_start, mm->arg_end)
+   |           arg strings           |
+   |---------------------------------|
+   | additional faked arg strings if |
+   | we're invoked via binfmt_script |
+   |---------------------------------| (mm->arg_start)
+   stack base is at TASK_SIZE - rlim_max.
+
+on downward growing arches, it looks like this:
+   stack base at TASK_SIZE
+   | filename passed to execve
+   | env strings
+   | arg strings
+   | faked arg strings
+   | slack
+   | ELF
+   | envps
+   | argvs
+   | argc
+
+ *  The pleasant part of this is that if we need to skip arguments we
+ *  can just decrement argc and move argv, because the stack pointer
+ *  is utterly unrelated to the location of the environment and
+ *  argument vectors.
+ *
+ * Note that the S/390 people took the easy way out and hacked their
+ * GCC to make the stack grow downwards.
+ *
+ * Final Note: For entry from syscall, the W (wide) bit of the PSW
+ * is stuffed into the lowest bit of the user sp (%r30), so we fill
+ * it in here from the current->personality
+ */
+
+#ifdef CONFIG_64BIT
+#define USER_WIDE_MODE	(!test_thread_flag(TIF_32BIT))
+#else
+#define USER_WIDE_MODE	0
+#endif
+
+#define start_thread(regs, new_pc, new_sp) do {		\
+	elf_addr_t *sp = (elf_addr_t *)new_sp;		\
+	__u32 spaceid = (__u32)current->mm->context;	\
+	elf_addr_t pc = (elf_addr_t)new_pc | 3;		\
+	elf_caddr_t *argv = (elf_caddr_t *)bprm->exec + 1;	\
+							\
+	regs->iasq[0] = spaceid;			\
+	regs->iasq[1] = spaceid;			\
+	regs->iaoq[0] = pc;				\
+	regs->iaoq[1] = pc + 4;                         \
+	regs->sr[2] = LINUX_GATEWAY_SPACE;              \
+	regs->sr[3] = 0xffff;				\
+	regs->sr[4] = spaceid;				\
+	regs->sr[5] = spaceid;				\
+	regs->sr[6] = spaceid;				\
+	regs->sr[7] = spaceid;				\
+	regs->gr[ 0] = USER_PSW | (USER_WIDE_MODE ? PSW_W : 0); \
+	regs->fr[ 0] = 0LL;                            	\
+	regs->fr[ 1] = 0LL;                            	\
+	regs->fr[ 2] = 0LL;                            	\
+	regs->fr[ 3] = 0LL;                            	\
+	regs->gr[30] = (((unsigned long)sp + 63) &~ 63) | (USER_WIDE_MODE ? 1 : 0); \
+	regs->gr[31] = pc;				\
+							\
+	get_user(regs->gr[25], (argv - 1));		\
+	regs->gr[24] = (long) argv;			\
+	regs->gr[23] = 0;				\
+} while(0)
+
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk)	((tsk)->thread.regs.iaoq[0])
+#define KSTK_ESP(tsk)	((tsk)->thread.regs.gr[30])
+
+#define cpu_relax()	barrier()
+
+/*
+ * parisc_requires_coherency() is used to identify the combined VIPT/PIPT
+ * cached CPUs which require a guarantee of coherency (no inequivalent aliases
+ * with different data, whether clean or not) to operate
+ */
+#ifdef CONFIG_PA8X00
+extern int _parisc_requires_coherency;
+#define parisc_requires_coherency()	_parisc_requires_coherency
+#else
+#define parisc_requires_coherency()	(0)
+#endif
+
+extern int running_on_qemu;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_PARISC_PROCESSOR_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/psw.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/psw.h
new file mode 100644
index 0000000..76c3011
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/psw.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_PSW_H
+#define _PARISC_PSW_H
+
+#define	PSW_I	0x00000001
+#define	PSW_D	0x00000002
+#define	PSW_P	0x00000004
+#define	PSW_Q	0x00000008
+
+#define	PSW_R	0x00000010
+#define	PSW_F	0x00000020
+#define	PSW_G	0x00000040	/* PA1.x only */
+#define PSW_O	0x00000080	/* PA2.0 only */
+
+/* ssm/rsm instructions number PSW_W and PSW_E differently */
+#define PSW_SM_I	PSW_I	/* Enable External Interrupts */
+#define PSW_SM_D	PSW_D
+#define PSW_SM_P	PSW_P
+#define PSW_SM_Q	PSW_Q	/* Enable Interrupt State Collection */
+#define PSW_SM_R	PSW_R	/* Enable Recover Counter Trap */
+#define PSW_SM_W	0x200	/* PA2.0 only : Enable Wide Mode */
+
+#define PSW_SM_QUIET	PSW_SM_R+PSW_SM_Q+PSW_SM_P+PSW_SM_D+PSW_SM_I
+
+#define PSW_CB	0x0000ff00
+
+#define	PSW_M	0x00010000
+#define	PSW_V	0x00020000
+#define	PSW_C	0x00040000
+#define	PSW_B	0x00080000
+
+#define	PSW_X	0x00100000
+#define	PSW_N	0x00200000
+#define	PSW_L	0x00400000
+#define	PSW_H	0x00800000
+
+#define	PSW_T	0x01000000
+#define	PSW_S	0x02000000
+#define	PSW_E	0x04000000
+#define PSW_W	0x08000000	/* PA2.0 only */
+#define PSW_W_BIT       36      /* PA2.0 only */
+
+#define	PSW_Z	0x40000000	/* PA1.x only */
+#define	PSW_Y	0x80000000	/* PA1.x only */
+
+#ifdef CONFIG_64BIT
+#  define PSW_HI_CB 0x000000ff    /* PA2.0 only */
+#endif
+
+#ifdef CONFIG_64BIT
+#  define USER_PSW_HI_MASK	PSW_HI_CB
+#  define WIDE_PSW		PSW_W
+#else 
+#  define WIDE_PSW		0
+#endif
+
+/* Used when setting up for rfi */
+#define KERNEL_PSW    (WIDE_PSW | PSW_C | PSW_Q | PSW_P | PSW_D)
+#define REAL_MODE_PSW (WIDE_PSW | PSW_Q)
+#define USER_PSW_MASK (WIDE_PSW | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
+#define USER_PSW      (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
+
+#ifndef __ASSEMBLY__
+
+/* The program status word as bitfields.  */
+struct pa_psw {
+	unsigned int y:1;
+	unsigned int z:1;
+	unsigned int rv:2;
+	unsigned int w:1;
+	unsigned int e:1;
+	unsigned int s:1;
+	unsigned int t:1;
+
+	unsigned int h:1;
+	unsigned int l:1;
+	unsigned int n:1;
+	unsigned int x:1;
+	unsigned int b:1;
+	unsigned int c:1;
+	unsigned int v:1;
+	unsigned int m:1;
+
+	unsigned int cb:8;
+
+	unsigned int o:1;
+	unsigned int g:1;
+	unsigned int f:1;
+	unsigned int r:1;
+	unsigned int q:1;
+	unsigned int p:1;
+	unsigned int d:1;
+	unsigned int i:1;
+};
+
+#ifdef CONFIG_64BIT
+#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW + 4))
+#else
+#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW))
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _PARISC_PSW_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/ptrace.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/ptrace.h
new file mode 100644
index 0000000..c8f70f9
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/ptrace.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* written by Philipp Rumpf, Copyright (C) 1999 SuSE GmbH Nuernberg
+** Copyright (C) 2000 Grant Grundler, Hewlett-Packard
+*/
+#ifndef _PARISC_PTRACE_H
+#define _PARISC_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+
+#define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS))
+
+#define arch_has_single_step()	1
+#define arch_has_block_step()	1
+
+/* XXX should we use iaoq[1] or iaoq[0] ? */
+#define user_mode(regs)			(((regs)->iaoq[0] & 3) ? 1 : 0)
+#define user_space(regs)		(((regs)->iasq[1] != 0) ? 1 : 0)
+#define instruction_pointer(regs)	((regs)->iaoq[0] & ~3)
+#define user_stack_pointer(regs)	((regs)->gr[30])
+unsigned long profile_pc(struct pt_regs *);
+
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+	return regs->gr[28];
+}
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/ropes.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/ropes.h
new file mode 100644
index 0000000..8e51c77
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/ropes.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_ROPES_H_
+#define _ASM_PARISC_ROPES_H_
+
+#include <asm/parisc-device.h>
+
+#ifdef CONFIG_64BIT
+/* "low end" PA8800 machines use ZX1 chipset: PAT PDC and only run 64-bit */
+#define ZX1_SUPPORT
+#endif
+
+#ifdef CONFIG_PROC_FS
+/* depends on proc fs support. But costs CPU performance */
+#undef SBA_COLLECT_STATS
+#endif
+
+/*
+** The number of pdir entries to "free" before issuing
+** a read to PCOM register to flush out PCOM writes.
+** Interacts with allocation granularity (ie 4 or 8 entries
+** allocated and free'd/purged at a time might make this
+** less interesting).
+*/
+#define DELAYED_RESOURCE_CNT	16
+
+#define MAX_IOC		2	/* per Ike. Pluto/Astro only have 1. */
+#define ROPES_PER_IOC	8	/* per Ike half or Pluto/Astro */
+
+struct ioc {
+	void __iomem	*ioc_hpa;	/* I/O MMU base address */
+	char		*res_map;	/* resource map, bit == pdir entry */
+	u64		*pdir_base;	/* physical base address */
+	unsigned long	ibase;		/* pdir IOV Space base - shared w/lba_pci */
+	unsigned long	imask;		/* pdir IOV Space mask - shared w/lba_pci */
+#ifdef ZX1_SUPPORT
+	unsigned long	iovp_mask;	/* help convert IOVA to IOVP */
+#endif
+	unsigned long	*res_hint;	/* next avail IOVP - circular search */
+	spinlock_t	res_lock;
+	unsigned int	res_bitshift;	/* from the LEFT! */
+	unsigned int	res_size;	/* size of resource map in bytes */
+#ifdef SBA_HINT_SUPPORT
+/* FIXME : DMA HINTs not used */
+	unsigned long	hint_mask_pdir; /* bits used for DMA hints */
+	unsigned int	hint_shift_pdir;
+#endif
+#if DELAYED_RESOURCE_CNT > 0
+	int		saved_cnt;
+	struct sba_dma_pair {
+			dma_addr_t	iova;
+			size_t		size;
+        } saved[DELAYED_RESOURCE_CNT];
+#endif
+
+#ifdef SBA_COLLECT_STATS
+#define SBA_SEARCH_SAMPLE	0x100
+	unsigned long	avg_search[SBA_SEARCH_SAMPLE];
+	unsigned long	avg_idx;	/* current index into avg_search */
+	unsigned long	used_pages;
+	unsigned long	msingle_calls;
+	unsigned long	msingle_pages;
+	unsigned long	msg_calls;
+	unsigned long	msg_pages;
+	unsigned long	usingle_calls;
+	unsigned long	usingle_pages;
+	unsigned long	usg_calls;
+	unsigned long	usg_pages;
+#endif
+        /* STUFF We don't need in performance path */
+	unsigned int	pdir_size;	/* in bytes, determined by IOV Space size */
+};
+
+struct sba_device {
+	struct sba_device	*next;  /* list of SBA's in system */
+	struct parisc_device	*dev;   /* dev found in bus walk */
+	const char		*name;
+	void __iomem		*sba_hpa; /* base address */
+	spinlock_t		sba_lock;
+	unsigned int		flags;  /* state/functionality enabled */
+	unsigned int		hw_rev;  /* HW revision of chip */
+
+	struct resource		chip_resv; /* MMIO reserved for chip */
+	struct resource		iommu_resv; /* MMIO reserved for iommu */
+
+	unsigned int		num_ioc;  /* number of on-board IOC's */
+	struct ioc		ioc[MAX_IOC];
+};
+
+#define ASTRO_RUNWAY_PORT	0x582
+#define IKE_MERCED_PORT		0x803
+#define REO_MERCED_PORT		0x804
+#define REOG_MERCED_PORT	0x805
+#define PLUTO_MCKINLEY_PORT	0x880
+
+static inline int IS_ASTRO(struct parisc_device *d) {
+	return d->id.hversion == ASTRO_RUNWAY_PORT;
+}
+
+static inline int IS_IKE(struct parisc_device *d) {
+	return d->id.hversion == IKE_MERCED_PORT;
+}
+
+static inline int IS_PLUTO(struct parisc_device *d) {
+	return d->id.hversion == PLUTO_MCKINLEY_PORT;
+}
+
+#define PLUTO_IOVA_BASE	(1UL*1024*1024*1024)	/* 1GB */
+#define PLUTO_IOVA_SIZE	(1UL*1024*1024*1024)	/* 1GB */
+#define PLUTO_GART_SIZE	(PLUTO_IOVA_SIZE / 2)
+
+#define SBA_PDIR_VALID_BIT	0x8000000000000000ULL
+
+#define SBA_AGPGART_COOKIE	0x0000badbadc0ffeeULL
+
+#define SBA_FUNC_ID	0x0000	/* function id */
+#define SBA_FCLASS	0x0008	/* function class, bist, header, rev... */
+
+#define SBA_FUNC_SIZE 4096   /* SBA configuration function reg set */
+
+#define ASTRO_IOC_OFFSET	(32 * SBA_FUNC_SIZE)
+#define PLUTO_IOC_OFFSET	(1 * SBA_FUNC_SIZE)
+/* Ike's IOC's occupy functions 2 and 3 */
+#define IKE_IOC_OFFSET(p)	((p+2) * SBA_FUNC_SIZE)
+
+#define IOC_CTRL          0x8	/* IOC_CTRL offset */
+#define IOC_CTRL_TC       (1 << 0) /* TOC Enable */
+#define IOC_CTRL_CE       (1 << 1) /* Coalesce Enable */
+#define IOC_CTRL_DE       (1 << 2) /* Dillon Enable */
+#define IOC_CTRL_RM       (1 << 8) /* Real Mode */
+#define IOC_CTRL_NC       (1 << 9) /* Non Coherent Mode */
+#define IOC_CTRL_D4       (1 << 11) /* Disable 4-byte coalescing */
+#define IOC_CTRL_DD       (1 << 13) /* Disable distr. LMMIO range coalescing */
+
+/*
+** Offsets into MBIB (Function 0 on Ike and hopefully Astro)
+** Firmware programs this stuff. Don't touch it.
+*/
+#define LMMIO_DIRECT0_BASE  0x300
+#define LMMIO_DIRECT0_MASK  0x308
+#define LMMIO_DIRECT0_ROUTE 0x310
+
+#define LMMIO_DIST_BASE  0x360
+#define LMMIO_DIST_MASK  0x368
+#define LMMIO_DIST_ROUTE 0x370
+
+#define IOS_DIST_BASE	0x390
+#define IOS_DIST_MASK	0x398
+#define IOS_DIST_ROUTE	0x3A0
+
+#define IOS_DIRECT_BASE	0x3C0
+#define IOS_DIRECT_MASK	0x3C8
+#define IOS_DIRECT_ROUTE 0x3D0
+
+/*
+** Offsets into I/O TLB (Function 2 and 3 on Ike)
+*/
+#define ROPE0_CTL	0x200  /* "regbus pci0" */
+#define ROPE1_CTL	0x208
+#define ROPE2_CTL	0x210
+#define ROPE3_CTL	0x218
+#define ROPE4_CTL	0x220
+#define ROPE5_CTL	0x228
+#define ROPE6_CTL	0x230
+#define ROPE7_CTL	0x238
+
+#define IOC_ROPE0_CFG	0x500	/* pluto only */
+#define   IOC_ROPE_AO	  0x10	/* Allow "Relaxed Ordering" */
+
+#define HF_ENABLE	0x40
+
+#define IOC_IBASE	0x300	/* IO TLB */
+#define IOC_IMASK	0x308
+#define IOC_PCOM	0x310
+#define IOC_TCNFG	0x318
+#define IOC_PDIR_BASE	0x320
+
+/*
+** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
+** It's safer (avoid memory corruption) to keep DMA page mappings
+** equivalently sized to VM PAGE_SIZE.
+**
+** We really can't avoid generating a new mapping for each
+** page since the Virtual Coherence Index has to be generated
+** and updated for each page.
+**
+** PAGE_SIZE could be greater than IOVP_SIZE. But not the inverse.
+*/
+#define IOVP_SIZE	PAGE_SIZE
+#define IOVP_SHIFT	PAGE_SHIFT
+#define IOVP_MASK	PAGE_MASK
+
+#define SBA_PERF_CFG	0x708	/* Performance Counter stuff */
+#define SBA_PERF_MASK1	0x718
+#define SBA_PERF_MASK2	0x730
+
+/*
+** Offsets into PCI Performance Counters (functions 12 and 13)
+** Controlled by PERF registers in function 2 & 3 respectively.
+*/
+#define SBA_PERF_CNT1	0x200
+#define SBA_PERF_CNT2	0x208
+#define SBA_PERF_CNT3	0x210
+
+/*
+** lba_device: Per instance Elroy data structure
+*/
+struct lba_device {
+	struct pci_hba_data	hba;
+
+	spinlock_t		lba_lock;
+	void			*iosapic_obj;
+
+#ifdef CONFIG_64BIT
+	void __iomem		*iop_base;	/* PA_VIEW - for IO port accessor funcs */
+#endif
+
+	int			flags;		/* state/functionality enabled */
+	int			hw_rev;		/* HW revision of chip */
+};
+
+#define ELROY_HVERS		0x782
+#define MERCURY_HVERS		0x783
+#define QUICKSILVER_HVERS	0x784
+
+static inline int IS_ELROY(struct parisc_device *d) {
+	return (d->id.hversion == ELROY_HVERS);
+}
+
+static inline int IS_MERCURY(struct parisc_device *d) {
+	return (d->id.hversion == MERCURY_HVERS);
+}
+
+static inline int IS_QUICKSILVER(struct parisc_device *d) {
+	return (d->id.hversion == QUICKSILVER_HVERS);
+}
+
+static inline int agp_mode_mercury(void __iomem *hpa) {
+	u64 bus_mode;
+
+	bus_mode = readl(hpa + 0x0620);
+	if (bus_mode & 1)
+		return 1;
+
+	return 0;
+}
+
+/*
+** I/O SAPIC init function
+** Caller knows where an I/O SAPIC is. LBA has an integrated I/O SAPIC.
+** Call setup as part of per instance initialization.
+** (ie *not* init_module() function unless only one is present.)
+** fixup_irq is to initialize PCI IRQ line support and
+** virtualize pcidev->irq value. To be called by pci_fixup_bus().
+*/
+extern void *iosapic_register(unsigned long hpa);
+extern int iosapic_fixup_irq(void *obj, struct pci_dev *pcidev);
+
+#define LBA_FUNC_ID	0x0000	/* function id */
+#define LBA_FCLASS	0x0008	/* function class, bist, header, rev... */
+#define LBA_CAPABLE	0x0030	/* capabilities register */
+
+#define LBA_PCI_CFG_ADDR	0x0040	/* poke CFG address here */
+#define LBA_PCI_CFG_DATA	0x0048	/* read or write data here */
+
+#define LBA_PMC_MTLT	0x0050	/* Firmware sets this - read only. */
+#define LBA_FW_SCRATCH	0x0058	/* Firmware writes the PCI bus number here. */
+#define LBA_ERROR_ADDR	0x0070	/* On error, address gets logged here */
+
+#define LBA_ARB_MASK	0x0080	/* bit 0 enable arbitration. PAT/PDC enables */
+#define LBA_ARB_PRI	0x0088	/* firmware sets this. */
+#define LBA_ARB_MODE	0x0090	/* firmware sets this. */
+#define LBA_ARB_MTLT	0x0098	/* firmware sets this. */
+
+#define LBA_MOD_ID	0x0100	/* Module ID. PDC_PAT_CELL reports 4 */
+
+#define LBA_STAT_CTL	0x0108	/* Status & Control */
+#define   LBA_BUS_RESET		0x01	/*  Deassert PCI Bus Reset Signal */
+#define   CLEAR_ERRLOG		0x10	/*  "Clear Error Log" cmd */
+#define   CLEAR_ERRLOG_ENABLE	0x20	/*  "Clear Error Log" Enable */
+#define   HF_ENABLE	0x40	/*    enable HF mode (default is -1 mode) */
+
+#define LBA_LMMIO_BASE	0x0200	/* < 4GB I/O address range */
+#define LBA_LMMIO_MASK	0x0208
+
+#define LBA_GMMIO_BASE	0x0210	/* > 4GB I/O address range */
+#define LBA_GMMIO_MASK	0x0218
+
+#define LBA_WLMMIO_BASE	0x0220	/* All < 4GB ranges under the same *SBA* */
+#define LBA_WLMMIO_MASK	0x0228
+
+#define LBA_WGMMIO_BASE	0x0230	/* All > 4GB ranges under the same *SBA* */
+#define LBA_WGMMIO_MASK	0x0238
+
+#define LBA_IOS_BASE	0x0240	/* I/O port space for this LBA */
+#define LBA_IOS_MASK	0x0248
+
+#define LBA_ELMMIO_BASE	0x0250	/* Extra LMMIO range */
+#define LBA_ELMMIO_MASK	0x0258
+
+#define LBA_EIOS_BASE	0x0260	/* Extra I/O port space */
+#define LBA_EIOS_MASK	0x0268
+
+#define LBA_GLOBAL_MASK	0x0270	/* Mercury only: Global Address Mask */
+#define LBA_DMA_CTL	0x0278	/* firmware sets this */
+
+#define LBA_IBASE	0x0300	/* SBA DMA support */
+#define LBA_IMASK	0x0308
+
+/* FIXME: ignore DMA Hint stuff until we can measure performance */
+#define LBA_HINT_CFG	0x0310
+#define LBA_HINT_BASE	0x0380	/* 14 registers at every 8 bytes. */
+
+#define LBA_BUS_MODE	0x0620
+
+/* ERROR regs are needed for config cycle kluges */
+#define LBA_ERROR_CONFIG 0x0680
+#define     LBA_SMART_MODE 0x20
+#define LBA_ERROR_STATUS 0x0688
+#define LBA_ROPE_CTL     0x06A0
+
+#define LBA_IOSAPIC_BASE	0x800 /* Offset of IRQ logic */
+
+#endif /*_ASM_PARISC_ROPES_H_*/
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/rt_sigframe.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/rt_sigframe.h
new file mode 100644
index 0000000..2b3010a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/rt_sigframe.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_RT_SIGFRAME_H
+#define _ASM_PARISC_RT_SIGFRAME_H
+
+#define SIGRETURN_TRAMP 4
+#define SIGRESTARTBLOCK_TRAMP 5 
+#define TRAMP_SIZE (SIGRETURN_TRAMP + SIGRESTARTBLOCK_TRAMP)
+
+struct rt_sigframe {
+	/* XXX: Must match trampoline size in arch/parisc/kernel/signal.c 
+	        Secondary to that it must protect the ERESTART_RESTARTBLOCK
+		trampoline we left on the stack (we were bad and didn't 
+		change sp so we could run really fast.) */
+	unsigned int tramp[TRAMP_SIZE];
+	struct siginfo info;
+	struct ucontext uc;
+};
+
+#define	SIGFRAME		128
+#define FUNCTIONCALLFRAME	96
+#define PARISC_RT_SIGFRAME_SIZE					\
+	(((sizeof(struct rt_sigframe) + FUNCTIONCALLFRAME) + SIGFRAME) & -SIGFRAME)
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/runway.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/runway.h
new file mode 100644
index 0000000..f3cfe69
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/runway.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_PARISC_RUNWAY_H
+#define ASM_PARISC_RUNWAY_H
+#ifdef __KERNEL__
+
+/* declared in arch/parisc/kernel/setup.c */
+extern struct proc_dir_entry * proc_runway_root;
+
+#define RUNWAY_STATUS	0x10
+#define RUNWAY_DEBUG	0x40
+
+#endif /* __KERNEL__ */
+#endif /* ASM_PARISC_RUNWAY_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/sections.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/sections.h
new file mode 100644
index 0000000..accdf40
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/sections.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_SECTIONS_H
+#define _PARISC_SECTIONS_H
+
+/* nothing to see, move along */
+#include <asm-generic/sections.h>
+
+#ifdef CONFIG_64BIT
+#undef dereference_function_descriptor
+void *dereference_function_descriptor(void *);
+#endif
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/serial.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/serial.h
new file mode 100644
index 0000000..77e9b67
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/serial.h
@@ -0,0 +1,8 @@
+/*
+ * include/asm-parisc/serial.h
+ */
+
+/*
+ * This is used for 16550-compatible UARTs
+ */
+#define BASE_BAUD ( 1843200 / 16 )
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/shmparam.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/shmparam.h
new file mode 100644
index 0000000..74f74e4
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/shmparam.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASMPARISC_SHMPARAM_H
+#define _ASMPARISC_SHMPARAM_H
+
+#define SHMLBA	   PAGE_SIZE	/* attach addr a multiple of this */
+#define SHM_COLOUR 0x00400000	/* shared mappings colouring */
+
+#endif /* _ASMPARISC_SHMPARAM_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/signal.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/signal.h
new file mode 100644
index 0000000..eeb5c88
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/signal.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_SIGNAL_H
+#define _ASM_PARISC_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+
+#define _NSIG		64
+/* bits-per-word, where word apparently means 'long' not 'int' */
+#define _NSIG_BPW	BITS_PER_LONG
+#define _NSIG_WORDS	(_NSIG / _NSIG_BPW)
+
+# ifndef __ASSEMBLY__
+
+/* Most things should be clean enough to redefine this at will, if care
+   is taken to make libc match.  */
+
+typedef unsigned long old_sigset_t;		/* at least 32 bits */
+
+typedef struct {
+	/* next_signal() assumes this is a long - no choice */
+	unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#ifndef __KERNEL__
+struct sigaction {
+	__sighandler_t sa_handler;
+	unsigned long sa_flags;
+	sigset_t sa_mask;		/* mask last for extensibility */
+};
+#endif
+
+#include <asm/sigcontext.h>
+
+#endif /* !__ASSEMBLY */
+#endif /* _ASM_PARISC_SIGNAL_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/smp.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/smp.h
new file mode 100644
index 0000000..b9a18db
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/smp.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+extern int init_per_cpu(int cpuid);
+
+#if defined(CONFIG_SMP)
+
+/* Page Zero Location PDC will look for the address to branch to when we poke
+** slave CPUs still in "Icache loop".
+*/
+#define PDC_OS_BOOT_RENDEZVOUS     0x10
+#define PDC_OS_BOOT_RENDEZVOUS_HI  0x28
+
+#ifndef ASSEMBLY
+#include <linux/bitops.h>
+#include <linux/threads.h>	/* for NR_CPUS */
+#include <linux/cpumask.h>
+typedef unsigned long address_t;
+
+
+/*
+ *	Private routines/data
+ *
+ *	physical and logical are equivalent until we support CPU hotplug.
+ */
+#define cpu_number_map(cpu)	(cpu)
+#define cpu_logical_map(cpu)	(cpu)
+
+extern void smp_send_all_nop(void);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+#endif /* !ASSEMBLY */
+
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
+
+#else /* CONFIG_SMP */
+
+static inline void smp_send_all_nop(void) { return; }
+
+#endif
+
+#define NO_PROC_ID		0xFF		/* No processor magic marker */
+#define ANY_PROC_ID		0xFF		/* Any processor magic marker */
+static inline int __cpu_disable (void) {
+  return 0;
+}
+static inline void __cpu_die (unsigned int cpu) {
+  while(1)
+    ;
+}
+
+#endif /*  __ASM_SMP_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/socket.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/socket.h
new file mode 100644
index 0000000..79feff1
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/socket.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+#include <uapi/asm/socket.h>
+
+/* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
+ * have to define SOCK_NONBLOCK to a different value here.
+ */
+#define SOCK_NONBLOCK	0x40000000
+
+#endif /* _ASM_SOCKET_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/special_insns.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/special_insns.h
new file mode 100644
index 0000000..3d4dd68
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/special_insns.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PARISC_SPECIAL_INSNS_H
+#define __PARISC_SPECIAL_INSNS_H
+
+#define mfctl(reg)	({		\
+	unsigned long cr;		\
+	__asm__ __volatile__(		\
+		"mfctl " #reg ",%0" :	\
+		 "=r" (cr)		\
+	);				\
+	cr;				\
+})
+
+#define mtctl(gr, cr) \
+	__asm__ __volatile__("mtctl %0,%1" \
+		: /* no outputs */ \
+		: "r" (gr), "i" (cr) : "memory")
+
+/* these are here to demystify the calling code, and to provide hooks */
+/* which I needed for debugging EIEM problems -PB */
+#define get_eiem() mfctl(15)
+static inline void set_eiem(unsigned long val)	/* write the External Interrupt Enable Mask (control register 15) */
+{
+	mtctl(val, 15);
+}
+
+#define mfsp(reg)	({		\
+	unsigned long cr;		\
+	__asm__ __volatile__(		\
+		"mfsp " #reg ",%0" :	\
+		 "=r" (cr)		\
+	);				\
+	cr;				\
+})
+
+#define mtsp(val, cr) \
+	{ if (__builtin_constant_p(val) && ((val) == 0)) \
+	 __asm__ __volatile__("mtsp %%r0,%0" : : "i" (cr) : "memory"); \
+	else \
+	 __asm__ __volatile__("mtsp %0,%1" \
+		: /* no outputs */ \
+		: "r" (val), "i" (cr) : "memory"); }
+
+#endif /* __PARISC_SPECIAL_INSNS_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/spinlock.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/spinlock.h
new file mode 100644
index 0000000..a827765
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/spinlock.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <asm/barrier.h>
+#include <asm/ldcw.h>
+#include <asm/processor.h>
+#include <asm/spinlock_types.h>
+
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
+{
+	volatile unsigned int *a = __ldcw_align(x);	/* select the properly aligned lock word for ldcw */
+	return *a == 0;	/* 0 == held, 1 == free (unlock stores 1; ldcw clears it on acquire) */
+}
+
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+
+static inline void arch_spin_lock_flags(arch_spinlock_t *x,
+					 unsigned long flags)
+{
+	volatile unsigned int *a;
+
+	a = __ldcw_align(x);
+	while (__ldcw(a) == 0)	/* ldcw atomically fetches the word and clears it; 0 => lock already held */
+		while (*a == 0)	/* spin on plain reads until the lock looks free, then retry ldcw */
+			if (flags & PSW_SM_I) {	/* caller's PSW had interrupts enabled: allow them in while we wait */
+				local_irq_enable();
+				cpu_relax();
+				local_irq_disable();
+			} else
+				cpu_relax();
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *x)
+{
+	volatile unsigned int *a;
+
+	a = __ldcw_align(x);
+	mb();	/* order critical-section accesses before the release store */
+	*a = 1;	/* 1 == free */
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *x)
+{
+	volatile unsigned int *a;
+	int ret;
+
+	a = __ldcw_align(x);
+        ret = __ldcw(a) != 0;	/* single atomic attempt: nonzero old value means we acquired it */
+
+	return ret;
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Linux rwlocks are unfair to writers; they can be starved for an indefinite
+ * time by readers.  With care, they can also be taken in interrupt context.
+ *
+ * In the PA-RISC implementation, we have a spinlock and a counter.
+ * Readers use the lock to serialise their access to the counter (which
+ * records how many readers currently hold the lock).
+ * Writers hold the spinlock, preventing any readers or other writers from
+ * grabbing the rwlock.
+ */
+
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to grab the same read lock */
+static  __inline__ void arch_read_lock(arch_rwlock_t *rw)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+	arch_spin_lock_flags(&rw->lock, flags);
+	rw->counter++;	/* register one more reader under the spinlock */
+	arch_spin_unlock(&rw->lock);
+	local_irq_restore(flags);
+}
+
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to grab the same read lock */
+static  __inline__ void arch_read_unlock(arch_rwlock_t *rw)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+	arch_spin_lock_flags(&rw->lock, flags);
+	rw->counter--;	/* this reader is done; counter updated under the spinlock */
+	arch_spin_unlock(&rw->lock);
+	local_irq_restore(flags);
+}
+
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to grab the same read lock */
+static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
+{
+	unsigned long flags;
+ retry:
+	local_irq_save(flags);
+	if (arch_spin_trylock(&rw->lock)) {
+		rw->counter++;	/* got the spinlock: register ourselves as a reader */
+		arch_spin_unlock(&rw->lock);
+		local_irq_restore(flags);
+		return 1;
+	}
+
+	local_irq_restore(flags);
+	/* If write-locked, we fail to acquire the lock */
+	if (rw->counter < 0)	/* counter == -1 marks a writer (see arch_write_lock) */
+		return 0;
+
+	/* Wait until we have a realistic chance at the lock */
+	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
+		cpu_relax();
+
+	goto retry;
+}
+
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to read_trylock() this lock */
<br>
+static __inline__ void arch_write_lock(arch_rwlock_t *rw)
+{
+	unsigned long flags;
+retry:
+	local_irq_save(flags);
+	arch_spin_lock_flags(&rw->lock, flags);
+
+	if (rw->counter != 0) {	/* readers still hold the rwlock */
+		arch_spin_unlock(&rw->lock);
+		local_irq_restore(flags);
+
+		while (rw->counter != 0)	/* wait without holding the spinlock */
+			cpu_relax();
+
+		goto retry;
+	}
+
+	rw->counter = -1; /* mark as write-locked */
+	mb();	/* make the write-locked marker visible before the critical section */
+	local_irq_restore(flags);
+}
+
+static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
+{
+	rw->counter = 0;	/* clear the write-locked (-1) marker */
+	arch_spin_unlock(&rw->lock);
+}
+
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to read_trylock() this lock */
+static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
+{
+	unsigned long flags;
+	int result = 0;
+
+	local_irq_save(flags);
+	if (arch_spin_trylock(&rw->lock)) {
+		if (rw->counter == 0) {	/* no readers: take it */
+			rw->counter = -1;	/* mark write-locked; spinlock stays held by the writer */
+			result = 1;
+		} else {
+			/* Read-locked.  Oh well. */
+			arch_spin_unlock(&rw->lock);
+		}
+	}
+	local_irq_restore(flags);
+
+	return result;
+}
+
+/*
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
+{
+	return rw->counter >= 0;	/* negative counter means a writer holds it */
+}
+
+/*
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
+{
+	return !rw->counter;	/* free only when there are no readers and no writer */
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#endif /* __ASM_SPINLOCK_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/spinlock_types.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/spinlock_types.h
new file mode 100644
index 0000000..42979c5
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/spinlock_types.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+typedef struct {
+#ifdef CONFIG_PA20
+	volatile unsigned int slock;	/* single lock word; 1 == unlocked */
+# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
+#else
+	volatile unsigned int lock[4];	/* word array; __ldcw_align() picks the suitably aligned element (see <asm/ldcw.h>) */
+# define __ARCH_SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
+#endif
+} arch_spinlock_t;
+
+typedef struct {
+	arch_spinlock_t lock;	/* serialises access to counter */
+	volatile int counter;	/* number of readers, or -1 when write-locked */
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED		{ __ARCH_SPIN_LOCK_UNLOCKED, 0 }
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/string.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/string.h
new file mode 100644
index 0000000..f6e1132
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/string.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PA_STRING_H_
+#define _PA_STRING_H_
+
+#define __HAVE_ARCH_MEMSET
+extern void * memset(void *, int, size_t);
+
+#define __HAVE_ARCH_MEMCPY
+void * memcpy(void * dest,const void *src,size_t count);
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/superio.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/superio.h
new file mode 100644
index 0000000..5e11c11
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/superio.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_SUPERIO_H
+#define _PARISC_SUPERIO_H
+
+#define IC_PIC1    0x20		/* PCI I/O address of master 8259 */
+#define IC_PIC2    0xA0		/* PCI I/O address of slave */
+
+/* Config Space Offsets to configuration and base address registers */
+#define SIO_CR     0x5A		/* Configuration Register */
+#define SIO_ACPIBAR 0x88	/* ACPI BAR */
+#define SIO_FDCBAR 0x90		/* Floppy Disk Controller BAR */
+#define SIO_SP1BAR 0x94		/* Serial 1 BAR */
+#define SIO_SP2BAR 0x98		/* Serial 2 BAR */
+#define SIO_PPBAR  0x9C		/* Parallel BAR */
+
+#define TRIGGER_1  0x67		/* Edge/level trigger register 1 */
+#define TRIGGER_2  0x68		/* Edge/level trigger register 2 */
+
+/* Interrupt Routing Control registers */
+#define CFG_IR_SER    0x69	/* Serial 1 [0:3] and Serial 2 [4:7] */
+#define CFG_IR_PFD    0x6a	/* Parallel [0:3] and Floppy [4:7] */
+#define CFG_IR_IDE    0x6b	/* IDE1     [0:3] and IDE2 [4:7] */
+#define CFG_IR_INTAB  0x6c	/* PCI INTA [0:3] and INT B [4:7] */
+#define CFG_IR_INTCD  0x6d	/* PCI INTC [0:3] and INT D [4:7] */
+#define CFG_IR_PS2    0x6e	/* PS/2 KBINT [0:3] and Mouse [4:7] */
+#define CFG_IR_FXBUS  0x6f	/* FXIRQ[0] [0:3] and FXIRQ[1] [4:7] */
+#define CFG_IR_USB    0x70	/* FXIRQ[2] [0:3] and USB [4:7] */
+#define CFG_IR_ACPI   0x71	/* ACPI SCI [0:3] and reserved [4:7] */
+
+#define CFG_IR_LOW     CFG_IR_SER	/* Lowest interrupt routing reg */
+#define CFG_IR_HIGH    CFG_IR_ACPI	/* Highest interrupt routing reg */
+
+/* 8259 operational control words */
+#define OCW2_EOI   0x20		/* Non-specific EOI */
+#define OCW2_SEOI  0x60		/* Specific EOI */
+#define OCW3_IIR   0x0A		/* Read request register */
+#define OCW3_ISR   0x0B		/* Read service register */
+#define OCW3_POLL  0x0C		/* Poll the PIC for an interrupt vector */
+
+/* Interrupt lines. Only PIC1 is used */
+#define USB_IRQ    1		/* USB */
+#define SP1_IRQ    3		/* Serial port 1 */
+#define SP2_IRQ    4		/* Serial port 2 */
+#define PAR_IRQ    5		/* Parallel port */
+#define FDC_IRQ    6		/* Floppy controller */
+#define IDE_IRQ    7		/* IDE (pri+sec) */
+
+/* ACPI registers */
+#define USB_REG_CR	0x1f	/* USB Regulator Control Register */
+
+#define SUPERIO_NIRQS   8
+
+struct superio_device {
+	u32 fdc_base;	/* floppy controller I/O base (from SIO_FDCBAR) */
+	u32 sp1_base;	/* serial port 1 I/O base (from SIO_SP1BAR) */
+	u32 sp2_base;	/* serial port 2 I/O base (from SIO_SP2BAR) */
+	u32 pp_base;	/* parallel port I/O base (from SIO_PPBAR) */
+	u32 acpi_base;	/* ACPI I/O base (from SIO_ACPIBAR) */
+	int suckyio_irq_enabled;	/* IRQ-setup state flag for the "SuckyIO" chip */
+	struct pci_dev *lio_pdev;       /* pci device for legacy IO (fn 1) */
+	struct pci_dev *usb_pdev;       /* pci device for USB (fn 2) */
+};
+
+/*
+ * Does NS make a 87415 based plug in PCI card? If so, because of this
+ * macro we currently don't support it being plugged into a machine
+ * that contains a SuperIO chip AND has CONFIG_SUPERIO enabled.
+ *
+ * This could be fixed by checking to see if function 1 exists, and
+ * if it is SuperIO Legacy IO; but really now, is this combination
+ * going to EVER happen?
+ */
+
+#define SUPERIO_IDE_FN 0 /* Function number of IDE controller */
+#define SUPERIO_LIO_FN 1 /* Function number of Legacy IO controller */
+#define SUPERIO_USB_FN 2 /* Function number of USB controller */
+
+#define is_superio_device(x) \
+	(((x)->vendor == PCI_VENDOR_ID_NS) && \
+	(  ((x)->device == PCI_DEVICE_ID_NS_87415) \
+	|| ((x)->device == PCI_DEVICE_ID_NS_87560_LIO) \
+	|| ((x)->device == PCI_DEVICE_ID_NS_87560_USB) ) )
+
+extern int superio_fixup_irq(struct pci_dev *pcidev); /* called by iosapic */
+
+#endif /* _PARISC_SUPERIO_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/switch_to.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/switch_to.h
new file mode 100644
index 0000000..f2ac9cc
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/switch_to.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PARISC_SWITCH_TO_H
+#define __PARISC_SWITCH_TO_H
+
+struct task_struct;
+
+extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *);
+
+#define switch_to(prev, next, last) do {			\
+	(last) = _switch_to(prev, next);			\
+} while(0)
+
+#endif /* __PARISC_SWITCH_TO_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/syscall.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/syscall.h
new file mode 100644
index 0000000..8bff1a5
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/syscall.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* syscall.h */
+
+#ifndef _ASM_PARISC_SYSCALL_H_
+#define _ASM_PARISC_SYSCALL_H_
+
+#include <uapi/linux/audit.h>
+#include <linux/compat.h>
+#include <linux/err.h>
+#include <asm/ptrace.h>
+
+#define NR_syscalls (__NR_Linux_syscalls)
+
+static inline long syscall_get_nr(struct task_struct *tsk,
+				  struct pt_regs *regs)
+{
+	return regs->gr[20];	/* syscall number is carried in %r20 */
+}
+
+static inline void syscall_get_arguments(struct task_struct *tsk,
+					 struct pt_regs *regs, unsigned int i,
+					 unsigned int n, unsigned long *args)
+{
+	BUG_ON(i);	/* only fetches starting at argument 0 are supported */
+
+	switch (n) {	/* args live in %r26 (arg0) down to %r21 (arg5) */
+	case 6:
+		args[5] = regs->gr[21];	/* fall through */
+	case 5:
+		args[4] = regs->gr[22];	/* fall through */
+	case 4:
+		args[3] = regs->gr[23];	/* fall through */
+	case 3:
+		args[2] = regs->gr[24];	/* fall through */
+	case 2:
+		args[1] = regs->gr[25];	/* fall through */
+	case 1:
+		args[0] = regs->gr[26];	/* fall through */
+	case 0:
+		break;
+	default:
+		BUG();	/* n > 6 is invalid */
+	}
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+						struct pt_regs *regs)
+{
+	return regs->gr[28];	/* %r28 holds the syscall return value */
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+					    struct pt_regs *regs,
+					    int error, long val)
+{
+	regs->gr[28] = error ? error : val;	/* a nonzero error code takes precedence over val */
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+				    struct pt_regs *regs)
+{
+	/* do nothing: nothing to undo on this architecture */
+}
+
+static inline int syscall_get_arch(void)
+{
+	int arch = AUDIT_ARCH_PARISC;	/* default: 32-bit audit arch */
+#ifdef CONFIG_64BIT
+	if (!is_compat_task())	/* native 64-bit task on a 64-bit kernel */
+		arch = AUDIT_ARCH_PARISC64;
+#endif
+	return arch;
+}
+#endif /*_ASM_PARISC_SYSCALL_H_*/
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/termios.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/termios.h
new file mode 100644
index 0000000..cded9dc
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/termios.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_TERMIOS_H
+#define _PARISC_TERMIOS_H
+
+#include <uapi/asm/termios.h>
+
+
+/*	intr=^C		quit=^\		erase=del	kill=^U
+	eof=^D		vtime=\0	vmin=\1		sxtc=\0
+	start=^Q	stop=^S		susp=^Z		eol=\0
+	reprint=^R	discard=^U	werase=^W	lnext=^V
+	eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
+	unsigned short __tmp; \
+	get_user(__tmp,&(termio)->x); \
+	*(unsigned short *) &(termios)->x = __tmp; \
+}
+
+#define user_termio_to_kernel_termios(termios, termio) \
+({ \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
+	SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
+	copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
+})
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+#define kernel_termios_to_user_termio(termio, termios) \
+({ \
+	put_user((termios)->c_iflag, &(termio)->c_iflag); \
+	put_user((termios)->c_oflag, &(termio)->c_oflag); \
+	put_user((termios)->c_cflag, &(termio)->c_cflag); \
+	put_user((termios)->c_lflag, &(termio)->c_lflag); \
+	put_user((termios)->c_line,  &(termio)->c_line); \
+	copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
+})
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
+#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif	/* _PARISC_TERMIOS_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/thread_info.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/thread_info.h
new file mode 100644
index 0000000..598c8d6
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/thread_info.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_THREAD_INFO_H
+#define _ASM_PARISC_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/special_insns.h>
+
+struct thread_info {
+	struct task_struct *task;	/* main task structure */
+	unsigned long flags;		/* thread_info flags (see TIF_*) */
+	mm_segment_t addr_limit;	/* user-level address space limit */
+	__u32 cpu;			/* current CPU */
+	int preempt_count;		/* 0=preemptable, <0=BUG; will also serve as bh-counter */
+};
+
+#define INIT_THREAD_INFO(tsk)			\
+{						\
+	.task		= &tsk,			\
+	.flags		= 0,			\
+	.cpu		= 0,			\
+	.addr_limit	= KERNEL_DS,		\
+	.preempt_count	= INIT_PREEMPT_COUNT,	\
+}
+
+#define init_thread_info        (init_thread_union.thread_info)
+#define init_stack              (init_thread_union.stack)
+
+/* how to get the thread information struct from C */
+#define current_thread_info()	((struct thread_info *)mfctl(30))
+
+#endif /* !__ASSEMBLY */
+
+/* thread information allocation */
+
+#ifdef CONFIG_IRQSTACKS
+#define THREAD_SIZE_ORDER	2 /* PA-RISC requires at least 16k stack */
+#else
+#define THREAD_SIZE_ORDER	3 /* PA-RISC requires at least 32k stack */
+#endif
+
+/* Be sure to hunt all references to this down when you change the size of
+ * the kernel stack */
+#define THREAD_SIZE             (PAGE_SIZE << THREAD_SIZE_ORDER)
+#define THREAD_SHIFT            (PAGE_SHIFT + THREAD_SIZE_ORDER)
+
+/*
+ * thread information flags
+ */
+#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_SIGPENDING		1	/* signal pending */
+#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
+#define TIF_POLLING_NRFLAG	3	/* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_32BIT               4       /* 32 bit binary */
+#define TIF_MEMDIE		5	/* is terminating due to OOM killer */
+#define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
+#define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
+#define TIF_SINGLESTEP		9	/* single stepping? */
+#define TIF_BLOCKSTEP		10	/* branch stepping? */
+#define TIF_SECCOMP		11	/* secure computing */
+#define TIF_SYSCALL_TRACEPOINT	12	/* syscall tracepoint instrumentation */
+
+#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
+#define _TIF_32BIT		(1 << TIF_32BIT)
+#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
+#define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
+#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
+
+#define _TIF_USER_WORK_MASK     (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
+                                 _TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP |	\
+				 _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \
+				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
+
+#ifdef CONFIG_64BIT
+# ifdef CONFIG_COMPAT
+#  define is_32bit_task()	(test_thread_flag(TIF_32BIT))
+# else
+#  define is_32bit_task()	(0)
+# endif
+#else
+# define is_32bit_task()	(1)
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_PARISC_THREAD_INFO_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/timex.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/timex.h
new file mode 100644
index 0000000..45537cd
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/timex.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * linux/include/asm-parisc/timex.h
+ *
+ * PARISC architecture timex specifications
+ */
+#ifndef _ASMPARISC_TIMEX_H
+#define _ASMPARISC_TIMEX_H
+
+
+#define CLOCK_TICK_RATE	1193180 /* Underlying HZ */
+
+typedef unsigned long cycles_t;
+
+static inline cycles_t get_cycles (void)
+{
+	return mfctl(16);	/* CR16 is the interval timer (free-running cycle counter) */
+}
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/tlb.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/tlb.h
new file mode 100644
index 0000000..0c881e7
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/tlb.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_TLB_H
+#define _PARISC_TLB_H
+
+#define tlb_flush(tlb)			\
+do {	if ((tlb)->fullmm)		\
+		flush_tlb_mm((tlb)->mm);\
+} while (0)
+
+#define tlb_start_vma(tlb, vma) \
+do {	if (!(tlb)->fullmm)	\
+		flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
+
+#define tlb_end_vma(tlb, vma)	\
+do {	if (!(tlb)->fullmm)	\
+		flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
+
+#define __tlb_remove_tlb_entry(tlb, pte, address) \
+	do { } while (0)
+
+#include <asm-generic/tlb.h>
+
+#define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)
+#define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/tlbflush.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/tlbflush.h
new file mode 100644
index 0000000..14668bd
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/tlbflush.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_TLBFLUSH_H
+#define _PARISC_TLBFLUSH_H
+
+/* TLB flushing routines.... */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/mmu_context.h>
+
+
+/* This is for the serialisation of PxTLB broadcasts.  At least on the
+ * N class systems, only one PxTLB inter processor broadcast can be
+ * active at any one time on the Merced bus.  This tlb purge
+ * synchronisation is fairly lightweight and harmless so we activate
+ * it on all systems not just the N class.
+
+ * It is also used to ensure PTE updates are atomic and consistent
+ * with the TLB.
+ */
+extern spinlock_t pa_tlb_lock;
+
+#define purge_tlb_start(flags)	spin_lock_irqsave(&pa_tlb_lock, flags)
+#define purge_tlb_end(flags)	spin_unlock_irqrestore(&pa_tlb_lock, flags)
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_all_local(void *);
+
+#define smp_flush_tlb_all()	flush_tlb_all()
+
+int __flush_tlb_range(unsigned long sid,
+	unsigned long start, unsigned long end);
+
+#define flush_tlb_range(vma, start, end) \
+	__flush_tlb_range((vma)->vm_mm->context, start, end)
+
+#define flush_tlb_kernel_range(start, end) \
+	__flush_tlb_range(0, start, end)
+
+/*
+ * flush_tlb_mm()
+ *
+ * The code to switch to a new context is NOT valid for processes
+ * which play with the space id's.  Thus, we have to preserve the
+ * space and just flush the entire tlb.  However, the compilers,
+ * dynamic linker, etc, do not manipulate space id's, so there
+ * could be a significant performance benefit in switching contexts
+ * and not flushing the whole tlb.
+ */
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	BUG_ON(mm == &init_mm); /* Should never happen */
+
+#if 1 || defined(CONFIG_SMP)	/* "1 ||" deliberately forces this path even on UP builds */
+	/* Except for very small threads, flushing the whole TLB is
+	 * faster than using __flush_tlb_range.  The pdtlb and pitlb
+	 * instructions are very slow because of the TLB broadcast.
+	 * It might be faster to do local range flushes on all CPUs
+	 * on PA 2.0 systems.
+	 */
+	flush_tlb_all();
+#else
+	/* FIXME: currently broken, causing space id and protection ids
+	 * to go out of sync, resulting in faults on userspace accesses.
+	 * This approach needs further investigation since running many
+	 * small applications (e.g., GCC testsuite) is faster on HP-UX.
+	 */
+	if (mm) {
+		if (mm->context != 0)
+			free_sid(mm->context);
+		mm->context = alloc_sid();
+		if (mm == current->active_mm)
+			load_context(mm->context);
+	}
+#endif
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+	unsigned long addr)
+{
+	unsigned long flags, sid;
+
+	sid = vma->vm_mm->context;	/* space id of the target address space */
+	purge_tlb_start(flags);		/* serialise PxTLB broadcasts via pa_tlb_lock */
+	mtsp(sid, 1);			/* select the space through %sr1 */
+	pdtlb(addr);			/* purge the data TLB entry */
+	if (unlikely(split_tlb))
+		pitlb(addr);		/* split TLBs need the instruction entry purged too */
+	purge_tlb_end(flags);
+}
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/traps.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/traps.h
new file mode 100644
index 0000000..e000132
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/traps.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_TRAPS_H
+#define __ASM_TRAPS_H
+
+#ifdef __KERNEL__
+struct pt_regs;
+
+/* traps.c */
+void parisc_terminate(char *msg, struct pt_regs *regs,
+		int code, unsigned long offset) __noreturn __cold;
+
+void die_if_kernel(char *str, struct pt_regs *regs, long err);
+
+/* mm/fault.c */
+const char *trap_name(unsigned long code);
+void do_page_fault(struct pt_regs *regs, unsigned long code,
+		unsigned long address);
+#endif
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/uaccess.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/uaccess.h
new file mode 100644
index 0000000..ea70e36
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/uaccess.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PARISC_UACCESS_H
+#define __PARISC_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <asm/page.h>
+#include <asm/cache.h>
+
+#include <linux/bug.h>
+#include <linux/string.h>
+
+#define KERNEL_DS	((mm_segment_t){0})
+#define USER_DS 	((mm_segment_t){1})
+
+#define segment_eq(a, b) ((a).seg == (b).seg)
+
+#define get_ds()	(KERNEL_DS)
+#define get_fs()	(current_thread_info()->addr_limit)
+#define set_fs(x)	(current_thread_info()->addr_limit = (x))
+
+/*
+ * Note that since kernel addresses are in a separate address space on
+ * parisc, we don't need to do anything for access_ok().
+ * We just let the page fault handler do the right thing. This also means
+ * that put_user is the same as __put_user, etc.
+ */
+
+#define access_ok(type, uaddr, size)	\
+	( (uaddr) == (uaddr) )
+
+#define put_user __put_user
+#define get_user __get_user
+
+#if !defined(CONFIG_64BIT)
+#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
+#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
+#else
+#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
+#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
+#endif
+
+/*
+ * The exception table contains two values: the first is the relative offset to
+ * the address of the instruction that is allowed to fault, and the second is
+ * the relative offset to the address of the fixup routine. Since relative
+ * addresses are used, 32bit values are sufficient even on 64bit kernel.
+ */
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+struct exception_table_entry {
+	int insn;	/* relative address of insn that is allowed to fault. */
+	int fixup;	/* relative address of fixup routine */
+};
+
+#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
+	".section __ex_table,\"aw\"\n"			   \
+	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
+	".previous\n"
+
+/*
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
+ * (with lowest bit set) for which the fault handler in fixup_exception() will
+ * load -EFAULT into %r8 for a read or write fault, and zeroes the target
+ * register in case of a read fault in get_user().
+ */
+#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
+	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
+
+/*
+ * load_sr2() preloads the space register %%sr2 - based on the value of
+ * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
+ * is 0), or with the current value of %%sr3 to access user space (USER_DS)
+ * memory. The following __get_user_asm() and __put_user_asm() functions have
+ * %%sr2 hard-coded to access the requested memory.
+ */
+#define load_sr2() \
+	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
+		" mfsp %%sr3,%0\n\t"		\
+		" mtsp %0,%%sr2\n\t"		\
+		: : "r"(get_fs()) : )
+
+#define __get_user_internal(val, ptr)			\
+({							\
+	register long __gu_err __asm__ ("r8") = 0;	\
+							\
+	switch (sizeof(*(ptr))) {			\
+	case 1: __get_user_asm(val, "ldb", ptr); break;	\
+	case 2: __get_user_asm(val, "ldh", ptr); break; \
+	case 4: __get_user_asm(val, "ldw", ptr); break; \
+	case 8: LDD_USER(val, ptr); break;		\
+	default: BUILD_BUG();				\
+	}						\
+							\
+	__gu_err;					\
+})
+
+#define __get_user(val, ptr)				\
+({							\
+	load_sr2();					\
+	__get_user_internal(val, ptr);			\
+})
+
+#define __get_user_asm(val, ldx, ptr)			\
+{							\
+	register long __gu_val;				\
+							\
+	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
+		"9:\n"					\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
+		: "=r"(__gu_val), "=r"(__gu_err)        \
+		: "r"(ptr), "1"(__gu_err));		\
+							\
+	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
+}
+
+#if !defined(CONFIG_64BIT)
+
+#define __get_user_asm64(val, ptr)			\
+{							\
+	union {						\
+		unsigned long long	l;		\
+		__typeof__(*(ptr))	t;		\
+	} __gu_tmp;					\
+							\
+	__asm__("   copy %%r0,%R0\n"			\
+		"1: ldw 0(%%sr2,%2),%0\n"		\
+		"2: ldw 4(%%sr2,%2),%R0\n"		\
+		"9:\n"					\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
+		: "=&r"(__gu_tmp.l), "=r"(__gu_err)	\
+		: "r"(ptr), "1"(__gu_err));		\
+							\
+	(val) = __gu_tmp.t;				\
+}
+
+#endif /* !defined(CONFIG_64BIT) */
+
+
+#define __put_user_internal(x, ptr)				\
+({								\
+	register long __pu_err __asm__ ("r8") = 0;      	\
+        __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
+								\
+	switch (sizeof(*(ptr))) {				\
+	case 1: __put_user_asm("stb", __x, ptr); break;		\
+	case 2: __put_user_asm("sth", __x, ptr); break;		\
+	case 4: __put_user_asm("stw", __x, ptr); break;		\
+	case 8: STD_USER(__x, ptr); break;			\
+	default: BUILD_BUG();					\
+	}							\
+								\
+	__pu_err;						\
+})
+
+#define __put_user(x, ptr)					\
+({								\
+	load_sr2();						\
+	__put_user_internal(x, ptr);				\
+})
+
+
+/*
+ * The "__put_user/kernel_asm()" macros tell gcc they read from memory
+ * instead of writing. This is because they do not write to any memory
+ * gcc knows about, so there are no aliasing issues. These macros must
+ * also be aware that fixups are executed in the context of the fault,
+ * and any registers used there must be listed as clobbers.
+ * r8 is already listed as err.
+ */
+
+#define __put_user_asm(stx, x, ptr)                         \
+	__asm__ __volatile__ (                              \
+		"1: " stx " %2,0(%%sr2,%1)\n"		    \
+		"9:\n"					    \
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	    \
+		: "=r"(__pu_err)                            \
+		: "r"(ptr), "r"(x), "0"(__pu_err))
+
+
+#if !defined(CONFIG_64BIT)
+
+#define __put_user_asm64(__val, ptr) do {	    	    \
+	__asm__ __volatile__ (				    \
+		"1: stw %2,0(%%sr2,%1)\n"		    \
+		"2: stw %R2,4(%%sr2,%1)\n"		    \
+		"9:\n"					    \
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	    \
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	    \
+		: "=r"(__pu_err)                            \
+		: "r"(ptr), "r"(__val), "0"(__pu_err));	    \
+} while (0)
+
+#endif /* !defined(CONFIG_64BIT) */
+
+
+/*
+ * Complex access routines -- external declarations
+ */
+
+extern long strncpy_from_user(char *, const char __user *, long);
+extern unsigned lclear_user(void __user *, unsigned long);
+extern long lstrnlen_user(const char __user *, long);
+/*
+ * Complex access routines -- macros
+ */
+#define user_addr_max() (~0UL)
+
+#define strnlen_user lstrnlen_user
+#define clear_user lclear_user
+#define __clear_user lclear_user
+
+unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
+					    unsigned long len);
+unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
+					    unsigned long len);
+unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src,
+					    unsigned long len);
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
+
+struct pt_regs;
+int fixup_exception(struct pt_regs *regs);
+
+#endif /* __PARISC_UACCESS_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/ucontext.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/ucontext.h
new file mode 100644
index 0000000..ac7f863
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/ucontext.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_UCONTEXT_H
+#define _ASM_PARISC_UCONTEXT_H
+
+struct ucontext {
+	unsigned int	  uc_flags;
+	struct ucontext  *uc_link;
+	stack_t		  uc_stack;
+	struct sigcontext uc_mcontext;
+	sigset_t	  uc_sigmask;	/* mask last for extensibility */
+};
+
+#endif /* !_ASM_PARISC_UCONTEXT_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/unaligned.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/unaligned.h
new file mode 100644
index 0000000..e9029c7
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/unaligned.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_UNALIGNED_H
+#define _ASM_PARISC_UNALIGNED_H
+
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
+
+#ifdef __KERNEL__
+struct pt_regs;
+void handle_unaligned(struct pt_regs *regs);
+int check_unaligned(struct pt_regs *regs);
+#endif
+
+#endif /* _ASM_PARISC_UNALIGNED_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/unistd.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/unistd.h
new file mode 100644
index 0000000..3d507d0
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/unistd.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_UNISTD_H_
+#define _ASM_PARISC_UNISTD_H_
+
+#include <uapi/asm/unistd.h>
+
+#ifndef __ASSEMBLY__
+
+#define SYS_ify(syscall_name)   __NR_##syscall_name
+
+#ifndef ASM_LINE_SEP
+# define ASM_LINE_SEP ;
+#endif
+
+/* Definition taken from glibc 2.3.3
+ * sysdeps/unix/sysv/linux/hppa/sysdep.h
+ */
+
+#ifdef PIC
+/* WARNING: CANNOT BE USED IN A NOP! */
+# define K_STW_ASM_PIC	"       copy %%r19, %%r4\n"
+# define K_LDW_ASM_PIC	"       copy %%r4, %%r19\n"
+# define K_USING_GR4	"%r4",
+#else
+# define K_STW_ASM_PIC	" \n"
+# define K_LDW_ASM_PIC	" \n"
+# define K_USING_GR4
+#endif
+
+/* GCC has to be warned that a syscall may clobber all the ABI
+   registers listed as "caller-saves", see page 8, Table 2
+   in section 2.2.6 of the PA-RISC RUN-TIME architecture
+   document. However! r28 is the result and will conflict with
+   the clobber list so it is left out. Also the input arguments
+   registers r20 -> r26 will conflict with the list so they
+   are treated specially. Although r19 is clobbered by the syscall
+   we cannot say this because it would violate ABI, thus we say
+   r4 is clobbered and use that register to save/restore r19
+   across the syscall. */
+
+#define K_CALL_CLOB_REGS "%r1", "%r2", K_USING_GR4 \
+	        	 "%r20", "%r29", "%r31"
+
+#undef K_INLINE_SYSCALL
+#define K_INLINE_SYSCALL(name, nr, args...)	({			\
+	long __sys_res;							\
+	{								\
+		register unsigned long __res __asm__("r28");		\
+		K_LOAD_ARGS_##nr(args)					\
+		/* FIXME: HACK stw/ldw r19 around syscall */		\
+		__asm__ volatile(					\
+			K_STW_ASM_PIC					\
+			"	ble  0x100(%%sr2, %%r0)\n"		\
+			"	ldi %1, %%r20\n"			\
+			K_LDW_ASM_PIC					\
+			: "=r" (__res)					\
+			: "i" (SYS_ify(name)) K_ASM_ARGS_##nr   	\
+			: "memory", K_CALL_CLOB_REGS K_CLOB_ARGS_##nr	\
+		);							\
+		__sys_res = (long)__res;				\
+	}								\
+	if ( (unsigned long)__sys_res >= (unsigned long)-4095 ){	\
+		errno = -__sys_res;		        		\
+		__sys_res = -1;						\
+	}								\
+	__sys_res;							\
+})
+
+#define K_LOAD_ARGS_0()
+#define K_LOAD_ARGS_1(r26)					\
+	register unsigned long __r26 __asm__("r26") = (unsigned long)(r26);   \
+	K_LOAD_ARGS_0()
+#define K_LOAD_ARGS_2(r26,r25)					\
+	register unsigned long __r25 __asm__("r25") = (unsigned long)(r25);   \
+	K_LOAD_ARGS_1(r26)
+#define K_LOAD_ARGS_3(r26,r25,r24)				\
+	register unsigned long __r24 __asm__("r24") = (unsigned long)(r24);   \
+	K_LOAD_ARGS_2(r26,r25)
+#define K_LOAD_ARGS_4(r26,r25,r24,r23)				\
+	register unsigned long __r23 __asm__("r23") = (unsigned long)(r23);   \
+	K_LOAD_ARGS_3(r26,r25,r24)
+#define K_LOAD_ARGS_5(r26,r25,r24,r23,r22)			\
+	register unsigned long __r22 __asm__("r22") = (unsigned long)(r22);   \
+	K_LOAD_ARGS_4(r26,r25,r24,r23)
+#define K_LOAD_ARGS_6(r26,r25,r24,r23,r22,r21)			\
+	register unsigned long __r21 __asm__("r21") = (unsigned long)(r21);   \
+	K_LOAD_ARGS_5(r26,r25,r24,r23,r22)
+
+/* Even with zero args we use r20 for the syscall number */
+#define K_ASM_ARGS_0
+#define K_ASM_ARGS_1 K_ASM_ARGS_0, "r" (__r26)
+#define K_ASM_ARGS_2 K_ASM_ARGS_1, "r" (__r25)
+#define K_ASM_ARGS_3 K_ASM_ARGS_2, "r" (__r24)
+#define K_ASM_ARGS_4 K_ASM_ARGS_3, "r" (__r23)
+#define K_ASM_ARGS_5 K_ASM_ARGS_4, "r" (__r22)
+#define K_ASM_ARGS_6 K_ASM_ARGS_5, "r" (__r21)
+
+/* The registers not listed as inputs but clobbered */
+#define K_CLOB_ARGS_6
+#define K_CLOB_ARGS_5 K_CLOB_ARGS_6, "%r21"
+#define K_CLOB_ARGS_4 K_CLOB_ARGS_5, "%r22"
+#define K_CLOB_ARGS_3 K_CLOB_ARGS_4, "%r23"
+#define K_CLOB_ARGS_2 K_CLOB_ARGS_3, "%r24"
+#define K_CLOB_ARGS_1 K_CLOB_ARGS_2, "%r25"
+#define K_CLOB_ARGS_0 K_CLOB_ARGS_1, "%r26"
+
+#define _syscall0(type,name)						\
+type name(void)								\
+{									\
+    return K_INLINE_SYSCALL(name, 0);	                                \
+}
+
+#define _syscall1(type,name,type1,arg1)					\
+type name(type1 arg1)							\
+{									\
+    return K_INLINE_SYSCALL(name, 1, arg1);	                        \
+}
+
+#define _syscall2(type,name,type1,arg1,type2,arg2)			\
+type name(type1 arg1, type2 arg2)					\
+{									\
+    return K_INLINE_SYSCALL(name, 2, arg1, arg2);	                \
+}
+
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)		\
+type name(type1 arg1, type2 arg2, type3 arg3)				\
+{									\
+    return K_INLINE_SYSCALL(name, 3, arg1, arg2, arg3);	                \
+}
+
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4)		\
+{									\
+    return K_INLINE_SYSCALL(name, 4, arg1, arg2, arg3, arg4);	        \
+}
+
+/* select takes 5 arguments */
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)	\
+{									\
+    return K_INLINE_SYSCALL(name, 5, arg1, arg2, arg3, arg4, arg5);	\
+}
+
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_SIGNAL
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_COMPAT_SYS_TIME
+#define __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_COMPAT_SYS_SENDFILE
+
+#endif /* __ASSEMBLY__ */
+
+#undef STR
+
+#endif /* _ASM_PARISC_UNISTD_H_ */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/asm/unwind.h b/src/kernel/linux/v4.14/arch/parisc/include/asm/unwind.h
new file mode 100644
index 0000000..c73a3ee
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/asm/unwind.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UNWIND_H_
+#define _UNWIND_H_
+
+#include <linux/list.h>
+
+/* From ABI specifications */
+struct unwind_table_entry {
+	unsigned int region_start;
+	unsigned int region_end;
+	unsigned int Cannot_unwind:1; /* 0 */
+	unsigned int Millicode:1;	/* 1 */
+	unsigned int Millicode_save_sr0:1;	/* 2 */
+	unsigned int Region_description:2;	/* 3..4 */
+	unsigned int reserved1:1;	/* 5 */
+	unsigned int Entry_SR:1;	/* 6 */
+	unsigned int Entry_FR:4;	/* number saved *//* 7..10 */
+	unsigned int Entry_GR:5;	/* number saved *//* 11..15 */
+	unsigned int Args_stored:1;	/* 16 */
+	unsigned int Variable_Frame:1;	/* 17 */
+	unsigned int Separate_Package_Body:1;	/* 18 */
+	unsigned int Frame_Extension_Millicode:1;	/* 19 */
+	unsigned int Stack_Overflow_Check:1;	/* 20 */
+	unsigned int Two_Instruction_SP_Increment:1;	/* 21 */
+	unsigned int Ada_Region:1;	/* 22 */
+	unsigned int cxx_info:1;	/* 23 */
+	unsigned int cxx_try_catch:1;	/* 24 */
+	unsigned int sched_entry_seq:1;	/* 25 */
+	unsigned int reserved2:1;	/* 26 */
+	unsigned int Save_SP:1;	/* 27 */
+	unsigned int Save_RP:1;	/* 28 */
+	unsigned int Save_MRP_in_frame:1;	/* 29 */
+	unsigned int extn_ptr_defined:1;	/* 30 */
+	unsigned int Cleanup_defined:1;	/* 31 */
+	
+	unsigned int MPE_XL_interrupt_marker:1;	/* 0 */
+	unsigned int HP_UX_interrupt_marker:1;	/* 1 */
+	unsigned int Large_frame:1;	/* 2 */
+	unsigned int Pseudo_SP_Set:1;	/* 3 */
+	unsigned int reserved4:1;	/* 4 */
+	unsigned int Total_frame_size:27;	/* 5..31 */
+};
+
+struct unwind_table {
+	struct list_head list;
+	const char *name;
+	unsigned long gp;
+	unsigned long base_addr;
+	unsigned long start;
+	unsigned long end;
+	const struct unwind_table_entry *table;
+	unsigned long length;
+};
+
+struct unwind_frame_info {
+	struct task_struct *t;
+	/* Eventually we would like to be able to get at any of the registers
+	   available; but for now we only try to get the sp and ip for each
+	   frame */
+	/* struct pt_regs regs; */
+	unsigned long sp, ip, rp, r31;
+	unsigned long prev_sp, prev_ip;
+};
+
+struct unwind_table *
+unwind_table_add(const char *name, unsigned long base_addr, 
+		 unsigned long gp, void *start, void *end);
+void
+unwind_table_remove(struct unwind_table *table);
+
+void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t, 
+		       struct pt_regs *regs);
+void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t);
+void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs);
+int unwind_once(struct unwind_frame_info *info);
+int unwind_to_user(struct unwind_frame_info *info);
+
+int unwind_init(void);
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/Kbuild b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/Kbuild
new file mode 100644
index 0000000..196d2a4
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/Kbuild
@@ -0,0 +1,8 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += auxvec.h
+generic-y += kvm_para.h
+generic-y += param.h
+generic-y += poll.h
+generic-y += resource.h
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/bitsperlong.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/bitsperlong.h
new file mode 100644
index 0000000..307e2ef
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_PARISC_BITSPERLONG_H
+#define __ASM_PARISC_BITSPERLONG_H
+
+#if defined(__LP64__)
+#define __BITS_PER_LONG 64
+#else
+#define __BITS_PER_LONG 32
+#endif
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_PARISC_BITSPERLONG_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/byteorder.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/byteorder.h
new file mode 100644
index 0000000..a59d9b7
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/byteorder.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _PARISC_BYTEORDER_H
+#define _PARISC_BYTEORDER_H
+
+#include <linux/byteorder/big_endian.h>
+
+#endif /* _PARISC_BYTEORDER_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/errno.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/errno.h
new file mode 100644
index 0000000..fc0df35
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/errno.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _PARISC_ERRNO_H
+#define _PARISC_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#define	ENOMSG		35	/* No message of desired type */
+#define	EIDRM		36	/* Identifier removed */
+#define	ECHRNG		37	/* Channel number out of range */
+#define	EL2NSYNC	38	/* Level 2 not synchronized */
+#define	EL3HLT		39	/* Level 3 halted */
+#define	EL3RST		40	/* Level 3 reset */
+#define	ELNRNG		41	/* Link number out of range */
+#define	EUNATCH		42	/* Protocol driver not attached */
+#define	ENOCSI		43	/* No CSI structure available */
+#define	EL2HLT		44	/* Level 2 halted */
+#define	EDEADLK		45	/* Resource deadlock would occur */
+#define	EDEADLOCK	EDEADLK
+#define	ENOLCK		46	/* No record locks available */
+#define	EILSEQ		47	/* Illegal byte sequence */
+
+#define	ENONET		50	/* Machine is not on the network */
+#define	ENODATA		51	/* No data available */
+#define	ETIME		52	/* Timer expired */
+#define	ENOSR		53	/* Out of streams resources */
+#define	ENOSTR		54	/* Device not a stream */
+#define	ENOPKG		55	/* Package not installed */
+
+#define	ENOLINK		57	/* Link has been severed */
+#define	EADV		58	/* Advertise error */
+#define	ESRMNT		59	/* Srmount error */
+#define	ECOMM		60	/* Communication error on send */
+#define	EPROTO		61	/* Protocol error */
+
+#define	EMULTIHOP	64	/* Multihop attempted */
+
+#define	EDOTDOT		66	/* RFS specific error */
+#define	EBADMSG		67	/* Not a data message */
+#define	EUSERS		68	/* Too many users */
+#define	EDQUOT		69	/* Quota exceeded */
+#define	ESTALE		70	/* Stale file handle */
+#define	EREMOTE		71	/* Object is remote */
+#define	EOVERFLOW	72	/* Value too large for defined data type */
+
+/* these errnos are defined by Linux but not HPUX. */
+
+#define	EBADE		160	/* Invalid exchange */
+#define	EBADR		161	/* Invalid request descriptor */
+#define	EXFULL		162	/* Exchange full */
+#define	ENOANO		163	/* No anode */
+#define	EBADRQC		164	/* Invalid request code */
+#define	EBADSLT		165	/* Invalid slot */
+#define	EBFONT		166	/* Bad font file format */
+#define	ENOTUNIQ	167	/* Name not unique on network */
+#define	EBADFD		168	/* File descriptor in bad state */
+#define	EREMCHG		169	/* Remote address changed */
+#define	ELIBACC		170	/* Can not access a needed shared library */
+#define	ELIBBAD		171	/* Accessing a corrupted shared library */
+#define	ELIBSCN		172	/* .lib section in a.out corrupted */
+#define	ELIBMAX		173	/* Attempting to link in too many shared libraries */
+#define	ELIBEXEC	174	/* Cannot exec a shared library directly */
+#define	ERESTART	175	/* Interrupted system call should be restarted */
+#define	ESTRPIPE	176	/* Streams pipe error */
+#define	EUCLEAN		177	/* Structure needs cleaning */
+#define	ENOTNAM		178	/* Not a XENIX named type file */
+#define	ENAVAIL		179	/* No XENIX semaphores available */
+#define	EISNAM		180	/* Is a named type file */
+#define	EREMOTEIO	181	/* Remote I/O error */
+#define	ENOMEDIUM	182	/* No medium found */
+#define	EMEDIUMTYPE	183	/* Wrong medium type */
+#define	ENOKEY		184	/* Required key not available */
+#define	EKEYEXPIRED	185	/* Key has expired */
+#define	EKEYREVOKED	186	/* Key has been revoked */
+#define	EKEYREJECTED	187	/* Key was rejected by service */
+
+/* We now return you to your regularly scheduled HPUX. */
+
+#define ENOSYM		215	/* symbol does not exist in executable */
+#define	ENOTSOCK	216	/* Socket operation on non-socket */
+#define	EDESTADDRREQ	217	/* Destination address required */
+#define	EMSGSIZE	218	/* Message too long */
+#define	EPROTOTYPE	219	/* Protocol wrong type for socket */
+#define	ENOPROTOOPT	220	/* Protocol not available */
+#define	EPROTONOSUPPORT	221	/* Protocol not supported */
+#define	ESOCKTNOSUPPORT	222	/* Socket type not supported */
+#define	EOPNOTSUPP	223	/* Operation not supported on transport endpoint */
+#define	EPFNOSUPPORT	224	/* Protocol family not supported */
+#define	EAFNOSUPPORT	225	/* Address family not supported by protocol */
+#define	EADDRINUSE	226	/* Address already in use */
+#define	EADDRNOTAVAIL	227	/* Cannot assign requested address */
+#define	ENETDOWN	228	/* Network is down */
+#define	ENETUNREACH	229	/* Network is unreachable */
+#define	ENETRESET	230	/* Network dropped connection because of reset */
+#define	ECONNABORTED	231	/* Software caused connection abort */
+#define	ECONNRESET	232	/* Connection reset by peer */
+#define	ENOBUFS		233	/* No buffer space available */
+#define	EISCONN		234	/* Transport endpoint is already connected */
+#define	ENOTCONN	235	/* Transport endpoint is not connected */
+#define	ESHUTDOWN	236	/* Cannot send after transport endpoint shutdown */
+#define	ETOOMANYREFS	237	/* Too many references: cannot splice */
+#define	ETIMEDOUT	238	/* Connection timed out */
+#define	ECONNREFUSED	239	/* Connection refused */
+#define	EREFUSED	ECONNREFUSED	/* for HP's NFS apparently */
+#define	EREMOTERELEASE	240	/* Remote peer released connection */
+#define	EHOSTDOWN	241	/* Host is down */
+#define	EHOSTUNREACH	242	/* No route to host */
+
+#define	EALREADY	244	/* Operation already in progress */
+#define	EINPROGRESS	245	/* Operation now in progress */
+#define	EWOULDBLOCK	EAGAIN	/* Operation would block (Not HPUX compliant) */
+#define	ENOTEMPTY	247	/* Directory not empty */
+#define	ENAMETOOLONG	248	/* File name too long */
+#define	ELOOP		249	/* Too many symbolic links encountered */
+#define	ENOSYS		251	/* Function not implemented */
+
+#define ENOTSUP		252	/* Function not implemented (POSIX.4 / HPUX) */
+#define ECANCELLED	253	/* aio request was canceled before complete (POSIX.4 / HPUX) */
+#define ECANCELED	ECANCELLED	/* SuSv3 and Solaris wants one 'L' */
+
+/* for robust mutexes */
+#define EOWNERDEAD	254	/* Owner died */
+#define ENOTRECOVERABLE	255	/* State not recoverable */
+
+#define	ERFKILL		256	/* Operation not possible due to RF-kill */
+
+#define EHWPOISON	257	/* Memory page has hardware error */
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/fcntl.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/fcntl.h
new file mode 100644
index 0000000..03ce20e
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/fcntl.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _PARISC_FCNTL_H
+#define _PARISC_FCNTL_H
+
+#define O_APPEND	000000010
+#define O_BLKSEEK	000000100 /* HPUX only */
+#define O_CREAT		000000400 /* not fcntl */
+#define O_EXCL		000002000 /* not fcntl */
+#define O_LARGEFILE	000004000
+#define __O_SYNC	000100000
+#define O_SYNC		(__O_SYNC|O_DSYNC)
+#define O_NONBLOCK	000200004 /* HPUX has separate NDELAY & NONBLOCK */
+#define O_NOCTTY	000400000 /* not fcntl */
+#define O_DSYNC		001000000 /* HPUX only */
+#define O_RSYNC		002000000 /* HPUX only */
+#define O_NOATIME	004000000
+#define O_CLOEXEC	010000000 /* set close_on_exec */
+
+#define O_DIRECTORY	000010000 /* must be a directory */
+#define O_NOFOLLOW	000000200 /* don't follow links */
+#define O_INVISIBLE	004000000 /* invisible I/O, for DMAPI/XDSM */
+
+#define O_PATH		020000000
+#define __O_TMPFILE	040000000
+
+#define F_GETLK64	8
+#define F_SETLK64	9
+#define F_SETLKW64	10
+
+#define F_GETOWN	11	/*  for sockets. */
+#define F_SETOWN	12	/*  for sockets. */
+#define F_SETSIG	13	/*  for sockets. */
+#define F_GETSIG	14	/*  for sockets. */
+
+/* for posix fcntl() and lockf() */
+#define F_RDLCK		01
+#define F_WRLCK		02
+#define F_UNLCK		03
+
+#include <asm-generic/fcntl.h>
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/ioctl.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/ioctl.h
new file mode 100644
index 0000000..b509bcc
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/ioctl.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ *    Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *    Copyright (C) 1999,2003 Matthew Wilcox < willy at debian . org >
+ *    portions from "linux/ioctl.h for Linux" by H.H. Bergman.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+
+#ifndef _ASM_PARISC_IOCTL_H
+#define _ASM_PARISC_IOCTL_H
+
+/* ioctl command encoding: 32 bits total, command in lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits.
+ * Encoding the size of the parameter structure in the ioctl request
+ * is useful for catching programs compiled with old versions
+ * and to avoid overwriting user space outside the user buffer area.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the max parameter size to 16kB -1 !
+ */
+
+/*
+ * Direction bits.
+ */
+#define _IOC_NONE	0U
+#define _IOC_WRITE	2U
+#define _IOC_READ	1U
+
+#include <asm-generic/ioctl.h>
+
+#endif /* _ASM_PARISC_IOCTL_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/ioctls.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/ioctls.h
new file mode 100644
index 0000000..aafb1c0
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/ioctls.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ARCH_PARISC_IOCTLS_H__
+#define __ARCH_PARISC_IOCTLS_H__
+
+#include <asm/ioctl.h>
+
+/* 0x54 is just a magic number to make these relatively unique ('T') */
+
+#define TCGETS		_IOR('T', 16, struct termios) /* TCGETATTR */
+#define TCSETS		_IOW('T', 17, struct termios) /* TCSETATTR */
+#define TCSETSW		_IOW('T', 18, struct termios) /* TCSETATTRD */
+#define TCSETSF		_IOW('T', 19, struct termios) /* TCSETATTRF */
+#define TCGETA		_IOR('T', 1, struct termio)
+#define TCSETA		_IOW('T', 2, struct termio)
+#define TCSETAW		_IOW('T', 3, struct termio)
+#define TCSETAF		_IOW('T', 4, struct termio)
+#define TCSBRK		_IO('T', 5)
+#define TCXONC		_IO('T', 6)
+#define TCFLSH		_IO('T', 7)
+#define TIOCEXCL	0x540C
+#define TIOCNXCL	0x540D
+#define TIOCSCTTY	0x540E
+#define TIOCGPGRP	_IOR('T', 30, int)
+#define TIOCSPGRP	_IOW('T', 29, int)
+#define TIOCOUTQ	0x5411
+#define TIOCSTI		0x5412
+#define TIOCGWINSZ	0x5413
+#define TIOCSWINSZ	0x5414
+#define TIOCMGET	0x5415
+#define TIOCMBIS	0x5416
+#define TIOCMBIC	0x5417
+#define TIOCMSET	0x5418
+#define TIOCGSOFTCAR	0x5419
+#define TIOCSSOFTCAR	0x541A
+#define FIONREAD	0x541B
+#define TIOCINQ		FIONREAD
+#define TIOCLINUX	0x541C
+#define TIOCCONS	0x541D
+#define TIOCGSERIAL	0x541E
+#define TIOCSSERIAL	0x541F
+#define TIOCPKT		0x5420
+#define FIONBIO		0x5421
+#define TIOCNOTTY	0x5422
+#define TIOCSETD	0x5423
+#define TIOCGETD	0x5424
+#define TCSBRKP		0x5425	/* Needed for POSIX tcsendbreak() */
+#define TIOCSBRK	0x5427  /* BSD compatibility */
+#define TIOCCBRK	0x5428  /* BSD compatibility */
+#define TIOCGSID	_IOR('T', 20, int) /* Return the session ID of FD */
+#define TCGETS2		_IOR('T',0x2A, struct termios2)
+#define TCSETS2		_IOW('T',0x2B, struct termios2)
+#define TCSETSW2	_IOW('T',0x2C, struct termios2)
+#define TCSETSF2	_IOW('T',0x2D, struct termios2)
+#define TIOCGRS485	_IOR('T', 0x2E, struct serial_rs485)
+#define TIOCSRS485	_IOWR('T', 0x2F, struct serial_rs485)
+#define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
+#define TIOCGDEV	_IOR('T',0x32, int)  /* Get primary device node of /dev/console */
+#define TIOCSIG		_IOW('T',0x36, int)  /* Generate signal on Pty slave */
+#define TIOCVHANGUP	0x5437
+#define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
+#define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
+#define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
+#define TIOCGPTPEER	_IO('T', 0x41) /* Safely open the slave */
+
+#define FIONCLEX	0x5450  /* these numbers need to be adjusted. */
+#define FIOCLEX		0x5451
+#define FIOASYNC	0x5452
+#define TIOCSERCONFIG	0x5453
+#define TIOCSERGWILD	0x5454
+#define TIOCSERSWILD	0x5455
+#define TIOCGLCKTRMIOS	0x5456
+#define TIOCSLCKTRMIOS	0x5457
+#define TIOCSERGSTRUCT	0x5458 /* For debugging only */
+#define TIOCSERGETLSR   0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config  */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT	0x545C	/* wait for a change on serial input line(s) */
+#define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */
+#define FIOQSIZE	0x5460	/* Get exact space used by quota */
+
+#define TIOCSTART	0x5461
+#define TIOCSTOP	0x5462
+#define TIOCSLTC	0x5462
+
+/* Used for packet mode */
+#define TIOCPKT_DATA		 0
+#define TIOCPKT_FLUSHREAD	 1
+#define TIOCPKT_FLUSHWRITE	 2
+#define TIOCPKT_STOP		 4
+#define TIOCPKT_START		 8
+#define TIOCPKT_NOSTOP		16
+#define TIOCPKT_DOSTOP		32
+#define TIOCPKT_IOCTL		64
+
+#define TIOCSER_TEMT    0x01	/* Transmitter physically empty */
+
+#endif /* _ASM_PARISC_IOCTLS_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/ipcbuf.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/ipcbuf.h
new file mode 100644
index 0000000..edf2662
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/ipcbuf.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __PARISC_IPCBUF_H__
+#define __PARISC_IPCBUF_H__
+
+#include <asm/bitsperlong.h>
+#include <linux/posix_types.h>
+
+/*
+ * The ipc64_perm structure for PA-RISC is almost identical to
+ * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the kernel.
+ * 'seq' has been changed from long to int so that it's the same size
+ * on 64-bit kernels as on 32-bit ones.
+ */
+
+struct ipc64_perm
+{
+	__kernel_key_t		key;
+	__kernel_uid_t		uid;
+	__kernel_gid_t		gid;
+	__kernel_uid_t		cuid;
+	__kernel_gid_t		cgid;
+#if __BITS_PER_LONG != 64
+	unsigned short int	__pad1;
+#endif
+	__kernel_mode_t		mode;
+	unsigned short int	__pad2;
+	unsigned short int	seq;
+	unsigned int		__pad3;
+	unsigned long long int __unused1;
+	unsigned long long int __unused2;
+};
+
+#endif /* __PARISC_IPCBUF_H__ */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/mman.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/mman.h
new file mode 100644
index 0000000..d1af0d7
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/mman.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __PARISC_MMAN_H__
+#define __PARISC_MMAN_H__
+
+#define PROT_READ	0x1		/* page can be read */
+#define PROT_WRITE	0x2		/* page can be written */
+#define PROT_EXEC	0x4		/* page can be executed */
+#define PROT_SEM	0x8		/* page may be used for atomic ops */
+#define PROT_NONE	0x0		/* page can not be accessed */
+#define PROT_GROWSDOWN	0x01000000	/* mprotect flag: extend change to start of growsdown vma */
+#define PROT_GROWSUP	0x02000000	/* mprotect flag: extend change to end of growsup vma */
+
+#define MAP_SHARED	0x01		/* Share changes */
+#define MAP_PRIVATE	0x02		/* Changes are private */
+#define MAP_TYPE	0x03		/* Mask for type of mapping */
+#define MAP_FIXED	0x04		/* Interpret addr exactly */
+#define MAP_ANONYMOUS	0x10		/* don't use a file */
+
+#define MAP_DENYWRITE	0x0800		/* ETXTBSY */
+#define MAP_EXECUTABLE	0x1000		/* mark it as an executable */
+#define MAP_LOCKED	0x2000		/* pages are locked */
+#define MAP_NORESERVE	0x4000		/* don't check for reservations */
+#define MAP_GROWSDOWN	0x8000		/* stack-like segment */
+#define MAP_POPULATE	0x10000		/* populate (prefault) pagetables */
+#define MAP_NONBLOCK	0x20000		/* do not block on IO */
+#define MAP_STACK	0x40000		/* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB	0x80000		/* create a huge page mapping */
+
+#define MS_SYNC		1		/* synchronous memory sync */
+#define MS_ASYNC	2		/* sync memory asynchronously */
+#define MS_INVALIDATE	4		/* invalidate the caches */
+
+#define MCL_CURRENT	1		/* lock all current mappings */
+#define MCL_FUTURE	2		/* lock all future mappings */
+#define MCL_ONFAULT	4		/* lock all pages that are faulted in */
+
+#define MLOCK_ONFAULT	0x01		/* Lock pages in range after they are faulted in, do not prefault */
+
+#define MADV_NORMAL     0               /* no further special treatment */
+#define MADV_RANDOM     1               /* expect random page references */
+#define MADV_SEQUENTIAL 2               /* expect sequential page references */
+#define MADV_WILLNEED   3               /* will need these pages */
+#define MADV_DONTNEED   4               /* don't need these pages */
+
+/* common/generic parameters */
+#define MADV_FREE	8		/* free pages only if memory pressure */
+#define MADV_REMOVE	9		/* remove these pages & resources */
+#define MADV_DONTFORK	10		/* don't inherit across fork */
+#define MADV_DOFORK	11		/* do inherit across fork */
+
+#define MADV_MERGEABLE   65		/* KSM may merge identical pages */
+#define MADV_UNMERGEABLE 66		/* KSM may not merge identical pages */
+
+#define MADV_HUGEPAGE	67		/* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE	68		/* Not worth backing with hugepages */
+
+#define MADV_DONTDUMP   69		/* Explicitly exclude from the core dump,
+					   overrides the coredump filter bits */
+#define MADV_DODUMP	70		/* Clear the MADV_DONTDUMP flag */
+
+#define MADV_WIPEONFORK 71		/* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 72		/* Undo MADV_WIPEONFORK */
+
+#define MADV_HWPOISON     100		/* poison a page for testing */
+#define MADV_SOFT_OFFLINE 101		/* soft offline page for testing */
+
+/* compatibility flags */
+#define MAP_FILE	0
+#define MAP_VARIABLE	0
+
+#define PKEY_DISABLE_ACCESS	0x1
+#define PKEY_DISABLE_WRITE	0x2
+#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
+				 PKEY_DISABLE_WRITE)
+
+#endif /* __PARISC_MMAN_H__ */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/msgbuf.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/msgbuf.h
new file mode 100644
index 0000000..b48b810
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/msgbuf.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _PARISC_MSGBUF_H
+#define _PARISC_MSGBUF_H
+
+#include <asm/bitsperlong.h>
+
+/* 
+ * The msqid64_ds structure for parisc architecture, copied from sparc.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct msqid64_ds {
+	struct ipc64_perm msg_perm;
+#if __BITS_PER_LONG != 64
+	unsigned int   __pad1;
+#endif
+	__kernel_time_t msg_stime;	/* last msgsnd time */
+#if __BITS_PER_LONG != 64
+	unsigned int   __pad2;
+#endif
+	__kernel_time_t msg_rtime;	/* last msgrcv time */
+#if __BITS_PER_LONG != 64
+	unsigned int   __pad3;
+#endif
+	__kernel_time_t msg_ctime;	/* last change time */
+	unsigned long msg_cbytes;	/* current number of bytes on queue */
+	unsigned long msg_qnum;		/* number of messages in queue */
+	unsigned long msg_qbytes;	/* max number of bytes on queue */
+	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
+	__kernel_pid_t msg_lrpid;	/* last receive pid */
+	unsigned long __unused1;
+	unsigned long __unused2;
+};
+
+#endif /* _PARISC_MSGBUF_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/pdc.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/pdc.h
new file mode 100644
index 0000000..0ad1176
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/pdc.h
@@ -0,0 +1,428 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_PARISC_PDC_H
+#define _UAPI_PARISC_PDC_H
+
+/*
+ *	PDC return values ...
+ *	All PDC calls return a subset of these errors. 
+ */
+
+#define PDC_WARN		  3	/* Call completed with a warning */
+#define PDC_REQ_ERR_1		  2	/* See above			 */
+#define PDC_REQ_ERR_0		  1	/* Call would generate a requestor error */
+#define PDC_OK			  0	/* Call completed successfully	*/
+#define PDC_BAD_PROC		 -1	/* Called non-existent procedure*/
+#define PDC_BAD_OPTION		 -2	/* Called with non-existent option */
+#define PDC_ERROR		 -3	/* Call could not complete without an error */
+#define PDC_NE_MOD		 -5	/* Module not found		*/
+#define PDC_NE_CELL_MOD		 -7	/* Cell module not found	*/
+#define PDC_INVALID_ARG		-10	/* Called with an invalid argument */
+#define PDC_BUS_POW_WARN	-12	/* Call could not complete in allowed power budget */
+#define PDC_NOT_NARROW		-17	/* Narrow mode not supported	*/
+
+/*
+ *	PDC entry points...
+ */
+
+#define PDC_POW_FAIL	1		/* perform a power-fail		*/
+#define PDC_POW_FAIL_PREPARE	0	/* prepare for powerfail	*/
+
+#define PDC_CHASSIS	2		/* PDC-chassis functions	*/
+#define PDC_CHASSIS_DISP	0	/* update chassis display	*/
+#define PDC_CHASSIS_WARN	1	/* return chassis warnings	*/
+#define PDC_CHASSIS_DISPWARN	2	/* update&return chassis status */
+#define PDC_RETURN_CHASSIS_INFO 128	/* HVERSION dependent: return chassis LED/LCD info  */
+
+#define PDC_PIM         3               /* Get PIM data                 */
+#define PDC_PIM_HPMC            0       /* Transfer HPMC data           */
+#define PDC_PIM_RETURN_SIZE     1       /* Get Max buffer needed for PIM*/
+#define PDC_PIM_LPMC            2       /* Transfer LPMC data           */
+#define PDC_PIM_SOFT_BOOT       3       /* Transfer Soft Boot data      */
+#define PDC_PIM_TOC             4       /* Transfer TOC data            */
+
+#define PDC_MODEL	4		/* PDC model information call	*/
+#define PDC_MODEL_INFO		0	/* returns information 		*/
+#define PDC_MODEL_BOOTID	1	/* set the BOOT_ID		*/
+#define PDC_MODEL_VERSIONS	2	/* returns cpu-internal versions*/
+#define PDC_MODEL_SYSMODEL	3	/* return system model info	*/
+#define PDC_MODEL_ENSPEC	4	/* enable specific option	*/
+#define PDC_MODEL_DISPEC	5	/* disable specific option	*/
+#define PDC_MODEL_CPU_ID	6	/* returns cpu-id (only newer machines!) */
+#define PDC_MODEL_CAPABILITIES	7	/* returns OS32/OS64-flags	*/
+/* Values for PDC_MODEL_CAPABILITIES non-equivalent virtual aliasing support */
+#define  PDC_MODEL_OS64			(1 << 0)
+#define  PDC_MODEL_OS32			(1 << 1)
+#define  PDC_MODEL_IOPDIR_FDC		(1 << 2)
+#define  PDC_MODEL_NVA_MASK		(3 << 4)
+#define  PDC_MODEL_NVA_SUPPORTED	(0 << 4)
+#define  PDC_MODEL_NVA_SLOW		(1 << 4)
+#define  PDC_MODEL_NVA_UNSUPPORTED	(3 << 4)
+#define PDC_MODEL_GET_BOOT__OP	8	/* returns boot test options	*/
+#define PDC_MODEL_SET_BOOT__OP	9	/* set boot test options	*/
+
+#define PA89_INSTRUCTION_SET	0x4	/* capabilities returned	*/
+#define PA90_INSTRUCTION_SET	0x8
+
+#define PDC_CACHE	5		/* return/set cache (& TLB) info*/
+#define PDC_CACHE_INFO		0	/* returns information 		*/
+#define PDC_CACHE_SET_COH	1	/* set coherence state		*/
+#define PDC_CACHE_RET_SPID	2	/* returns space-ID bits	*/
+
+#define PDC_HPA		6		/* return HPA of processor	*/
+#define PDC_HPA_PROCESSOR	0
+#define PDC_HPA_MODULES		1
+
+#define PDC_COPROC	7		/* Co-Processor (usually FP unit(s)) */
+#define PDC_COPROC_CFG		0	/* Co-Processor Cfg (FP unit(s) enabled?) */
+
+#define PDC_IODC	8		/* talk to IODC			*/
+#define PDC_IODC_READ		0	/* read IODC entry point	*/
+/*      PDC_IODC_RI_			 * INDEX parameter of PDC_IODC_READ */
+#define PDC_IODC_RI_DATA_BYTES	0	/* IODC Data Bytes		*/
+/*				1, 2	   obsolete - HVERSION dependent*/
+#define PDC_IODC_RI_INIT	3	/* Initialize module		*/
+#define PDC_IODC_RI_IO		4	/* Module input/output		*/
+#define PDC_IODC_RI_SPA		5	/* Module SPA			*/
+#define PDC_IODC_RI_CONFIG	6	/* Module configuration		*/
+/*				7	  obsolete - HVERSION dependent */
+#define PDC_IODC_RI_TEST	8	/* Module test			*/
+#define PDC_IODC_RI_TLB		9	/* Module TLB			*/
+#define PDC_IODC_NINIT		2	/* non-destructive init		*/
+#define PDC_IODC_DINIT		3	/* destructive init		*/
+#define PDC_IODC_MEMERR		4	/* check for memory errors	*/
+#define PDC_IODC_INDEX_DATA	0	/* get first 16 bytes from mod IODC */
+#define PDC_IODC_BUS_ERROR	-4	/* bus error return value	*/
+#define PDC_IODC_INVALID_INDEX	-5	/* invalid index return value	*/
+#define PDC_IODC_COUNT		-6	/* count is too small		*/
+
+#define PDC_TOD		9		/* time-of-day clock (TOD)	*/
+#define PDC_TOD_READ		0	/* read TOD			*/
+#define PDC_TOD_WRITE		1	/* write TOD			*/
+
+
+#define PDC_STABLE	10		/* stable storage (sprockets)	*/
+#define PDC_STABLE_READ		0
+#define PDC_STABLE_WRITE	1
+#define PDC_STABLE_RETURN_SIZE	2
+#define PDC_STABLE_VERIFY_CONTENTS 3
+#define PDC_STABLE_INITIALIZE	4
+
+#define PDC_NVOLATILE	11		/* often not implemented	*/
+
+#define PDC_ADD_VALID	12		/* Memory validation PDC call	*/
+#define PDC_ADD_VALID_VERIFY	0	/* Make PDC_ADD_VALID verify region */
+
+#define PDC_INSTR	15		/* get instr to invoke PDCE_CHECK() */
+
+#define PDC_PROC	16		/* (sprockets)			*/
+
+#define PDC_CONFIG	16		/* (sprockets)			*/
+#define PDC_CONFIG_DECONFIG	0
+#define PDC_CONFIG_DRECONFIG	1
+#define PDC_CONFIG_DRETURN_CONFIG 2
+
+#define PDC_BLOCK_TLB	18		/* manage hardware block-TLB	*/
+#define PDC_BTLB_INFO		0	/* returns parameter 		*/
+#define PDC_BTLB_INSERT		1	/* insert BTLB entry		*/
+#define PDC_BTLB_PURGE		2	/* purge BTLB entries 		*/
+#define PDC_BTLB_PURGE_ALL	3	/* purge all BTLB entries 	*/
+
+#define PDC_TLB		19		/* manage hardware TLB miss handling */
+#define PDC_TLB_INFO		0	/* returns parameter 		*/
+#define PDC_TLB_SETUP		1	/* set up miss handling 	*/
+
+#define PDC_MEM		20		/* Manage memory		*/
+#define PDC_MEM_MEMINFO		0	/* Return PDT info		*/
+#define PDC_MEM_ADD_PAGE	1	/* Add page to PDT		*/
+#define PDC_MEM_CLEAR_PDT	2	/* Clear PDT			*/
+#define PDC_MEM_READ_PDT	3	/* Read PDT entry		*/
+#define PDC_MEM_RESET_CLEAR	4	/* Reset PDT clear flag		*/
+#define PDC_MEM_GOODMEM		5	/* Set good_mem value		*/
+#define PDC_MEM_TABLE		128	/* Non contig mem map (sprockets) */
+#define PDC_MEM_RETURN_ADDRESS_TABLE	PDC_MEM_TABLE
+#define PDC_MEM_GET_MEMORY_SYSTEM_TABLES_SIZE	131
+#define PDC_MEM_GET_MEMORY_SYSTEM_TABLES	132
+#define PDC_MEM_GET_PHYSICAL_LOCATION_FROM_MEMORY_ADDRESS 133
+
+#define PDC_MEM_RET_SBE_REPLACED	5	/* PDC_MEM return values */
+#define PDC_MEM_RET_DUPLICATE_ENTRY	4
+#define PDC_MEM_RET_BUF_SIZE_SMALL	1
+#define PDC_MEM_RET_PDT_FULL		-11
+#define PDC_MEM_RET_INVALID_PHYSICAL_LOCATION ~0ULL
+
+#define PDC_PSW		21		/* Get/Set default System Mask  */
+#define PDC_PSW_MASK		0	/* Return mask                  */
+#define PDC_PSW_GET_DEFAULTS	1	/* Return defaults              */
+#define PDC_PSW_SET_DEFAULTS	2	/* Set default                  */
+#define PDC_PSW_ENDIAN_BIT	1	/* set for big endian           */
+#define PDC_PSW_WIDE_BIT	2	/* set for wide mode            */ 
+
+#define PDC_SYSTEM_MAP	22		/* find system modules		*/
+#define PDC_FIND_MODULE 	0
+#define PDC_FIND_ADDRESS	1
+#define PDC_TRANSLATE_PATH	2
+
+#define PDC_SOFT_POWER	23		/* soft power switch		*/
+#define PDC_SOFT_POWER_INFO	0	/* return info about the soft power switch */
+#define PDC_SOFT_POWER_ENABLE	1	/* enable/disable soft power switch */
+
+
+/* HVERSION dependent */
+
+/* The PDC_MEM_MAP calls */
+#define PDC_MEM_MAP	128		/* on s700: return page info	*/
+#define PDC_MEM_MAP_HPA		0	/* returns hpa of a module	*/
+
+#define PDC_EEPROM	129		/* EEPROM access		*/
+#define PDC_EEPROM_READ_WORD	0
+#define PDC_EEPROM_WRITE_WORD	1
+#define PDC_EEPROM_READ_BYTE	2
+#define PDC_EEPROM_WRITE_BYTE	3
+#define PDC_EEPROM_EEPROM_PASSWORD -1000
+
+#define PDC_NVM		130		/* NVM (non-volatile memory) access */
+#define PDC_NVM_READ_WORD	0
+#define PDC_NVM_WRITE_WORD	1
+#define PDC_NVM_READ_BYTE	2
+#define PDC_NVM_WRITE_BYTE	3
+
+#define PDC_SEED_ERROR	132		/* (sprockets)			*/
+
+#define PDC_IO		135		/* log error info, reset IO system */
+#define PDC_IO_READ_AND_CLEAR_ERRORS	0
+#define PDC_IO_RESET			1
+#define PDC_IO_RESET_DEVICES		2
+/* sets bits 6&7 (little endian) of the HcControl Register */
+#define PDC_IO_USB_SUSPEND	0xC000000000000000
+#define PDC_IO_EEPROM_IO_ERR_TABLE_FULL	-5	/* return value */
+#define PDC_IO_NO_SUSPEND		-6	/* return value */
+
+#define PDC_BROADCAST_RESET 136		/* reset all processors		*/
+#define PDC_DO_RESET		0	/* option: perform a broadcast reset */
+#define PDC_DO_FIRM_TEST_RESET	1	/* Do broadcast reset with bitmap */
+#define PDC_BR_RECONFIGURATION	2	/* reset w/reconfiguration	*/
+#define PDC_FIRM_TEST_MAGIC	0xab9ec36fUL    /* for this reboot only	*/
+
+#define PDC_LAN_STATION_ID 138		/* Hversion dependent mechanism for */
+#define PDC_LAN_STATION_ID_READ	0	/* getting the lan station address  */
+
+#define	PDC_LAN_STATION_ID_SIZE	6
+
+#define PDC_CHECK_RANGES 139		/* (sprockets)			*/
+
+#define PDC_NV_SECTIONS	141		/* (sprockets)			*/
+
+#define PDC_PERFORMANCE	142		/* performance monitoring	*/
+
+#define PDC_SYSTEM_INFO	143		/* system information		*/
+#define PDC_SYSINFO_RETURN_INFO_SIZE	0
+#define PDC_SYSINFO_RRETURN_SYS_INFO	1
+#define PDC_SYSINFO_RRETURN_ERRORS	2
+#define PDC_SYSINFO_RRETURN_WARNINGS	3
+#define PDC_SYSINFO_RETURN_REVISIONS	4
+#define PDC_SYSINFO_RRETURN_DIAGNOSE	5
+#define PDC_SYSINFO_RRETURN_HV_DIAGNOSE	1005
+
+#define PDC_RDR		144		/* (sprockets)			*/
+#define PDC_RDR_READ_BUFFER	0
+#define PDC_RDR_READ_SINGLE	1
+#define PDC_RDR_WRITE_SINGLE	2
+
+#define PDC_INTRIGUE	145 		/* (sprockets)			*/
+#define PDC_INTRIGUE_WRITE_BUFFER 	 0
+#define PDC_INTRIGUE_GET_SCRATCH_BUFSIZE 1
+#define PDC_INTRIGUE_START_CPU_COUNTERS	 2
+#define PDC_INTRIGUE_STOP_CPU_COUNTERS	 3
+
+#define PDC_STI		146 		/* STI access			*/
+/* same as PDC_PCI_XXX values (see below) */
+
+/* Legacy PDC definitions for same stuff */
+#define PDC_PCI_INDEX	147
+#define PDC_PCI_INTERFACE_INFO		0
+#define PDC_PCI_SLOT_INFO		1
+#define PDC_PCI_INFLIGHT_BYTES		2
+#define PDC_PCI_READ_CONFIG		3
+#define PDC_PCI_WRITE_CONFIG		4
+#define PDC_PCI_READ_PCI_IO		5
+#define PDC_PCI_WRITE_PCI_IO		6
+#define PDC_PCI_READ_CONFIG_DELAY	7
+#define PDC_PCI_UPDATE_CONFIG_DELAY	8
+#define PDC_PCI_PCI_PATH_TO_PCI_HPA	9
+#define PDC_PCI_PCI_HPA_TO_PCI_PATH	10
+#define PDC_PCI_PCI_PATH_TO_PCI_BUS	11
+#define PDC_PCI_PCI_RESERVED		12
+#define PDC_PCI_PCI_INT_ROUTE_SIZE	13
+#define PDC_PCI_GET_INT_TBL_SIZE	PDC_PCI_PCI_INT_ROUTE_SIZE
+#define PDC_PCI_PCI_INT_ROUTE		14
+#define PDC_PCI_GET_INT_TBL		PDC_PCI_PCI_INT_ROUTE 
+#define PDC_PCI_READ_MON_TYPE		15
+#define PDC_PCI_WRITE_MON_TYPE		16
+
+
+/* Get SCSI Interface Card info:  SDTR, SCSI ID, mode (SE vs LVD) */
+#define PDC_INITIATOR	163
+#define PDC_GET_INITIATOR	0
+#define PDC_SET_INITIATOR	1
+#define PDC_DELETE_INITIATOR	2
+#define PDC_RETURN_TABLE_SIZE	3
+#define PDC_RETURN_TABLE	4
+
+#define PDC_LINK	165 		/* (sprockets)			*/
+#define PDC_LINK_PCI_ENTRY_POINTS	0  /* list (Arg1) = 0 */
+#define PDC_LINK_USB_ENTRY_POINTS	1  /* list (Arg1) = 1 */
+
+/* cl_class
+ * page 3-33 of IO-Firmware ARS
+ * IODC ENTRY_INIT(Search first) RET[1]
+ */
+#define	CL_NULL		0	/* invalid */
+#define	CL_RANDOM	1	/* random access (as disk) */
+#define	CL_SEQU		2	/* sequential access (as tape) */
+#define	CL_DUPLEX	7	/* full-duplex point-to-point (RS-232, Net) */
+#define	CL_KEYBD	8	/* half-duplex console (HIL Keyboard) */
+#define	CL_DISPL	9	/* half-duplex console (display) */
+#define	CL_FC		10	/* FiberChannel access media */
+
+/* IODC ENTRY_INIT() */
+#define ENTRY_INIT_SRCH_FRST	2
+#define ENTRY_INIT_SRCH_NEXT	3
+#define ENTRY_INIT_MOD_DEV	4
+#define ENTRY_INIT_DEV		5
+#define ENTRY_INIT_MOD		6
+#define ENTRY_INIT_MSG		9
+
+/* IODC ENTRY_IO() */
+#define ENTRY_IO_BOOTIN		0
+#define ENTRY_IO_BOOTOUT	1
+#define ENTRY_IO_CIN		2
+#define ENTRY_IO_COUT		3
+#define ENTRY_IO_CLOSE		4
+#define ENTRY_IO_GETMSG		9
+#define ENTRY_IO_BBLOCK_IN	16
+#define ENTRY_IO_BBLOCK_OUT	17
+
+/* IODC ENTRY_SPA() */
+
+/* IODC ENTRY_CONFIG() */
+
+/* IODC ENTRY_TEST() */
+
+/* IODC ENTRY_TLB() */
+
+/* constants for OS (NVM...) */
+#define OS_ID_NONE		0	/* Undefined OS ID	*/
+#define OS_ID_HPUX		1	/* HP-UX OS		*/
+#define OS_ID_MPEXL		2	/* MPE XL OS		*/
+#define OS_ID_OSF		3	/* OSF OS		*/
+#define OS_ID_HPRT		4	/* HP-RT OS		*/
+#define OS_ID_NOVEL		5	/* NOVELL OS		*/
+#define OS_ID_LINUX		6	/* Linux		*/
+
+
+/* constants for PDC_CHASSIS */
+#define OSTAT_OFF		0
+#define OSTAT_FLT		1 
+#define OSTAT_TEST		2
+#define OSTAT_INIT		3
+#define OSTAT_SHUT		4
+#define OSTAT_WARN		5
+#define OSTAT_RUN		6
+#define OSTAT_ON		7
+
+/* Page Zero constant offsets used by the HPMC handler */
+#define BOOT_CONSOLE_HPA_OFFSET  0x3c0
+#define BOOT_CONSOLE_SPA_OFFSET  0x3c4
+#define BOOT_CONSOLE_PATH_OFFSET 0x3a8
+
+/* size of the pdc_result buffer for firmware.c */
+#define NUM_PDC_RESULT	32
+
+#if !defined(__ASSEMBLY__)
+
+#include <linux/types.h>
+
+
+/* flags of the device_path */
+#define	PF_AUTOBOOT	0x80
+#define	PF_AUTOSEARCH	0x40
+#define	PF_TIMER	0x0F
+
+struct device_path {		/* page 1-69 */
+	unsigned char flags;	/* flags see above! */
+	unsigned char bc[6];	/* bus converter routing info */
+	unsigned char mod;
+	unsigned int  layers[6];/* device-specific layer-info */
+} __attribute__((aligned(8))) ;
+
+struct pz_device {
+	struct	device_path dp;	/* see above */
+	/* struct	iomod *hpa; */
+	unsigned int hpa;	/* HPA base address */
+	/* char	*spa; */
+	unsigned int spa;	/* SPA base address */
+	/* int	(*iodc_io)(struct iomod*, ...); */
+	unsigned int iodc_io;	/* device entry point */
+	short	pad;		/* reserved */
+	unsigned short cl_class;/* see below */
+} __attribute__((aligned(8))) ;
+
+struct zeropage {
+	/* [0x000] initialize vectors (VEC) */
+	unsigned int	vec_special;		/* must be zero */
+	/* int	(*vec_pow_fail)(void);*/
+	unsigned int	vec_pow_fail; /* power failure handler */
+	/* int	(*vec_toc)(void); */
+	unsigned int	vec_toc;
+	unsigned int	vec_toclen;
+	/* int	(*vec_rendz)(void); */
+	unsigned int vec_rendz;
+	int	vec_pow_fail_flen;
+	int	vec_pad[10];		
+	
+	/* [0x040] reserved processor dependent */
+	int	pad0[112];
+
+	/* [0x200] reserved */
+	int	pad1[84];
+
+	/* [0x350] memory configuration (MC) */
+	int	memc_cont;		/* contiguous mem size (bytes) */
+	int	memc_phsize;		/* physical memory size */
+	int	memc_adsize;		/* additional mem size, bytes of SPA space used by PDC */
+	unsigned int mem_pdc_hi;	/* used for 64-bit */
+
+	/* [0x360] various parameters for the boot-CPU */
+	/* unsigned int *mem_booterr[8]; */
+	unsigned int mem_booterr[8];	/* ptr to boot errors */
+	unsigned int mem_free;		/* first location, where OS can be loaded */
+	/* struct iomod *mem_hpa; */
+	unsigned int mem_hpa;		/* HPA of the boot-CPU */
+	/* int (*mem_pdc)(int, ...); */
+	unsigned int mem_pdc;		/* PDC entry point */
+	unsigned int mem_10msec;	/* number of clock ticks in 10msec */
+
+	/* [0x390] initial memory module (IMM) */
+	/* struct iomod *imm_hpa; */
+	unsigned int imm_hpa;		/* HPA of the IMM */
+	int	imm_soft_boot;		/* 0 = was hard boot, 1 = was soft boot */
+	unsigned int	imm_spa_size;		/* SPA size of the IMM in bytes */
+	unsigned int	imm_max_mem;		/* bytes of mem in IMM */
+
+	/* [0x3A0] boot console, display device and keyboard */
+	struct pz_device mem_cons;	/* description of console device */
+	struct pz_device mem_boot;	/* description of boot device */
+	struct pz_device mem_kbd;	/* description of keyboard device */
+
+	/* [0x430] reserved */
+	int	pad430[116];
+
+	/* [0x600] processor dependent */
+	__u32	pad600[1];
+	__u32	proc_sti;		/* pointer to STI ROM */
+	__u32	pad608[126];
+};
+
+#endif /* !defined(__ASSEMBLY__) */
+
+#endif /* _UAPI_PARISC_PDC_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/posix_types.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/posix_types.h
new file mode 100644
index 0000000..2785632
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/posix_types.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ARCH_PARISC_POSIX_TYPES_H
+#define __ARCH_PARISC_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc.  Also, we cannot
+ * assume GCC is being used.
+ */
+
+#ifndef __LP64__
+typedef unsigned short		__kernel_mode_t;
+#define __kernel_mode_t __kernel_mode_t
+#endif
+
+typedef unsigned short		__kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
+typedef int			__kernel_suseconds_t;
+#define __kernel_suseconds_t __kernel_suseconds_t
+
+typedef long long		__kernel_off64_t;
+typedef unsigned long long	__kernel_ino64_t;
+
+#include <asm-generic/posix_types.h>
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/ptrace.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/ptrace.h
new file mode 100644
index 0000000..e72e062
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/ptrace.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* written by Philipp Rumpf, Copyright (C) 1999 SuSE GmbH Nuernberg
+** Copyright (C) 2000 Grant Grundler, Hewlett-Packard
+*/
+#ifndef _UAPI_PARISC_PTRACE_H
+#define _UAPI_PARISC_PTRACE_H
+
+
+#include <linux/types.h>
+
+/* This struct defines the way the registers are stored on the 
+ * stack during a system call.
+ *
+ * N.B. gdb/strace care about the size and offsets within this
+ * structure. If you change things, you may break object compatibility
+ * for those applications.
+ *
+ * Please do NOT use this structure for future programs, but use
+ * user_regs_struct (see below) instead.
+ *
+ * It can be accessed through PTRACE_PEEKUSR/PTRACE_POKEUSR only.
+ */
+
+struct pt_regs {
+	unsigned long gr[32];	/* PSW is in gr[0] */
+	__u64 fr[32];
+	unsigned long sr[ 8];
+	unsigned long iasq[2];
+	unsigned long iaoq[2];
+	unsigned long cr27;
+	unsigned long pad0;     /* available for other uses */
+	unsigned long orig_r28;
+	unsigned long ksp;
+	unsigned long kpc;
+	unsigned long sar;	/* CR11 */
+	unsigned long iir;	/* CR19 */
+	unsigned long isr;	/* CR20 */
+	unsigned long ior;	/* CR21 */
+	unsigned long ipsw;	/* CR22 */
+};
+
+/**
+ * struct user_regs_struct - User general purpose registers
+ *
+ * This is the user-visible general purpose register state structure
+ * which is used to define the elf_gregset_t.
+ *
+ * It can be accessed through PTRACE_GETREGSET with NT_PRSTATUS
+ * and through PTRACE_GETREGS.
+ */
+struct user_regs_struct {
+	unsigned long gr[32];	/* PSW is in gr[0] */
+	unsigned long sr[8];
+	unsigned long iaoq[2];
+	unsigned long iasq[2];
+	unsigned long sar;	/* CR11 */
+	unsigned long iir;	/* CR19 */
+	unsigned long isr;	/* CR20 */
+	unsigned long ior;	/* CR21 */
+	unsigned long ipsw;	/* CR22 */
+	unsigned long cr0;
+	unsigned long cr24, cr25, cr26, cr27, cr28, cr29, cr30, cr31;
+	unsigned long cr8, cr9, cr12, cr13, cr10, cr15;
+	unsigned long _pad[80-64];	/* pad to ELF_NGREG (80) */
+};
+
+/**
+ * struct user_fp_struct - User floating point registers
+ *
+ * This is the user-visible floating point register state structure.
+ * It uses the same layout and size as elf_fpregset_t.
+ *
+ * It can be accessed through PTRACE_GETREGSET with NT_PRFPREG
+ * and through PTRACE_GETFPREGS.
+ */
+struct user_fp_struct {
+	__u64 fr[32];
+};
+
+
+/*
+ * The numbers chosen here are somewhat arbitrary but absolutely MUST
+ * not overlap with any of the number assigned in <linux/ptrace.h>.
+ *
+ * These ones are taken from IA-64 on the assumption that theirs are
+ * the most correct (and we also want to support PTRACE_SINGLEBLOCK
+ * since we have taken branch traps too)
+ */
+#define PTRACE_SINGLEBLOCK	12	/* resume execution until next branch */
+
+#define PTRACE_GETREGS		18
+#define PTRACE_SETREGS		19
+#define PTRACE_GETFPREGS	14
+#define PTRACE_SETFPREGS	15
+
+#endif /* _UAPI_PARISC_PTRACE_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/sembuf.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/sembuf.h
new file mode 100644
index 0000000..746c5d8
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/sembuf.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _PARISC_SEMBUF_H
+#define _PARISC_SEMBUF_H
+
+#include <asm/bitsperlong.h>
+
+/* 
+ * The semid64_ds structure for parisc architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct semid64_ds {
+	struct ipc64_perm sem_perm;		/* permissions .. see ipc.h */
+#if __BITS_PER_LONG != 64
+	unsigned int	__pad1;
+#endif
+	__kernel_time_t	sem_otime;		/* last semop time */
+#if __BITS_PER_LONG != 64
+	unsigned int	__pad2;
+#endif
+	__kernel_time_t	sem_ctime;		/* last change time */
+	unsigned long 	sem_nsems;		/* no. of semaphores in array */
+	unsigned long	__unused1;
+	unsigned long	__unused2;
+};
+
+#endif /* _PARISC_SEMBUF_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/setup.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/setup.h
new file mode 100644
index 0000000..78b2f4e
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/setup.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _PARISC_SETUP_H
+#define _PARISC_SETUP_H
+
+#define COMMAND_LINE_SIZE	1024
+
+#endif /* _PARISC_SETUP_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/shmbuf.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/shmbuf.h
new file mode 100644
index 0000000..cd4dbce
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/shmbuf.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _PARISC_SHMBUF_H
+#define _PARISC_SHMBUF_H
+
+#include <asm/bitsperlong.h>
+
+/* 
+ * The shmid64_ds structure for parisc architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct shmid64_ds {
+	struct ipc64_perm	shm_perm;	/* operation perms */
+#if __BITS_PER_LONG != 64
+	unsigned int		__pad1;
+#endif
+	__kernel_time_t		shm_atime;	/* last attach time */
+#if __BITS_PER_LONG != 64
+	unsigned int		__pad2;
+#endif
+	__kernel_time_t		shm_dtime;	/* last detach time */
+#if __BITS_PER_LONG != 64
+	unsigned int		__pad3;
+#endif
+	__kernel_time_t		shm_ctime;	/* last change time */
+#if __BITS_PER_LONG != 64
+	unsigned int		__pad4;
+#endif
+	__kernel_size_t		shm_segsz;	/* size of segment (bytes) */
+	__kernel_pid_t		shm_cpid;	/* pid of creator */
+	__kernel_pid_t		shm_lpid;	/* pid of last operator */
+	unsigned long		shm_nattch;	/* no. of current attaches */
+	unsigned long		__unused1;
+	unsigned long		__unused2;
+};
+
+struct shminfo64 {
+	unsigned long	shmmax;
+	unsigned long	shmmin;
+	unsigned long	shmmni;
+	unsigned long	shmseg;
+	unsigned long	shmall;
+	unsigned long	__unused1;
+	unsigned long	__unused2;
+	unsigned long	__unused3;
+	unsigned long	__unused4;
+};
+
+#endif /* _PARISC_SHMBUF_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/sigcontext.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/sigcontext.h
new file mode 100644
index 0000000..be404bb
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/sigcontext.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASMPARISC_SIGCONTEXT_H
+#define _ASMPARISC_SIGCONTEXT_H
+
+/* Parenthesized so the flags expand safely inside larger expressions
+ * (unparenthesized, PARISC_SC_FLAG_ONSTACK + 1 would parse as 1<<(0+1)).
+ * Values are unchanged, so the userspace ABI is unaffected. */
+#define PARISC_SC_FLAG_ONSTACK (1 << 0)
+#define PARISC_SC_FLAG_IN_SYSCALL (1 << 1)
+
+/* We will add more stuff here as it becomes necessary, until we know
+   it works. */
+struct sigcontext {
+	unsigned long sc_flags;
+
+	unsigned long sc_gr[32]; /* PSW in sc_gr[0] */
+	unsigned long long sc_fr[32]; /* FIXME, do we need other state info? */
+	unsigned long sc_iasq[2];
+	unsigned long sc_iaoq[2];
+	unsigned long sc_sar; /* cr11 */
+};
+
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/siginfo.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/siginfo.h
new file mode 100644
index 0000000..4a1062e
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/siginfo.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _PARISC_SIGINFO_H
+#define _PARISC_SIGINFO_H
+
+#if defined(__LP64__)
+#define __ARCH_SI_PREAMBLE_SIZE   (4 * sizeof(int))
+#endif
+
+#include <asm-generic/siginfo.h>
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/signal.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/signal.h
new file mode 100644
index 0000000..d38563a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/signal.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_PARISC_SIGNAL_H
+#define _UAPI_ASM_PARISC_SIGNAL_H
+
+#define SIGHUP		 1
+#define SIGINT		 2
+#define SIGQUIT		 3
+#define SIGILL		 4
+#define SIGTRAP		 5
+#define SIGABRT		 6
+#define SIGIOT		 6
+#define SIGSTKFLT	 7
+#define SIGFPE		 8
+#define SIGKILL		 9
+#define SIGBUS		10
+#define SIGSEGV		11
+#define SIGXCPU		12
+#define SIGPIPE		13
+#define SIGALRM		14
+#define SIGTERM		15
+#define SIGUSR1		16
+#define SIGUSR2		17
+#define SIGCHLD		18
+#define SIGPWR		19
+#define SIGVTALRM	20
+#define SIGPROF		21
+#define SIGIO		22
+#define SIGPOLL		SIGIO
+#define SIGWINCH	23
+#define SIGSTOP		24
+#define SIGTSTP		25
+#define SIGCONT		26
+#define SIGTTIN		27
+#define SIGTTOU		28
+#define SIGURG		29
+#define SIGXFSZ		30
+#define SIGUNUSED	31
+#define SIGSYS		31 /* Linux doesn't use this */
+
+/* These should not be considered constants from userland.  */
+#define SIGRTMIN	32
+#define SIGRTMAX	_NSIG /* it's 44 under HP/UX */
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_ONSTACK	0x00000001
+#define SA_RESETHAND	0x00000004
+#define SA_NOCLDSTOP	0x00000008
+#define SA_SIGINFO	0x00000010
+#define SA_NODEFER	0x00000020
+#define SA_RESTART	0x00000040
+#define SA_NOCLDWAIT	0x00000080
+#define _SA_SIGGFAULT	0x00000100 /* HPUX */
+
+#define SA_NOMASK	SA_NODEFER
+#define SA_ONESHOT	SA_RESETHAND
+
+#define MINSIGSTKSZ	2048
+#define SIGSTKSZ	8192
+
+
+#define SIG_BLOCK          0	/* for blocking signals */
+#define SIG_UNBLOCK        1	/* for unblocking signals */
+#define SIG_SETMASK        2	/* for setting the signal mask */
+
+#define SIG_DFL	((__sighandler_t)0)	/* default signal handling */
+#define SIG_IGN	((__sighandler_t)1)	/* ignore signal */
+#define SIG_ERR	((__sighandler_t)-1)	/* error return from signal */
+
+# ifndef __ASSEMBLY__
+
+#  include <linux/types.h>
+
+/* Avoid too many header ordering problems.  */
+struct siginfo;
+
+/* Type of a signal handler.  */
+#if defined(__LP64__)
+/* function pointers on 64-bit parisc are pointers to little structs and the
+ * compiler doesn't support code which changes or tests the address of
+ * the function in the little struct.  This is really ugly -PB
+ */
+typedef char __user *__sighandler_t;
+#else
+typedef void __signalfn_t(int);
+typedef __signalfn_t __user *__sighandler_t;
+#endif
+
+typedef struct sigaltstack {
+	void __user *ss_sp;
+	int ss_flags;
+	size_t ss_size;
+} stack_t;
+
+#endif /* !__ASSEMBLY */
+#endif /* _UAPI_ASM_PARISC_SIGNAL_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/socket.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/socket.h
new file mode 100644
index 0000000..1d0fdc3
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/socket.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SOCKET_H
+#define _UAPI_ASM_SOCKET_H
+
+#include <asm/sockios.h>
+
+/* For setsockopt(2) */
+#define SOL_SOCKET	0xffff
+
+#define SO_DEBUG	0x0001
+#define SO_REUSEADDR	0x0004
+#define SO_KEEPALIVE	0x0008
+#define SO_DONTROUTE	0x0010
+#define SO_BROADCAST	0x0020
+#define SO_LINGER	0x0080
+#define SO_OOBINLINE	0x0100
+#define SO_REUSEPORT	0x0200
+#define SO_SNDBUF	0x1001
+#define SO_RCVBUF	0x1002
+#define SO_SNDBUFFORCE	0x100a
+#define SO_RCVBUFFORCE	0x100b
+#define SO_SNDLOWAT	0x1003
+#define SO_RCVLOWAT	0x1004
+#define SO_SNDTIMEO	0x1005
+#define SO_RCVTIMEO	0x1006
+#define SO_ERROR	0x1007
+#define SO_TYPE		0x1008
+#define SO_PROTOCOL	0x1028
+#define SO_DOMAIN	0x1029
+#define SO_PEERNAME	0x2000
+
+#define SO_NO_CHECK	0x400b
+#define SO_PRIORITY	0x400c
+#define SO_BSDCOMPAT	0x400e
+#define SO_PASSCRED	0x4010
+#define SO_PEERCRED	0x4011
+#define SO_TIMESTAMP	0x4012
+#define SCM_TIMESTAMP	SO_TIMESTAMP
+#define SO_TIMESTAMPNS	0x4013
+#define SCM_TIMESTAMPNS	SO_TIMESTAMPNS
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION		0x4016
+#define SO_SECURITY_ENCRYPTION_TRANSPORT	0x4017
+#define SO_SECURITY_ENCRYPTION_NETWORK		0x4018
+
+#define SO_BINDTODEVICE	0x4019
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER        0x401a
+#define SO_DETACH_FILTER        0x401b
+#define SO_GET_FILTER		SO_ATTACH_FILTER
+
+#define SO_ACCEPTCONN		0x401c
+
+#define SO_PEERSEC		0x401d
+#define SO_PASSSEC		0x401e
+
+#define SO_MARK			0x401f
+
+#define SO_TIMESTAMPING		0x4020
+#define SCM_TIMESTAMPING	SO_TIMESTAMPING
+
+#define SO_RXQ_OVFL             0x4021
+
+#define SO_WIFI_STATUS		0x4022
+#define SCM_WIFI_STATUS		SO_WIFI_STATUS
+#define SO_PEEK_OFF		0x4023
+
+/* Instruct lower device to use last 4-bytes of skb data as FCS */
+#define SO_NOFCS		0x4024
+
+#define SO_LOCK_FILTER		0x4025
+
+#define SO_SELECT_ERR_QUEUE	0x4026
+
+#define SO_BUSY_POLL		0x4027
+
+#define SO_MAX_PACING_RATE	0x4028
+
+#define SO_BPF_EXTENSIONS	0x4029
+
+#define SO_INCOMING_CPU		0x402A
+
+#define SO_ATTACH_BPF		0x402B
+#define SO_DETACH_BPF		SO_DETACH_FILTER
+
+#define SO_ATTACH_REUSEPORT_CBPF	0x402C
+#define SO_ATTACH_REUSEPORT_EBPF	0x402D
+
+#define SO_CNX_ADVICE		0x402E
+
+#define SCM_TIMESTAMPING_OPT_STATS	0x402F
+
+#define SO_MEMINFO		0x4030
+
+#define SO_INCOMING_NAPI_ID	0x4031
+
+#define SO_COOKIE		0x4032
+
+#define SCM_TIMESTAMPING_PKTINFO	0x4033
+
+#define SO_PEERGROUPS		0x4034
+
+#define SO_ZEROCOPY		0x4035
+
+#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/sockios.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/sockios.h
new file mode 100644
index 0000000..66a3ba6
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/sockios.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ARCH_PARISC_SOCKIOS__
+#define __ARCH_PARISC_SOCKIOS__
+
+/* Socket-level I/O control calls. */
+#define FIOSETOWN 	0x8901
+#define SIOCSPGRP	0x8902
+#define FIOGETOWN	0x8903
+#define SIOCGPGRP	0x8904
+#define SIOCATMARK	0x8905
+#define SIOCGSTAMP	0x8906		/* Get stamp (timeval) */
+#define SIOCGSTAMPNS	0x8907		/* Get stamp (timespec) */
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/stat.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/stat.h
new file mode 100644
index 0000000..b5bbf67
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/stat.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _PARISC_STAT_H
+#define _PARISC_STAT_H
+
+#include <linux/types.h>
+
+struct stat {
+	unsigned int	st_dev;		/* dev_t is 32 bits on parisc */
+	unsigned int	st_ino;		/* 32 bits */
+	unsigned short	st_mode;	/* 16 bits */
+	unsigned short	st_nlink;	/* 16 bits */
+	unsigned short	st_reserved1;	/* old st_uid */
+	unsigned short	st_reserved2;	/* old st_gid */
+	unsigned int	st_rdev;
+	signed int	st_size;
+	signed int	st_atime;
+	unsigned int	st_atime_nsec;
+	signed int	st_mtime;
+	unsigned int	st_mtime_nsec;
+	signed int	st_ctime;
+	unsigned int	st_ctime_nsec;
+	int		st_blksize;
+	int		st_blocks;
+	unsigned int	__unused1;	/* ACL stuff */
+	unsigned int	__unused2;	/* network */
+	unsigned int	__unused3;	/* network */
+	unsigned int	__unused4;	/* cnodes */
+	unsigned short	__unused5;	/* netsite */
+	short		st_fstype;
+	unsigned int	st_realdev;
+	unsigned short	st_basemode;
+	unsigned short	st_spareshort;
+	unsigned int	st_uid;
+	unsigned int	st_gid;
+	unsigned int	st_spare4[3];
+};
+
+#define STAT_HAVE_NSEC
+
+/* This is the struct that 32-bit userspace applications are expecting.
+ * How 64-bit apps are going to be compiled, I have no idea.  But at least
+ * this way, we don't have a wrapper in the kernel.
+ */
+struct stat64 {
+	unsigned long long	st_dev;
+	unsigned int		__pad1;
+
+	unsigned int		__st_ino;	/* Not actually filled in */
+	unsigned int		st_mode;
+	unsigned int		st_nlink;
+	unsigned int		st_uid;
+	unsigned int		st_gid;
+	unsigned long long	st_rdev;
+	unsigned int		__pad2;
+	signed long long	st_size;
+	signed int		st_blksize;
+
+	signed long long	st_blocks;
+	signed int		st_atime;
+	unsigned int		st_atime_nsec;
+	signed int		st_mtime;
+	unsigned int		st_mtime_nsec;
+	signed int		st_ctime;
+	unsigned int		st_ctime_nsec;
+	unsigned long long	st_ino;
+};
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/statfs.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/statfs.h
new file mode 100644
index 0000000..e5de020
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/statfs.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _PARISC_STATFS_H
+#define _PARISC_STATFS_H
+
+#define __statfs_word long
+#include <asm-generic/statfs.h>
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/swab.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/swab.h
new file mode 100644
index 0000000..35fb2d1
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/swab.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _PARISC_SWAB_H
+#define _PARISC_SWAB_H
+
+#include <asm/bitsperlong.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+#define __SWAB_64_THRU_32__
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+	__asm__("dep %0, 15, 8, %0\n\t"		/* deposit 00ab -> 0bab */
+		"shd %%r0, %0, 8, %0"		/* shift 000000ab -> 00ba */
+		: "=r" (x)
+		: "0" (x));
+	return x;
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute_const__ __u32 __arch_swab24(__u32 x)
+{
+	__asm__("shd %0, %0, 8, %0\n\t"		/* shift xabcxabc -> cxab */
+		"dep %0, 15, 8, %0\n\t"		/* deposit cxab -> cbab */
+		"shd %%r0, %0, 8, %0"		/* shift 0000cbab -> 0cba */
+		: "=r" (x)
+		: "0" (x));
+	return x;
+}
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+	unsigned int temp;
+	__asm__("shd %0, %0, 16, %1\n\t"	/* shift abcdabcd -> cdab */
+		"dep %1, 15, 8, %1\n\t"		/* deposit cdab -> cbab */
+		"shd %0, %1, 8, %0"		/* shift abcdcbab -> dcba */
+		: "=r" (x), "=&r" (temp)
+		: "0" (x));
+	return x;
+}
+#define __arch_swab32 __arch_swab32
+
+#if __BITS_PER_LONG > 32
+/*
+** From "PA-RISC 2.0 Architecture", HP Professional Books.
+** See Appendix I page 8 , "Endian Byte Swapping".
+**
+** Pretty cool algorithm: (* == zero'd bits)
+**      PERMH   01234567 -> 67452301 into %0
+**      HSHL    67452301 -> 7*5*3*1* into %1
+**      HSHR    67452301 -> *6*4*2*0 into %0
+**      OR      %0 | %1  -> 76543210 into %0 (all done!)
+*/
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+{
+	__u64 temp;
+	__asm__("permh,3210 %0, %0\n\t"
+		"hshl %0, 8, %1\n\t"
+		"hshr,u %0, 8, %0\n\t"
+		"or %1, %0, %0"
+		: "=r" (x), "=&r" (temp)
+		: "0" (x));
+	return x;
+}
+#define __arch_swab64 __arch_swab64
+#endif /* __BITS_PER_LONG > 32 */
+
+#endif /* _PARISC_SWAB_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/termbits.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/termbits.h
new file mode 100644
index 0000000..40e920f
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/termbits.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ARCH_PARISC_TERMBITS_H__
+#define __ARCH_PARISC_TERMBITS_H__
+
+#include <linux/posix_types.h>
+
+typedef unsigned char	cc_t;
+typedef unsigned int	speed_t;
+typedef unsigned int	tcflag_t;
+
+#define NCCS 19
+struct termios {
+	tcflag_t c_iflag;		/* input mode flags */
+	tcflag_t c_oflag;		/* output mode flags */
+	tcflag_t c_cflag;		/* control mode flags */
+	tcflag_t c_lflag;		/* local mode flags */
+	cc_t c_line;			/* line discipline */
+	cc_t c_cc[NCCS];		/* control characters */
+};
+
+struct termios2 {
+	tcflag_t c_iflag;		/* input mode flags */
+	tcflag_t c_oflag;		/* output mode flags */
+	tcflag_t c_cflag;		/* control mode flags */
+	tcflag_t c_lflag;		/* local mode flags */
+	cc_t c_line;			/* line discipline */
+	cc_t c_cc[NCCS];		/* control characters */
+	speed_t c_ispeed;		/* input speed */
+	speed_t c_ospeed;		/* output speed */
+};
+
+struct ktermios {
+	tcflag_t c_iflag;		/* input mode flags */
+	tcflag_t c_oflag;		/* output mode flags */
+	tcflag_t c_cflag;		/* control mode flags */
+	tcflag_t c_lflag;		/* local mode flags */
+	cc_t c_line;			/* line discipline */
+	cc_t c_cc[NCCS];		/* control characters */
+	speed_t c_ispeed;		/* input speed */
+	speed_t c_ospeed;		/* output speed */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+
+/* c_iflag bits */
+#define IGNBRK	0000001
+#define BRKINT	0000002
+#define IGNPAR	0000004
+#define PARMRK	0000010
+#define INPCK	0000020
+#define ISTRIP	0000040
+#define INLCR	0000100
+#define IGNCR	0000200
+#define ICRNL	0000400
+#define IUCLC	0001000
+#define IXON	0002000
+#define IXANY	0004000
+#define IXOFF	0010000
+#define IMAXBEL	0040000
+#define IUTF8	0100000
+
+/* c_oflag bits */
+#define OPOST	0000001
+#define OLCUC	0000002
+#define ONLCR	0000004
+#define OCRNL	0000010
+#define ONOCR	0000020
+#define ONLRET	0000040
+#define OFILL	0000100
+#define OFDEL	0000200
+#define NLDLY	0000400
+#define   NL0	0000000
+#define   NL1	0000400
+#define CRDLY	0003000
+#define   CR0	0000000
+#define   CR1	0001000
+#define   CR2	0002000
+#define   CR3	0003000
+#define TABDLY	0014000
+#define   TAB0	0000000
+#define   TAB1	0004000
+#define   TAB2	0010000
+#define   TAB3	0014000
+#define   XTABS	0014000
+#define BSDLY	0020000
+#define   BS0	0000000
+#define   BS1	0020000
+#define VTDLY	0040000
+#define   VT0	0000000
+#define   VT1	0040000
+#define FFDLY	0100000
+#define   FF0	0000000
+#define   FF1	0100000
+
+/* c_cflag bit meaning */
+#define CBAUD   0010017
+#define  B0     0000000         /* hang up */
+#define  B50    0000001
+#define  B75    0000002
+#define  B110   0000003
+#define  B134   0000004
+#define  B150   0000005
+#define  B200   0000006
+#define  B300   0000007
+#define  B600   0000010
+#define  B1200  0000011
+#define  B1800  0000012
+#define  B2400  0000013
+#define  B4800  0000014
+#define  B9600  0000015
+#define  B19200 0000016
+#define  B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE   0000060
+#define   CS5   0000000
+#define   CS6   0000020
+#define   CS7   0000040
+#define   CS8   0000060
+#define CSTOPB  0000100
+#define CREAD   0000200
+#define PARENB  0000400
+#define PARODD  0001000
+#define HUPCL   0002000
+#define CLOCAL  0004000
+#define CBAUDEX 0010000
+#define    BOTHER 0010000
+#define    B57600 0010001
+#define   B115200 0010002
+#define   B230400 0010003
+#define   B460800 0010004
+#define   B500000 0010005
+#define   B576000 0010006
+#define   B921600 0010007
+#define  B1000000 0010010
+#define  B1152000 0010011
+#define  B1500000 0010012
+#define  B2000000 0010013
+#define  B2500000 0010014
+#define  B3000000 0010015
+#define  B3500000 0010016
+#define  B4000000 0010017
+#define CIBAUD    002003600000		/* input baud rate */
+#define CMSPAR    010000000000          /* mark or space (stick) parity */
+#define CRTSCTS   020000000000          /* flow control */
+
+#define IBSHIFT	16		/* Shift from CBAUD to CIBAUD */
+
+
+/* c_lflag bits */
+#define ISIG    0000001
+#define ICANON  0000002
+#define XCASE   0000004
+#define ECHO    0000010
+#define ECHOE   0000020
+#define ECHOK   0000040
+#define ECHONL  0000100
+#define NOFLSH  0000200
+#define TOSTOP  0000400
+#define ECHOCTL 0001000
+#define ECHOPRT 0002000
+#define ECHOKE  0004000
+#define FLUSHO  0010000
+#define PENDIN  0040000
+#define IEXTEN  0100000
+#define EXTPROC	0200000
+
+/* tcflow() and TCXONC use these */
+#define	TCOOFF		0
+#define	TCOON		1
+#define	TCIOFF		2
+#define	TCION		3
+
+/* tcflush() and TCFLSH use these */
+#define	TCIFLUSH	0
+#define	TCOFLUSH	1
+#define	TCIOFLUSH	2
+
+/* tcsetattr uses these */
+#define	TCSANOW		0
+#define	TCSADRAIN	1
+#define	TCSAFLUSH	2
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/termios.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/termios.h
new file mode 100644
index 0000000..aba174f
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/termios.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_PARISC_TERMIOS_H
+#define _UAPI_PARISC_TERMIOS_H
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+	unsigned short ws_row;
+	unsigned short ws_col;
+	unsigned short ws_xpixel;
+	unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+	unsigned short c_iflag;		/* input mode flags */
+	unsigned short c_oflag;		/* output mode flags */
+	unsigned short c_cflag;		/* control mode flags */
+	unsigned short c_lflag;		/* local mode flags */
+	unsigned char c_line;		/* line discipline */
+	unsigned char c_cc[NCC];	/* control characters */
+};
+
+/* modem lines */
+#define TIOCM_LE	0x001
+#define TIOCM_DTR	0x002
+#define TIOCM_RTS	0x004
+#define TIOCM_ST	0x008
+#define TIOCM_SR	0x010
+#define TIOCM_CTS	0x020
+#define TIOCM_CAR	0x040
+#define TIOCM_RNG	0x080
+#define TIOCM_DSR	0x100
+#define TIOCM_CD	TIOCM_CAR
+#define TIOCM_RI	TIOCM_RNG
+#define TIOCM_OUT1	0x2000
+#define TIOCM_OUT2	0x4000
+#define TIOCM_LOOP	0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+
+#endif /* _UAPI_PARISC_TERMIOS_H */
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/types.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/types.h
new file mode 100644
index 0000000..28c7d74
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/types.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _PARISC_TYPES_H
+#define _PARISC_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/unistd.h b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/unistd.h
new file mode 100644
index 0000000..4872e77
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/include/uapi/asm/unistd.h
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_PARISC_UNISTD_H_
+#define _UAPI_ASM_PARISC_UNISTD_H_
+
+/*
+ * Linux system call numbers.
+ *
+ * Cary Coutant says that we should just use another syscall gateway
+ * page to avoid clashing with the HPUX space, and I think he's right:
+ * it will would keep a branch out of our syscall entry path, at the
+ * very least.  If we decide to change it later, we can ``just'' tweak
+ * the LINUX_GATEWAY_ADDR define at the bottom and make __NR_Linux be
+ * 1024 or something.  Oh, and recompile libc. =)
+ */
+
+#define __NR_Linux                0
+#define __NR_restart_syscall      (__NR_Linux + 0)
+#define __NR_exit                 (__NR_Linux + 1)
+#define __NR_fork                 (__NR_Linux + 2)
+#define __NR_read                 (__NR_Linux + 3)
+#define __NR_write                (__NR_Linux + 4)
+#define __NR_open                 (__NR_Linux + 5)
+#define __NR_close                (__NR_Linux + 6)
+#define __NR_waitpid              (__NR_Linux + 7)
+#define __NR_creat                (__NR_Linux + 8)
+#define __NR_link                 (__NR_Linux + 9)
+#define __NR_unlink              (__NR_Linux + 10)
+#define __NR_execve              (__NR_Linux + 11)
+#define __NR_chdir               (__NR_Linux + 12)
+#define __NR_time                (__NR_Linux + 13)
+#define __NR_mknod               (__NR_Linux + 14)
+#define __NR_chmod               (__NR_Linux + 15)
+#define __NR_lchown              (__NR_Linux + 16)
+#define __NR_socket              (__NR_Linux + 17)
+#define __NR_stat                (__NR_Linux + 18)
+#define __NR_lseek               (__NR_Linux + 19)
+#define __NR_getpid              (__NR_Linux + 20)
+#define __NR_mount               (__NR_Linux + 21)
+#define __NR_bind                (__NR_Linux + 22)
+#define __NR_setuid              (__NR_Linux + 23)
+#define __NR_getuid              (__NR_Linux + 24)
+#define __NR_stime               (__NR_Linux + 25)
+#define __NR_ptrace              (__NR_Linux + 26)
+#define __NR_alarm               (__NR_Linux + 27)
+#define __NR_fstat               (__NR_Linux + 28)
+#define __NR_pause               (__NR_Linux + 29)
+#define __NR_utime               (__NR_Linux + 30)
+#define __NR_connect             (__NR_Linux + 31)
+#define __NR_listen              (__NR_Linux + 32)
+#define __NR_access              (__NR_Linux + 33)
+#define __NR_nice                (__NR_Linux + 34)
+#define __NR_accept              (__NR_Linux + 35)
+#define __NR_sync                (__NR_Linux + 36)
+#define __NR_kill                (__NR_Linux + 37)
+#define __NR_rename              (__NR_Linux + 38)
+#define __NR_mkdir               (__NR_Linux + 39)
+#define __NR_rmdir               (__NR_Linux + 40)
+#define __NR_dup                 (__NR_Linux + 41)
+#define __NR_pipe                (__NR_Linux + 42)
+#define __NR_times               (__NR_Linux + 43)
+#define __NR_getsockname         (__NR_Linux + 44)
+#define __NR_brk                 (__NR_Linux + 45)
+#define __NR_setgid              (__NR_Linux + 46)
+#define __NR_getgid              (__NR_Linux + 47)
+#define __NR_signal              (__NR_Linux + 48)
+#define __NR_geteuid             (__NR_Linux + 49)
+#define __NR_getegid             (__NR_Linux + 50)
+#define __NR_acct                (__NR_Linux + 51)
+#define __NR_umount2             (__NR_Linux + 52)
+#define __NR_getpeername         (__NR_Linux + 53)
+#define __NR_ioctl               (__NR_Linux + 54)
+#define __NR_fcntl               (__NR_Linux + 55)
+#define __NR_socketpair          (__NR_Linux + 56)
+#define __NR_setpgid             (__NR_Linux + 57)
+#define __NR_send                (__NR_Linux + 58)
+#define __NR_uname               (__NR_Linux + 59)
+#define __NR_umask               (__NR_Linux + 60)
+#define __NR_chroot              (__NR_Linux + 61)
+#define __NR_ustat               (__NR_Linux + 62)
+#define __NR_dup2                (__NR_Linux + 63)
+#define __NR_getppid             (__NR_Linux + 64)
+#define __NR_getpgrp             (__NR_Linux + 65)
+#define __NR_setsid              (__NR_Linux + 66)
+#define __NR_pivot_root          (__NR_Linux + 67)
+#define __NR_sgetmask            (__NR_Linux + 68)
+#define __NR_ssetmask            (__NR_Linux + 69)
+#define __NR_setreuid            (__NR_Linux + 70)
+#define __NR_setregid            (__NR_Linux + 71)
+#define __NR_mincore             (__NR_Linux + 72)
+#define __NR_sigpending          (__NR_Linux + 73)
+#define __NR_sethostname         (__NR_Linux + 74)
+#define __NR_setrlimit           (__NR_Linux + 75)
+#define __NR_getrlimit           (__NR_Linux + 76)
+#define __NR_getrusage           (__NR_Linux + 77)
+#define __NR_gettimeofday        (__NR_Linux + 78)
+#define __NR_settimeofday        (__NR_Linux + 79)
+#define __NR_getgroups           (__NR_Linux + 80)
+#define __NR_setgroups           (__NR_Linux + 81)
+#define __NR_sendto              (__NR_Linux + 82)
+#define __NR_symlink             (__NR_Linux + 83)
+#define __NR_lstat               (__NR_Linux + 84)
+#define __NR_readlink            (__NR_Linux + 85)
+#define __NR_uselib              (__NR_Linux + 86)
+#define __NR_swapon              (__NR_Linux + 87)
+#define __NR_reboot              (__NR_Linux + 88)
+#define __NR_mmap2               (__NR_Linux + 89)
+#define __NR_mmap                (__NR_Linux + 90)
+#define __NR_munmap              (__NR_Linux + 91)
+#define __NR_truncate            (__NR_Linux + 92)
+#define __NR_ftruncate           (__NR_Linux + 93)
+#define __NR_fchmod              (__NR_Linux + 94)
+#define __NR_fchown              (__NR_Linux + 95)
+#define __NR_getpriority         (__NR_Linux + 96)
+#define __NR_setpriority         (__NR_Linux + 97)
+#define __NR_recv                (__NR_Linux + 98)
+#define __NR_statfs              (__NR_Linux + 99)
+#define __NR_fstatfs            (__NR_Linux + 100)
+#define __NR_stat64             (__NR_Linux + 101)
+/* #define __NR_socketcall         (__NR_Linux + 102) */
+#define __NR_syslog             (__NR_Linux + 103)
+#define __NR_setitimer          (__NR_Linux + 104)
+#define __NR_getitimer          (__NR_Linux + 105)
+#define __NR_capget             (__NR_Linux + 106)
+#define __NR_capset             (__NR_Linux + 107)
+#define __NR_pread64            (__NR_Linux + 108)
+#define __NR_pwrite64           (__NR_Linux + 109)
+#define __NR_getcwd             (__NR_Linux + 110)
+#define __NR_vhangup            (__NR_Linux + 111)
+#define __NR_fstat64            (__NR_Linux + 112)
+#define __NR_vfork              (__NR_Linux + 113)
+#define __NR_wait4              (__NR_Linux + 114)
+#define __NR_swapoff            (__NR_Linux + 115)
+#define __NR_sysinfo            (__NR_Linux + 116)
+#define __NR_shutdown           (__NR_Linux + 117)
+#define __NR_fsync              (__NR_Linux + 118)
+#define __NR_madvise            (__NR_Linux + 119)
+#define __NR_clone              (__NR_Linux + 120)
+#define __NR_setdomainname      (__NR_Linux + 121)
+#define __NR_sendfile           (__NR_Linux + 122)
+#define __NR_recvfrom           (__NR_Linux + 123)
+#define __NR_adjtimex           (__NR_Linux + 124)
+#define __NR_mprotect           (__NR_Linux + 125)
+#define __NR_sigprocmask        (__NR_Linux + 126)
+#define __NR_create_module      (__NR_Linux + 127) /* not used */
+#define __NR_init_module        (__NR_Linux + 128)
+#define __NR_delete_module      (__NR_Linux + 129)
+#define __NR_get_kernel_syms    (__NR_Linux + 130) /* not used */
+#define __NR_quotactl           (__NR_Linux + 131)
+#define __NR_getpgid            (__NR_Linux + 132)
+#define __NR_fchdir             (__NR_Linux + 133)
+#define __NR_bdflush            (__NR_Linux + 134)
+#define __NR_sysfs              (__NR_Linux + 135)
+#define __NR_personality        (__NR_Linux + 136)
+#define __NR_afs_syscall        (__NR_Linux + 137) /* not used */
+#define __NR_setfsuid           (__NR_Linux + 138)
+#define __NR_setfsgid           (__NR_Linux + 139)
+#define __NR__llseek            (__NR_Linux + 140)
+#define __NR_getdents           (__NR_Linux + 141)
+#define __NR__newselect         (__NR_Linux + 142)
+#define __NR_flock              (__NR_Linux + 143)
+#define __NR_msync              (__NR_Linux + 144)
+#define __NR_readv              (__NR_Linux + 145)
+#define __NR_writev             (__NR_Linux + 146)
+#define __NR_getsid             (__NR_Linux + 147)
+#define __NR_fdatasync          (__NR_Linux + 148)
+#define __NR__sysctl            (__NR_Linux + 149)
+#define __NR_mlock              (__NR_Linux + 150)
+#define __NR_munlock            (__NR_Linux + 151)
+#define __NR_mlockall           (__NR_Linux + 152)
+#define __NR_munlockall         (__NR_Linux + 153)
+#define __NR_sched_setparam             (__NR_Linux + 154)
+#define __NR_sched_getparam             (__NR_Linux + 155)
+#define __NR_sched_setscheduler         (__NR_Linux + 156)
+#define __NR_sched_getscheduler         (__NR_Linux + 157)
+#define __NR_sched_yield                (__NR_Linux + 158)
+#define __NR_sched_get_priority_max     (__NR_Linux + 159)
+#define __NR_sched_get_priority_min     (__NR_Linux + 160)
+#define __NR_sched_rr_get_interval      (__NR_Linux + 161)
+#define __NR_nanosleep          (__NR_Linux + 162)
+#define __NR_mremap             (__NR_Linux + 163)
+#define __NR_setresuid          (__NR_Linux + 164)
+#define __NR_getresuid          (__NR_Linux + 165)
+#define __NR_sigaltstack        (__NR_Linux + 166)
+#define __NR_query_module       (__NR_Linux + 167) /* not used */
+#define __NR_poll               (__NR_Linux + 168)
+#define __NR_nfsservctl         (__NR_Linux + 169) /* not used */
+#define __NR_setresgid          (__NR_Linux + 170)
+#define __NR_getresgid          (__NR_Linux + 171)
+#define __NR_prctl              (__NR_Linux + 172)
+#define __NR_rt_sigreturn       (__NR_Linux + 173)
+#define __NR_rt_sigaction       (__NR_Linux + 174)
+#define __NR_rt_sigprocmask     (__NR_Linux + 175)
+#define __NR_rt_sigpending      (__NR_Linux + 176)
+#define __NR_rt_sigtimedwait    (__NR_Linux + 177)
+#define __NR_rt_sigqueueinfo    (__NR_Linux + 178)
+#define __NR_rt_sigsuspend      (__NR_Linux + 179)
+#define __NR_chown              (__NR_Linux + 180)
+#define __NR_setsockopt         (__NR_Linux + 181)
+#define __NR_getsockopt         (__NR_Linux + 182)
+#define __NR_sendmsg            (__NR_Linux + 183)
+#define __NR_recvmsg            (__NR_Linux + 184)
+#define __NR_semop              (__NR_Linux + 185)
+#define __NR_semget             (__NR_Linux + 186)
+#define __NR_semctl             (__NR_Linux + 187)
+#define __NR_msgsnd             (__NR_Linux + 188)
+#define __NR_msgrcv             (__NR_Linux + 189)
+#define __NR_msgget             (__NR_Linux + 190)
+#define __NR_msgctl             (__NR_Linux + 191)
+#define __NR_shmat              (__NR_Linux + 192)
+#define __NR_shmdt              (__NR_Linux + 193)
+#define __NR_shmget             (__NR_Linux + 194)
+#define __NR_shmctl             (__NR_Linux + 195)
+#define __NR_getpmsg            (__NR_Linux + 196) /* not used */
+#define __NR_putpmsg            (__NR_Linux + 197) /* not used */
+#define __NR_lstat64            (__NR_Linux + 198)
+#define __NR_truncate64         (__NR_Linux + 199)
+#define __NR_ftruncate64        (__NR_Linux + 200)
+#define __NR_getdents64         (__NR_Linux + 201)
+#define __NR_fcntl64            (__NR_Linux + 202)
+#define __NR_attrctl            (__NR_Linux + 203) /* not used */
+#define __NR_acl_get            (__NR_Linux + 204) /* not used */
+#define __NR_acl_set            (__NR_Linux + 205) /* not used */
+#define __NR_gettid             (__NR_Linux + 206)
+#define __NR_readahead          (__NR_Linux + 207)
+#define __NR_tkill              (__NR_Linux + 208)
+#define __NR_sendfile64         (__NR_Linux + 209)
+#define __NR_futex              (__NR_Linux + 210)
+#define __NR_sched_setaffinity  (__NR_Linux + 211)
+#define __NR_sched_getaffinity  (__NR_Linux + 212)
+#define __NR_set_thread_area    (__NR_Linux + 213) /* not used */
+#define __NR_get_thread_area    (__NR_Linux + 214) /* not used */
+#define __NR_io_setup           (__NR_Linux + 215)
+#define __NR_io_destroy         (__NR_Linux + 216)
+#define __NR_io_getevents       (__NR_Linux + 217)
+#define __NR_io_submit          (__NR_Linux + 218)
+#define __NR_io_cancel          (__NR_Linux + 219)
+#define __NR_alloc_hugepages    (__NR_Linux + 220) /* not used */
+#define __NR_free_hugepages     (__NR_Linux + 221) /* not used */
+#define __NR_exit_group         (__NR_Linux + 222)
+#define __NR_lookup_dcookie     (__NR_Linux + 223)
+#define __NR_epoll_create       (__NR_Linux + 224)
+#define __NR_epoll_ctl          (__NR_Linux + 225)
+#define __NR_epoll_wait         (__NR_Linux + 226)
+#define __NR_remap_file_pages   (__NR_Linux + 227)
+#define __NR_semtimedop         (__NR_Linux + 228)
+#define __NR_mq_open            (__NR_Linux + 229)
+#define __NR_mq_unlink          (__NR_Linux + 230)
+#define __NR_mq_timedsend       (__NR_Linux + 231)
+#define __NR_mq_timedreceive    (__NR_Linux + 232)
+#define __NR_mq_notify          (__NR_Linux + 233)
+#define __NR_mq_getsetattr      (__NR_Linux + 234)
+#define __NR_waitid		(__NR_Linux + 235)
+#define __NR_fadvise64_64	(__NR_Linux + 236)
+#define __NR_set_tid_address	(__NR_Linux + 237)
+#define __NR_setxattr		(__NR_Linux + 238)
+#define __NR_lsetxattr		(__NR_Linux + 239)
+#define __NR_fsetxattr		(__NR_Linux + 240)
+#define __NR_getxattr		(__NR_Linux + 241)
+#define __NR_lgetxattr		(__NR_Linux + 242)
+#define __NR_fgetxattr		(__NR_Linux + 243)
+#define __NR_listxattr		(__NR_Linux + 244)
+#define __NR_llistxattr		(__NR_Linux + 245)
+#define __NR_flistxattr		(__NR_Linux + 246)
+#define __NR_removexattr	(__NR_Linux + 247)
+#define __NR_lremovexattr	(__NR_Linux + 248)
+#define __NR_fremovexattr	(__NR_Linux + 249)
+#define __NR_timer_create	(__NR_Linux + 250)
+#define __NR_timer_settime	(__NR_Linux + 251)
+#define __NR_timer_gettime	(__NR_Linux + 252)
+#define __NR_timer_getoverrun	(__NR_Linux + 253)
+#define __NR_timer_delete	(__NR_Linux + 254)
+#define __NR_clock_settime	(__NR_Linux + 255)
+#define __NR_clock_gettime	(__NR_Linux + 256)
+#define __NR_clock_getres	(__NR_Linux + 257)
+#define __NR_clock_nanosleep	(__NR_Linux + 258)
+#define __NR_tgkill		(__NR_Linux + 259)
+#define __NR_mbind		(__NR_Linux + 260)
+#define __NR_get_mempolicy	(__NR_Linux + 261)
+#define __NR_set_mempolicy	(__NR_Linux + 262)
+#define __NR_vserver		(__NR_Linux + 263) /* not used */
+#define __NR_add_key		(__NR_Linux + 264)
+#define __NR_request_key	(__NR_Linux + 265)
+#define __NR_keyctl		(__NR_Linux + 266)
+#define __NR_ioprio_set		(__NR_Linux + 267)
+#define __NR_ioprio_get		(__NR_Linux + 268)
+#define __NR_inotify_init	(__NR_Linux + 269)
+#define __NR_inotify_add_watch	(__NR_Linux + 270)
+#define __NR_inotify_rm_watch	(__NR_Linux + 271)
+#define __NR_migrate_pages	(__NR_Linux + 272)
+#define __NR_pselect6		(__NR_Linux + 273)
+#define __NR_ppoll		(__NR_Linux + 274)
+#define __NR_openat		(__NR_Linux + 275)
+#define __NR_mkdirat		(__NR_Linux + 276)
+#define __NR_mknodat		(__NR_Linux + 277)
+#define __NR_fchownat		(__NR_Linux + 278)
+#define __NR_futimesat		(__NR_Linux + 279)
+#define __NR_fstatat64		(__NR_Linux + 280)
+#define __NR_unlinkat		(__NR_Linux + 281)
+#define __NR_renameat		(__NR_Linux + 282)
+#define __NR_linkat		(__NR_Linux + 283)
+#define __NR_symlinkat		(__NR_Linux + 284)
+#define __NR_readlinkat		(__NR_Linux + 285)
+#define __NR_fchmodat		(__NR_Linux + 286)
+#define __NR_faccessat		(__NR_Linux + 287)
+#define __NR_unshare		(__NR_Linux + 288)
+#define __NR_set_robust_list	(__NR_Linux + 289)
+#define __NR_get_robust_list	(__NR_Linux + 290)
+#define __NR_splice		(__NR_Linux + 291)
+#define __NR_sync_file_range	(__NR_Linux + 292)
+#define __NR_tee		(__NR_Linux + 293)
+#define __NR_vmsplice		(__NR_Linux + 294)
+#define __NR_move_pages		(__NR_Linux + 295)
+#define __NR_getcpu		(__NR_Linux + 296)
+#define __NR_epoll_pwait	(__NR_Linux + 297)
+#define __NR_statfs64		(__NR_Linux + 298)
+#define __NR_fstatfs64		(__NR_Linux + 299)
+#define __NR_kexec_load		(__NR_Linux + 300)
+#define __NR_utimensat		(__NR_Linux + 301)
+#define __NR_signalfd		(__NR_Linux + 302)
+#define __NR_timerfd		(__NR_Linux + 303) /* not used */
+#define __NR_eventfd		(__NR_Linux + 304)
+#define __NR_fallocate		(__NR_Linux + 305)
+#define __NR_timerfd_create	(__NR_Linux + 306)
+#define __NR_timerfd_settime	(__NR_Linux + 307)
+#define __NR_timerfd_gettime	(__NR_Linux + 308)
+#define __NR_signalfd4		(__NR_Linux + 309)
+#define __NR_eventfd2		(__NR_Linux + 310)
+#define __NR_epoll_create1	(__NR_Linux + 311)
+#define __NR_dup3		(__NR_Linux + 312)
+#define __NR_pipe2		(__NR_Linux + 313)
+#define __NR_inotify_init1	(__NR_Linux + 314)
+#define __NR_preadv		(__NR_Linux + 315)
+#define __NR_pwritev		(__NR_Linux + 316)
+#define __NR_rt_tgsigqueueinfo	(__NR_Linux + 317)
+#define __NR_perf_event_open	(__NR_Linux + 318)
+#define __NR_recvmmsg		(__NR_Linux + 319)
+#define __NR_accept4		(__NR_Linux + 320)
+#define __NR_prlimit64		(__NR_Linux + 321)
+#define __NR_fanotify_init	(__NR_Linux + 322)
+#define __NR_fanotify_mark	(__NR_Linux + 323)
+#define __NR_clock_adjtime	(__NR_Linux + 324)
+#define __NR_name_to_handle_at	(__NR_Linux + 325)
+#define __NR_open_by_handle_at	(__NR_Linux + 326)
+#define __NR_syncfs		(__NR_Linux + 327)
+#define __NR_setns		(__NR_Linux + 328)
+#define __NR_sendmmsg		(__NR_Linux + 329)
+#define __NR_process_vm_readv	(__NR_Linux + 330)
+#define __NR_process_vm_writev	(__NR_Linux + 331)
+#define __NR_kcmp		(__NR_Linux + 332)
+#define __NR_finit_module	(__NR_Linux + 333)
+#define __NR_sched_setattr	(__NR_Linux + 334)
+#define __NR_sched_getattr	(__NR_Linux + 335)
+#define __NR_utimes		(__NR_Linux + 336)
+#define __NR_renameat2		(__NR_Linux + 337)
+#define __NR_seccomp		(__NR_Linux + 338)
+#define __NR_getrandom		(__NR_Linux + 339)
+#define __NR_memfd_create	(__NR_Linux + 340)
+#define __NR_bpf		(__NR_Linux + 341)
+#define __NR_execveat		(__NR_Linux + 342)
+#define __NR_membarrier		(__NR_Linux + 343)
+#define __NR_userfaultfd	(__NR_Linux + 344)
+#define __NR_mlock2		(__NR_Linux + 345)
+#define __NR_copy_file_range	(__NR_Linux + 346)
+#define __NR_preadv2		(__NR_Linux + 347)
+#define __NR_pwritev2		(__NR_Linux + 348)
+#define __NR_statx		(__NR_Linux + 349)
+
+#define __NR_Linux_syscalls	(__NR_statx + 1)
+
+
+#define __IGNORE_select		/* newselect */
+#define __IGNORE_fadvise64	/* fadvise64_64 */
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
+
+#define LINUX_GATEWAY_ADDR      0x100
+
+#endif /* _UAPI_ASM_PARISC_UNISTD_H_ */
diff --git a/src/kernel/linux/v4.14/arch/parisc/install.sh b/src/kernel/linux/v4.14/arch/parisc/install.sh
new file mode 100644
index 0000000..6f68784
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/install.sh
@@ -0,0 +1,66 @@
+#!/bin/sh
+#
+# arch/parisc/install.sh, derived from arch/i386/boot/install.sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+#
+# "make install" script for i386 architecture
+#
+# Arguments:
+#   $1 - kernel version
+#   $2 - kernel image file
+#   $3 - kernel map file
+#   $4 - default install path (blank if root directory)
+#
+
+verify () {
+	if [ ! -f "$1" ]; then
+		echo ""                                                   1>&2
+		echo " *** Missing file: $1"                              1>&2
+		echo ' *** You need to run "make" before "make install".' 1>&2
+		echo ""                                                   1>&2
+		exit 1
+	fi
+}
+
+# Make sure the files actually exist
+
+verify "$2"
+verify "$3"
+
+# User may have a custom install script
+
+if [ -n "${INSTALLKERNEL}" ]; then
+  if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+  if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+fi
+
+# Default install
+
+if [ "$(basename $2)" = "zImage" ]; then
+# Compressed install
+  echo "Installing compressed kernel"
+  base=vmlinuz
+else
+# Normal install
+  echo "Installing normal kernel"
+  base=vmlinux
+fi
+
+if [ -f $4/$base-$1 ]; then
+  mv $4/$base-$1 $4/$base-$1.old
+fi
+cat $2 > $4/$base-$1
+
+# Install system map file
+if [ -f $4/System.map-$1 ]; then
+  mv $4/System.map-$1 $4/System.map-$1.old
+fi
+cp $3 $4/System.map-$1
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/.gitignore b/src/kernel/linux/v4.14/arch/parisc/kernel/.gitignore
new file mode 100644
index 0000000..c5f676c
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/Makefile b/src/kernel/linux/v4.14/arch/parisc/kernel/Makefile
new file mode 100644
index 0000000..649dc3e
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/Makefile
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for arch/parisc/kernel
+#
+
+# Built for the kernel image but not placed in obj-y: the head object and
+# the generated linker script.
+extra-y			:= head.o vmlinux.lds
+
+obj-y	     	:= cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \
+		   pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
+		   ptrace.o hardware.o inventory.o drivers.o \
+		   signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
+		   process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
+		   topology.o
+
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_cache.o = -pg
+CFLAGS_REMOVE_perf.o = -pg
+CFLAGS_REMOVE_unwind.o = -pg
+endif
+
+obj-$(CONFIG_SMP)	+= smp.o
+obj-$(CONFIG_PA11)	+= pci-dma.o
+obj-$(CONFIG_PCI)	+= pci.o
+obj-$(CONFIG_MODULES)	+= module.o
+obj-$(CONFIG_64BIT)	+= binfmt_elf32.o sys_parisc32.o signal32.o
+obj-$(CONFIG_STACKTRACE)+= stacktrace.o
+obj-$(CONFIG_AUDIT)	+= audit.o
+# obj64-y collects 64-bit-only objects; it is folded into obj-y just below.
+obj64-$(CONFIG_AUDIT)	+= compat_audit.o
+# only supported for PCX-W/U in 64-bit mode at the moment
+obj-$(CONFIG_64BIT)	+= perf.o perf_asm.o $(obj64-y)
+obj-$(CONFIG_FUNCTION_TRACER)		+= ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/asm-offsets.c b/src/kernel/linux/v4.14/arch/parisc/kernel/asm-offsets.c
new file mode 100644
index 0000000..dfff8a0
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/asm-offsets.c
@@ -0,0 +1,304 @@
+/* 
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ *
+ *    Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
+ *    Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
+ *    Copyright (C) 2000 Sam Creasey <sammy@sammy.net>
+ *    Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
+ *    Copyright (C) 2001 Paul Bame <bame at parisc-linux.org>
+ *    Copyright (C) 2001 Richard Hirst <rhirst at parisc-linux.org>
+ *    Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ *    Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/kbuild.h>
+
+#include <asm/pgtable.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/pdc.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_64BIT
+#define FRAME_SIZE	128
+#else
+#define FRAME_SIZE	64
+#endif
+#define FRAME_ALIGN	64
+
+/* Add FRAME_SIZE to the size x and align it to y. All definitions
+ * that use align_frame will include space for a frame.
+ */
+#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
+
+/*
+ * Build-time generator: kbuild compiles this main() and post-processes its
+ * assembly output; each DEFINE() below becomes a constant in the generated
+ * asm-offsets.h, usable from assembly sources. BLANK() just groups output.
+ */
+int main(void)
+{
+	/* task_struct fields accessed from assembly. */
+	DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
+	DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+	DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+	DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
+	DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+	DEFINE(TASK_PERSONALITY, offsetof(struct task_struct, personality));
+	DEFINE(TASK_PID, offsetof(struct task_struct, pid));
+	BLANK();
+	/* Offsets of the register save area embedded in task_struct
+	 * (thread.regs): general, floating-point and space registers,
+	 * instruction-address queues, and assorted control state. */
+	DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));
+	DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));
+	DEFINE(TASK_PT_GR1, offsetof(struct task_struct, thread.regs.gr[ 1]));
+	DEFINE(TASK_PT_GR2, offsetof(struct task_struct, thread.regs.gr[ 2]));
+	DEFINE(TASK_PT_GR3, offsetof(struct task_struct, thread.regs.gr[ 3]));
+	DEFINE(TASK_PT_GR4, offsetof(struct task_struct, thread.regs.gr[ 4]));
+	DEFINE(TASK_PT_GR5, offsetof(struct task_struct, thread.regs.gr[ 5]));
+	DEFINE(TASK_PT_GR6, offsetof(struct task_struct, thread.regs.gr[ 6]));
+	DEFINE(TASK_PT_GR7, offsetof(struct task_struct, thread.regs.gr[ 7]));
+	DEFINE(TASK_PT_GR8, offsetof(struct task_struct, thread.regs.gr[ 8]));
+	DEFINE(TASK_PT_GR9, offsetof(struct task_struct, thread.regs.gr[ 9]));
+	DEFINE(TASK_PT_GR10, offsetof(struct task_struct, thread.regs.gr[10]));
+	DEFINE(TASK_PT_GR11, offsetof(struct task_struct, thread.regs.gr[11]));
+	DEFINE(TASK_PT_GR12, offsetof(struct task_struct, thread.regs.gr[12]));
+	DEFINE(TASK_PT_GR13, offsetof(struct task_struct, thread.regs.gr[13]));
+	DEFINE(TASK_PT_GR14, offsetof(struct task_struct, thread.regs.gr[14]));
+	DEFINE(TASK_PT_GR15, offsetof(struct task_struct, thread.regs.gr[15]));
+	DEFINE(TASK_PT_GR16, offsetof(struct task_struct, thread.regs.gr[16]));
+	DEFINE(TASK_PT_GR17, offsetof(struct task_struct, thread.regs.gr[17]));
+	DEFINE(TASK_PT_GR18, offsetof(struct task_struct, thread.regs.gr[18]));
+	DEFINE(TASK_PT_GR19, offsetof(struct task_struct, thread.regs.gr[19]));
+	DEFINE(TASK_PT_GR20, offsetof(struct task_struct, thread.regs.gr[20]));
+	DEFINE(TASK_PT_GR21, offsetof(struct task_struct, thread.regs.gr[21]));
+	DEFINE(TASK_PT_GR22, offsetof(struct task_struct, thread.regs.gr[22]));
+	DEFINE(TASK_PT_GR23, offsetof(struct task_struct, thread.regs.gr[23]));
+	DEFINE(TASK_PT_GR24, offsetof(struct task_struct, thread.regs.gr[24]));
+	DEFINE(TASK_PT_GR25, offsetof(struct task_struct, thread.regs.gr[25]));
+	DEFINE(TASK_PT_GR26, offsetof(struct task_struct, thread.regs.gr[26]));
+	DEFINE(TASK_PT_GR27, offsetof(struct task_struct, thread.regs.gr[27]));
+	DEFINE(TASK_PT_GR28, offsetof(struct task_struct, thread.regs.gr[28]));
+	DEFINE(TASK_PT_GR29, offsetof(struct task_struct, thread.regs.gr[29]));
+	DEFINE(TASK_PT_GR30, offsetof(struct task_struct, thread.regs.gr[30]));
+	DEFINE(TASK_PT_GR31, offsetof(struct task_struct, thread.regs.gr[31]));
+	DEFINE(TASK_PT_FR0, offsetof(struct task_struct, thread.regs.fr[ 0]));
+	DEFINE(TASK_PT_FR1, offsetof(struct task_struct, thread.regs.fr[ 1]));
+	DEFINE(TASK_PT_FR2, offsetof(struct task_struct, thread.regs.fr[ 2]));
+	DEFINE(TASK_PT_FR3, offsetof(struct task_struct, thread.regs.fr[ 3]));
+	DEFINE(TASK_PT_FR4, offsetof(struct task_struct, thread.regs.fr[ 4]));
+	DEFINE(TASK_PT_FR5, offsetof(struct task_struct, thread.regs.fr[ 5]));
+	DEFINE(TASK_PT_FR6, offsetof(struct task_struct, thread.regs.fr[ 6]));
+	DEFINE(TASK_PT_FR7, offsetof(struct task_struct, thread.regs.fr[ 7]));
+	DEFINE(TASK_PT_FR8, offsetof(struct task_struct, thread.regs.fr[ 8]));
+	DEFINE(TASK_PT_FR9, offsetof(struct task_struct, thread.regs.fr[ 9]));
+	DEFINE(TASK_PT_FR10, offsetof(struct task_struct, thread.regs.fr[10]));
+	DEFINE(TASK_PT_FR11, offsetof(struct task_struct, thread.regs.fr[11]));
+	DEFINE(TASK_PT_FR12, offsetof(struct task_struct, thread.regs.fr[12]));
+	DEFINE(TASK_PT_FR13, offsetof(struct task_struct, thread.regs.fr[13]));
+	DEFINE(TASK_PT_FR14, offsetof(struct task_struct, thread.regs.fr[14]));
+	DEFINE(TASK_PT_FR15, offsetof(struct task_struct, thread.regs.fr[15]));
+	DEFINE(TASK_PT_FR16, offsetof(struct task_struct, thread.regs.fr[16]));
+	DEFINE(TASK_PT_FR17, offsetof(struct task_struct, thread.regs.fr[17]));
+	DEFINE(TASK_PT_FR18, offsetof(struct task_struct, thread.regs.fr[18]));
+	DEFINE(TASK_PT_FR19, offsetof(struct task_struct, thread.regs.fr[19]));
+	DEFINE(TASK_PT_FR20, offsetof(struct task_struct, thread.regs.fr[20]));
+	DEFINE(TASK_PT_FR21, offsetof(struct task_struct, thread.regs.fr[21]));
+	DEFINE(TASK_PT_FR22, offsetof(struct task_struct, thread.regs.fr[22]));
+	DEFINE(TASK_PT_FR23, offsetof(struct task_struct, thread.regs.fr[23]));
+	DEFINE(TASK_PT_FR24, offsetof(struct task_struct, thread.regs.fr[24]));
+	DEFINE(TASK_PT_FR25, offsetof(struct task_struct, thread.regs.fr[25]));
+	DEFINE(TASK_PT_FR26, offsetof(struct task_struct, thread.regs.fr[26]));
+	DEFINE(TASK_PT_FR27, offsetof(struct task_struct, thread.regs.fr[27]));
+	DEFINE(TASK_PT_FR28, offsetof(struct task_struct, thread.regs.fr[28]));
+	DEFINE(TASK_PT_FR29, offsetof(struct task_struct, thread.regs.fr[29]));
+	DEFINE(TASK_PT_FR30, offsetof(struct task_struct, thread.regs.fr[30]));
+	DEFINE(TASK_PT_FR31, offsetof(struct task_struct, thread.regs.fr[31]));
+	DEFINE(TASK_PT_SR0, offsetof(struct task_struct, thread.regs.sr[ 0]));
+	DEFINE(TASK_PT_SR1, offsetof(struct task_struct, thread.regs.sr[ 1]));
+	DEFINE(TASK_PT_SR2, offsetof(struct task_struct, thread.regs.sr[ 2]));
+	DEFINE(TASK_PT_SR3, offsetof(struct task_struct, thread.regs.sr[ 3]));
+	DEFINE(TASK_PT_SR4, offsetof(struct task_struct, thread.regs.sr[ 4]));
+	DEFINE(TASK_PT_SR5, offsetof(struct task_struct, thread.regs.sr[ 5]));
+	DEFINE(TASK_PT_SR6, offsetof(struct task_struct, thread.regs.sr[ 6]));
+	DEFINE(TASK_PT_SR7, offsetof(struct task_struct, thread.regs.sr[ 7]));
+	DEFINE(TASK_PT_IASQ0, offsetof(struct task_struct, thread.regs.iasq[0]));
+	DEFINE(TASK_PT_IASQ1, offsetof(struct task_struct, thread.regs.iasq[1]));
+	DEFINE(TASK_PT_IAOQ0, offsetof(struct task_struct, thread.regs.iaoq[0]));
+	DEFINE(TASK_PT_IAOQ1, offsetof(struct task_struct, thread.regs.iaoq[1]));
+	DEFINE(TASK_PT_CR27, offsetof(struct task_struct, thread.regs.cr27));
+	DEFINE(TASK_PT_ORIG_R28, offsetof(struct task_struct, thread.regs.orig_r28));
+	DEFINE(TASK_PT_KSP, offsetof(struct task_struct, thread.regs.ksp));
+	DEFINE(TASK_PT_KPC, offsetof(struct task_struct, thread.regs.kpc));
+	DEFINE(TASK_PT_SAR, offsetof(struct task_struct, thread.regs.sar));
+	DEFINE(TASK_PT_IIR, offsetof(struct task_struct, thread.regs.iir));
+	DEFINE(TASK_PT_ISR, offsetof(struct task_struct, thread.regs.isr));
+	DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
+	BLANK();
+	DEFINE(TASK_SZ, sizeof(struct task_struct));
+	/* TASK_SZ_ALGN includes space for a stack frame. */
+	DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN));
+	BLANK();
+	/* Offsets within a bare struct pt_regs (same layout fields as above,
+	 * but relative to the pt_regs itself). */
+	DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
+	DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
+	DEFINE(PT_GR2, offsetof(struct pt_regs, gr[ 2]));
+	DEFINE(PT_GR3, offsetof(struct pt_regs, gr[ 3]));
+	DEFINE(PT_GR4, offsetof(struct pt_regs, gr[ 4]));
+	DEFINE(PT_GR5, offsetof(struct pt_regs, gr[ 5]));
+	DEFINE(PT_GR6, offsetof(struct pt_regs, gr[ 6]));
+	DEFINE(PT_GR7, offsetof(struct pt_regs, gr[ 7]));
+	DEFINE(PT_GR8, offsetof(struct pt_regs, gr[ 8]));
+	DEFINE(PT_GR9, offsetof(struct pt_regs, gr[ 9]));
+	DEFINE(PT_GR10, offsetof(struct pt_regs, gr[10]));
+	DEFINE(PT_GR11, offsetof(struct pt_regs, gr[11]));
+	DEFINE(PT_GR12, offsetof(struct pt_regs, gr[12]));
+	DEFINE(PT_GR13, offsetof(struct pt_regs, gr[13]));
+	DEFINE(PT_GR14, offsetof(struct pt_regs, gr[14]));
+	DEFINE(PT_GR15, offsetof(struct pt_regs, gr[15]));
+	DEFINE(PT_GR16, offsetof(struct pt_regs, gr[16]));
+	DEFINE(PT_GR17, offsetof(struct pt_regs, gr[17]));
+	DEFINE(PT_GR18, offsetof(struct pt_regs, gr[18]));
+	DEFINE(PT_GR19, offsetof(struct pt_regs, gr[19]));
+	DEFINE(PT_GR20, offsetof(struct pt_regs, gr[20]));
+	DEFINE(PT_GR21, offsetof(struct pt_regs, gr[21]));
+	DEFINE(PT_GR22, offsetof(struct pt_regs, gr[22]));
+	DEFINE(PT_GR23, offsetof(struct pt_regs, gr[23]));
+	DEFINE(PT_GR24, offsetof(struct pt_regs, gr[24]));
+	DEFINE(PT_GR25, offsetof(struct pt_regs, gr[25]));
+	DEFINE(PT_GR26, offsetof(struct pt_regs, gr[26]));
+	DEFINE(PT_GR27, offsetof(struct pt_regs, gr[27]));
+	DEFINE(PT_GR28, offsetof(struct pt_regs, gr[28]));
+	DEFINE(PT_GR29, offsetof(struct pt_regs, gr[29]));
+	DEFINE(PT_GR30, offsetof(struct pt_regs, gr[30]));
+	DEFINE(PT_GR31, offsetof(struct pt_regs, gr[31]));
+	DEFINE(PT_FR0, offsetof(struct pt_regs, fr[ 0]));
+	DEFINE(PT_FR1, offsetof(struct pt_regs, fr[ 1]));
+	DEFINE(PT_FR2, offsetof(struct pt_regs, fr[ 2]));
+	DEFINE(PT_FR3, offsetof(struct pt_regs, fr[ 3]));
+	DEFINE(PT_FR4, offsetof(struct pt_regs, fr[ 4]));
+	DEFINE(PT_FR5, offsetof(struct pt_regs, fr[ 5]));
+	DEFINE(PT_FR6, offsetof(struct pt_regs, fr[ 6]));
+	DEFINE(PT_FR7, offsetof(struct pt_regs, fr[ 7]));
+	DEFINE(PT_FR8, offsetof(struct pt_regs, fr[ 8]));
+	DEFINE(PT_FR9, offsetof(struct pt_regs, fr[ 9]));
+	DEFINE(PT_FR10, offsetof(struct pt_regs, fr[10]));
+	DEFINE(PT_FR11, offsetof(struct pt_regs, fr[11]));
+	DEFINE(PT_FR12, offsetof(struct pt_regs, fr[12]));
+	DEFINE(PT_FR13, offsetof(struct pt_regs, fr[13]));
+	DEFINE(PT_FR14, offsetof(struct pt_regs, fr[14]));
+	DEFINE(PT_FR15, offsetof(struct pt_regs, fr[15]));
+	DEFINE(PT_FR16, offsetof(struct pt_regs, fr[16]));
+	DEFINE(PT_FR17, offsetof(struct pt_regs, fr[17]));
+	DEFINE(PT_FR18, offsetof(struct pt_regs, fr[18]));
+	DEFINE(PT_FR19, offsetof(struct pt_regs, fr[19]));
+	DEFINE(PT_FR20, offsetof(struct pt_regs, fr[20]));
+	DEFINE(PT_FR21, offsetof(struct pt_regs, fr[21]));
+	DEFINE(PT_FR22, offsetof(struct pt_regs, fr[22]));
+	DEFINE(PT_FR23, offsetof(struct pt_regs, fr[23]));
+	DEFINE(PT_FR24, offsetof(struct pt_regs, fr[24]));
+	DEFINE(PT_FR25, offsetof(struct pt_regs, fr[25]));
+	DEFINE(PT_FR26, offsetof(struct pt_regs, fr[26]));
+	DEFINE(PT_FR27, offsetof(struct pt_regs, fr[27]));
+	DEFINE(PT_FR28, offsetof(struct pt_regs, fr[28]));
+	DEFINE(PT_FR29, offsetof(struct pt_regs, fr[29]));
+	DEFINE(PT_FR30, offsetof(struct pt_regs, fr[30]));
+	DEFINE(PT_FR31, offsetof(struct pt_regs, fr[31]));
+	DEFINE(PT_SR0, offsetof(struct pt_regs, sr[ 0]));
+	DEFINE(PT_SR1, offsetof(struct pt_regs, sr[ 1]));
+	DEFINE(PT_SR2, offsetof(struct pt_regs, sr[ 2]));
+	DEFINE(PT_SR3, offsetof(struct pt_regs, sr[ 3]));
+	DEFINE(PT_SR4, offsetof(struct pt_regs, sr[ 4]));
+	DEFINE(PT_SR5, offsetof(struct pt_regs, sr[ 5]));
+	DEFINE(PT_SR6, offsetof(struct pt_regs, sr[ 6]));
+	DEFINE(PT_SR7, offsetof(struct pt_regs, sr[ 7]));
+	DEFINE(PT_IASQ0, offsetof(struct pt_regs, iasq[0]));
+	DEFINE(PT_IASQ1, offsetof(struct pt_regs, iasq[1]));
+	DEFINE(PT_IAOQ0, offsetof(struct pt_regs, iaoq[0]));
+	DEFINE(PT_IAOQ1, offsetof(struct pt_regs, iaoq[1]));
+	DEFINE(PT_CR27, offsetof(struct pt_regs, cr27));
+	DEFINE(PT_ORIG_R28, offsetof(struct pt_regs, orig_r28));
+	DEFINE(PT_KSP, offsetof(struct pt_regs, ksp));
+	DEFINE(PT_KPC, offsetof(struct pt_regs, kpc));
+	DEFINE(PT_SAR, offsetof(struct pt_regs, sar));
+	DEFINE(PT_IIR, offsetof(struct pt_regs, iir));
+	DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
+	DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
+	DEFINE(PT_SIZE, sizeof(struct pt_regs));
+	/* PT_SZ_ALGN includes space for a stack frame. */
+	DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
+	BLANK();
+	/* thread_info fields accessed from assembly. */
+	DEFINE(TI_TASK, offsetof(struct thread_info, task));
+	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+	DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
+	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+	DEFINE(THREAD_SZ, sizeof(struct thread_info));
+	/* THREAD_SZ_ALGN includes space for a stack frame. */
+	DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN));
+	BLANK();
+	/* pdc_cache_info geometry consumed by the cache/TLB flush loops. */
+	DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
+	DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
+	DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count));
+	DEFINE(ICACHE_LOOP, offsetof(struct pdc_cache_info, ic_loop));
+	DEFINE(DCACHE_BASE, offsetof(struct pdc_cache_info, dc_base));
+	DEFINE(DCACHE_STRIDE, offsetof(struct pdc_cache_info, dc_stride));
+	DEFINE(DCACHE_COUNT, offsetof(struct pdc_cache_info, dc_count));
+	DEFINE(DCACHE_LOOP, offsetof(struct pdc_cache_info, dc_loop));
+	DEFINE(ITLB_SID_BASE, offsetof(struct pdc_cache_info, it_sp_base));
+	DEFINE(ITLB_SID_STRIDE, offsetof(struct pdc_cache_info, it_sp_stride));
+	DEFINE(ITLB_SID_COUNT, offsetof(struct pdc_cache_info, it_sp_count));
+	DEFINE(ITLB_OFF_BASE, offsetof(struct pdc_cache_info, it_off_base));
+	DEFINE(ITLB_OFF_STRIDE, offsetof(struct pdc_cache_info, it_off_stride));
+	DEFINE(ITLB_OFF_COUNT, offsetof(struct pdc_cache_info, it_off_count));
+	DEFINE(ITLB_LOOP, offsetof(struct pdc_cache_info, it_loop));
+	DEFINE(DTLB_SID_BASE, offsetof(struct pdc_cache_info, dt_sp_base));
+	DEFINE(DTLB_SID_STRIDE, offsetof(struct pdc_cache_info, dt_sp_stride));
+	DEFINE(DTLB_SID_COUNT, offsetof(struct pdc_cache_info, dt_sp_count));
+	DEFINE(DTLB_OFF_BASE, offsetof(struct pdc_cache_info, dt_off_base));
+	DEFINE(DTLB_OFF_STRIDE, offsetof(struct pdc_cache_info, dt_off_stride));
+	DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count));
+	DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop));
+	BLANK();
+	/* PA bit numbering counts from the MSB, hence the 31-x conversion. */
+	DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP);
+	DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP);
+	BLANK();
+	/* Page-table geometry constants for the TLB-miss handlers. */
+	DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
+	DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
+	DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD);
+	DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD);
+	DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE);
+	DEFINE(ASM_PGD_PMD_OFFSET, -(PAGE_SIZE << PGD_ORDER));
+	DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT));
+	DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT);
+	DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
+	DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE);
+	DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
+	DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
+	DEFINE(ASM_PT_INITIAL, PT_INITIAL);
+	BLANK();
+	/* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text
+	 * and kernel data on physical huge pages */
+#ifdef CONFIG_HUGETLB_PAGE
+	DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
+#else
+	DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
+#endif
+	BLANK();
+	DEFINE(ASM_PDC_RESULT_SIZE, NUM_PDC_RESULT * sizeof(unsigned long));
+	BLANK();
+	return 0;
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/audit.c b/src/kernel/linux/v4.14/arch/parisc/kernel/audit.c
new file mode 100644
index 0000000..9eb47b2
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/audit.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/audit.h>
+#include <asm/unistd.h>
+
+/*
+ * Syscall-number tables for audit class registration.  Each list is
+ * pulled in from the shared asm-generic audit headers and terminated
+ * with ~0U as required by audit_register_class().
+ */
+static unsigned dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+static unsigned read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+static unsigned write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+static unsigned chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+static unsigned signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+/*
+ * Tell the audit core whether 'arch' is the 32-bit compat ABI (returns 1)
+ * or the native ABI (returns 0).  Without CONFIG_COMPAT everything is
+ * native.
+ */
+int audit_classify_arch(int arch)
+{
+#ifdef CONFIG_COMPAT
+	if (arch == AUDIT_ARCH_PARISC)
+		return 1;
+#endif
+	return 0;
+}
+
+/*
+ * Map a syscall number to the audit core's syscall class code
+ * (2 = open, 3 = openat, 5 = execve, 0 = none of these).
+ * Compat-ABI syscalls are delegated to parisc32_classify_syscall().
+ */
+int audit_classify_syscall(int abi, unsigned syscall)
+{
+#ifdef CONFIG_COMPAT
+	extern int parisc32_classify_syscall(unsigned);
+	if (abi == AUDIT_ARCH_PARISC)
+		return parisc32_classify_syscall(syscall);
+#endif
+	switch (syscall) {
+	case __NR_open:
+		return 2;
+	case __NR_openat:
+		return 3;
+	case __NR_execve:
+		return 5;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Register the syscall class tables with the audit core at boot.
+ * With CONFIG_COMPAT the 32-bit tables (provided by compat_audit.c)
+ * are registered under the *_32 class ids as well.
+ */
+static int __init audit_classes_init(void)
+{
+#ifdef CONFIG_COMPAT
+	extern __u32 parisc32_dir_class[];
+	extern __u32 parisc32_write_class[];
+	extern __u32 parisc32_read_class[];
+	extern __u32 parisc32_chattr_class[];
+	extern __u32 parisc32_signal_class[];
+	audit_register_class(AUDIT_CLASS_WRITE_32, parisc32_write_class);
+	audit_register_class(AUDIT_CLASS_READ_32, parisc32_read_class);
+	audit_register_class(AUDIT_CLASS_DIR_WRITE_32, parisc32_dir_class);
+	audit_register_class(AUDIT_CLASS_CHATTR_32, parisc32_chattr_class);
+	audit_register_class(AUDIT_CLASS_SIGNAL_32, parisc32_signal_class);
+#endif
+	audit_register_class(AUDIT_CLASS_WRITE, write_class);
+	audit_register_class(AUDIT_CLASS_READ, read_class);
+	audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
+	audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
+	audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
+	return 0;
+}
+
+__initcall(audit_classes_init);
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/binfmt_elf32.c b/src/kernel/linux/v4.14/arch/parisc/kernel/binfmt_elf32.c
new file mode 100644
index 0000000..20dfa08
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/binfmt_elf32.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for 32-bit Linux/Parisc ELF binaries on 64 bit kernels
+ *
+ * Copyright (C) 2000 John Marvin
+ * Copyright (C) 2000 Hewlett Packard Co.
+ *
+ * Heavily inspired from various other efforts to do the same thing
+ * (ia64,sparc64/mips64)
+ */
+
+/* Make sure include/asm-parisc/elf.h does the right thing */
+
+#define ELF_CLASS	ELFCLASS32
+
+/*
+ * Fill a 32-bit elf_gregset_t for a core dump from the saved pt_regs:
+ * slots 0-31 gr[], 32-39 sr[], 40-43 iaoq/iasq, 44-47 sar/iir/isr/ior.
+ * Slots 48-63 are filled from control registers read live via mfctl()
+ * rather than from the saved state.
+ */
+#define ELF_CORE_COPY_REGS(dst, pt)	\
+	memset(dst, 0, sizeof(dst));	/* don't leak any "random" bits */ \
+	{	int i; \
+		for (i = 0; i < 32; i++) dst[i] = (elf_greg_t) pt->gr[i]; \
+		for (i = 0; i < 8; i++) dst[32 + i] = (elf_greg_t) pt->sr[i]; \
+	} \
+	dst[40] = (elf_greg_t) pt->iaoq[0]; dst[41] = (elf_greg_t) pt->iaoq[1]; \
+	dst[42] = (elf_greg_t) pt->iasq[0]; dst[43] = (elf_greg_t) pt->iasq[1]; \
+	dst[44] = (elf_greg_t) pt->sar;   dst[45] = (elf_greg_t) pt->iir; \
+	dst[46] = (elf_greg_t) pt->isr;   dst[47] = (elf_greg_t) pt->ior; \
+	dst[48] = (elf_greg_t) mfctl(22); dst[49] = (elf_greg_t) mfctl(0); \
+	dst[50] = (elf_greg_t) mfctl(24); dst[51] = (elf_greg_t) mfctl(25); \
+	dst[52] = (elf_greg_t) mfctl(26); dst[53] = (elf_greg_t) mfctl(27); \
+	dst[54] = (elf_greg_t) mfctl(28); dst[55] = (elf_greg_t) mfctl(29); \
+	dst[56] = (elf_greg_t) mfctl(30); dst[57] = (elf_greg_t) mfctl(31); \
+	dst[58] = (elf_greg_t) mfctl( 8); dst[59] = (elf_greg_t) mfctl( 9); \
+	dst[60] = (elf_greg_t) mfctl(12); dst[61] = (elf_greg_t) mfctl(13); \
+	dst[62] = (elf_greg_t) mfctl(10); dst[63] = (elf_greg_t) mfctl(15);
+
+
+/* 32-bit register slot type for the compat core-dump layout. */
+typedef unsigned int elf_greg_t;
+
+#include <linux/spinlock.h>
+#include <asm/processor.h>
+#include <linux/module.h>
+#include <linux/elfcore.h>
+#include <linux/compat.h>		/* struct compat_timeval */
+
+#define elf_prstatus elf_prstatus32
+/*
+ * 32-bit layout of the NT_PRSTATUS core-dump note: same shape as the
+ * native struct elf_prstatus but with 32-bit time values
+ * (compat_timeval) so 32-bit tools can parse dumps from a 64-bit
+ * kernel.
+ */
+struct elf_prstatus32
+{
+	struct elf_siginfo pr_info;	/* Info associated with signal */
+	short	pr_cursig;		/* Current signal */
+	unsigned int pr_sigpend;	/* Set of pending signals */
+	unsigned int pr_sighold;	/* Set of held signals */
+	pid_t	pr_pid;
+	pid_t	pr_ppid;
+	pid_t	pr_pgrp;
+	pid_t	pr_sid;
+	struct compat_timeval pr_utime;		/* User time */
+	struct compat_timeval pr_stime;		/* System time */
+	struct compat_timeval pr_cutime;	/* Cumulative user time */
+	struct compat_timeval pr_cstime;	/* Cumulative system time */
+	elf_gregset_t pr_reg;	/* GP registers */
+	int pr_fpvalid;		/* True if math co-processor being used.  */
+};
+
+#define elf_prpsinfo elf_prpsinfo32
+/*
+ * 32-bit layout of the NT_PRPSINFO core-dump note (process state,
+ * credentials and command line), overriding the native struct for
+ * compat dumps.
+ */
+struct elf_prpsinfo32
+{
+	char	pr_state;	/* numeric process state */
+	char	pr_sname;	/* char for pr_state */
+	char	pr_zomb;	/* zombie */
+	char	pr_nice;	/* nice val */
+	unsigned int pr_flag;	/* flags */
+	u16	pr_uid;
+	u16	pr_gid;
+	pid_t	pr_pid, pr_ppid, pr_pgrp, pr_sid;
+	/* Lots missing */
+	char	pr_fname[16];	/* filename of executable */
+	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
+};
+
+/* Rename the loader's init entry point so it can coexist with the
+ * native 64-bit ELF loader in the same kernel. */
+#define init_elf_binfmt init_elf32_binfmt
+
+#define ELF_PLATFORM  ("PARISC32\0")
+
+/*
+ * We should probably use this macro to set a flag somewhere to indicate
+ * this is a 32 on 64 process. We could use PER_LINUX_32BIT, or we
+ * could set a processor dependent flag in the thread_struct.
+ */
+
+/* Mark the task 32-bit and shrink its map base / task size accordingly. */
+#undef SET_PERSONALITY
+#define SET_PERSONALITY(ex) \
+	set_thread_flag(TIF_32BIT); \
+	current->thread.map_base = DEFAULT_MAP_BASE32; \
+	current->thread.task_size = DEFAULT_TASK_SIZE32 \
+
+#undef ns_to_timeval
+#define ns_to_timeval ns_to_compat_timeval
+
+/* Build the generic ELF loader with all the 32-bit overrides above. */
+#include "../../../fs/binfmt_elf.c"
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/cache.c b/src/kernel/linux/v4.14/arch/parisc/kernel/cache.c
new file mode 100644
index 0000000..e3b4554
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/cache.c
@@ -0,0 +1,661 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
+ * Copyright (C) 1999 SuSE GmbH Nuernberg
+ * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
+ *
+ * Cache and TLB management
+ *
+ */
+ 
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <asm/pdc.h>
+#include <asm/cache.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/shmparam.h>
+
+int split_tlb __read_mostly;
+int dcache_stride __read_mostly;
+int icache_stride __read_mostly;
+EXPORT_SYMBOL(dcache_stride);
+
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+EXPORT_SYMBOL(flush_dcache_page_asm);
+void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+
+
+/* On some machines (e.g. ones with the Merced bus), there can be
+ * only a single PxTLB broadcast at a time; this must be guaranteed
+ * by software.  We put a spinlock around all TLB flushes  to
+ * ensure this.
+ */
+DEFINE_SPINLOCK(pa_tlb_lock);
+
+struct pdc_cache_info cache_info __read_mostly;
+#ifndef CONFIG_PA20
+static struct pdc_btlb_info btlb_info __read_mostly;
+#endif
+
+#ifdef CONFIG_SMP
+/* Run the local data-cache flush on every online CPU. */
+void
+flush_data_cache(void)
+{
+	on_each_cpu(flush_data_cache_local, NULL, 1);
+}
+/* Run the local instruction-cache flush on every online CPU. */
+void 
+flush_instruction_cache(void)
+{
+	on_each_cpu(flush_instruction_cache_local, NULL, 1);
+}
+#endif
+
+/* Flush both the instruction and data cache on the calling CPU only. */
+void
+flush_cache_all_local(void)
+{
+	flush_instruction_cache_local(NULL);
+	flush_data_cache_local(NULL);
+}
+EXPORT_SYMBOL(flush_cache_all_local);
+
+/* Virtual address of pfn.  */
+#define pfn_va(pfn)	__va(PFN_PHYS(pfn))
+
+/*
+ * Called after a PTE has been installed for 'address': if the backing
+ * page was marked dcache-dirty by flush_dcache_page(), write back its
+ * kernel mapping now so the new user mapping sees coherent data.
+ */
+void
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+	unsigned long pfn = pte_pfn(*ptep);
+	struct page *page;
+
+	/* We don't have pte special.  As a result, we can be called with
+	   an invalid pfn and we don't need to flush the kernel dcache page.
+	   This occurs with FireGL card in C8000.  */
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+		flush_kernel_dcache_page_addr(pfn_va(pfn));
+		clear_bit(PG_dcache_dirty, &page->flags);
+	} else if (parisc_requires_coherency())
+		flush_kernel_dcache_page_addr(pfn_va(pfn));
+}
+
+/*
+ * Print cache and TLB geometry from the PDC-provided cache_info into a
+ * seq_file (consumed by the /proc cpuinfo machinery — confirm caller).
+ */
+void
+show_cache_info(struct seq_file *m)
+{
+	char buf[32];
+
+	seq_printf(m, "I-cache\t\t: %ld KB\n", 
+		cache_info.ic_size/1024 );
+	/* buf is only filled — and only read — when the D-cache is
+	 * set-associative (dc_loop != 1). */
+	if (cache_info.dc_loop != 1)
+		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
+	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
+		cache_info.dc_size/1024,
+		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
+		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
+		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
+	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
+		cache_info.it_size,
+		cache_info.dt_size,
+		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
+	);
+		
+#ifndef CONFIG_PA20
+	/* BTLB - Block TLB */
+	if (btlb_info.max_size==0) {
+		seq_printf(m, "BTLB\t\t: not supported\n" );
+	} else {
+		seq_printf(m, 
+		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
+		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
+		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
+		btlb_info.max_size, (int)4096,
+		btlb_info.max_size>>8,
+		btlb_info.fixed_range_info.num_i,
+		btlb_info.fixed_range_info.num_d,
+		btlb_info.fixed_range_info.num_comb, 
+		btlb_info.variable_range_info.num_i,
+		btlb_info.variable_range_info.num_d,
+		btlb_info.variable_range_info.num_comb
+		);
+	}
+#endif
+}
+
+/*
+ * Boot-time cache setup: query cache/TLB geometry from PDC firmware,
+ * decide whether I/D TLBs must be flushed separately (split_tlb), and
+ * compute the flush strides used by the assembly flush loops.
+ */
+void __init 
+parisc_cache_init(void)
+{
+	if (pdc_cache_info(&cache_info) < 0)
+		panic("parisc_cache_init: pdc_cache_info failed");
+
+#if 0
+	printk("ic_size %lx dc_size %lx it_size %lx\n",
+		cache_info.ic_size,
+		cache_info.dc_size,
+		cache_info.it_size);
+
+	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
+		cache_info.dc_base,
+		cache_info.dc_stride,
+		cache_info.dc_count,
+		cache_info.dc_loop);
+
+	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
+		*(unsigned long *) (&cache_info.dc_conf),
+		cache_info.dc_conf.cc_alias,
+		cache_info.dc_conf.cc_block,
+		cache_info.dc_conf.cc_line,
+		cache_info.dc_conf.cc_shift);
+	printk("	wt %d sh %d cst %d hv %d\n",
+		cache_info.dc_conf.cc_wt,
+		cache_info.dc_conf.cc_sh,
+		cache_info.dc_conf.cc_cst,
+		cache_info.dc_conf.cc_hv);
+
+	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
+		cache_info.ic_base,
+		cache_info.ic_stride,
+		cache_info.ic_count,
+		cache_info.ic_loop);
+
+	printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
+		cache_info.it_sp_base,
+		cache_info.it_sp_stride,
+		cache_info.it_sp_count,
+		cache_info.it_loop,
+		cache_info.it_off_base,
+		cache_info.it_off_stride,
+		cache_info.it_off_count);
+
+	printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
+		cache_info.dt_sp_base,
+		cache_info.dt_sp_stride,
+		cache_info.dt_sp_count,
+		cache_info.dt_loop,
+		cache_info.dt_off_base,
+		cache_info.dt_off_stride,
+		cache_info.dt_off_count);
+
+	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
+		*(unsigned long *) (&cache_info.ic_conf),
+		cache_info.ic_conf.cc_alias,
+		cache_info.ic_conf.cc_block,
+		cache_info.ic_conf.cc_line,
+		cache_info.ic_conf.cc_shift);
+	printk("	wt %d sh %d cst %d hv %d\n",
+		cache_info.ic_conf.cc_wt,
+		cache_info.ic_conf.cc_sh,
+		cache_info.ic_conf.cc_cst,
+		cache_info.ic_conf.cc_hv);
+
+	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
+		cache_info.dt_conf.tc_sh,
+		cache_info.dt_conf.tc_page,
+		cache_info.dt_conf.tc_cst,
+		cache_info.dt_conf.tc_aid,
+		cache_info.dt_conf.tc_sr);
+
+	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
+		cache_info.it_conf.tc_sh,
+		cache_info.it_conf.tc_page,
+		cache_info.it_conf.tc_cst,
+		cache_info.it_conf.tc_aid,
+		cache_info.it_conf.tc_sr);
+#endif
+
+	/* tc_sh == 0 or 2 means the D-TLB is not shared with the I-TLB,
+	 * so both must be flushed. */
+	split_tlb = 0;
+	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
+		if (cache_info.dt_conf.tc_sh == 2)
+			printk(KERN_WARNING "Unexpected TLB configuration. "
+			"Will flush I/D separately (could be optimized).\n");
+
+		split_tlb = 1;
+	}
+
+	/* "New and Improved" version from Jim Hull 
+	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
+	 * The following CAFL_STRIDE is an optimized version, see
+	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
+	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
+	 */
+#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
+	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
+	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
+#undef CAFL_STRIDE
+
+#ifndef CONFIG_PA20
+	/* BTLB info is optional; treat a query failure as "no BTLB". */
+	if (pdc_btlb_info(&btlb_info) < 0) {
+		memset(&btlb_info, 0, sizeof btlb_info);
+	}
+#endif
+
+	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
+						PDC_MODEL_NVA_UNSUPPORTED) {
+		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
+#if 0
+		panic("SMP kernel required to avoid non-equivalent aliasing");
+#endif
+	}
+}
+
+/*
+ * disable_sr_hashing - disable space-register hashing on this CPU.
+ *
+ * Selects the CPU-family-specific instruction sequence, runs it via
+ * disable_sr_hashing_asm(), then queries PDC to verify that no SpaceID
+ * bits are still being hashed; panics if hashing remains enabled.
+ */
+void disable_sr_hashing(void)
+{
+	int srhash_type, retval;
+	unsigned long space_bits;
+
+	switch (boot_cpu_data.cpu_type) {
+	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
+		BUG();
+		return;
+
+	case pcxs:
+	case pcxt:
+	case pcxt_:
+		srhash_type = SRHASH_PCXST;
+		break;
+
+	case pcxl:
+		srhash_type = SRHASH_PCXL;
+		break;
+
+	case pcxl2: /* pcxl2 doesn't support space register hashing */
+		return;
+
+	default: /* Currently all PA2.0 machines use the same ins. sequence */
+		srhash_type = SRHASH_PA20;
+		break;
+	}
+
+	disable_sr_hashing_asm(srhash_type);
+
+	retval = pdc_spaceid_bits(&space_bits);
+	/* If this procedure isn't implemented, don't panic. */
+	if (retval < 0 && retval != PDC_BAD_OPTION)
+		panic("pdc_spaceid_bits call failed.\n");
+	if (space_bits != 0)
+		panic("SpaceID hashing is still on!\n");
+}
+
+/*
+ * Flush one user-space page alias from the D-cache, and from the I-cache
+ * as well when the VMA is executable.  Preemption is disabled around the
+ * asm helpers (NOTE(review): they appear to use a per-CPU flush-only
+ * alias mapping -- see pacache.S -- so migration mid-flush must not occur).
+ */
+static inline void
+__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+		   unsigned long physaddr)
+{
+	preempt_disable();
+	flush_dcache_page_asm(physaddr, vmaddr);
+	if (vma->vm_flags & VM_EXEC)
+		flush_icache_page_asm(physaddr, vmaddr);
+	preempt_enable();
+}
+
+/*
+ * flush_dcache_page - make a page-cache page coherent with user mappings.
+ *
+ * If the page belongs to a mapping that no process has mapped yet, the
+ * flush is deferred by setting PG_dcache_dirty.  Otherwise the kernel
+ * alias is flushed and then each user alias is flushed -- once per
+ * SHM_COLOUR class, since congruent aliases share cache lines.
+ */
+void flush_dcache_page(struct page *page)
+{
+	struct address_space *mapping = page_mapping(page);
+	struct vm_area_struct *mpnt;
+	unsigned long offset;
+	unsigned long addr, old_addr = 0;
+	pgoff_t pgoff;
+
+	if (mapping && !mapping_mapped(mapping)) {
+		set_bit(PG_dcache_dirty, &page->flags);
+		return;
+	}
+
+	flush_kernel_dcache_page(page);
+
+	if (!mapping)
+		return;
+
+	pgoff = page->index;
+
+	/* We have carefully arranged in arch_get_unmapped_area() that
+	 * *any* mappings of a file are always congruently mapped (whether
+	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
+	 * to flush one address here for them all to become coherent */
+
+	flush_dcache_mmap_lock(mapping);
+	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
+		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
+		addr = mpnt->vm_start + offset;
+
+		/* The TLB is the engine of coherence on parisc: The
+		 * CPU is entitled to speculate any page with a TLB
+		 * mapping, so here we kill the mapping then flush the
+		 * page along a special flush only alias mapping.
+		 * This guarantees that the page is no-longer in the
+		 * cache for any process and nor may it be
+		 * speculatively read in (until the user or kernel
+		 * specifically accesses it, of course) */
+
+		flush_tlb_page(mpnt, addr);
+		/* Flush only the first alias of each colour; a second,
+		 * differently-coloured alias is a bug and is reported. */
+		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+				      != (addr & (SHM_COLOUR - 1))) {
+			__flush_cache_page(mpnt, addr, page_to_phys(page));
+			if (old_addr)
+				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
+			old_addr = addr;
+		}
+	}
+	flush_dcache_mmap_unlock(mapping);
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+/* Defined in arch/parisc/kernel/pacache.S */
+EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
+EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
+EXPORT_SYMBOL(flush_data_cache_local);
+EXPORT_SYMBOL(flush_kernel_icache_range_asm);
+
+/* Initial defaults; both thresholds are re-tuned at boot by
+ * parisc_setup_cache_timing() below.  Above the threshold a full
+ * cache/TLB flush is used instead of a ranged flush. */
+#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
+static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
+
+#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
+static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
+
+/*
+ * parisc_setup_cache_timing - tune the flush thresholds at boot.
+ *
+ * Times a whole-cache (and whole-TLB) flush against a ranged flush over
+ * the kernel image, then sets parisc_cache_flush_threshold and
+ * parisc_tlb_flush_threshold to the break-even size.  Timing reads
+ * mfctl(16) -- presumably the CR16 interval timer, TODO confirm.
+ */
+void __init parisc_setup_cache_timing(void)
+{
+	unsigned long rangetime, alltime;
+	unsigned long size, start;
+	unsigned long threshold;
+
+	alltime = mfctl(16);
+	flush_data_cache();
+	alltime = mfctl(16) - alltime;
+
+	size = (unsigned long)(_end - _text);
+	rangetime = mfctl(16);
+	flush_kernel_dcache_range((unsigned long)_text, size);
+	rangetime = mfctl(16) - rangetime;
+
+	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
+		alltime, size, rangetime);
+
+	/* Break-even size, clamped to the D-cache size. */
+	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
+	if (threshold > cache_info.dc_size)
+		threshold = cache_info.dc_size;
+	if (threshold)
+		parisc_cache_flush_threshold = threshold;
+	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
+		parisc_cache_flush_threshold/1024);
+
+	/* calculate TLB flush threshold */
+
+	/* On SMP machines, skip the TLB measure of kernel text which
+	 * has been mapped as huge pages. */
+	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
+		threshold = max(cache_info.it_size, cache_info.dt_size);
+		threshold *= PAGE_SIZE;
+		threshold /= num_online_cpus();
+		goto set_tlb_threshold;
+	}
+
+	alltime = mfctl(16);
+	flush_tlb_all();
+	alltime = mfctl(16) - alltime;
+
+	size = 0;
+	start = (unsigned long) _text;
+	rangetime = mfctl(16);
+	while (start < (unsigned long) _end) {
+		flush_tlb_kernel_range(start, start + PAGE_SIZE);
+		start += PAGE_SIZE;
+		size += PAGE_SIZE;
+	}
+	rangetime = mfctl(16) - rangetime;
+
+	printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
+		alltime, size, rangetime);
+
+	threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
+
+set_tlb_threshold:
+	if (threshold)
+		parisc_tlb_flush_threshold = threshold;
+	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
+		parisc_tlb_flush_threshold/1024);
+}
+
+extern void purge_kernel_dcache_page_asm(unsigned long);
+extern void clear_user_page_asm(void *, unsigned long);
+extern void copy_user_page_asm(void *, void *, unsigned long);
+
+/*
+ * Flush a page at a kernel-virtual address from the D-cache and purge
+ * its kernel TLB entry.  The TLB purge is done after the cache flush
+ * under the purge_tlb lock.
+ */
+void flush_kernel_dcache_page_addr(void *addr)
+{
+	unsigned long flags;
+
+	flush_kernel_dcache_page_asm(addr);
+	purge_tlb_start(flags);
+	pdtlb_kernel(addr);
+	purge_tlb_end(flags);
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
+
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+	struct page *pg)
+{
+       /* Copy using kernel mapping.  No coherency is needed (all in
+	  kunmap) for the `to' page.  However, the `from' page needs to
+	  be flushed through a mapping equivalent to the user mapping
+	  before it can be accessed through the kernel mapping. */
+	preempt_disable();
+	flush_dcache_page_asm(__pa(vfrom), vaddr);
+	copy_page_asm(vto, vfrom);
+	preempt_enable();
+}
+EXPORT_SYMBOL(copy_user_page);
+
+/* __flush_tlb_range()
+ *
+ * returns 1 if all TLBs were flushed.
+ */
+int __flush_tlb_range(unsigned long sid, unsigned long start,
+		      unsigned long end)
+{
+	unsigned long flags;
+
+	/* Large ranges: flush everything instead.  Skipped when IRQs are
+	 * disabled on SMP -- NOTE(review): presumably because
+	 * flush_tlb_all() may require cross-CPU work; confirm. */
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    end - start >= parisc_tlb_flush_threshold) {
+		flush_tlb_all();
+		return 1;
+	}
+
+	/* Purge TLB entries for small ranges using the pdtlb and
+	   pitlb instructions.  These instructions execute locally
+	   but cause a purge request to be broadcast to other TLBs.  */
+	if (likely(!split_tlb)) {
+		while (start < end) {
+			purge_tlb_start(flags);
+			mtsp(sid, 1);
+			pdtlb(start);
+			purge_tlb_end(flags);
+			start += PAGE_SIZE;
+		}
+		return 0;
+	}
+
+	/* split TLB case */
+	while (start < end) {
+		purge_tlb_start(flags);
+		mtsp(sid, 1);
+		pdtlb(start);
+		pitlb(start);
+		purge_tlb_end(flags);
+		start += PAGE_SIZE;
+	}
+	return 0;
+}
+
+/* on_each_cpu() callback: flush this CPU's local caches. */
+static void cacheflush_h_tmp_function(void *dummy)
+{
+	flush_cache_all_local();
+}
+
+/* Flush all caches on every online CPU. */
+void flush_cache_all(void)
+{
+	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
+}
+
+/* Sum of the sizes of all VMAs in @mm, in bytes. */
+static inline unsigned long mm_total_size(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+	unsigned long usize = 0;
+
+	for (vma = mm->mmap; vma; vma = vma->vm_next)
+		usize += vma->vm_end - vma->vm_start;
+	return usize;
+}
+
+/*
+ * Walk pgd -> pud -> pmd for @addr and return a pointer to the PTE,
+ * or NULL if any level is unpopulated.
+ */
+static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
+{
+	pte_t *ptep = NULL;
+
+	if (!pgd_none(*pgd)) {
+		pud_t *pud = pud_offset(pgd, addr);
+		if (!pud_none(*pud)) {
+			pmd_t *pmd = pmd_offset(pud, addr);
+			if (!pmd_none(*pmd))
+				ptep = pte_offset_map(pmd, addr);
+		}
+	}
+	return ptep;
+}
+
+/*
+ * flush_cache_mm - flush all cached data belonging to @mm.
+ *
+ * Three strategies, cheapest applicable wins:
+ *  1. mm larger than the threshold: flush everything.
+ *  2. @mm is the current address space (space id matches sr3): flush
+ *     each VMA by user virtual address.
+ *  3. foreign mm: walk the page tables and flush page by page via the
+ *     kernel's view of each physical page.
+ */
+void flush_cache_mm(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+	pgd_t *pgd;
+
+	/* Flushing the whole cache on each cpu takes forever on
+	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
+		if (mm->context)
+			flush_tlb_all();
+		flush_cache_all();
+		return;
+	}
+
+	if (mm->context == mfsp(3)) {
+		for (vma = mm->mmap; vma; vma = vma->vm_next) {
+			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
+			if (vma->vm_flags & VM_EXEC)
+				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
+		}
+		return;
+	}
+
+	pgd = mm->pgd;
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		unsigned long addr;
+
+		for (addr = vma->vm_start; addr < vma->vm_end;
+		     addr += PAGE_SIZE) {
+			unsigned long pfn;
+			pte_t *ptep = get_ptep(pgd, addr);
+			if (!ptep)
+				continue;
+			pfn = pte_pfn(*ptep);
+			if (!pfn_valid(pfn))
+				continue;
+			if (unlikely(mm->context))
+				flush_tlb_page(vma, addr);
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+		}
+	}
+}
+
+/*
+ * flush_cache_range - flush a user address range from the caches.
+ * Same three-way strategy as flush_cache_mm() above.
+ */
+void flush_cache_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end)
+{
+	pgd_t *pgd;
+	unsigned long addr;
+
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    end - start >= parisc_cache_flush_threshold) {
+		if (vma->vm_mm->context)
+			flush_tlb_range(vma, start, end);
+		flush_cache_all();
+		return;
+	}
+
+	if (vma->vm_mm->context == mfsp(3)) {
+		flush_user_dcache_range_asm(start, end);
+		if (vma->vm_flags & VM_EXEC)
+			flush_user_icache_range_asm(start, end);
+		flush_tlb_range(vma, start, end);
+		return;
+	}
+
+	pgd = vma->vm_mm->pgd;
+	/* NOTE(review): this loop walks the whole VMA rather than just
+	 * [start, end) -- an over-flush, conservative but correct.
+	 * Kept as-is to match the upstream 4.14 baseline. */
+	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
+		unsigned long pfn;
+		pte_t *ptep = get_ptep(pgd, addr);
+		if (!ptep)
+			continue;
+		pfn = pte_pfn(*ptep);
+		if (pfn_valid(pfn)) {
+			if (unlikely(vma->vm_mm->context))
+				flush_tlb_page(vma, addr);
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+		}
+	}
+}
+
+/* Flush a single user page (TLB entry first, then cache alias). */
+void
+flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
+{
+	if (pfn_valid(pfn)) {
+		if (likely(vma->vm_mm->context))
+			flush_tlb_page(vma, vmaddr);
+		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+	}
+}
+
+/*
+ * Write back a vmap'd kernel range to memory (large ranges fall back
+ * to a full data-cache flush), then purge its kernel TLB entries.
+ */
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+	unsigned long end = start + size;
+
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    (unsigned long)size >= parisc_cache_flush_threshold) {
+		flush_tlb_kernel_range(start, end);
+		flush_data_cache();
+		return;
+	}
+
+	flush_kernel_dcache_range_asm(start, end);
+	flush_tlb_kernel_range(start, end);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+/*
+ * Like flush_kernel_vmap_range(), but uses the purge variant for the
+ * ranged path (NOTE(review): purge_kernel_dcache_range_asm presumably
+ * discards lines without writeback -- see pacache.S).
+ */
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+	unsigned long end = start + size;
+
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    (unsigned long)size >= parisc_cache_flush_threshold) {
+		flush_tlb_kernel_range(start, end);
+		flush_data_cache();
+		return;
+	}
+
+	purge_kernel_dcache_range_asm(start, end);
+	flush_tlb_kernel_range(start, end);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/compat_audit.c b/src/kernel/linux/v4.14/arch/parisc/kernel/compat_audit.c
new file mode 100644
index 0000000..20c39c9
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/compat_audit.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Syscall classification tables for 32-bit compat auditing.  Each table
+ * is a ~0U-terminated list of syscall numbers pulled in from the
+ * asm-generic audit class headers. */
+#include <asm/unistd.h>
+
+unsigned int parisc32_dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+unsigned int parisc32_chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+unsigned int parisc32_write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+unsigned int parisc32_read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+unsigned int parisc32_signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+/* Map a compat syscall number to its audit class code. */
+int parisc32_classify_syscall(unsigned syscall)
+{
+	switch (syscall) {
+	case __NR_open:
+		return 2;
+	case __NR_openat:
+		return 3;
+	case __NR_execve:
+		return 5;
+	default:
+		return 1;
+	}
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/drivers.c b/src/kernel/linux/v4.14/arch/parisc/kernel/drivers.c
new file mode 100644
index 0000000..6a71d31
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/drivers.c
@@ -0,0 +1,919 @@
+/*
+ * drivers.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (c) 1999 The Puffin Group
+ * Copyright (c) 2001 Matthew Wilcox for Hewlett Packard
+ * Copyright (c) 2001 Helge Deller <deller@gmx.de>
+ * Copyright (c) 2001,2002 Ryan Bradetich 
+ * Copyright (c) 2004-2005 Thibaut VARENE <varenet@parisc-linux.org>
+ * 
+ * The file handles registering devices and drivers, then matching them.
+ * It's the closest we get to a dating agency.
+ *
+ * If you're thinking about modifying this file, here are some gotchas to
+ * bear in mind:
+ *  - 715/Mirage device paths have a dummy device between Lasi and its children
+ *  - The EISA adapter may show up as a sibling or child of Wax
+ *  - Dino has an optionally functional serial port.  If firmware enables it,
+ *    it shows up as a child of Dino.  If firmware disables it, the buswalk
+ *    finds it and it shows up as a child of Cujo
+ *  - Dino has both parisc and pci devices as children
+ *  - parisc devices are discovered in a random order, including children
+ *    before parents in some cases.
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/export.h>
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/pdc.h>
+#include <asm/parisc-device.h>
+
+/* See comments in include/asm-parisc/pci.h */
+const struct dma_map_ops *hppa_dma_ops __read_mostly;
+EXPORT_SYMBOL(hppa_dma_ops);
+
+static struct device root = {
+	.init_name = "parisc",
+};
+
+/* Return non-zero unless @dev is a parisc device marked HPHW_FAULTY
+ * (a placeholder tree node with no real hardware behind it yet). */
+static inline int check_dev(struct device *dev)
+{
+	if (dev->bus == &parisc_bus_type) {
+		struct parisc_device *pdev;
+		pdev = to_parisc_device(dev);
+		return pdev->id.hw_type != HPHW_FAULTY;
+	}
+	return 1;
+}
+
+static struct device *
+parse_tree_node(struct device *parent, int index, struct hardware_path *modpath);
+
+/* Closure passed through device_for_each_child() for recursive walks. */
+struct recurse_struct {
+	void * obj;
+	int (*fn)(struct device *, void *);
+};
+
+/* Visit @dev, then recurse into its children unless fn() stopped the
+ * walk by returning non-zero. */
+static int descend_children(struct device * dev, void * data)
+{
+	struct recurse_struct * recurse_data = (struct recurse_struct *)data;
+
+	if (recurse_data->fn(dev, recurse_data->obj))
+		return 1;
+	else
+		return device_for_each_child(dev, recurse_data, descend_children);
+}
+
+/**
+ *	for_each_padev - Iterate over all devices in the tree
+ *	@fn:	Function to call for each device.
+ *	@data:	Data to pass to the called function.
+ *
+ *	This performs a depth-first traversal of the tree, calling the
+ *	function passed for each node.  It calls the function for parents
+ *	before children.
+ */
+
+static int for_each_padev(int (*fn)(struct device *, void *), void * data)
+{
+	struct recurse_struct recurse_data = {
+		.obj	= data,
+		.fn	= fn,
+	};
+	return device_for_each_child(&root, &recurse_data, descend_children);
+}
+
+/**
+ * match_device - Report whether this driver can handle this device
+ * @driver: the PA-RISC driver to try
+ * @dev: the PA-RISC device to try
+ *
+ * Returns 1 on the first id-table entry whose sversion, hw_type and
+ * hversion all match (each field may be wildcarded), 0 otherwise.
+ * The table is terminated by an entry with sversion == 0.
+ */
+static int match_device(struct parisc_driver *driver, struct parisc_device *dev)
+{
+	const struct parisc_device_id *ids;
+
+	for (ids = driver->id_table; ids->sversion; ids++) {
+		if ((ids->sversion != SVERSION_ANY_ID) &&
+		    (ids->sversion != dev->id.sversion))
+			continue;
+
+		if ((ids->hw_type != HWTYPE_ANY_ID) &&
+		    (ids->hw_type != dev->id.hw_type))
+			continue;
+
+		if ((ids->hversion != HVERSION_ANY_ID) &&
+		    (ids->hversion != dev->id.hversion))
+			continue;
+
+		return 1;
+	}
+	return 0;
+}
+
+/* Bus probe hook: call the parisc driver's probe and, on success,
+ * record the bound driver in the parisc_device. */
+static int parisc_driver_probe(struct device *dev)
+{
+	int rc;
+	struct parisc_device *pa_dev = to_parisc_device(dev);
+	struct parisc_driver *pa_drv = to_parisc_driver(dev->driver);
+
+	rc = pa_drv->probe(pa_dev);
+
+	if (!rc)
+		pa_dev->driver = pa_drv;
+
+	return rc;
+}
+
+/* Bus remove hook: forward to the driver's optional remove callback. */
+static int parisc_driver_remove(struct device *dev)
+{
+	struct parisc_device *pa_dev = to_parisc_device(dev);
+	struct parisc_driver *pa_drv = to_parisc_driver(dev->driver);
+	if (pa_drv->remove)
+		pa_drv->remove(pa_dev);
+
+	return 0;
+}
+	
+	
+
+/**
+ * register_parisc_driver - Register this driver if it can handle a device
+ * @driver: the PA-RISC driver to try
+ *
+ * Returns 1 (not a -errno) on the two BUG cases below, otherwise the
+ * result of driver_register().
+ */
+int register_parisc_driver(struct parisc_driver *driver)
+{
+	/* FIXME: we need this because apparently the sti
+	 * driver can be registered twice */
+	if(driver->drv.name) {
+		printk(KERN_WARNING 
+		       "BUG: skipping previously registered driver %s\n",
+		       driver->name);
+		return 1;
+	}
+
+	if (!driver->probe) {
+		printk(KERN_WARNING 
+		       "BUG: driver %s has no probe routine\n",
+		       driver->name);
+		return 1;
+	}
+
+	driver->drv.bus = &parisc_bus_type;
+
+	/* We install our own probe and remove routines */
+	WARN_ON(driver->drv.probe != NULL);
+	WARN_ON(driver->drv.remove != NULL);
+
+	driver->drv.name = driver->name;
+
+	return driver_register(&driver->drv);
+}
+EXPORT_SYMBOL(register_parisc_driver);
+
+
+/* Accumulator for count_parisc_driver()'s tree walk. */
+struct match_count {
+	struct parisc_driver * driver;
+	int count;
+};
+
+/* Tree-walk callback: bump the count for every non-faulty device the
+ * driver's id table matches.  Always returns 0 so the walk continues. */
+static int match_and_count(struct device * dev, void * data)
+{
+	struct match_count * m = data;
+	struct parisc_device * pdev = to_parisc_device(dev);
+
+	if (check_dev(dev)) {
+		if (match_device(m->driver, pdev))
+			m->count++;
+	}
+	return 0;
+}
+
+/**
+ * count_parisc_driver - count # of devices this driver would match
+ * @driver: the PA-RISC driver to try
+ *
+ * Use by IOMMU support to "guess" the right size IOPdir.
+ * Formula is something like memsize/(num_iommu * entry_size).
+ */
+int count_parisc_driver(struct parisc_driver *driver)
+{
+	struct match_count m = {
+		.driver	= driver,
+		.count	= 0,
+	};
+
+	for_each_padev(match_and_count, &m);
+
+	return m.count;
+}
+
+
+
+/**
+ * unregister_parisc_driver - Unregister this driver from the list of drivers
+ * @driver: the PA-RISC driver to unregister
+ *
+ * Always returns 0.
+ */
+int unregister_parisc_driver(struct parisc_driver *driver)
+{
+	driver_unregister(&driver->drv);
+	return 0;
+}
+EXPORT_SYMBOL(unregister_parisc_driver);
+
+/* Search key/result pair for the find-by-HPA tree walk. */
+struct find_data {
+	unsigned long hpa;
+	struct parisc_device * dev;
+};
+
+/* Tree-walk callback: stop (return 1) when a non-faulty device whose
+ * HPA start matches the key is found, recording it in @data. */
+static int find_device(struct device * dev, void * data)
+{
+	struct parisc_device * pdev = to_parisc_device(dev);
+	struct find_data * d = (struct find_data*)data;
+
+	if (check_dev(dev)) {
+		if (pdev->hpa.start == d->hpa) {
+			d->dev = pdev;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/* Return the parisc device whose hard physical address is @hpa, or
+ * NULL if no registered device owns that address. */
+static struct parisc_device *find_device_by_addr(unsigned long hpa)
+{
+	struct find_data d = {
+		.hpa	= hpa,
+	};
+	int ret;
+
+	ret = for_each_padev(find_device, &d);
+	return ret ? d.dev : NULL;
+}
+
+/**
+ * find_pa_parent_type - Find a parent of a specific type
+ * @dev: The device to start searching from
+ * @type: The device type to search for.
+ *
+ * Walks up the device tree looking for a device of the specified type.
+ * If it finds it, it returns it.  If not, it returns NULL.
+ */
+const struct parisc_device *
+find_pa_parent_type(const struct parisc_device *padev, int type)
+{
+	const struct device *dev = &padev->dev;
+	while (dev != &root) {
+		struct parisc_device *candidate = to_parisc_device(dev);
+		if (candidate->id.hw_type == type)
+			return candidate;
+		dev = dev->parent;
+	}
+
+	return NULL;
+}
+
+/*
+ * get_node_path fills in @path with the firmware path to the device.
+ * Note that if @node is a parisc device, we don't fill in the 'mod' field.
+ * This is because both callers pass the parent and fill in the mod
+ * themselves.  If @node is a PCI device, we do fill it in, even though this
+ * is inconsistent.
+ */
+static void get_node_path(struct device *dev, struct hardware_path *path)
+{
+	/* bc[] is filled right-to-left from index 5; unused slots stay -1. */
+	int i = 5;
+	memset(&path->bc, -1, 6);
+
+	if (dev_is_pci(dev)) {
+		unsigned int devfn = to_pci_dev(dev)->devfn;
+		path->mod = PCI_FUNC(devfn);
+		path->bc[i--] = PCI_SLOT(devfn);
+		dev = dev->parent;
+	}
+
+	while (dev != &root) {
+		if (dev_is_pci(dev)) {
+			unsigned int devfn = to_pci_dev(dev)->devfn;
+			path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5);
+		} else if (dev->bus == &parisc_bus_type) {
+			path->bc[i--] = to_parisc_device(dev)->hw_path;
+		}
+		dev = dev->parent;
+	}
+}
+
+/* Format @path as "b/c/.../mod" into @output; returns the position just
+ * past the formatted text (suitable for appending). */
+static char *print_hwpath(struct hardware_path *path, char *output)
+{
+	int i;
+	for (i = 0; i < 6; i++) {
+		if (path->bc[i] == -1)
+			continue;
+		output += sprintf(output, "%u/", (unsigned char) path->bc[i]);
+	}
+	output += sprintf(output, "%u", (unsigned char) path->mod);
+	return output;
+}
+
+/**
+ * print_pa_hwpath - Returns hardware path for PA devices
+ * dev: The device to return the path for
+ * output: Pointer to a previously-allocated array to place the path in.
+ *
+ * This function fills in the output array with a human-readable path
+ * to a PA device.  This string is compatible with that used by PDC, and
+ * may be printed on the outside of the box.
+ */
+char *print_pa_hwpath(struct parisc_device *dev, char *output)
+{
+	struct hardware_path path;
+
+	/* Build the path from the parent, then append our own hw_path
+	 * as the mod field (see the get_node_path() comment). */
+	get_node_path(dev->dev.parent, &path);
+	path.mod = dev->hw_path;
+	return print_hwpath(&path, output);
+}
+EXPORT_SYMBOL(print_pa_hwpath);
+
+#if defined(CONFIG_PCI) || defined(CONFIG_ISA)
+/**
+ * get_pci_node_path - Determines the hardware path for a PCI device
+ * @pdev: The device to return the path for
+ * @path: Pointer to a previously-allocated array to place the path in.
+ *
+ * This function fills in the hardware_path structure with the route to
+ * the specified PCI device.  This structure is suitable for passing to
+ * PDC calls.
+ */
+void get_pci_node_path(struct pci_dev *pdev, struct hardware_path *path)
+{
+	get_node_path(&pdev->dev, path);
+}
+EXPORT_SYMBOL(get_pci_node_path);
+
+/**
+ * print_pci_hwpath - Returns hardware path for PCI devices
+ * dev: The device to return the path for
+ * output: Pointer to a previously-allocated array to place the path in.
+ *
+ * This function fills in the output array with a human-readable path
+ * to a PCI device.  This string is compatible with that used by PDC, and
+ * may be printed on the outside of the box.
+ */
+char *print_pci_hwpath(struct pci_dev *dev, char *output)
+{
+	struct hardware_path path;
+
+	get_pci_node_path(dev, &path);
+	return print_hwpath(&path, output);
+}
+EXPORT_SYMBOL(print_pci_hwpath);
+
+#endif /* defined(CONFIG_PCI) || defined(CONFIG_ISA) */
+
+/* Derive the device's bus id ("b:c:...:mod") from its firmware path and
+ * install it with dev_set_name().  name[28] bounds six "255:" components
+ * plus the final mod -- NOTE(review): sized to the format, confirm. */
+static void setup_bus_id(struct parisc_device *padev)
+{
+	struct hardware_path path;
+	char name[28];
+	char *output = name;
+	int i;
+
+	get_node_path(padev->dev.parent, &path);
+
+	for (i = 0; i < 6; i++) {
+		if (path.bc[i] == -1)
+			continue;
+		output += sprintf(output, "%u:", (unsigned char) path.bc[i]);
+	}
+	sprintf(output, "%u", (unsigned char) padev->hw_path);
+	dev_set_name(&padev->dev, name);
+}
+
+/* Allocate and register a placeholder parisc device for path element
+ * @id under @parent.  The node starts as HPHW_FAULTY until real IODC
+ * data is filled in by alloc_pa_dev().  Returns NULL on allocation or
+ * registration failure. */
+struct parisc_device * create_tree_node(char id, struct device *parent)
+{
+	struct parisc_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	dev->hw_path = id;
+	dev->id.hw_type = HPHW_FAULTY;
+
+	dev->dev.parent = parent;
+	setup_bus_id(dev);
+
+	dev->dev.bus = &parisc_bus_type;
+	dev->dma_mask = 0xffffffffUL;	/* PARISC devices are 32-bit */
+
+	/* make the generic dma mask a pointer to the parisc one */
+	dev->dev.dma_mask = &dev->dma_mask;
+	dev->dev.coherent_dma_mask = dev->dma_mask;
+	if (device_register(&dev->dev)) {
+		kfree(dev);
+		return NULL;
+	}
+
+	return dev;
+}
+
+/* Key/result pair for the match-by-path-element child search. */
+struct match_id_data {
+	char id;
+	struct parisc_device * dev;
+};
+
+/* Child-walk callback: stop when a child's hw_path equals the key. */
+static int match_by_id(struct device * dev, void * data)
+{
+	struct parisc_device * pdev = to_parisc_device(dev);
+	struct match_id_data * d = data;
+
+	if (pdev->hw_path == d->id) {
+		d->dev = pdev;
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * alloc_tree_node - returns a device entry in the iotree
+ * @parent: the parent node in the tree
+ * @id: the element of the module path for this entry
+ *
+ * Checks all the children of @parent for a matching @id.  If none
+ * found, it allocates a new device and returns it.
+ */
+static struct parisc_device * __init alloc_tree_node(
+			struct device *parent, char id)
+{
+	struct match_id_data d = {
+		.id = id,
+	};
+	if (device_for_each_child(parent, &d, match_by_id))
+		return d.dev;
+	else
+		return create_tree_node(id, parent);
+}
+
+/* Walk/extend the iotree along @modpath, creating placeholder nodes as
+ * needed, and return the leaf node for modpath->mod.
+ * NOTE(review): alloc_tree_node() can return NULL on allocation failure
+ * and &NULL->dev would be dereferenced here -- upstream hardening worth
+ * tracking, left unchanged in this baseline import. */
+static struct parisc_device *create_parisc_device(struct hardware_path *modpath)
+{
+	int i;
+	struct device *parent = &root;
+	for (i = 0; i < 6; i++) {
+		if (modpath->bc[i] == -1)
+			continue;
+		parent = &alloc_tree_node(parent, modpath->bc[i])->dev;
+	}
+	return alloc_tree_node(parent, modpath->mod);
+}
+
+/*
+ * alloc_pa_dev - turn a discovered module into a real parisc device.
+ * @hpa: hard physical address of the module.
+ * @mod_path: firmware path to the module.
+ *
+ * Reads the module's IODC data, fills in the (previously HPHW_FAULTY)
+ * tree node for @mod_path, sizes and claims its HPA resource.  Returns
+ * NULL if the device already exists, IODC read fails, or the path is
+ * already occupied by a real device.
+ */
+struct parisc_device *
+alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
+{
+	int status;
+	unsigned long bytecnt;
+	u8 iodc_data[32];
+	struct parisc_device *dev;
+	const char *name;
+
+	/* Check to make sure this device has not already been added - Ryan */
+	if (find_device_by_addr(hpa) != NULL)
+		return NULL;
+
+	status = pdc_iodc_read(&bytecnt, hpa, 0, &iodc_data, 32);
+	if (status != PDC_OK)
+		return NULL;
+
+	dev = create_parisc_device(mod_path);
+	if (dev->id.hw_type != HPHW_FAULTY) {
+		printk(KERN_ERR "Two devices have hardware path [%s].  "
+				"IODC data for second device: "
+				"%02x%02x%02x%02x%02x%02x\n"
+				"Rearranging GSC cards sometimes helps\n",
+			parisc_pathname(dev), iodc_data[0], iodc_data[1],
+			iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]);
+		return NULL;
+	}
+
+	/* Decode hw_type/hversion/sversion from the raw IODC bytes. */
+	dev->id.hw_type = iodc_data[3] & 0x1f;
+	dev->id.hversion = (iodc_data[0] << 4) | ((iodc_data[1] & 0xf0) >> 4);
+	dev->id.hversion_rev = iodc_data[1] & 0x0f;
+	dev->id.sversion = ((iodc_data[4] & 0x0f) << 16) |
+			(iodc_data[5] << 8) | iodc_data[6];
+	dev->hpa.name = parisc_pathname(dev);
+	dev->hpa.start = hpa;
+	/* This is awkward.  The STI spec says that gfx devices may occupy
+	 * 32MB or 64MB.  Unfortunately, we don't know how to tell whether
+	 * it's the former or the latter.  Assumptions either way can hurt us.
+	 */
+	if (hpa == 0xf4000000 || hpa == 0xf8000000) {
+		dev->hpa.end = hpa + 0x03ffffff;
+	} else if (hpa == 0xf6000000 || hpa == 0xfa000000) {
+		dev->hpa.end = hpa + 0x01ffffff;
+	} else {
+		dev->hpa.end = hpa + 0xfff;
+	}
+	dev->hpa.flags = IORESOURCE_MEM;
+	name = parisc_hardware_description(&dev->id);
+	if (name) {
+		strlcpy(dev->name, name, sizeof(dev->name));
+	}
+
+	/* Silently fail things like mouse ports which are subsumed within
+	 * the keyboard controller
+	 */
+	if ((hpa & 0xfff) == 0 && insert_resource(&iomem_resource, &dev->hpa))
+		printk("Unable to claim HPA %lx for device %s\n",
+				hpa, name);
+
+	return dev;
+}
+
+/* Bus match hook: delegate to the id-table matcher above. */
+static int parisc_generic_match(struct device *dev, struct device_driver *drv)
+{
+	return match_device(to_parisc_driver(drv), to_parisc_device(dev));
+}
+
+/* Format the modalias string ("parisc:tXXhvXXXXrevXXsvXXXXXXXX\n")
+ * into @buf; returns the number of characters written. */
+static ssize_t make_modalias(struct device *dev, char *buf)
+{
+	const struct parisc_device *padev = to_parisc_device(dev);
+	const struct parisc_device_id *id = &padev->id;
+
+	return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
+		(u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
+		(u32)id->sversion);
+}
+
+/* Uevent hook: export PARISC_NAME and MODALIAS to userspace.
+ * modalias[40] must hold make_modalias()'s fixed-width output. */
+static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	const struct parisc_device *padev;
+	char modalias[40];
+
+	if (!dev)
+		return -ENODEV;
+
+	padev = to_parisc_device(dev);
+	if (!padev)
+		return -ENODEV;
+
+	if (add_uevent_var(env, "PARISC_NAME=%s", padev->name))
+		return -ENOMEM;
+
+	make_modalias(dev, modalias);
+	if (add_uevent_var(env, "MODALIAS=%s", modalias))
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* Generate a read-only sysfs attribute printing one parisc_device field. */
+#define pa_dev_attr(name, field, format_string)				\
+static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf)		\
+{									\
+	struct parisc_device *padev = to_parisc_device(dev);		\
+	return sprintf(buf, format_string, padev->field);		\
+}									\
+static DEVICE_ATTR_RO(name);
+
+#define pa_dev_attr_id(field, format) pa_dev_attr(field, id.field, format)
+
+pa_dev_attr(irq, irq, "%u\n");
+pa_dev_attr_id(hw_type, "0x%02x\n");
+pa_dev_attr(rev, id.hversion_rev, "0x%x\n");
+pa_dev_attr_id(hversion, "0x%03x\n");
+pa_dev_attr_id(sversion, "0x%05x\n");
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return make_modalias(dev, buf);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *parisc_device_attrs[] = {
+	&dev_attr_irq.attr,
+	&dev_attr_hw_type.attr,
+	&dev_attr_rev.attr,
+	&dev_attr_hversion.attr,
+	&dev_attr_sversion.attr,
+	&dev_attr_modalias.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(parisc_device);
+
+/* The PA-RISC bus: wires the match/uevent/probe/remove hooks above into
+ * the generic driver model. */
+struct bus_type parisc_bus_type = {
+	.name = "parisc",
+	.match = parisc_generic_match,
+	.uevent = parisc_uevent,
+	.dev_groups = parisc_device_groups,
+	.probe = parisc_driver_probe,
+	.remove = parisc_driver_remove,
+};
+
+/**
+ * register_parisc_device - Locate a driver to manage this device.
+ * @dev: The parisc device.
+ *
+ * Search the driver list for a driver that is willing to manage
+ * this device.
+ *
+ * NOTE(review): despite the kernel-doc, this function registers
+ * nothing -- it only reports (1/0) whether @dev already has a bound
+ * driver; the driver core does the actual matching.
+ */
+int register_parisc_device(struct parisc_device *dev)
+{
+	if (!dev)
+		return 0;
+
+	if (dev->driver)
+		return 1;
+
+	return 0;
+}
+
+/**
+ * match_pci_device - Matches a pci device against a given hardware path
+ * entry.
+ * @dev: the generic device (known to be contained by a pci_dev).
+ * @index: the current BC index
+ * @modpath: the hardware path.
+ * @return: true if the device matches the hardware path.
+ */
+static int match_pci_device(struct device *dev, int index,
+		struct hardware_path *modpath)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int id;
+
+	if (index == 5) {
+		/* we are at the end of the path, and on the actual device */
+		unsigned int devfn = pdev->devfn;
+		return ((modpath->bc[5] == PCI_SLOT(devfn)) &&
+					(modpath->mod == PCI_FUNC(devfn)));
+	}
+
+	/* index might be out of bounds for bc[] */
+	if (index >= 6)
+		return 0;
+
+	/* Interior path element: slot in the low bits, function above. */
+	id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
+	return (modpath->bc[index] == id);
+}
+
+/**
+ * match_parisc_device - Matches a parisc device against a given hardware
+ * path entry.
+ * @dev: the generic device (known to be contained by a parisc_device).
+ * @index: the current BC index
+ * @modpath: the hardware path.
+ * @return: true if the device matches the hardware path.
+ */
+static int match_parisc_device(struct device *dev, int index,
+		struct hardware_path *modpath)
+{
+	struct parisc_device *curr = to_parisc_device(dev);
+	/* index 6 means the final module element rather than a BC slot. */
+	char id = (index == 6) ? modpath->mod : modpath->bc[index];
+
+	return (curr->hw_path == id);
+}
+
+/* Walk state for parse_tree_node(): path element to match plus result. */
+struct parse_tree_data {
+	int index;
+	struct hardware_path * modpath;
+	struct device * dev;
+};
+
+/* Walk callback: match @dev against path element @index according to
+ * its bus type; bus-less nodes are bridges and are recursed into.
+ * Returns non-zero (stop) once a match has been recorded. */
+static int check_parent(struct device * dev, void * data)
+{
+	struct parse_tree_data * d = data;
+
+	if (check_dev(dev)) {
+		if (dev->bus == &parisc_bus_type) {
+			if (match_parisc_device(dev, d->index, d->modpath))
+				d->dev = dev;
+		} else if (dev_is_pci(dev)) {
+			if (match_pci_device(dev, d->index, d->modpath))
+				d->dev = dev;
+		} else if (dev->bus == NULL) {
+			/* we are on a bus bridge */
+			struct device *new = parse_tree_node(dev, d->index, d->modpath);
+			if (new)
+				d->dev = new;
+		}
+	}
+	return d->dev != NULL;
+}
+
+/**
+ * parse_tree_node - returns a device entry in the iotree
+ * @parent: the parent node in the tree
+ * @index: the current BC index
+ * @modpath: the hardware_path struct to match a device against
+ * @return: The corresponding device if found, NULL otherwise.
+ *
+ * Checks all the children of @parent for a matching @id.  If none
+ * found, it returns NULL.
+ */
+static struct device *
+parse_tree_node(struct device *parent, int index, struct hardware_path *modpath)
+{
+	struct parse_tree_data d = {
+		.index          = index,
+		.modpath        = modpath,
+	};
+
+	struct recurse_struct recurse_data = {
+		.obj	= &d,
+		.fn	= check_parent,
+	};
+
+	/* Result (if any) is left in d.dev; the walk's return value is
+	 * deliberately ignored. */
+	if (device_for_each_child(parent, &recurse_data, descend_children))
+		/* nothing */;
+
+	return d.dev;
+}
+
+/**
+ * hwpath_to_device - Finds the generic device corresponding to a given hardware path.
+ * @modpath: the hardware path.
+ * @return: The target device, NULL if not found.
+ */
+struct device *hwpath_to_device(struct hardware_path *modpath)
+{
+	int i;
+	struct device *parent = &root;
+	for (i = 0; i < 6; i++) {
+		if (modpath->bc[i] == -1)
+			continue;
+		parent = parse_tree_node(parent, i, modpath);
+		if (!parent)
+			return NULL;
+	}
+	if (dev_is_pci(parent)) /* pci devices already parse MOD */
+		return parent;
+	else
+		return parse_tree_node(parent, 6, modpath);
+}
+EXPORT_SYMBOL(hwpath_to_device);
+
+/**
+ * device_to_hwpath - Populates the hwpath corresponding to the given device.
+ * @param dev the target device
+ * @param path pointer to a previously allocated hwpath struct to be filled in
+ */
+void device_to_hwpath(struct device *dev, struct hardware_path *path)
+{
+	struct parisc_device *padev;
+	if (dev->bus == &parisc_bus_type) {
+		padev = to_parisc_device(dev);
+		get_node_path(dev->parent, path);
+		path->mod = padev->hw_path;
+	} else if (dev_is_pci(dev)) {
+		get_node_path(dev, path);
+	}
+}
+EXPORT_SYMBOL(device_to_hwpath);
+
+#define BC_PORT_MASK 0x8
+#define BC_LOWER_PORT 0x8
+
+#define BUS_CONVERTER(dev) \
+        ((dev->id.hw_type == HPHW_IOA) || (dev->id.hw_type == HPHW_BCPORT))
+
+#define IS_LOWER_PORT(dev) \
+        ((gsc_readl(dev->hpa.start + offsetof(struct bc_module, io_status)) \
+                & BC_PORT_MASK) == BC_LOWER_PORT)
+
+#define MAX_NATIVE_DEVICES 64
+#define NATIVE_DEVICE_OFFSET 0x1000
+
+#define FLEX_MASK 	F_EXTEND(0xfffc0000)
+#define IO_IO_LOW	offsetof(struct bc_module, io_io_low)
+#define IO_IO_HIGH	offsetof(struct bc_module, io_io_high)
+#define READ_IO_IO_LOW(dev)  (unsigned long)(signed int)gsc_readl(dev->hpa.start + IO_IO_LOW)
+#define READ_IO_IO_HIGH(dev) (unsigned long)(signed int)gsc_readl(dev->hpa.start + IO_IO_HIGH)
+
+static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
+                            struct device *parent);
+
/*
 * walk_lower_bus - probe the bus hanging below a bus converter.
 * @dev: the candidate bus-converter device.
 *
 * Returns immediately unless @dev is a bus converter (IOA or BC port)
 * whose io_status register marks it as the upper port.  The address
 * range of the lower bus is read from the converter's registers and
 * then scanned with walk_native_bus().
 */
void walk_lower_bus(struct parisc_device *dev)
{
	unsigned long io_io_low, io_io_high;

	if (!BUS_CONVERTER(dev) || IS_LOWER_PORT(dev))
		return;

	if (dev->id.hw_type == HPHW_IOA) {
		/* IOA: register value shifted left 16 bits forms the bus base;
		 * the window always covers the fixed 64-device range. */
		io_io_low = (unsigned long)(signed int)(READ_IO_IO_LOW(dev) << 16);
		io_io_high = io_io_low + MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET;
	} else {
		/* BC port: round the advertised low/high out to FLEX boundaries */
		io_io_low = (READ_IO_IO_LOW(dev) + ~FLEX_MASK) & FLEX_MASK;
		io_io_high = (READ_IO_IO_HIGH(dev)+ ~FLEX_MASK) & FLEX_MASK;
	}

	walk_native_bus(io_io_low, io_io_high, &dev->dev);
}
+
+/**
+ * walk_native_bus -- Probe a bus for devices
+ * @io_io_low: Base address of this bus.
+ * @io_io_high: Last address of this bus.
+ * @parent: The parent bus device.
+ * 
+ * A native bus (eg Runway or GSC) may have up to 64 devices on it,
+ * spaced at intervals of 0x1000 bytes.  PDC may not inform us of these
+ * devices, so we have to probe for them.  Unfortunately, we may find
+ * devices which are not physically connected (such as extra serial &
+ * keyboard ports).  This problem is not yet solved.
+ */
+static void __init walk_native_bus(unsigned long io_io_low,
+	unsigned long io_io_high, struct device *parent)
+{
+	int i, devices_found = 0;
+	unsigned long hpa = io_io_low;
+	struct hardware_path path;
+
+	get_node_path(parent, &path);
+	do {
+		for(i = 0; i < MAX_NATIVE_DEVICES; i++, hpa += NATIVE_DEVICE_OFFSET) {
+			struct parisc_device *dev;
+
+			/* Was the device already added by Firmware? */
+			dev = find_device_by_addr(hpa);
+			if (!dev) {
+				path.mod = i;
+				dev = alloc_pa_dev(hpa, &path);
+				if (!dev)
+					continue;
+
+				register_parisc_device(dev);
+				devices_found++;
+			}
+			walk_lower_bus(dev);
+		}
+	} while(!devices_found && hpa < io_io_high);
+}
+
/* Fixed physical base of the central bus (sign-extended on 64-bit) */
#define CENTRAL_BUS_ADDR F_EXTEND(0xfff80000)

/**
 * walk_central_bus - Find devices attached to the central bus
 *
 * PDC doesn't tell us about all devices in the system.  This routine
 * finds devices connected to the central bus.
 */
void walk_central_bus(void)
{
	walk_native_bus(CENTRAL_BUS_ADDR,
			CENTRAL_BUS_ADDR + (MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET),
			&root);
}
+
+static void print_parisc_device(struct parisc_device *dev)
+{
+	char hw_path[64];
+	static int count;
+
+	print_pa_hwpath(dev, hw_path);
+	printk(KERN_INFO "%d. %s at 0x%p [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+		++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
+		dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
+
+	if (dev->num_addrs) {
+		int k;
+		pr_cont(", additional addresses: ");
+		for (k = 0; k < dev->num_addrs; k++)
+			pr_cont("0x%lx ", dev->addr[k]);
+	}
+	pr_cont("\n");
+}
+
+/**
+ * init_parisc_bus - Some preparation to be done before inventory
+ */
+void init_parisc_bus(void)
+{
+	if (bus_register(&parisc_bus_type))
+		panic("Could not register PA-RISC bus type\n");
+	if (device_register(&root))
+		panic("Could not register PA-RISC root device\n");
+	get_device(&root);
+}
+
+
/*
 * print_one_device - for_each_padev() callback printing a single device.
 * Always returns 0 so the iteration visits every device.
 */
static int print_one_device(struct device * dev, void * data)
{
	if (check_dev(dev))
		print_parisc_device(to_parisc_device(dev));

	return 0;
}
+
+/**
+ * print_parisc_devices - Print out a list of devices found in this system
+ */
+void print_parisc_devices(void)
+{
+	for_each_padev(print_one_device, NULL);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/entry.S b/src/kernel/linux/v4.14/arch/parisc/kernel/entry.S
new file mode 100644
index 0000000..843825a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/entry.S
@@ -0,0 +1,2282 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * kernel entry points (interruptions, system call wrappers)
+ *  Copyright (C) 1999,2000 Philipp Rumpf 
+ *  Copyright (C) 1999 SuSE GmbH Nuernberg 
+ *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
+ *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/asm-offsets.h>
+
+/* we have the following possibilities to act on an interruption:
+ *  - handle in assembly and use shadowed registers only
+ *  - save registers to kernel stack and handle in assembly or C */
+
+
+#include <asm/psw.h>
+#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
+#include <asm/assembly.h>	/* for LDREG/STREG defines */
+#include <asm/pgtable.h>
+#include <asm/signal.h>
+#include <asm/unistd.h>
+#include <asm/ldcw.h>
+#include <asm/thread_info.h>
+
+#include <linux/linkage.h>
+
+#ifdef CONFIG_64BIT
+	.level 2.0w
+#else
+	.level 2.0
+#endif
+
	.import		pa_tlb_lock,data
	/* Load the physical address of pa_tlb_lock into \reg.  When the
	 * ldcw alignment requirement exceeds 4 bytes, round the address
	 * up to the required __PA_LDCW_ALIGNMENT boundary. */
	.macro  load_pa_tlb_lock reg
#if __PA_LDCW_ALIGNMENT > 4
	load32	PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
	depi	0,31,__PA_LDCW_ALIGN_ORDER, \reg
#else
	load32	PA(pa_tlb_lock), \reg
#endif
	.endm
+
	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	/* no space-id shift configured: deposit the space id directly */
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	/* undo SPACEID_SHIFT while forming the protection id */
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
+
	/* Switch to virtual mapping, trashing only %r1.
	 * The PSW cannot be written directly: the desired KERNEL_PSW is
	 * placed in %ipsw and the instruction address queues are pointed
	 * at local label 4:, so the rfir "returns" to the next
	 * instruction with translation enabled. */
	.macro  virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm
+
+	/*
+	 * The "get_stack" macros are responsible for determining the
+	 * kernel stack value.
+	 *
+	 *      If sr7 == 0
+	 *          Already using a kernel stack, so call the
+	 *          get_stack_use_r30 macro to push a pt_regs structure
+	 *          on the stack, and store registers there.
+	 *      else
+	 *          Need to set up a kernel stack, so call the
+	 *          get_stack_use_cr30 macro to set up a pointer
+	 *          to the pt_regs structure contained within the
+	 *          task pointer pointed to by cr30. Set the stack
+	 *          pointer to point to the end of the task structure.
+	 *
+	 * Note that we use shadowed registers for temps until
+	 * we can save %r26 and %r29. %r26 is used to preserve
+	 * %r8 (a shadowed register) which temporarily contained
+	 * either the fault type ("code") or the eirr. We need
+	 * to use a non-shadowed register to carry the value over
+	 * the rfir in virt_map. We use %r26 since this value winds
+	 * up being passed as the argument to either do_cpu_irq_mask
+	 * or handle_interruption. %r29 is used to hold a pointer
+	 * the register save area, and once again, it needs to
+	 * be a non-shadowed register so that it survives the rfir.
+	 *
+	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
+	 */
+
	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17	/* preserve the interrupted stack pointer */
	mfctl   %cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30	/* kernel stack = end of task area */
	mtsp	%r0,%sr7
	mtsp	%r16,%sr3
	tophys  %r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys  %r1,%r9
	ldo     TASK_REGS(%r9),%r9
	STREG   %r17,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29	/* %r29 = pt_regs save area (survives rfir) */
	.endm

	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30	/* push a pt_regs frame */
	STREG   %r1,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29	/* %r29 = pt_regs save area (survives rfir) */
	.endm

	/* undo either get_stack variant; restores %r29 last since it is
	 * still in use as the save-area pointer */
	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm
+
	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8	/* trap number travels in shadowed %r8 (delay slot) */
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16	/* capture previous space in the delay slot */
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC (High Priority Machine Check) handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	0		/* address of handler */
	.word	0		/* length of handler */
	.endm
+
+	/*
+	 * Performance Note: Instructions will be moved up into
+	 * this part of the code later on, once we are sure
+	 * that the tlb miss handlers are close to final form.
+	 */
+
+	/* Register definitions for tlb miss handler macros */
+
+	va  = r8	/* virtual address for which the trap occurred */
+	spc = r24	/* space for which the trap occurred */
+
	/*
	 * TLB-miss dispatch stubs, expanded into the fault vectors below.
	 * Each stub captures the faulting space and virtual address into
	 * the shadowed registers "spc" and "va" (the second mfctl executes
	 * in the branch delay slot) and branches to the matching miss
	 * handler.  Every stub is padded to the 32-byte vector stride.
	 */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl 	%ior,va

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl 	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm
+
	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro		space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
	depd		%r0,63,SPACEID_SHIFT,\spc
	depd		\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm

	.import		swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro		get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0	/* spc == 0: nullify next insn, keep swapper_pg_dir */
	mfctl		%cr25,\reg
	.endm

	/* 
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel 

	*/
	.macro		space_check	spc,tmp,fault
	mfsp		%sr7,\tmp
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy		\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm
+
	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present 
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro		L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
  #else
  # if PAGE_SIZE > 4096
	extru		\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
  # else
	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
  # endif
# endif
#endif
	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	copy		%r0,\pte		/* a zero pte means "not present" to callers */
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	copy		\pmd,%r9	/* %r9 is scratch for the short-pointer shift */
	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
	LDREG		%r0(\pmd),\pte
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
	.endm

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory).
	 *
	 * The repeated extrd,u,*= tests nullify each following insn
	 * whenever the va falls in the low (pgd-adjacent pmd) range. */
	.macro		L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy		%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s		\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld		\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy		\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm
+
	/* Acquire pa_tlb_lock lock and recheck page is still present.
	 * Locking is skipped entirely for kernel space (\spc == 0). */
	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,\spc,2f
	load_pa_tlb_lock \tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b	/* spin while the lock word reads 0 */
	nop
	LDREG		0(\ptp),\pte
	bb,<,n		\pte,_PAGE_PRESENT_BIT,2f
	b		\fault
	stw		 \spc,0(\tmp)	/* delay slot: release lock (\spc is nonzero here) */
2:
#endif
	.endm

	/* Release pa_tlb_lock lock without reloading lock address.
	 * All three insns are nullified for kernel space (\spc == 0). */
	.macro		tlb_unlock0	spc,tmp
#ifdef CONFIG_SMP
	or,COND(=)	%r0,\spc,%r0
	sync
	or,COND(=)	%r0,\spc,%r0
	stw             \spc,0(\tmp)	/* nonzero \spc marks the lock free */
#endif
	.endm

	/* Release pa_tlb_lock lock. */
	.macro		tlb_unlock1	spc,tmp
#ifdef CONFIG_SMP
	load_pa_tlb_lock \tmp
	tlb_unlock0	\spc,\tmp
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro		update_accessed	ptp,pte,tmp,tmp1
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0	/* nullify the store if bit already set */
	STREG		\tmp,0(\ptp)
	.endm

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptp,pte,tmp
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptp)
	.endm
+
	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro		convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	/* if the huge-page bit is set, overwrite the size encoding */
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro		make_insert_tlb	spc,pte,prot,tmp
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page */
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denys read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be use for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handles cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro		make_insert_tlb_11	spc,pte,prot
	zdep		\spc,30,15,\prot
	dep		\pte,8,7,\prot
	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi		1,12,1,\prot
	extru,=         \pte,_PAGE_USER_BIT,1,%r0
	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro		f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0	/* nullify unless the top nibble was 0xf */
	extrd,s		\pte,63,25,\pte
	.endm
+
	/* The alias region is an 8MB aligned 16MB to do clear and
	 * copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the to TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the from tlb entry (or nothing if only a to entry---for
	 * clear_user_page_asm) */
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault	/* alias faults only occur in kernel space */
	ldil		L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi		0,31,32,\tmp
#endif
	copy		\va,\tmp1
	depi		0,31,23,\tmp1	/* mask va down to its 8MB region base */
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl		%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u		\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z		\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z		\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte	/* "from" half: take %r23 (always executes) */
	or		%r26,%r0,\pte	/* "to" half: nullified unless test above matched */
	.endm 
+
+
+	/*
+	 * Fault_vectors are architecturally required to be aligned on a 2K
+	 * boundary
+	 */
+
+	.section .text.hot
+	.align 2048
+
/* PA 2.0 interruption vector table: one 32-byte stub per trap number.
 * Stub macros (def/extint/hpmc/*_20) are defined above. */
ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"	/* filler: vector 0 can never be taken */
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 6
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)
+
#ifndef CONFIG_64BIT

	.align 2048

/* PA 1.1 interruption vector table (32-bit kernels only); layout
 * mirrors fault_vector_20 above with the 1.1 stub variants. */
ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"	/* filler: vector 0 can never be taken */
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 6
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
+	/* Fault vector is separately protected and *must* be on its own page */
+	.align		PAGE_SIZE
+ENTRY(end_fault_vector)
+
+	.import		handle_interruption,code
+	.import		do_cpu_irq_mask,code
+
+	/*
+	 * Child Returns here
+	 *
+	 * copy_thread moved args into task save area.
+	 */
+
ENTRY_CFI(ret_from_kernel_thread)

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	/* Recover the task's pt_regs relative to the kernel stack, then
	 * call the thread function (saved in PT_GR26) with its argument
	 * (saved in PT_GR25). */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27	/* restore the global data pointer */
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2	/* delay slot: set up the return address */
	b	finish_child_return
	nop
ENDPROC_CFI(ret_from_kernel_thread)
+
+
+	/*
+	 * struct task_struct *_switch_to(struct task_struct *prev,
+	 *	struct task_struct *next)
+	 *
+	 * switch kernel stacks and return prev */
+ENTRY_CFI(_switch_to)
+	STREG	 %r2, -RP_OFFSET(%r30)
+
+	callee_save_float
+	callee_save
+
+	load32	_switch_to_ret, %r2
+
+	STREG	%r2, TASK_PT_KPC(%r26)
+	LDREG	TASK_PT_KPC(%r25), %r2
+
+	STREG	%r30, TASK_PT_KSP(%r26)
+	LDREG	TASK_PT_KSP(%r25), %r30
+	LDREG	TASK_THREAD_INFO(%r25), %r25
+	bv	%r0(%r2)
+	mtctl   %r25,%cr30
+
+_switch_to_ret:
+	mtctl	%r0, %cr0		/* Needed for single stepping */
+	callee_rest
+	callee_rest_float
+
+	LDREG	-RP_OFFSET(%r30), %r2
+	bv	%r0(%r2)
+	copy	%r26, %r28
+ENDPROC_CFI(_switch_to)
+
+	/*
+	 * Common rfi return path for interruptions, kernel execve, and
+	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
+	 * return via this path if the signal was received when the process
+	 * was running; if the process was blocked on a syscall then the
+	 * normal syscall_exit path is used.  All syscalls for traced
+	 * proceses exit via intr_restore.
+	 *
+	 * XXX If any syscalls that change a processes space id ever exit
+	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
+	 * adjust IASQ[0..1].
+	 *
+	 */
+
+	.align	PAGE_SIZE
+
+ENTRY_CFI(syscall_exit_rfi)
+	mfctl   %cr30,%r16
+	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
+	ldo	TASK_REGS(%r16),%r16
+	/* Force iaoq to userspace, as the user has had access to our current
+	 * context via sigcontext. Also Filter the PSW for the same reason.
+	 */
+	LDREG	PT_IAOQ0(%r16),%r19
+	depi	3,31,2,%r19
+	STREG	%r19,PT_IAOQ0(%r16)
+	LDREG	PT_IAOQ1(%r16),%r19
+	depi	3,31,2,%r19
+	STREG	%r19,PT_IAOQ1(%r16)
+	LDREG   PT_PSW(%r16),%r19
+	load32	USER_PSW_MASK,%r1
+#ifdef CONFIG_64BIT
+	load32	USER_PSW_HI_MASK,%r20
+	depd    %r20,31,32,%r1
+#endif
+	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
+	load32	USER_PSW,%r1
+	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
+	STREG   %r19,PT_PSW(%r16)
+
+	/*
+	 * If we aren't being traced, we never saved space registers
+	 * (we don't store them in the sigcontext), so set them
+	 * to "proper" values now (otherwise we'll wind up restoring
+	 * whatever was last stored in the task structure, which might
+	 * be inconsistent if an interrupt occurred while on the gateway
+	 * page). Note that we may be "trashing" values the user put in
+	 * them, but we don't support the user changing them.
+	 */
+
+	STREG   %r0,PT_SR2(%r16)
+	mfsp    %sr3,%r19
+	STREG   %r19,PT_SR0(%r16)
+	STREG   %r19,PT_SR1(%r16)
+	STREG   %r19,PT_SR3(%r16)
+	STREG   %r19,PT_SR4(%r16)
+	STREG   %r19,PT_SR5(%r16)
+	STREG   %r19,PT_SR6(%r16)
+	STREG   %r19,PT_SR7(%r16)
+
+intr_return:
+	/* check for reschedule */
+	mfctl   %cr30,%r1
+	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
+	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
+
+	.import do_notify_resume,code
+intr_check_sig:
+	/* As above */
+	mfctl   %cr30,%r1
+	LDREG	TI_FLAGS(%r1),%r19
+	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
+	and,COND(<>)	%r19, %r20, %r0
+	b,n	intr_restore	/* skip past if we've nothing to do */
+
+	/* This check is critical to having LWS
+	 * working. The IASQ is zero on the gateway
+	 * page and we cannot deliver any signals until
+	 * we get off the gateway page.
+	 *
+	 * Only do signals if we are returning to user space
+	 */
+	LDREG	PT_IASQ0(%r16), %r20
+	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
+	LDREG	PT_IASQ1(%r16), %r20
+	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
+
+	/* NOTE: We need to enable interrupts if we have to deliver
+	 * signals. We used to do this earlier but it caused kernel
+	 * stack overflows. */
+	ssm     PSW_SM_I, %r0
+
+	copy	%r0, %r25			/* long in_syscall = 0 */
+#ifdef CONFIG_64BIT
+	ldo	-16(%r30),%r29			/* Reference param save area */
+#endif
+
+	BL	do_notify_resume,%r2
+	copy	%r16, %r26			/* struct pt_regs *regs */
+
+	b,n	intr_check_sig
+
+intr_restore:
+	copy            %r16,%r29
+	ldo             PT_FR31(%r29),%r1
+	rest_fp         %r1
+	rest_general    %r29
+
+	/* inverse of virt_map */
+	pcxt_ssm_bug
+	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
+	tophys_r1       %r29
+
+	/* Restore space id's and special cr's from PT_REGS
+	 * structure pointed to by r29
+	 */
+	rest_specials	%r29
+
+	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
+	 * It also restores r1 and r30.
+	 */
+	rest_stack
+
+	rfi
+	nop
+
+#ifndef CONFIG_PREEMPT
+# define intr_do_preempt	intr_restore	/* no preemption: fall straight back */
+#endif /* !CONFIG_PREEMPT */
+
+	.import schedule,code
+intr_do_resched:
+	/* Only call schedule on return to userspace. If we're returning
+	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
+	 * we jump back to intr_restore.
+	 */
+	LDREG	PT_IASQ0(%r16), %r20
+	cmpib,COND(=)	0, %r20, intr_do_preempt
+	nop
+	LDREG	PT_IASQ1(%r16), %r20
+	cmpib,COND(=)	0, %r20, intr_do_preempt
+	nop
+
+	/* NOTE: We need to enable interrupts if we schedule.  We used
+	 * to do this earlier but it caused kernel stack overflows. */
+	ssm     PSW_SM_I, %r0			/* enable interrupts */
+
+#ifdef CONFIG_64BIT
+	ldo	-16(%r30),%r29		/* Reference param save area */
+#endif
+
+	ldil	L%intr_check_sig, %r2
+#ifndef CONFIG_64BIT
+	b	schedule
+#else
+	load32	schedule, %r20
+	bv	%r0(%r20)
+#endif
+	ldo	R%intr_check_sig(%r2), %r2	/* schedule() returns to intr_check_sig */
+
+	/* preempt the current task on returning to kernel
+	 * mode from an interrupt, iff need_resched is set,
+	 * and preempt_count is 0. otherwise, we continue on
+	 * our merry way back to the current running task.
+	 */
+#ifdef CONFIG_PREEMPT
+	.import preempt_schedule_irq,code
+intr_do_preempt:
+	rsm	PSW_SM_I, %r0		/* disable interrupts */
+
+	/* current_thread_info()->preempt_count */
+	mfctl	%cr30, %r1
+	LDREG	TI_PRE_COUNT(%r1), %r19
+	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
+	nop				/* prev insn branched backwards */
+
+	/* check if we interrupted a critical path */
+	LDREG	PT_PSW(%r16), %r20
+	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
+	nop
+
+	BL	preempt_schedule_irq, %r2
+	nop
+
+	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
+#endif /* CONFIG_PREEMPT */
+
+	/*
+	 * External interrupts.
+	 */
+
+intr_extint:
+	cmpib,COND(=),n 0,%r16,1f	/* %r16 == 0: came from user space (cf. intr_save's %sr7 test) */
+
+	get_stack_use_cr30
+	b,n 2f
+
+1:
+	get_stack_use_r30
+2:
+	save_specials	%r29
+	virt_map
+	save_general	%r29
+
+	ldo	PT_FR0(%r29), %r24
+	save_fp	%r24			/* save FP regs into pt_regs */
+	
+	loadgp
+
+	copy	%r29, %r26	/* arg0 is pt_regs */
+	copy	%r29, %r16	/* save pt_regs */
+
+	ldil	L%intr_return, %r2
+
+#ifdef CONFIG_64BIT
+	ldo	-16(%r30),%r29	/* Reference param save area */
+#endif
+
+	b	do_cpu_irq_mask
+	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
+ENDPROC_CFI(syscall_exit_rfi)
+
+
+	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
+
+ENTRY_CFI(intr_save)		/* for os_hpmc */
+	mfsp    %sr7,%r16		/* %sr7 == 0 means trap came from kernel space */
+	cmpib,COND(=),n 0,%r16,1f
+	get_stack_use_cr30
+	b	2f
+	copy    %r8,%r26		/* %r8 = trap code, becomes arg0 */
+
+1:
+	get_stack_use_r30
+	copy    %r8,%r26
+
+2:
+	save_specials	%r29
+
+	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
+
+	/*
+	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
+	 *           traps.c).
+	 *        2) Once we start executing code above 4 Gb, we need
+	 *           to adjust iasq/iaoq here in the same way we
+	 *           adjust isr/ior below.
+	 */
+
+	cmpib,COND(=),n        6,%r26,skip_save_ior	/* 6 == ITLB miss trap */
+
+
+	mfctl           %cr20, %r16 /* isr */
+	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
+	mfctl           %cr21, %r17 /* ior */
+
+
+#ifdef CONFIG_64BIT
+	/*
+	 * If the interrupted code was running with W bit off (32 bit),
+	 * clear the b bits (bits 0 & 1) in the ior.
+	 * save_specials left ipsw value in r8 for us to test.
+	 */
+	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
+	depdi           0,1,2,%r17
+
+	/*
+	 * FIXME: This code has hardwired assumptions about the split
+	 *        between space bits and offset bits. This will change
+	 *        when we allow alternate page sizes.
+	 */
+
+	/* adjust isr/ior. */
+	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
+	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
+	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
+#endif
+	STREG           %r16, PT_ISR(%r29)
+	STREG           %r17, PT_IOR(%r29)
+
+
+skip_save_ior:
+	virt_map
+	save_general	%r29
+
+	ldo		PT_FR0(%r29), %r25
+	save_fp		%r25
+	
+	loadgp
+
+	copy		%r29, %r25	/* arg1 is pt_regs */
+#ifdef CONFIG_64BIT
+	ldo		-16(%r30),%r29	/* Reference param save area */
+#endif
+
+	ldil		L%intr_check_sig, %r2
+	copy		%r25, %r16	/* save pt_regs */
+
+	b		handle_interruption
+	ldo		R%intr_check_sig(%r2), %r2	/* return to intr_check_sig */
+ENDPROC_CFI(intr_save)
+
+
+	/*
+	 * Note for all tlb miss handlers:
+	 *
+	 * cr24 contains a pointer to the kernel address space
+	 * page directory.
+	 *
+	 * cr25 contains a pointer to the current user address
+	 * space page directory.
+	 *
+	 * sr3 will contain the space id of the user address space
+	 * of the current running thread while that thread is
+	 * running in the kernel.
+	 */
+
+	/*
+	 * register number allocations for the TLB miss handlers below.
+	 * Note that these are all shadowed registers (cf. get_register)
+	 */
+
+	t0 = r1		/* temporary register 0 */
+	va = r8		/* virtual address for which the trap occurred */
+	t1 = r9		/* temporary register 1 */
+	pte  = r16	/* pte/phys page # */
+	prot = r17	/* prot bits */
+	spc  = r24	/* space for which the trap occurred */
+	ptp = r25	/* page directory/page table pointer */
+
+#ifdef CONFIG_64BIT
+
+dtlb_miss_20w:
+	space_adjust	spc,va,t0
+	get_pgd		spc,ptp
+	space_check	spc,t0,dtlb_fault
+
+	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w	/* 3-level PTE lookup, else alias check */
+
+	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
+	update_accessed	ptp,pte,t0,t1	/* mark PTE accessed */
+
+	make_insert_tlb	spc,pte,prot,t1
+	
+	idtlbt          pte,prot	/* insert data TLB entry */
+
+	tlb_unlock1	spc,t0
+	rfir		/* return from interruption; restores shadowed regs */
+	nop
+
+dtlb_check_alias_20w:
+	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20
+
+	idtlbt          pte,prot
+
+	rfir
+	nop
+
+nadtlb_miss_20w:
+	space_adjust	spc,va,t0
+	get_pgd		spc,ptp
+	space_check	spc,t0,nadtlb_fault
+
+	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w
+
+	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
+	update_accessed	ptp,pte,t0,t1
+
+	make_insert_tlb	spc,pte,prot,t1
+
+	idtlbt          pte,prot
+
+	tlb_unlock1	spc,t0
+	rfir
+	nop
+
+nadtlb_check_alias_20w:
+	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20
+
+	idtlbt          pte,prot
+
+	rfir
+	nop
+
+#else
+
+dtlb_miss_11:
+	get_pgd		spc,ptp
+
+	space_check	spc,t0,dtlb_fault
+
+	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11	/* 2-level PTE lookup, else alias check */
+
+	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
+	update_accessed	ptp,pte,t0,t1
+
+	make_insert_tlb_11	spc,pte,prot
+
+	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
+	mtsp		spc,%sr1
+
+	idtlba		pte,(%sr1,va)	/* PA 1.1 split address/prot inserts */
+	idtlbp		prot,(%sr1,va)
+
+	mtsp		t1, %sr1	/* Restore sr1 */
+
+	tlb_unlock1	spc,t0
+	rfir
+	nop
+
+dtlb_check_alias_11:
+	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11
+
+	idtlba          pte,(va)
+	idtlbp          prot,(va)
+
+	rfir
+	nop
+
+nadtlb_miss_11:
+	get_pgd		spc,ptp
+
+	space_check	spc,t0,nadtlb_fault
+
+	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11
+
+	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
+	update_accessed	ptp,pte,t0,t1
+
+	make_insert_tlb_11	spc,pte,prot
+
+	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
+	mtsp		spc,%sr1
+
+	idtlba		pte,(%sr1,va)
+	idtlbp		prot,(%sr1,va)
+
+	mtsp		t1, %sr1	/* Restore sr1 */
+
+	tlb_unlock1	spc,t0
+	rfir
+	nop
+
+nadtlb_check_alias_11:
+	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11
+
+	idtlba          pte,(va)
+	idtlbp          prot,(va)
+
+	rfir
+	nop
+
+dtlb_miss_20:
+	space_adjust	spc,va,t0
+	get_pgd		spc,ptp
+	space_check	spc,t0,dtlb_fault
+
+	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20
+
+	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
+	update_accessed	ptp,pte,t0,t1
+
+	make_insert_tlb	spc,pte,prot,t1
+
+	f_extend	pte,t1		/* PA 2.0 narrow mode: extend pte */
+
+	idtlbt          pte,prot
+
+	tlb_unlock1	spc,t0
+	rfir
+	nop
+
+dtlb_check_alias_20:
+	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20
+	
+	idtlbt          pte,prot
+
+	rfir
+	nop
+
+nadtlb_miss_20:
+	get_pgd		spc,ptp
+
+	space_check	spc,t0,nadtlb_fault
+
+	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20
+
+	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
+	update_accessed	ptp,pte,t0,t1
+
+	make_insert_tlb	spc,pte,prot,t1
+
+	f_extend	pte,t1
+	
+	idtlbt		pte,prot
+
+	tlb_unlock1	spc,t0
+	rfir
+	nop
+
+nadtlb_check_alias_20:
+	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20
+
+	idtlbt          pte,prot
+
+	rfir
+	nop
+
+#endif
+
+nadtlb_emulate:
+
+	/*
+	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
+	 * probei instructions. We don't want to fault for these
+	 * instructions (not only does it not make sense, it can cause
+	 * deadlocks, since some flushes are done with the mmap
+	 * semaphore held). If the translation doesn't exist, we can't
+	 * insert a translation, so have to emulate the side effects
+	 * of the instruction. Since we don't insert a translation
+	 * we can get a lot of faults during a flush loop, so it makes
+	 * sense to try to do it here with minimum overhead. We only
+	 * emulate fdc,fic,pdc,probew,prober instructions whose base 
+	 * and index registers are not shadowed. We defer everything 
+	 * else to the "slow" path.
+	 */
+
+	mfctl           %cr19,%r9 /* Get iir */
+
+	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
+	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
+
+	/* Checks for fdc,fdce,pdc,"fic,4f" only */
+	ldi             0x280,%r16	/* opcode mask for the flush/purge group */
+	and             %r9,%r16,%r17
+	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
+	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
+	BL		get_register,%r25	/* emulate base-modify: base += index */
+	extrw,u         %r9,15,5,%r8           /* Get index register # */
+	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
+	copy            %r1,%r24
+	BL		get_register,%r25
+	extrw,u         %r9,10,5,%r8           /* Get base register # */
+	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
+	BL		set_register,%r25
+	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */
+
+nadtlb_nullify:
+	mfctl           %ipsw,%r8
+	ldil            L%PSW_N,%r9
+	or              %r8,%r9,%r8            /* Set PSW_N */
+	mtctl           %r8,%ipsw		/* faulting insn is skipped on rfir */
+
+	rfir
+	nop
+
+	/* 
+		When there is no translation for the probe address then we
+		must nullify the insn and return zero in the target register.
+		This will indicate to the calling code that it does not have 
+		write/read privileges to this address.
+
+		This should technically work for prober and probew in PA 1.1,
+		and also probe,r and probe,w in PA 2.0
+
+		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
+		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
+
+	*/
+nadtlb_probe_check:
+	ldi             0x80,%r16	/* opcode mask: must be probe,[rw] */
+	and             %r9,%r16,%r17
+	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
+	BL              get_register,%r25      /* Find the target register */
+	extrw,u         %r9,31,5,%r8           /* Get target register */
+	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
+	BL		set_register,%r25
+	copy            %r0,%r1                /* Write zero to target register */
+	b nadtlb_nullify                       /* Nullify return insn */
+	nop
+
+
+#ifdef CONFIG_64BIT
+itlb_miss_20w:
+
+	/*
+	 * I miss is a little different, since we allow users to fault
+	 * on the gateway page which is in the kernel address space.
+	 */
+
+	space_adjust	spc,va,t0
+	get_pgd		spc,ptp
+	space_check	spc,t0,itlb_fault
+
+	L3_ptep		ptp,pte,t0,va,itlb_fault	/* no alias handler: fault on missing PTE */
+
+	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
+	update_accessed	ptp,pte,t0,t1	/* mark PTE accessed */
+
+	make_insert_tlb	spc,pte,prot,t1
+	
+	iitlbt          pte,prot	/* insert instruction TLB entry */
+
+	tlb_unlock1	spc,t0
+	rfir
+	nop
+
+naitlb_miss_20w:
+
+	/*
+	 * I miss is a little different, since we allow users to fault
+	 * on the gateway page which is in the kernel address space.
+	 */
+
+	space_adjust	spc,va,t0
+	get_pgd		spc,ptp
+	space_check	spc,t0,naitlb_fault
+
+	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w
+
+	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
+	update_accessed	ptp,pte,t0,t1
+
+	make_insert_tlb	spc,pte,prot,t1
+
+	iitlbt          pte,prot
+
+	tlb_unlock1	spc,t0
+	rfir
+	nop
+
+naitlb_check_alias_20w:
+	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20
+
+	iitlbt		pte,prot
+
+	rfir
+	nop
+
+#else
+
+itlb_miss_11:
+	get_pgd		spc,ptp
+
+	space_check	spc,t0,itlb_fault
+
+	L2_ptep		ptp,pte,t0,va,itlb_fault
+
+	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
+	update_accessed	ptp,pte,t0,t1
+
+	make_insert_tlb_11	spc,pte,prot
+
+	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
+	mtsp		spc,%sr1
+
+	iitlba		pte,(%sr1,va)
+	iitlbp		prot,(%sr1,va)
+
+	mtsp		t1, %sr1	/* Restore sr1 */
+
+	tlb_unlock1	spc,t0
+	rfir
+	nop
+
+naitlb_miss_11:
+	get_pgd		spc,ptp
+
+	space_check	spc,t0,naitlb_fault
+
+	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11
+
+	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
+	update_accessed	ptp,pte,t0,t1
+
+	make_insert_tlb_11	spc,pte,prot
+
+	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
+	mtsp		spc,%sr1
+
+	iitlba		pte,(%sr1,va)
+	iitlbp		prot,(%sr1,va)
+
+	mtsp		t1, %sr1	/* Restore sr1 */
+
+	tlb_unlock1	spc,t0
+	rfir
+	nop
+
+naitlb_check_alias_11:
+	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11	/* NOTE(review): uses itlb_fault, unlike the _20/_20w variants' naitlb_fault — confirm intentional */
+
+	iitlba          pte,(%sr0, va)
+	iitlbp          prot,(%sr0, va)
+
+	rfir
+	nop
+
+
+itlb_miss_20:
+	get_pgd		spc,ptp
+
+	space_check	spc,t0,itlb_fault
+
+	L2_ptep		ptp,pte,t0,va,itlb_fault
+
+	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
+	update_accessed	ptp,pte,t0,t1
+
+	make_insert_tlb	spc,pte,prot,t1
+
+	f_extend	pte,t1		/* PA 2.0 narrow mode: extend pte */
+
+	iitlbt          pte,prot
+
+	tlb_unlock1	spc,t0
+	rfir
+	nop
+
+naitlb_miss_20:
+	get_pgd		spc,ptp
+
+	space_check	spc,t0,naitlb_fault
+
+	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20
+
+	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
+	update_accessed	ptp,pte,t0,t1
+
+	make_insert_tlb	spc,pte,prot,t1
+
+	f_extend	pte,t1
+
+	iitlbt          pte,prot
+
+	tlb_unlock1	spc,t0
+	rfir
+	nop
+
+naitlb_check_alias_20:
+	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20
+
+	iitlbt          pte,prot
+
+	rfir
+	nop
+
+#endif
+
+#ifdef CONFIG_64BIT
+
+dbit_trap_20w:
+	space_adjust	spc,va,t0
+	get_pgd		spc,ptp
+	space_check	spc,t0,dbit_fault
+
+	L3_ptep		ptp,pte,t0,va,dbit_fault
+
+	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
+	update_dirty	ptp,pte,t1	/* set the dirty bit in the PTE */
+
+	make_insert_tlb	spc,pte,prot,t1
+		
+	idtlbt          pte,prot
+
+	tlb_unlock0	spc,t0
+	rfir
+	nop
+#else
+
+dbit_trap_11:
+
+	get_pgd		spc,ptp
+
+	space_check	spc,t0,dbit_fault
+
+	L2_ptep		ptp,pte,t0,va,dbit_fault
+
+	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
+	update_dirty	ptp,pte,t1
+
+	make_insert_tlb_11	spc,pte,prot
+
+	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
+	mtsp		spc,%sr1
+
+	idtlba		pte,(%sr1,va)
+	idtlbp		prot,(%sr1,va)
+
+	mtsp            t1, %sr1     /* Restore sr1 */
+
+	tlb_unlock0	spc,t0
+	rfir
+	nop
+
+dbit_trap_20:
+	get_pgd		spc,ptp
+
+	space_check	spc,t0,dbit_fault
+
+	L2_ptep		ptp,pte,t0,va,dbit_fault
+
+	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
+	update_dirty	ptp,pte,t1
+
+	make_insert_tlb	spc,pte,prot,t1
+
+	f_extend	pte,t1
+	
+	idtlbt		pte,prot
+
+	tlb_unlock0	spc,t0
+	rfir
+	nop
+#endif
+
+	.import handle_interruption,code
+
+kernel_bad_space:
+	b               intr_save	/* slow path: %r8 carries the trap code */
+	ldi             31,%r8  /* Use an unused code */
+
+dbit_fault:
+	b               intr_save
+	ldi             20,%r8
+
+itlb_fault:
+	b               intr_save
+	ldi             6,%r8		/* 6 == ITLB miss (cf. intr_save's check) */
+
+nadtlb_fault:
+	b               intr_save
+	ldi             17,%r8
+
+naitlb_fault:
+	b               intr_save
+	ldi             16,%r8
+
+dtlb_fault:
+	b               intr_save
+	ldi             15,%r8
+
+	/* Register saving semantics for system calls:
+
+	   %r1		   clobbered by system call macro in userspace
+	   %r2		   saved in PT_REGS by gateway page
+	   %r3  - %r18	   preserved by C code (saved by signal code)
+	   %r19 - %r20	   saved in PT_REGS by gateway page
+	   %r21 - %r22	   non-standard syscall args
+			   stored in kernel stack by gateway page
+	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
+	   %r27 - %r30	   saved in PT_REGS by gateway page
+	   %r31		   syscall return pointer
+	 */
+
+	/* Floating point registers (FIXME: what do we do with these?)
+
+	   %fr0  - %fr3	   status/exception, not preserved
+	   %fr4  - %fr7	   arguments
+	   %fr8	 - %fr11   not preserved by C code
+	   %fr12 - %fr21   preserved by C code
+	   %fr22 - %fr31   not preserved by C code
+	 */
+
+	.macro	reg_save regs		/* store callee-saves %r3-%r18 into pt_regs at \regs */
+	STREG	%r3, PT_GR3(\regs)
+	STREG	%r4, PT_GR4(\regs)
+	STREG	%r5, PT_GR5(\regs)
+	STREG	%r6, PT_GR6(\regs)
+	STREG	%r7, PT_GR7(\regs)
+	STREG	%r8, PT_GR8(\regs)
+	STREG	%r9, PT_GR9(\regs)
+	STREG   %r10,PT_GR10(\regs)
+	STREG   %r11,PT_GR11(\regs)
+	STREG   %r12,PT_GR12(\regs)
+	STREG   %r13,PT_GR13(\regs)
+	STREG   %r14,PT_GR14(\regs)
+	STREG   %r15,PT_GR15(\regs)
+	STREG   %r16,PT_GR16(\regs)
+	STREG   %r17,PT_GR17(\regs)
+	STREG   %r18,PT_GR18(\regs)
+	.endm
+
+	.macro	reg_restore regs	/* reload callee-saves %r3-%r18 from pt_regs at \regs */
+	LDREG	PT_GR3(\regs), %r3
+	LDREG	PT_GR4(\regs), %r4
+	LDREG	PT_GR5(\regs), %r5
+	LDREG	PT_GR6(\regs), %r6
+	LDREG	PT_GR7(\regs), %r7
+	LDREG	PT_GR8(\regs), %r8
+	LDREG	PT_GR9(\regs), %r9
+	LDREG   PT_GR10(\regs),%r10
+	LDREG   PT_GR11(\regs),%r11
+	LDREG   PT_GR12(\regs),%r12
+	LDREG   PT_GR13(\regs),%r13
+	LDREG   PT_GR14(\regs),%r14
+	LDREG   PT_GR15(\regs),%r15
+	LDREG   PT_GR16(\regs),%r16
+	LDREG   PT_GR17(\regs),%r17
+	LDREG   PT_GR18(\regs),%r18
+	.endm
+
+	.macro	fork_like name		/* wrapper: save callee-saves + cr27, then tail into sys_\name */
+ENTRY_CFI(sys_\name\()_wrapper)
+	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
+	ldo	TASK_REGS(%r1),%r1
+	reg_save %r1
+	mfctl	%cr27, %r28
+	ldil	L%sys_\name, %r31
+	be	R%sys_\name(%sr4,%r31)
+	STREG	%r28, PT_CR27(%r1)	/* store cr27 in the branch delay slot */
+ENDPROC_CFI(sys_\name\()_wrapper)
+	.endm
+
+fork_like clone		/* generates sys_clone_wrapper / sys_fork_wrapper / sys_vfork_wrapper */
+fork_like fork
+fork_like vfork
+
+	/* Set the return value for the child */
+ENTRY_CFI(child_return)
+	BL	schedule_tail, %r2
+	nop
+finish_child_return:
+	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
+	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */
+
+	LDREG	PT_CR27(%r1), %r3
+	mtctl	%r3, %cr27		/* restore cr27 saved by fork_like */
+	reg_restore %r1
+	b	syscall_exit
+	copy	%r0,%r28		/* child's syscall return value is 0 */
+ENDPROC_CFI(child_return)
+
+ENTRY_CFI(sys_rt_sigreturn_wrapper)
+	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
+	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
+	/* Don't save regs, we are going to restore them from sigcontext. */
+	STREG	%r2, -RP_OFFSET(%r30)	/* save our return pointer */
+#ifdef CONFIG_64BIT
+	ldo	FRAME_SIZE(%r30), %r30
+	BL	sys_rt_sigreturn,%r2
+	ldo	-16(%r30),%r29		/* Reference param save area */
+#else
+	BL	sys_rt_sigreturn,%r2
+	ldo	FRAME_SIZE(%r30), %r30	/* push frame in the delay slot */
+#endif
+
+	ldo	-FRAME_SIZE(%r30), %r30
+	LDREG	-RP_OFFSET(%r30), %r2
+
+	/* FIXME: I think we need to restore a few more things here. */
+	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
+	reg_restore %r1
+
+	/* If the signal was received while the process was blocked on a
+	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
+	 * take us to syscall_exit_rfi and on to intr_return.
+	 */
+	bv	%r0(%r2)
+	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
+ENDPROC_CFI(sys_rt_sigreturn_wrapper)
+
+ENTRY_CFI(syscall_exit)
+	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
+	 * via syscall_exit_rfi if the signal was received while the process
+	 * was running.
+	 */
+
+	/* save return value now */
+
+	mfctl     %cr30, %r1		/* cr30 holds the thread_info pointer */
+	LDREG     TI_TASK(%r1),%r1
+	STREG     %r28,TASK_PT_GR28(%r1)
+
+	/* Seems to me that dp could be wrong here, if the syscall involved
+	 * calling a module, and nothing got round to restoring dp on return.
+	 */
+	loadgp
+
+syscall_check_resched:
+
+	/* check for reschedule */
+
+	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
+	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
+
+	.import do_signal,code
+syscall_check_sig:
+	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
+	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
+	and,COND(<>)	%r19, %r26, %r0	/* nullify branch below if work pending */
+	b,n	syscall_restore	/* skip past if we've nothing to do */
+
+syscall_do_signal:
+	/* Save callee-save registers (for sigcontext).
+	 * FIXME: After this point the process structure should be
+	 * consistent with all the relevant state of the process
+	 * before the syscall.  We need to verify this.
+	 */
+	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
+	reg_save %r26
+
+#ifdef CONFIG_64BIT
+	ldo	-16(%r30),%r29			/* Reference param save area */
+#endif
+
+	BL	do_notify_resume,%r2
+	ldi	1, %r25				/* long in_syscall = 1 */
+
+	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
+	reg_restore %r20
+
+	b,n     syscall_check_sig
+
+syscall_restore:
+	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+
+	/* Are we being ptraced? */
+	ldw	TASK_FLAGS(%r1),%r19
+	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
+	and,COND(=)	%r19,%r2,%r0	/* nullify branch if no trace bits set */
+	b,n	syscall_restore_rfi
+
+	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
+	rest_fp	%r19
+
+	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
+	mtsar	%r19
+
+	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
+	LDREG	TASK_PT_GR19(%r1),%r19
+	LDREG   TASK_PT_GR20(%r1),%r20
+	LDREG	TASK_PT_GR21(%r1),%r21
+	LDREG	TASK_PT_GR22(%r1),%r22
+	LDREG	TASK_PT_GR23(%r1),%r23
+	LDREG	TASK_PT_GR24(%r1),%r24
+	LDREG	TASK_PT_GR25(%r1),%r25
+	LDREG	TASK_PT_GR26(%r1),%r26
+	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
+	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
+	LDREG	TASK_PT_GR29(%r1),%r29
+	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */
+
+	/* NOTE: We use rsm/ssm pair to make this operation atomic */
+	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
+	rsm     PSW_SM_I, %r0
+	copy    %r1,%r30                           /* Restore user sp */
+	mfsp    %sr3,%r1                           /* Get user space id */
+	mtsp    %r1,%sr7                           /* Restore sr7 */
+	ssm     PSW_SM_I, %r0
+
+	/* Set sr2 to zero for userspace syscalls to work. */
+	mtsp	%r0,%sr2 
+	mtsp	%r1,%sr4			   /* Restore sr4 */
+	mtsp	%r1,%sr5			   /* Restore sr5 */
+	mtsp	%r1,%sr6			   /* Restore sr6 */
+
+	depi	3,31,2,%r31			   /* ensure return to user mode. */
+
+#ifdef CONFIG_64BIT
+	/* decide whether to reset the wide mode bit
+	 *
+	 * For a syscall, the W bit is stored in the lowest bit
+	 * of sp.  Extract it and reset W if it is zero */
+	extrd,u,*<>	%r30,63,1,%r1
+	rsm	PSW_SM_W, %r0
+	/* now reset the lowest bit of sp if it was set */
+	xor	%r30,%r1,%r30
+#endif
+	be,n    0(%sr3,%r31)                       /* return to user space */
+
+	/* We have to return via an RFI, so that PSW T and R bits can be set
+	 * appropriately.
+	 * This sets up pt_regs so we can return via intr_restore, which is not
+	 * the most efficient way of doing things, but it works.
+	 */
+syscall_restore_rfi:
+	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
+	mtctl	%r2,%cr0			   /*   for immediate trap */
+	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
+	ldi	0x0b,%r20			   /* Create new PSW */
+	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */
+
+	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
+	 * set in thread_info.h and converted to PA bitmap
+	 * numbers in asm-offsets.c */
+
+	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
+	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
+	depi	-1,27,1,%r20			   /* R bit */
+
+	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
+	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
+	depi	-1,7,1,%r20			   /* T bit */
+
+	STREG	%r20,TASK_PT_PSW(%r1)
+
+	/* Always store space registers, since sr3 can be changed (e.g. fork) */
+
+	mfsp    %sr3,%r25
+	STREG   %r25,TASK_PT_SR3(%r1)
+	STREG   %r25,TASK_PT_SR4(%r1)
+	STREG   %r25,TASK_PT_SR5(%r1)
+	STREG   %r25,TASK_PT_SR6(%r1)
+	STREG   %r25,TASK_PT_SR7(%r1)
+	STREG   %r25,TASK_PT_IASQ0(%r1)
+	STREG   %r25,TASK_PT_IASQ1(%r1)
+
+	/* XXX W bit??? */
+	/* Now if old D bit is clear, it means we didn't save all registers
+	 * on syscall entry, so do that now.  This only happens on TRACEME
+	 * calls, or if someone attached to us while we were on a syscall.
+	 * We could make this more efficient by not saving r3-r18, but
+	 * then we wouldn't be able to use the common intr_restore path.
+	 * It is only for traced processes anyway, so performance is not
+	 * an issue.
+	 */
+	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
+	ldo	TASK_REGS(%r1),%r25
+	reg_save %r25				   /* Save r3 to r18 */
+
+	/* Save the current sr */
+	mfsp	%sr0,%r2
+	STREG	%r2,TASK_PT_SR0(%r1)
+
+	/* Save the scratch sr */
+	mfsp	%sr1,%r2
+	STREG	%r2,TASK_PT_SR1(%r1)
+
+	/* sr2 should be set to zero for userspace syscalls */
+	STREG	%r0,TASK_PT_SR2(%r1)
+
+	LDREG	TASK_PT_GR31(%r1),%r2
+	depi	3,31,2,%r2		   /* ensure return to user mode. */
+	STREG   %r2,TASK_PT_IAOQ0(%r1)
+	ldo	4(%r2),%r2
+	STREG	%r2,TASK_PT_IAOQ1(%r1)
+	b	intr_restore
+	copy	%r25,%r16		/* intr_restore expects pt_regs in %r16 */
+
+pt_regs_ok:
+	LDREG	TASK_PT_IAOQ0(%r1),%r2
+	depi	3,31,2,%r2		   /* ensure return to user mode. */
+	STREG	%r2,TASK_PT_IAOQ0(%r1)
+	LDREG	TASK_PT_IAOQ1(%r1),%r2
+	depi	3,31,2,%r2
+	STREG	%r2,TASK_PT_IAOQ1(%r1)
+	b	intr_restore
+	copy	%r25,%r16
+
+syscall_do_resched:
+	load32	syscall_check_resched,%r2 /* if resched, we start over again */
+	load32	schedule,%r19
+	bv	%r0(%r19)		/* jumps to schedule() */
+#ifdef CONFIG_64BIT
+	ldo	-16(%r30),%r29		/* Reference param save area */
+#else
+	nop
+#endif
+ENDPROC_CFI(syscall_exit)
+
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+	.import ftrace_function_trampoline,code
+	.align L1_CACHE_BYTES
+	.globl mcount
+	.type  mcount, @function
+ENTRY(mcount)
+_mcount:
+	.export _mcount,data
+	.proc
+	.callinfo caller,frame=0
+	.entry
+	/*
+	 * The 64bit mcount() function pointer needs 4 dwords, of which the
+	 * first two are free.  We optimize it here and put 2 instructions for
+	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
+	 * have all on one L1 cacheline.
+	 */
+	b	ftrace_function_trampoline	/* tail-call into the trampoline */
+	copy	%r3, %arg2	/* caller original %sp */
+ftrace_stub:
+	.globl ftrace_stub
+        .type  ftrace_stub, @function
+#ifdef CONFIG_64BIT
+	bve	(%rp)
+#else
+	bv	%r0(%rp)
+#endif
+	nop
+#ifdef CONFIG_64BIT
+	.dword mcount
+	.dword 0 /* code in head.S puts value of global gp here */
+#endif
+	.exit
+	.procend
+ENDPROC(mcount)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.align 8
+	.globl return_to_handler
+	.type  return_to_handler, @function
+ENTRY_CFI(return_to_handler)
+	.proc
+	.callinfo caller,frame=FRAME_SIZE
+	.entry
+	.export parisc_return_to_handler,data
+parisc_return_to_handler:
+	copy %r3,%r1
+	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
+	copy %sp,%r3
+	STREGM %r1,FRAME_SIZE(%sp)
+	STREG %ret0,8(%r3)		/* preserve original return values */
+	STREG %ret1,16(%r3)
+
+#ifdef CONFIG_64BIT
+	loadgp
+#endif
+
+	/* call ftrace_return_to_handler(0) */
+	.import ftrace_return_to_handler,code
+	load32 ftrace_return_to_handler,%ret0
+	load32 .Lftrace_ret,%r2
+#ifdef CONFIG_64BIT
+	ldo -16(%sp),%ret1		/* Reference param save area */
+	bve	(%ret0)
+#else
+	bv	%r0(%ret0)
+#endif
+	ldi 0,%r26			/* arg0 = 0, in the delay slot */
+.Lftrace_ret:
+	copy %ret0,%rp			/* handler returned the real caller address */
+
+	/* restore original return values */
+	LDREG 8(%r3),%ret0
+	LDREG 16(%r3),%ret1
+
+	/* return from function */
+#ifdef CONFIG_64BIT
+	bve	(%rp)
+#else
+	bv	%r0(%rp)
+#endif
+	LDREGM -FRAME_SIZE(%sp),%r3
+	.exit
+	.procend
+ENDPROC_CFI(return_to_handler)
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#endif	/* CONFIG_FUNCTION_TRACER */
+
+#ifdef CONFIG_IRQSTACKS
+/* void call_on_stack(unsigned long param1, void *func,
+		      unsigned long new_stack) */
+ENTRY_CFI(call_on_stack)
+	copy	%sp, %r1	/* remember the old stack pointer */
+
+	/* Regarding the HPPA calling conventions for function pointers,
+	   we assume the PIC register is not changed across call.  For
+	   CONFIG_64BIT, the argument pointer is left to point at the
+	   argument region allocated for the call to call_on_stack. */
+# ifdef CONFIG_64BIT
+	/* Switch to new stack.  We allocate two 128 byte frames.  */
+	ldo	256(%arg2), %sp
+	/* Save previous stack pointer and return pointer in frame marker */
+	STREG	%rp, -144(%sp)
+	/* Calls always use function descriptor */
+	LDREG	16(%arg1), %arg1
+	bve,l	(%arg1), %rp
+	STREG	%r1, -136(%sp)
+	LDREG	-144(%sp), %rp
+	bve	(%rp)
+	LDREG	-136(%sp), %sp	/* switch back to the old stack */
+# else
+	/* Switch to new stack.  We allocate two 64 byte frames.  */
+	ldo	128(%arg2), %sp
+	/* Save previous stack pointer and return pointer in frame marker */
+	STREG	%r1, -68(%sp)
+	STREG	%rp, -84(%sp)
+	/* Calls use function descriptor if PLABEL bit is set */
+	bb,>=,n	%arg1, 30, 1f
+	depwi	0,31,2, %arg1
+	LDREG	0(%arg1), %arg1
+1:
+	be,l	0(%sr4,%arg1), %sr0, %r31
+	copy	%r31, %rp
+	LDREG	-84(%sp), %rp
+	bv	(%rp)
+	LDREG	-68(%sp), %sp	/* switch back to the old stack */
+# endif /* CONFIG_64BIT */
+ENDPROC_CFI(call_on_stack)
+#endif /* CONFIG_IRQSTACKS */
+
+ENTRY_CFI(get_register)
+	/*
+	 * get_register is used by the non access tlb miss handlers to
+	 * copy the value of the general register specified in r8 into
+	 * r1. This routine can't be used for shadowed registers, since
+	 * the rfir will restore the original value. So, for the shadowed
+	 * registers we put a -1 into r1 to indicate that the register
+	 * should not be used (the register being copied could also have
+	 * a -1 in it, but that is OK, it just means that we will have
+	 * to use the slow path instead).
+	 */
+	blr     %r8,%r0		/* dispatch into the 2-insn/entry table below, indexed by %r8 */
+	nop
+	bv      %r0(%r25)    /* r0 */
+	copy    %r0,%r1
+	bv      %r0(%r25)    /* r1 - shadowed */
+	ldi     -1,%r1
+	bv      %r0(%r25)    /* r2 */
+	copy    %r2,%r1
+	bv      %r0(%r25)    /* r3 */
+	copy    %r3,%r1
+	bv      %r0(%r25)    /* r4 */
+	copy    %r4,%r1
+	bv      %r0(%r25)    /* r5 */
+	copy    %r5,%r1
+	bv      %r0(%r25)    /* r6 */
+	copy    %r6,%r1
+	bv      %r0(%r25)    /* r7 */
+	copy    %r7,%r1
+	bv      %r0(%r25)    /* r8 - shadowed */
+	ldi     -1,%r1
+	bv      %r0(%r25)    /* r9 - shadowed */
+	ldi     -1,%r1
+	bv      %r0(%r25)    /* r10 */
+	copy    %r10,%r1
+	bv      %r0(%r25)    /* r11 */
+	copy    %r11,%r1
+	bv      %r0(%r25)    /* r12 */
+	copy    %r12,%r1
+	bv      %r0(%r25)    /* r13 */
+	copy    %r13,%r1
+	bv      %r0(%r25)    /* r14 */
+	copy    %r14,%r1
+	bv      %r0(%r25)    /* r15 */
+	copy    %r15,%r1
+	bv      %r0(%r25)    /* r16 - shadowed */
+	ldi     -1,%r1
+	bv      %r0(%r25)    /* r17 - shadowed */
+	ldi     -1,%r1
+	bv      %r0(%r25)    /* r18 */
+	copy    %r18,%r1
+	bv      %r0(%r25)    /* r19 */
+	copy    %r19,%r1
+	bv      %r0(%r25)    /* r20 */
+	copy    %r20,%r1
+	bv      %r0(%r25)    /* r21 */
+	copy    %r21,%r1
+	bv      %r0(%r25)    /* r22 */
+	copy    %r22,%r1
+	bv      %r0(%r25)    /* r23 */
+	copy    %r23,%r1
+	bv      %r0(%r25)    /* r24 - shadowed */
+	ldi     -1,%r1
+	bv      %r0(%r25)    /* r25 - shadowed */
+	ldi     -1,%r1
+	bv      %r0(%r25)    /* r26 */
+	copy    %r26,%r1
+	bv      %r0(%r25)    /* r27 */
+	copy    %r27,%r1
+	bv      %r0(%r25)    /* r28 */
+	copy    %r28,%r1
+	bv      %r0(%r25)    /* r29 */
+	copy    %r29,%r1
+	bv      %r0(%r25)    /* r30 */
+	copy    %r30,%r1
+	bv      %r0(%r25)    /* r31 */
+	copy    %r31,%r1
+ENDPROC_CFI(get_register)
+
+
+ENTRY_CFI(set_register)
+	/*
+	 * set_register is used by the non access tlb miss handlers to
+	 * copy the value of r1 into the general register specified in
+	 * r8.
+	 */
+	blr     %r8,%r0		/* dispatch into the 2-insn/entry table below, indexed by %r8 */
+	nop
+	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
+	copy    %r1,%r0
+	bv      %r0(%r25)    /* r1 */
+	copy    %r1,%r1
+	bv      %r0(%r25)    /* r2 */
+	copy    %r1,%r2
+	bv      %r0(%r25)    /* r3 */
+	copy    %r1,%r3
+	bv      %r0(%r25)    /* r4 */
+	copy    %r1,%r4
+	bv      %r0(%r25)    /* r5 */
+	copy    %r1,%r5
+	bv      %r0(%r25)    /* r6 */
+	copy    %r1,%r6
+	bv      %r0(%r25)    /* r7 */
+	copy    %r1,%r7
+	bv      %r0(%r25)    /* r8 */
+	copy    %r1,%r8
+	bv      %r0(%r25)    /* r9 */
+	copy    %r1,%r9
+	bv      %r0(%r25)    /* r10 */
+	copy    %r1,%r10
+	bv      %r0(%r25)    /* r11 */
+	copy    %r1,%r11
+	bv      %r0(%r25)    /* r12 */
+	copy    %r1,%r12
+	bv      %r0(%r25)    /* r13 */
+	copy    %r1,%r13
+	bv      %r0(%r25)    /* r14 */
+	copy    %r1,%r14
+	bv      %r0(%r25)    /* r15 */
+	copy    %r1,%r15
+	bv      %r0(%r25)    /* r16 */
+	copy    %r1,%r16
+	bv      %r0(%r25)    /* r17 */
+	copy    %r1,%r17
+	bv      %r0(%r25)    /* r18 */
+	copy    %r1,%r18
+	bv      %r0(%r25)    /* r19 */
+	copy    %r1,%r19
+	bv      %r0(%r25)    /* r20 */
+	copy    %r1,%r20
+	bv      %r0(%r25)    /* r21 */
+	copy    %r1,%r21
+	bv      %r0(%r25)    /* r22 */
+	copy    %r1,%r22
+	bv      %r0(%r25)    /* r23 */
+	copy    %r1,%r23
+	bv      %r0(%r25)    /* r24 */
+	copy    %r1,%r24
+	bv      %r0(%r25)    /* r25 */
+	copy    %r1,%r25
+	bv      %r0(%r25)    /* r26 */
+	copy    %r1,%r26
+	bv      %r0(%r25)    /* r27 */
+	copy    %r1,%r27
+	bv      %r0(%r25)    /* r28 */
+	copy    %r1,%r28
+	bv      %r0(%r25)    /* r29 */
+	copy    %r1,%r29
+	bv      %r0(%r25)    /* r30 */
+	copy    %r1,%r30
+	bv      %r0(%r25)    /* r31 */
+	copy    %r1,%r31
+ENDPROC_CFI(set_register)
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/firmware.c b/src/kernel/linux/v4.14/arch/parisc/kernel/firmware.c
new file mode 100644
index 0000000..6d471c0
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/firmware.c
@@ -0,0 +1,1700 @@
+/*
+ * arch/parisc/kernel/firmware.c  - safe PDC access routines
+ *
+ *	PDC == Processor Dependent Code
+ *
+ * See http://www.parisc-linux.org/documentation/index.html
+ * for documentation describing the entry points and calling
+ * conventions defined below.
+ *
+ * Copyright 1999 SuSE GmbH Nuernberg (Philipp Rumpf, prumpf@tux.org)
+ * Copyright 1999 The Puffin Group, (Alex deVries, David Kennedy)
+ * Copyright 2003 Grant Grundler <grundler parisc-linux org>
+ * Copyright 2003,2004 Ryan Bradetich <rbrad@parisc-linux.org>
+ * Copyright 2004,2006 Thibaut VARENE <varenet@parisc-linux.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ */
+
+/*	I think it would be in everyone's best interest to follow these
+ *	guidelines when writing PDC wrappers:
+ *
+ *	 - the name of the pdc wrapper should match one of the macros
+ *	   used for the first two arguments
+ *	 - don't use caps for random parts of the name
+ *	 - use the static PDC result buffers and "copyout" to structs
+ *	   supplied by the caller to encapsulate alignment restrictions
+ *	 - hold pdc_lock while in PDC or using static result buffers
+ *	 - use __pa() to convert virtual (kernel) pointers to physical
+ *	   ones.
+ *	 - the name of the struct used for pdc return values should equal
+ *	   one of the macros used for the first two arguments to the
+ *	   corresponding PDC call
+ *	 - keep the order of arguments
+ *	 - don't be smart (setting trailing NUL bytes for strings, return
+ *	   something useful even if the call failed) unless you are sure
+ *	   it's not going to affect functionality or performance
+ *
+ *	Example:
+ *	int pdc_cache_info(struct pdc_cache_info *cache_info )
+ *	{
+ *		int retval;
+ *
+ *		spin_lock_irq(&pdc_lock);
+ *		retval = mem_pdc_call(PDC_CACHE,PDC_CACHE_INFO,__pa(cache_info),0);
+ *		convert_to_wide(pdc_result);
+ *		memcpy(cache_info, pdc_result, sizeof(*cache_info));
+ *		spin_unlock_irq(&pdc_lock);
+ *
+ *		return retval;
+ *	}
+ *					prumpf	991016	
+ */
+
+#include <stdarg.h>
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+
+#include <asm/page.h>
+#include <asm/pdc.h>
+#include <asm/pdcpat.h>
+#include <asm/processor.h>	/* for boot_cpu_data */
+
+#if defined(BOOTLOADER)
+# undef  spin_lock_irqsave
+# define spin_lock_irqsave(a, b) { b = 1; }
+# undef  spin_unlock_irqrestore
+# define spin_unlock_irqrestore(a, b)
+#else
+static DEFINE_SPINLOCK(pdc_lock);
+#endif
+
+extern unsigned long pdc_result[NUM_PDC_RESULT];
+extern unsigned long pdc_result2[NUM_PDC_RESULT];
+
+#ifdef CONFIG_64BIT
+#define WIDE_FIRMWARE 0x1
+#define NARROW_FIRMWARE 0x2
+
+/* Firmware needs to be initially set to narrow to determine the 
+ * actual firmware width. */
+int parisc_narrow_firmware __read_mostly = 1;
+#endif
+
+/* On most currently-supported platforms, IODC I/O calls are 32-bit calls
+ * and MEM_PDC calls are always the same width as the OS.
+ * Some PAT boxes may have 64-bit IODC I/O.
+ *
+ * Ryan Bradetich added the now obsolete CONFIG_PDC_NARROW to allow
+ * 64-bit kernels to run on systems with 32-bit MEM_PDC calls.
+ * This allowed wide kernels to run on Cxxx boxes.
+ * We now detect 32-bit-only PDC and dynamically switch to 32-bit mode
+ * when running a 64-bit kernel on such boxes (e.g. C200 or C360).
+ */
+
+#ifdef CONFIG_64BIT
+long real64_call(unsigned long function, ...);
+#endif
+long real32_call(unsigned long function, ...);
+
+#ifdef CONFIG_64BIT
+#   define MEM_PDC (unsigned long)(PAGE0->mem_pdc_hi) << 32 | PAGE0->mem_pdc
+#   define mem_pdc_call(args...) unlikely(parisc_narrow_firmware) ? real32_call(MEM_PDC, args) : real64_call(MEM_PDC, args)
+#else
+#   define MEM_PDC (unsigned long)PAGE0->mem_pdc
+#   define mem_pdc_call(args...) real32_call(MEM_PDC, args)
+#endif
+
+
+/**
+ * f_extend - Convert PDC addresses to kernel addresses.
+ * @address: Address returned from PDC.
+ *
+ * This function is used to convert PDC addresses into kernel addresses
+ * when the PDC address size and kernel address size are different.
+ */
+static unsigned long f_extend(unsigned long address)
+{
+#ifdef CONFIG_64BIT
+	if(unlikely(parisc_narrow_firmware)) {
+		/* Narrow (32-bit) firmware returned a 32-bit address;
+		 * rebuild the equivalent wide kernel address.
+		 * NOTE(review): presumably 0xfxxx_xxxx values are firmware
+		 * F-space (I/O) addresses being placed into the 64-bit
+		 * F-extension region -- confirm against PA-RISC firmware
+		 * architecture docs. */
+		if((address & 0xff000000) == 0xf0000000)
+			return 0xf0f0f0f000000000UL | (u32)address;
+
+		if((address & 0xf0000000) == 0xf0000000)
+			return 0xffffffff00000000UL | (u32)address;
+	}
+#endif
+	/* Wide firmware (or 32-bit kernel): address is already usable. */
+	return address;
+}
+
+/**
+ * convert_to_wide - Widen the PDC return buffer values in place.
+ * @addr: The return buffer from PDC (NUM_PDC_RESULT entries).
+ *
+ * When a 64-bit kernel runs on narrow (32-bit) firmware, PDC fills the
+ * result buffer with 32-bit values; this widens each entry in place to
+ * an unsigned long.  No-op on 32-bit kernels or wide firmware.
+ */
+static void convert_to_wide(unsigned long *addr)
+{
+#ifdef CONFIG_64BIT
+	int i;
+	unsigned int *p = (unsigned int *)addr;
+
+	if (unlikely(parisc_narrow_firmware)) {
+		/* Iterate backwards: the 32-bit source entries occupy the
+		 * front of the same buffer, so widening from the end avoids
+		 * overwriting entries before they have been read. */
+		for (i = (NUM_PDC_RESULT-1); i >= 0; --i)
+			addr[i] = p[i];
+	}
+#endif
+}
+
+#ifdef CONFIG_64BIT
+void set_firmware_width_unlocked(void)
+{
+	int ret;
+
+	ret = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES,
+		__pa(pdc_result), 0);
+	convert_to_wide(pdc_result);
+	if (pdc_result[0] != NARROW_FIRMWARE)
+		parisc_narrow_firmware = 0;
+}
+	
+/**
+ * set_firmware_width - Determine if the firmware is wide or narrow.
+ * 
+ * This function must be called before any pdc_* function that uses the
+ * convert_to_wide function.
+ */
+void set_firmware_width(void)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&pdc_lock, flags);
+	set_firmware_width_unlocked();
+	spin_unlock_irqrestore(&pdc_lock, flags);
+}
+#else
+void set_firmware_width_unlocked(void)
+{
+	return;
+}
+
+void set_firmware_width(void)
+{
+	return;
+}
+#endif /*CONFIG_64BIT*/
+
+
+#if !defined(BOOTLOADER)
+/**
+ * pdc_emergency_unlock - Unlock the linux pdc lock
+ *
+ * This call unlocks the linux pdc lock in case we need some PDC functions
+ * (like pdc_add_valid) during kernel stack dump.
+ */
+void pdc_emergency_unlock(void)
+{
+ 	/* Spinlock DEBUG code freaks out if we unconditionally unlock */
+        if (spin_is_locked(&pdc_lock))
+		spin_unlock(&pdc_lock);
+}
+
+
+/**
+ * pdc_add_valid - Verify address can be accessed without causing a HPMC.
+ * @address: Address to be verified.
+ *
+ * This PDC call attempts to read from the specified address and verifies
+ * if the address is valid.
+ * 
+ * The return value is PDC_OK (0) in case accessing this address is valid.
+ */
+int pdc_add_valid(unsigned long address)
+{
+        int retval;
+	unsigned long flags;
+
+        spin_lock_irqsave(&pdc_lock, flags);
+        retval = mem_pdc_call(PDC_ADD_VALID, PDC_ADD_VALID_VERIFY, address);
+        spin_unlock_irqrestore(&pdc_lock, flags);
+
+        return retval;
+}
+EXPORT_SYMBOL(pdc_add_valid);
+
+/**
+ * pdc_instr - Get instruction that invokes PDCE_CHECK in HPMC handler.
+ * @instr: Pointer to variable which will get instruction opcode.
+ *
+ * The return value is PDC_OK (0) in case call succeeded.
+ */
+int __init pdc_instr(unsigned int *instr)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_INSTR, 0UL, __pa(pdc_result));
+	convert_to_wide(pdc_result);
+	*instr = pdc_result[0];
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_chassis_info - Return chassis information.
+ * @chassis_info: The return buffer for the chassis information.
+ * @led_info: The memory buffer for the LED/LCD information.
+ * @len: The size in bytes of the led_info buffer.
+ *
+ * An HVERSION dependent call for returning the chassis information.
+ * Both buffers are staged through the static, properly aligned
+ * pdc_result/pdc_result2 areas and copied back out after the call.
+ */
+int __init pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len)
+{
+        int retval;
+	unsigned long flags;
+
+        spin_lock_irqsave(&pdc_lock, flags);
+        memcpy(&pdc_result, chassis_info, sizeof(*chassis_info));
+        memcpy(&pdc_result2, led_info, len);
+        retval = mem_pdc_call(PDC_CHASSIS, PDC_RETURN_CHASSIS_INFO,
+                              __pa(pdc_result), __pa(pdc_result2), len);
+        memcpy(chassis_info, pdc_result, sizeof(*chassis_info));
+        memcpy(led_info, pdc_result2, len);
+        spin_unlock_irqrestore(&pdc_lock, flags);
+
+        return retval;
+}
+
+/**
+ * pdc_pat_chassis_send_log - Sends a PDC PAT CHASSIS log message.
+ * @retval: -1 on error, 0 on success. Other value are PDC errors
+ * 
+ * Must be correctly formatted or expect system crash
+ */
+#ifdef CONFIG_64BIT
+int pdc_pat_chassis_send_log(unsigned long state, unsigned long data)
+{
+	int retval = 0;
+	unsigned long flags;
+        
+	if (!is_pdc_pat())
+		return -1;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PAT_CHASSIS_LOG, PDC_PAT_CHASSIS_WRITE_LOG, __pa(&state), __pa(&data));
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+#endif
+
+/**
+ * pdc_chassis_disp - Updates chassis code
+ * @retval: -1 on error, 0 on success
+ */
+int pdc_chassis_disp(unsigned long disp)
+{
+	int retval = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_CHASSIS, PDC_CHASSIS_DISP, disp);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_chassis_warn - Fetches chassis warnings
+ * @retval: -1 on error, 0 on success
+ */
+int pdc_chassis_warn(unsigned long *warn)
+{
+	int retval = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_CHASSIS, PDC_CHASSIS_WARN, __pa(pdc_result));
+	*warn = pdc_result[0];
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+int pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info)
+{
+	int ret;
+
+	ret = mem_pdc_call(PDC_COPROC, PDC_COPROC_CFG, __pa(pdc_result));
+	convert_to_wide(pdc_result);
+	pdc_coproc_info->ccr_functional = pdc_result[0];
+	pdc_coproc_info->ccr_present = pdc_result[1];
+	pdc_coproc_info->revision = pdc_result[17];
+	pdc_coproc_info->model = pdc_result[18];
+
+	return ret;
+}
+
+/**
+ * pdc_coproc_cfg - To identify coprocessors attached to the processor.
+ * @pdc_coproc_info: Return buffer address.
+ *
+ * This PDC call returns the presence and status of all the coprocessors
+ * attached to the processor.
+ */
+int pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	ret = pdc_coproc_cfg_unlocked(pdc_coproc_info);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return ret;
+}
+
+/**
+ * pdc_iodc_read - Read data from the modules IODC.
+ * @actcnt: The actual number of bytes.
+ * @hpa: The HPA of the module for the iodc read.
+ * @index: The iodc entry point.
+ * @iodc_data: A memory buffer for the iodc options.
+ * @iodc_data_size: Size of the memory buffer.
+ *
+ * This PDC call reads from the IODC of the module specified by the hpa
+ * argument.
+ */
+int pdc_iodc_read(unsigned long *actcnt, unsigned long hpa, unsigned int index,
+		  void *iodc_data, unsigned int iodc_data_size)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_IODC, PDC_IODC_READ, __pa(pdc_result), hpa, 
+			      index, __pa(pdc_result2), iodc_data_size);
+	convert_to_wide(pdc_result);
+	*actcnt = pdc_result[0];
+	memcpy(iodc_data, pdc_result2, iodc_data_size);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL(pdc_iodc_read);
+
+/**
+ * pdc_system_map_find_mods - Locate unarchitected modules.
+ * @pdc_mod_info: Return buffer address.
+ * @mod_path: pointer to dev path structure.
+ * @mod_index: fixed address module index.
+ *
+ * To locate and identify modules which reside at fixed I/O addresses, which
+ * do not self-identify via architected bus walks.
+ */
+int pdc_system_map_find_mods(struct pdc_system_map_mod_info *pdc_mod_info,
+			     struct pdc_module_path *mod_path, long mod_index)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_MODULE, __pa(pdc_result), 
+			      __pa(pdc_result2), mod_index);
+	convert_to_wide(pdc_result);
+	memcpy(pdc_mod_info, pdc_result, sizeof(*pdc_mod_info));
+	memcpy(mod_path, pdc_result2, sizeof(*mod_path));
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	pdc_mod_info->mod_addr = f_extend(pdc_mod_info->mod_addr);
+	return retval;
+}
+
+/**
+ * pdc_system_map_find_addrs - Retrieve additional address ranges.
+ * @pdc_addr_info: Return buffer address.
+ * @mod_index: Fixed address module index.
+ * @addr_index: Address range index.
+ * 
+ * Retrieve additional information about subsequent address ranges for modules
+ * with multiple address ranges.  
+ */
+int pdc_system_map_find_addrs(struct pdc_system_map_addr_info *pdc_addr_info, 
+			      long mod_index, long addr_index)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_ADDRESS, __pa(pdc_result),
+			      mod_index, addr_index);
+	convert_to_wide(pdc_result);
+	memcpy(pdc_addr_info, pdc_result, sizeof(*pdc_addr_info));
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	pdc_addr_info->mod_addr = f_extend(pdc_addr_info->mod_addr);
+	return retval;
+}
+
+/**
+ * pdc_model_info - Return model information about the processor.
+ * @model: The return buffer.
+ *
+ * Returns the version numbers, identifiers, and capabilities from the processor module.
+ */
+int pdc_model_info(struct pdc_model *model) 
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_INFO, __pa(pdc_result), 0);
+	convert_to_wide(pdc_result);
+	memcpy(model, pdc_result, sizeof(*model));
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_model_sysmodel - Get the system model name.
+ * @name: A char array of at least 81 characters.
+ *
+ * Get system model name from PDC ROM (e.g. 9000/715 or 9000/778/B160L).
+ * Using OS_ID_HPUX will return the equivalent of the 'modelname' command
+ * on HP/UX.
+ */
+int pdc_model_sysmodel(char *name)
+{
+        int retval;
+	unsigned long flags;
+
+        spin_lock_irqsave(&pdc_lock, flags);
+        retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_SYSMODEL, __pa(pdc_result),
+                              OS_ID_HPUX, __pa(name));
+        convert_to_wide(pdc_result);
+
+        if (retval == PDC_OK) {
+                name[pdc_result[0]] = '\0'; /* add trailing '\0' */
+        } else {
+                name[0] = 0;
+        }
+        spin_unlock_irqrestore(&pdc_lock, flags);
+
+        return retval;
+}
+
+/**
+ * pdc_model_versions - Identify the version number of each processor.
+ * @versions: The return buffer.
+ * @id: The id of the component to check.
+ *
+ * Returns the version number for each processor component.
+ *
+ * This comment was here before, but I do not know what it means :( -RB
+ * id: 0 = cpu revision, 1 = boot-rom-version
+ */
+int pdc_model_versions(unsigned long *versions, int id)
+{
+        int retval;
+	unsigned long flags;
+
+        spin_lock_irqsave(&pdc_lock, flags);
+        retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_VERSIONS, __pa(pdc_result), id);
+        convert_to_wide(pdc_result);
+        *versions = pdc_result[0];
+        spin_unlock_irqrestore(&pdc_lock, flags);
+
+        return retval;
+}
+
+/**
+ * pdc_model_cpuid - Returns the CPU_ID.
+ * @cpu_id: The return buffer.
+ *
+ * Returns the CPU_ID value which uniquely identifies the cpu portion of
+ * the processor module.
+ */
+int pdc_model_cpuid(unsigned long *cpu_id)
+{
+        int retval;
+	unsigned long flags;
+
+        spin_lock_irqsave(&pdc_lock, flags);
+        pdc_result[0] = 0; /* preset zero (call may not be implemented!) */
+        retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CPU_ID, __pa(pdc_result), 0);
+        convert_to_wide(pdc_result);
+        *cpu_id = pdc_result[0];
+        spin_unlock_irqrestore(&pdc_lock, flags);
+
+        return retval;
+}
+
+/**
+ * pdc_model_capabilities - Returns the platform capabilities.
+ * @capabilities: The return buffer.
+ *
+ * Returns information about platform support for 32- and/or 64-bit
+ * OSes, IO-PDIR coherency, and virtual aliasing.
+ */
+int pdc_model_capabilities(unsigned long *capabilities)
+{
+        int retval;
+	unsigned long flags;
+
+        spin_lock_irqsave(&pdc_lock, flags);
+        pdc_result[0] = 0; /* preset zero (call may not be implemented!) */
+        retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES, __pa(pdc_result), 0);
+        convert_to_wide(pdc_result);
+        if (retval == PDC_OK) {
+                *capabilities = pdc_result[0];
+        } else {
+                *capabilities = PDC_MODEL_OS32;
+        }
+        spin_unlock_irqrestore(&pdc_lock, flags);
+
+        return retval;
+}
+
+/**
+ * pdc_cache_info - Return cache and TLB information.
+ * @cache_info: The return buffer.
+ *
+ * Returns information about the processor's cache and TLB.
+ */
+int pdc_cache_info(struct pdc_cache_info *cache_info)
+{
+        int retval;
+	unsigned long flags;
+
+        spin_lock_irqsave(&pdc_lock, flags);
+        retval = mem_pdc_call(PDC_CACHE, PDC_CACHE_INFO, __pa(pdc_result), 0);
+        convert_to_wide(pdc_result);
+        memcpy(cache_info, pdc_result, sizeof(*cache_info));
+        spin_unlock_irqrestore(&pdc_lock, flags);
+
+        return retval;
+}
+
+/**
+ * pdc_spaceid_bits - Return whether Space ID hashing is turned on.
+ * @space_bits: Should be 0, if not, bad mojo!
+ *
+ * Returns information about Space ID hashing.
+ */
+int pdc_spaceid_bits(unsigned long *space_bits)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	pdc_result[0] = 0;
+	retval = mem_pdc_call(PDC_CACHE, PDC_CACHE_RET_SPID, __pa(pdc_result), 0);
+	convert_to_wide(pdc_result);
+	*space_bits = pdc_result[0];
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+#ifndef CONFIG_PA20
+/**
+ * pdc_btlb_info - Return block TLB information.
+ * @btlb: The return buffer.
+ *
+ * Returns information about the hardware Block TLB.
+ */
+int pdc_btlb_info(struct pdc_btlb_info *btlb) 
+{
+        int retval;
+	unsigned long flags;
+
+        spin_lock_irqsave(&pdc_lock, flags);
+        retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
+        memcpy(btlb, pdc_result, sizeof(*btlb));
+        spin_unlock_irqrestore(&pdc_lock, flags);
+
+        if(retval < 0) {
+                btlb->max_size = 0;
+        }
+        return retval;
+}
+
+/**
+ * pdc_mem_map_hpa - Find fixed module information.  
+ * @address: The return buffer
+ * @mod_path: pointer to dev path structure.
+ *
+ * This call was developed for S700 workstations to allow the kernel to find
+ * the I/O devices (Core I/O). In the future (Kittyhawk and beyond) this
+ * call will be replaced (on workstations) by the architected PDC_SYSTEM_MAP
+ * call.
+ *
+ * This call is supported by all existing S700 workstations (up to  Gecko).
+ */
+int pdc_mem_map_hpa(struct pdc_memory_map *address,
+		struct pdc_module_path *mod_path)
+{
+        int retval;
+	unsigned long flags;
+
+        spin_lock_irqsave(&pdc_lock, flags);
+        memcpy(pdc_result2, mod_path, sizeof(*mod_path));
+        retval = mem_pdc_call(PDC_MEM_MAP, PDC_MEM_MAP_HPA, __pa(pdc_result),
+				__pa(pdc_result2));
+        memcpy(address, pdc_result, sizeof(*address));
+        spin_unlock_irqrestore(&pdc_lock, flags);
+
+        return retval;
+}
+#endif	/* !CONFIG_PA20 */
+
+/**
+ * pdc_lan_station_id - Get the LAN address.
+ * @lan_addr: The return buffer.
+ * @hpa: The network device HPA.
+ *
+ * Get the LAN station address when it is not directly available from the LAN hardware.
+ */
+int pdc_lan_station_id(char *lan_addr, unsigned long hpa)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_LAN_STATION_ID, PDC_LAN_STATION_ID_READ,
+			__pa(pdc_result), hpa);
+	if (retval < 0) {
+		/* FIXME: else read MAC from NVRAM */
+		memset(lan_addr, 0, PDC_LAN_STATION_ID_SIZE);
+	} else {
+		memcpy(lan_addr, pdc_result, PDC_LAN_STATION_ID_SIZE);
+	}
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL(pdc_lan_station_id);
+
+/**
+ * pdc_stable_read - Read data from Stable Storage.
+ * @staddr: Stable Storage address to access.
+ * @memaddr: The memory address where Stable Storage data shall be copied.
+ * @count: number of bytes to transfer. count is multiple of 4.
+ *
+ * This PDC call reads from the Stable Storage address supplied in staddr
+ * and copies count bytes to the memory address memaddr.
+ * The call will fail if staddr+count > PDC_STABLE size.
+ */
+int pdc_stable_read(unsigned long staddr, void *memaddr, unsigned long count)
+{
+       int retval;
+	unsigned long flags;
+
+       spin_lock_irqsave(&pdc_lock, flags);
+       retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_READ, staddr,
+               __pa(pdc_result), count);
+       convert_to_wide(pdc_result);
+       memcpy(memaddr, pdc_result, count);
+       spin_unlock_irqrestore(&pdc_lock, flags);
+
+       return retval;
+}
+EXPORT_SYMBOL(pdc_stable_read);
+
+/**
+ * pdc_stable_write - Write data to Stable Storage.
+ * @staddr: Stable Storage address to access.
+ * @memaddr: The memory address where Stable Storage data shall be read from.
+ * @count: number of bytes to transfer. count is multiple of 4.
+ *
+ * This PDC call reads count bytes from the supplied memaddr address,
+ * and copies count bytes to the Stable Storage address staddr.
+ * The call will fail if staddr+count > PDC_STABLE size.
+ */
+int pdc_stable_write(unsigned long staddr, void *memaddr, unsigned long count)
+{
+       int retval;
+	unsigned long flags;
+
+       spin_lock_irqsave(&pdc_lock, flags);
+       memcpy(pdc_result, memaddr, count);
+       convert_to_wide(pdc_result);
+       retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_WRITE, staddr,
+               __pa(pdc_result), count);
+       spin_unlock_irqrestore(&pdc_lock, flags);
+
+       return retval;
+}
+EXPORT_SYMBOL(pdc_stable_write);
+
+/**
+ * pdc_stable_get_size - Get Stable Storage size in bytes.
+ * @size: pointer where the size will be stored.
+ *
+ * This PDC call returns the number of bytes in the processor's Stable
+ * Storage, which is the number of contiguous bytes implemented in Stable
+ * Storage starting from staddr=0. size in an unsigned 64-bit integer
+ * which is a multiple of four.
+ */
+int pdc_stable_get_size(unsigned long *size)
+{
+       int retval;
+	unsigned long flags;
+
+       spin_lock_irqsave(&pdc_lock, flags);
+       retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_RETURN_SIZE, __pa(pdc_result));
+       *size = pdc_result[0];
+       spin_unlock_irqrestore(&pdc_lock, flags);
+
+       return retval;
+}
+EXPORT_SYMBOL(pdc_stable_get_size);
+
+/**
+ * pdc_stable_verify_contents - Checks that Stable Storage contents are valid.
+ *
+ * This PDC call is meant to be used to check the integrity of the current
+ * contents of Stable Storage.
+ */
+int pdc_stable_verify_contents(void)
+{
+       int retval;
+	unsigned long flags;
+
+       spin_lock_irqsave(&pdc_lock, flags);
+       retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_VERIFY_CONTENTS);
+       spin_unlock_irqrestore(&pdc_lock, flags);
+
+       return retval;
+}
+EXPORT_SYMBOL(pdc_stable_verify_contents);
+
+/**
+ * pdc_stable_initialize - Sets Stable Storage contents to zero and initialize
+ * the validity indicator.
+ *
+ * This PDC call will erase all contents of Stable Storage. Use with care!
+ */
+int pdc_stable_initialize(void)
+{
+       int retval;
+	unsigned long flags;
+
+       spin_lock_irqsave(&pdc_lock, flags);
+       retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_INITIALIZE);
+       spin_unlock_irqrestore(&pdc_lock, flags);
+
+       return retval;
+}
+EXPORT_SYMBOL(pdc_stable_initialize);
+
+/**
+ * pdc_get_initiator - Get the SCSI Interface Card params (SCSI ID, SDTR, SE or LVD)
+ * @hwpath: fully bc.mod style path to the device.
+ * @initiator: the array to return the result into
+ *
+ * Get the SCSI operational parameters from PDC.
+ * Needed since HPUX never used BIOS or symbios card NVRAM.
+ * Most ncr/sym cards won't have an entry and just use whatever
+ * capabilities of the card are (eg Ultra, LVD). But there are
+ * several cases where it's useful:
+ *    o set SCSI id for Multi-initiator clusters,
+ *    o cable too long (ie SE scsi 10Mhz won't support 6m length),
+ *    o bus width exported is less than what the interface chip supports.
+ */
+int pdc_get_initiator(struct hardware_path *hwpath, struct pdc_initiator *initiator)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+
+/* BCJ-XXXX series boxes. E.G. "9000/785/C3000" */
+#define IS_SPROCKETS() (strlen(boot_cpu_data.pdc.sys_model_name) == 14 && \
+	strncmp(boot_cpu_data.pdc.sys_model_name, "9000/785", 8) == 0)
+
+	retval = mem_pdc_call(PDC_INITIATOR, PDC_GET_INITIATOR, 
+			      __pa(pdc_result), __pa(hwpath));
+	if (retval < PDC_OK)
+		goto out;
+
+	if (pdc_result[0] < 16) {
+		initiator->host_id = pdc_result[0];
+	} else {
+		initiator->host_id = -1;
+	}
+
+	/*
+	 * Sprockets and Piranha return 20 or 40 (MT/s).  Prelude returns
+	 * 1, 2, 5 or 10 for 5, 10, 20 or 40 MT/s, respectively
+	 */
+	switch (pdc_result[1]) {
+		case  1: initiator->factor = 50; break;
+		case  2: initiator->factor = 25; break;
+		case  5: initiator->factor = 12; break;
+		case 25: initiator->factor = 10; break;
+		case 20: initiator->factor = 12; break;
+		case 40: initiator->factor = 10; break;
+		default: initiator->factor = -1; break;
+	}
+
+	if (IS_SPROCKETS()) {
+		initiator->width = pdc_result[4];
+		initiator->mode = pdc_result[5];
+	} else {
+		initiator->width = -1;
+		initiator->mode = -1;
+	}
+
+ out:
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return (retval >= PDC_OK);
+}
+EXPORT_SYMBOL(pdc_get_initiator);
+
+
+/**
+ * pdc_pci_irt_size - Get the number of entries in the interrupt routing table.
+ * @num_entries: The return value.
+ * @hpa: The HPA for the device.
+ *
+ * This PDC function returns the number of entries in the specified cell's
+ * interrupt table.
+ * Similar to PDC_PAT stuff - but added for Forte/Allegro boxes
+ */ 
+int pdc_pci_irt_size(unsigned long *num_entries, unsigned long hpa)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_GET_INT_TBL_SIZE, 
+			      __pa(pdc_result), hpa);
+	convert_to_wide(pdc_result);
+	*num_entries = pdc_result[0];
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/** 
+ * pdc_pci_irt - Get the PCI interrupt routing table.
+ * @num_entries: The number of entries in the table.
+ * @hpa: The Hard Physical Address of the device.
+ * @tbl: 
+ *
+ * Get the PCI interrupt routing table for the device at the given HPA.
+ * Similar to PDC_PAT stuff - but added for Forte/Allegro boxes
+ */
+int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl)
+{
+	int retval;
+	unsigned long flags;
+
+	BUG_ON((unsigned long)tbl & 0x7);
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	pdc_result[0] = num_entries;
+	retval = mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_GET_INT_TBL, 
+			      __pa(pdc_result), hpa, __pa(tbl));
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+
+#if 0	/* UNTESTED CODE - left here in case someone needs it */
+
+/** 
+ * pdc_pci_config_read - read PCI config space.
+ * @hpa		token from PDC to indicate which PCI device
+ * @pci_addr	configuration space address to read from
+ *
+ * Read PCI Configuration space *before* linux PCI subsystem is running.
+ */
+unsigned int pdc_pci_config_read(void *hpa, unsigned long cfg_addr)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	pdc_result[0] = 0;
+	pdc_result[1] = 0;
+	retval = mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_READ_CONFIG, 
+			      __pa(pdc_result), hpa, cfg_addr&~3UL, 4UL);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval ? ~0 : (unsigned int) pdc_result[0];
+}
+
+
+/** 
+ * pdc_pci_config_write - write PCI config space.
+ * @hpa		token from PDC to indicate which PCI device
+ * @cfg_addr	configuration space address to write
+ * @val		value we want in the 32-bit register
+ *
+ * Write PCI Configuration space *before* linux PCI subsystem is running.
+ */
+void pdc_pci_config_write(void *hpa, unsigned long cfg_addr, unsigned int val)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	pdc_result[0] = 0;
+	retval = mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_WRITE_CONFIG, 
+			      __pa(pdc_result), hpa,
+			      cfg_addr&~3UL, 4UL, (unsigned long) val);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+	/* Fix: the original ended with "return retval;", which is invalid in
+	 * a void function and would break the build if this #if 0 region
+	 * were ever enabled.  Keep retval for debugger inspection only. */
+	(void) retval;
+}
+#endif /* UNTESTED CODE */
+
+/**
+ * pdc_tod_read - Read the Time-Of-Day clock.
+ * @tod: The return buffer:
+ *
+ * Read the Time-Of-Day clock
+ */
+int pdc_tod_read(struct pdc_tod *tod)
+{
+        int retval;
+	unsigned long flags;
+
+        spin_lock_irqsave(&pdc_lock, flags);
+        retval = mem_pdc_call(PDC_TOD, PDC_TOD_READ, __pa(pdc_result), 0);
+        convert_to_wide(pdc_result);
+        memcpy(tod, pdc_result, sizeof(*tod));
+        spin_unlock_irqrestore(&pdc_lock, flags);
+
+        return retval;
+}
+EXPORT_SYMBOL(pdc_tod_read);
+
+/**
+ * pdc_mem_pdt_info - Retrieve Page Deallocation Table summary information.
+ * @rinfo: The return buffer (struct pdc_mem_retinfo).
+ *
+ * Issues the PDC_MEM/PDC_MEM_MEMINFO firmware call and copies the widened
+ * result into @rinfo.  Returns the PDC status (PDC_OK on success).
+ */
+int pdc_mem_pdt_info(struct pdc_mem_retinfo *rinfo)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_MEM, PDC_MEM_MEMINFO, __pa(pdc_result), 0);
+	convert_to_wide(pdc_result);
+	memcpy(rinfo, pdc_result, sizeof(*rinfo));
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_mem_pdt_read_entries - Read Page Deallocation Table entries.
+ * @pret: The return buffer (copied from the PDC result on success only).
+ * @pdt_entries_ptr: Buffer the firmware fills with the PDT entries.
+ *
+ * Issues PDC_MEM/PDC_MEM_READ_PDT.  Returns the PDC status, or PDC_ERROR
+ * on a 64-bit kernel running narrow firmware (see note below).
+ */
+int pdc_mem_pdt_read_entries(struct pdc_mem_read_pdt *pret,
+		unsigned long *pdt_entries_ptr)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_MEM, PDC_MEM_READ_PDT, __pa(pdc_result),
+			__pa(pdt_entries_ptr));
+	if (retval == PDC_OK) {
+		convert_to_wide(pdc_result);
+		memcpy(pret, pdc_result, sizeof(*pret));
+	}
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+#ifdef CONFIG_64BIT
+	/*
+	 * 64-bit kernels should not call this PDT function in narrow mode.
+	 * The pdt_entries_ptr array above will now contain 32-bit values
+	 */
+	if (WARN_ON_ONCE((retval == PDC_OK) && parisc_narrow_firmware))
+		return PDC_ERROR;
+#endif
+
+	return retval;
+}
+
+/**
+ * pdc_tod_set - Set the Time-Of-Day clock.
+ * @sec: The number of seconds since epoch.
+ * @usec: The number of micro seconds.
+ *
+ * Set the Time-Of-Day clock.
+ */ 
+int pdc_tod_set(unsigned long sec, unsigned long usec)
+{
+        int retval;
+	unsigned long flags;
+
+        spin_lock_irqsave(&pdc_lock, flags);
+        retval = mem_pdc_call(PDC_TOD, PDC_TOD_WRITE, sec, usec);
+        spin_unlock_irqrestore(&pdc_lock, flags);
+
+        return retval;
+}
+EXPORT_SYMBOL(pdc_tod_set);
+
+#ifdef CONFIG_64BIT
+int pdc_mem_mem_table(struct pdc_memory_table_raddr *r_addr,
+		struct pdc_memory_table *tbl, unsigned long entries)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_MEM, PDC_MEM_TABLE, __pa(pdc_result), __pa(pdc_result2), entries);
+	convert_to_wide(pdc_result);
+	memcpy(r_addr, pdc_result, sizeof(*r_addr));
+	memcpy(tbl, pdc_result2, entries * sizeof(*tbl));
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+#endif /* CONFIG_64BIT */
+
+/* FIXME: Is this pdc used?  I could not find type reference to ftc_bitmap
+ * so I guessed at unsigned long.  Someone who knows what this does, can fix
+ * it later. :)
+ */
+int pdc_do_firm_test_reset(unsigned long ftc_bitmap)
+{
+	int retval;
+	unsigned long flags;
+
+	/* pdc_lock serializes all firmware calls; PDC is not reentrant. */
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_BROADCAST_RESET, PDC_DO_FIRM_TEST_RESET,
+			      PDC_FIRM_TEST_MAGIC, ftc_bitmap);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_do_reset - Reset the system.
+ *
+ * Ask firmware to reset the system via PDC_BROADCAST_RESET/PDC_DO_RESET.
+ * Returns a PDC status code if the firmware call returns at all.
+ */
+int pdc_do_reset(void)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_BROADCAST_RESET, PDC_DO_RESET);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_soft_power_info - Enable soft power switch.
+ * @power_reg: address of soft power register
+ *
+ * Return the absolute address of the soft power switch register in
+ * @power_reg; on failure @power_reg is left as -1.
+ */
+int __init pdc_soft_power_info(unsigned long *power_reg)
+{
+	int retval;
+	unsigned long flags;
+
+	/* Default in case firmware does not implement PDC_SOFT_POWER. */
+	*power_reg = (unsigned long) (-1);
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_SOFT_POWER, PDC_SOFT_POWER_INFO, __pa(pdc_result), 0);
+	if (retval == PDC_OK) {
+		convert_to_wide(pdc_result);
+		*power_reg = f_extend(pdc_result[0]);
+	}
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/*
+ * pdc_soft_power_button - Control the soft power button behaviour
+ * @sw_control: 0 for hardware control, 1 for software control
+ *
+ * This PDC function places the soft power button under software or
+ * hardware control.
+ * Under software control, the OS decides when to allow the system to
+ * be shut down. Under hardware control, pressing the power button
+ * powers off the system immediately.
+ */
+int pdc_soft_power_button(int sw_control)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_SOFT_POWER, PDC_SOFT_POWER_ENABLE, __pa(pdc_result), sw_control);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/*
+ * pdc_io_reset - Hack to avoid overlapping range registers of Bridges devices.
+ * Primarily a problem on T600 (which parisc-linux doesn't support) but
+ * who knows what other platform firmware might do with this OS "hook".
+ *
+ * No return value: the PDC_IO/PDC_IO_RESET call result is intentionally
+ * ignored (best-effort firmware hook).
+ */
+void pdc_io_reset(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	mem_pdc_call(PDC_IO, PDC_IO_RESET, 0);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+}
+
+/*
+ * pdc_io_reset_devices - Hack to stop the USB controller.
+ *
+ * If PDC used the USB controller, it may still be running and will
+ * crash the machine during iommu setup because of ongoing DMA. This
+ * PDC call stops the USB controller.
+ * Normally called after pdc_io_reset().  The firmware call result is
+ * intentionally ignored (best-effort).
+ */
+void pdc_io_reset_devices(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	mem_pdc_call(PDC_IO, PDC_IO_RESET_DEVICES, 0);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+}
+
+#endif /* defined(BOOTLOADER) */
+
+/* Shared scratch buffers for the IODC console calls below.
+ * locked by pdc_console_lock -- NOTE(review): within this file they are
+ * also touched around pdc_lock; confirm which lock callers rely on. */
+static int __attribute__((aligned(8)))   iodc_retbuf[32];
+static char __attribute__((aligned(64))) iodc_dbuf[4096];
+
+/**
+ * pdc_iodc_print - Console print using IODC.
+ * @str: the string to output.
+ * @count: length of str
+ *
+ * Note that only these special chars are architected for console IODC io:
+ * BEL, BS, CR, and LF. Others are passed through.
+ * Since the HP console requires CR+LF to perform a 'newline', we translate
+ * "\n" to "\r\n".
+ *
+ * Returns the number of input bytes consumed (may be less than @count
+ * if @count exceeds the IODC buffer size).
+ */
+int pdc_iodc_print(const unsigned char *str, unsigned count)
+{
+	unsigned int i;
+	unsigned long flags;
+
+	/*
+	 * Clamp so that even a worst-case "\n" -> "\r\n" expansion of the
+	 * last byte cannot write past the end of iodc_dbuf.
+	 */
+	if (count > sizeof(iodc_dbuf) - 1)
+		count = sizeof(iodc_dbuf) - 1;
+
+	/*
+	 * Fill iodc_dbuf under pdc_lock: the buffer is shared scratch and
+	 * filling it before taking the lock races with concurrent callers.
+	 */
+	spin_lock_irqsave(&pdc_lock, flags);
+	for (i = 0; i < count;) {
+		switch(str[i]) {
+		case '\n':
+			iodc_dbuf[i+0] = '\r';
+			iodc_dbuf[i+1] = '\n';
+			i += 2;
+			goto print;
+		default:
+			iodc_dbuf[i] = str[i];
+			i++;
+			break;
+		}
+	}
+
+print:
+	real32_call(PAGE0->mem_cons.iodc_io,
+		    (unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT,
+		    PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers),
+		    __pa(iodc_retbuf), 0, __pa(iodc_dbuf), i, 0);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return i;
+}
+
+#if !defined(BOOTLOADER)
+/**
+ * pdc_iodc_getc - Read a character (non-blocking) from the PDC console.
+ *
+ * Read a character (non-blocking) from the PDC console, returns -1 if
+ * key is not present.  Returns 0 when there is no console input device
+ * at all, otherwise the character read via the IODC ENTRY_IO_CIN call.
+ */
+int pdc_iodc_getc(void)
+{
+	int ch;
+	int status;
+	unsigned long flags;
+
+	/* Bail if no console input device. */
+	if (!PAGE0->mem_kbd.iodc_io)
+		return 0;
+	
+	/* wait for a keyboard (rs232)-input */
+	spin_lock_irqsave(&pdc_lock, flags);
+	real32_call(PAGE0->mem_kbd.iodc_io,
+		    (unsigned long)PAGE0->mem_kbd.hpa, ENTRY_IO_CIN,
+		    PAGE0->mem_kbd.spa, __pa(PAGE0->mem_kbd.dp.layers), 
+		    __pa(iodc_retbuf), 0, __pa(iodc_dbuf), 1, 0);
+
+	/* Copy out of the shared IODC buffers while still holding the lock. */
+	ch = *iodc_dbuf;
+	status = *iodc_retbuf;
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	/* status == 0 means no character was available. */
+	if (status == 0)
+	    return -1;
+	
+	return ch;
+}
+
+/**
+ * pdc_sti_call - Invoke an STI (graphics ROM) routine in 32-bit real mode.
+ * @func: address of the STI routine to call
+ * @flags: flags argument passed to the STI routine
+ * @inptr: input argument passed to the STI routine
+ * @outputr: output argument passed to the STI routine
+ * @glob_cfg: global configuration argument passed to the STI routine
+ *
+ * The call is serialized with all other firmware calls via pdc_lock.
+ */
+int pdc_sti_call(unsigned long func, unsigned long flags,
+		 unsigned long inptr, unsigned long outputr,
+		 unsigned long glob_cfg)
+{
+	int retval;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&pdc_lock, irqflags);
+	retval = real32_call(func, flags, inptr, outputr, glob_cfg);
+	spin_unlock_irqrestore(&pdc_lock, irqflags);
+
+	return retval;
+}
+EXPORT_SYMBOL(pdc_sti_call);
+
+#ifdef CONFIG_64BIT
+/**
+ * pdc_pat_cell_get_number - Returns the cell number.
+ * @cell_info: The return buffer.
+ *
+ * This PDC call returns the cell number of the cell from which the call
+ * is made.
+ *
+ * NOTE(review): @cell_info is filled even when the firmware call failed;
+ * callers must check the returned status before using it.
+ */
+int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PAT_CELL, PDC_PAT_CELL_GET_NUMBER, __pa(pdc_result));
+	memcpy(cell_info, pdc_result, sizeof(*cell_info));
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_pat_cell_module - Retrieve the cell's module information.
+ * @actcnt: The number of bytes written to mem_addr.
+ * @ploc: The physical location.
+ * @mod: The module index.
+ * @view_type: The view of the address type.
+ * @mem_addr: The return buffer.
+ *
+ * This PDC call returns information about each module attached to the cell
+ * at the specified location.
+ */
+int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod,
+			unsigned long view_type, void *mem_addr)
+{
+	int retval;
+	unsigned long flags;
+	/* static bounce buffer; safe because access is serialized by pdc_lock */
+	static struct pdc_pat_cell_mod_maddr_block result __attribute__ ((aligned (8)));
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PAT_CELL, PDC_PAT_CELL_MODULE, __pa(pdc_result), 
+			      ploc, mod, view_type, __pa(&result));
+	/* only copy out on success (PDC_OK == 0); *actcnt bytes are valid */
+	if(!retval) {
+		*actcnt = pdc_result[0];
+		memcpy(mem_addr, &result, *actcnt);
+	}
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_pat_cpu_get_number - Retrieve the cpu number.
+ * @cpu_info: The return buffer.
+ * @hpa: The Hard Physical Address of the CPU.
+ *
+ * Retrieve the cpu number for the cpu at the specified HPA.
+ *
+ * NOTE(review): @cpu_info is filled even when the firmware call failed;
+ * callers must check the returned status before using it.
+ */
+int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, unsigned long hpa)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	/* &pdc_result and pdc_result are the same address (array decay) */
+	retval = mem_pdc_call(PDC_PAT_CPU, PDC_PAT_CPU_GET_NUMBER,
+			      __pa(&pdc_result), hpa);
+	memcpy(cpu_info, pdc_result, sizeof(*cpu_info));
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_pat_get_irt_size - Retrieve the number of entries in the cell's interrupt table.
+ * @num_entries: The return value.
+ * @cell_num: The target cell.
+ *
+ * This PDC function returns the number of entries in the specified cell's
+ * interrupt table.
+ *
+ * NOTE(review): *num_entries is written even when the firmware call
+ * failed; callers must check the returned status.
+ */
+int pdc_pat_get_irt_size(unsigned long *num_entries, unsigned long cell_num)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_GET_PCI_ROUTING_TABLE_SIZE,
+			      __pa(pdc_result), cell_num);
+	*num_entries = pdc_result[0];
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_pat_get_irt - Retrieve the cell's interrupt table.
+ * @r_addr: The return buffer (must be large enough for the table;
+ *          size it via pdc_pat_get_irt_size()).
+ * @cell_num: The target cell.
+ *
+ * This PDC function returns the actual interrupt table for the specified cell.
+ * Firmware writes directly into @r_addr.
+ */
+int pdc_pat_get_irt(void *r_addr, unsigned long cell_num)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_GET_PCI_ROUTING_TABLE,
+			      __pa(r_addr), cell_num);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_pat_pd_get_addr_map - Retrieve information about memory address ranges.
+ * @actual_len: The return buffer (number of bytes actually returned).
+ * @mem_addr: Pointer to the memory buffer.
+ * @count: The number of bytes to read from the buffer.
+ * @offset: The offset with respect to the beginning of the buffer.
+ *
+ * NOTE(review): *actual_len and @mem_addr are written even when the
+ * firmware call failed; callers must check the returned status.
+ */
+int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, 
+			    unsigned long count, unsigned long offset)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PAT_PD, PDC_PAT_PD_GET_ADDR_MAP, __pa(pdc_result), 
+			      __pa(pdc_result2), count, offset);
+	*actual_len = pdc_result[0];
+	memcpy(mem_addr, pdc_result2, *actual_len);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_pat_io_pci_cfg_read - Read PCI configuration space.
+ * @pci_addr: PCI configuration space address for which the read request is being made.
+ * @pci_size: Size of read in bytes. Valid values are 1, 2, and 4.
+ * @mem_addr: Pointer to return memory buffer.
+ *
+ * NOTE(review): *mem_addr is stored even when the firmware call failed,
+ * and is left untouched for any @pci_size other than 1, 2 or 4.
+ */
+int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *mem_addr)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_PCI_CONFIG_READ,
+					__pa(pdc_result), pci_addr, pci_size);
+	/* store only the low-order bytes requested by the caller */
+	switch(pci_size) {
+		case 1: *(u8 *) mem_addr =  (u8)  pdc_result[0]; break;
+		case 2: *(u16 *)mem_addr =  (u16) pdc_result[0]; break;
+		case 4: *(u32 *)mem_addr =  (u32) pdc_result[0]; break;
+	}
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_pat_io_pci_cfg_write - Write PCI configuration space.
+ * @pci_addr: PCI configuration space address for which the write request is being made.
+ * @pci_size: Size of write in bytes. Valid values are 1, 2, and 4.
+ * @val: 1, 2, or 4 byte value in the low order end of the argument to be
+ *       written to PCI Config space.
+ *
+ */
+int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_PCI_CONFIG_WRITE,
+				pci_addr, pci_size, val);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_pat_mem_pdt_info - Retrieve information about page deallocation table
+ * @rinfo: memory pdt information
+ *
+ * @rinfo is only written when the firmware call succeeds.
+ */
+int pdc_pat_mem_pdt_info(struct pdc_pat_mem_retinfo *rinfo)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_PD_INFO,
+			__pa(&pdc_result));
+	if (retval == PDC_OK)
+		memcpy(rinfo, &pdc_result, sizeof(*rinfo));
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_pat_mem_pdt_cell_info - Retrieve information about page deallocation
+ *				table of a cell
+ * @rinfo: memory pdt information
+ * @cell: cell number
+ *
+ * @rinfo is only written when the firmware call succeeds.
+ */
+int pdc_pat_mem_pdt_cell_info(struct pdc_pat_mem_cell_pdt_retinfo *rinfo,
+		unsigned long cell)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_CELL_INFO,
+			__pa(&pdc_result), cell);
+	if (retval == PDC_OK)
+		memcpy(rinfo, &pdc_result, sizeof(*rinfo));
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_pat_mem_read_cell_pdt - Read PDT entries from (old) PAT firmware
+ * @pret: array of PDT entries
+ * @pdt_entries_ptr: ptr to hold number of PDT entries
+ * @max_entries: maximum number of entries to be read
+ *
+ * Firmware writes the entries into @pdt_entries_ptr; @pret is filled in
+ * to mimic the PDC_PAT_MEM_PD_READ return layout, clamped to
+ * @max_entries.
+ */
+int pdc_pat_mem_read_cell_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
+		unsigned long *pdt_entries_ptr, unsigned long max_entries)
+{
+	int retval;
+	unsigned long flags, entries;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	/* PDC_PAT_MEM_CELL_READ is available on early PAT machines only */
+	retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_CELL_READ,
+			__pa(&pdc_result), parisc_cell_num,
+			__pa(pdt_entries_ptr));
+
+	if (retval == PDC_OK) {
+		/* build up return value as for PDC_PAT_MEM_PD_READ */
+		entries = min(pdc_result[0], max_entries);
+		pret->pdt_entries = entries;
+		pret->actual_count_bytes = entries * sizeof(unsigned long);
+	}
+
+	spin_unlock_irqrestore(&pdc_lock, flags);
+	/* warn if firmware reported more entries than the caller's buffer */
+	WARN_ON(retval == PDC_OK && pdc_result[0] > max_entries);
+
+	return retval;
+}
+/**
+ * pdc_pat_mem_read_pd_pdt - Read PDT entries from (newer) PAT firmware
+ * @pret: array of PDT entries
+ * @pdt_entries_ptr: ptr to hold number of PDT entries
+ * @count: number of bytes to read
+ * @offset: offset to start (in bytes)
+ *
+ * Firmware writes the entries into @pdt_entries_ptr; @pret is only
+ * filled in on success, with the byte count clamped to @count.
+ */
+int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
+		unsigned long *pdt_entries_ptr, unsigned long count,
+		unsigned long offset)
+{
+	int retval;
+	unsigned long flags, entries;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_PD_READ,
+		__pa(&pdc_result), __pa(pdt_entries_ptr),
+		count, offset);
+
+	if (retval == PDC_OK) {
+		/* pdc_result[0] is the byte count reported by firmware */
+		entries = min(pdc_result[0], count);
+		pret->actual_count_bytes = entries;
+		pret->pdt_entries = entries / sizeof(unsigned long);
+	}
+
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_pat_mem_get_dimm_phys_location - Get physical DIMM slot via PAT firmware
+ * @pret: ptr to hold returned information
+ * @phys_addr: physical address to examine
+ *
+ * @pret is only written when the firmware call succeeds.
+ */
+int pdc_pat_mem_get_dimm_phys_location(
+		struct pdc_pat_mem_phys_mem_location *pret,
+		unsigned long phys_addr)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_ADDRESS,
+		__pa(&pdc_result), phys_addr);
+
+	if (retval == PDC_OK)
+		memcpy(pret, &pdc_result, sizeof(*pret));
+
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+#endif /* CONFIG_64BIT */
+#endif /* defined(BOOTLOADER) */
+
+
+/***************** 32-bit real-mode calls ***********/
+/* The struct below is used
+ * to overlay real_stack (real2.S), preparing a 32-bit call frame.
+ * real32_call_asm() then uses this stack in narrow real mode
+ */
+
+/* 32-bit call frame overlaid on real_stack (real2.S); consumed by
+ * real32_call_asm() in narrow real mode.  Field order (arg13 down to
+ * arg0, then frame marker and sp) matches that frame layout. */
+struct narrow_stack {
+	/* use int, not long which is 64 bits */
+	unsigned int arg13;
+	unsigned int arg12;
+	unsigned int arg11;
+	unsigned int arg10;
+	unsigned int arg9;
+	unsigned int arg8;
+	unsigned int arg7;
+	unsigned int arg6;
+	unsigned int arg5;
+	unsigned int arg4;
+	unsigned int arg3;
+	unsigned int arg2;
+	unsigned int arg1;
+	unsigned int arg0;
+	unsigned int frame_marker[8];
+	unsigned int sp;
+	/* in reality, there's nearly 8k of stack after this */
+};
+
+/**
+ * real32_call - Perform a firmware call in 32-bit (narrow) real mode.
+ * @fn: address of the routine to call
+ *
+ * The 14 variadic arguments are truncated to 32 bits and stored into
+ * the shared real_stack frame before real32_call_asm() invokes @fn.
+ * NOTE(review): real_stack is a single shared frame, so callers must
+ * serialize -- in this file it is always called under pdc_lock.
+ */
+long real32_call(unsigned long fn, ...)
+{
+	va_list args;
+	extern struct narrow_stack real_stack;
+	extern unsigned long real32_call_asm(unsigned int *,
+					     unsigned int *, 
+					     unsigned int);
+	
+	va_start(args, fn);
+	real_stack.arg0 = va_arg(args, unsigned int);
+	real_stack.arg1 = va_arg(args, unsigned int);
+	real_stack.arg2 = va_arg(args, unsigned int);
+	real_stack.arg3 = va_arg(args, unsigned int);
+	real_stack.arg4 = va_arg(args, unsigned int);
+	real_stack.arg5 = va_arg(args, unsigned int);
+	real_stack.arg6 = va_arg(args, unsigned int);
+	real_stack.arg7 = va_arg(args, unsigned int);
+	real_stack.arg8 = va_arg(args, unsigned int);
+	real_stack.arg9 = va_arg(args, unsigned int);
+	real_stack.arg10 = va_arg(args, unsigned int);
+	real_stack.arg11 = va_arg(args, unsigned int);
+	real_stack.arg12 = va_arg(args, unsigned int);
+	real_stack.arg13 = va_arg(args, unsigned int);
+	va_end(args);
+	
+	/* fn is narrowed to 32 bits by real32_call_asm's prototype */
+	return real32_call_asm(&real_stack.sp, &real_stack.arg0, fn);
+}
+
+#ifdef CONFIG_64BIT
+/***************** 64-bit real-mode calls ***********/
+
+/* 64-bit call frame overlaid on real64_stack (real2.S); consumed by
+ * real64_call_asm() in wide real mode. */
+struct wide_stack {
+	unsigned long arg0;
+	unsigned long arg1;
+	unsigned long arg2;
+	unsigned long arg3;
+	unsigned long arg4;
+	unsigned long arg5;
+	unsigned long arg6;
+	unsigned long arg7;
+	unsigned long arg8;
+	unsigned long arg9;
+	unsigned long arg10;
+	unsigned long arg11;
+	unsigned long arg12;
+	unsigned long arg13;
+	unsigned long frame_marker[2];	/* rp, previous sp */
+	unsigned long sp;
+	/* in reality, there's nearly 8k of stack after this */
+};
+
+/**
+ * real64_call - Perform a firmware call in 64-bit (wide) real mode.
+ * @fn: address of the routine to call
+ *
+ * The 14 variadic arguments are stored into the shared real64_stack
+ * frame before real64_call_asm() invokes @fn.
+ * NOTE(review): real64_stack is a single shared frame, so callers must
+ * serialize -- in this file it is always called under pdc_lock.
+ */
+long real64_call(unsigned long fn, ...)
+{
+	va_list args;
+	extern struct wide_stack real64_stack;
+	extern unsigned long real64_call_asm(unsigned long *,
+					     unsigned long *, 
+					     unsigned long);
+    
+	va_start(args, fn);
+	real64_stack.arg0 = va_arg(args, unsigned long);
+	real64_stack.arg1 = va_arg(args, unsigned long);
+	real64_stack.arg2 = va_arg(args, unsigned long);
+	real64_stack.arg3 = va_arg(args, unsigned long);
+	real64_stack.arg4 = va_arg(args, unsigned long);
+	real64_stack.arg5 = va_arg(args, unsigned long);
+	real64_stack.arg6 = va_arg(args, unsigned long);
+	real64_stack.arg7 = va_arg(args, unsigned long);
+	real64_stack.arg8 = va_arg(args, unsigned long);
+	real64_stack.arg9 = va_arg(args, unsigned long);
+	real64_stack.arg10 = va_arg(args, unsigned long);
+	real64_stack.arg11 = va_arg(args, unsigned long);
+	real64_stack.arg12 = va_arg(args, unsigned long);
+	real64_stack.arg13 = va_arg(args, unsigned long);
+	va_end(args);
+	
+	return real64_call_asm(&real64_stack.sp, &real64_stack.arg0, fn);
+}
+
+#endif /* CONFIG_64BIT */
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/ftrace.c b/src/kernel/linux/v4.14/arch/parisc/kernel/ftrace.c
new file mode 100644
index 0000000..6fa8535
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/ftrace.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Code for tracing calls in Linux kernel.
+ * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
+ *
+ * based on code for x86 which is:
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ * future possible enhancements:
+ * 	- add CONFIG_DYNAMIC_FTRACE
+ *	- add CONFIG_STACK_TRACER
+ */
+
+#include <linux/init.h>
+#include <linux/ftrace.h>
+
+#include <asm/assembly.h>
+#include <asm/sections.h>
+#include <asm/ftrace.h>
+
+
+#define __hot __attribute__ ((__section__ (".text.hot")))
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ *
+ * @parent: pointer to the caller's return-address slot on the stack
+ * @self_addr: address of the traced function
+ *
+ * On success, *parent is redirected to parisc_return_to_handler so the
+ * graph tracer regains control when the traced function returns.
+ */
+static void __hot prepare_ftrace_return(unsigned long *parent,
+					unsigned long self_addr)
+{
+	unsigned long old;
+	struct ftrace_graph_ent trace;
+	extern int parisc_return_to_handler;
+
+	/* bail out cheaply if graph tracing is globally dead or paused */
+	if (unlikely(ftrace_graph_is_dead()))
+		return;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	/* remember the original return address before we redirect it */
+	old = *parent;
+
+	trace.func = self_addr;
+	trace.depth = current->curr_ret_stack + 1;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace))
+		return;
+
+        if (ftrace_push_return_trace(old, self_addr, &trace.depth,
+				     0, NULL) == -EBUSY)
+                return;
+
+	/* activate parisc_return_to_handler() as return point */
+	*parent = (unsigned long) &parisc_return_to_handler;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+/*
+ * Common entry point called from the mcount stub for every traced
+ * function.  Dispatches to the plain function tracer if one is
+ * installed, otherwise to the function-graph tracer.
+ *
+ * @parent: caller's return address
+ * @self_addr: address of the traced function
+ * @org_sp_gr3: caller's stack pointer, used to locate the %rp slot
+ */
+void notrace __hot ftrace_function_trampoline(unsigned long parent,
+				unsigned long self_addr,
+				unsigned long org_sp_gr3)
+{
+	extern ftrace_func_t ftrace_trace_function;  /* depends on CONFIG_DYNAMIC_FTRACE */
+	extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
+
+	if (ftrace_trace_function != ftrace_stub) {
+		/* struct ftrace_ops *op, struct pt_regs *regs); */
+		ftrace_trace_function(parent, self_addr, NULL, NULL);
+		return;
+	}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub ||
+		ftrace_graph_entry != ftrace_graph_entry_stub) {
+		unsigned long *parent_rp;
+
+		/* calculate pointer to %rp in stack */
+		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
+		/* sanity check: parent_rp should hold parent */
+		if (*parent_rp != parent)
+			return;
+
+		prepare_ftrace_return(parent_rp, self_addr);
+		return;
+	}
+#endif
+}
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/hardware.c b/src/kernel/linux/v4.14/arch/parisc/kernel/hardware.c
new file mode 100644
index 0000000..af3bc35
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/hardware.c
@@ -0,0 +1,1390 @@
+/*
+ *    Hardware descriptions for HP 9000 based hardware, including
+ *    system types, SCSI controllers, DMA controllers, HPPB controllers
+ *    and lots more.
+ * 
+ *    Based on the document "PA-RISC 1.1 I/O Firmware Architecture 
+ *    Reference Specification", March 7, 1999, version 0.96.  This
+ *    is available at http://parisc-linux.org/documentation/
+ *
+ *    Copyright 1999 by Alex deVries <alex@onefishtwo.ca>
+ *    and copyright 1999 The Puffin Group Inc.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ * 
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+
+#include <asm/hardware.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+/*
+ *	HP PARISC Hardware Database
+ *	Access to this database is only possible during bootup
+ *	so don't reference this table after starting the init process
+ *
+ *	NOTE: Product names listed here that end with a '?' are guesses.
+ *	If you know the correct name, please let us know.
+ */
+ 
+static struct hp_hardware hp_hardware_list[] = {
+	{HPHW_NPROC,0x01,0x4,0x0,"Indigo (840, 930)"},
+	{HPHW_NPROC,0x8,0x4,0x01,"Firefox(825,925)"},
+	{HPHW_NPROC,0xA,0x4,0x01,"Top Gun (835,834,935,635)"},
+	{HPHW_NPROC,0xB,0x4,0x01,"Technical Shogun (845, 645)"},
+	{HPHW_NPROC,0xF,0x4,0x01,"Commercial Shogun (949)"},
+	{HPHW_NPROC,0xC,0x4,0x01,"Cheetah (850, 950)"},
+	{HPHW_NPROC,0x80,0x4,0x01,"Cheetah (950S)"},
+	{HPHW_NPROC,0x81,0x4,0x01,"Jaguar (855, 955)"},
+	{HPHW_NPROC,0x82,0x4,0x01,"Cougar (860, 960)"},
+	{HPHW_NPROC,0x83,0x4,0x13,"Panther (865, 870, 980)"},
+	{HPHW_NPROC,0x100,0x4,0x01,"Burgundy (810)"},
+	{HPHW_NPROC,0x101,0x4,0x01,"SilverFox Low (822, 922)"},
+	{HPHW_NPROC,0x102,0x4,0x01,"SilverFox High (832, 932)"},
+	{HPHW_NPROC,0x103,0x4,0x01,"Lego, SilverLite (815, 808, 920)"},
+	{HPHW_NPROC,0x104,0x4,0x03,"SilverBullet Low (842, 948)"},
+	{HPHW_NPROC,0x105,0x4,0x03,"SilverBullet High (852, 958)"},
+	{HPHW_NPROC,0x106,0x4,0x81,"Oboe"},
+	{HPHW_NPROC,0x180,0x4,0x12,"Dragon"},
+	{HPHW_NPROC,0x181,0x4,0x13,"Chimera (890, 990, 992)"},
+	{HPHW_NPROC,0x182,0x4,0x91,"TNT 100 (891,T500)"},
+	{HPHW_NPROC,0x183,0x4,0x91,"TNT 120 (892,T520)"},
+	{HPHW_NPROC,0x184,0x4,0x91,"Jade 180 U (893,T540)"},
+	{HPHW_NPROC,0x1FF,0x4,0x91,"Hitachi X Processor"},
+	{HPHW_NPROC,0x200,0x4,0x81,"Cobra (720)"},
+	{HPHW_NPROC,0x201,0x4,0x81,"Coral (750)"},
+	{HPHW_NPROC,0x202,0x4,0x81,"King Cobra (730)"},
+	{HPHW_NPROC,0x203,0x4,0x81,"Hardball (735/99)"},
+	{HPHW_NPROC,0x204,0x4,0x81,"Coral II (755/99)"},
+	{HPHW_NPROC,0x205,0x4,0x81,"Coral II (755/125)"},
+	{HPHW_NPROC,0x205,0x4,0x91,"Snake Eagle "},
+	{HPHW_NPROC,0x206,0x4,0x81,"Snake Cheetah (735/130)"},
+	{HPHW_NPROC,0x280,0x4,0x81,"Nova Low (817, 827, 957, 957LX)"},
+	{HPHW_NPROC,0x281,0x4,0x81,"Nova High (837, 847, 857, 967, 967LX)"},
+	{HPHW_NPROC,0x282,0x4,0x81,"Nova8 (807, 917, 917LX, 927,927LX, 937, 937LX, 947,947LX)"},
+	{HPHW_NPROC,0x283,0x4,0x81,"Nova64 (867, 877, 977)"},
+	{HPHW_NPROC,0x284,0x4,0x81,"TNova (887, 897, 987)"},
+	{HPHW_NPROC,0x285,0x4,0x81,"TNova64"},
+	{HPHW_NPROC,0x286,0x4,0x91,"Hydra64 (Nova)"},
+	{HPHW_NPROC,0x287,0x4,0x91,"Hydra96 (Nova)"},
+	{HPHW_NPROC,0x288,0x4,0x81,"TNova96"},
+	{HPHW_NPROC,0x300,0x4,0x81,"Bushmaster (710)"},
+	{HPHW_NPROC,0x302,0x4,0x81,"Flounder (705)"},
+	{HPHW_NPROC,0x310,0x4,0x81,"Scorpio (715/50)"},
+	{HPHW_NPROC,0x311,0x4,0x81,"Scorpio Jr.(715/33)"},
+	{HPHW_NPROC,0x312,0x4,0x81,"Strider-50 (715S/50)"},
+	{HPHW_NPROC,0x313,0x4,0x81,"Strider-33 (715S/33)"},
+	{HPHW_NPROC,0x314,0x4,0x81,"Trailways-50 (715T/50)"},
+	{HPHW_NPROC,0x315,0x4,0x81,"Trailways-33 (715T/33)"},
+	{HPHW_NPROC,0x316,0x4,0x81,"Scorpio Sr.(715/75)"},
+	{HPHW_NPROC,0x317,0x4,0x81,"Scorpio 100 (715/100)"},
+	{HPHW_NPROC,0x318,0x4,0x81,"Spectra (725/50)"},
+	{HPHW_NPROC,0x319,0x4,0x81,"Spectra (725/75)"},
+	{HPHW_NPROC,0x320,0x4,0x81,"Spectra (725/100)"},
+	{HPHW_NPROC,0x401,0x4,0x81,"Pace (745i, 747i)"},
+	{HPHW_NPROC,0x402,0x4,0x81,"Sidewinder (742i)"},
+	{HPHW_NPROC,0x403,0x4,0x81,"Fast Pace"},
+	{HPHW_NPROC,0x480,0x4,0x81,"Orville (E23)"},
+	{HPHW_NPROC,0x481,0x4,0x81,"Wilbur (E25)"},
+	{HPHW_NPROC,0x482,0x4,0x81,"WB-80 (E35)"},
+	{HPHW_NPROC,0x483,0x4,0x81,"WB-96 (E45)"},
+	{HPHW_NPROC,0x484,0x4,0x81,"UL Proc L-100 (811/D210,D310)"},
+	{HPHW_NPROC,0x485,0x4,0x81,"UL Proc L-75 (801/D200)"},
+	{HPHW_NPROC,0x501,0x4,0x81,"Merlin L2 132 (9000/778/B132L)"},
+	{HPHW_NPROC,0x502,0x4,0x81,"Merlin L2 160 (9000/778/B160L)"},
+	{HPHW_NPROC,0x503,0x4,0x81,"Merlin L2+ 132 (9000/778/B132L)"},
+	{HPHW_NPROC,0x504,0x4,0x81,"Merlin L2+ 180 (9000/778/B180L)"},
+	{HPHW_NPROC,0x505,0x4,0x81,"Raven L2 132 (9000/778/C132L)"},
+	{HPHW_NPROC,0x506,0x4,0x81,"Raven L2 160 (9000/779/C160L)"},
+	{HPHW_NPROC,0x507,0x4,0x81,"Raven L2 180 (9000/779/C180L)"},
+	{HPHW_NPROC,0x508,0x4,0x81,"Raven L2 160 (9000/779/C160L)"},
+	{HPHW_NPROC,0x509,0x4,0x81,"712/132 L2 Upgrade"},
+	{HPHW_NPROC,0x50A,0x4,0x81,"712/160 L2 Upgrade"},
+	{HPHW_NPROC,0x50B,0x4,0x81,"715/132 L2 Upgrade"},
+	{HPHW_NPROC,0x50C,0x4,0x81,"715/160 L2 Upgrade"},
+	{HPHW_NPROC,0x50D,0x4,0x81,"Rocky2 L2 120"},
+	{HPHW_NPROC,0x50E,0x4,0x81,"Rocky2 L2 150"},
+	{HPHW_NPROC,0x50F,0x4,0x81,"Anole L2 132 (744)"},
+	{HPHW_NPROC,0x510,0x4,0x81,"Anole L2 165 (744)"},
+	{HPHW_NPROC,0x511,0x4,0x81,"Kiji L2 132"},
+	{HPHW_NPROC,0x512,0x4,0x81,"UL L2 132 (803/D220,D320)"},
+	{HPHW_NPROC,0x513,0x4,0x81,"UL L2 160 (813/D220,D320)"},
+	{HPHW_NPROC,0x514,0x4,0x81,"Merlin Jr L2 132"},
+	{HPHW_NPROC,0x515,0x4,0x81,"Staccato L2 132"},
+	{HPHW_NPROC,0x516,0x4,0x81,"Staccato L2 180 (A Class 180)"},
+	{HPHW_NPROC,0x580,0x4,0x81,"KittyHawk DC2-100 (K100)"},
+	{HPHW_NPROC,0x581,0x4,0x91,"KittyHawk DC3-120 (K210)"},
+	{HPHW_NPROC,0x582,0x4,0x91,"KittyHawk DC3 100 (K400)"},
+	{HPHW_NPROC,0x583,0x4,0x91,"KittyHawk DC3 120 (K410)"},
+	{HPHW_NPROC,0x584,0x4,0x91,"LighteningHawk T120"},
+	{HPHW_NPROC,0x585,0x4,0x91,"SkyHawk 100"},
+	{HPHW_NPROC,0x586,0x4,0x91,"SkyHawk 120"},
+	{HPHW_NPROC,0x587,0x4,0x81,"UL Proc 1-way T'120"},
+	{HPHW_NPROC,0x588,0x4,0x91,"UL Proc 2-way T'120"},
+	{HPHW_NPROC,0x589,0x4,0x81,"UL Proc 1-way T'100 (821/D250,D350)"},
+	{HPHW_NPROC,0x58A,0x4,0x91,"UL Proc 2-way T'100 (831/D250,D350)"},
+	{HPHW_NPROC,0x58B,0x4,0x91,"KittyHawk DC2 100 (K200)"},
+	{HPHW_NPROC,0x58C,0x4,0x91,"ThunderHawk DC3- 120 1M (K220)"},
+	{HPHW_NPROC,0x58D,0x4,0x91,"ThunderHawk DC3 120 1M (K420)"},
+	{HPHW_NPROC,0x58E,0x4,0x81,"Raven 120 T'"},
+	{HPHW_NPROC,0x58F,0x4,0x91,"Mohawk 160 U 1M DC3 (K450)"},
+	{HPHW_NPROC,0x590,0x4,0x91,"Mohawk 180 U 1M DC3 (K460)"},
+	{HPHW_NPROC,0x591,0x4,0x91,"Mohawk 200 U 1M DC3"},
+	{HPHW_NPROC,0x592,0x4,0x81,"Raven 100 T'"},
+	{HPHW_NPROC,0x593,0x4,0x91,"FireHawk 160 U"},
+	{HPHW_NPROC,0x594,0x4,0x91,"FireHawk 180 U"},
+	{HPHW_NPROC,0x595,0x4,0x91,"FireHawk 220 U"},
+	{HPHW_NPROC,0x596,0x4,0x91,"FireHawk 240 U"},
+	{HPHW_NPROC,0x597,0x4,0x91,"SPP2000 processor"},
+	{HPHW_NPROC,0x598,0x4,0x81,"Raven U 230 (9000/780/C230)"},
+	{HPHW_NPROC,0x599,0x4,0x81,"Raven U 240 (9000/780/C240)"},
+	{HPHW_NPROC,0x59A,0x4,0x91,"Unlisted but reserved"},
+	{HPHW_NPROC,0x59A,0x4,0x81,"Unlisted but reserved"},
+	{HPHW_NPROC,0x59B,0x4,0x81,"Raven U 160 (9000/780/C160)"},
+	{HPHW_NPROC,0x59C,0x4,0x81,"Raven U 180 (9000/780/C180)"},
+	{HPHW_NPROC,0x59D,0x4,0x81,"Raven U 200 (9000/780/C200)"},
+	{HPHW_NPROC,0x59E,0x4,0x91,"ThunderHawk T' 120"},
+	{HPHW_NPROC,0x59F,0x4,0x91,"Raven U 180+ (9000/780)"},
+	{HPHW_NPROC,0x5A0,0x4,0x81,"UL 1w T120 1MB/1MB (841/D260,D360)"},
+	{HPHW_NPROC,0x5A1,0x4,0x91,"UL 2w T120 1MB/1MB (851/D260,D360)"},
+	{HPHW_NPROC,0x5A2,0x4,0x81,"UL 1w U160 512K/512K (861/D270,D370)"},
+	{HPHW_NPROC,0x5A3,0x4,0x91,"UL 2w U160 512K/512K (871/D270,D370)"},
+	{HPHW_NPROC,0x5A4,0x4,0x91,"Mohawk 160 U 1M DC3- (K250)"},
+	{HPHW_NPROC,0x5A5,0x4,0x91,"Mohawk 180 U 1M DC3- (K260)"},
+	{HPHW_NPROC,0x5A6,0x4,0x91,"Mohawk 200 U 1M DC3-"},
+	{HPHW_NPROC,0x5A7,0x4,0x81,"UL proc 1-way U160 1M/1M"},
+	{HPHW_NPROC,0x5A8,0x4,0x91,"UL proc 2-way U160 1M/1M"},
+	{HPHW_NPROC,0x5A9,0x4,0x81,"UL proc 1-way U180 1M/1M"},
+	{HPHW_NPROC,0x5AA,0x4,0x91,"UL proc 2-way U180 1M/1M"},
+	{HPHW_NPROC,0x5AB,0x4,0x91,"Obsolete"},
+	{HPHW_NPROC,0x5AB,0x4,0x81,"Obsolete"},
+	{HPHW_NPROC,0x5AC,0x4,0x91,"Obsolete"},
+	{HPHW_NPROC,0x5AC,0x4,0x81,"Obsolete"},
+	{HPHW_NPROC,0x5AD,0x4,0x91,"BraveHawk 180MHz DC3-"},
+	{HPHW_NPROC,0x5AE,0x4,0x91,"BraveHawk 200MHz DC3- (898/K370)"},
+	{HPHW_NPROC,0x5AF,0x4,0x91,"BraveHawk 220MHz DC3-"},
+	{HPHW_NPROC,0x5B0,0x4,0x91,"BraveHawk 180MHz DC3"},
+	{HPHW_NPROC,0x5B1,0x4,0x91,"BraveHawk 200MHz DC3 (899/K570)"},
+	{HPHW_NPROC,0x5B2,0x4,0x91,"BraveHawk 220MHz DC3"},
+	{HPHW_NPROC,0x5B3,0x4,0x91,"FireHawk 200"},
+	{HPHW_NPROC,0x5B4,0x4,0x91,"SPP2500"},
+	{HPHW_NPROC,0x5B5,0x4,0x91,"SummitHawk U+"},
+	{HPHW_NPROC,0x5B6,0x4,0x91,"DragonHawk U+ 240 DC3"},
+	{HPHW_NPROC,0x5B7,0x4,0x91,"DragonHawk U+ 240 DC3-"},
+	{HPHW_NPROC,0x5B8,0x4,0x91,"SPP2250 240 MHz"},
+	{HPHW_NPROC,0x5B9,0x4,0x81,"UL 1w U+/240 (350/550)"},
+	{HPHW_NPROC,0x5BA,0x4,0x91,"UL 2w U+/240 (350/550)"},
+	{HPHW_NPROC,0x5BB,0x4,0x81,"AllegroHigh W"},
+	{HPHW_NPROC,0x5BC,0x4,0x91,"AllegroLow W"},
+	{HPHW_NPROC,0x5BD,0x4,0x91,"Forte W 2-way"},
+	{HPHW_NPROC,0x5BE,0x4,0x91,"Prelude W"},
+	{HPHW_NPROC,0x5BF,0x4,0x91,"Forte W 4-way"},
+	{HPHW_NPROC,0x5C0,0x4,0x91,"M2250"},
+	{HPHW_NPROC,0x5C1,0x4,0x91,"M2500"},
+	{HPHW_NPROC,0x5C2,0x4,0x91,"Sonata 440"},
+	{HPHW_NPROC,0x5C3,0x4,0x91,"Sonata 360"},
+	{HPHW_NPROC,0x5C4,0x4,0x91,"Rhapsody 440"},
+	{HPHW_NPROC,0x5C5,0x4,0x91,"Rhapsody 360"},
+	{HPHW_NPROC,0x5C6,0x4,0x91,"Raven W 360 (9000/780)"},
+	{HPHW_NPROC,0x5C7,0x4,0x91,"Halfdome W 440"},
+	{HPHW_NPROC,0x5C8,0x4,0x81,"Lego 360 processor"},
+	{HPHW_NPROC,0x5C9,0x4,0x91,"Rhapsody DC- 440"},
+	{HPHW_NPROC,0x5CA,0x4,0x91,"Rhapsody DC- 360"},
+	{HPHW_NPROC,0x5CB,0x4,0x91,"Crescendo 440"},
+	{HPHW_NPROC,0x5CC,0x4,0x91,"Prelude W 440"},
+	{HPHW_NPROC,0x5CD,0x4,0x91,"SPP2600"},
+	{HPHW_NPROC,0x5CE,0x4,0x91,"M2600"},
+	{HPHW_NPROC,0x5CF,0x4,0x81,"Allegro W+"},
+	{HPHW_NPROC,0x5D0,0x4,0x81,"Kazoo W+"},
+	{HPHW_NPROC,0x5D1,0x4,0x91,"Forte W+ 2w"},
+	{HPHW_NPROC,0x5D2,0x4,0x91,"Forte W+ 4w"},
+	{HPHW_NPROC,0x5D3,0x4,0x91,"Prelude W+ 540"},
+	{HPHW_NPROC,0x5D4,0x4,0x91,"Duet W+"},
+	{HPHW_NPROC,0x5D5,0x4,0x91,"Crescendo 550"},
+	{HPHW_NPROC,0x5D6,0x4,0x81,"Crescendo DC- 440"},
+	{HPHW_NPROC,0x5D7,0x4,0x91,"Keystone W+"},
+	{HPHW_NPROC,0x5D8,0x4,0x91,"Rhapsody wave 2 W+ DC-"},
+	{HPHW_NPROC,0x5D9,0x4,0x91,"Rhapsody wave 2 W+"},
+	{HPHW_NPROC,0x5DA,0x4,0x91,"Marcato W+ DC-"},
+	{HPHW_NPROC,0x5DB,0x4,0x91,"Marcato W+"},
+	{HPHW_NPROC,0x5DC,0x4,0x91,"Allegro W2"},
+	{HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"},
+	{HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"},
+	{HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"},
+	{HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+ (rp5470)?"},
+	{HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"},
+	{HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"},
+	{HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"},
+	{HPHW_NPROC,0x5E3,0x4,0x91,"Crescendo 750 W2"},
+	{HPHW_NPROC,0x5E4,0x4,0x91,"Keystone/Matterhorn W2 750"},
+	{HPHW_NPROC,0x5E5,0x4,0x91,"PowerBar W+"},
+	{HPHW_NPROC,0x5E6,0x4,0x91,"Keystone/Matterhorn W2 650"},
+	{HPHW_NPROC,0x5E7,0x4,0x91,"Caribe W2 800"},
+	{HPHW_NPROC,0x5E8,0x4,0x91,"Pikes Peak W2"},
+	{HPHW_NPROC,0x5EB,0x4,0x91,"Perf/Leone 875 W2+"},
+	{HPHW_NPROC,0x5FF,0x4,0x91,"Hitachi W"},
+	{HPHW_NPROC,0x600,0x4,0x81,"Gecko (712/60)"},
+	{HPHW_NPROC,0x601,0x4,0x81,"Gecko 80 (712/80)"},
+	{HPHW_NPROC,0x602,0x4,0x81,"Gecko 100 (712/100)"},
+	{HPHW_NPROC,0x603,0x4,0x81,"Anole 64 (743/64)"},
+	{HPHW_NPROC,0x604,0x4,0x81,"Anole 100 (743/100)"},
+	{HPHW_NPROC,0x605,0x4,0x81,"Gecko 120 (712/120)"},
+	{HPHW_NPROC,0x606,0x4,0x81,"Gila 80"},
+	{HPHW_NPROC,0x607,0x4,0x81,"Gila 100"},
+	{HPHW_NPROC,0x608,0x4,0x81,"Gila 120"},
+	{HPHW_NPROC,0x609,0x4,0x81,"Scorpio-L 80"},
+	{HPHW_NPROC,0x60A,0x4,0x81,"Mirage Jr (715/64)"},
+	{HPHW_NPROC,0x60B,0x4,0x81,"Mirage 100"},
+	{HPHW_NPROC,0x60C,0x4,0x81,"Mirage 100+"},
+	{HPHW_NPROC,0x60D,0x4,0x81,"Electra 100"},
+	{HPHW_NPROC,0x60E,0x4,0x81,"Electra 120"},
+	{HPHW_NPROC,0x610,0x4,0x81,"Scorpio-L 100"},
+	{HPHW_NPROC,0x611,0x4,0x81,"Scorpio-L 120"},
+	{HPHW_NPROC,0x612,0x4,0x81,"Spectra-L 80"},
+	{HPHW_NPROC,0x613,0x4,0x81,"Spectra-L 100"},
+	{HPHW_NPROC,0x614,0x4,0x81,"Spectra-L 120"},
+	{HPHW_NPROC,0x615,0x4,0x81,"Piranha 100"},
+	{HPHW_NPROC,0x616,0x4,0x81,"Piranha 120"},
+	{HPHW_NPROC,0x617,0x4,0x81,"Jason 50"},
+	{HPHW_NPROC,0x618,0x4,0x81,"Jason 100"},
+	{HPHW_NPROC,0x619,0x4,0x81,"Mirage 80"},
+	{HPHW_NPROC,0x61A,0x4,0x81,"SAIC L-80"},
+	{HPHW_NPROC,0x61B,0x4,0x81,"Rocky1 L-60"},
+	{HPHW_NPROC,0x61C,0x4,0x81,"Anole T (743/T)"},
+	{HPHW_NPROC,0x67E,0x4,0x81,"Hitachi Tiny 80"},
+	{HPHW_NPROC,0x67F,0x4,0x81,"Hitachi Tiny 64"},
+	{HPHW_NPROC,0x700,0x4,0x91,"NEC Aska Processor"},
+	{HPHW_NPROC,0x880,0x4,0x91,"Orca Mako"},
+	{HPHW_NPROC,0x881,0x4,0x91,"Everest Mako"},
+	{HPHW_NPROC,0x882,0x4,0x91,"Rainier/Medel Mako Slow"},
+	{HPHW_NPROC,0x883,0x4,0x91,"Rainier/Medel Mako Fast"},
+	{HPHW_NPROC,0x884,0x4,0x91,"Mt. Hamilton"},
+	{HPHW_NPROC,0x885,0x4,0x91,"Mt. Hamilton DC-"},
+	{HPHW_NPROC,0x886,0x4,0x91,"Storm Peak Slow DC-"},
+	{HPHW_NPROC,0x887,0x4,0x91,"Storm Peak Slow"},
+	{HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"},
+	{HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"},
+	{HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak Slow"},
+	{HPHW_NPROC,0x88B,0x4,0x91,"Crestone Peak Fast?"},
+	{HPHW_NPROC,0x88C,0x4,0x91,"Orca Mako+"},
+	{HPHW_NPROC,0x88D,0x4,0x91,"Rainier/Medel Mako+ Slow"},
+	{HPHW_NPROC,0x88E,0x4,0x91,"Rainier/Medel Mako+ Fast"},
+	{HPHW_NPROC,0x892,0x4,0x91,"Mt. Hamilton Slow Mako+?"},
+	{HPHW_NPROC,0x894,0x4,0x91,"Mt. Hamilton Fast Mako+"},
+	{HPHW_NPROC,0x895,0x4,0x91,"Storm Peak Slow Mako+"},
+	{HPHW_NPROC,0x896,0x4,0x91,"Storm Peak Fast Mako+"},
+	{HPHW_NPROC,0x897,0x4,0x91,"Storm Peak DC- Slow Mako+"},
+	{HPHW_NPROC,0x898,0x4,0x91,"Storm Peak DC- Fast Mako+"},
+	{HPHW_NPROC,0x899,0x4,0x91,"Mt. Hamilton Slow Mako+"},
+	{HPHW_NPROC,0x89B,0x4,0x91,"Crestone Peak Mako+ Slow"},
+	{HPHW_NPROC,0x89C,0x4,0x91,"Crestone Peak Mako+ Fast"},
+	{HPHW_A_DIRECT, 0x004, 0x0000D, 0x00, "Arrakis MUX"}, 
+	{HPHW_A_DIRECT, 0x005, 0x0000D, 0x00, "Dyun Kiuh MUX"}, 
+	{HPHW_A_DIRECT, 0x006, 0x0000D, 0x00, "Baat Kiuh AP/MUX (40299B)"}, 
+	{HPHW_A_DIRECT, 0x007, 0x0000D, 0x00, "Dino AP"}, 
+	{HPHW_A_DIRECT, 0x009, 0x0000D, 0x00, "Solaris Direct Connect MUX (J2092A)"}, 
+	{HPHW_A_DIRECT, 0x00A, 0x0000D, 0x00, "Solaris RS-422/423 MUX (J2093A)"}, 
+	{HPHW_A_DIRECT, 0x00B, 0x0000D, 0x00, "Solaris RS-422/423 Quadriloops MUX"}, 
+	{HPHW_A_DIRECT, 0x00C, 0x0000D, 0x00, "Solaris Modem MUX (J2094A)"}, 
+	{HPHW_A_DIRECT, 0x00D, 0x0000D, 0x00, "Twins Direct Connect MUX"}, 
+	{HPHW_A_DIRECT, 0x00E, 0x0000D, 0x00, "Twins Modem MUX"}, 
+	{HPHW_A_DIRECT, 0x00F, 0x0000D, 0x00, "Nautilus RS-485"}, 
+	{HPHW_A_DIRECT, 0x010, 0x0000D, 0x00, "UltraLight CAP/MUX"}, 
+	{HPHW_A_DIRECT, 0x015, 0x0000D, 0x00, "Eole CAP/MUX"}, 
+	{HPHW_A_DIRECT, 0x024, 0x0000D, 0x00, "Sahp Kiuh AP/MUX"}, 
+	{HPHW_A_DIRECT, 0x034, 0x0000D, 0x00, "Sahp Kiuh Low AP/MUX"}, 
+	{HPHW_A_DIRECT, 0x044, 0x0000D, 0x00, "Sahp Baat Kiuh AP/MUX"}, 
+	{HPHW_A_DIRECT, 0x004, 0x0000E, 0x80, "Burgundy RS-232"}, 
+	{HPHW_A_DIRECT, 0x005, 0x0000E, 0x80, "Silverfox RS-232"}, 
+	{HPHW_A_DIRECT, 0x006, 0x0000E, 0x80, "Lego RS-232"}, 
+	{HPHW_A_DIRECT, 0x004, 0x0000F, 0x00, "Peacock Graphics"}, 
+	{HPHW_A_DIRECT, 0x004, 0x00014, 0x80, "Burgundy HIL"}, 
+	{HPHW_A_DIRECT, 0x005, 0x00014, 0x80, "Peacock HIL"}, 
+	{HPHW_A_DIRECT, 0x004, 0x00015, 0x80, "Leonardo"}, 
+	{HPHW_A_DIRECT, 0x004, 0x00016, 0x80, "HP-PB HRM"}, 
+	{HPHW_A_DIRECT, 0x004, 0x00017, 0x80, "HP-PB HRC"}, 
+	{HPHW_A_DIRECT, 0x004, 0x0003A, 0x80, "Skunk Centronics (28655A)"}, 
+	{HPHW_A_DIRECT, 0x024, 0x0003A, 0x80, "Sahp Kiuh Centronics"}, 
+	{HPHW_A_DIRECT, 0x044, 0x0003A, 0x80, "Sahp Baat Kiuh Centronics"}, 
+	{HPHW_A_DIRECT, 0x004, 0x0004E, 0x80, "AT&T DataKit (AMSO)"}, 
+	{HPHW_A_DIRECT, 0x004, 0x0009B, 0x80, "Test&Meas GSC HPIB"}, 
+	{HPHW_A_DIRECT, 0x004, 0x000A8, 0x00, "Rocky2-120 Front Keyboard"}, 
+	{HPHW_A_DIRECT, 0x005, 0x000A8, 0x00, "Rocky2-150 Front Keyboard"}, 
+	{HPHW_A_DIRECT, 0x004, 0x00101, 0x80, "Hitachi Console Module"}, 
+	{HPHW_A_DIRECT, 0x004, 0x00102, 0x80, "Hitachi Boot Module"}, 
+	{HPHW_A_DIRECT, 0x004, 0x00203, 0x80, "MELCO HBMLA MLAIT"}, 
+	{HPHW_A_DIRECT, 0x004, 0x00208, 0x80, "MELCO HBDPC"}, 
+	{HPHW_A_DIRECT, 0x004, 0x00300, 0x00, "DCI TWINAX TERM IO MUX"}, 
+	{HPHW_A_DMA, 0x004, 0x00039, 0x80, "Skunk SCSI (28655A)"}, 
+	{HPHW_A_DMA, 0x005, 0x00039, 0x80, "KittyHawk CSY Core SCSI"}, 
+	{HPHW_A_DMA, 0x014, 0x00039, 0x80, "Diablo SCSI"}, 
+	{HPHW_A_DMA, 0x024, 0x00039, 0x80, "Sahp Kiuh SCSI"}, 
+	{HPHW_A_DMA, 0x034, 0x00039, 0x80, "Sahp Kiuh Low SCSI"}, 
+	{HPHW_A_DMA, 0x044, 0x00039, 0x80, "Sahp Baat Kiuh SCSI"}, 
+	{HPHW_A_DMA, 0x004, 0x0003B, 0x80, "Wizard SCSI"}, 
+	{HPHW_A_DMA, 0x005, 0x0003B, 0x80, "KittyHawk CSY Core FW-SCSI"}, 
+	{HPHW_A_DMA, 0x006, 0x0003B, 0x80, "Symbios EPIC FW-SCSI"}, 
+	{HPHW_A_DMA, 0x004, 0x00040, 0x80, "HP-PB Shazam HPIB (28650A)"}, 
+	{HPHW_A_DMA, 0x005, 0x00040, 0x80, "Burgundy HPIB"}, 
+	{HPHW_A_DMA, 0x004, 0x00041, 0x80, "HP-PB HP-FL"}, 
+	{HPHW_A_DMA, 0x004, 0x00042, 0x80, "HP-PB LoQuix HPIB (28650B)"}, 
+	{HPHW_A_DMA, 0x004, 0x00043, 0x80, "HP-PB Crypt LoQuix"}, 
+	{HPHW_A_DMA, 0x004, 0x00044, 0x80, "HP-PB Shazam GPIO (28651A)"}, 
+	{HPHW_A_DMA, 0x004, 0x00045, 0x80, "HP-PB LoQuix GPIO"}, 
+	{HPHW_A_DMA, 0x004, 0x00046, 0x80, "2-Port X.25 NIO_ACC (AMSO)"}, 
+	{HPHW_A_DMA, 0x004, 0x00047, 0x80, "4-Port X.25 NIO_ACC (AMSO)"}, 
+	{HPHW_A_DMA, 0x004, 0x0004B, 0x80, "LGB Control"}, 
+	{HPHW_A_DMA, 0x004, 0x0004C, 0x80, "Martian RTI (AMSO)"}, 
+	{HPHW_A_DMA, 0x004, 0x0004D, 0x80, "ACC Mux (AMSO)"}, 
+	{HPHW_A_DMA, 0x004, 0x00050, 0x80, "Lanbrusca 802.3 (36967A)"}, 
+	{HPHW_A_DMA, 0x004, 0x00056, 0x80, "HP-PB LoQuix FDDI"}, 
+	{HPHW_A_DMA, 0x004, 0x00057, 0x80, "HP-PB LoQuix FDDI (28670A)"}, 
+	{HPHW_A_DMA, 0x004, 0x0005E, 0x00, "Gecko Add-on Token Ring"}, 
+	{HPHW_A_DMA, 0x012, 0x00089, 0x80, "Barracuda Add-on FW-SCSI"}, 
+	{HPHW_A_DMA, 0x013, 0x00089, 0x80, "Bluefish Add-on FW-SCSI"}, 
+	{HPHW_A_DMA, 0x014, 0x00089, 0x80, "Shrike Add-on FW-SCSI"}, 
+	{HPHW_A_DMA, 0x015, 0x00089, 0x80, "KittyHawk GSY Core FW-SCSI"}, 
+	{HPHW_A_DMA, 0x017, 0x00089, 0x80, "Shrike Jade Add-on FW-SCSI (A3644A)"}, 
+	{HPHW_A_DMA, 0x01F, 0x00089, 0x80, "SkyHawk 100/120 FW-SCSI"}, 
+	{HPHW_A_DMA, 0x027, 0x00089, 0x80, "Piranha 100 FW-SCSI"}, 
+	{HPHW_A_DMA, 0x032, 0x00089, 0x80, "Raven T' Core FW-SCSI"}, 
+	{HPHW_A_DMA, 0x03B, 0x00089, 0x80, "Raven U/L2 Core FW-SCSI"}, 
+	{HPHW_A_DMA, 0x03C, 0x00089, 0x80, "Merlin 132 Core FW-SCSI"},
+	{HPHW_A_DMA, 0x03D, 0x00089, 0x80, "Merlin 160 Core FW-SCSI"},
+	{HPHW_A_DMA, 0x044, 0x00089, 0x80, "Mohawk Core FW-SCSI"}, 
+	{HPHW_A_DMA, 0x051, 0x00089, 0x80, "Firehawk FW-SCSI"}, 
+	{HPHW_A_DMA, 0x058, 0x00089, 0x80, "FireHawk 200 FW-SCSI"}, 
+	{HPHW_A_DMA, 0x05C, 0x00089, 0x80, "SummitHawk 230 Ultra-SCSI"}, 
+	{HPHW_A_DMA, 0x014, 0x00091, 0x80, "Baby Hugo Add-on Net FC (A3406A)"}, 
+	{HPHW_A_DMA, 0x020, 0x00091, 0x80, "Baby Jade Add-on Net FC (A3638A)"}, 
+	{HPHW_A_DMA, 0x004, 0x00092, 0x80, "GSC+ YLIASTER ATM"}, 
+	{HPHW_A_DMA, 0x004, 0x00095, 0x80, "Hamlyn GSC+ Network Card"}, 
+	{HPHW_A_DMA, 0x004, 0x00098, 0x80, "Lo-fat Emulator"}, 
+	{HPHW_A_DMA, 0x004, 0x0009A, 0x80, "GSC+ Venus ATM"}, 
+	{HPHW_A_DMA, 0x005, 0x0009A, 0x80, "GSC+ Samorobrive ATM"}, 
+	{HPHW_A_DMA, 0x004, 0x0009D, 0x80, "HP HSC-PCI Cards"}, 
+	{HPHW_A_DMA, 0x004, 0x0009E, 0x80, "Alaxis GSC+ 155Mb ATM"}, 
+	{HPHW_A_DMA, 0x005, 0x0009E, 0x80, "Alaxis GSC+ 622Mb ATM"}, 
+	{HPHW_A_DMA, 0x05C, 0x0009F, 0x80, "SummitHawk 230 USB"}, 
+	{HPHW_A_DMA, 0x05C, 0x000A0, 0x80, "SummitHawk 230 100BaseT"}, 
+	{HPHW_A_DMA, 0x015, 0x000A7, 0x80, "Baby Hugo Add-on mass FC (A3404A)"}, 
+	{HPHW_A_DMA, 0x018, 0x000A7, 0x80, "Mombasa GS Add-on mass FC (A3591)"}, 
+	{HPHW_A_DMA, 0x021, 0x000A7, 0x80, "Baby Jade Add-on mass FC (A3636A)"}, 
+	{HPHW_A_DMA, 0x004, 0x00201, 0x80, "MELCO HCMAP"}, 
+	{HPHW_A_DMA, 0x004, 0x00202, 0x80, "MELCO HBMLA MLAMA"}, 
+	{HPHW_A_DMA, 0x004, 0x00205, 0x80, "MELCO HBRFU"}, 
+	{HPHW_A_DMA, 0x004, 0x00380, 0x80, "Interphase NIO-FC"}, 
+	{HPHW_A_DMA, 0x004, 0x00381, 0x80, "Interphase NIO-ATM"}, 
+	{HPHW_A_DMA, 0x004, 0x00382, 0x80, "Interphase NIO-100BaseTX"}, 
+	{HPHW_BA, 0x004, 0x00070, 0x0, "Cobra Core BA"}, 
+	{HPHW_BA, 0x005, 0x00070, 0x0, "Coral Core BA"}, 
+	{HPHW_BA, 0x006, 0x00070, 0x0, "Bushmaster Core BA"}, 
+	{HPHW_BA, 0x007, 0x00070, 0x0, "Scorpio Core BA"}, 
+	{HPHW_BA, 0x008, 0x00070, 0x0, "Flounder Core BA"}, 
+	{HPHW_BA, 0x009, 0x00070, 0x0, "Outfield Core BA"}, 
+	{HPHW_BA, 0x00A, 0x00070, 0x0, "CoralII Core BA"}, 
+	{HPHW_BA, 0x00B, 0x00070, 0x0, "Scorpio Jr. Core BA"}, 
+	{HPHW_BA, 0x00C, 0x00070, 0x0, "Strider-50 Core BA"}, 
+	{HPHW_BA, 0x00D, 0x00070, 0x0, "Strider-33 Core BA"}, 
+	{HPHW_BA, 0x00E, 0x00070, 0x0, "Trailways-50 Core BA"}, 
+	{HPHW_BA, 0x00F, 0x00070, 0x0, "Trailways-33 Core BA"}, 
+	{HPHW_BA, 0x010, 0x00070, 0x0, "Pace Core BA"}, 
+	{HPHW_BA, 0x011, 0x00070, 0x0, "Sidewinder Core BA"}, 
+	{HPHW_BA, 0x019, 0x00070, 0x0, "Scorpio Sr. Core BA"}, 
+	{HPHW_BA, 0x020, 0x00070, 0x0, "Scorpio 100 Core BA"}, 
+	{HPHW_BA, 0x021, 0x00070, 0x0, "Spectra 50 Core BA"}, 
+	{HPHW_BA, 0x022, 0x00070, 0x0, "Spectra 75 Core BA"}, 
+	{HPHW_BA, 0x023, 0x00070, 0x0, "Spectra 100 Core BA"}, 
+	{HPHW_BA, 0x024, 0x00070, 0x0, "Fast Pace Core BA"}, 
+	{HPHW_BA, 0x026, 0x00070, 0x0, "CoralII Jaguar Core BA"}, 
+	{HPHW_BA, 0x004, 0x00076, 0x0, "Cobra EISA BA"}, 
+	{HPHW_BA, 0x005, 0x00076, 0x0, "Coral EISA BA"}, 
+	{HPHW_BA, 0x007, 0x00076, 0x0, "Scorpio EISA BA"}, 
+	{HPHW_BA, 0x00A, 0x00076, 0x0, "CoralII EISA BA"}, 
+	{HPHW_BA, 0x00B, 0x00076, 0x0, "Scorpio Jr. EISA BA"}, 
+	{HPHW_BA, 0x00C, 0x00076, 0x0, "Strider-50 Core EISA"}, 
+	{HPHW_BA, 0x00D, 0x00076, 0x0, "Strider-33 Core EISA"}, 
+	{HPHW_BA, 0x00E, 0x00076, 0x0, "Trailways-50 Core EISA"}, 
+	{HPHW_BA, 0x00F, 0x00076, 0x0, "Trailways-33 Core EISA"}, 
+	{HPHW_BA, 0x010, 0x00076, 0x0, "Pace Core EISA"}, 
+	{HPHW_BA, 0x019, 0x00076, 0x0, "Scorpio Sr. EISA BA"}, 
+	{HPHW_BA, 0x020, 0x00076, 0x0, "Scorpio 100 EISA BA"}, 
+	{HPHW_BA, 0x021, 0x00076, 0x0, "Spectra 50 EISA BA"}, 
+	{HPHW_BA, 0x022, 0x00076, 0x0, "Spectra 75 EISA BA"}, 
+	{HPHW_BA, 0x023, 0x00076, 0x0, "Spectra 100 EISA BA"}, 
+	{HPHW_BA, 0x026, 0x00076, 0x0, "CoralII Jaguar EISA BA"}, 
+	{HPHW_BA, 0x010, 0x00078, 0x0, "Pace VME BA"}, 
+	{HPHW_BA, 0x011, 0x00078, 0x0, "Sidewinder VME BA"}, 
+	{HPHW_BA, 0x01A, 0x00078, 0x0, "Anole 64 VME BA"}, 
+	{HPHW_BA, 0x01B, 0x00078, 0x0, "Anole 100 VME BA"}, 
+	{HPHW_BA, 0x024, 0x00078, 0x0, "Fast Pace VME BA"}, 
+	{HPHW_BA, 0x034, 0x00078, 0x0, "Anole T VME BA"}, 
+	{HPHW_BA, 0x04A, 0x00078, 0x0, "Anole L2 132 VME BA"}, 
+	{HPHW_BA, 0x04C, 0x00078, 0x0, "Anole L2 165 VME BA"}, 
+	{HPHW_BA, 0x011, 0x00081, 0x0, "WB-96 Core BA"}, 
+	{HPHW_BA, 0x012, 0x00081, 0x0, "Orville UX Core BA"}, 
+	{HPHW_BA, 0x013, 0x00081, 0x0, "Wilbur UX Core BA"}, 
+	{HPHW_BA, 0x014, 0x00081, 0x0, "WB-80 Core BA"}, 
+	{HPHW_BA, 0x015, 0x00081, 0x0, "KittyHawk GSY Core BA"}, 
+	{HPHW_BA, 0x016, 0x00081, 0x0, "Gecko Core BA"}, 
+	{HPHW_BA, 0x018, 0x00081, 0x0, "Gecko Optional BA"}, 
+	{HPHW_BA, 0x01A, 0x00081, 0x0, "Anole 64 Core BA"}, 
+	{HPHW_BA, 0x01B, 0x00081, 0x0, "Anole 100 Core BA"}, 
+	{HPHW_BA, 0x01C, 0x00081, 0x0, "Gecko 80 Core BA"}, 
+	{HPHW_BA, 0x01D, 0x00081, 0x0, "Gecko 100 Core BA"}, 
+	{HPHW_BA, 0x01F, 0x00081, 0x0, "SkyHawk 100/120 Core BA"}, 
+	{HPHW_BA, 0x027, 0x00081, 0x0, "Piranha 100 Core BA"}, 
+	{HPHW_BA, 0x028, 0x00081, 0x0, "Mirage Jr Core BA"}, 
+	{HPHW_BA, 0x029, 0x00081, 0x0, "Mirage Core BA"}, 
+	{HPHW_BA, 0x02A, 0x00081, 0x0, "Electra Core BA"}, 
+	{HPHW_BA, 0x02B, 0x00081, 0x0, "Mirage 80 Core BA"}, 
+	{HPHW_BA, 0x02C, 0x00081, 0x0, "Mirage 100+ Core BA"}, 
+	{HPHW_BA, 0x02E, 0x00081, 0x0, "UL 350 Lasi Core BA"}, 
+	{HPHW_BA, 0x02F, 0x00081, 0x0, "UL 550 Lasi Core BA"}, 
+	{HPHW_BA, 0x032, 0x00081, 0x0, "Raven T' Core BA"}, 
+	{HPHW_BA, 0x033, 0x00081, 0x0, "Anole T Core BA"}, 
+	{HPHW_BA, 0x034, 0x00081, 0x0, "SAIC L-80 Core BA"}, 
+	{HPHW_BA, 0x035, 0x00081, 0x0, "PCX-L2 712/132 Core BA"}, 
+	{HPHW_BA, 0x036, 0x00081, 0x0, "PCX-L2 712/160 Core BA"}, 
+	{HPHW_BA, 0x03B, 0x00081, 0x0, "Raven U/L2 Core BA"}, 
+	{HPHW_BA, 0x03C, 0x00081, 0x0, "Merlin 132 Core BA"}, 
+	{HPHW_BA, 0x03D, 0x00081, 0x0, "Merlin 160 Core BA"}, 
+	{HPHW_BA, 0x03E, 0x00081, 0x0, "Merlin+ 132 Core BA"}, 
+	{HPHW_BA, 0x03F, 0x00081, 0x0, "Merlin+ 180 Core BA"}, 
+	{HPHW_BA, 0x044, 0x00081, 0x0, "Mohawk Core BA"}, 
+	{HPHW_BA, 0x045, 0x00081, 0x0, "Rocky1 Core BA"}, 
+	{HPHW_BA, 0x046, 0x00081, 0x0, "Rocky2 120 Core BA"}, 
+	{HPHW_BA, 0x047, 0x00081, 0x0, "Rocky2 150 Core BA"}, 
+	{HPHW_BA, 0x04B, 0x00081, 0x0, "Anole L2 132 Core BA"}, 
+	{HPHW_BA, 0x04D, 0x00081, 0x0, "Anole L2 165 Core BA"}, 
+	{HPHW_BA, 0x04E, 0x00081, 0x0, "Kiji L2 132 Core BA"}, 
+	{HPHW_BA, 0x050, 0x00081, 0x0, "Merlin Jr 132 Core BA"}, 
+	{HPHW_BA, 0x051, 0x00081, 0x0, "Firehawk Core BA"}, 
+	{HPHW_BA, 0x056, 0x00081, 0x0, "Raven+ w SE FWSCSI Core BA"}, 
+	{HPHW_BA, 0x057, 0x00081, 0x0, "Raven+ w Diff FWSCSI Core BA"}, 
+	{HPHW_BA, 0x058, 0x00081, 0x0, "FireHawk 200 Core BA"}, 
+	{HPHW_BA, 0x05C, 0x00081, 0x0, "SummitHawk 230 Core BA"}, 
+	{HPHW_BA, 0x05E, 0x00081, 0x0, "Staccato 132 Core BA"}, 
+	{HPHW_BA, 0x05E, 0x00081, 0x0, "Staccato 180 Core BA"}, 
+	{HPHW_BA, 0x05F, 0x00081, 0x0, "Staccato 180 Lasi"}, 
+	{HPHW_BA, 0x800, 0x00081, 0x0, "Hitachi Tiny 64 Core BA"}, 
+	{HPHW_BA, 0x801, 0x00081, 0x0, "Hitachi Tiny 80 Core BA"}, 
+	{HPHW_BA, 0x004, 0x0008B, 0x0, "Anole Optional PCMCIA BA"}, 
+	{HPHW_BA, 0x004, 0x0008E, 0x0, "GSC ITR Wax BA"}, 
+	{HPHW_BA, 0x00C, 0x0008E, 0x0, "Gecko Optional Wax BA"}, 
+	{HPHW_BA, 0x010, 0x0008E, 0x0, "Pace Wax BA"}, 
+	{HPHW_BA, 0x011, 0x0008E, 0x0, "SuperPace Wax BA"}, 
+	{HPHW_BA, 0x012, 0x0008E, 0x0, "Mirage Jr Wax BA"}, 
+	{HPHW_BA, 0x013, 0x0008E, 0x0, "Mirage Wax BA"}, 
+	{HPHW_BA, 0x014, 0x0008E, 0x0, "Electra Wax BA"}, 
+	{HPHW_BA, 0x017, 0x0008E, 0x0, "Raven Backplane Wax BA"}, 
+	{HPHW_BA, 0x01E, 0x0008E, 0x0, "Raven T' Wax BA"}, 
+	{HPHW_BA, 0x01F, 0x0008E, 0x0, "SkyHawk Wax BA"}, 
+	{HPHW_BA, 0x023, 0x0008E, 0x0, "Rocky1 Wax BA"}, 
+	{HPHW_BA, 0x02B, 0x0008E, 0x0, "Mirage 80 Wax BA"}, 
+	{HPHW_BA, 0x02C, 0x0008E, 0x0, "Mirage 100+ Wax BA"}, 
+	{HPHW_BA, 0x030, 0x0008E, 0x0, "UL 350 Core Wax BA"}, 
+	{HPHW_BA, 0x031, 0x0008E, 0x0, "UL 550 Core Wax BA"}, 
+	{HPHW_BA, 0x034, 0x0008E, 0x0, "SAIC L-80 Wax BA"}, 
+	{HPHW_BA, 0x03A, 0x0008E, 0x0, "Merlin+ Wax BA"}, 
+	{HPHW_BA, 0x040, 0x0008E, 0x0, "Merlin 132 Wax BA"}, 
+	{HPHW_BA, 0x041, 0x0008E, 0x0, "Merlin 160 Wax BA"}, 
+	{HPHW_BA, 0x043, 0x0008E, 0x0, "Merlin 132/160 Wax BA"}, 
+	{HPHW_BA, 0x052, 0x0008E, 0x0, "Raven+ Hi Power Backplane w/EISA Wax BA"}, 
+	{HPHW_BA, 0x054, 0x0008E, 0x0, "Raven+ Lo Power Backplane w/EISA Wax BA"}, 
+	{HPHW_BA, 0x059, 0x0008E, 0x0, "FireHawk 200 Wax BA"}, 
+	{HPHW_BA, 0x05A, 0x0008E, 0x0, "Raven+ L2 Backplane w/EISA Wax BA"}, 
+	{HPHW_BA, 0x05D, 0x0008E, 0x0, "SummitHawk Wax BA"}, 
+	{HPHW_BA, 0x800, 0x0008E, 0x0, "Hitachi Tiny 64 Wax BA"}, 
+	{HPHW_BA, 0x801, 0x0008E, 0x0, "Hitachi Tiny 80 Wax BA"}, 
+	{HPHW_BA, 0x011, 0x00090, 0x0, "SuperPace Wax EISA BA"}, 
+	{HPHW_BA, 0x017, 0x00090, 0x0, "Raven Backplane Wax EISA BA"}, 
+	{HPHW_BA, 0x01E, 0x00090, 0x0, "Raven T' Wax EISA BA"}, 
+	{HPHW_BA, 0x01F, 0x00090, 0x0, "SkyHawk 100/120 Wax EISA BA"}, 
+	{HPHW_BA, 0x027, 0x00090, 0x0, "Piranha 100 Wax EISA BA"}, 
+	{HPHW_BA, 0x028, 0x00090, 0x0, "Mirage Jr Wax EISA BA"}, 
+	{HPHW_BA, 0x029, 0x00090, 0x0, "Mirage Wax EISA BA"}, 
+	{HPHW_BA, 0x02A, 0x00090, 0x0, "Electra Wax EISA BA"}, 
+	{HPHW_BA, 0x02B, 0x00090, 0x0, "Mirage 80 Wax EISA BA"}, 
+	{HPHW_BA, 0x02C, 0x00090, 0x0, "Mirage 100+ Wax EISA BA"}, 
+	{HPHW_BA, 0x030, 0x00090, 0x0, "UL 350 Wax EISA BA"}, 
+	{HPHW_BA, 0x031, 0x00090, 0x0, "UL 550 Wax EISA BA"}, 
+	{HPHW_BA, 0x034, 0x00090, 0x0, "SAIC L-80 Wax EISA BA"}, 
+	{HPHW_BA, 0x03A, 0x00090, 0x0, "Merlin+ Wax EISA BA"}, 
+	{HPHW_BA, 0x040, 0x00090, 0x0, "Merlin 132 Wax EISA BA"}, 
+	{HPHW_BA, 0x041, 0x00090, 0x0, "Merlin 160 Wax EISA BA"}, 
+	{HPHW_BA, 0x043, 0x00090, 0x0, "Merlin 132/160 Wax EISA BA"}, 
+	{HPHW_BA, 0x052, 0x00090, 0x0, "Raven Hi Power Backplane Wax EISA BA"}, 
+	{HPHW_BA, 0x054, 0x00090, 0x0, "Raven Lo Power Backplane Wax EISA BA"}, 
+	{HPHW_BA, 0x059, 0x00090, 0x0, "FireHawk 200 Wax EISA BA"}, 
+	{HPHW_BA, 0x05A, 0x00090, 0x0, "Raven L2 Backplane Wax EISA BA"}, 
+	{HPHW_BA, 0x05D, 0x00090, 0x0, "SummitHawk Wax EISA BA"}, 
+	{HPHW_BA, 0x800, 0x00090, 0x0, "Hitachi Tiny 64 Wax EISA BA"}, 
+	{HPHW_BA, 0x801, 0x00090, 0x0, "Hitachi Tiny 80 Wax EISA BA"}, 
+	{HPHW_BA, 0x01A, 0x00093, 0x0, "Anole 64 TIMI BA"}, 
+	{HPHW_BA, 0x01B, 0x00093, 0x0, "Anole 100 TIMI BA"}, 
+	{HPHW_BA, 0x034, 0x00093, 0x0, "Anole T TIMI BA"}, 
+	{HPHW_BA, 0x04A, 0x00093, 0x0, "Anole L2 132 TIMI BA"}, 
+	{HPHW_BA, 0x04C, 0x00093, 0x0, "Anole L2 165 TIMI BA"}, 
+	{HPHW_BA, 0x582, 0x000A5, 0x00, "Epic PCI Bridge"}, 
+	{HPHW_BCPORT, 0x504, 0x00000, 0x00, "Phantom PseudoBC GSC+ Port"}, 
+	{HPHW_BCPORT, 0x505, 0x00000, 0x00, "Phantom PseudoBC GSC+ Port"}, 
+	{HPHW_BCPORT, 0x503, 0x0000C, 0x00, "Java BC GSC+ Port"}, 
+	{HPHW_BCPORT, 0x57F, 0x0000C, 0x00, "Hitachi Ghostview GSC+ Port"}, 
+	{HPHW_BCPORT, 0x501, 0x0000C, 0x00, "U2-IOA BC GSC+ Port"}, 
+	{HPHW_BCPORT, 0x502, 0x0000C, 0x00, "Uturn-IOA BC GSC+ Port"}, 
+	{HPHW_BCPORT, 0x780, 0x0000C, 0x00, "Astro BC Ropes Port"}, 
+	{HPHW_BCPORT, 0x506, 0x0000C, 0x00, "NEC-IOS BC HSC Port"}, 
+	{HPHW_BCPORT, 0x004, 0x0000C, 0x00, "Cheetah BC SMB Port"}, 
+	{HPHW_BCPORT, 0x006, 0x0000C, 0x00, "Cheetah BC MID_BUS Port"}, 
+	{HPHW_BCPORT, 0x005, 0x0000C, 0x00, "Condor BC MID_BUS Port"}, 
+	{HPHW_BCPORT, 0x100, 0x0000C, 0x00, "Condor BC HP-PB Port"}, 
+	{HPHW_BCPORT, 0x184, 0x0000C, 0x00, "Summit BC Port"}, 
+	{HPHW_BCPORT, 0x101, 0x0000C, 0x00, "Summit BC HP-PB Port"}, 
+	{HPHW_BCPORT, 0x102, 0x0000C, 0x00, "HP-PB Port (prefetch)"}, 
+	{HPHW_BCPORT, 0x500, 0x0000C, 0x00, "Gecko BOA BC GSC+ Port"}, 
+	{HPHW_BCPORT, 0x103, 0x0000C, 0x00, "Gecko BOA BC HP-PB Port"}, 
+	{HPHW_BCPORT, 0x507, 0x0000C, 0x00, "Keyaki BC GSC+ Port"}, 
+	{HPHW_BCPORT, 0x508, 0x0000C, 0x00, "Keyaki-DX BC GSC+ Port"}, 
+	{HPHW_BCPORT, 0x584, 0x0000C, 0x10, "DEW BC Runway Port"}, 
+	{HPHW_BCPORT, 0x800, 0x0000C, 0x10, "DEW BC Merced Port"}, 
+	{HPHW_BCPORT, 0x801, 0x0000C, 0x10, "SMC Bus Interface Merced Bus0"}, 
+	{HPHW_BCPORT, 0x802, 0x0000C, 0x10, "SMC Bus INterface Merced Bus1"}, 
+	{HPHW_BCPORT, 0x803, 0x0000C, 0x10, "IKE I/O BC Merced Port"}, 
+	{HPHW_BCPORT, 0x781, 0x0000C, 0x00, "IKE I/O BC Ropes Port"}, 
+	{HPHW_BCPORT, 0x804, 0x0000C, 0x10, "REO I/O BC Merced Port"}, 
+	{HPHW_BCPORT, 0x782, 0x0000C, 0x00, "REO I/O BC Ropes Port"}, 
+	{HPHW_BCPORT, 0x784, 0x0000C, 0x00, "Pluto I/O BC Ropes Port"}, 
+	{HPHW_BRIDGE, 0x05D, 0x0000A, 0x00, "SummitHawk Dino PCI Bridge"}, 
+	{HPHW_BRIDGE, 0x680, 0x0000A, 0x00, "Dino PCI Bridge"}, 
+	{HPHW_BRIDGE, 0x682, 0x0000A, 0x00, "Cujo PCI Bridge"}, 
+	{HPHW_BRIDGE, 0x782, 0x0000A, 0x00, "Elroy PCI Bridge"}, 
+	{HPHW_BRIDGE, 0x583, 0x000A5, 0x00, "Saga PCI Bridge"}, 
+	{HPHW_BRIDGE, 0x783, 0x0000A, 0x00, "Mercury PCI Bridge"}, 
+	{HPHW_BRIDGE, 0x784, 0x0000A, 0x00, "Quicksilver AGP Bridge"}, 
+	{HPHW_B_DMA, 0x004, 0x00018, 0x00, "Parallel I/O"}, 
+	{HPHW_B_DMA, 0x004, 0x00019, 0x00, "Parallel RDB"}, 
+	{HPHW_B_DMA, 0x004, 0x00020, 0x80, "MID_BUS PSI"}, 
+	{HPHW_B_DMA, 0x004, 0x0002F, 0x80, "HP-PB Transit PSI (36960A)"}, 
+	{HPHW_B_DMA, 0x008, 0x00051, 0x80, "HP-PB Transit 802.3"}, 
+	{HPHW_B_DMA, 0x004, 0x00052, 0x80, "Miura LAN/Console (J2146A)"}, 
+	{HPHW_B_DMA, 0x008, 0x00058, 0x80, "HP-PB Transit 802.4"}, 
+	{HPHW_B_DMA, 0x005, 0x00060, 0x80, "KittyHawk CSY Core LAN/Console"}, 
+	{HPHW_B_DMA, 0x014, 0x00060, 0x80, "Diablo LAN/Console"}, 
+	{HPHW_B_DMA, 0x054, 0x00060, 0x80, "Countach LAN/Console"}, 
+	{HPHW_B_DMA, 0x004, 0x00094, 0x80, "KittyHawk GSC+ Exerciser"}, 
+	{HPHW_B_DMA, 0x004, 0x00100, 0x80, "HP-PB HF Interface"}, 
+	{HPHW_B_DMA, 0x000, 0x00206, 0x80, "MELCO HMPHA"}, 
+	{HPHW_B_DMA, 0x005, 0x00206, 0x80, "MELCO HMPHA_10"}, 
+	{HPHW_B_DMA, 0x006, 0x00206, 0x80, "MELCO HMQHA"}, 
+	{HPHW_B_DMA, 0x007, 0x00206, 0x80, "MELCO HMQHA_10"}, 
+	{HPHW_B_DMA, 0x004, 0x207, 0x80, "MELCO HNDWA MDWS-70"}, 
+	{HPHW_CIO, 0x004, 0x00010, 0x00, "VLSI CIO"}, 
+	{HPHW_CIO, 0x005, 0x00010, 0x00, "Silverfox CIO"}, 
+	{HPHW_CIO, 0x006, 0x00010, 0x00, "Emerald CIO"}, 
+	{HPHW_CIO, 0x008, 0x00010, 0x00, "Discrete CIO"}, 
+	{HPHW_CONSOLE, 0x004, 0x0001C, 0x00, "Cheetah console"}, 
+	{HPHW_CONSOLE, 0x005, 0x0001C, 0x00, "Emerald console"}, 
+	{HPHW_CONSOLE, 0x01A, 0x0001F, 0x00, "Jason/Anole 64 Null Console"}, 
+	{HPHW_CONSOLE, 0x01B, 0x0001F, 0x00, "Jason/Anole 100 Null Console"}, 
+	{HPHW_FABRIC, 0x004, 0x000AA, 0x80, "Halfdome DNA Central Agent"}, 
+	{HPHW_FABRIC, 0x005, 0x000AA, 0x80, "Keystone DNA Central Agent"},
+	{HPHW_FABRIC, 0x007, 0x000AA, 0x80, "Caribe DNA Central Agent"}, 
+	{HPHW_FABRIC, 0x004, 0x000AB, 0x00, "Halfdome TOGO Fabric Crossbar"}, 
+	{HPHW_FABRIC, 0x005, 0x000AB, 0x00, "Keystone TOGO Fabric Crossbar"},
+	{HPHW_FABRIC, 0x004, 0x000AC, 0x00, "Halfdome Sakura Fabric Router"}, 
+	{HPHW_FIO, 0x025, 0x0002E, 0x80, "Armyknife Optional X.25"}, 
+	{HPHW_FIO, 0x004, 0x0004F, 0x0, "8-Port X.25 EISA-ACC (AMSO)"}, 
+	{HPHW_FIO, 0x004, 0x00071, 0x0, "Cobra Core SCSI"}, 
+	{HPHW_FIO, 0x005, 0x00071, 0x0, "Coral Core SCSI"}, 
+	{HPHW_FIO, 0x006, 0x00071, 0x0, "Bushmaster Core SCSI"}, 
+	{HPHW_FIO, 0x007, 0x00071, 0x0, "Scorpio Core SCSI"}, 
+	{HPHW_FIO, 0x008, 0x00071, 0x0, "Flounder Core SCSI"}, 
+	{HPHW_FIO, 0x009, 0x00071, 0x0, "Outfield Core SCSI"}, 
+	{HPHW_FIO, 0x00A, 0x00071, 0x0, "CoralII Core SCSI"}, 
+	{HPHW_FIO, 0x00B, 0x00071, 0x0, "Scorpio Jr. Core SCSI"}, 
+	{HPHW_FIO, 0x00C, 0x00071, 0x0, "Strider-50 Core SCSI"}, 
+	{HPHW_FIO, 0x00D, 0x00071, 0x0, "Strider-33 Core SCSI"}, 
+	{HPHW_FIO, 0x00E, 0x00071, 0x0, "Trailways-50 Core SCSI"}, 
+	{HPHW_FIO, 0x00F, 0x00071, 0x0, "Trailways-33 Core SCSI"}, 
+	{HPHW_FIO, 0x010, 0x00071, 0x0, "Pace Core SCSI"}, 
+	{HPHW_FIO, 0x011, 0x00071, 0x0, "Sidewinder Core SCSI"}, 
+	{HPHW_FIO, 0x019, 0x00071, 0x0, "Scorpio Sr. Core SCSI"}, 
+	{HPHW_FIO, 0x020, 0x00071, 0x0, "Scorpio 100 Core SCSI"}, 
+	{HPHW_FIO, 0x021, 0x00071, 0x0, "Spectra 50 Core SCSI"}, 
+	{HPHW_FIO, 0x022, 0x00071, 0x0, "Spectra 75 Core SCSI"}, 
+	{HPHW_FIO, 0x023, 0x00071, 0x0, "Spectra 100 Core SCSI"}, 
+	{HPHW_FIO, 0x024, 0x00071, 0x0, "Fast Pace Core SCSI"}, 
+	{HPHW_FIO, 0x026, 0x00071, 0x0, "CoralII Jaguar Core SCSI"}, 
+	{HPHW_FIO, 0x004, 0x00072, 0x0, "Cobra Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x005, 0x00072, 0x0, "Coral Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x006, 0x00072, 0x0, "Bushmaster Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x007, 0x00072, 0x0, "Scorpio Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x008, 0x00072, 0x0, "Flounder Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x009, 0x00072, 0x0, "Outfield Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x00A, 0x00072, 0x0, "CoralII Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x00B, 0x00072, 0x0, "Scorpio Jr. Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x00C, 0x00072, 0x0, "Strider-50 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x00D, 0x00072, 0x0, "Strider-33 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x00E, 0x00072, 0x0, "Trailways-50 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x00F, 0x00072, 0x0, "Trailways-33 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x010, 0x00072, 0x0, "Pace Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x011, 0x00072, 0x0, "Sidewinder Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x019, 0x00072, 0x0, "Scorpio Sr. Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x020, 0x00072, 0x0, "Scorpio 100 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x021, 0x00072, 0x0, "Spectra 50 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x022, 0x00072, 0x0, "Spectra 75 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x023, 0x00072, 0x0, "Spectra 100 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x024, 0x00072, 0x0, "Fast Pace Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x026, 0x00072, 0x0, "CoralII Jaguar Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x004, 0x00073, 0x0, "Cobra Core HIL"}, 
+	{HPHW_FIO, 0x005, 0x00073, 0x0, "Coral Core HIL"}, 
+	{HPHW_FIO, 0x006, 0x00073, 0x0, "Bushmaster Core HIL"}, 
+	{HPHW_FIO, 0x007, 0x00073, 0x0, "Scorpio Core HIL"}, 
+	{HPHW_FIO, 0x008, 0x00073, 0x0, "Flounder Core HIL"}, 
+	{HPHW_FIO, 0x009, 0x00073, 0x0, "Outfield Core HIL"}, 
+	{HPHW_FIO, 0x00A, 0x00073, 0x0, "CoralII Core HIL"}, 
+	{HPHW_FIO, 0x00B, 0x00073, 0x0, "Scorpio Jr. Core HIL"}, 
+	{HPHW_FIO, 0x00C, 0x00073, 0x0, "Strider-50 Core HIL"}, 
+	{HPHW_FIO, 0x00D, 0x00073, 0x0, "Strider-33 Core HIL"}, 
+	{HPHW_FIO, 0x00E, 0x00073, 0x0, "Trailways-50 Core HIL"}, 
+	{HPHW_FIO, 0x00F, 0x00073, 0x0, "Trailways-33 Core HIL"}, 
+	{HPHW_FIO, 0x010, 0x00073, 0x0, "Pace Core HIL"}, 
+	{HPHW_FIO, 0x011, 0x00073, 0xcc, "SuperPace Wax HIL"}, 
+	{HPHW_FIO, 0x012, 0x00073, 0x0, "Mirage Jr Wax HIL"}, 
+	{HPHW_FIO, 0x013, 0x00073, 0x0, "Mirage 100 Wax HIL"}, 
+	{HPHW_FIO, 0x014, 0x00073, 0x0, "Electra Wax HIL"}, 
+	{HPHW_FIO, 0x017, 0x00073, 0x0, "Raven Backplane Wax HIL"}, 
+	{HPHW_FIO, 0x019, 0x00073, 0x0, "Scorpio Sr. Core HIL"}, 
+	{HPHW_FIO, 0x01E, 0x00073, 0x0, "Raven T' Wax HIL"}, 
+	{HPHW_FIO, 0x01F, 0x00073, 0x0, "SkyHawk 100/120 Wax HIL"}, 
+	{HPHW_FIO, 0x020, 0x00073, 0x0, "Scorpio 100 Core HIL"}, 
+	{HPHW_FIO, 0x021, 0x00073, 0x0, "Spectra 50 Core HIL"}, 
+	{HPHW_FIO, 0x022, 0x00073, 0x0, "Spectra 75 Core HIL"}, 
+	{HPHW_FIO, 0x023, 0x00073, 0x0, "Spectra 100 Core HIL"}, 
+	{HPHW_FIO, 0x024, 0x00073, 0x0, "Fast Pace Core HIL"}, 
+	{HPHW_FIO, 0x026, 0x00073, 0x0, "CoralII Jaguar Core HIL"}, 
+	{HPHW_FIO, 0x02B, 0x00073, 0x0, "Mirage 80 Wax HIL"}, 
+	{HPHW_FIO, 0x02C, 0x00073, 0x0, "Mirage 100+ Wax HIL"}, 
+	{HPHW_FIO, 0x03A, 0x00073, 0x0, "Merlin+ Wax HIL"}, 
+	{HPHW_FIO, 0x040, 0x00073, 0x0, "Merlin 132 Wax HIL"}, 
+	{HPHW_FIO, 0x041, 0x00073, 0x0, "Merlin 160 Wax HIL"}, 
+	{HPHW_FIO, 0x043, 0x00073, 0x0, "Merlin 132/160 Wax HIL"}, 
+	{HPHW_FIO, 0x052, 0x00073, 0x0, "Raven+ Hi Power Backplane w/EISA Wax HIL"}, 
+	{HPHW_FIO, 0x053, 0x00073, 0x0, "Raven+ Hi Power Backplane wo/EISA Wax HIL"}, 
+	{HPHW_FIO, 0x054, 0x00073, 0x0, "Raven+ Lo Power Backplane w/EISA Wax HIL"}, 
+	{HPHW_FIO, 0x055, 0x00073, 0x0, "Raven+ Lo Power Backplane wo/EISA Wax HIL"}, 
+	{HPHW_FIO, 0x059, 0x00073, 0x0, "FireHawk 200 Wax HIL"}, 
+	{HPHW_FIO, 0x05A, 0x00073, 0x0, "Raven+ L2 Backplane w/EISA Wax HIL"}, 
+	{HPHW_FIO, 0x05B, 0x00073, 0x0, "Raven+ L2 Backplane wo/EISA Wax HIL"}, 
+	{HPHW_FIO, 0x05D, 0x00073, 0x0, "SummitHawk Wax HIL"}, 
+	{HPHW_FIO, 0x800, 0x00073, 0x0, "Hitachi Tiny 64 Wax HIL"}, 
+	{HPHW_FIO, 0x801, 0x00073, 0x0, "Hitachi Tiny 80 Wax HIL"}, 
+	{HPHW_FIO, 0x004, 0x00074, 0x0, "Cobra Core Centronics"}, 
+	{HPHW_FIO, 0x005, 0x00074, 0x0, "Coral Core Centronics"}, 
+	{HPHW_FIO, 0x006, 0x00074, 0x0, "Bushmaster Core Centronics"}, 
+	{HPHW_FIO, 0x007, 0x00074, 0x0, "Scorpio Core Centronics"}, 
+	{HPHW_FIO, 0x008, 0x00074, 0x0, "Flounder Core Centronics"}, 
+	{HPHW_FIO, 0x009, 0x00074, 0x0, "Outfield Core Centronics"}, 
+	{HPHW_FIO, 0x00A, 0x00074, 0x0, "CoralII Core Centronics"}, 
+	{HPHW_FIO, 0x00B, 0x00074, 0x0, "Scorpio Jr. Core Centronics"}, 
+	{HPHW_FIO, 0x00C, 0x00074, 0x0, "Strider-50 Core Centronics"}, 
+	{HPHW_FIO, 0x00D, 0x00074, 0x0, "Strider-33 Core Centronics"}, 
+	{HPHW_FIO, 0x00E, 0x00074, 0x0, "Trailways-50 Core Centronics"}, 
+	{HPHW_FIO, 0x00F, 0x00074, 0x0, "Trailways-33 Core Centronics"}, 
+	{HPHW_FIO, 0x010, 0x00074, 0x0, "Pace Core Centronics"}, 
+	{HPHW_FIO, 0x011, 0x00074, 0x0, "Sidewinder Core Centronics"}, 
+	{HPHW_FIO, 0x015, 0x00074, 0x0, "KittyHawk GSY Core Centronics"}, 
+	{HPHW_FIO, 0x016, 0x00074, 0x0, "Gecko Core Centronics"}, 
+	{HPHW_FIO, 0x019, 0x00074, 0x0, "Scorpio Sr. Core Centronics"}, 
+	{HPHW_FIO, 0x01A, 0x00074, 0x0, "Anole 64 Core Centronics"}, 
+	{HPHW_FIO, 0x01B, 0x00074, 0x0, "Anole 100 Core Centronics"}, 
+	{HPHW_FIO, 0x01C, 0x00074, 0x0, "Gecko 80 Core Centronics"}, 
+	{HPHW_FIO, 0x01D, 0x00074, 0x0, "Gecko 100 Core Centronics"}, 
+	{HPHW_FIO, 0x01F, 0x00074, 0x0, "SkyHawk 100/120 Core Centronics"}, 
+	{HPHW_FIO, 0x020, 0x00074, 0x0, "Scorpio 100 Core Centronics"}, 
+	{HPHW_FIO, 0x021, 0x00074, 0x0, "Spectra 50 Core Centronics"}, 
+	{HPHW_FIO, 0x022, 0x00074, 0x0, "Spectra 75 Core Centronics"}, 
+	{HPHW_FIO, 0x023, 0x00074, 0x0, "Spectra 100 Core Centronics"}, 
+	{HPHW_FIO, 0x024, 0x00074, 0x0, "Fast Pace Core Centronics"}, 
+	{HPHW_FIO, 0x026, 0x00074, 0x0, "CoralII Jaguar Core Centronics"}, 
+	{HPHW_FIO, 0x027, 0x00074, 0x0, "Piranha 100 Core Centronics"}, 
+	{HPHW_FIO, 0x028, 0x00074, 0x0, "Mirage Jr Core Centronics"}, 
+	{HPHW_FIO, 0x029, 0x00074, 0x0, "Mirage Core Centronics"}, 
+	{HPHW_FIO, 0x02A, 0x00074, 0x0, "Electra Core Centronics"}, 
+	{HPHW_FIO, 0x02B, 0x00074, 0x0, "Mirage 80 Core Centronics"}, 
+	{HPHW_FIO, 0x02C, 0x00074, 0x0, "Mirage 100+ Core Centronics"}, 
+	{HPHW_FIO, 0x02E, 0x00074, 0x0, "UL 350 Core Centronics"}, 
+	{HPHW_FIO, 0x02F, 0x00074, 0x0, "UL 550 Core Centronics"}, 
+	{HPHW_FIO, 0x032, 0x00074, 0x0, "Raven T' Core Centronics"}, 
+	{HPHW_FIO, 0x033, 0x00074, 0x0, "Anole T Core Centronics"}, 
+	{HPHW_FIO, 0x034, 0x00074, 0x0, "SAIC L-80 Core Centronics"}, 
+	{HPHW_FIO, 0x035, 0x00074, 0x0, "PCX-L2 712/132 Core Centronics"}, 
+	{HPHW_FIO, 0x036, 0x00074, 0x0, "PCX-L2 712/160 Core Centronics"}, 
+	{HPHW_FIO, 0x03B, 0x00074, 0x0, "Raven U/L2 Core Centronics"}, 
+	{HPHW_FIO, 0x03C, 0x00074, 0x0, "Merlin 132 Core Centronics"}, 
+	{HPHW_FIO, 0x03D, 0x00074, 0x0, "Merlin 160 Core Centronics"}, 
+	{HPHW_FIO, 0x03E, 0x00074, 0x0, "Merlin+ 132 Core Centronics"}, 
+	{HPHW_FIO, 0x03F, 0x00074, 0x0, "Merlin+ 180 Core Centronics"}, 
+	{HPHW_FIO, 0x044, 0x00074, 0x0, "Mohawk Core Centronics"}, 
+	{HPHW_FIO, 0x045, 0x00074, 0x0, "Rocky1 Core Centronics"}, 
+	{HPHW_FIO, 0x046, 0x00074, 0x0, "Rocky2 120 Core Centronics"}, 
+	{HPHW_FIO, 0x047, 0x00074, 0x0, "Rocky2 150 Core Centronics"}, 
+	{HPHW_FIO, 0x04B, 0x00074, 0x0, "Anole L2 132 Core Centronics"}, 
+	{HPHW_FIO, 0x04D, 0x00074, 0x0, "Anole L2 165 Core Centronics"}, 
+	{HPHW_FIO, 0x050, 0x00074, 0x0, "Merlin Jr 132 Core Centronics"}, 
+	{HPHW_FIO, 0x051, 0x00074, 0x0, "Firehawk Core Centronics"}, 
+	{HPHW_FIO, 0x056, 0x00074, 0x0, "Raven+ w SE FWSCSI Core Centronics"}, 
+	{HPHW_FIO, 0x057, 0x00074, 0x0, "Raven+ w Diff FWSCSI Core Centronics"}, 
+	{HPHW_FIO, 0x058, 0x00074, 0x0, "FireHawk 200 Core Centronics"}, 
+	{HPHW_FIO, 0x05C, 0x00074, 0x0, "SummitHawk 230 Core Centronics"}, 
+	{HPHW_FIO, 0x800, 0x00074, 0x0, "Hitachi Tiny 64 Core Centronics"}, 
+	{HPHW_FIO, 0x801, 0x00074, 0x0, "Hitachi Tiny 80 Core Centronics"}, 
+	{HPHW_FIO, 0x004, 0x00075, 0x0, "Cobra Core RS-232"}, 
+	{HPHW_FIO, 0x005, 0x00075, 0x0, "Coral Core RS-232"}, 
+	{HPHW_FIO, 0x006, 0x00075, 0x0, "Bushmaster Core RS-232"}, 
+	{HPHW_FIO, 0x007, 0x00075, 0x0, "Scorpio Core RS-232"}, 
+	{HPHW_FIO, 0x008, 0x00075, 0x0, "Flounder Core RS-232"}, 
+	{HPHW_FIO, 0x009, 0x00075, 0x0, "Outfield Core RS-232"}, 
+	{HPHW_FIO, 0x00A, 0x00075, 0x0, "CoralII Core RS-232"}, 
+	{HPHW_FIO, 0x00B, 0x00075, 0x0, "Scorpio Jr. Core RS-232"}, 
+	{HPHW_FIO, 0x00C, 0x00075, 0x0, "Strider-50 Core RS-232"}, 
+	{HPHW_FIO, 0x00D, 0x00075, 0x0, "Strider-33 Core RS-232"}, 
+	{HPHW_FIO, 0x00E, 0x00075, 0x0, "Trailways-50 Core RS-232"}, 
+	{HPHW_FIO, 0x00F, 0x00075, 0x0, "Trailways-33 Core RS-232"}, 
+	{HPHW_FIO, 0x010, 0x00075, 0x0, "Pace Core RS-232"}, 
+	{HPHW_FIO, 0x011, 0x00075, 0x0, "Sidewinder Core RS-232"}, 
+	{HPHW_FIO, 0x019, 0x00075, 0x0, "Scorpio Sr. Core RS-232"}, 
+	{HPHW_FIO, 0x020, 0x00075, 0x0, "Scorpio 100 Core RS-232"}, 
+	{HPHW_FIO, 0x021, 0x00075, 0x0, "Spectra 50 Core RS-232"}, 
+	{HPHW_FIO, 0x022, 0x00075, 0x0, "Spectra 75 Core RS-232"}, 
+	{HPHW_FIO, 0x023, 0x00075, 0x0, "Spectra 100 Core RS-232"}, 
+	{HPHW_FIO, 0x024, 0x00075, 0x0, "Fast Pace Core RS-232"}, 
+	{HPHW_FIO, 0x026, 0x00075, 0x0, "CoralII Jaguar Core RS-232"}, 
+	{HPHW_FIO, 0x004, 0x00077, 0x0, "Coral SGC Graphics"}, 
+	{HPHW_FIO, 0x005, 0x00077, 0x0, "Hyperdrive Optional Graphics"}, 
+	{HPHW_FIO, 0x006, 0x00077, 0x0, "Stinger Optional Graphics"}, 
+	{HPHW_FIO, 0x007, 0x00077, 0x0, "Scorpio Builtin Graphics"}, 
+	{HPHW_FIO, 0x008, 0x00077, 0x0, "Anole Hyperdrive Optional Graphics"}, 
+	{HPHW_FIO, 0x009, 0x00077, 0x0, "Thunder II graphics EISA form"}, 
+	{HPHW_FIO, 0x00A, 0x00077, 0x0, "Thunder II graphics GSA form"}, 
+	{HPHW_FIO, 0x00B, 0x00077, 0x0, "Scorpio Jr Builtin Graphics"}, 
+	{HPHW_FIO, 0x00C, 0x00077, 0x0, "Strider-50 SSC Graphics"}, 
+	{HPHW_FIO, 0x00D, 0x00077, 0x0, "Strider-33 SSC Graphics"}, 
+	{HPHW_FIO, 0x00E, 0x00077, 0x0, "Trailways-50 SSC Graphics"}, 
+	{HPHW_FIO, 0x00F, 0x00077, 0x0, "Trailways-33 SSC Graphics"}, 
+	{HPHW_FIO, 0x010, 0x00077, 0x0, "Pace SGC Graphics"}, 
+	{HPHW_FIO, 0x011, 0x00077, 0x0, "Mohawk Opt. 2D Graphics (Kid)"}, 
+	{HPHW_FIO, 0x012, 0x00077, 0x0, "Raven Opt. 2D Graphics (Goat)"}, 
+	{HPHW_FIO, 0x016, 0x00077, 0x0, "Lego 24 SCG Graphics"}, 
+	{HPHW_FIO, 0x017, 0x00077, 0x0, "Lego 24Z SCG Graphics"}, 
+	{HPHW_FIO, 0x018, 0x00077, 0x0, "Lego 48Z SCG Graphics"}, 
+	{HPHW_FIO, 0x019, 0x00077, 0x0, "Scorpio Sr Builtin Graphics"}, 
+	{HPHW_FIO, 0x020, 0x00077, 0x0, "Scorpio 100 Builtin Graphics"}, 
+	{HPHW_FIO, 0x021, 0x00077, 0x0, "Spectra 50 Builtin Graphics"}, 
+	{HPHW_FIO, 0x022, 0x00077, 0x0, "Spectra 75 Builtin Graphics"}, 
+	{HPHW_FIO, 0x023, 0x00077, 0x0, "Spectra 100 Builtin Graphics"}, 
+	{HPHW_FIO, 0x024, 0x00077, 0x0, "Fast Pace SGC Graphics"}, 
+	{HPHW_FIO, 0x006, 0x0007A, 0x0, "Bushmaster Audio"}, 
+	{HPHW_FIO, 0x008, 0x0007A, 0x0, "Flounder Audio"}, 
+	{HPHW_FIO, 0x004, 0x0007B, 0x0, "UL Optional Audio"}, 
+	{HPHW_FIO, 0x007, 0x0007B, 0x0, "Scorpio Audio"}, 
+	{HPHW_FIO, 0x00B, 0x0007B, 0x0, "Scorpio Jr. Audio"}, 
+	{HPHW_FIO, 0x00C, 0x0007B, 0x0, "Strider-50 Audio"}, 
+	{HPHW_FIO, 0x00D, 0x0007B, 0x0, "Strider-33 Audio"}, 
+	{HPHW_FIO, 0x00E, 0x0007B, 0x0, "Trailways-50 Audio"}, 
+	{HPHW_FIO, 0x00F, 0x0007B, 0x0, "Trailways-33 Audio"}, 
+	{HPHW_FIO, 0x015, 0x0007B, 0x0, "KittyHawk GSY Core Audio"}, 
+	{HPHW_FIO, 0x016, 0x0007B, 0x0, "Gecko Audio"}, 
+	{HPHW_FIO, 0x019, 0x0007B, 0x0, "Scorpio Sr. Audio"}, 
+	{HPHW_FIO, 0x01A, 0x0007B, 0x0, "Anole 64 Audio"}, 
+	{HPHW_FIO, 0x01B, 0x0007B, 0x0, "Anole 100 Audio"}, 
+	{HPHW_FIO, 0x01C, 0x0007B, 0x0, "Gecko 80 Audio"}, 
+	{HPHW_FIO, 0x01D, 0x0007B, 0x0, "Gecko 100 Audio"}, 
+	{HPHW_FIO, 0x01F, 0x0007B, 0x0, "SkyHawk 100/120 Audio"}, 
+	{HPHW_FIO, 0x020, 0x0007B, 0x0, "Scorpio 100 Audio"}, 
+	{HPHW_FIO, 0x021, 0x0007B, 0x0, "Spectra 50 Audio"}, 
+	{HPHW_FIO, 0x022, 0x0007B, 0x0, "Spectra 75 Audio"}, 
+	{HPHW_FIO, 0x023, 0x0007B, 0x0, "Spectra 100 Audio"}, 
+	{HPHW_FIO, 0x028, 0x0007B, 0x0, "Mirage Jr Audio"}, 
+	{HPHW_FIO, 0x029, 0x0007B, 0x0, "Mirage Audio"}, 
+	{HPHW_FIO, 0x02A, 0x0007B, 0x0, "Electra Audio"}, 
+	{HPHW_FIO, 0x02B, 0x0007B, 0x0, "Mirage 80 Audio"}, 
+	{HPHW_FIO, 0x02C, 0x0007B, 0x0, "Mirage 100+ Audio"}, 
+	{HPHW_FIO, 0x032, 0x0007B, 0x0, "Raven T' Audio"}, 
+	{HPHW_FIO, 0x034, 0x0007B, 0x0, "SAIC L-80 Audio"}, 
+	{HPHW_FIO, 0x035, 0x0007B, 0x0, "PCX-L2 712/132 Core Audio"}, 
+	{HPHW_FIO, 0x036, 0x0007B, 0x0, "PCX-L2 712/160 Core Audio"}, 
+	{HPHW_FIO, 0x03B, 0x0007B, 0x0, "Raven U/L2 Core Audio"}, 
+	{HPHW_FIO, 0x03C, 0x0007B, 0x0, "Merlin 132 Core Audio"}, 
+	{HPHW_FIO, 0x03D, 0x0007B, 0x0, "Merlin 160 Core Audio"}, 
+	{HPHW_FIO, 0x03E, 0x0007B, 0x0, "Merlin+ 132 Core Audio"}, 
+	{HPHW_FIO, 0x03F, 0x0007B, 0x0, "Merlin+ 180 Core Audio"}, 
+	{HPHW_FIO, 0x044, 0x0007B, 0x0, "Mohawk Core Audio"}, 
+	{HPHW_FIO, 0x046, 0x0007B, 0x0, "Rocky2 120 Core Audio"}, 
+	{HPHW_FIO, 0x047, 0x0007B, 0x0, "Rocky2 150 Core Audio"}, 
+	{HPHW_FIO, 0x04B, 0x0007B, 0x0, "Anole L2 132 Core Audio"}, 
+	{HPHW_FIO, 0x04D, 0x0007B, 0x0, "Anole L2 165 Core Audio"}, 
+	{HPHW_FIO, 0x04E, 0x0007B, 0x0, "Kiji L2 132 Core Audio"}, 
+	{HPHW_FIO, 0x050, 0x0007B, 0x0, "Merlin Jr 132 Core Audio"}, 
+	{HPHW_FIO, 0x051, 0x0007B, 0x0, "Firehawk Audio"}, 
+	{HPHW_FIO, 0x056, 0x0007B, 0x0, "Raven+ w SE FWSCSI Core Audio"}, 
+	{HPHW_FIO, 0x057, 0x0007B, 0x0, "Raven+ w Diff FWSCSI Core Audio"}, 
+	{HPHW_FIO, 0x058, 0x0007B, 0x0, "FireHawk 200 Audio"}, 
+	{HPHW_FIO, 0x05C, 0x0007B, 0x0, "SummitHawk 230 Core Audio"}, 
+	{HPHW_FIO, 0x800, 0x0007B, 0x0, "Hitachi Tiny 64 Audio"}, 
+	{HPHW_FIO, 0x801, 0x0007B, 0x0, "Hitachi Tiny 80 Audio"}, 
+	{HPHW_FIO, 0x009, 0x0007C, 0x0, "Outfield FW SCSI"}, 
+	{HPHW_FIO, 0x00A, 0x0007C, 0x0, "CoralII FW SCSI"}, 
+	{HPHW_FIO, 0x026, 0x0007C, 0x0, "CoralII Jaguar FW SCSI"}, 
+	{HPHW_FIO, 0x009, 0x0007D, 0x0, "Outfield FDDI"}, 
+	{HPHW_FIO, 0x00A, 0x0007D, 0x0, "CoralII FDDI"}, 
+	{HPHW_FIO, 0x026, 0x0007D, 0x0, "CoralII Jaguar FDDI"}, 
+	{HPHW_FIO, 0x010, 0x0007E, 0x0, "Pace Audio"}, 
+	{HPHW_FIO, 0x024, 0x0007E, 0x0, "Fast Pace Audio"}, 
+	{HPHW_FIO, 0x009, 0x0007F, 0x0, "Outfield Audio"}, 
+	{HPHW_FIO, 0x00A, 0x0007F, 0x0, "CoralII Audio"}, 
+	{HPHW_FIO, 0x026, 0x0007F, 0x0, "CoralII Jaguar Audio"}, 
+	{HPHW_FIO, 0x010, 0x00080, 0x0, "Pace Core HPIB"}, 
+	{HPHW_FIO, 0x024, 0x00080, 0x0, "Fast Pace Core HPIB"}, 
+	{HPHW_FIO, 0x015, 0x00082, 0x0, "KittyHawk GSY Core SCSI"},
+	{HPHW_FIO, 0x016, 0x00082, 0x0, "Gecko Core SCSI"}, 
+	{HPHW_FIO, 0x01A, 0x00082, 0x0, "Anole 64 Core SCSI"}, 
+	{HPHW_FIO, 0x01B, 0x00082, 0x0, "Anole 100 Core SCSI"}, 
+	{HPHW_FIO, 0x01C, 0x00082, 0x0, "Gecko 80 Core SCSI"}, 
+	{HPHW_FIO, 0x01D, 0x00082, 0x0, "Gecko 100 Core SCSI"}, 
+	{HPHW_FIO, 0x01F, 0x00082, 0x0, "SkyHawk 100/120 Core SCSI"}, 
+	{HPHW_FIO, 0x027, 0x00082, 0x0, "Piranha 100 Core SCSI"}, 
+	{HPHW_FIO, 0x028, 0x00082, 0x0, "Mirage Jr Core SCSI"}, 
+	{HPHW_FIO, 0x029, 0x00082, 0x0, "Mirage Core SCSI"}, 
+	{HPHW_FIO, 0x02A, 0x00082, 0x0, "Electra Core SCSI"}, 
+	{HPHW_FIO, 0x02B, 0x00082, 0x0, "Mirage 80 Core SCSI"}, 
+	{HPHW_FIO, 0x02C, 0x00082, 0x0, "Mirage 100+ Core SCSI"}, 
+	{HPHW_FIO, 0x02E, 0x00082, 0x0, "UL 350 Core SCSI"}, 
+	{HPHW_FIO, 0x02F, 0x00082, 0x0, "UL 550 Core SCSI"}, 
+	{HPHW_FIO, 0x032, 0x00082, 0x0, "Raven T' Core SCSI"}, 
+	{HPHW_FIO, 0x033, 0x00082, 0x0, "Anole T Core SCSI"}, 
+	{HPHW_FIO, 0x034, 0x00082, 0x0, "SAIC L-80 Core SCSI"}, 
+	{HPHW_FIO, 0x035, 0x00082, 0x0, "PCX-L2 712/132 Core SCSI"}, 
+	{HPHW_FIO, 0x036, 0x00082, 0x0, "PCX-L2 712/160 Core SCSI"}, 
+	{HPHW_FIO, 0x03B, 0x00082, 0x0, "Raven U/L2 Core SCSI"}, 
+	{HPHW_FIO, 0x03C, 0x00082, 0x0, "Merlin 132 Core SCSI"}, 
+	{HPHW_FIO, 0x03D, 0x00082, 0x0, "Merlin 160 Core SCSI"}, 
+	{HPHW_FIO, 0x03E, 0x00082, 0x0, "Merlin+ 132 Core SCSI"}, 
+	{HPHW_FIO, 0x03F, 0x00082, 0x0, "Merlin+ 180 Core SCSI"}, 
+	{HPHW_FIO, 0x044, 0x00082, 0x0, "Mohawk Core SCSI"}, 
+	{HPHW_FIO, 0x045, 0x00082, 0x0, "Rocky1 Core SCSI"}, 
+	{HPHW_FIO, 0x046, 0x00082, 0x0, "Rocky2 120 Core SCSI"}, 
+	{HPHW_FIO, 0x047, 0x00082, 0x0, "Rocky2 150 Core SCSI"}, 
+	{HPHW_FIO, 0x04B, 0x00082, 0x0, "Anole L2 132 Core SCSI"}, 
+	{HPHW_FIO, 0x04D, 0x00082, 0x0, "Anole L2 165 Core SCSI"}, 
+	{HPHW_FIO, 0x04E, 0x00082, 0x0, "Kiji L2 132 Core SCSI"}, 
+	{HPHW_FIO, 0x050, 0x00082, 0x0, "Merlin Jr 132 Core SCSI"}, 
+	{HPHW_FIO, 0x051, 0x00082, 0x0, "Firehawk Core SCSI"}, 
+	{HPHW_FIO, 0x056, 0x00082, 0x0, "Raven+ w SE FWSCSI Core SCSI"}, 
+	{HPHW_FIO, 0x057, 0x00082, 0x0, "Raven+ w Diff FWSCSI Core SCSI"}, 
+	{HPHW_FIO, 0x058, 0x00082, 0x0, "FireHawk 200 Core SCSI"}, 
+	{HPHW_FIO, 0x05C, 0x00082, 0x0, "SummitHawk 230 Core SCSI"}, 
+	{HPHW_FIO, 0x05E, 0x00082, 0x0, "Staccato 132 Core SCSI"}, 
+	{HPHW_FIO, 0x05F, 0x00082, 0x0, "Staccato 180 Core SCSI"}, 
+	{HPHW_FIO, 0x800, 0x00082, 0x0, "Hitachi Tiny 64 Core SCSI"}, 
+	{HPHW_FIO, 0x801, 0x00082, 0x0, "Hitachi Tiny 80 Core SCSI"}, 
+	{HPHW_FIO, 0x016, 0x00083, 0x0, "Gecko Core PC Floppy"}, 
+	{HPHW_FIO, 0x01C, 0x00083, 0x0, "Gecko 80 Core PC Floppy"}, 
+	{HPHW_FIO, 0x01D, 0x00083, 0x0, "Gecko 100 Core PC Floppy"}, 
+	{HPHW_FIO, 0x051, 0x00083, 0x0, "Firehawk Core PC Floppy"}, 
+	{HPHW_FIO, 0x058, 0x00083, 0x0, "FireHawk 200 Core PC Floppy"}, 
+	{HPHW_FIO, 0x027, 0x00083, 0x0, "Piranha 100 Core PC Floppy"}, 
+	{HPHW_FIO, 0x028, 0x00083, 0x0, "Mirage Jr Core PC Floppy"}, 
+	{HPHW_FIO, 0x029, 0x00083, 0x0, "Mirage Core PC Floppy"}, 
+	{HPHW_FIO, 0x02A, 0x00083, 0x0, "Electra Core PC Floppy"}, 
+	{HPHW_FIO, 0x02B, 0x00083, 0x0, "Mirage 80 Core PC Floppy"}, 
+	{HPHW_FIO, 0x02C, 0x00083, 0x0, "Mirage 100+ Core PC Floppy"}, 
+	{HPHW_FIO, 0x02E, 0x00083, 0x0, "UL 350 Core PC Floppy"}, 
+	{HPHW_FIO, 0x02F, 0x00083, 0x0, "UL 550 Core PC Floppy"}, 
+	{HPHW_FIO, 0x032, 0x00083, 0x0, "Raven T' Core PC Floppy"}, 
+	{HPHW_FIO, 0x034, 0x00083, 0x0, "SAIC L-80 Core PC Floppy"}, 
+	{HPHW_FIO, 0x035, 0x00083, 0x0, "PCX-L2 712/132 Core Floppy"}, 
+	{HPHW_FIO, 0x036, 0x00083, 0x0, "PCX-L2 712/160 Core Floppy"}, 
+	{HPHW_FIO, 0x03B, 0x00083, 0x0, "Raven U/L2 Core PC Floppy"}, 
+	{HPHW_FIO, 0x03C, 0x00083, 0x0, "Merlin 132 Core PC Floppy"}, 
+	{HPHW_FIO, 0x03D, 0x00083, 0x0, "Merlin 160 Core PC Floppy"}, 
+	{HPHW_FIO, 0x03E, 0x00083, 0x0, "Merlin+ 132 Core PC Floppy"}, 
+	{HPHW_FIO, 0x03F, 0x00083, 0x0, "Merlin+ 180 Core PC Floppy"}, 
+	{HPHW_FIO, 0x045, 0x00083, 0x0, "Rocky1 Core PC Floppy"}, 
+	{HPHW_FIO, 0x046, 0x00083, 0x0, "Rocky2 120 Core PC Floppy"}, 
+	{HPHW_FIO, 0x047, 0x00083, 0x0, "Rocky2 150 Core PC Floppy"}, 
+	{HPHW_FIO, 0x04E, 0x00083, 0x0, "Kiji L2 132 Core PC Floppy"}, 
+	{HPHW_FIO, 0x050, 0x00083, 0x0, "Merlin Jr 132 Core PC Floppy"}, 
+	{HPHW_FIO, 0x056, 0x00083, 0x0, "Raven+ w SE FWSCSI Core PC Floppy"}, 
+	{HPHW_FIO, 0x057, 0x00083, 0x0, "Raven+ w Diff FWSCSI Core PC Floppy"}, 
+	{HPHW_FIO, 0x800, 0x00083, 0x0, "Hitachi Tiny 64 Core PC Floppy"}, 
+	{HPHW_FIO, 0x801, 0x00083, 0x0, "Hitachi Tiny 80 Core PC Floppy"},
+	{HPHW_FIO, 0x015, 0x00084, 0x0, "KittyHawk GSY Core PS/2 Port"}, 
+	{HPHW_FIO, 0x016, 0x00084, 0x0, "Gecko Core PS/2 Port"}, 
+	{HPHW_FIO, 0x018, 0x00084, 0x0, "Gecko Optional PS/2 Port"}, 
+	{HPHW_FIO, 0x01A, 0x00084, 0x0, "Anole 64 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x01B, 0x00084, 0x0, "Anole 100 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x01C, 0x00084, 0x0, "Gecko 80 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x01D, 0x00084, 0x0, "Gecko 100 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x01F, 0x00084, 0x0, "SkyHawk 100/120 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x027, 0x00084, 0x0, "Piranha 100 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x028, 0x00084, 0x0, "Mirage Jr Core PS/2 Port"}, 
+	{HPHW_FIO, 0x029, 0x00084, 0x0, "Mirage Core PS/2 Port"}, 
+	{HPHW_FIO, 0x02A, 0x00084, 0x0, "Electra Core PS/2 Port"}, 
+	{HPHW_FIO, 0x02B, 0x00084, 0x0, "Mirage 80 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x02C, 0x00084, 0x0, "Mirage 100+ Core PS/2 Port"}, 
+	{HPHW_FIO, 0x02E, 0x00084, 0x0, "UL 350 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x02F, 0x00084, 0x0, "UL 550 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x032, 0x00084, 0x0, "Raven T' Core PS/2 Port"}, 
+	{HPHW_FIO, 0x033, 0x00084, 0x0, "Anole T Core PS/2 Port"}, 
+	{HPHW_FIO, 0x034, 0x00084, 0x0, "SAIC L-80 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x035, 0x00084, 0x0, "PCX-L2 712/132 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x036, 0x00084, 0x0, "PCX-L2 712/160 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x03B, 0x00084, 0x0, "Raven U/L2 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x03C, 0x00084, 0x0, "Merlin 132 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x03D, 0x00084, 0x0, "Merlin 160 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x03E, 0x00084, 0x0, "Merlin+ 132 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x03F, 0x00084, 0x0, "Merlin+ 180 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x044, 0x00084, 0x0, "Mohawk Core PS/2 Port"}, 
+	{HPHW_FIO, 0x045, 0x00084, 0x0, "Rocky1 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x046, 0x00084, 0x0, "Rocky2 120 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x047, 0x00084, 0x0, "Rocky2 150 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x048, 0x00084, 0x0, "Rocky2 120 Dino PS/2 Port"}, 
+	{HPHW_FIO, 0x049, 0x00084, 0x0, "Rocky2 150 Dino PS/2 Port"}, 
+	{HPHW_FIO, 0x04B, 0x00084, 0x0, "Anole L2 132 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x04D, 0x00084, 0x0, "Anole L2 165 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x04E, 0x00084, 0x0, "Kiji L2 132 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x050, 0x00084, 0x0, "Merlin Jr 132 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x051, 0x00084, 0x0, "Firehawk Core PS/2 Port"}, 
+	{HPHW_FIO, 0x056, 0x00084, 0x0, "Raven+ w SE FWSCSI Core PS/2 Port"}, 
+	{HPHW_FIO, 0x057, 0x00084, 0x0, "Raven+ w Diff FWSCSI Core PS/2 Port"}, 
+	{HPHW_FIO, 0x058, 0x00084, 0x0, "FireHawk 200 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x05C, 0x00084, 0x0, "SummitHawk 230 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x800, 0x00084, 0x0, "Hitachi Tiny 64 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x801, 0x00084, 0x0, "Hitachi Tiny 80 Core PS/2 Port"}, 
+	{HPHW_FIO, 0x004, 0x00085, 0x0, "Solo GSC Optional Graphics"}, 
+	{HPHW_FIO, 0x005, 0x00085, 0x0, "Duet GSC Optional Graphics"}, 
+	{HPHW_FIO, 0x008, 0x00085, 0x0, "Anole Artist Optional Graphics"}, 
+	{HPHW_FIO, 0x010, 0x00085, 0x0, "Mirage 80 GSC Builtin Graphics"}, 
+	{HPHW_FIO, 0x011, 0x00085, 0x0, "Mirage 100+ GSC Builtin Graphics"}, 
+	{HPHW_FIO, 0x012, 0x00085, 0x0, "Mirage Jr GSC Builtin Graphics"}, 
+	{HPHW_FIO, 0x013, 0x00085, 0x0, "Mirage GSC Builtin Graphics"}, 
+	{HPHW_FIO, 0x014, 0x00085, 0x0, "Electra GSC Builtin Graphics"}, 
+	{HPHW_FIO, 0x016, 0x00085, 0x0, "Gecko GSC Core Graphics"}, 
+	{HPHW_FIO, 0x017, 0x00085, 0x0, "Gecko GSC Optional Graphics"}, 
+	{HPHW_FIO, 0x01A, 0x00085, 0x0, "Anole 64 Artist Builtin Graphics"}, 
+	{HPHW_FIO, 0x01B, 0x00085, 0x0, "Anole 100 Artist Builtin Graphics"}, 
+	{HPHW_FIO, 0x01C, 0x00085, 0x0, "Gecko 80 GSC Core Graphics"}, 
+	{HPHW_FIO, 0x01D, 0x00085, 0x0, "Gecko 100 GSC Core Graphics"}, 
+	{HPHW_FIO, 0x032, 0x00085, 0x0, "Raven T' GSC Core Graphics"}, 
+	{HPHW_FIO, 0x033, 0x00085, 0x0, "Anole T Artist Builtin Graphics"}, 
+	{HPHW_FIO, 0x034, 0x00085, 0x0, "SAIC L-80 GSC Core Graphics"}, 
+	{HPHW_FIO, 0x035, 0x00085, 0x0, "PCX-L2 712/132 Core Graphics"}, 
+	{HPHW_FIO, 0x036, 0x00085, 0x0, "PCX-L2 712/160 Core Graphics"}, 
+	{HPHW_FIO, 0x03B, 0x00085, 0x0, "Raven U/L2 Core Graphics"}, 
+	{HPHW_FIO, 0x03C, 0x00085, 0x0, "Merlin 132 Core Graphics"}, 
+	{HPHW_FIO, 0x03D, 0x00085, 0x0, "Merlin 160 Core Graphics"}, 
+	{HPHW_FIO, 0x03E, 0x00085, 0x0, "Merlin+ 132 Core Graphics"}, 
+	{HPHW_FIO, 0x03F, 0x00085, 0x0, "Merlin+ 180 Core Graphics"}, 
+	{HPHW_FIO, 0x045, 0x00085, 0x0, "Rocky1 Core Graphics"}, 
+	{HPHW_FIO, 0x046, 0x00085, 0x0, "Rocky2 120 Core Graphics"}, 
+	{HPHW_FIO, 0x047, 0x00085, 0x0, "Rocky2 150 Core Graphics"}, 
+	{HPHW_FIO, 0x04B, 0x00085, 0x0, "Anole L2 132 Core Graphics"}, 
+	{HPHW_FIO, 0x04D, 0x00085, 0x0, "Anole L2 165 Core Graphics"}, 
+	{HPHW_FIO, 0x04E, 0x00085, 0x0, "Kiji L2 132 Core Graphics"}, 
+	{HPHW_FIO, 0x050, 0x00085, 0x0, "Merlin Jr 132 Core Graphics"}, 
+	{HPHW_FIO, 0x056, 0x00085, 0x0, "Raven+ w SE FWSCSI Core Graphics"}, 
+	{HPHW_FIO, 0x057, 0x00085, 0x0, "Raven+ w Diff FWSCSI Core Graphics"}, 
+	{HPHW_FIO, 0x800, 0x00085, 0x0, "Hitachi Tiny 64 Core Graphics"}, 
+	{HPHW_FIO, 0x801, 0x00085, 0x0, "Hitachi Tiny 80 Core Graphics"}, 
+	{HPHW_FIO, 0x004, 0x00086, 0x0, "GSC IBM Token Ring"}, 
+	{HPHW_FIO, 0x015, 0x00087, 0x0, "Gecko Optional ISDN"}, 
+	{HPHW_FIO, 0x016, 0x00087, 0x0, "Gecko Core ISDN"}, 
+	{HPHW_FIO, 0x01C, 0x00087, 0x0, "Gecko 80 Core ISDN"}, 
+	{HPHW_FIO, 0x01D, 0x00087, 0x0, "Gecko 100 Core ISDN"}, 
+	{HPHW_FIO, 0x010, 0x00088, 0x0, "Pace VME Networking"}, 
+	{HPHW_FIO, 0x011, 0x00088, 0x0, "Sidewinder VME Networking"}, 
+	{HPHW_FIO, 0x01A, 0x00088, 0x0, "Anole 64 VME Networking"}, 
+	{HPHW_FIO, 0x01B, 0x00088, 0x0, "Anole 100 VME Networking"}, 
+	{HPHW_FIO, 0x024, 0x00088, 0x0, "Fast Pace VME Networking"}, 
+	{HPHW_FIO, 0x034, 0x00088, 0x0, "Anole T VME Networking"}, 
+	{HPHW_FIO, 0x04A, 0x00088, 0x0, "Anole L2 132 VME Networking"}, 
+	{HPHW_FIO, 0x04C, 0x00088, 0x0, "Anole L2 165 VME Networking"}, 
+	{HPHW_FIO, 0x011, 0x0008A, 0x0, "WB-96 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x012, 0x0008A, 0x0, "Orville Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x013, 0x0008A, 0x0, "Wilbur Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x014, 0x0008A, 0x0, "WB-80 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x015, 0x0008A, 0x0, "KittyHawk GSY Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x016, 0x0008A, 0x0, "Gecko Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x018, 0x0008A, 0x0, "Gecko Optional LAN (802.3)"}, 
+	{HPHW_FIO, 0x01A, 0x0008A, 0x0, "Anole 64 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x01B, 0x0008A, 0x0, "Anole 100 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x01C, 0x0008A, 0x0, "Gecko 80 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x01D, 0x0008A, 0x0, "Gecko 100 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x01F, 0x0008A, 0x0, "SkyHawk 100/120 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x027, 0x0008A, 0x0, "Piranha 100 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x028, 0x0008A, 0x0, "Mirage Jr Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x029, 0x0008A, 0x0, "Mirage Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x02A, 0x0008A, 0x0, "Electra Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x02B, 0x0008A, 0x0, "Mirage 80 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x02C, 0x0008A, 0x0, "Mirage 100+ Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x02E, 0x0008A, 0x0, "UL 350 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x02F, 0x0008A, 0x0, "UL 350 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x032, 0x0008A, 0x0, "Raven T' Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x033, 0x0008A, 0x0, "Anole T Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x034, 0x0008A, 0x0, "SAIC L-80 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x035, 0x0008A, 0x0, "PCX-L2 712/132 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x036, 0x0008A, 0x0, "PCX-L2 712/160 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x03B, 0x0008A, 0x0, "Raven U/L2 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x03C, 0x0008A, 0x0, "Merlin 132 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x03D, 0x0008A, 0x0, "Merlin 160 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x044, 0x0008A, 0x0, "Mohawk Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x045, 0x0008A, 0x0, "Rocky1 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x046, 0x0008A, 0x0, "Rocky2 120 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x047, 0x0008A, 0x0, "Rocky2 150 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x04B, 0x0008A, 0x0, "Anole L2 132 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x04D, 0x0008A, 0x0, "Anole L2 165 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x04E, 0x0008A, 0x0, "Kiji L2 132 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x050, 0x0008A, 0x0, "Merlin Jr 132 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x058, 0x0008A, 0x0, "FireHawk 200 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x800, 0x0008A, 0x0, "Hitachi Tiny 64 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x801, 0x0008A, 0x0, "Hitachi Tiny 80 Core LAN (802.3)"}, 
+	{HPHW_FIO, 0x004, 0x0008C, 0x0, "SkyHawk 100/120 Wax RS-232"}, 
+	{HPHW_FIO, 0x005, 0x0008C, 0x0, "SAIC L-80 Wax RS-232"}, 
+	{HPHW_FIO, 0x006, 0x0008C, 0x0, "Raven U/L2 Dino RS-232"}, 
+	{HPHW_FIO, 0x007, 0x0008C, 0x0, "Dino RS-232"}, 
+	{HPHW_FIO, 0x008, 0x0008C, 0x0, "Merlin 132 Dino RS-232"}, 
+	{HPHW_FIO, 0x009, 0x0008C, 0x0, "Merlin 160 Dino RS-232"}, 
+	{HPHW_FIO, 0x00A, 0x0008C, 0x0, "Merlin Jr 132 Dino RS-232"}, 
+	{HPHW_FIO, 0x010, 0x0008C, 0x0, "Mirage 80 Wax RS-232"}, 
+	{HPHW_FIO, 0x011, 0x0008C, 0x0, "Mirage 100+ Wax RS-232"}, 
+	{HPHW_FIO, 0x012, 0x0008C, 0x0, "Mirage Jr Wax RS-232"}, 
+	{HPHW_FIO, 0x013, 0x0008C, 0x0, "Mirage Wax RS-232"}, 
+	{HPHW_FIO, 0x014, 0x0008C, 0x0, "Electra Wax RS-232"}, 
+	{HPHW_FIO, 0x015, 0x0008C, 0x0, "KittyHawk GSY Core RS-232"}, 
+	{HPHW_FIO, 0x016, 0x0008C, 0x0, "Gecko Core RS-232"}, 
+	{HPHW_FIO, 0x017, 0x0008C, 0x0, "Raven Backplane RS-232"}, 
+	{HPHW_FIO, 0x018, 0x0008C, 0x0, "Gecko Optional RS-232"}, 
+	{HPHW_FIO, 0x019, 0x0008C, 0x0, "Merlin+ 180 Dino RS-232"}, 
+	{HPHW_FIO, 0x01A, 0x0008C, 0x0, "Anole 64 Core RS-232"}, 
+	{HPHW_FIO, 0x01B, 0x0008C, 0x0, "Anole 100 Core RS-232"}, 
+	{HPHW_FIO, 0x01C, 0x0008C, 0x0, "Gecko 80 Core RS-232"}, 
+	{HPHW_FIO, 0x01D, 0x0008C, 0x0, "Gecko 100 Core RS-232"}, 
+	{HPHW_FIO, 0x01E, 0x0008C, 0x0, "Raven T' Wax RS-232"}, 
+	{HPHW_FIO, 0x01F, 0x0008C, 0x0, "SkyHawk 100/120 Core RS-232"}, 
+	{HPHW_FIO, 0x020, 0x0008C, 0x0, "Anole 64 Timi RS-232"}, 
+	{HPHW_FIO, 0x021, 0x0008C, 0x0, "Anole 100 Timi RS-232"}, 
+	{HPHW_FIO, 0x022, 0x0008C, 0x0, "Merlin+ 132 Dino RS-232"}, 
+	{HPHW_FIO, 0x023, 0x0008C, 0x0, "Rocky1 Wax RS-232"}, 
+	{HPHW_FIO, 0x025, 0x0008C, 0x0, "Armyknife Optional RS-232"}, 
+	{HPHW_FIO, 0x026, 0x0008C, 0x0, "Piranha 100 Wax RS-232"}, 
+	{HPHW_FIO, 0x027, 0x0008C, 0x0, "Piranha 100 Core RS-232"}, 
+	{HPHW_FIO, 0x028, 0x0008C, 0x0, "Mirage Jr Core RS-232"}, 
+	{HPHW_FIO, 0x029, 0x0008C, 0x0, "Mirage Core RS-232"}, 
+	{HPHW_FIO, 0x02A, 0x0008C, 0x0, "Electra Core RS-232"}, 
+	{HPHW_FIO, 0x02B, 0x0008C, 0x0, "Mirage 80 Core RS-232"}, 
+	{HPHW_FIO, 0x02C, 0x0008C, 0x0, "Mirage 100+ Core RS-232"}, 
+	{HPHW_FIO, 0x02E, 0x0008C, 0x0, "UL 350 Lasi Core RS-232"}, 
+	{HPHW_FIO, 0x02F, 0x0008C, 0x0, "UL 550 Lasi Core RS-232"}, 
+	{HPHW_FIO, 0x030, 0x0008C, 0x0, "UL 350 Wax Core RS-232"}, 
+	{HPHW_FIO, 0x031, 0x0008C, 0x0, "UL 550 Wax Core RS-232"}, 
+	{HPHW_FIO, 0x032, 0x0008C, 0x0, "Raven T' Lasi Core RS-232"}, 
+	{HPHW_FIO, 0x033, 0x0008C, 0x0, "Anole T Core RS-232"}, 
+	{HPHW_FIO, 0x034, 0x0008C, 0x0, "SAIC L-80 Core RS-232"}, 
+	{HPHW_FIO, 0x035, 0x0008C, 0x0, "PCX-L2 712/132 Core RS-232"}, 
+	{HPHW_FIO, 0x036, 0x0008C, 0x0, "PCX-L2 712/160 Core RS-232"}, 
+	{HPHW_FIO, 0x03A, 0x0008C, 0x0, "Merlin+ Wax RS-232"}, 
+	{HPHW_FIO, 0x03B, 0x0008C, 0x0, "Raven U/L2 Core RS-232"}, 
+	{HPHW_FIO, 0x03C, 0x0008C, 0x0, "Merlin 132 Core RS-232"}, 
+	{HPHW_FIO, 0x03D, 0x0008C, 0x0, "Merlin 160 Core RS-232"}, 
+	{HPHW_FIO, 0x03E, 0x0008C, 0x0, "Merlin+ 132 Core RS-232"}, 
+	{HPHW_FIO, 0x03F, 0x0008C, 0x0, "Merlin+ 180 Core RS-232"}, 
+	{HPHW_FIO, 0x040, 0x0008C, 0x0, "Merlin 132 Wax RS-232"}, 
+	{HPHW_FIO, 0x041, 0x0008C, 0x0, "Merlin 160 Wax RS-232"}, 
+	{HPHW_FIO, 0x043, 0x0008C, 0x0, "Merlin 132/160 Wax RS-232"}, 
+	{HPHW_FIO, 0x044, 0x0008C, 0x0, "Mohawk Core RS-232"}, 
+	{HPHW_FIO, 0x045, 0x0008C, 0x0, "Rocky1 Core RS-232"}, 
+	{HPHW_FIO, 0x046, 0x0008C, 0x0, "Rocky2 120 Core RS-232"}, 
+	{HPHW_FIO, 0x047, 0x0008C, 0x0, "Rocky2 150 Core RS-232"}, 
+	{HPHW_FIO, 0x048, 0x0008C, 0x0, "Rocky2 120 Dino RS-232"}, 
+	{HPHW_FIO, 0x049, 0x0008C, 0x0, "Rocky2 150 Dino RS-232"}, 
+	{HPHW_FIO, 0x04A, 0x0008C, 0x0, "Anole L2 132 TIMI RS-232"}, 
+	{HPHW_FIO, 0x04B, 0x0008C, 0x0, "Anole L2 l32 Core RS-232"}, 
+	{HPHW_FIO, 0x04C, 0x0008D, 0x0, "Anole L2 165 TIMI RS-232"}, 
+	{HPHW_FIO, 0x04D, 0x0008C, 0x0, "Anole L2 165 Core RS-232"}, 
+	{HPHW_FIO, 0x04E, 0x0008C, 0x0, "Kiji L2 132 Core RS-232"}, 
+	{HPHW_FIO, 0x04F, 0x0008C, 0x0, "Kiji L2 132 Dino RS-232"}, 
+	{HPHW_FIO, 0x050, 0x0008C, 0x0, "Merlin Jr 132 Core RS-232"}, 
+	{HPHW_FIO, 0x051, 0x0008C, 0x0, "Firehawk Core RS-232"}, 
+	{HPHW_FIO, 0x052, 0x0008C, 0x0, "Raven+ Hi Power Backplane w EISA RS-232"}, 
+	{HPHW_FIO, 0x053, 0x0008C, 0x0, "Raven+ Hi Power Backplane w/o EISA RS-232"}, 
+	{HPHW_FIO, 0x054, 0x0008C, 0x0, "Raven+ Lo Power Backplane w EISA RS-232"}, 
+	{HPHW_FIO, 0x055, 0x0008C, 0x0, "Raven+ Lo Power Backplane w/o EISA RS-232"}, 
+	{HPHW_FIO, 0x056, 0x0008C, 0x0, "Raven+ w SE FWSCSI Core RS-232"}, 
+	{HPHW_FIO, 0x057, 0x0008C, 0x0, "Raven+ w Diff FWSCSI Core RS-232"}, 
+	{HPHW_FIO, 0x058, 0x0008C, 0x0, "FireHawk 200 Core RS-232"}, 
+	{HPHW_FIO, 0x059, 0x0008C, 0x0, "FireHawk 200 Wax RS-232"}, 
+	{HPHW_FIO, 0x05A, 0x0008C, 0x0, "Raven+ L2 Backplane w EISA RS-232"}, 
+	{HPHW_FIO, 0x05B, 0x0008C, 0x0, "Raven+ L2 Backplane w/o EISA RS-232"}, 
+	{HPHW_FIO, 0x05D, 0x0008C, 0x0, "SummitHawk Dino RS-232"}, 
+	{HPHW_FIO, 0x05E, 0x0008C, 0x0, "Staccato 132 Core LAN RS-232"}, 
+	{HPHW_FIO, 0x05F, 0x0008C, 0x0, "Staccato 180 Core LAN RS-232"}, 
+	{HPHW_FIO, 0x800, 0x0008C, 0x0, "Hitachi Tiny 64 Core RS-232"}, 
+	{HPHW_FIO, 0x801, 0x0008C, 0x0, "Hitachi Tiny 80 Core RS-232"}, 
+	{HPHW_FIO, 0x015, 0x0008D, 0x0, "Gecko Optional RJ-16"}, 
+	{HPHW_FIO, 0x016, 0x0008D, 0x0, "Gecko Core RJ-16"}, 
+	{HPHW_FIO, 0x01C, 0x0008D, 0x0, "Gecko 80 Core RJ-16"}, 
+	{HPHW_FIO, 0x01D, 0x0008D, 0x0, "Gecko 100 Core RJ-16"}, 
+	{HPHW_FIO, 0x004, 0x0008F, 0x0, "Anole Boot Rom"}, 
+	{HPHW_FIO, 0x005, 0x0008F, 0x0, "Rocky1 Boot Rom"}, 
+	{HPHW_FIO, 0x006, 0x0008F, 0x0, "Rocky2 120 Boot Rom"}, 
+	{HPHW_FIO, 0x007, 0x0008F, 0x0, "Rocky2 150 Boot Rom"}, 
+	{HPHW_FIO, 0x01B, 0x0008F, 0x0, "Anole 100 Boot Rom"}, 
+	{HPHW_FIO, 0x006, 0x00096, 0x0, "Raven U/L2 Dino PS/2 Port"}, 
+	{HPHW_FIO, 0x007, 0x00096, 0x0, "Dino PS/2 Port"}, 
+	{HPHW_FIO, 0x008, 0x00096, 0x0, "Merlin 132 Dino PS/2 Port"}, 
+	{HPHW_FIO, 0x009, 0x00096, 0x0, "Merlin 160 Dino PS/2 Port"}, 
+	{HPHW_FIO, 0x00A, 0x00096, 0x0, "Merlin Jr 132 Dino PS/2 Port"}, 
+	{HPHW_FIO, 0x019, 0x00096, 0x0, "Merlin+ 180 Dino PS/2 Port"}, 
+	{HPHW_FIO, 0x022, 0x00096, 0x0, "Merlin+ 132 Dino PS/2 Port"}, 
+	{HPHW_FIO, 0x004, 0x00097, 0x0, "Cascade EISA 100VG LAN"}, 
+	{HPHW_FIO, 0x023, 0x00099, 0x0, "Rocky1 Wax HPIB"}, 
+	{HPHW_FIO, 0x048, 0x00099, 0x0, "Rocky2 120 Clark/Dino HPIB"}, 
+	{HPHW_FIO, 0x049, 0x00099, 0x0, "Rocky2 150 Clark/Dino HPIB"}, 
+	{HPHW_FIO, 0x004, 0x000A1, 0x0, "SPP2000 Console TTY"}, 
+	{HPHW_FIO, 0x004, 0x000A2, 0x0, "Forte Core PCI 10/100BT LAN"}, 
+	{HPHW_FIO, 0x005, 0x000A2, 0x0, "AllegroLow PCI 10/100BT LAN"}, 
+	{HPHW_FIO, 0x006, 0x000A2, 0x0, "AllegroHIgh Core PCI 10/100BT LAN"}, 
+	{HPHW_FIO, 0x007, 0x000A2, 0x0, "PCI Plug-in LAN"}, 
+	{HPHW_FIO, 0x00A, 0x000A2, 0x0, "Lego 360 Core PCI 10/100BT LAN"}, 
+	{HPHW_FIO, 0x03E, 0x000A2, 0x0, "Merlin+ 132 Core PCI LAN"}, 
+	{HPHW_FIO, 0x03F, 0x000A2, 0x0, "Merlin+ 180 Core PCI LAN"}, 
+	{HPHW_FIO, 0x056, 0x000A2, 0x0, "Raven+ w SE FWSCSI Core PCI LAN"}, 
+	{HPHW_FIO, 0x057, 0x000A2, 0x0, "Raven+ w Diff FWSCSI Core PCI LAN"}, 
+	{HPHW_FIO, 0x05E, 0x000A2, 0x0, "Staccato 132 PCI LAN"}, 
+	{HPHW_FIO, 0x05F, 0x000A2, 0x0, "Staccato 180 PCI LAN"}, 
+	{HPHW_FIO, 0x004, 0x000A3, 0x0, "Forte Core PCI LVD Ultra2 SCSI"}, 
+	{HPHW_FIO, 0x004, 0x000A3, 0x0, "Forte Core PCI SE UltraSCSI"}, 
+	{HPHW_FIO, 0x004, 0x000A3, 0x0, "Forte Core PCI IDE/ATAPI CD-ROM"}, 
+	{HPHW_FIO, 0x005, 0x000A3, 0x0, "AllegroLow Core PCI LVD Ultra2 SCSI"}, 
+	{HPHW_FIO, 0x005, 0x000A3, 0x0, "AllegroLow Core PCI IDE/ATAPI CD-ROM"}, 
+	{HPHW_FIO, 0x006, 0x000A3, 0x0, "AllegroHigh Core PCI LVD Ultra2 SCSI"}, 
+	{HPHW_FIO, 0x006, 0x000A3, 0x0, "AllegroHigh Core PCI IDE/ATAPI CD-ROM"}, 
+	{HPHW_FIO, 0x007, 0x000A3, 0x0, "PCI Plug-in Disk"}, 
+	{HPHW_FIO, 0x008, 0x000A3, 0x0, "A5158A S FC Tachlite HBA"}, 
+	{HPHW_FIO, 0x009, 0x000A3, 0x0, "A5157A D FC HBA"}, 
+	{HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI LVD Ultra2 SCSI"}, 
+	{HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI NSE UltraSCSI"}, 
+	{HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI WSE UltraSCSI"}, 
+	{HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI IDE/ATAPI CD-ROM"}, 
+	{HPHW_FIO, 0x03E, 0x000A3, 0x0, "Merlin+ 132 Core SE FWSCSI PCI Disk"}, 
+	{HPHW_FIO, 0x03F, 0x000A3, 0x0, "Merlin+ 180 Core SE FWSCSI PCI Disk"}, 
+	{HPHW_FIO, 0x056, 0x000A3, 0x0, "Raven+ w SE FWSCSI Core PCI Disk"}, 
+	{HPHW_FIO, 0x057, 0x000A3, 0x0, "Raven+ w Diff FWSCSI Core PCI Disk"}, 
+	{HPHW_FIO, 0x004, 0x000A4, 0x0, "SPP2000 Core BA"}, 
+	{HPHW_FIO, 0x004, 0x000A6, 0x0, "Sonic Ethernet 802.3 Card"}, 
+	{HPHW_FIO, 0x004, 0x000A9, 0x00, "Forte Core PCI SuperIO RS-232"}, 
+	{HPHW_FIO, 0x004, 0x000A9, 0x00, "Forte Core PCI USB KB"}, 
+	{HPHW_FIO, 0x005, 0x000A9, 0x00, "AllegroLow Core PCI SuperIO RS-232"}, 
+	{HPHW_FIO, 0x005, 0x000A9, 0x00, "AllegroLow Core PCI USB KB"}, 
+	{HPHW_FIO, 0x006, 0x000A9, 0x00, "AllegroHigh Core PCI SuperIO RS-232"}, 
+	{HPHW_FIO, 0x006, 0x000A9, 0x00, "AllegroHigh Core PCI USB KB"}, 
+	{HPHW_FIO, 0x007, 0x000A9, 0x0, "Miscellaneous PCI Plug-in"},
+	{HPHW_FIO, 0x00A, 0x000A9, 0x0, "Lego 360 Core PCI SuperIO RS-232"}, 
+	{HPHW_FIO, 0x00A, 0x000A9, 0x0, "Lego 360 Core PCI USB KB"}, 
+	{HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, 
+	{HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, 
+	{HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, 
+	{HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"},
+	{HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? Core RS-232"},
+	{HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, 
+	{HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, 
+	{HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, 
+	{HPHW_IOA, 0x581, 0x0000B, 0x10, "Uturn-IOA BC Runway Port"},
+	{HPHW_IOA, 0x582, 0x0000B, 0x10, "Astro BC Runway Port"},
+	{HPHW_IOA, 0x700, 0x0000B, 0x00, "NEC-IOS BC System Bus Port"},
+	{HPHW_IOA, 0x880, 0x0000C, 0x10, "Pluto BC McKinley Port"},
+	{HPHW_MEMORY, 0x002, 0x00008, 0x00, "MID_BUS"}, 
+	{HPHW_MEMORY, 0x063, 0x00009, 0x00, "712/132 L2 Upgrade"}, 
+	{HPHW_MEMORY, 0x064, 0x00009, 0x00, "712/160 L2 Upgrade"}, 
+	{HPHW_MEMORY, 0x065, 0x00009, 0x00, "715/132 L2 Upgrade"}, 
+	{HPHW_MEMORY, 0x066, 0x00009, 0x00, "715/160 L2 Upgrade"},
+	{HPHW_MEMORY, 0x0AF, 0x00009, 0x00, "Everest Mako Memory"},
+	{HPHW_OTHER, 0x004, 0x00030, 0x00, "Master"}, 
+	{HPHW_OTHER, 0x004, 0x00034, 0x00, "Slave"}, 
+	{HPHW_OTHER, 0x004, 0x00038, 0x00, "EDU"}, 
+	{HPHW_OTHER, 0x004, 0x00049, 0x00, "LGB Control"}, 
+	{HPHW_MC, 0x004, 0x000C0, 0x00, "BMC IPMI Mgmt Ctlr"}, 
+	{HPHW_FAULTY, 0, }  /* Special Marker for last entry */
+};
+
+
+static struct hp_cpu_type_mask {
+	unsigned short model;
+	unsigned short mask;
+	enum cpu_type cpu;
+} hp_cpu_type_mask_list[] = {
+
+	{ 0x0000, 0x0ff0, pcx    },  /* 0x0000 - 0x000f */
+	{ 0x0048, 0x0ff0, pcxl   },  /* 0x0040 - 0x004f */
+	{ 0x0080, 0x0ff0, pcx    },  /* 0x0080 - 0x008f */
+	{ 0x0100, 0x0ff0, pcx    },  /* 0x0100 - 0x010f */
+	{ 0x0182, 0x0ffe, pcx    },  /* 0x0182 - 0x0183 */
+	{ 0x0182, 0x0ffe, pcxt   },  /* 0x0182 - 0x0183 */
+	{ 0x0184, 0x0fff, pcxu   },  /* 0x0184 - 0x0184 */
+	{ 0x0200, 0x0ffe, pcxs   },  /* 0x0200 - 0x0201 */
+	{ 0x0202, 0x0fff, pcxs   },  /* 0x0202 - 0x0202 */
+	{ 0x0203, 0x0fff, pcxt   },  /* 0x0203 - 0x0203 */
+	{ 0x0204, 0x0ffc, pcxt   },  /* 0x0204 - 0x0207 */
+	{ 0x0280, 0x0ffc, pcxs   },  /* 0x0280 - 0x0283 */
+	{ 0x0284, 0x0ffc, pcxt   },  /* 0x0284 - 0x0287 */
+	{ 0x0288, 0x0fff, pcxt   },  /* 0x0288 - 0x0288 */
+	{ 0x0300, 0x0ffc, pcxs   },  /* 0x0300 - 0x0303 */
+	{ 0x0310, 0x0ff0, pcxt   },  /* 0x0310 - 0x031f */
+	{ 0x0320, 0x0ff0, pcxt   },  /* 0x0320 - 0x032f */
+	{ 0x0400, 0x0ff0, pcxt   },  /* 0x0400 - 0x040f */
+	{ 0x0480, 0x0ff0, pcxl   },  /* 0x0480 - 0x048f */
+	{ 0x0500, 0x0ff0, pcxl2  },  /* 0x0500 - 0x050f */
+	{ 0x0510, 0x0ff0, pcxl2  },  /* 0x0510 - 0x051f */
+	{ 0x0580, 0x0ff8, pcxt_  },  /* 0x0580 - 0x0587 */
+	{ 0x0588, 0x0ffc, pcxt_  },  /* 0x0588 - 0x058b */
+	{ 0x058c, 0x0ffe, pcxt_  },  /* 0x058c - 0x058d */
+	{ 0x058e, 0x0fff, pcxt_  },  /* 0x058e - 0x058e */
+	{ 0x058f, 0x0fff, pcxu   },  /* 0x058f - 0x058f */
+	{ 0x0590, 0x0ffe, pcxu   },  /* 0x0590 - 0x0591 */
+	{ 0x0592, 0x0fff, pcxt_  },  /* 0x0592 - 0x0592 */
+	{ 0x0593, 0x0fff, pcxu   },  /* 0x0593 - 0x0593 */
+	{ 0x0594, 0x0ffc, pcxu   },  /* 0x0594 - 0x0597 */
+	{ 0x0598, 0x0ffe, pcxu_  },  /* 0x0598 - 0x0599 */
+	{ 0x059a, 0x0ffe, pcxu   },  /* 0x059a - 0x059b */
+	{ 0x059c, 0x0fff, pcxu   },  /* 0x059c - 0x059c */
+	{ 0x059d, 0x0fff, pcxu_  },  /* 0x059d - 0x059d */
+	{ 0x059e, 0x0fff, pcxt_  },  /* 0x059e - 0x059e */
+	{ 0x059f, 0x0fff, pcxu   },  /* 0x059f - 0x059f */
+	{ 0x05a0, 0x0ffe, pcxt_  },  /* 0x05a0 - 0x05a1 */
+	{ 0x05a2, 0x0ffe, pcxu   },  /* 0x05a2 - 0x05a3 */
+	{ 0x05a4, 0x0ffc, pcxu   },  /* 0x05a4 - 0x05a7 */
+	{ 0x05a8, 0x0ffc, pcxu   },  /* 0x05a8 - 0x05ab */
+	{ 0x05ad, 0x0fff, pcxu_  },  /* 0x05ad - 0x05ad */
+	{ 0x05ae, 0x0ffe, pcxu_  },  /* 0x05ae - 0x05af */
+	{ 0x05b0, 0x0ffe, pcxu_  },  /* 0x05b0 - 0x05b1 */
+	{ 0x05b2, 0x0fff, pcxu_  },  /* 0x05b2 - 0x05b2 */
+	{ 0x05b3, 0x0fff, pcxu   },  /* 0x05b3 - 0x05b3 */
+	{ 0x05b4, 0x0fff, pcxw   },  /* 0x05b4 - 0x05b4 */
+	{ 0x05b5, 0x0fff, pcxu_  },  /* 0x05b5 - 0x05b5 */
+	{ 0x05b6, 0x0ffe, pcxu_  },  /* 0x05b6 - 0x05b7 */
+	{ 0x05b8, 0x0ffe, pcxu_  },  /* 0x05b8 - 0x05b9 */
+	{ 0x05ba, 0x0fff, pcxu_  },  /* 0x05ba - 0x05ba */
+	{ 0x05bb, 0x0fff, pcxw   },  /* 0x05bb - 0x05bb */
+	{ 0x05bc, 0x0ffc, pcxw   },  /* 0x05bc - 0x05bf */
+	{ 0x05c0, 0x0ffc, pcxw 	 },  /* 0x05c0 - 0x05c3 */
+	{ 0x05c4, 0x0ffe, pcxw 	 },  /* 0x05c4 - 0x05c5 */
+	{ 0x05c6, 0x0fff, pcxw 	 },  /* 0x05c6 - 0x05c6 */
+	{ 0x05c7, 0x0fff, pcxw_  },  /* 0x05c7 - 0x05c7 */
+	{ 0x05c8, 0x0ffc, pcxw 	 },  /* 0x05c8 - 0x05cb */
+	{ 0x05cc, 0x0ffe, pcxw 	 },  /* 0x05cc - 0x05cd */
+	{ 0x05ce, 0x0ffe, pcxw_  },  /* 0x05ce - 0x05cf */
+	{ 0x05d0, 0x0ffc, pcxw_  },  /* 0x05d0 - 0x05d3 */
+	{ 0x05d4, 0x0ffe, pcxw_  },  /* 0x05d4 - 0x05d5 */
+	{ 0x05d6, 0x0fff, pcxw 	 },  /* 0x05d6 - 0x05d6 */
+	{ 0x05d7, 0x0fff, pcxw_  },  /* 0x05d7 - 0x05d7 */
+	{ 0x05d8, 0x0ffc, pcxw_  },  /* 0x05d8 - 0x05db */
+	{ 0x05dc, 0x0ffe, pcxw2  },  /* 0x05dc - 0x05dd */
+	{ 0x05de, 0x0fff, pcxw_  },  /* 0x05de - 0x05de */
+	{ 0x05df, 0x0fff, pcxw2  },  /* 0x05df - 0x05df */
+	{ 0x05e0, 0x0ffc, pcxw2  },  /* 0x05e0 - 0x05e3 */
+	{ 0x05e4, 0x0fff, pcxw2  },  /* 0x05e4 - 0x05e4 */
+	{ 0x05e5, 0x0fff, pcxw_  },  /* 0x05e5 - 0x05e5 */
+	{ 0x05e6, 0x0ffe, pcxw2  },  /* 0x05e6 - 0x05e7 */
+	{ 0x05e8, 0x0ff8, pcxw2  },  /* 0x05e8 - 0x05ef */
+	{ 0x05f0, 0x0ff0, pcxw2  },  /* 0x05f0 - 0x05ff */
+	{ 0x0600, 0x0fe0, pcxl   },  /* 0x0600 - 0x061f */
+	{ 0x0880, 0x0ff0, mako   },  /* 0x0880 - 0x088f */
+	{ 0x0890, 0x0ff0, mako2  },  /* 0x0890 - 0x089f */
+	{ 0x0000, 0x0000, pcx    }	/* terminate table */
+};
+
+/*
+ * Human-readable CPU name and PA-RISC architecture level, indexed by
+ * enum cpu_type as returned by parisc_get_cpu_type().
+ */
+const char * const cpu_name_version[][2] = {
+	[pcx]	= { "PA7000 (PCX)",	"1.0" },
+	[pcxs]	= { "PA7000 (PCX-S)",	"1.1a" },
+	[pcxt]	= { "PA7100 (PCX-T)",	"1.1b" },
+	[pcxt_]	= { "PA7200 (PCX-T')",	"1.1c" },
+	[pcxl]	= { "PA7100LC (PCX-L)",	"1.1d" },
+	[pcxl2]	= { "PA7300LC (PCX-L2)",	"1.1e" },
+	[pcxu]	= { "PA8000 (PCX-U)",	"2.0" },
+	[pcxu_]	= { "PA8200 (PCX-U+)",	"2.0" },
+	[pcxw]	= { "PA8500 (PCX-W)",	"2.0" },
+	[pcxw_]	= { "PA8600 (PCX-W+)",	"2.0" },
+	[pcxw2]	= { "PA8700 (PCX-W2)",	"2.0" },
+	[mako]	= { "PA8800 (Mako)",	"2.0" },
+	[mako2] = { "PA8900 (Shortfin)",	"2.0" }
+};
+
+/*
+ * parisc_hardware_description - return a descriptive name for a device.
+ * @id: hw_type/hversion/sversion triple reported by firmware.
+ *
+ * Scans hp_hardware_list (terminated by an HPHW_FAULTY entry) for an
+ * exact triple match.  If the table has no entry, falls back to a
+ * generic per-hw_type name, or "unknown device" as a last resort.
+ * Returns a pointer to a static string; never NULL.
+ */
+const char *parisc_hardware_description(struct parisc_device_id *id)
+{
+	struct hp_hardware *listptr;
+	
+	for (listptr = hp_hardware_list; listptr->hw_type != HPHW_FAULTY; listptr++) {
+		if ((listptr->hw_type == id->hw_type) &&
+				(listptr->hversion == id->hversion) &&
+				(listptr->sversion == id->sversion)){
+			return listptr->name;
+		}
+	}
+
+	/*
+	 * ok, the above hardware table isn't complete, and we haven't found
+	 * our device in this table. So let's now try to find a generic name
+	 * to describe the given hardware...
+	 */
+	switch (id->hw_type) {
+		case HPHW_NPROC:
+			return "Unknown machine";
+
+		case HPHW_A_DIRECT:
+			switch (id->sversion) {
+				case 0x0D: return "MUX port";
+				case 0x0E: return "RS-232 port";
+			}
+			break;
+			
+		case HPHW_MEMORY:
+			return "Memory";
+			
+	}
+	
+	return "unknown device";
+}
+
+
+/*
+ * Interpret hversion (ret[0]) from PDC_MODEL(4)/PDC_MODEL_INFO(0).
+ * The low 4 bits of the hversion word are discarded; the remaining
+ * model number is matched against hp_cpu_type_mask_list (first match
+ * wins).  Panics if the CPU model is not in the table, since nothing
+ * sensible can be done without knowing the CPU type.
+ */
+enum cpu_type
+parisc_get_cpu_type(unsigned long hversion)
+{
+	struct hp_cpu_type_mask *ptr;
+	unsigned short model = ((unsigned short) (hversion)) >> 4;
+
+	for (ptr = hp_cpu_type_mask_list; 0 != ptr->mask; ptr++) {
+		if (ptr->model == (model & ptr->mask))
+			return ptr->cpu;
+	}
+	panic("could not identify CPU type\n");
+
+	return pcx;	/* not reached: */
+}
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/head.S b/src/kernel/linux/v4.14/arch/parisc/kernel/head.S
new file mode 100644
index 0000000..9b99eb0
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/head.S
@@ -0,0 +1,369 @@
+/* This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
+ * Copyright 1999 SuSE GmbH (Philipp Rumpf)
+ * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
+ * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
+ * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
+ * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
+ *
+ * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/psw.h>
+#include <asm/pdc.h>
+	
+#include <asm/assembly.h>
+#include <asm/pgtable.h>
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+	.level	PA_ASM_LEVEL
+
+	__INITDATA
+/* Four 32-bit words holding the boot loader's arg0-arg3; they are
+ * stored here by parisc_kernel_start below. */
+ENTRY(boot_args)
+	.word 0 /* arg0 */
+	.word 0 /* arg1 */
+	.word 0 /* arg2 */
+	.word 0 /* arg3 */
+END(boot_args)
+
+	__HEAD
+
+	.align	4
+	.import init_thread_union,data
+	.import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
+#ifndef CONFIG_64BIT
+        .import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
+	.import	$global$		/* forward declaration */
+#endif /*!CONFIG_64BIT*/
+/* Kernel entry point.  Runs with translation off (all addresses below
+ * are taken through PA() to get physical addresses); sets up BSS, boot
+ * args, the initial page tables and the stack before rfi-ing into
+ * virtual mode at start_parisc. */
+ENTRY(parisc_kernel_start)
+	.proc
+	.callinfo
+
+	/* Make sure sr4-sr7 are set to zero for the kernel address space */
+	mtsp	%r0,%sr4
+	mtsp	%r0,%sr5
+	mtsp	%r0,%sr6
+	mtsp	%r0,%sr7
+
+	/* Clear BSS (shouldn't the boot loader do this?) */
+
+	.import __bss_start,data
+	.import __bss_stop,data
+
+	load32		PA(__bss_start),%r3
+	load32		PA(__bss_stop),%r4
+$bss_loop:
+	cmpb,<<,n       %r3,%r4,$bss_loop
+	stw,ma          %r0,4(%r3)
+
+	/* Save away the arguments the boot loader passed in (32 bit args) */
+	load32		PA(boot_args),%r1
+	stw,ma          %arg0,4(%r1)
+	stw,ma          %arg1,4(%r1)
+	stw,ma          %arg2,4(%r1)
+	stw,ma          %arg3,4(%r1)
+
+	/* Initialize startup VM. Just map first 16/32 MB of memory */
+	load32		PA(swapper_pg_dir),%r4
+	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
+	mtctl		%r4,%cr25	/* Initialize user root pointer */
+
+#if CONFIG_PGTABLE_LEVELS == 3
+	/* Set pmd in pgd */
+	load32		PA(pmd0),%r5
+	shrd            %r5,PxD_VALUE_SHIFT,%r3	
+	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
+	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
+	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
+#else
+	/* 2-level page table, so pmd == pgd */
+	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
+#endif
+
+	/* Fill in pmd with enough pte directories */
+	load32		PA(pg0),%r1
+	SHRREG		%r1,PxD_VALUE_SHIFT,%r3
+	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
+
+	ldi		ASM_PT_INITIAL,%r1
+
+1:
+	stw		%r3,0(%r4)
+	ldo		(PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
+	addib,>		-1,%r1,1b
+#if CONFIG_PGTABLE_LEVELS == 3
+	ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
+#else
+	ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
+#endif
+
+
+	/* Now initialize the PTEs themselves.  We use RWX for
+	 * everything ... it will get remapped correctly later */
+	ldo		0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
+	load32		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
+	load32		PA(pg0),%r1
+
+$pgt_fill_loop:
+	STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)
+	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
+	addib,>		-1,%r11,$pgt_fill_loop
+	nop
+
+	/* Load the return address...er...crash 'n burn */
+	copy		%r0,%r2
+
+	/* And the RFI Target address too */
+	load32		start_parisc,%r11
+
+	/* And the initial task pointer */
+	load32		init_thread_union,%r6
+	mtctl           %r6,%cr30
+
+	/* And the stack pointer too */
+	ldo             THREAD_SZ_ALGN(%r6),%sp
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
+	.import _mcount,data
+	/* initialize mcount FPTR */
+	/* Get the global data pointer */
+	loadgp
+	load32		PA(_mcount), %r10
+	std		%dp,0x18(%r10)
+#endif
+
+#ifdef CONFIG_SMP
+	/* Set the smp rendezvous address into page zero.
+	** It would be safer to do this in init_smp_config() but
+	** it's just way easier to deal with here because
+	** of 64-bit function ptrs and the address is local to this file.
+	*/
+	load32		PA(smp_slave_stext),%r10
+	stw		%r10,0x10(%r0)	/* MEM_RENDEZ */
+	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI - assume addr < 4GB */
+
+	/* FALLTHROUGH */
+	.procend
+
+	/*
+	** Code Common to both Monarch and Slave processors.
+	** Entry:
+	**
+	**  1.1:	
+	**    %r11 must contain RFI target address.
+	**    %r25/%r26 args to pass to target function
+	**    %r2  in case rfi target decides it didn't like something
+	**
+	**  2.0w:
+	**    %r3  PDCE_PROC address
+	**    %r11 RFI target address
+	**
+	** Caller must init: SR4-7, %sp, %r10, %cr24/25, 
+	*/
+common_stext:
+	.proc
+	.callinfo
+#else
+	/* Clear PDC entry point - we won't use it */
+	stw		%r0,0x10(%r0)	/* MEM_RENDEZ */
+	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
+#endif /*CONFIG_SMP*/
+
+#ifdef CONFIG_64BIT
+	tophys_r1	%sp
+
+	/* Save the rfi target address */
+	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
+	tophys_r1       %r10
+	std             %r11,  TASK_PT_GR11(%r10)
+	/* Switch to wide mode Superdome doesn't support narrow PDC
+	** calls.
+	*/
+1:	mfia            %rp             /* clear upper part of pcoq */
+	ldo             2f-1b(%rp),%rp
+	depdi           0,31,32,%rp
+	bv              (%rp)
+	ssm             PSW_SM_W,%r0
+
+        /* Set Wide mode as the "Default" (eg for traps)
+        ** First trap occurs *right* after (or part of) rfi for slave CPUs.
+        ** Someday, palo might not do this for the Monarch either.
+        */
+2:
+/* Firmware stores the PDCE_PROC entry address at these fixed page-zero
+ * offsets; assemble the full 64-bit address into %r3. */
+#define MEM_PDC_LO 0x388
+#define MEM_PDC_HI 0x35C
+	ldw             MEM_PDC_LO(%r0),%r3
+	ldw             MEM_PDC_HI(%r0),%r6
+	depd            %r6, 31, 32, %r3        /* move to upper word */
+
+	mfctl		%cr30,%r6		/* PCX-W2 firmware bug */
+
+	ldo             PDC_PSW(%r0),%arg0              /* 21 */
+	ldo             PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
+	ldo             PDC_PSW_WIDE_BIT(%r0),%arg2     /* 2 */
+	load32          PA(stext_pdc_ret), %rp
+	bv              (%r3)
+	copy            %r0,%arg3
+
+stext_pdc_ret:
+	mtctl		%r6,%cr30		/* restore task thread info */
+
+	/* restore rfi target address*/
+	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
+	tophys_r1       %r10
+	ldd             TASK_PT_GR11(%r10), %r11
+	tovirt_r1       %sp
+#endif
+	
+	/* PARANOID: clear user scratch/user space SR's */
+	mtsp	%r0,%sr0
+	mtsp	%r0,%sr1
+	mtsp	%r0,%sr2
+	mtsp	%r0,%sr3
+
+	/* Initialize Protection Registers */
+	mtctl	%r0,%cr8
+	mtctl	%r0,%cr9
+	mtctl	%r0,%cr12
+	mtctl	%r0,%cr13
+
+	/* Initialize the global data pointer */
+	loadgp
+
+	/* Set up our interrupt table.  HPMCs might not work after this! 
+	 *
+	 * We need to install the correct iva for PA1.1 or PA2.0. The
+	 * following short sequence of instructions can determine this
+	 * (without being illegal on a PA1.1 machine).
+	 */
+#ifndef CONFIG_64BIT
+	ldi		32,%r10
+	mtctl		%r10,%cr11
+	.level 2.0
+	mfctl,w		%cr11,%r10
+	.level 1.1
+	comib,<>,n	0,%r10,$is_pa20
+	ldil		L%PA(fault_vector_11),%r10
+	b		$install_iva
+	ldo		R%PA(fault_vector_11)(%r10),%r10
+
+$is_pa20:
+	.level		PA_ASM_LEVEL /* restore 1.1 || 2.0w */
+#endif /*!CONFIG_64BIT*/
+	load32		PA(fault_vector_20),%r10
+
+$install_iva:
+	mtctl		%r10,%cr14
+
+	b		aligned_rfi  /* Prepare to RFI! Man all the cannons! */
+	nop
+
+	.align 128
+aligned_rfi:
+	pcxt_ssm_bug
+
+	rsm		PSW_SM_QUIET,%r0	/* off troublesome PSW bits */
+	/* Don't need NOPs, have 8 compliant insn before rfi */
+
+	mtctl		%r0,%cr17	/* Clear IIASQ tail */
+	mtctl		%r0,%cr17	/* Clear IIASQ head */
+
+	/* Load RFI target into PC queue */
+	mtctl		%r11,%cr18	/* IIAOQ head */
+	ldo		4(%r11),%r11
+	mtctl		%r11,%cr18	/* IIAOQ tail */
+
+	load32		KERNEL_PSW,%r10
+	mtctl		%r10,%ipsw
+	
+	/* Jump through hyperspace to Virt Mode */
+	rfi
+	nop
+
+	.procend
+
+#ifdef CONFIG_SMP
+
+	.import smp_init_current_idle_task,data
+	.import	smp_callin,code
+
+#ifndef CONFIG_64BIT
+smp_callin_rtn:
+        .proc
+	.callinfo
+	break	1,1		/*  Break if returned from start_secondary */
+	nop
+	nop
+        .procend
+#endif /*!CONFIG_64BIT*/
+
+/***************************************************************************
+* smp_slave_stext is executed by all non-monarch Processors when the Monarch
+* pokes the slave CPUs in smp.c:smp_boot_cpus().
+*
+* Once here, registers values are initialized in order to branch to virtual
+* mode. Once all available/eligible CPUs are in virtual mode, all are
+* released and start out by executing their own idle task.
+*****************************************************************************/
+smp_slave_stext:
+        .proc
+	.callinfo
+
+	/*
+	** Initialize Space registers
+	*/
+	mtsp	   %r0,%sr4
+	mtsp	   %r0,%sr5
+	mtsp	   %r0,%sr6
+	mtsp	   %r0,%sr7
+
+	/*  Initialize the SP - monarch sets up smp_init_current_idle_task */
+	load32		PA(smp_init_current_idle_task),%sp
+	LDREG		0(%sp),%sp	/* load task address */
+	tophys_r1	%sp
+	LDREG		TASK_THREAD_INFO(%sp),%sp
+	mtctl           %sp,%cr30       /* store in cr30 */
+	ldo             THREAD_SZ_ALGN(%sp),%sp
+
+	/* point CPU to kernel page tables */
+	load32		PA(swapper_pg_dir),%r4
+	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
+	mtctl		%r4,%cr25	/* Initialize user root pointer */
+
+#ifdef CONFIG_64BIT
+	/* Setup PDCE_PROC entry */
+	copy            %arg0,%r3
+#else
+	/* Load RFI *return* address in case smp_callin bails */
+	load32		smp_callin_rtn,%r2
+#endif
+	
+	/* Load RFI target address.  */
+	load32		smp_callin,%r11
+	
+	/* ok...common code can handle the rest */
+	b		common_stext
+	nop
+
+	.procend
+#endif /* CONFIG_SMP */
+
+ENDPROC(parisc_kernel_start)
+
+#ifndef CONFIG_64BIT
+	.section .data..read_mostly
+
+	.align	4
+	/* storage for the 32-bit global data pointer; referenced via the
+	 * .import $global$ forward declaration near the top of this file */
+	.export	$global$,data
+
+	.type	$global$,@object
+	.size	$global$,4
+$global$:	
+	.word 0
+#endif /*!CONFIG_64BIT*/
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/hpmc.S b/src/kernel/linux/v4.14/arch/parisc/kernel/hpmc.S
new file mode 100644
index 0000000..fde6541
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/hpmc.S
@@ -0,0 +1,314 @@
+/* 
+ * HPMC (High Priority Machine Check) handler.
+ *
+ * Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
+ * Copyright (C) 2000 Hewlett-Packard (John Marvin)
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+
+/*
+ * This HPMC handler retrieves the HPMC pim data, resets IO and
+ * returns to the default trap handler with code set to 1 (HPMC).
+ * The default trap handler calls handle interruption, which
+ * does a stack and register dump. This at least allows kernel
+ * developers to get back to C code in virtual mode, where they
+ * have the option to examine and print values from memory that
+ * would help in debugging an HPMC caused by a software bug.
+ *
+ * There is more to do here:
+ *
+ *      1) On MP systems we need to synchronize processors
+ *         before calling pdc/iodc.
+ *      2) We should be checking the system state and not
+ *         returning to the fault handler if things are really
+ *         bad.
+ *
+ */
+
+	.level		1.1
+
+#include <asm/assembly.h>
+#include <asm/pdc.h>
+#include <asm/psw.h>
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+	/*
+	 * stack for os_hpmc, the HPMC handler.
+	 * buffer for IODC procedures (for the HPMC handler).
+	 *
+	 * IODC requires 7K byte stack.  That leaves 1K byte for os_hpmc.
+	 */
+
+	__PAGE_ALIGNED_BSS
+	.align 4096
+hpmc_stack:
+	.block 16384
+
+#define HPMC_IODC_BUF_SIZE 0x8000
+
+	__PAGE_ALIGNED_BSS
+	.align 4096
+hpmc_iodc_buf:
+	.block HPMC_IODC_BUF_SIZE
+
+	.section .bss
+	.align 8
+hpmc_raddr:
+	.block 128
+
+#define HPMC_PIM_DATA_SIZE 896 /* Enough to hold all architected 2.0 state */
+
+	.section .bss
+	.align 8
+ENTRY(hpmc_pim_data)
+	.block HPMC_PIM_DATA_SIZE
+END(hpmc_pim_data)
+
+	.text
+
+	.import intr_save, code
+	.align 16
+/* HPMC entry point: invoked by firmware with the PDCE_PROC address in
+ * arg0 (%r26).  Dumps PIM data, resets IO, re-inits the IODC console
+ * and jumps to intr_save with trap code 1; on any failure it resets
+ * the box. */
+ENTRY(os_hpmc)
+.os_hpmc:
+
+	/*
+	 * registers modified:
+	 *
+	 *   Using callee saves registers without saving them.  The
+	 *   original values are in the pim dump if we need them.
+	 *
+	 *   r2   (rp)  return pointer
+	 *   r3   address of PDCE_PROC
+	 *   r4   scratch
+	 *   r5   scratch
+	 *   r23  (arg3) procedure arg
+	 *   r24  (arg2) procedure arg
+	 *   r25  (arg1) procedure arg
+	 *   r26  (arg0) procedure arg
+	 *   r30  (sp)   stack pointer
+	 *
+	 * registers read:
+	 *
+	 *   r26  contains address of PDCE_PROC on entry
+	 *   r28  (ret0) return value from procedure
+	 */
+
+	copy    arg0, %r3       /* save address of PDCE_PROC */
+
+	/*
+	 *  disable nested HPMCs
+	 *
+	 * Increment os_hpmc checksum to invalidate it.
+	 * Do this before turning the PSW M bit off.
+	 */
+
+	mfctl   %cr14, %r4
+	ldw     52(%r4),%r5
+	addi    1,%r5,%r5
+	stw     %r5,52(%r4)
+
+	/* MP_FIXME: synchronize all processors. */
+
+	/* Setup stack pointer. */
+
+	load32	PA(hpmc_stack),sp
+	
+	ldo     128(sp),sp /* leave room for arguments */
+
+	/*
+	 * Most PDC routines require that the M bit be off.
+	 * So turn on the Q bit and turn off the M bit.
+	 */
+
+	ldi     PSW_SM_Q,%r4                   /* PSW Q on, PSW M off */
+	mtctl   %r4,ipsw
+	mtctl   %r0,pcsq
+	mtctl   %r0,pcsq
+	load32	PA(os_hpmc_1),%r4
+	mtctl   %r4,pcoq
+	ldo     4(%r4),%r4
+	mtctl   %r4,pcoq
+	rfi
+	nop
+
+os_hpmc_1:
+
+	/* Call PDC_PIM to get HPMC pim info */
+
+	/*
+	 * Note that on some newer boxes, PDC_PIM must be called
+	 * before PDC_IO if you want IO to be reset. PDC_PIM sets
+	 * a flag that PDC_IO examines.
+	 */
+
+	ldo     PDC_PIM(%r0), arg0
+	ldo     PDC_PIM_HPMC(%r0),arg1          /* Transfer HPMC data */
+	load32	PA(hpmc_raddr),arg2
+	load32	PA(hpmc_pim_data),arg3
+	load32	HPMC_PIM_DATA_SIZE,%r4
+	stw     %r4,-52(sp)
+
+	ldil    L%PA(os_hpmc_2), rp
+	bv      (r3)                            /* call pdce_proc */
+	ldo     R%PA(os_hpmc_2)(rp), rp
+
+os_hpmc_2:
+	comib,<>  0,ret0, os_hpmc_fail
+
+	/* Reset IO by calling the hversion dependent PDC_IO routine */
+
+	ldo     PDC_IO(%r0),arg0
+	ldo     0(%r0),arg1                     /* log IO errors */
+	ldo     0(%r0),arg2                     /* reserved */
+	ldo     0(%r0),arg3                     /* reserved */
+	stw     %r0,-52(sp)                     /* reserved */
+
+	ldil    L%PA(os_hpmc_3),rp
+	bv      (%r3)                           /* call pdce_proc */
+	ldo     R%PA(os_hpmc_3)(rp),rp
+
+os_hpmc_3:
+
+	/* FIXME? Check for errors from PDC_IO (-1 might be OK) */
+
+	/*
+	 * Initialize the IODC console device (HPA,SPA, path etc.
+	 * are stored on page 0.
+	 */
+
+	/*
+	 * Load IODC into hpmc_iodc_buf by calling PDC_IODC.
+	 * Note that PDC_IODC handles flushing the appropriate
+	 * data and instruction cache lines.
+	 */
+
+	ldo     PDC_IODC(%r0),arg0
+	ldo     PDC_IODC_READ(%r0),arg1
+	load32	PA(hpmc_raddr),arg2
+	ldw     BOOT_CONSOLE_HPA_OFFSET(%r0),arg3 /* console hpa */
+	ldo     PDC_IODC_RI_INIT(%r0),%r4
+	stw     %r4,-52(sp)
+	load32	PA(hpmc_iodc_buf),%r4
+	stw     %r4,-56(sp)
+	load32	HPMC_IODC_BUF_SIZE,%r4
+	stw     %r4,-60(sp)
+
+	ldil    L%PA(os_hpmc_4),rp
+	bv      (%r3)                            /* call pdce_proc */
+	ldo     R%PA(os_hpmc_4)(rp),rp
+
+os_hpmc_4:
+	comib,<>  0,ret0,os_hpmc_fail
+
+	/* Call the entry init (just loaded by PDC_IODC) */
+
+	ldw     BOOT_CONSOLE_HPA_OFFSET(%r0),arg0  /* console hpa */
+	ldo     ENTRY_INIT_MOD_DEV(%r0), arg1
+	ldw     BOOT_CONSOLE_SPA_OFFSET(%r0),arg2  /* console spa */
+	depi    0,31,11,arg2                       /* clear bits 21-31    */
+	ldo     BOOT_CONSOLE_PATH_OFFSET(%r0),arg3 /* console path */
+	load32	PA(hpmc_raddr),%r4
+	stw     %r4, -52(sp)
+	stw     %r0, -56(sp)                    /* HV                  */
+	stw     %r0, -60(sp)                    /* HV                  */
+	stw     %r0, -64(sp)                    /* HV                  */
+	stw     %r0, -68(sp)                    /* lang, must be zero  */
+
+	load32	PA(hpmc_iodc_buf),%r5
+	ldil    L%PA(os_hpmc_5),rp
+	bv      (%r5)
+	ldo     R%PA(os_hpmc_5)(rp),rp
+
+os_hpmc_5:
+	comib,<>  0,ret0,os_hpmc_fail
+
+	/* Prepare to call intr_save */
+
+	/*
+	 * Load kernel page directory (load into user also, since
+	 * we don't intend to ever return to user land anyway)
+	 */
+
+	load32		PA(swapper_pg_dir),%r4
+	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
+	mtctl		%r4,%cr25	/* Initialize user root pointer */
+
+	/* Clear sr4-sr7 */
+
+	mtsp	%r0, %sr4
+	mtsp	%r0, %sr5
+	mtsp	%r0, %sr6
+	mtsp	%r0, %sr7
+
+	tovirt_r1 %r30      /* make sp virtual */
+
+	rsm     PSW_SM_Q,%r0           /* Clear Q bit */
+	ldi     1,%r8       /* Set trap code to "1" for HPMC */
+	load32	PA(intr_save),%r1
+	be      0(%sr7,%r1)
+	nop
+
+os_hpmc_fail:
+
+	/*
+	 * Reset the system
+	 *
+	 * Some systems may lockup from a broadcast reset, so try the
+	 * hversion PDC_BROADCAST_RESET() first.
+	 * MP_FIXME: reset all processors if more than one central bus.
+	 */
+
+	/* PDC_BROADCAST_RESET() */
+
+	ldo     PDC_BROADCAST_RESET(%r0),arg0
+	ldo     0(%r0),arg1                     /* do reset */
+
+	ldil    L%PA(os_hpmc_6),rp
+	bv      (%r3)                           /* call pdce_proc */
+	ldo     R%PA(os_hpmc_6)(rp),rp
+
+os_hpmc_6:
+
+	/*
+	 * possible return values:
+	 *  -1  non-existent procedure
+	 *  -2  non-existent option
+	 *  -16 unaligned stack
+	 *
+	 * If call returned, do a broadcast reset.
+	 */
+
+	ldil    L%0xfffc0000,%r4        /* IO_BROADCAST */
+	ldo     5(%r0),%r5
+	stw     %r5,48(%r4)             /*  CMD_RESET to IO_COMMAND offset */
+
+	b .
+	nop
+	.align 16	/* make function length multiple of 16 bytes */
+.os_hpmc_end:
+
+
+	__INITRODATA
+/* Byte length of the os_hpmc handler, exported so the kernel can copy
+ * or checksum the handler as a unit. */
+.globl os_hpmc_size
+	.align 4
+	.type   os_hpmc_size, @object
+	.size   os_hpmc_size, 4
+os_hpmc_size:
+	.word .os_hpmc_end-.os_hpmc
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/inventory.c b/src/kernel/linux/v4.14/arch/parisc/kernel/inventory.c
new file mode 100644
index 0000000..b0fe19a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/inventory.c
@@ -0,0 +1,631 @@
+/*
+ * inventory.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
+ * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
+ *
+ * These are the routines to discover what hardware exists in this box.
+ * This task is complicated by there being 3 different ways of
+ * performing an inventory, depending largely on the age of the box.
+ * The recommended way to do this is to check to see whether the machine
+ * is a `Snake' first, then try System Map, then try PAT.  We try System
+ * Map before checking for a Snake -- this probably doesn't cause any
+ * problems, but...
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/mmzone.h>
+#include <asm/pdc.h>
+#include <asm/pdcpat.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/parisc-device.h>
+
+/*
+** Debug options
+** DEBUG_PAT Dump details which PDC PAT provides about ranges/devices.
+*/
+#undef DEBUG_PAT
+
+/* Which PDC inventory flavour this box speaks; set by setup_pdc(). */
+int pdc_type __read_mostly = PDC_TYPE_ILLEGAL;
+
+/* cell number and location (PAT firmware only) */
+unsigned long parisc_cell_num __read_mostly;
+unsigned long parisc_cell_loc __read_mostly;
+
+
+/*
+ * setup_pdc - probe which PDC firmware interface this machine provides.
+ *
+ * Tries PDC_SYSTEM_MAP first, then (64-bit only) PDC PAT, and finally
+ * falls back to classifying old "Snake" boxes by the CPU's bus ID.
+ * Sets the global pdc_type accordingly (and parisc_cell_num/loc for
+ * PAT boxes); panics if no supported interface is found.
+ */
+void __init setup_pdc(void)
+{
+	long status;
+	unsigned int bus_id;
+	struct pdc_system_map_mod_info module_result;
+	struct pdc_module_path module_path;
+	struct pdc_model model;
+#ifdef CONFIG_64BIT
+	struct pdc_pat_cell_num cell_info;
+#endif
+
+	/* Determine the pdc "type" used on this machine */
+
+	printk(KERN_INFO "Determining PDC firmware type: ");
+
+	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
+	if (status == PDC_OK) {
+		pdc_type = PDC_TYPE_SYSTEM_MAP;
+		pr_cont("System Map.\n");
+		return;
+	}
+
+	/*
+	 * If the machine doesn't support PDC_SYSTEM_MAP then either it
+	 * is a pdc pat box, or it is an older box. All 64 bit capable
+	 * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
+	 */
+
+	/*
+	 * TODO: We should test for 64 bit capability and give a
+	 * clearer message.
+	 */
+
+#ifdef CONFIG_64BIT
+	status = pdc_pat_cell_get_number(&cell_info);
+	if (status == PDC_OK) {
+		pdc_type = PDC_TYPE_PAT;
+		pr_cont("64 bit PAT.\n");
+		parisc_cell_num = cell_info.cell_num;
+		parisc_cell_loc = cell_info.cell_loc;
+		pr_info("PAT: Running on cell %lu and location %lu.\n",
+			parisc_cell_num, parisc_cell_loc);
+		return;
+	}
+#endif
+
+	/* Check the CPU's bus ID.  There's probably a better test.  */
+
+	/* NOTE(review): status from pdc_model_info() is not checked before
+	 * bus_id is derived from model.hversion -- confirm the call cannot
+	 * fail on the machines that reach this path. */
+	status = pdc_model_info(&model);
+
+	bus_id = (model.hversion >> (4 + 7)) & 0x1f;
+
+	switch (bus_id) {
+	case 0x4:		/* 720, 730, 750, 735, 755 */
+	case 0x6:		/* 705, 710 */
+	case 0x7:		/* 715, 725 */
+	case 0x8:		/* 745, 747, 742 */
+	case 0xA:		/* 712 and similar */
+	case 0xC:		/* 715/64, at least */
+
+		pdc_type = PDC_TYPE_SNAKE;
+		pr_cont("Snake.\n");
+		return;
+
+	default:		/* Everything else */
+
+		pr_cont("Unsupported.\n");
+		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
+	}
+}
+
+#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
+
+/*
+ * set_pmem_entry - fill one physmem_range_t from a PDC memory range.
+ * @pmem_ptr: entry to fill.
+ * @start:    physical start address of the range, in bytes.
+ * @pages4k:  length of the range, in 4k PDC pages.
+ *
+ * Converts from PDC's fixed 4k page units to kernel PAGE_SIZE units;
+ * panics if the range is not aligned to the kernel page size.
+ */
+static void __init
+set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
+	       unsigned long pages4k)
+{
+	/* Rather than aligning and potentially throwing away
+	 * memory, we'll assume that any ranges are already
+	 * nicely aligned with any reasonable page size, and
+	 * panic if they are not (it's more likely that the
+	 * pdc info is bad in this case).
+	 */
+
+	if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
+	    || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {
+
+		panic("Memory range doesn't align with page size!\n");
+	}
+
+	pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
+	pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
+}
+
+/*
+ * pagezero_memconfig - build a one-entry memory map from page zero.
+ *
+ * Fallback path when no richer firmware memory map is available: uses
+ * PAGE0->imm_max_mem (a 32-bit byte count) to describe a single range
+ * starting at physical address 0, and sets npmem_ranges to 1.
+ */
+static void __init pagezero_memconfig(void)
+{
+	unsigned long npages;
+
+	/* Use the 32 bit information from page zero to create a single
+	 * entry in the pmem_ranges[] table.
+	 *
+	 * We currently don't support machines with contiguous memory
+	 * >= 4 Gb, who report that memory using 64 bit only fields
+	 * on page zero. It's not worth doing until it can be tested,
+	 * and it is not clear we can support those machines for other
+	 * reasons.
+	 *
+	 * If that support is done in the future, this is where it
+	 * should be done.
+	 */
+
+	npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
+	set_pmem_entry(pmem_ranges,0UL,npages);
+	npmem_ranges = 1;
+}
+
+#ifdef CONFIG_64BIT
+
+/* All of the PDC PAT specific code is 64-bit only */
+
+/*
+**  The module object is filled via PDC_PAT_CELL[Return Cell Module].
+**  If a module is found, register module will get the IODC bytes via
+**  pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
+**
+**  The IO view can be used by PDC_PAT_CELL[Return Cell Module]
+**  only for SBAs and LBAs.  This view will cause an invalid
+**  argument error for all other cell module types.
+**
+*/
+
+/*
+ * pat_query_module - query one cell module and register it as a device.
+ * @pcell_loc: cell location to query.
+ * @mod_index: module index within the cell.
+ *
+ * Calls PDC_PAT_CELL (PA view), allocates a parisc_device for the
+ * module and registers it.  Returns PDC_OK on success or when the
+ * module exists but no device could be allocated; returns the PDC
+ * error status when there are no more modules (loop terminator for
+ * the caller).
+ */
+static int __init 
+pat_query_module(ulong pcell_loc, ulong mod_index)
+{
+	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
+	unsigned long bytecnt;
+	unsigned long temp;	/* 64-bit scratch value */
+	long status;		/* PDC return value status */
+	struct parisc_device *dev;
+
+	/* heap-allocated: the block is too large/aligned for the stack */
+	pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
+	if (!pa_pdc_cell)
+		panic("couldn't allocate memory for PDC_PAT_CELL!");
+
+	/* return cell module (PA or Processor view) */
+	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
+				     PA_VIEW, pa_pdc_cell);
+
+	if (status != PDC_OK) {
+		/* no more cell modules or error */
+		kfree(pa_pdc_cell);
+		return status;
+	}
+
+	temp = pa_pdc_cell->cba;
+	dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
+	if (!dev) {
+		kfree(pa_pdc_cell);
+		return PDC_OK;
+	}
+
+	/* alloc_pa_dev sets dev->hpa */
+
+	/*
+	** save parameters in the parisc_device
+	** (The idea being the device driver will call pdc_pat_cell_module()
+	** and store the results in its own data structure.)
+	*/
+	dev->pcell_loc = pcell_loc;
+	dev->mod_index = mod_index;
+
+	/* save generic info returned from the call */
+	/* REVISIT: who is the consumer of this? not sure yet... */
+	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
+	dev->pmod_loc = pa_pdc_cell->mod_location;
+	dev->mod0 = pa_pdc_cell->mod[0];
+
+	register_parisc_device(dev);	/* advertise device */
+
+#ifdef DEBUG_PAT
+	/* dump what we see so far... */
+	switch (PAT_GET_ENTITY(dev->mod_info)) {
+		pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
+		unsigned long i;
+
+	case PAT_ENTITY_PROC:
+		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
+			pa_pdc_cell->mod[0]);
+		break;
+
+	case PAT_ENTITY_MEM:
+		printk(KERN_DEBUG 
+			"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
+			pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
+			pa_pdc_cell->mod[2]);
+		break;
+	case PAT_ENTITY_CA:
+		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
+		break;
+
+	case PAT_ENTITY_PBC:
+		printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
+		goto print_ranges;
+
+	case PAT_ENTITY_SBA:
+		printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
+		goto print_ranges;
+
+	case PAT_ENTITY_LBA:
+		printk(KERN_DEBUG "PAT_ENTITY_LBA: ");
+
+ print_ranges:
+		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
+				    IO_VIEW, &io_pdc_cell);
+		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
+		for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
+			printk(KERN_DEBUG 
+				"  PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n", 
+				i, pa_pdc_cell->mod[2 + i * 3],	/* type */
+				pa_pdc_cell->mod[3 + i * 3],	/* start */
+				pa_pdc_cell->mod[4 + i * 3]);	/* finish (ie end) */
+			printk(KERN_DEBUG 
+				"  IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n", 
+				i, io_pdc_cell.mod[2 + i * 3],	/* type */
+				io_pdc_cell.mod[3 + i * 3],	/* start */
+				io_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
+		}
+		printk(KERN_DEBUG "\n");
+		break;
+	}
+#endif /* DEBUG_PAT */
+
+	kfree(pa_pdc_cell);
+
+	return PDC_OK;
+}
+
+
+/* pat pdc can return information about a variety of different
+ * types of memory (e.g. firmware,i/o, etc) but we only care about
+ * the usable physical ram right now. Since the firmware specific
+ * information is allocated on the stack, we'll be generous, in
+ * case there is a lot of other information we don't care about.
+ */
+
+#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
+
+/*
+ * pat_memconfig - build pmem_ranges[] from the PDC PAT address map.
+ *
+ * Fetches the firmware address map via pdc_pat_pd_get_addr_map() and
+ * copies the usable-RAM entries (memory descriptors of type MEMORY
+ * with GENERAL/GI/GNI usage) into the firmware-independent
+ * pmem_ranges[] table, updating npmem_ranges.  Falls back to the
+ * page-zero memory description if the PDC call fails or returns a
+ * malformed length.
+ */
+static void __init pat_memconfig(void)
+{
+	unsigned long actual_len;
+	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
+	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
+	physmem_range_t *pmem_ptr;
+	long status;
+	int entries;
+	unsigned long length;
+	int i;
+
+	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);
+
+	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);
+
+	if ((status != PDC_OK)
+	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {
+
+		/* The above pdc call shouldn't fail, but, just in
+		 * case, just use the PAGE0 info.
+		 */
+
+		printk("\n\n\n");
+		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
+			"All memory may not be used!\n\n\n");
+		pagezero_memconfig();
+		return;
+	}
+
+	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);
+
+	if (entries > PAT_MAX_RANGES) {
+		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
+		printk(KERN_WARNING "Some memory may not be used!\n");
+	}
+
+	/* Copy information into the firmware independent pmem_ranges
+	 * array, skipping types we don't care about. Notice we said
+	 * "may" above. We'll use all the entries that were returned.
+	 */
+
+	npmem_ranges = 0;
+	mtbl_ptr = mem_table;
+	pmem_ptr = pmem_ranges; /* Global firmware independent table */
+	for (i = 0; i < entries; i++,mtbl_ptr++) {
+		if (   (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
+		    || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
+		    || (mtbl_ptr->pages == 0)
+		    || (   (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
+			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
+			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {
+
+			continue;
+		}
+
+		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
+			printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
+			printk(KERN_WARNING "Some memory will not be used!\n");
+			break;
+		}
+
+		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
+		npmem_ranges++;
+	}
+}
+
+/* Enumerate all modules of the local PAT cell.  Returns the number of
+ * modules found, or 0 when the cell number cannot be obtained. */
+static int __init pat_inventory(void)
+{
+	struct pdc_pat_cell_num cell_info;
+	ulong mod_index;
+
+	/*
+	** Note:  Prelude (and it's successors: Lclass, A400/500) only
+	**        implement PDC_PAT_CELL sub-options 0 and 2.
+	*/
+	if (pdc_pat_cell_get_number(&cell_info) != PDC_OK)
+		return 0;
+
+#ifdef DEBUG_PAT
+	printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num, 
+	       cell_info.cell_loc);
+#endif
+
+	/* Walk module indices until the firmware reports no more. */
+	for (mod_index = 0;
+	     pat_query_module(cell_info.cell_loc, mod_index) == PDC_OK;
+	     mod_index++)
+		;
+
+	return mod_index;
+}
+
+/* We only look for extended memory ranges on a 64 bit capable box */
+/* Fetch the extended memory map via PDC_MEM (sprockets firmware) and
+ * copy every returned range into pmem_ranges[]; fall back to the PAGE0
+ * data when the call is not supported by this firmware. */
+static void __init sprockets_memconfig(void)
+{
+	struct pdc_memory_table_raddr r_addr;
+	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
+	struct pdc_memory_table *mtbl_ptr;
+	physmem_range_t *pmem_ptr;
+	long status;
+	int entries;
+	int i;
+
+	status = pdc_mem_mem_table(&r_addr,mem_table,
+				(unsigned long)MAX_PHYSMEM_RANGES);
+
+	if (status != PDC_OK) {
+
+		/* The above pdc call only works on boxes with sprockets
+		 * firmware (newer B,C,J class). Other non PAT PDC machines
+		 * do support more than 3.75 Gb of memory, but we don't
+		 * support them yet.
+		 */
+
+		pagezero_memconfig();
+		return;
+	}
+
+	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
+		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
+		printk(KERN_WARNING "Some memory will not be used!\n");
+	}
+
+	/* entries_returned is capped by the buffer size we passed above,
+	 * so this never overruns mem_table[]. */
+	entries = (int)r_addr.entries_returned;
+
+	npmem_ranges = 0;
+	mtbl_ptr = mem_table;
+	pmem_ptr = pmem_ranges; /* Global firmware independent table */
+	for (i = 0; i < entries; i++,mtbl_ptr++) {
+		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
+		npmem_ranges++;
+	}
+}
+
+#else   /* !CONFIG_64BIT */
+
+#define pat_inventory() do { } while (0)
+#define pat_memconfig() do { } while (0)
+#define sprockets_memconfig() pagezero_memconfig()
+
+#endif	/* !CONFIG_64BIT */
+
+
+#ifndef CONFIG_PA20
+
+/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */
+
+/* Look up one module's HPA via PDC_MEM_MAP, allocate a parisc_device
+ * for it and register it.  Returns the device, or NULL when the
+ * firmware lookup or the allocation fails. */
+static struct parisc_device * __init
+legacy_create_device(struct pdc_memory_map *r_addr,
+		struct pdc_module_path *module_path)
+{
+	struct parisc_device *dev = NULL;
+
+	if (pdc_mem_map_hpa(r_addr, module_path) == PDC_OK)
+		dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
+
+	if (dev != NULL)
+		register_parisc_device(dev);
+
+	return dev;
+}
+
+/**
+ * snake_inventory
+ *
+ * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
+ * To use it, we initialise the mod_path.bc to 0xff and try all values of
+ * mod to get the HPA for the top-level devices.  Bus adapters may have
+ * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
+ * module, then trying all possible functions.
+ */
+static void __init snake_inventory(void)
+{
+	int mod;
+	/* Probe every possible top-level module slot (0..15). */
+	for (mod = 0; mod < 16; mod++) {
+		struct parisc_device *dev;
+		struct pdc_module_path module_path;
+		struct pdc_memory_map r_addr;
+		unsigned int func;
+
+		/* bc all-0xff + mod selects a top-level device. */
+		memset(module_path.path.bc, 0xff, 6);
+		module_path.path.mod = mod;
+		dev = legacy_create_device(&r_addr, &module_path);
+		if ((!dev) || (dev->id.hw_type != HPHW_BA))
+			continue;
+
+		/* Found a bus adapter: scan its sub-device functions. */
+		memset(module_path.path.bc, 0xff, 4);
+		module_path.path.bc[4] = mod;
+
+		for (func = 0; func < 16; func++) {
+			/* bc[5] re-zeroed each pass -- presumably the PDC
+			 * call can modify the path; confirm before hoisting. */
+			module_path.path.bc[5] = 0;
+			module_path.path.mod = func;
+			legacy_create_device(&r_addr, &module_path);
+		}
+	}
+}
+
+#else /* CONFIG_PA20 */
+#define snake_inventory() do { } while (0)
+#endif  /* CONFIG_PA20 */
+
+/* Common 32/64 bit based code goes here */
+
+/**
+ * add_system_map_addresses - Add additional addresses to the parisc device.
+ * @dev: The parisc device.
+ * @num_addrs: Then number of addresses to add;
+ * @module_instance: The system_map module instance.
+ *
+ * This function adds any additional addresses reported by the system_map
+ * firmware to the parisc device.
+ */
+static void __init
+add_system_map_addresses(struct parisc_device *dev, int num_addrs, 
+			 int module_instance)
+{
+	int i;
+	long status;
+	struct pdc_system_map_addr_info addr_result;
+
+	dev->addr = kmalloc_array(num_addrs, sizeof(*dev->addr), GFP_KERNEL);
+	if(!dev->addr) {
+		printk(KERN_ERR "%s %s(): memory allocation failure\n",
+		       __FILE__, __func__);
+		return;
+	}
+
+	/* PDC address indices are 1-based.  dev->num_addrs is used as the
+	 * running fill index into the freshly allocated array -- relies on
+	 * it being 0 on entry (NOTE(review): confirm alloc_pa_dev zeroes it). */
+	for(i = 1; i <= num_addrs; ++i) {
+		status = pdc_system_map_find_addrs(&addr_result, 
+						   module_instance, i);
+		if(PDC_OK == status) {
+			dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
+			dev->num_addrs++;
+		} else {
+			printk(KERN_WARNING 
+			       "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
+			       status, i);
+		}
+	}
+}
+
+/**
+ * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
+ *
+ * This function attempts to retrieve and register all the devices firmware
+ * knows about via the SYSTEM_MAP PDC call.
+ */
+/* Enumerate up to 256 firmware modules via PDC_SYSTEM_MAP, registering
+ * a parisc_device (plus any additional addresses) for each one found,
+ * then walk the central bus. */
+static void __init system_map_inventory(void)
+{
+	long status = PDC_OK;
+	int i;
+
+	for (i = 0; i < 256; i++) {
+		struct pdc_system_map_mod_info module_result;
+		struct pdc_module_path module_path;
+		struct parisc_device *dev;
+
+		status = pdc_system_map_find_mods(&module_result,
+				&module_path, i);
+		/* Firmware says there are no more modules: stop scanning. */
+		if (status == PDC_BAD_PROC || status == PDC_NE_MOD)
+			break;
+		if (status != PDC_OK)
+			continue;
+
+		dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
+		if (!dev)
+			continue;
+
+		register_parisc_device(dev);
+
+		/* if available, get the additional addresses for a module */
+		if (module_result.add_addrs)
+			add_system_map_addresses(dev, module_result.add_addrs, i);
+	}
+
+	walk_central_bus();
+}
+
+/* Populate pmem_ranges[] using whichever flavour of PDC firmware was
+ * detected, then sanity-check the result: the map must be non-empty
+ * and its first range must start at pfn 0. */
+void __init do_memory_inventory(void)
+{
+	switch (pdc_type) {
+
+	case PDC_TYPE_PAT:
+		pat_memconfig();
+		break;
+
+	case PDC_TYPE_SYSTEM_MAP:
+		sprockets_memconfig();
+		break;
+
+	case PDC_TYPE_SNAKE:
+		pagezero_memconfig();
+		/* returns early: the sanity check below is skipped */
+		return;
+
+	default:
+		panic("Unknown PDC type!\n");
+	}
+
+	if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
+		printk(KERN_WARNING "Bad memory configuration returned!\n");
+		printk(KERN_WARNING "Some memory may not be used!\n");
+		pagezero_memconfig();
+	}
+}
+
+/* Top-level device discovery: initialize the parisc bus, run the
+ * firmware-specific inventory, then print what was found. */
+void __init do_device_inventory(void)
+{
+	printk(KERN_INFO "Searching for devices...\n");
+
+	init_parisc_bus();
+
+	/* Dispatch on the flavour of PDC firmware detected earlier. */
+	if (pdc_type == PDC_TYPE_PAT)
+		pat_inventory();
+	else if (pdc_type == PDC_TYPE_SYSTEM_MAP)
+		system_map_inventory();
+	else if (pdc_type == PDC_TYPE_SNAKE)
+		snake_inventory();
+	else
+		panic("Unknown PDC type!\n");
+
+	printk(KERN_INFO "Found devices:\n");
+	print_parisc_devices();
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/irq.c b/src/kernel/linux/v4.14/arch/parisc/kernel/irq.c
new file mode 100644
index 0000000..0ca2540
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/irq.c
@@ -0,0 +1,611 @@
+/* 
+ * Code to handle x86 style IRQs plus some generic interrupt stuff.
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
+ * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
+ * Copyright (C) 1999-2000 Grant Grundler
+ * Copyright (c) 2005 Matthew Wilcox
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#include <asm/smp.h>
+#include <asm/ldcw.h>
+
+#undef PARISC_IRQ_CR16_COUNTS
+
+extern irqreturn_t timer_interrupt(int, void *);
+extern irqreturn_t ipi_interrupt(int, void *);
+
+#define EIEM_MASK(irq)       (1UL<<(CPU_IRQ_MAX - irq))
+
+/* Bits in EIEM correlate with cpu_irq_action[].
+** Numbered *Big Endian*! (ie bit 0 is MSB)
+*/
+static volatile unsigned long cpu_eiem = 0;
+
+/*
+** local ACK bitmap ... habitually set to 1, but reset to zero
+** between ->ack() and ->end() of the interrupt to prevent
+** re-interruption of a processing interrupt.
+*/
+static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
+
+static void cpu_mask_irq(struct irq_data *d)
+{
+	/* Drop this IRQ's bit from the global enable mask.
+	 * Do nothing on the other CPUs.  If they get this interrupt,
+	 * the & cpu_eiem in the do_cpu_irq_mask() ensures they won't
+	 * handle it, and the set_eiem() at the bottom will ensure it
+	 * then gets disabled */
+	cpu_eiem &= ~EIEM_MASK(d->irq);
+}
+
+static void __cpu_unmask_irq(unsigned int irq)
+{
+	/* Re-enable the interrupt in the global mask... */
+	cpu_eiem |= EIEM_MASK(irq);
+
+	/* ...then kick everyone.  This is just a simple NOP IPI, but it
+	 * causes all the other CPUs to do a set_eiem(cpu_eiem) at the
+	 * end of their interrupt handler. */
+	smp_send_all_nop();
+}
+
+/* irq_chip .irq_unmask hook: thin wrapper around __cpu_unmask_irq(). */
+static void cpu_unmask_irq(struct irq_data *d)
+{
+	__cpu_unmask_irq(d->irq);
+}
+
+/* irq_chip .irq_ack hook: block further local delivery of this
+ * interrupt (until cpu_eoi_irq) and clear its pending EIRR bit. */
+void cpu_ack_irq(struct irq_data *d)
+{
+	unsigned long mask = EIEM_MASK(d->irq);
+	int cpu = smp_processor_id();
+
+	/* Clear in EIEM so we can no longer process */
+	per_cpu(local_ack_eiem, cpu) &= ~mask;
+
+	/* disable the interrupt */
+	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
+
+	/* and now ack it */
+	mtctl(mask, 23);	/* cr23 is the EIRR; writing a bit clears it */
+}
+
+/* irq_chip .irq_eoi hook: undo cpu_ack_irq() -- restore the bit in the
+ * local ack mask and re-enable delivery via the EIEM. */
+void cpu_eoi_irq(struct irq_data *d)
+{
+	unsigned long mask = EIEM_MASK(d->irq);
+	int cpu = smp_processor_id();
+
+	/* set it in the eiems---it's no longer in process */
+	per_cpu(local_ack_eiem, cpu) |= mask;
+
+	/* enable the interrupt */
+	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
+}
+
+#ifdef CONFIG_SMP
+/* Validate a requested affinity mask for a CPU interrupt.  Returns the
+ * single online CPU the IRQ will be bound to, or -EINVAL for per-CPU
+ * interrupts which must stay on all CPUs. */
+int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
+{
+	/* timer and ipi have to always be received on all CPUs */
+	if (irqd_is_per_cpu(d))
+		return -EINVAL;
+
+	/* whatever mask they set, we just allow one CPU */
+	return cpumask_first_and(dest, cpu_online_mask);
+}
+
+/* irq_chip .irq_set_affinity hook: record the new mask if the request
+ * is valid for this interrupt, otherwise report failure. */
+static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
+				bool force)
+{
+	if (cpu_check_affinity(d, dest) < 0)
+		return -1;
+
+	cpumask_copy(irq_data_get_affinity_mask(d), dest);
+
+	return 0;
+}
+#endif
+
+/* irq_chip for interrupts delivered directly through the CPU's
+ * EIRR/EIEM external-interrupt registers. */
+static struct irq_chip cpu_interrupt_type = {
+	.name			= "CPU",
+	.irq_mask		= cpu_mask_irq,
+	.irq_unmask		= cpu_unmask_irq,
+	.irq_ack		= cpu_ack_irq,
+	.irq_eoi		= cpu_eoi_irq,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	= cpu_set_affinity_irq,
+#endif
+	/* XXX: Needs to be written.  We managed without it so far, but
+	 * we really ought to write it.
+	 */
+	.irq_retrigger	= NULL,
+};
+
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+#define irq_stats(x)		(&per_cpu(irq_stat, x))
+
+/*
+ * /proc/interrupts printing for arch specific interrupts
+ */
+/* Summary rows appended to /proc/interrupts after the per-IRQ lines:
+ * per-CPU stack usage and trap/shootdown counters from irq_stat. */
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+	int j;
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	seq_printf(p, "%*s: ", prec, "STK");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
+	seq_puts(p, "  Kernel stack usage\n");
+# ifdef CONFIG_IRQSTACKS
+	seq_printf(p, "%*s: ", prec, "IST");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
+	seq_puts(p, "  Interrupt stack usage\n");
+# endif
+#endif
+#ifdef CONFIG_SMP
+	seq_printf(p, "%*s: ", prec, "RES");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
+	seq_puts(p, "  Rescheduling interrupts\n");
+#endif
+	seq_printf(p, "%*s: ", prec, "UAH");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
+	seq_puts(p, "  Unaligned access handler traps\n");
+	seq_printf(p, "%*s: ", prec, "FPA");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
+	seq_puts(p, "  Floating point assist traps\n");
+	seq_printf(p, "%*s: ", prec, "TLB");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
+	seq_puts(p, "  TLB shootdowns\n");
+	return 0;
+}
+
+/* /proc/interrupts seq_file show method: a header row at index 0, one
+ * row per IRQ that has an action installed, then the arch summary rows
+ * once the iterator reaches NR_IRQS. */
+int show_interrupts(struct seq_file *p, void *v)
+{
+	int i = *(loff_t *) v, j;
+	unsigned long flags;
+
+	if (i == 0) {
+		seq_puts(p, "    ");
+		for_each_online_cpu(j)
+			seq_printf(p, "       CPU%d", j);
+
+#ifdef PARISC_IRQ_CR16_COUNTS
+		seq_printf(p, " [min/avg/max] (CPU cycle counts)");
+#endif
+		seq_putc(p, '\n');
+	}
+
+	if (i < NR_IRQS) {
+		struct irq_desc *desc = irq_to_desc(i);
+		struct irqaction *action;
+
+		/* lock out changes to the action chain while printing */
+		raw_spin_lock_irqsave(&desc->lock, flags);
+		action = desc->action;
+		if (!action)
+			goto skip;
+		seq_printf(p, "%3d: ", i);
+#ifdef CONFIG_SMP
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+#else
+		seq_printf(p, "%10u ", kstat_irqs(i));
+#endif
+
+		seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
+#ifndef PARISC_IRQ_CR16_COUNTS
+		seq_printf(p, "  %s", action->name);
+
+		/* shared IRQs: append every additional action's name */
+		while ((action = action->next))
+			seq_printf(p, ", %s", action->name);
+#else
+		for ( ;action; action = action->next) {
+			unsigned int k, avg, min, max;
+
+			min = max = action->cr16_hist[0];
+
+			for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
+				int hist = action->cr16_hist[k];
+
+				if (hist) {
+					avg += hist;
+				} else
+					break;
+
+				if (hist > max) max = hist;
+				if (hist < min) min = hist;
+			}
+
+			avg /= k;
+			seq_printf(p, " %s[%d/%d/%d]", action->name,
+					min,avg,max);
+		}
+#endif
+
+		seq_putc(p, '\n');
+ skip:
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+	}
+
+	if (i == NR_IRQS)
+		arch_show_interrupts(p, 3);
+
+	return 0;
+}
+
+
+
+/*
+** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
+** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
+**
+** To use txn_XXX() interfaces, get a Virtual IRQ first.
+** Then use that to get the Transaction address and data.
+*/
+
+/* Claim a CPU interrupt for a driver.  Fails with -EBUSY if the irq is
+ * already in use or is not owned by the CPU irq_chip.  A non-NULL
+ * @type (iosapic) installs the given chip and unmasks the line. */
+int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
+{
+	if (irq_has_action(irq) || irq_get_chip(irq) != &cpu_interrupt_type)
+		return -EBUSY;
+
+	/* for iosapic interrupts */
+	if (type) {
+		irq_set_chip_and_handler(irq, type, handle_percpu_irq);
+		irq_set_chip_data(irq, data);
+		__cpu_unmask_irq(irq);
+	}
+	return 0;
+}
+
+/* Claim @irq for transaction use: returns the irq on success, -1 on failure. */
+int txn_claim_irq(int irq)
+{
+	if (cpu_claim_irq(irq, NULL, NULL) != 0)
+		return -1;
+	return irq;
+}
+
+/*
+ * The bits_wide parameter accommodates the limitations of the HW/SW which
+ * use these bits:
+ * Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
+ * V-class (EPIC):          6 bits
+ * N/L/A-class (iosapic):   8 bits
+ * PCI 2.2 MSI:            16 bits
+ * Some PCI devices:       32 bits (Symbios SCSI/ATM/HyperFabric)
+ *
+ * On the service provider side:
+ * o PA 1.1 (and PA2.0 narrow mode)     5-bits (width of EIR register)
+ * o PA 2.0 wide mode                   6-bits (per processor)
+ * o IA64                               8-bits (0-256 total)
+ *
+ * So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
+ * by the processor...and the N/L-class I/O subsystem supports more bits than
+ * PA2.0 has. The first case is the problem.
+ */
+/**
+ * txn_alloc_irq - find a free CPU interrupt for a transaction target.
+ * @bits_wide: number of EIR bits the requesting hardware can address.
+ *
+ * Returns an unclaimed virtual IRQ within the device's addressable
+ * range, or -1 if none is available.
+ */
+int txn_alloc_irq(unsigned int bits_wide)
+{
+	int irq;
+
+	/* never return irq 0 cause that's the interval timer */
+	for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
+		/* irq only grows, so once it is beyond what bits_wide can
+		 * address no later irq can match either: check the cheap
+		 * bound first and stop scanning outright. */
+		if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
+			break;
+		if (cpu_claim_irq(irq, NULL, NULL) < 0)
+			continue;
+		return irq;
+	}
+
+	/* unlikely, but be prepared */
+	return -1;
+}
+
+
+/* Bind @irq to @cpu (SMP: record it in the affinity mask) and return
+ * that CPU's transaction address. */
+unsigned long txn_affinity_addr(unsigned int irq, int cpu)
+{
+#ifdef CONFIG_SMP
+	cpumask_copy(irq_data_get_affinity_mask(irq_get_irq_data(irq)),
+		     cpumask_of(cpu));
+#endif
+
+	return per_cpu(cpu_data, cpu).txn_addr;
+}
+
+
+/* Pick the next CPU, round-robin via the static next_cpu cursor, that
+ * is online and has a transaction address, and bind virt_irq to it.
+ * NOTE(review): the static cursor is not protected against concurrent
+ * callers -- presumably only used during driver init; confirm. */
+unsigned long txn_alloc_addr(unsigned int virt_irq)
+{
+	static int next_cpu = -1;
+
+	next_cpu++; /* assign to "next" CPU we want this bugger on */
+
+	/* validate entry */
+	while ((next_cpu < nr_cpu_ids) &&
+		(!per_cpu(cpu_data, next_cpu).txn_addr ||
+		 !cpu_online(next_cpu)))
+		next_cpu++;
+
+	if (next_cpu >= nr_cpu_ids) 
+		next_cpu = 0;	/* nothing else, assign monarch */
+
+	return txn_affinity_addr(virt_irq, next_cpu);
+}
+
+
+/* Transaction data for a virtual irq: its offset from CPU_IRQ_BASE
+ * (i.e. the EIRR bit number the device must write). */
+unsigned int txn_alloc_data(unsigned int virt_irq)
+{
+	return virt_irq - CPU_IRQ_BASE;
+}
+
+/* Map the most-significant set bit of the EIRR value to its virtual
+ * irq number (EIRR bits are numbered big-endian: bit 0 is the MSB). */
+static inline int eirr_to_irq(unsigned long eirr)
+{
+	return TIMER_IRQ + (BITS_PER_LONG - fls_long(eirr));
+}
+
+#ifdef CONFIG_IRQSTACKS
+/*
+ * IRQ STACK - used for irq handler
+ */
+#define IRQ_STACK_SIZE      (4096 << 3) /* 32k irq stack size */
+
+/* Per-CPU IRQ stack.  The slock[] words overlay the bottom of the
+ * stack and serve as the ldcw-based "stack in use" lock word consumed
+ * by execute_on_irq_stack(); they start at 1 (free). */
+union irq_stack_union {
+	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+	volatile unsigned int slock[4];
+	volatile unsigned int lock[1];
+};
+
+DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+		.slock = { 1,1,1,1 },
+	};
+#endif
+
+
+int sysctl_panic_on_stackoverflow = 1;
+
+/* Warn, and optionally panic, when the current kernel stack (or, with
+ * CONFIG_IRQSTACKS, the per-CPU irq stack) is within STACK_MARGIN
+ * bytes of overflowing.  Compiles to nothing without
+ * CONFIG_DEBUG_STACKOVERFLOW. */
+static inline void stack_overflow_check(struct pt_regs *regs)
+{
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	#define STACK_MARGIN	(256*6)
+
+	/* Our stack starts directly behind the thread_info struct. */
+	unsigned long stack_start = (unsigned long) current_thread_info();
+	unsigned long sp = regs->gr[30];
+	unsigned long stack_usage;
+	unsigned int *last_usage;
+	int cpu = smp_processor_id();
+
+	/* if sr7 != 0, we interrupted a userspace process which we do not want
+	 * to check for stack overflow. We will only check the kernel stack. */
+	if (regs->sr[7])
+		return;
+
+	/* exit if already in panic */
+	if (sysctl_panic_on_stackoverflow < 0)
+		return;
+
+	/* calculate kernel stack usage */
+	stack_usage = sp - stack_start;
+#ifdef CONFIG_IRQSTACKS
+	if (likely(stack_usage <= THREAD_SIZE))
+		goto check_kernel_stack; /* found kernel stack */
+
+	/* check irq stack usage */
+	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
+	stack_usage = sp - stack_start;
+
+	/* keep the per-CPU high-water mark reported in /proc/interrupts */
+	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
+	if (unlikely(stack_usage > *last_usage))
+		*last_usage = stack_usage;
+
+	if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
+		return;
+
+	pr_emerg("stackcheck: %s will most likely overflow irq stack "
+		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
+		current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
+	goto panic_check;
+
+check_kernel_stack:
+#endif
+
+	/* check kernel stack usage */
+	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
+
+	if (unlikely(stack_usage > *last_usage))
+		*last_usage = stack_usage;
+
+	if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
+		return;
+
+	pr_emerg("stackcheck: %s will most likely overflow kernel stack "
+		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
+		current->comm, sp, stack_start, stack_start + THREAD_SIZE);
+
+#ifdef CONFIG_IRQSTACKS
+panic_check:
+#endif
+	if (sysctl_panic_on_stackoverflow) {
+		sysctl_panic_on_stackoverflow = -1; /* disable further checks */
+		panic("low stack detected by irq handler - check messages\n");
+	}
+#endif
+}
+
+#ifdef CONFIG_IRQSTACKS
+/* in entry.S: */
+void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
+
+/* Run func(param1) on this CPU's dedicated IRQ stack, or directly on
+ * the current stack if the IRQ stack is already in use (re-entry). */
+static void execute_on_irq_stack(void *func, unsigned long param1)
+{
+	union irq_stack_union *union_ptr;
+	unsigned long irq_stack;
+	volatile unsigned int *irq_stack_in_use;
+
+	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
+	irq_stack = (unsigned long) &union_ptr->stack;
+	/* skip the lock words overlaid at the bottom of the stack */
+	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
+			 64); /* align for stack frame usage */
+
+	/* We may be called recursive. If we are already using the irq stack,
+	 * just continue to use it. Use spinlocks to serialize
+	 * the irq stack usage.
+	 */
+	irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
+	if (!__ldcw(irq_stack_in_use)) {
+		void (*direct_call)(unsigned long p1) = func;
+
+		/* We are using the IRQ stack already.
+		 * Do direct call on current stack. */
+		direct_call(param1);
+		return;
+	}
+
+	/* This is where we switch to the IRQ stack. */
+	call_on_stack(param1, func, irq_stack);
+
+	/* free up irq stack usage. */
+	*irq_stack_in_use = 1;
+}
+
+/* Softirq entry point: run __do_softirq on the per-CPU IRQ stack. */
+void do_softirq_own_stack(void)
+{
+	execute_on_irq_stack(__do_softirq, 0);
+}
+#endif /* CONFIG_IRQSTACKS */
+
+/* ONLY called from entry.S:intr_extint() */
+void do_cpu_irq_mask(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs;
+	unsigned long eirr_val;
+	int irq, cpu = smp_processor_id();
+	struct irq_data *irq_data;
+#ifdef CONFIG_SMP
+	cpumask_t dest;
+#endif
+
+	old_regs = set_irq_regs(regs);
+	local_irq_disable();
+	irq_enter();
+
+	/* pending (EIRR/cr23) & globally enabled & not being serviced here */
+	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
+	if (!eirr_val)
+		goto set_out;
+	irq = eirr_to_irq(eirr_val);
+
+	irq_data = irq_get_irq_data(irq);
+
+	/* Filter out spurious interrupts, mostly from serial port at bootup */
+	if (unlikely(!irq_desc_has_action(irq_data_to_desc(irq_data))))
+		goto set_out;
+
+#ifdef CONFIG_SMP
+	cpumask_copy(&dest, irq_data_get_affinity_mask(irq_data));
+	if (irqd_is_per_cpu(irq_data) &&
+	    !cpumask_test_cpu(smp_processor_id(), &dest)) {
+		int cpu = cpumask_first(&dest);
+
+		/* forward the interrupt to its target CPU via its HPA */
+		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
+		       irq, smp_processor_id(), cpu);
+		gsc_writel(irq + CPU_IRQ_BASE,
+			   per_cpu(cpu_data, cpu).hpa);
+		goto set_out;
+	}
+#endif
+	stack_overflow_check(regs);
+
+#ifdef CONFIG_IRQSTACKS
+	execute_on_irq_stack(&generic_handle_irq, irq);
+#else
+	generic_handle_irq(irq);
+#endif /* CONFIG_IRQSTACKS */
+
+ out:
+	irq_exit();
+	set_irq_regs(old_regs);
+	return;
+
+ set_out:
+	/* nothing handled here: restore EIEM from the current mask state */
+	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
+	goto out;
+}
+
+/* Interval-timer action, installed on TIMER_IRQ by claim_cpu_irqs(). */
+static struct irqaction timer_action = {
+	.handler = timer_interrupt,
+	.name = "timer",
+	.flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
+};
+
+#ifdef CONFIG_SMP
+/* Inter-processor-interrupt action, installed on IPI_IRQ (SMP only). */
+static struct irqaction ipi_action = {
+	.handler = ipi_interrupt,
+	.name = "IPI",
+	.flags = IRQF_PERCPU,
+};
+#endif
+
+/* Hand every external CPU interrupt to the CPU irq_chip, then install
+ * the timer (and, on SMP, the IPI) actions. */
+static void claim_cpu_irqs(void)
+{
+	int i;
+
+	for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++)
+		irq_set_chip_and_handler(i, &cpu_interrupt_type,
+					 handle_percpu_irq);
+
+	irq_set_handler(TIMER_IRQ, handle_percpu_irq);
+	setup_irq(TIMER_IRQ, &timer_action);
+#ifdef CONFIG_SMP
+	irq_set_handler(IPI_IRQ, handle_percpu_irq);
+	setup_irq(IPI_IRQ, &ipi_action);
+#endif
+}
+
+/*
+ * init_IRQ - boot-time external-interrupt setup.
+ *
+ * Clears all pending EIRR bits, claims the CPU interrupts (on SMP only
+ * the first caller, while cpu_eiem is still zero, does this) and then
+ * enables the timer -- and on SMP the IPI -- sources in the EIEM.
+ */
+void __init init_IRQ(void)
+{
+	local_irq_disable();	/* PARANOID - should already be disabled */
+	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
+#ifdef CONFIG_SMP
+	if (!cpu_eiem) {
+		claim_cpu_irqs();
+		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
+	}
+#else
+	claim_cpu_irqs();
+	cpu_eiem = EIEM_MASK(TIMER_IRQ);
+#endif
+	/* was indented with spaces; kernel style mandates a tab here */
+	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/module.c b/src/kernel/linux/v4.14/arch/parisc/kernel/module.c
new file mode 100644
index 0000000..f1a7693
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/module.c
@@ -0,0 +1,956 @@
+/*    Kernel dynamically loadable module help for PARISC.
+ *
+ *    The best reference for this stuff is probably the Processor-
+ *    Specific ELF Supplement for PA-RISC:
+ *        http://ftp.parisc-linux.org/docs/arch/elf-pa-hp.pdf
+ *
+ *    Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *    Copyright (C) 2003 Randolph Chung <tausq at debian . org>
+ *    Copyright (C) 2008 Helge Deller <deller@gmx.de>
+ *
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ *
+ *    Notes:
+ *    - PLT stub handling
+ *      On 32bit (and sometimes 64bit) and with big kernel modules like xfs or
+ *      ipv6 the relocation types R_PARISC_PCREL17F and R_PARISC_PCREL22F may
+ *      fail to reach their PLT stub if we only create one big stub array for
+ *      all sections at the beginning of the core or init section.
+ *      Instead we now insert individual PLT stub entries directly in
+ *      front of the code sections where the stubs are actually called.
+ *      This reduces the distance between the PCREL location and the stub entry
+ *      so that the relocations can be fulfilled.
+ *      While calculating the final layout of the kernel module in memory, the
+ *      kernel module loader calls arch_mod_section_prepend() to request the
+ *      to be reserved amount of memory in front of each individual section.
+ *
+ *    - SEGREL32 handling
+ *      We are not doing SEGREL32 handling correctly. According to the ABI, we
+ *      should do a value offset, like this:
+ *			if (in_init(me, (void *)val))
+ *				val -= (uint32_t)me->init_layout.base;
+ *			else
+ *				val -= (uint32_t)me->core_layout.base;
+ *	However, SEGREL32 is used only for PARISC unwind entries, and we want
+ *	those entries to have an absolute address, and not just an offset.
+ *
+ *	The unwind table mechanism has the ability to specify an offset for 
+ *	the unwind table; however, because we split off the init functions into
+ *	a different piece of memory, it is not possible to do this using a 
+ *	single offset. Instead, we use the above hack for now.
+ */
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+#include <asm/unwind.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt...)
+#endif
+
+#define RELOC_REACHABLE(val, bits) \
+	(( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 )  ||	\
+	     ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) ? \
+	0 : 1)
+
+#define CHECK_RELOC(val, bits) \
+	if (!RELOC_REACHABLE(val, bits)) { \
+		printk(KERN_ERR "module %s relocation of symbol %s is out of range (0x%lx in %d bits)\n", \
+		me->name, strtab + sym->st_name, (unsigned long)val, bits); \
+		return -ENOEXEC;			\
+	}
+
+/* Maximum number of GOT entries. We use a long displacement ldd from
+ * the bottom of the table, which has a maximum signed displacement of
+ * 0x3fff; however, since we're only going forward, this becomes
+ * 0x1fff, and thus, since each GOT entry is 8 bytes long we can have
+ * at most 1023 entries.
+ * To overcome this 14bit displacement with some kernel modules, we'll
+ * use instead the unusual 16bit displacement method (see reassemble_16a)
+ * which gives us a maximum positive displacement of 0x7fff, and as such
+ * allows us to allocate up to 4095 GOT entries. */
+#define MAX_GOTS	4095
+
+/* three functions to determine where in the module core
+ * or init pieces the location is */
+static inline int in_init(struct module *me, void *loc)
+{
+	/* NOTE: the upper bound uses <=, so the address one past the end
+	 * of the init region is also treated as inside it. */
+	return (loc >= me->init_layout.base &&
+		loc <= (me->init_layout.base + me->init_layout.size));
+}
+
+static inline int in_core(struct module *me, void *loc)
+{
+	return (loc >= me->core_layout.base &&
+		loc <= (me->core_layout.base + me->core_layout.size));
+}
+
+/* true if loc lies in either the core or the init piece of the module */
+static inline int in_local(struct module *me, void *loc)
+{
+	return in_init(me, loc) || in_core(me, loc);
+}
+
+#ifndef CONFIG_64BIT
+struct got_entry {
+	Elf32_Addr addr;
+};
+
+struct stub_entry {
+	Elf32_Word insns[2]; /* each stub entry has two insns */
+};
+#else
+struct got_entry {
+	Elf64_Addr addr;
+};
+
+struct stub_entry {
+	Elf64_Word insns[4]; /* each stub entry has four insns */
+};
+#endif
+
+/* Field selection types defined by hppa */
+#define rnd(x)			(((x)+0x1000)&~0x1fff)
+/* fsel: full 32 bits */
+#define fsel(v,a)		((v)+(a))
+/* lsel: select left 21 bits */
+#define lsel(v,a)		(((v)+(a))>>11)
+/* rsel: select right 11 bits */
+#define rsel(v,a)		(((v)+(a))&0x7ff)
+/* lrsel with rounding of addend to nearest 8k */
+#define lrsel(v,a)		(((v)+rnd(a))>>11)
+/* rrsel with rounding of addend to nearest 8k */
+#define rrsel(v,a)		((((v)+rnd(a))&0x7ff)+((a)-rnd(a)))
+
+/* clear the low 'sz' bits of x (wipes an insn's immediate field) */
+#define mask(x,sz)		((x) & ~((1<<(sz))-1))
+
+
+/* The reassemble_* functions prepare an immediate value for
+   insertion into an opcode. pa-risc uses all sorts of weird bitfields
+   in the instruction to hold the value.  */
+
+/* keep only the low 'len' bits of x */
+static inline int sign_unext(int x, int len)
+{
+	int len_ones;
+
+	len_ones = (1 << len) - 1;
+	return x & len_ones;
+}
+
+/* low-sign form: the value's sign bit is moved to the least
+ * significant bit of the result */
+static inline int low_sign_unext(int x, int len)
+{
+	int sign, temp;
+
+	sign = (x >> (len-1)) & 1;
+	temp = sign_unext(x, len-1);
+	return (temp << 1) | sign;
+}
+
+/* scatter a 14-bit immediate into its instruction bit layout */
+static inline int reassemble_14(int as14)
+{
+	return (((as14 & 0x1fff) << 1) |
+		((as14 & 0x2000) >> 13));
+}
+
+static inline int reassemble_16a(int as16)
+{
+	int s, t;
+
+	/* Unusual 16-bit encoding, for wide mode only.  */
+	t = (as16 << 1) & 0xffff;
+	s = (as16 & 0x8000);
+	return (t ^ s ^ (s >> 1)) | (s >> 15);
+}
+
+
+/* scatter a 17-bit branch displacement into its instruction fields */
+static inline int reassemble_17(int as17)
+{
+	return (((as17 & 0x10000) >> 16) |
+		((as17 & 0x0f800) << 5) |
+		((as17 & 0x00400) >> 8) |
+		((as17 & 0x003ff) << 3));
+}
+
+/* scatter a 21-bit immediate into its instruction fields */
+static inline int reassemble_21(int as21)
+{
+	return (((as21 & 0x100000) >> 20) |
+		((as21 & 0x0ffe00) >> 8) |
+		((as21 & 0x000180) << 7) |
+		((as21 & 0x00007c) << 14) |
+		((as21 & 0x000003) << 12));
+}
+
+/* scatter a 22-bit branch displacement into its instruction fields */
+static inline int reassemble_22(int as22)
+{
+	return (((as22 & 0x200000) >> 21) |
+		((as22 & 0x1f0000) << 5) |
+		((as22 & 0x00f800) << 5) |
+		((as22 & 0x000400) >> 8) |
+		((as22 & 0x0003ff) << 3));
+}
+
+/* Allocate memory for a module image from the vmalloc range. */
+void *module_alloc(unsigned long size)
+{
+	/* using RWX means less protection for modules, but it's
+	 * easier than trying to map the text, data, init_text and
+	 * init_data correctly */
+	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+				    GFP_KERNEL,
+				    PAGE_KERNEL_RWX, 0, NUMA_NO_NODE,
+				    __builtin_return_address(0));
+}
+
+#ifndef CONFIG_64BIT
+/* 32-bit builds use no GOT; always zero. */
+static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
+{
+	return 0;
+}
+
+/* 32-bit builds use no function descriptors; always zero. */
+static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
+{
+	return 0;
+}
+
+/* Count relocations that may need a PLT stub (PC-relative branches). */
+static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
+{
+	unsigned long cnt = 0;
+
+	for (; n > 0; n--, rela++)
+	{
+		switch (ELF32_R_TYPE(rela->r_info)) {
+			case R_PARISC_PCREL17F:
+			case R_PARISC_PCREL22F:
+				cnt++;
+		}
+	}
+
+	return cnt;
+}
+#else
+/* Count relocations that may need a GOT entry. */
+static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
+{
+	unsigned long cnt = 0;
+
+	for (; n > 0; n--, rela++)
+	{
+		switch (ELF64_R_TYPE(rela->r_info)) {
+			case R_PARISC_LTOFF21L:
+			case R_PARISC_LTOFF14R:
+			case R_PARISC_PCREL22F:
+				cnt++;
+		}
+	}
+
+	return cnt;
+}
+
+/* Count relocations that may need a function descriptor. */
+static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
+{
+	unsigned long cnt = 0;
+
+	for (; n > 0; n--, rela++)
+	{
+		switch (ELF64_R_TYPE(rela->r_info)) {
+			case R_PARISC_FPTR64:
+				cnt++;
+		}
+	}
+
+	return cnt;
+}
+
+/* Count relocations that may need a PLT stub. */
+static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
+{
+	unsigned long cnt = 0;
+
+	for (; n > 0; n--, rela++)
+	{
+		switch (ELF64_R_TYPE(rela->r_info)) {
+			case R_PARISC_PCREL22F:
+				cnt++;
+		}
+	}
+
+	return cnt;
+}
+#endif
+
+/* Release the per-section stub bookkeeping array allocated in
+ * module_frob_arch_sections(); NULL the pointer to avoid double free. */
+void module_arch_freeing_init(struct module *mod)
+{
+	kfree(mod->arch.section);
+	mod->arch.section = NULL;
+}
+
+/* Additional bytes needed in front of individual sections; the module
+ * loader adds this to each section allocation so relocation stubs can
+ * live directly before the code that branches to them. */
+unsigned int arch_mod_section_prepend(struct module *mod,
+				      unsigned int section)
+{
+	/* size needed for all stubs of this section (including
+	 * one additional for correct alignment of the stubs) */
+	return (mod->arch.section[section].stub_entries + 1)
+		* sizeof(struct stub_entry);
+}
+
+#define CONST 
+/* Scan all RELA sections: remember the unwind section index, count the
+ * GOT entries, fdescs and per-section stubs that the relocations will
+ * need, and grow the module core layout to reserve space for them.
+ * Returns 0 on success or -ENOMEM. */
+int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
+			      CONST Elf_Shdr *sechdrs,
+			      CONST char *secstrings,
+			      struct module *me)
+{
+	unsigned long gots = 0, fdescs = 0, len;
+	unsigned int i;
+
+	len = hdr->e_shnum * sizeof(me->arch.section[0]);
+	me->arch.section = kzalloc(len, GFP_KERNEL);
+	if (!me->arch.section)
+		return -ENOMEM;
+
+	for (i = 1; i < hdr->e_shnum; i++) {
+		const Elf_Rela *rels = (void *)sechdrs[i].sh_addr;
+		unsigned long nrels = sechdrs[i].sh_size / sizeof(*rels);
+		unsigned int count, s;
+
+		if (strncmp(secstrings + sechdrs[i].sh_name,
+			    ".PARISC.unwind", 14) == 0)
+			me->arch.unwind_section = i;
+
+		if (sechdrs[i].sh_type != SHT_RELA)
+			continue;
+
+		/* some of these are not relevant for 32-bit/64-bit
+		 * we leave them here to make the code common. the
+		 * compiler will do its thing and optimize out the
+		 * stuff we don't need
+		 */
+		gots += count_gots(rels, nrels);
+		fdescs += count_fdescs(rels, nrels);
+
+		/* XXX: By sorting the relocs and finding duplicate entries
+		 *  we could reduce the number of necessary stubs and save
+		 *  some memory. */
+		count = count_stubs(rels, nrels);
+		if (!count)
+			continue;
+
+		/* so we need relocation stubs. reserve necessary memory. */
+		/* sh_info gives the section for which we need to add stubs. */
+		s = sechdrs[i].sh_info;
+
+		/* each code section should only have one relocation section */
+		WARN_ON(me->arch.section[s].stub_entries);
+
+		/* store number of stubs we need for this section */
+		me->arch.section[s].stub_entries += count;
+	}
+
+	/* align things a bit */
+	me->core_layout.size = ALIGN(me->core_layout.size, 16);
+	me->arch.got_offset = me->core_layout.size;
+	me->core_layout.size += gots * sizeof(struct got_entry);
+
+	me->core_layout.size = ALIGN(me->core_layout.size, 16);
+	me->arch.fdesc_offset = me->core_layout.size;
+	me->core_layout.size += fdescs * sizeof(Elf_Fdesc);
+
+	me->arch.got_max = gots;
+	me->arch.fdesc_max = fdescs;
+
+	return 0;
+}
+
+#ifdef CONFIG_64BIT
+/* Return the byte offset (from the GOT base) of the GOT entry holding
+ * value+addend, creating a new entry if none exists yet.  BUGs if the
+ * space reserved by module_frob_arch_sections() is exceeded. */
+static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+{
+	unsigned int i;
+	struct got_entry *got;
+
+	value += addend;
+
+	BUG_ON(value == 0);
+
+	got = me->core_layout.base + me->arch.got_offset;
+	for (i = 0; got[i].addr; i++)
+		if (got[i].addr == value)
+			goto out;
+
+	BUG_ON(++me->arch.got_count > me->arch.got_max);
+
+	got[i].addr = value;
+ out:
+	DEBUGP("GOT ENTRY %d[%x] val %lx\n", i, i*sizeof(struct got_entry),
+	       value);
+	return i * sizeof(struct got_entry);
+}
+#endif /* CONFIG_64BIT */
+
+#ifdef CONFIG_64BIT
+/* Return the address of a function descriptor for 'value', reusing an
+ * existing one or creating a new entry in the reserved fdesc area.
+ * Returns 0 if value is 0 (and logs an error). */
+static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+{
+	Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset;
+
+	if (!value) {
+		printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+		return 0;
+	}
+
+	/* Look for existing fdesc entry. */
+	while (fdesc->addr) {
+		if (fdesc->addr == value)
+			return (Elf_Addr)fdesc;
+		fdesc++;
+	}
+
+	BUG_ON(++me->arch.fdesc_count > me->arch.fdesc_max);
+
+	/* Create new one */
+	fdesc->addr = value;
+	/* gp points at the module's GOT */
+	fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
+	return (Elf_Addr)fdesc;
+}
+#endif /* CONFIG_64BIT */
+
+enum elf_stub_type {
+	ELF_STUB_GOT,
+	ELF_STUB_MILLI,
+	ELF_STUB_DIRECT,
+};
+
+/* Allocate the next stub_entry slot in front of section 'targetsec'
+ * (loc0 is that section's start address), fill it with a trampoline to
+ * value+addend, and return the stub's address.  stub_type is only used
+ * on 64-bit. */
+static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
+	enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec)
+{
+	struct stub_entry *stub;
+	int __maybe_unused d;
+
+	/* initialize stub_offset to point in front of the section */
+	if (!me->arch.section[targetsec].stub_offset) {
+		loc0 -= (me->arch.section[targetsec].stub_entries + 1) *
+				sizeof(struct stub_entry);
+		/* get correct alignment for the stubs */
+		loc0 = ALIGN(loc0, sizeof(struct stub_entry));
+		me->arch.section[targetsec].stub_offset = loc0;
+	}
+
+	/* get address of stub entry */
+	stub = (void *) me->arch.section[targetsec].stub_offset;
+	me->arch.section[targetsec].stub_offset += sizeof(struct stub_entry);
+
+	/* do not write outside available stub area */
+	BUG_ON(0 == me->arch.section[targetsec].stub_entries--);
+
+
+#ifndef CONFIG_64BIT
+/* for 32-bit the stub looks like this:
+ * 	ldil L'XXX,%r1
+ * 	be,n R'XXX(%sr4,%r1)
+ */
+	//value = *(unsigned long *)((value + addend) & ~3); /* why? */
+
+	stub->insns[0] = 0x20200000;	/* ldil L'XXX,%r1	*/
+	stub->insns[1] = 0xe0202002;	/* be,n R'XXX(%sr4,%r1)	*/
+
+	stub->insns[0] |= reassemble_21(lrsel(value, addend));
+	stub->insns[1] |= reassemble_17(rrsel(value, addend) / 4);
+
+#else
+/* for 64-bit we have three kinds of stubs:
+ * for normal function calls:
+ * 	ldd 0(%dp),%dp
+ * 	ldd 10(%dp), %r1
+ * 	bve (%r1)
+ * 	ldd 18(%dp), %dp
+ *
+ * for millicode:
+ * 	ldil 0, %r1
+ * 	ldo 0(%r1), %r1
+ * 	ldd 10(%r1), %r1
+ * 	bve,n (%r1)
+ *
+ * for direct branches (jumps between different section of the
+ * same module):
+ *	ldil 0, %r1
+ *	ldo 0(%r1), %r1
+ *	bve,n (%r1)
+ */
+	switch (stub_type) {
+	case ELF_STUB_GOT:
+		d = get_got(me, value, addend);
+		if (d <= 15) {
+			/* Format 5 */
+			stub->insns[0] = 0x0f6010db; /* ldd 0(%dp),%dp	*/
+			stub->insns[0] |= low_sign_unext(d, 5) << 16;
+		} else {
+			/* Format 3 */
+			stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp	*/
+			stub->insns[0] |= reassemble_16a(d);
+		}
+		stub->insns[1] = 0x53610020;	/* ldd 10(%dp),%r1	*/
+		stub->insns[2] = 0xe820d000;	/* bve (%r1)		*/
+		stub->insns[3] = 0x537b0030;	/* ldd 18(%dp),%dp	*/
+		break;
+	case ELF_STUB_MILLI:
+		stub->insns[0] = 0x20200000;	/* ldil 0,%r1		*/
+		stub->insns[1] = 0x34210000;	/* ldo 0(%r1), %r1	*/
+		stub->insns[2] = 0x50210020;	/* ldd 10(%r1),%r1	*/
+		stub->insns[3] = 0xe820d002;	/* bve,n (%r1)		*/
+
+		stub->insns[0] |= reassemble_21(lrsel(value, addend));
+		stub->insns[1] |= reassemble_14(rrsel(value, addend));
+		break;
+	case ELF_STUB_DIRECT:
+		stub->insns[0] = 0x20200000;    /* ldil 0,%r1           */
+		stub->insns[1] = 0x34210000;    /* ldo 0(%r1), %r1      */
+		stub->insns[2] = 0xe820d002;    /* bve,n (%r1)          */
+
+		stub->insns[0] |= reassemble_21(lrsel(value, addend));
+		stub->insns[1] |= reassemble_14(rrsel(value, addend));
+		break;
+	}
+
+#endif
+
+	return (Elf_Addr)stub;
+}
+
+#ifndef CONFIG_64BIT
+/* Apply the RELA relocations in section 'relsec' to the target section
+ * named by its sh_info (32-bit variant).  Returns 0 on success,
+ * -ENOENT for an unresolved symbol, -ENOEXEC for an unknown or
+ * out-of-range relocation. */
+int apply_relocate_add(Elf_Shdr *sechdrs,
+		       const char *strtab,
+		       unsigned int symindex,
+		       unsigned int relsec,
+		       struct module *me)
+{
+	int i;
+	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+	Elf32_Sym *sym;
+	Elf32_Word *loc;
+	Elf32_Addr val;
+	Elf32_Sword addend;
+	Elf32_Addr dot;
+	Elf_Addr loc0;
+	unsigned int targetsec = sechdrs[relsec].sh_info;
+	//unsigned long dp = (unsigned long)$global$;
+	/* current global data pointer, read straight from %r27 */
+	register unsigned long dp asm ("r27");
+
+	DEBUGP("Applying relocate section %u to %u\n", relsec,
+	       targetsec);
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+		/* This is where to make the change */
+		loc = (void *)sechdrs[targetsec].sh_addr
+		      + rel[i].r_offset;
+		/* This is the start of the target section */
+		loc0 = sechdrs[targetsec].sh_addr;
+		/* This is the symbol it is referring to */
+		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+			+ ELF32_R_SYM(rel[i].r_info);
+		if (!sym->st_value) {
+			printk(KERN_WARNING "%s: Unknown symbol %s\n",
+			       me->name, strtab + sym->st_name);
+			return -ENOENT;
+		}
+		//dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
+		dot =  (Elf32_Addr)loc & ~0x03;
+
+		val = sym->st_value;
+		addend = rel[i].r_addend;
+
+#if 0
+#define r(t) ELF32_R_TYPE(rel[i].r_info)==t ? #t :
+		DEBUGP("Symbol %s loc 0x%x val 0x%x addend 0x%x: %s\n",
+			strtab + sym->st_name,
+			(uint32_t)loc, val, addend,
+			r(R_PARISC_PLABEL32)
+			r(R_PARISC_DIR32)
+			r(R_PARISC_DIR21L)
+			r(R_PARISC_DIR14R)
+			r(R_PARISC_SEGREL32)
+			r(R_PARISC_DPREL21L)
+			r(R_PARISC_DPREL14R)
+			r(R_PARISC_PCREL17F)
+			r(R_PARISC_PCREL22F)
+			"UNKNOWN");
+#undef r
+#endif
+
+		switch (ELF32_R_TYPE(rel[i].r_info)) {
+		case R_PARISC_PLABEL32:
+			/* 32-bit function address */
+			/* no function descriptors... */
+			*loc = fsel(val, addend);
+			break;
+		case R_PARISC_DIR32:
+			/* direct 32-bit ref */
+			*loc = fsel(val, addend);
+			break;
+		case R_PARISC_DIR21L:
+			/* left 21 bits of effective address */
+			val = lrsel(val, addend);
+			*loc = mask(*loc, 21) | reassemble_21(val);
+			break;
+		case R_PARISC_DIR14R:
+			/* right 14 bits of effective address */
+			val = rrsel(val, addend);
+			*loc = mask(*loc, 14) | reassemble_14(val);
+			break;
+		case R_PARISC_SEGREL32:
+			/* 32-bit segment relative address */
+			/* See note about special handling of SEGREL32 at
+			 * the beginning of this file.
+			 */
+			*loc = fsel(val, addend); 
+			break;
+		case R_PARISC_SECREL32:
+			/* 32-bit section relative address. */
+			*loc = fsel(val, addend);
+			break;
+		case R_PARISC_DPREL21L:
+			/* left 21 bit of relative address */
+			val = lrsel(val - dp, addend);
+			*loc = mask(*loc, 21) | reassemble_21(val);
+			break;
+		case R_PARISC_DPREL14R:
+			/* right 14 bit of relative address */
+			val = rrsel(val - dp, addend);
+			*loc = mask(*loc, 14) | reassemble_14(val);
+			break;
+		case R_PARISC_PCREL17F:
+			/* 17-bit PC relative address */
+			/* calculate direct call offset */
+			val += addend;
+			val = (val - dot - 8)/4;
+			if (!RELOC_REACHABLE(val, 17)) {
+				/* direct distance too far, create
+				 * stub entry instead */
+				val = get_stub(me, sym->st_value, addend,
+					ELF_STUB_DIRECT, loc0, targetsec);
+				val = (val - dot - 8)/4;
+				CHECK_RELOC(val, 17);
+			}
+			*loc = (*loc & ~0x1f1ffd) | reassemble_17(val);
+			break;
+		case R_PARISC_PCREL22F:
+			/* 22-bit PC relative address; only defined for pa20 */
+			/* calculate direct call offset */
+			val += addend;
+			val = (val - dot - 8)/4;
+			if (!RELOC_REACHABLE(val, 22)) {
+				/* direct distance too far, create
+				 * stub entry instead */
+				val = get_stub(me, sym->st_value, addend,
+					ELF_STUB_DIRECT, loc0, targetsec);
+				val = (val - dot - 8)/4;
+				CHECK_RELOC(val, 22);
+			}
+			*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
+			break;
+		case R_PARISC_PCREL32:
+			/* 32-bit PC relative address */
+			*loc = val - dot - 8 + addend;
+			break;
+
+		default:
+			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+			       me->name, ELF32_R_TYPE(rel[i].r_info));
+			return -ENOEXEC;
+		}
+	}
+
+	return 0;
+}
+
+#else
+/* 64-bit variant: applies RELA relocations, routing symbol references
+ * through GOT entries, function descriptors and PLT stubs as required.
+ * Returns 0 on success, -ENOENT for an unresolved symbol, -ENOEXEC for
+ * an unknown or out-of-range relocation. */
+int apply_relocate_add(Elf_Shdr *sechdrs,
+		       const char *strtab,
+		       unsigned int symindex,
+		       unsigned int relsec,
+		       struct module *me)
+{
+	int i;
+	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+	Elf64_Sym *sym;
+	Elf64_Word *loc;
+	Elf64_Xword *loc64;
+	Elf64_Addr val;
+	Elf64_Sxword addend;
+	Elf64_Addr dot;
+	Elf_Addr loc0;
+	unsigned int targetsec = sechdrs[relsec].sh_info;
+
+	DEBUGP("Applying relocate section %u to %u\n", relsec,
+	       targetsec);
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+		/* This is where to make the change */
+		loc = (void *)sechdrs[targetsec].sh_addr
+		      + rel[i].r_offset;
+		/* This is the start of the target section */
+		loc0 = sechdrs[targetsec].sh_addr;
+		/* This is the symbol it is referring to */
+		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+			+ ELF64_R_SYM(rel[i].r_info);
+		if (!sym->st_value) {
+			printk(KERN_WARNING "%s: Unknown symbol %s\n",
+			       me->name, strtab + sym->st_name);
+			return -ENOENT;
+		}
+		//dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
+		dot = (Elf64_Addr)loc & ~0x03;
+		loc64 = (Elf64_Xword *)loc;
+
+		val = sym->st_value;
+		addend = rel[i].r_addend;
+
+#if 0
+#define r(t) ELF64_R_TYPE(rel[i].r_info)==t ? #t :
+		printk("Symbol %s loc %p val 0x%Lx addend 0x%Lx: %s\n",
+			strtab + sym->st_name,
+			loc, val, addend,
+			r(R_PARISC_LTOFF14R)
+			r(R_PARISC_LTOFF21L)
+			r(R_PARISC_PCREL22F)
+			r(R_PARISC_DIR64)
+			r(R_PARISC_SEGREL32)
+			r(R_PARISC_FPTR64)
+			"UNKNOWN");
+#undef r
+#endif
+
+		switch (ELF64_R_TYPE(rel[i].r_info)) {
+		case R_PARISC_LTOFF21L:
+			/* LT-relative; left 21 bits */
+			val = get_got(me, val, addend);
+			DEBUGP("LTOFF21L Symbol %s loc %p val %lx\n",
+			       strtab + sym->st_name,
+			       loc, val);
+			val = lrsel(val, 0);
+			*loc = mask(*loc, 21) | reassemble_21(val);
+			break;
+		case R_PARISC_LTOFF14R:
+			/* L(ltoff(val+addend)) */
+			/* LT-relative; right 14 bits */
+			val = get_got(me, val, addend);
+			val = rrsel(val, 0);
+			DEBUGP("LTOFF14R Symbol %s loc %p val %lx\n",
+			       strtab + sym->st_name,
+			       loc, val);
+			*loc = mask(*loc, 14) | reassemble_14(val);
+			break;
+		case R_PARISC_PCREL22F:
+			/* PC-relative; 22 bits */
+			DEBUGP("PCREL22F Symbol %s loc %p val %lx\n",
+			       strtab + sym->st_name,
+			       loc, val);
+			val += addend;
+			/* can we reach it locally? */
+			if (in_local(me, (void *)val)) {
+				/* this is the case where the symbol is local
+				 * to the module, but in a different section,
+				 * so stub the jump in case it's more than 22
+				 * bits away */
+				val = (val - dot - 8)/4;
+				if (!RELOC_REACHABLE(val, 22)) {
+					/* direct distance too far, create
+					 * stub entry instead */
+					val = get_stub(me, sym->st_value,
+						addend, ELF_STUB_DIRECT,
+						loc0, targetsec);
+				} else {
+					/* Ok, we can reach it directly. */
+					val = sym->st_value;
+					val += addend;
+				}
+			} else {
+				val = sym->st_value;
+				/* "$$"-prefixed names are millicode */
+				if (strncmp(strtab + sym->st_name, "$$", 2)
+				    == 0)
+					val = get_stub(me, val, addend, ELF_STUB_MILLI,
+						       loc0, targetsec);
+				else
+					val = get_stub(me, val, addend, ELF_STUB_GOT,
+						       loc0, targetsec);
+			}
+			DEBUGP("STUB FOR %s loc %lx, val %lx+%lx at %lx\n", 
+			       strtab + sym->st_name, loc, sym->st_value,
+			       addend, val);
+			val = (val - dot - 8)/4;
+			CHECK_RELOC(val, 22);
+			*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
+			break;
+		case R_PARISC_PCREL32:
+			/* 32-bit PC relative address */
+			*loc = val - dot - 8 + addend;
+			break;
+		case R_PARISC_DIR64:
+			/* 64-bit effective address */
+			*loc64 = val + addend;
+			break;
+		case R_PARISC_SEGREL32:
+			/* 32-bit segment relative address */
+			/* See note about special handling of SEGREL32 at
+			 * the beginning of this file.
+			 */
+			*loc = fsel(val, addend); 
+			break;
+		case R_PARISC_SECREL32:
+			/* 32-bit section relative address. */
+			*loc = fsel(val, addend);
+			break;
+		case R_PARISC_FPTR64:
+			/* 64-bit function address */
+			if(in_local(me, (void *)(val + addend))) {
+				*loc64 = get_fdesc(me, val+addend);
+				DEBUGP("FDESC for %s at %p points to %lx\n",
+				       strtab + sym->st_name, *loc64,
+				       ((Elf_Fdesc *)*loc64)->addr);
+			} else {
+				/* if the symbol is not local to this
+				 * module then val+addend is a pointer
+				 * to the function descriptor */
+				DEBUGP("Non local FPTR64 Symbol %s loc %p val %lx\n",
+				       strtab + sym->st_name,
+				       loc, val);
+				*loc64 = val + addend;
+			}
+			break;
+
+		default:
+			printk(KERN_ERR "module %s: Unknown relocation: %Lu\n",
+			       me->name, ELF64_R_TYPE(rel[i].r_info));
+			return -ENOEXEC;
+		}
+	}
+	return 0;
+}
+#endif
+
+/* If the module has a .PARISC.unwind section, add its entries to the
+ * kernel's unwind table, using the module's GOT as the gp value. */
+static void
+register_unwind_table(struct module *me,
+		      const Elf_Shdr *sechdrs)
+{
+	unsigned char *table, *end;
+	unsigned long gp;
+
+	if (!me->arch.unwind_section)
+		return;
+
+	table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+	end = table + sechdrs[me->arch.unwind_section].sh_size;
+	gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
+
+	DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+	       me->arch.unwind_section, table, end, gp);
+	me->arch.unwind = unwind_table_add(me->name, 0, gp, table, end);
+}
+
+/* Remove the module's unwind table, if one was registered. */
+static void
+deregister_unwind_table(struct module *me)
+{
+	if (me->arch.unwind)
+		unwind_table_remove(me->arch.unwind);
+}
+
+/* Final per-module fixups: register the unwind table, verify the GOT
+ * did not overflow MAX_GOTS, free the stub bookkeeping, and compact the
+ * symbol table by dropping compiler-local ".L" symbols.
+ * Returns 0 on success or -EINVAL on GOT overflow. */
+int module_finalize(const Elf_Ehdr *hdr,
+		    const Elf_Shdr *sechdrs,
+		    struct module *me)
+{
+	int i;
+	unsigned long nsyms;
+	const char *strtab = NULL;
+	Elf_Sym *newptr, *oldptr;
+	Elf_Shdr *symhdr = NULL;
+#ifdef DEBUG
+	Elf_Fdesc *entry;
+	u32 *addr;
+
+	entry = (Elf_Fdesc *)me->init;
+	printk("FINALIZE, ->init FPTR is %p, GP %lx ADDR %lx\n", entry,
+	       entry->gp, entry->addr);
+	addr = (u32 *)entry->addr;
+	printk("INSNS: %x %x %x %x\n",
+	       addr[0], addr[1], addr[2], addr[3]);
+	printk("got entries used %ld, gots max %ld\n"
+	       "fdescs used %ld, fdescs max %ld\n",
+	       me->arch.got_count, me->arch.got_max,
+	       me->arch.fdesc_count, me->arch.fdesc_max);
+#endif
+
+	register_unwind_table(me, sechdrs);
+
+	/* haven't filled in me->symtab yet, so have to find it
+	 * ourselves */
+	for (i = 1; i < hdr->e_shnum; i++) {
+		if(sechdrs[i].sh_type == SHT_SYMTAB
+		   && (sechdrs[i].sh_flags & SHF_ALLOC)) {
+			int strindex = sechdrs[i].sh_link;
+			/* FIXME: AWFUL HACK
+			 * The cast is to drop the const from
+			 * the sechdrs pointer */
+			symhdr = (Elf_Shdr *)&sechdrs[i];
+			strtab = (char *)sechdrs[strindex].sh_addr;
+			break;
+		}
+	}
+
+	DEBUGP("module %s: strtab %p, symhdr %p\n",
+	       me->name, strtab, symhdr);
+
+	if(me->arch.got_count > MAX_GOTS) {
+		printk(KERN_ERR "%s: Global Offset Table overflow (used %ld, allowed %d)\n",
+				me->name, me->arch.got_count, MAX_GOTS);
+		return -EINVAL;
+	}
+
+	kfree(me->arch.section);
+	me->arch.section = NULL;
+
+	/* no symbol table */
+	if(symhdr == NULL)
+		return 0;
+
+	/* compact the symbol table in place, skipping ".L" locals */
+	oldptr = (void *)symhdr->sh_addr;
+	newptr = oldptr + 1;	/* we start counting at 1 */
+	nsyms = symhdr->sh_size / sizeof(Elf_Sym);
+	DEBUGP("OLD num_symtab %lu\n", nsyms);
+
+	for (i = 1; i < nsyms; i++) {
+		oldptr++;	/* note, count starts at 1 so preincrement */
+		if(strncmp(strtab + oldptr->st_name,
+			      ".L", 2) == 0)
+			continue;
+
+		if(newptr != oldptr)
+			*newptr++ = *oldptr;
+		else
+			newptr++;
+
+	}
+	nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
+	DEBUGP("NEW num_symtab %lu\n", nsyms);
+	symhdr->sh_size = nsyms * sizeof(Elf_Sym);
+	return 0;
+}
+
+/* Arch hook run when a module is unloaded: drop its unwind table. */
+void module_arch_cleanup(struct module *mod)
+{
+	deregister_unwind_table(mod);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/pa7300lc.c b/src/kernel/linux/v4.14/arch/parisc/kernel/pa7300lc.c
new file mode 100644
index 0000000..0d770ac
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/pa7300lc.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *   linux/arch/parisc/kernel/pa7300lc.c
+ *	- PA7300LC-specific functions	
+ *
+ *   Copyright (C) 2000 Philipp Rumpf */
+
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/ptrace.h>
+#include <asm/machdep.h>
+
+/* CPU register indices */
+
+#define MIOC_STATUS	0xf040
+#define MIOC_CONTROL	0xf080
+#define MDERRADD	0xf0e0
+#define DMAERR		0xf0e8
+#define DIOERR		0xf0ec
+#define HIDMAMEM	0xf0f4
+
+/* this returns the HPA of the CPU it was called on
+ * NOTE(review): the address is hardcoded — presumably the fixed
+ * local-CPU register base on PA7300LC; confirm against hardware docs. */
+static u32 cpu_hpa(void)
+{
+	return 0xfffb0000;
+}
+
+/* LPMC handler: log the CPU id, dump registers, and print the MIOC
+ * control/status and memory/DMA/DIO error registers. */
+static void pa7300lc_lpmc(int code, struct pt_regs *regs)
+{
+	u32 hpa;
+	printk(KERN_WARNING "LPMC on CPU %d\n", smp_processor_id());
+
+	show_regs(regs);
+
+	hpa = cpu_hpa();
+	printk(KERN_WARNING
+		"MIOC_CONTROL %08x\n" "MIOC_STATUS  %08x\n"
+		"MDERRADD     %08x\n" "DMAERR       %08x\n"
+		"DIOERR       %08x\n" "HIDMAMEM     %08x\n",
+		gsc_readl(hpa+MIOC_CONTROL), gsc_readl(hpa+MIOC_STATUS),
+		gsc_readl(hpa+MDERRADD), gsc_readl(hpa+DMAERR),
+		gsc_readl(hpa+DIOERR), gsc_readl(hpa+HIDMAMEM));
+}
+
+/* Install the PA7300LC-specific LPMC handler. */
+void pa7300lc_init(void)
+{
+	cpu_lpmc = pa7300lc_lpmc;
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/pacache.S b/src/kernel/linux/v4.14/arch/parisc/kernel/pacache.S
new file mode 100644
index 0000000..3e163df
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/pacache.S
@@ -0,0 +1,1315 @@
+/*
+ *  PARISC TLB and cache flushing support
+ *  Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
+ *  Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
+ *  Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/*
+ * NOTE: fdc,fic, and pdc instructions that use base register modification
+ *       should only use index and base registers that are not shadowed,
+ *       so that the fast path emulation in the non access miss handler
+ *       can be used.
+ */
+
+#ifdef CONFIG_64BIT
+	.level	2.0w
+#else
+	.level	2.0
+#endif
+
+#include <asm/psw.h>
+#include <asm/assembly.h>
+#include <asm/pgtable.h>
+#include <asm/cache.h>
+#include <asm/ldcw.h>
+#include <linux/linkage.h>
+
+	.text
+	.align	128
+
+/*
+ * flush_tlb_all_local: purge the entire instruction and data TLB of
+ * the local CPU.  Switches to real mode with interruptions disabled
+ * (required for pitlbe/pdtlbe, see note below), walks the SID/offset
+ * loops described by cache_info, then returns to virtual mode.  The
+ * caller's I-bit state is saved in %r19 and restored on exit.
+ */
+ENTRY_CFI(flush_tlb_all_local)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	/*
+	 * The pitlbe and pdtlbe instructions should only be used to
+	 * flush the entire tlb. Also, there needs to be no intervening
+	 * tlb operations, e.g. tlb misses, so the operation needs
+	 * to happen in real mode with all interruptions disabled.
+	 */
+
+	/* pcxt_ssm_bug	- relied upon translation! PA 2.0 Arch. F-4 and F-5 */
+	rsm		PSW_SM_I, %r19		/* save I-bit state */
+	load32		PA(1f), %r1
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
+	mtctl		%r0, %cr17		/* Clear IIASQ tail */
+	mtctl		%r0, %cr17		/* Clear IIASQ head */
+	mtctl		%r1, %cr18		/* IIAOQ head */
+	ldo		4(%r1), %r1
+	mtctl		%r1, %cr18		/* IIAOQ tail */
+	load32		REAL_MODE_PSW, %r1
+	mtctl           %r1, %ipsw
+	rfi
+	nop
+
+1:      load32		PA(cache_info), %r1
+
+	/* Flush Instruction Tlb */
+
+	LDREG		ITLB_SID_BASE(%r1), %r20
+	LDREG		ITLB_SID_STRIDE(%r1), %r21
+	LDREG		ITLB_SID_COUNT(%r1), %r22
+	LDREG		ITLB_OFF_BASE(%r1), %arg0
+	LDREG		ITLB_OFF_STRIDE(%r1), %arg1
+	LDREG		ITLB_OFF_COUNT(%r1), %arg2
+	LDREG		ITLB_LOOP(%r1), %arg3
+
+	addib,COND(=)		-1, %arg3, fitoneloop	/* Preadjust and test */
+	movb,<,n	%arg3, %r31, fitdone	/* If loop < 0, skip */
+	copy		%arg0, %r28		/* Init base addr */
+
+fitmanyloop:					/* Loop if LOOP >= 2 */
+	mtsp		%r20, %sr1
+	add		%r21, %r20, %r20	/* increment space */
+	copy		%arg2, %r29		/* Init middle loop count */
+
+fitmanymiddle:					/* Loop if LOOP >= 2 */
+	addib,COND(>)		-1, %r31, fitmanymiddle	/* Adjusted inner loop decr */
+	pitlbe		%r0(%sr1, %r28)
+	pitlbe,m	%arg1(%sr1, %r28)	/* Last pitlbe and addr adjust */
+	addib,COND(>)		-1, %r29, fitmanymiddle	/* Middle loop decr */
+	copy		%arg3, %r31		/* Re-init inner loop count */
+
+	movb,tr		%arg0, %r28, fitmanyloop /* Re-init base addr */
+	addib,COND(<=),n	-1, %r22, fitdone	/* Outer loop count decr */
+
+fitoneloop:					/* Loop if LOOP = 1 */
+	mtsp		%r20, %sr1
+	copy		%arg0, %r28		/* init base addr */
+	copy		%arg2, %r29		/* init middle loop count */
+
+fitonemiddle:					/* Loop if LOOP = 1 */
+	addib,COND(>)		-1, %r29, fitonemiddle	/* Middle loop count decr */
+	pitlbe,m	%arg1(%sr1, %r28)	/* pitlbe for one loop */
+
+	addib,COND(>)		-1, %r22, fitoneloop	/* Outer loop count decr */
+	add		%r21, %r20, %r20		/* increment space */
+
+fitdone:
+
+	/* Flush Data Tlb */
+
+	LDREG		DTLB_SID_BASE(%r1), %r20
+	LDREG		DTLB_SID_STRIDE(%r1), %r21
+	LDREG		DTLB_SID_COUNT(%r1), %r22
+	LDREG		DTLB_OFF_BASE(%r1), %arg0
+	LDREG		DTLB_OFF_STRIDE(%r1), %arg1
+	LDREG		DTLB_OFF_COUNT(%r1), %arg2
+	LDREG		DTLB_LOOP(%r1), %arg3
+
+	addib,COND(=)		-1, %arg3, fdtoneloop	/* Preadjust and test */
+	movb,<,n	%arg3, %r31, fdtdone	/* If loop < 0, skip */
+	copy		%arg0, %r28		/* Init base addr */
+
+fdtmanyloop:					/* Loop if LOOP >= 2 */
+	mtsp		%r20, %sr1
+	add		%r21, %r20, %r20	/* increment space */
+	copy		%arg2, %r29		/* Init middle loop count */
+
+fdtmanymiddle:					/* Loop if LOOP >= 2 */
+	addib,COND(>)		-1, %r31, fdtmanymiddle	/* Adjusted inner loop decr */
+	pdtlbe		%r0(%sr1, %r28)
+	pdtlbe,m	%arg1(%sr1, %r28)	/* Last pdtlbe and addr adjust */
+	addib,COND(>)		-1, %r29, fdtmanymiddle	/* Middle loop decr */
+	copy		%arg3, %r31		/* Re-init inner loop count */
+
+	movb,tr		%arg0, %r28, fdtmanyloop /* Re-init base addr */
+	addib,COND(<=),n	-1, %r22,fdtdone	/* Outer loop count decr */
+
+fdtoneloop:					/* Loop if LOOP = 1 */
+	mtsp		%r20, %sr1
+	copy		%arg0, %r28		/* init base addr */
+	copy		%arg2, %r29		/* init middle loop count */
+
+fdtonemiddle:					/* Loop if LOOP = 1 */
+	addib,COND(>)		-1, %r29, fdtonemiddle	/* Middle loop count decr */
+	pdtlbe,m	%arg1(%sr1, %r28)	/* pdtlbe for one loop */
+
+	addib,COND(>)		-1, %r22, fdtoneloop	/* Outer loop count decr */
+	add		%r21, %r20, %r20	/* increment space */
+
+
+fdtdone:
+	/*
+	 * Switch back to virtual mode
+	 */
+	/* pcxt_ssm_bug */
+	rsm		PSW_SM_I, %r0
+	load32		2f, %r1
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
+	mtctl		%r0, %cr17		/* Clear IIASQ tail */
+	mtctl		%r0, %cr17		/* Clear IIASQ head */
+	mtctl		%r1, %cr18		/* IIAOQ head */
+	ldo		4(%r1), %r1
+	mtctl		%r1, %cr18		/* IIAOQ tail */
+	load32		KERNEL_PSW, %r1
+	or		%r1, %r19, %r1	/* I-bit to state on entry */
+	mtctl		%r1, %ipsw	/* restore I-bit (entire PSW) */
+	rfi
+	nop
+
+2:      bv		%r0(%r2)
+	nop
+
+	.exit
+	.procend
+ENDPROC_CFI(flush_tlb_all_local)
+
+	.import cache_info,data
+
+/*
+ * flush_instruction_cache_local: flush the whole I-cache of this CPU
+ * using fice, with base/stride/count/loop geometry from cache_info.
+ * Interruptions are disabled for the duration (I-bit saved in %r22).
+ */
+ENTRY_CFI(flush_instruction_cache_local)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	load32		cache_info, %r1
+
+	/* Flush Instruction Cache */
+
+	LDREG		ICACHE_BASE(%r1), %arg0
+	LDREG		ICACHE_STRIDE(%r1), %arg1
+	LDREG		ICACHE_COUNT(%r1), %arg2
+	LDREG		ICACHE_LOOP(%r1), %arg3
+	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop*/
+	mtsp		%r0, %sr1
+	addib,COND(=)		-1, %arg3, fioneloop	/* Preadjust and test */
+	movb,<,n	%arg3, %r31, fisync	/* If loop < 0, do sync */
+
+fimanyloop:					/* Loop if LOOP >= 2 */
+	addib,COND(>)		-1, %r31, fimanyloop	/* Adjusted inner loop decr */
+	fice            %r0(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)	/* Last fice and addr adjust */
+	movb,tr		%arg3, %r31, fimanyloop	/* Re-init inner loop count */
+	addib,COND(<=),n	-1, %arg2, fisync	/* Outer loop decr */
+
+fioneloop:					/* Loop if LOOP = 1 */
+	/* Some implementations may flush with a single fice instruction */
+	cmpib,COND(>>=),n	15, %arg2, fioneloop2
+
+fioneloop1:
+	/* 16-way unrolled fice loop for large counts */
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	addib,COND(>)	-16, %arg2, fioneloop1
+	fice,m		%arg1(%sr1, %arg0)
+
+	/* Check if done */
+	cmpb,COND(=),n	%arg2, %r0, fisync	/* Predict branch taken */
+
+fioneloop2:
+	addib,COND(>)	-1, %arg2, fioneloop2	/* Outer loop count decr */
+	fice,m		%arg1(%sr1, %arg0)	/* Fice for one loop */
+
+fisync:
+	sync
+	mtsm		%r22			/* restore I-bit */
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(flush_instruction_cache_local)
+
+
+	.import cache_info, data
+/*
+ * flush_data_cache_local: flush the whole D-cache of this CPU using
+ * fdce, mirroring flush_instruction_cache_local above; finishes with
+ * syncdma+sync.  Interruptions are disabled (I-bit saved in %r22).
+ */
+ENTRY_CFI(flush_data_cache_local)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	load32		cache_info, %r1
+
+	/* Flush Data Cache */
+
+	LDREG		DCACHE_BASE(%r1), %arg0
+	LDREG		DCACHE_STRIDE(%r1), %arg1
+	LDREG		DCACHE_COUNT(%r1), %arg2
+	LDREG		DCACHE_LOOP(%r1), %arg3
+	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop*/
+	mtsp		%r0, %sr1
+	addib,COND(=)		-1, %arg3, fdoneloop	/* Preadjust and test */
+	movb,<,n	%arg3, %r31, fdsync	/* If loop < 0, do sync */
+
+fdmanyloop:					/* Loop if LOOP >= 2 */
+	addib,COND(>)		-1, %r31, fdmanyloop	/* Adjusted inner loop decr */
+	fdce		%r0(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)	/* Last fdce and addr adjust */
+	movb,tr		%arg3, %r31, fdmanyloop	/* Re-init inner loop count */
+	addib,COND(<=),n	-1, %arg2, fdsync	/* Outer loop decr */
+
+fdoneloop:					/* Loop if LOOP = 1 */
+	/* Some implementations may flush with a single fdce instruction */
+	cmpib,COND(>>=),n	15, %arg2, fdoneloop2
+
+fdoneloop1:
+	/* 16-way unrolled fdce loop for large counts */
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	addib,COND(>)	-16, %arg2, fdoneloop1
+	fdce,m		%arg1(%sr1, %arg0)
+
+	/* Check if done */
+	cmpb,COND(=),n	%arg2, %r0, fdsync	/* Predict branch taken */
+
+fdoneloop2:
+	addib,COND(>)	-1, %arg2, fdoneloop2	/* Outer loop count decr */
+	fdce,m		%arg1(%sr1, %arg0)	/* Fdce for one loop */
+
+fdsync:
+	syncdma
+	sync
+	mtsm		%r22			/* restore I-bit */
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(flush_data_cache_local)
+
+	.align	16
+
+/* Macros to serialize TLB purge operations on SMP.  */
+
+	/* Acquire the global pa_tlb_lock (SMP only): disables interrupts
+	 * (old I-bit state saved into \flags), then spins with ldcw; the
+	 * inner ldw loop avoids hammering the cacheline while contended.
+	 * The lock word is aligned down to __PA_LDCW_ALIGNMENT for ldcw. */
+	.macro	tlb_lock	la,flags,tmp
+#ifdef CONFIG_SMP
+#if __PA_LDCW_ALIGNMENT > 4
+	load32		pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
+	depi		0,31,__PA_LDCW_ALIGN_ORDER, \la
+#else
+	load32		pa_tlb_lock, \la
+#endif
+	rsm		PSW_SM_I,\flags
+1:	LDCW		0(\la),\tmp
+	cmpib,<>,n	0,\tmp,3f
+2:	ldw		0(\la),\tmp
+	cmpb,<>		%r0,\tmp,1b
+	nop
+	b,n		2b
+3:
+#endif
+	.endm
+
+	/* Release pa_tlb_lock: sync before the store that frees the lock
+	 * word, then restore the interrupt state saved by tlb_lock. */
+	.macro	tlb_unlock	la,flags,tmp
+#ifdef CONFIG_SMP
+	ldi		1,\tmp
+	sync
+	stw		\tmp,0(\la)
+	mtsm		\flags
+#endif
+	.endm
+
+/* Clear page using kernel mapping.  */
+
+/*
+ * clear_page_asm: zero one page through the kernel mapping.
+ * %r26 = kernel virtual address of the page.  Unrolled to 128-byte
+ * (64-bit) or 64-byte (32-bit) chunks per iteration.
+ */
+ENTRY_CFI(clear_page_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+#ifdef CONFIG_64BIT
+
+	/* Unroll the loop.  */
+	ldi		(PAGE_SIZE / 128), %r1
+
+1:
+	std		%r0, 0(%r26)
+	std		%r0, 8(%r26)
+	std		%r0, 16(%r26)
+	std		%r0, 24(%r26)
+	std		%r0, 32(%r26)
+	std		%r0, 40(%r26)
+	std		%r0, 48(%r26)
+	std		%r0, 56(%r26)
+	std		%r0, 64(%r26)
+	std		%r0, 72(%r26)
+	std		%r0, 80(%r26)
+	std		%r0, 88(%r26)
+	std		%r0, 96(%r26)
+	std		%r0, 104(%r26)
+	std		%r0, 112(%r26)
+	std		%r0, 120(%r26)
+
+	/* Note reverse branch hint for addib is taken.  */
+	addib,COND(>),n	-1, %r1, 1b
+	ldo		128(%r26), %r26
+
+#else
+
+	/*
+	 * Note that until (if) we start saving the full 64-bit register
+	 * values on interrupt, we can't use std on a 32 bit kernel.
+	 */
+	ldi		(PAGE_SIZE / 64), %r1
+
+1:
+	stw		%r0, 0(%r26)
+	stw		%r0, 4(%r26)
+	stw		%r0, 8(%r26)
+	stw		%r0, 12(%r26)
+	stw		%r0, 16(%r26)
+	stw		%r0, 20(%r26)
+	stw		%r0, 24(%r26)
+	stw		%r0, 28(%r26)
+	stw		%r0, 32(%r26)
+	stw		%r0, 36(%r26)
+	stw		%r0, 40(%r26)
+	stw		%r0, 44(%r26)
+	stw		%r0, 48(%r26)
+	stw		%r0, 52(%r26)
+	stw		%r0, 56(%r26)
+	stw		%r0, 60(%r26)
+
+	addib,COND(>),n	-1, %r1, 1b
+	ldo		64(%r26), %r26
+#endif
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(clear_page_asm)
+
+/* Copy page using kernel mapping.  */
+
+/*
+ * copy_page_asm: copy one page through the kernel mapping.
+ * %r26 = destination, %r25 = source (both kernel virtual addresses).
+ * Load/store pairs are interleaved by hand to suit the pipeline.
+ */
+ENTRY_CFI(copy_page_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+#ifdef CONFIG_64BIT
+	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
+	 * Unroll the loop by hand and arrange insn appropriately.
+	 * Prefetch doesn't improve performance on rp3440.
+	 * GCC probably can do this just as well...
+	 */
+
+	ldi		(PAGE_SIZE / 128), %r1
+
+1:	ldd		0(%r25), %r19
+	ldd		8(%r25), %r20
+
+	ldd		16(%r25), %r21
+	ldd		24(%r25), %r22
+	std		%r19, 0(%r26)
+	std		%r20, 8(%r26)
+
+	ldd		32(%r25), %r19
+	ldd		40(%r25), %r20
+	std		%r21, 16(%r26)
+	std		%r22, 24(%r26)
+
+	ldd		48(%r25), %r21
+	ldd		56(%r25), %r22
+	std		%r19, 32(%r26)
+	std		%r20, 40(%r26)
+
+	ldd		64(%r25), %r19
+	ldd		72(%r25), %r20
+	std		%r21, 48(%r26)
+	std		%r22, 56(%r26)
+
+	ldd		80(%r25), %r21
+	ldd		88(%r25), %r22
+	std		%r19, 64(%r26)
+	std		%r20, 72(%r26)
+
+	ldd		 96(%r25), %r19
+	ldd		104(%r25), %r20
+	std		%r21, 80(%r26)
+	std		%r22, 88(%r26)
+
+	ldd		112(%r25), %r21
+	ldd		120(%r25), %r22
+	ldo		128(%r25), %r25
+	std		%r19, 96(%r26)
+	std		%r20, 104(%r26)
+
+	std		%r21, 112(%r26)
+	std		%r22, 120(%r26)
+
+	/* Note reverse branch hint for addib is taken.  */
+	addib,COND(>),n	-1, %r1, 1b
+	ldo		128(%r26), %r26
+
+#else
+
+	/*
+	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
+	 * bundles (very restricted rules for bundling).
+	 * Note that until (if) we start saving
+	 * the full 64 bit register values on interrupt, we can't
+	 * use ldd/std on a 32 bit kernel.
+	 */
+	ldw		0(%r25), %r19
+	ldi		(PAGE_SIZE / 64), %r1
+
+1:
+	ldw		4(%r25), %r20
+	ldw		8(%r25), %r21
+	ldw		12(%r25), %r22
+	stw		%r19, 0(%r26)
+	stw		%r20, 4(%r26)
+	stw		%r21, 8(%r26)
+	stw		%r22, 12(%r26)
+	ldw		16(%r25), %r19
+	ldw		20(%r25), %r20
+	ldw		24(%r25), %r21
+	ldw		28(%r25), %r22
+	stw		%r19, 16(%r26)
+	stw		%r20, 20(%r26)
+	stw		%r21, 24(%r26)
+	stw		%r22, 28(%r26)
+	ldw		32(%r25), %r19
+	ldw		36(%r25), %r20
+	ldw		40(%r25), %r21
+	ldw		44(%r25), %r22
+	stw		%r19, 32(%r26)
+	stw		%r20, 36(%r26)
+	stw		%r21, 40(%r26)
+	stw		%r22, 44(%r26)
+	ldw		48(%r25), %r19
+	ldw		52(%r25), %r20
+	ldw		56(%r25), %r21
+	ldw		60(%r25), %r22
+	stw		%r19, 48(%r26)
+	stw		%r20, 52(%r26)
+	ldo		64(%r25), %r25
+	stw		%r21, 56(%r26)
+	stw		%r22, 60(%r26)
+	ldo		64(%r26), %r26
+	addib,COND(>),n	-1, %r1, 1b
+	ldw		0(%r25), %r19
+#endif
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(copy_page_asm)
+
+/*
+ * NOTE: Code in clear_user_page has a hard coded dependency on the
+ *       maximum alias boundary being 4 Mb. We've been assured by the
+ *       parisc chip designers that there will not ever be a parisc
+ *       chip with a larger alias boundary (Never say never :-) ).
+ *
+ *       Subtle: the dtlb miss handlers support the temp alias region by
+ *       "knowing" that if a dtlb miss happens within the temp alias
+ *       region it must have occurred while in clear_user_page. Since
+ *       this routine makes use of processor local translations, we
+ *       don't want to insert them into the kernel page table. Instead,
+ *       we load up some general registers (they need to be registers
+ *       which aren't shadowed) with the physical page numbers (preshifted
+ *       for tlb insertion) needed to insert the translations. When we
+ *       miss on the translation, the dtlb miss handler inserts the
+ *       translation into the tlb using these values:
+ *
+ *          %r26 physical page (shifted for tlb insert) of "to" translation
+ *          %r23 physical page (shifted for tlb insert) of "from" translation
+ */
+
+        /* Drop prot bits and convert to page addr for iitlbt and idtlbt.
+         * Extracts the PFN from \phys and, when a non-zero default page
+         * size encoding is configured, deposits it in the low bits. */
+        #define PAGE_ADD_SHIFT  (PAGE_SHIFT-12)
+        .macro          convert_phys_for_tlb_insert20  phys
+        extrd,u         \phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
+#if _PAGE_SIZE_ENCODING_DEFAULT
+        depdi           _PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
+#endif
+	.endm
+
+	/*
+	 * copy_user_page_asm() performs a page copy using mappings
+	 * equivalent to the user page mappings.  It can be used to
+	 * implement copy_user_page() but unfortunately both the `from'
+	 * and `to' pages need to be flushed through mappings equivalent
+	 * to the user mappings after the copy because the kernel accesses
+	 * the `from' page through the kmap kernel mapping and the `to'
+	 * page needs to be flushed since code can be copied.  As a
+	 * result, this implementation is less efficient than the simpler
+	 * copy using the kernel mapping.  It only needs the `from' page
+	 * to flushed via the user mapping.  The kunmap routines handle
+	 * the flushes needed for the kernel mapping.
+	 *
+	 * I'm still keeping this around because it may be possible to
+	 * use it if more information is passed into copy_user_page().
+	 * Have to do some measurements to see if it is worthwhile to
+	 * lobby for such a change.
+	 *
+	 */
+
+/*
+ * copy_user_page_asm: copy one page via TMPALIAS mappings that mimic
+ * the user mapping (see the block comment above).  On entry:
+ *   %r26 = kernel vaddr of destination page, %r25 = kernel vaddr of
+ *   source page, %r24 = user-space vaddr used for alias coloring.
+ * Internally %r28 becomes the aliased 'to' address and %r29 the
+ * aliased 'from' address; stale TLB entries for both are purged first.
+ */
+ENTRY_CFI(copy_user_page_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	/* Convert virtual `to' and `from' addresses to physical addresses.
+	   Move `from' physical address to non shadowed register.  */
+	ldil		L%(__PAGE_OFFSET), %r1
+	sub		%r26, %r1, %r26
+	sub		%r25, %r1, %r23
+
+	ldil		L%(TMPALIAS_MAP_START), %r28
+#ifdef CONFIG_64BIT
+#if (TMPALIAS_MAP_START >= 0x80000000)
+	depdi		0, 31,32, %r28		/* clear any sign extension */
+#endif
+	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
+	convert_phys_for_tlb_insert20 %r23	/* convert phys addr to tlb insert format */
+	depd		%r24,63,22, %r28	/* Form aliased virtual address 'to' */
+	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
+	copy		%r28, %r29
+	depdi		1, 41,1, %r29		/* Form aliased virtual address 'from' */
+#else
+	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
+	extrw,u		%r23, 24,25, %r23	/* convert phys addr to tlb insert format */
+	depw		%r24, 31,22, %r28	/* Form aliased virtual address 'to' */
+	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
+	copy		%r28, %r29
+	depwi		1, 9,1, %r29		/* Form aliased virtual address 'from' */
+#endif
+
+	/* Purge any old translations */
+
+#ifdef CONFIG_PA20
+	pdtlb,l		%r0(%r28)
+	pdtlb,l		%r0(%r29)
+#else
+	tlb_lock	%r20,%r21,%r22
+	pdtlb		%r0(%r28)
+	pdtlb		%r0(%r29)
+	tlb_unlock	%r20,%r21,%r22
+#endif
+
+#ifdef CONFIG_64BIT
+	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
+	 * Unroll the loop by hand and arrange insn appropriately.
+	 * GCC probably can do this just as well.
+	 */
+
+	ldd		0(%r29), %r19
+	ldi		(PAGE_SIZE / 128), %r1
+
+1:	ldd		8(%r29), %r20
+
+	ldd		16(%r29), %r21
+	ldd		24(%r29), %r22
+	std		%r19, 0(%r28)
+	std		%r20, 8(%r28)
+
+	ldd		32(%r29), %r19
+	ldd		40(%r29), %r20
+	std		%r21, 16(%r28)
+	std		%r22, 24(%r28)
+
+	ldd		48(%r29), %r21
+	ldd		56(%r29), %r22
+	std		%r19, 32(%r28)
+	std		%r20, 40(%r28)
+
+	ldd		64(%r29), %r19
+	ldd		72(%r29), %r20
+	std		%r21, 48(%r28)
+	std		%r22, 56(%r28)
+
+	ldd		80(%r29), %r21
+	ldd		88(%r29), %r22
+	std		%r19, 64(%r28)
+	std		%r20, 72(%r28)
+
+	ldd		 96(%r29), %r19
+	ldd		104(%r29), %r20
+	std		%r21, 80(%r28)
+	std		%r22, 88(%r28)
+
+	ldd		112(%r29), %r21
+	ldd		120(%r29), %r22
+	std		%r19, 96(%r28)
+	std		%r20, 104(%r28)
+
+	ldo		128(%r29), %r29
+	std		%r21, 112(%r28)
+	std		%r22, 120(%r28)
+	ldo		128(%r28), %r28
+
+	/* conditional branches nullify on forward taken branch, and on
+	 * non-taken backward branch. Note that .+4 is a backwards branch.
+	 * The ldd should only get executed if the branch is taken.
+	 */
+	addib,COND(>),n	-1, %r1, 1b		/* bundle 10 */
+	ldd		0(%r29), %r19		/* start next loads */
+
+#else
+	ldi		(PAGE_SIZE / 64), %r1
+
+	/*
+	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
+	 * bundles (very restricted rules for bundling). It probably
+	 * does OK on PCXU and better, but we could do better with
+	 * ldd/std instructions. Note that until (if) we start saving
+	 * the full 64 bit register values on interrupt, we can't
+	 * use ldd/std on a 32 bit kernel.
+	 */
+
+1:	ldw		0(%r29), %r19
+	ldw		4(%r29), %r20
+	ldw		8(%r29), %r21
+	ldw		12(%r29), %r22
+	stw		%r19, 0(%r28)
+	stw		%r20, 4(%r28)
+	stw		%r21, 8(%r28)
+	stw		%r22, 12(%r28)
+	ldw		16(%r29), %r19
+	ldw		20(%r29), %r20
+	ldw		24(%r29), %r21
+	ldw		28(%r29), %r22
+	stw		%r19, 16(%r28)
+	stw		%r20, 20(%r28)
+	stw		%r21, 24(%r28)
+	stw		%r22, 28(%r28)
+	ldw		32(%r29), %r19
+	ldw		36(%r29), %r20
+	ldw		40(%r29), %r21
+	ldw		44(%r29), %r22
+	stw		%r19, 32(%r28)
+	stw		%r20, 36(%r28)
+	stw		%r21, 40(%r28)
+	stw		%r22, 44(%r28)
+	ldw		48(%r29), %r19
+	ldw		52(%r29), %r20
+	ldw		56(%r29), %r21
+	ldw		60(%r29), %r22
+	stw		%r19, 48(%r28)
+	stw		%r20, 52(%r28)
+	stw		%r21, 56(%r28)
+	stw		%r22, 60(%r28)
+	ldo		64(%r28), %r28
+
+	addib,COND(>)		-1, %r1,1b
+	ldo		64(%r29), %r29
+#endif
+
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(copy_user_page_asm)
+
+/*
+ * clear_user_page_asm: zero a page through a TMPALIAS mapping.
+ * %r26 = kernel vaddr of the page (converted to phys by tophys_r1),
+ * %r25 = user vaddr used for alias coloring of the temp mapping.
+ * The stale TLB entry for the alias address is purged before use.
+ */
+ENTRY_CFI(clear_user_page_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	tophys_r1	%r26
+
+	ldil		L%(TMPALIAS_MAP_START), %r28
+#ifdef CONFIG_64BIT
+#if (TMPALIAS_MAP_START >= 0x80000000)
+	depdi		0, 31,32, %r28		/* clear any sign extension */
+#endif
+	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
+	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
+	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
+#else
+	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
+	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
+	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
+#endif
+
+	/* Purge any old translation */
+
+#ifdef CONFIG_PA20
+	pdtlb,l		%r0(%r28)
+#else
+	tlb_lock	%r20,%r21,%r22
+	pdtlb		%r0(%r28)
+	tlb_unlock	%r20,%r21,%r22
+#endif
+
+#ifdef CONFIG_64BIT
+	ldi		(PAGE_SIZE / 128), %r1
+
+	/* PREFETCH (Write) has not (yet) been proven to help here */
+	/* #define	PREFETCHW_OP	ldd		256(%0), %r0 */
+
+1:	std		%r0, 0(%r28)
+	std		%r0, 8(%r28)
+	std		%r0, 16(%r28)
+	std		%r0, 24(%r28)
+	std		%r0, 32(%r28)
+	std		%r0, 40(%r28)
+	std		%r0, 48(%r28)
+	std		%r0, 56(%r28)
+	std		%r0, 64(%r28)
+	std		%r0, 72(%r28)
+	std		%r0, 80(%r28)
+	std		%r0, 88(%r28)
+	std		%r0, 96(%r28)
+	std		%r0, 104(%r28)
+	std		%r0, 112(%r28)
+	std		%r0, 120(%r28)
+	addib,COND(>)		-1, %r1, 1b
+	ldo		128(%r28), %r28
+
+#else	/* ! CONFIG_64BIT */
+	ldi		(PAGE_SIZE / 64), %r1
+
+1:	stw		%r0, 0(%r28)
+	stw		%r0, 4(%r28)
+	stw		%r0, 8(%r28)
+	stw		%r0, 12(%r28)
+	stw		%r0, 16(%r28)
+	stw		%r0, 20(%r28)
+	stw		%r0, 24(%r28)
+	stw		%r0, 28(%r28)
+	stw		%r0, 32(%r28)
+	stw		%r0, 36(%r28)
+	stw		%r0, 40(%r28)
+	stw		%r0, 44(%r28)
+	stw		%r0, 48(%r28)
+	stw		%r0, 52(%r28)
+	stw		%r0, 56(%r28)
+	stw		%r0, 60(%r28)
+	addib,COND(>)		-1, %r1, 1b
+	ldo		64(%r28), %r28
+#endif	/* CONFIG_64BIT */
+
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(clear_user_page_asm)
+
+/*
+ * flush_dcache_page_asm: flush one page from the D-cache through a
+ * TMPALIAS mapping.  %r26 = physical address (converted to tlb-insert
+ * form), %r25 = user vaddr for alias coloring; %r25 is then reused as
+ * the loop end address.  Note: this routine spells the stride register
+ * as bare "r31" (no '%'), which gas accepts -- kept as-is.
+ */
+ENTRY_CFI(flush_dcache_page_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%(TMPALIAS_MAP_START), %r28
+#ifdef CONFIG_64BIT
+#if (TMPALIAS_MAP_START >= 0x80000000)
+	depdi		0, 31,32, %r28		/* clear any sign extension */
+#endif
+	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
+	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
+	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
+#else
+	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
+	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
+	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
+#endif
+
+	/* Purge any old translation */
+
+#ifdef CONFIG_PA20
+	pdtlb,l		%r0(%r28)
+#else
+	tlb_lock	%r20,%r21,%r22
+	pdtlb		%r0(%r28)
+	tlb_unlock	%r20,%r21,%r22
+#endif
+
+	ldil		L%dcache_stride, %r1
+	ldw		R%dcache_stride(%r1), r31
+
+#ifdef CONFIG_64BIT
+	depdi,z		1, 63-PAGE_SHIFT,1, %r25
+#else
+	depwi,z		1, 31-PAGE_SHIFT,1, %r25
+#endif
+	add		%r28, %r25, %r25
+	sub		%r25, r31, %r25
+
+
+1:      fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	cmpb,COND(<<)	%r28, %r25,1b
+	fdc,m		r31(%r28)
+
+	sync
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(flush_dcache_page_asm)
+
+/*
+ * flush_icache_page_asm: flush one page from the I-cache through a
+ * TMPALIAS mapping.  %r26 = physical address (tlb-insert form),
+ * %r25 = user vaddr for alias coloring, then reused as end address.
+ * Both I- and D-TLB entries for the alias are purged (see comment).
+ */
+ENTRY_CFI(flush_icache_page_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%(TMPALIAS_MAP_START), %r28
+#ifdef CONFIG_64BIT
+#if (TMPALIAS_MAP_START >= 0x80000000)
+	depdi		0, 31,32, %r28		/* clear any sign extension */
+#endif
+	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
+	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
+	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
+#else
+	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
+	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
+	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
+#endif
+
+	/* Purge any old translation.  Note that the FIC instruction
+	 * may use either the instruction or data TLB.  Given that we
+	 * have a flat address space, it's not clear which TLB will be
+	 * used.  So, we purge both entries.  */
+
+#ifdef CONFIG_PA20
+	pdtlb,l		%r0(%r28)
+	pitlb,l         %r0(%sr4,%r28)
+#else
+	tlb_lock        %r20,%r21,%r22
+	pdtlb		%r0(%r28)
+	pitlb           %r0(%sr4,%r28)
+	tlb_unlock      %r20,%r21,%r22
+#endif
+
+	ldil		L%icache_stride, %r1
+	ldw		R%icache_stride(%r1), %r31
+
+#ifdef CONFIG_64BIT
+	depdi,z		1, 63-PAGE_SHIFT,1, %r25
+#else
+	depwi,z		1, 31-PAGE_SHIFT,1, %r25
+#endif
+	add		%r28, %r25, %r25
+	sub		%r25, %r31, %r25
+
+
+	/* fic only has the type 26 form on PA1.1, requiring an
+	 * explicit space specification, so use %sr4 */
+1:      fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	cmpb,COND(<<)	%r28, %r25,1b
+	fic,m		%r31(%sr4,%r28)
+
+	sync
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(flush_icache_page_asm)
+
+/*
+ * flush_kernel_dcache_page_asm: flush one page from the D-cache via
+ * its kernel mapping.  %r26 = kernel vaddr of the page; stride comes
+ * from dcache_stride, %r25 becomes the loop end address.
+ */
+ENTRY_CFI(flush_kernel_dcache_page_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%dcache_stride, %r1
+	ldw		R%dcache_stride(%r1), %r23
+
+#ifdef CONFIG_64BIT
+	depdi,z		1, 63-PAGE_SHIFT,1, %r25
+#else
+	depwi,z		1, 31-PAGE_SHIFT,1, %r25
+#endif
+	add		%r26, %r25, %r25
+	sub		%r25, %r23, %r25
+
+
+1:      fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	cmpb,COND(<<)		%r26, %r25,1b
+	fdc,m		%r23(%r26)
+
+	sync
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(flush_kernel_dcache_page_asm)
+
+/*
+ * purge_kernel_dcache_page_asm: purge (invalidate without writeback,
+ * pdc) one page from the D-cache via its kernel mapping.
+ * %r26 = kernel vaddr of the page.
+ */
+ENTRY_CFI(purge_kernel_dcache_page_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%dcache_stride, %r1
+	ldw		R%dcache_stride(%r1), %r23
+
+#ifdef CONFIG_64BIT
+	depdi,z		1, 63-PAGE_SHIFT,1, %r25
+#else
+	depwi,z		1, 31-PAGE_SHIFT,1, %r25
+#endif
+	add		%r26, %r25, %r25
+	sub		%r25, %r23, %r25
+
+1:      pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	cmpb,COND(<<)		%r26, %r25, 1b
+	pdc,m		%r23(%r26)
+
+	sync
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(purge_kernel_dcache_page_asm)
+
+/*
+ * flush_user_dcache_range_asm: fdc a user-space range via %sr3.
+ * %r26 = start (aligned down to the cache stride), %r25 = end.
+ */
+ENTRY_CFI(flush_user_dcache_range_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%dcache_stride, %r1
+	ldw		R%dcache_stride(%r1), %r23
+	ldo		-1(%r23), %r21
+	ANDCM		%r26, %r21, %r26
+
+1:      cmpb,COND(<<),n	%r26, %r25, 1b
+	fdc,m		%r23(%sr3, %r26)
+
+	sync
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(flush_user_dcache_range_asm)
+
+/*
+ * flush_kernel_dcache_range_asm: fdc a kernel-space range.
+ * %r26 = start (aligned down to the cache stride), %r25 = end.
+ * Ends with sync + syncdma.
+ */
+ENTRY_CFI(flush_kernel_dcache_range_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%dcache_stride, %r1
+	ldw		R%dcache_stride(%r1), %r23
+	ldo		-1(%r23), %r21
+	ANDCM		%r26, %r21, %r26
+
+1:      cmpb,COND(<<),n	%r26, %r25,1b
+	fdc,m		%r23(%r26)
+
+	sync
+	syncdma
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(flush_kernel_dcache_range_asm)
+
+/*
+ * purge_kernel_dcache_range_asm: pdc (invalidate without writeback)
+ * a kernel-space range.  %r26 = start (stride-aligned), %r25 = end.
+ */
+ENTRY_CFI(purge_kernel_dcache_range_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%dcache_stride, %r1
+	ldw		R%dcache_stride(%r1), %r23
+	ldo		-1(%r23), %r21
+	ANDCM		%r26, %r21, %r26
+
+1:      cmpb,COND(<<),n	%r26, %r25,1b
+	pdc,m		%r23(%r26)
+
+	sync
+	syncdma
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(purge_kernel_dcache_range_asm)
+
+/*
+ * flush_user_icache_range_asm: fic a user-space range via %sr3.
+ * %r26 = start (aligned down to the icache stride), %r25 = end.
+ */
+ENTRY_CFI(flush_user_icache_range_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%icache_stride, %r1
+	ldw		R%icache_stride(%r1), %r23
+	ldo		-1(%r23), %r21
+	ANDCM		%r26, %r21, %r26
+
+1:      cmpb,COND(<<),n	%r26, %r25,1b
+	fic,m		%r23(%sr3, %r26)
+
+	sync
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(flush_user_icache_range_asm)
+
+/*
+ * flush_kernel_icache_page: fic one page via the kernel mapping
+ * (%sr4).  %r26 = kernel vaddr of the page; stride from icache_stride,
+ * %r25 becomes the loop end address.
+ */
+ENTRY_CFI(flush_kernel_icache_page)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%icache_stride, %r1
+	ldw		R%icache_stride(%r1), %r23
+
+#ifdef CONFIG_64BIT
+	depdi,z		1, 63-PAGE_SHIFT,1, %r25
+#else
+	depwi,z		1, 31-PAGE_SHIFT,1, %r25
+#endif
+	add		%r26, %r25, %r25
+	sub		%r25, %r23, %r25
+
+
+1:      fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	cmpb,COND(<<)		%r26, %r25, 1b
+	fic,m		%r23(%sr4, %r26)
+
+	sync
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(flush_kernel_icache_page)
+
+/*
+ * flush_kernel_icache_range_asm: fic a kernel-space range via %sr4.
+ * %r26 = start (aligned down to the icache stride), %r25 = end.
+ */
+ENTRY_CFI(flush_kernel_icache_range_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%icache_stride, %r1
+	ldw		R%icache_stride(%r1), %r23
+	ldo		-1(%r23), %r21
+	ANDCM		%r26, %r21, %r26
+
+1:      cmpb,COND(<<),n	%r26, %r25, 1b
+	fic,m		%r23(%sr4, %r26)
+
+	sync
+	bv		%r0(%r2)
+	nop
+	.exit
+	.procend
+ENDPROC_CFI(flush_kernel_icache_range_asm)
+
+	/* align should cover use of rfi in disable_sr_hashing_asm and
+	 * srdis_done.
+	 */
+	.align	256
+/*
+ * disable_sr_hashing_asm: turn off space-register hashing in the CPU
+ * diagnose registers.  %r26 = CPU family selector (SRHASH_PCXST,
+ * SRHASH_PCXL or SRHASH_PA20).  Runs the mfdiag/mtdiag sequences in
+ * real mode with interruptions disabled, then returns to virtual mode.
+ * The raw .word opcodes are mfdiag/mtdiag forms gas cannot emit.
+ */
+ENTRY_CFI(disable_sr_hashing_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	/*
+	 * Switch to real mode
+	 */
+	/* pcxt_ssm_bug */
+	rsm		PSW_SM_I, %r0
+	load32		PA(1f), %r1
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
+	mtctl		%r0, %cr17		/* Clear IIASQ tail */
+	mtctl		%r0, %cr17		/* Clear IIASQ head */
+	mtctl		%r1, %cr18		/* IIAOQ head */
+	ldo		4(%r1), %r1
+	mtctl		%r1, %cr18		/* IIAOQ tail */
+	load32		REAL_MODE_PSW, %r1
+	mtctl		%r1, %ipsw
+	rfi
+	nop
+
+1:      cmpib,=,n	SRHASH_PCXST, %r26,srdis_pcxs
+	cmpib,=,n	SRHASH_PCXL, %r26,srdis_pcxl
+	cmpib,=,n	SRHASH_PA20, %r26,srdis_pa20
+	b,n		srdis_done
+
+srdis_pcxs:
+
+	/* Disable Space Register Hashing for PCXS,PCXT,PCXT' */
+
+	.word		0x141c1a00		/* mfdiag %dr0, %r28 */
+	.word		0x141c1a00		/* must issue twice */
+	depwi		0,18,1, %r28		/* Clear DHE (dcache hash enable) */
+	depwi		0,20,1, %r28		/* Clear IHE (icache hash enable) */
+	.word		0x141c1600		/* mtdiag %r28, %dr0 */
+	.word		0x141c1600		/* must issue twice */
+	b,n		srdis_done
+
+srdis_pcxl:
+
+	/* Disable Space Register Hashing for PCXL */
+
+	.word		0x141c0600		/* mfdiag %dr0, %r28 */
+	depwi           0,28,2, %r28		/* Clear DHASH_EN & IHASH_EN */
+	.word		0x141c0240		/* mtdiag %r28, %dr0 */
+	b,n		srdis_done
+
+srdis_pa20:
+
+	/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */
+
+	.word		0x144008bc		/* mfdiag %dr2, %r28 */
+	depdi		0, 54,1, %r28		/* clear DIAG_SPHASH_ENAB (bit 54) */
+	.word		0x145c1840		/* mtdiag %r28, %dr2 */
+
+
+srdis_done:
+	/* Switch back to virtual mode */
+	rsm		PSW_SM_I, %r0		/* prep to load iia queue */
+	load32 	   	2f, %r1
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
+	mtctl		%r0, %cr17		/* Clear IIASQ tail */
+	mtctl		%r0, %cr17		/* Clear IIASQ head */
+	mtctl		%r1, %cr18		/* IIAOQ head */
+	ldo		4(%r1), %r1
+	mtctl		%r1, %cr18		/* IIAOQ tail */
+	load32		KERNEL_PSW, %r1
+	mtctl		%r1, %ipsw
+	rfi
+	nop
+
+2:      bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(disable_sr_hashing_asm)
+
+	.end
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/parisc_ksyms.c b/src/kernel/linux/v4.14/arch/parisc/kernel/parisc_ksyms.c
new file mode 100644
index 0000000..7baa226
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/parisc_ksyms.c
@@ -0,0 +1,154 @@
+/*
+ *    Architecture-specific kernel symbols
+ *
+ *    Copyright (C) 2000-2001 Richard Hirst <rhirst with parisc-linux.org>
+ *    Copyright (C) 2001 Dave Kennedy
+ *    Copyright (C) 2001 Paul Bame <bame at parisc-linux.org>
+ *    Copyright (C) 2001-2003 Grant Grundler <grundler with parisc-linux.org>
+ *    Copyright (C) 2002-2003 Matthew Wilcox <willy at parisc-linux.org>
+ *    Copyright (C) 2002 Randolph Chung <tausq at parisc-linux.org>
+ *    Copyright (C) 2002-2007 Helge Deller <deller with parisc-linux.org>
+ * 
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+
+#include <linux/string.h>
+EXPORT_SYMBOL(memset);
+
+#include <linux/atomic.h>
+EXPORT_SYMBOL(__xchg8);
+EXPORT_SYMBOL(__xchg32);
+EXPORT_SYMBOL(__cmpxchg_u32);
+EXPORT_SYMBOL(__cmpxchg_u64);
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(__atomic_hash);
+#endif
+#ifdef CONFIG_64BIT
+EXPORT_SYMBOL(__xchg64);
+#endif
+
+#include <linux/uaccess.h>
+EXPORT_SYMBOL(lclear_user);
+EXPORT_SYMBOL(lstrnlen_user);
+
+#ifndef CONFIG_64BIT
+/* Needed so insmod can set dp value */
+extern int $global$;
+EXPORT_SYMBOL($global$);
+#endif
+
+#include <asm/io.h>
+EXPORT_SYMBOL(memcpy_toio);
+EXPORT_SYMBOL(memcpy_fromio);
+EXPORT_SYMBOL(memset_io);
+
+extern void $$divI(void);
+extern void $$divU(void);
+extern void $$remI(void);
+extern void $$remU(void);
+extern void $$mulI(void);
+extern void $$divU_3(void);
+extern void $$divU_5(void);
+extern void $$divU_6(void);
+extern void $$divU_9(void);
+extern void $$divU_10(void);
+extern void $$divU_12(void);
+extern void $$divU_7(void);
+extern void $$divU_14(void);
+extern void $$divU_15(void);
+extern void $$divI_3(void);
+extern void $$divI_5(void);
+extern void $$divI_6(void);
+extern void $$divI_7(void);
+extern void $$divI_9(void);
+extern void $$divI_10(void);
+extern void $$divI_12(void);
+extern void $$divI_14(void);
+extern void $$divI_15(void);
+
+EXPORT_SYMBOL($$divI);
+EXPORT_SYMBOL($$divU);
+EXPORT_SYMBOL($$remI);
+EXPORT_SYMBOL($$remU);
+EXPORT_SYMBOL($$mulI);
+EXPORT_SYMBOL($$divU_3);
+EXPORT_SYMBOL($$divU_5);
+EXPORT_SYMBOL($$divU_6);
+EXPORT_SYMBOL($$divU_9);
+EXPORT_SYMBOL($$divU_10);
+EXPORT_SYMBOL($$divU_12);
+EXPORT_SYMBOL($$divU_7);
+EXPORT_SYMBOL($$divU_14);
+EXPORT_SYMBOL($$divU_15);
+EXPORT_SYMBOL($$divI_3);
+EXPORT_SYMBOL($$divI_5);
+EXPORT_SYMBOL($$divI_6);
+EXPORT_SYMBOL($$divI_7);
+EXPORT_SYMBOL($$divI_9);
+EXPORT_SYMBOL($$divI_10);
+EXPORT_SYMBOL($$divI_12);
+EXPORT_SYMBOL($$divI_14);
+EXPORT_SYMBOL($$divI_15);
+
+extern void __ashrdi3(void);
+extern void __ashldi3(void);
+extern void __lshrdi3(void);
+extern void __muldi3(void);
+extern void __ucmpdi2(void);
+
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
+
+asmlinkage void * __canonicalize_funcptr_for_compare(void *);
+EXPORT_SYMBOL(__canonicalize_funcptr_for_compare);
+
+#ifdef CONFIG_64BIT
+extern void __divdi3(void);
+extern void __udivdi3(void);
+extern void __umoddi3(void);
+extern void __moddi3(void);
+
+EXPORT_SYMBOL(__divdi3);
+EXPORT_SYMBOL(__udivdi3);
+EXPORT_SYMBOL(__umoddi3);
+EXPORT_SYMBOL(__moddi3);
+#endif
+
+#ifndef CONFIG_64BIT
+extern void $$dyncall(void);
+EXPORT_SYMBOL($$dyncall);
+#endif
+
+#ifdef CONFIG_DISCONTIGMEM
+#include <asm/mmzone.h>
+EXPORT_SYMBOL(node_data);
+EXPORT_SYMBOL(pfnnid_map);
+#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
+
+/* from pacache.S -- needed for clear/copy_page */
+EXPORT_SYMBOL(clear_page_asm);
+EXPORT_SYMBOL(copy_page_asm);
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/pci-dma.c b/src/kernel/linux/v4.14/arch/parisc/kernel/pci-dma.c
new file mode 100644
index 0000000..412231d
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/pci-dma.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+** PARISC 1.1 Dynamic DMA mapping support.
+** This implementation is for PA-RISC platforms that do not support
+** I/O TLBs (aka DMA address translation hardware).
+** See Documentation/DMA-API-HOWTO.txt for interface definitions.
+**
+**      (c) Copyright 1999,2000 Hewlett-Packard Company
+**      (c) Copyright 2000 Grant Grundler
+**	(c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
+**      (c) Copyright 2000 John Marvin
+**
+** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
+** (I assume it's from David Mosberger-Tang but there was no Copyright)
+**
+** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
+**
+** - ggg
+*/
+
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+#include <linux/export.h>
+
+#include <asm/cacheflush.h>
+#include <asm/dma.h>    /* for DMA_CHUNK_SIZE */
+#include <asm/io.h>
+#include <asm/page.h>	/* get_order */
+#include <asm/pgalloc.h>
+#include <linux/uaccess.h>
+#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */
+
+static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
+static unsigned long pcxl_used_bytes __read_mostly = 0;
+static unsigned long pcxl_used_pages __read_mostly = 0;
+
+extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
+static DEFINE_SPINLOCK(pcxl_res_lock);
+static char    *pcxl_res_map;
+static int     pcxl_res_hint;
+static int     pcxl_res_size;
+
+#ifdef DEBUG_PCXL_RESOURCE
+#define DBG_RES(x...)	printk(x)
+#else
+#define DBG_RES(x...)
+#endif
+
+
+/*
+** Dump a hex representation of the resource map.
+*/
+
+#ifdef DUMP_RESMAP
+static
+void dump_resmap(void)
+{
+	u_long *res_ptr = (unsigned long *)pcxl_res_map;
+	u_long i = 0;
+
+	printk("res_map: ");
+	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
+		printk("%08lx ", *res_ptr);
+
+	printk("\n");
+}
+#else
+static inline void dump_resmap(void) {;}
+#endif
+
+/* DMA-API dma_supported hook: PA 1.1 platforms accept any device DMA mask. */
+static int pa11_dma_supported( struct device *dev, u64 mask)
+{
+	return 1;
+}
+
+/*
+ * Populate one PTE table's worth of the uncached range [vaddr, vaddr+size)
+ * with kernel-uncached mappings of *paddr_ptr, advancing *paddr_ptr by one
+ * page per entry.  Stops at the PMD boundary; always returns 0.
+ */
+static inline int map_pte_uncached(pte_t * pte,
+		unsigned long vaddr,
+		unsigned long size, unsigned long *paddr_ptr)
+{
+	unsigned long end;
+	unsigned long orig_vaddr = vaddr;
+
+	/* Work with the offset inside this PMD; keep orig_vaddr for the TLB. */
+	vaddr &= ~PMD_MASK;
+	end = vaddr + size;
+	if (end > PMD_SIZE)
+		end = PMD_SIZE;
+	do {
+		unsigned long flags;
+
+		if (!pte_none(*pte))
+			printk(KERN_ERR "map_pte_uncached: page already exists\n");
+		/* Set the PTE and purge any stale translation atomically
+		 * with respect to interrupts. */
+		purge_tlb_start(flags);
+		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
+		pdtlb_kernel(orig_vaddr);
+		purge_tlb_end(flags);
+		vaddr += PAGE_SIZE;
+		orig_vaddr += PAGE_SIZE;
+		(*paddr_ptr) += PAGE_SIZE;
+		pte++;
+	} while (vaddr < end);
+	return 0;
+}
+
+/*
+ * Walk one PMD's worth of the uncached range, allocating PTE tables as
+ * needed and delegating to map_pte_uncached() for each.  Returns 0 on
+ * success or -ENOMEM if a PTE table cannot be allocated.
+ */
+static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
+		unsigned long size, unsigned long *paddr_ptr)
+{
+	unsigned long end;
+	unsigned long orig_vaddr = vaddr;
+
+	/* Offset within this PGD entry; orig_vaddr stays absolute. */
+	vaddr &= ~PGDIR_MASK;
+	end = vaddr + size;
+	if (end > PGDIR_SIZE)
+		end = PGDIR_SIZE;
+	do {
+		pte_t * pte = pte_alloc_kernel(pmd, vaddr);
+		if (!pte)
+			return -ENOMEM;
+		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
+			return -ENOMEM;
+		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
+		orig_vaddr += PMD_SIZE;
+		pmd++;
+	} while (vaddr < end);
+	return 0;
+}
+
+/*
+ * Map the physical range starting at paddr into kernel virtual space at
+ * vaddr with caching disabled, walking the kernel page tables PGD by PGD.
+ * Returns 0 on success or -ENOMEM on page-table allocation failure.
+ */
+static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
+		unsigned long paddr)
+{
+	pgd_t * dir;
+	unsigned long end = vaddr + size;
+
+	dir = pgd_offset_k(vaddr);
+	do {
+		pmd_t *pmd;
+		
+		pmd = pmd_alloc(NULL, dir, vaddr);
+		if (!pmd)
+			return -ENOMEM;
+		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
+			return -ENOMEM;
+		vaddr = vaddr + PGDIR_SIZE;
+		dir++;
+	} while (vaddr && (vaddr < end));	/* vaddr==0 guards wrap-around */
+	return 0;
+}
+
+/*
+ * Tear down the PTE mappings for one PMD's worth of an uncached range,
+ * purging the TLB entry for each page.  Complains (but continues) if a
+ * swapped-out entry is found in a kernel table, which should never happen.
+ *
+ * NOTE(review): pte_offset_map() has no matching pte_unmap() here --
+ * appears to rely on parisc not using highmem PTEs; confirm.
+ */
+static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
+		unsigned long size)
+{
+	pte_t * pte;
+	unsigned long end;
+	unsigned long orig_vaddr = vaddr;
+
+	if (pmd_none(*pmd))
+		return;
+	if (pmd_bad(*pmd)) {
+		pmd_ERROR(*pmd);
+		pmd_clear(pmd);
+		return;
+	}
+	pte = pte_offset_map(pmd, vaddr);
+	vaddr &= ~PMD_MASK;
+	end = vaddr + size;
+	if (end > PMD_SIZE)
+		end = PMD_SIZE;
+	do {
+		unsigned long flags;
+		pte_t page = *pte;
+
+		pte_clear(&init_mm, vaddr, pte);
+		purge_tlb_start(flags);
+		pdtlb_kernel(orig_vaddr);
+		purge_tlb_end(flags);
+		vaddr += PAGE_SIZE;
+		orig_vaddr += PAGE_SIZE;
+		pte++;
+		if (pte_none(page) || pte_present(page))
+			continue;
+		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
+	} while (vaddr < end);
+}
+
+/*
+ * Tear down one PGD entry's worth of an uncached range, delegating each
+ * PMD slot to unmap_uncached_pte().  Bad PGD entries are reported and
+ * cleared rather than walked.
+ */
+static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
+		unsigned long size)
+{
+	pmd_t * pmd;
+	unsigned long end;
+	unsigned long orig_vaddr = vaddr;
+
+	if (pgd_none(*dir))
+		return;
+	if (pgd_bad(*dir)) {
+		pgd_ERROR(*dir);
+		pgd_clear(dir);
+		return;
+	}
+	pmd = pmd_offset(dir, vaddr);
+	vaddr &= ~PGDIR_MASK;	/* offset within this PGD entry */
+	end = vaddr + size;
+	if (end > PGDIR_SIZE)
+		end = PGDIR_SIZE;
+	do {
+		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
+		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
+		orig_vaddr += PMD_SIZE;
+		pmd++;
+	} while (vaddr < end);
+}
+
+/*
+ * Remove the uncached kernel mappings created by map_uncached_pages()
+ * for the range [vaddr, vaddr+size).
+ */
+static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
+{
+	pgd_t * dir;
+	unsigned long end = vaddr + size;
+
+	dir = pgd_offset_k(vaddr);
+	do {
+		unmap_uncached_pmd(dir, vaddr, end - vaddr);
+		vaddr = vaddr + PGDIR_SIZE;
+		dir++;
+	} while (vaddr && (vaddr < end));	/* vaddr==0 guards wrap-around */
+}
+
+/*
+ * Scan the resource map in `size`-bit granules for one whose bits under
+ * `mask` are all free; claim it, record the byte index in `idx`, advance
+ * the allocation hint, and jump to resource_found in the enclosing
+ * function (pcxl_alloc_range).
+ */
+#define PCXL_SEARCH_LOOP(idx, mask, size)  \
+       for(; res_ptr < res_end; ++res_ptr) \
+       { \
+               if(0 == ((*res_ptr) & mask)) { \
+                       *res_ptr |= mask; \
+		       idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
+		       pcxl_res_hint = idx + (size >> 3); \
+                       goto resource_found; \
+               } \
+       }
+
+/* Search first from the rotating hint, then wrap to the map start. */
+#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
+       u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
+       u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
+       PCXL_SEARCH_LOOP(idx, mask, size); \
+       res_ptr = (u##size *)&pcxl_res_map[0]; \
+       PCXL_SEARCH_LOOP(idx, mask, size); \
+}
+
+/*
+ * Reserve a contiguous run of pages in the PCXL uncached DMA mapping area.
+ * @size: bytes to map; one resource-map bit covers one page.
+ *
+ * The search operates on 8/16/32-page granules.  Panics if the request
+ * needs more than 32 pages or if the map is exhausted.  Returns the
+ * kernel virtual address of the reserved range inside the pcxl dma area.
+ *
+ * NOTE(review): assumes 1 <= pages_needed <= BITS_PER_LONG; a zero-size
+ * request would shift by BITS_PER_LONG (undefined) -- confirm callers
+ * always pass at least one page.
+ */
+unsigned long
+pcxl_alloc_range(size_t size)
+{
+	int res_idx;
+	u_long mask, flags;
+	unsigned int pages_needed = size >> PAGE_SHIFT;
+
+	/* Build a mask with one bit set per page needed. */
+	mask = (u_long) -1L;
+ 	mask >>= BITS_PER_LONG - pages_needed;
+
+	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n", 
+		size, pages_needed, mask);
+
+	spin_lock_irqsave(&pcxl_res_lock, flags);
+
+	if(pages_needed <= 8) {
+		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
+	} else if(pages_needed <= 16) {
+		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
+	} else if(pages_needed <= 32) {
+		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
+	} else {
+		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
+		      __FILE__);
+	}
+
+	/* Falls through here only if both search passes found nothing. */
+	dump_resmap();
+	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
+	      __FILE__);
+	
+resource_found:
+	
+	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
+		res_idx, mask, pcxl_res_hint);
+
+	pcxl_used_pages += pages_needed;
+	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);
+
+	spin_unlock_irqrestore(&pcxl_res_lock, flags);
+
+	dump_resmap();
+
+	/* 
+	** return the corresponding vaddr in the pcxl dma map
+	*/
+	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
+}
+
+/*
+ * Clear the `m` bits for a previously allocated granule, locating the
+ * granule by rounding the byte index `idx` down to `size`-bit alignment.
+ */
+#define PCXL_FREE_MAPPINGS(idx, m, size) \
+		u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
+		/* BUG_ON((*res_ptr & m) != m); */ \
+		*res_ptr &= ~m;
+
+/*
+** clear bits in the pcxl resource map
+*/
+/*
+ * Release a range previously reserved by pcxl_alloc_range(), clearing
+ * its bits in the resource map and updating the usage accounting.
+ * @vaddr: address returned by pcxl_alloc_range()
+ * @size:  same (rounded) size that was allocated
+ */
+static void
+pcxl_free_range(unsigned long vaddr, size_t size)
+{
+	u_long mask, flags;
+	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
+	unsigned int pages_mapped = size >> PAGE_SHIFT;
+
+	/* Rebuild the same bit mask pcxl_alloc_range() used. */
+	mask = (u_long) -1L;
+ 	mask >>= BITS_PER_LONG - pages_mapped;
+
+	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n", 
+		res_idx, size, pages_mapped, mask);
+
+	spin_lock_irqsave(&pcxl_res_lock, flags);
+
+	if(pages_mapped <= 8) {
+		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
+	} else if(pages_mapped <= 16) {
+		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
+	} else if(pages_mapped <= 32) {
+		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
+	} else {
+		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
+		      __FILE__);
+	}
+	
+	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
+	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);
+
+	spin_unlock_irqrestore(&pcxl_res_lock, flags);
+
+	dump_resmap();
+}
+
+/*
+ * seq_file show callback for /proc/gsc/pcxl_dma: prints the DMA mapping
+ * area size and block/page usage statistics from the resource map.
+ */
+static int proc_pcxl_dma_show(struct seq_file *m, void *v)
+{
+#if 0
+	u_long i = 0;
+	unsigned long *res_ptr = (u_long *)pcxl_res_map;
+#endif
+	unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */
+
+	seq_printf(m, "\nDMA Mapping Area size    : %d bytes (%ld pages)\n",
+		PCXL_DMA_MAP_SIZE, total_pages);
+
+	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);
+
+	seq_puts(m,  "     	  total:    free:    used:   % used:\n");
+	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
+		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
+		(pcxl_used_bytes * 100) / pcxl_res_size);
+
+	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
+		total_pages - pcxl_used_pages, pcxl_used_pages,
+		(pcxl_used_pages * 100 / total_pages));
+
+#if 0
+	/* NOTE(review): dead code -- 'buf' below is undeclared; this block
+	 * would not compile if re-enabled as-is. */
+	seq_puts(m, "\nResource bitmap:");
+
+	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
+		if ((i & 7) == 0)
+		    seq_puts(m,"\n   ");
+		seq_printf(m, "%s %08lx", buf, *res_ptr);
+	}
+#endif
+	seq_putc(m, '\n');
+	return 0;
+}
+
+/* open() hook: bind the seq_file single-show handler; no private data. */
+static int proc_pcxl_dma_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_pcxl_dma_show, NULL);
+}
+
+/* File operations for /proc/gsc/pcxl_dma (standard single-open seq_file). */
+static const struct file_operations proc_pcxl_dma_ops = {
+	.owner		= THIS_MODULE,
+	.open		= proc_pcxl_dma_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/*
+ * Late-init setup for the PCXL DMA mapping area: allocate and zero the
+ * page resource map and publish usage statistics under /proc/gsc.
+ * Does nothing (returns 0) when firmware reserved no pcxl dma area.
+ */
+static int __init
+pcxl_dma_init(void)
+{
+	if (pcxl_dma_start == 0)
+		return 0;
+
+	/* One map bit per page => bytes = pages / 8. */
+	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
+	pcxl_res_hint = 0;
+	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
+					    get_order(pcxl_res_size));
+	if (!pcxl_res_map)
+		panic("pcxl_dma_init: unable to allocate DMA resource map\n");
+	memset(pcxl_res_map, 0, pcxl_res_size);
+	proc_gsc_root = proc_mkdir("gsc", NULL);
+	if (!proc_gsc_root)
+    		printk(KERN_WARNING
+			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
+	else {
+		struct proc_dir_entry* ent;
+		ent = proc_create("pcxl_dma", 0, proc_gsc_root,
+				  &proc_pcxl_dma_ops);
+		if (!ent)
+			printk(KERN_WARNING
+				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
+	}
+	return 0;
+}
+
+__initcall(pcxl_dma_init);
+
+/*
+ * DMA-API alloc hook for PCXL: reserve a slot in the uncached mapping
+ * area, back it with free pages, flush the cached alias, then map the
+ * pages uncached at the reserved address.  Returns the uncached kernel
+ * virtual address (or NULL if no pages are available); *dma_handle
+ * receives the physical address for the device.
+ */
+static void *pa11_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
+{
+	unsigned long vaddr;
+	unsigned long paddr;
+	int order;
+
+	order = get_order(size);
+	size = 1 << (order + PAGE_SHIFT);
+	vaddr = pcxl_alloc_range(size);	/* panics rather than return failure */
+	paddr = __get_free_pages(flag, order);
+	if (!paddr) {
+		/* Back out the map reservation instead of flushing and
+		 * mapping a NULL "address" and returning a bogus buffer. */
+		pcxl_free_range(vaddr, size);
+		return NULL;
+	}
+	/* Flush the cacheable alias before the uncached mapping is used. */
+	flush_kernel_dcache_range(paddr, size);
+	paddr = __pa(paddr);
+	map_uncached_pages(vaddr, size, paddr);
+	*dma_handle = (dma_addr_t) paddr;
+
+#if 0
+/* This probably isn't needed to support EISA cards.
+** ISA cards will certainly only support 24-bit DMA addressing.
+** Not clear if we can, want, or need to support ISA.
+*/
+	if (!dev || *dev->coherent_dma_mask < 0xffffffff)
+		gfp |= GFP_DMA;
+#endif
+	return (void *)vaddr;
+}
+
+/*
+ * DMA-API free hook for PCXL: tear down the uncached mapping, release
+ * the resource-map reservation, and return the backing pages (located
+ * via the physical dma_handle) to the page allocator.
+ */
+static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, unsigned long attrs)
+{
+	int order;
+
+	order = get_order(size);
+	size = 1 << (order + PAGE_SHIFT);	/* same rounding as alloc */
+	unmap_uncached_pages((unsigned long)vaddr, size);
+	pcxl_free_range((unsigned long)vaddr, size);
+	free_pages((unsigned long)__va(dma_handle), order);
+}
+
+/*
+ * DMA-API map_page hook: flush the CPU dcache for the buffer (unless the
+ * caller asked to skip the sync) and return its physical address -- no
+ * IOMMU on these platforms, so mapping is the identity translation.
+ */
+static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size,
+		enum dma_data_direction direction, unsigned long attrs)
+{
+	void *addr = page_address(page) + offset;
+	BUG_ON(direction == DMA_NONE);
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		flush_kernel_dcache_range((unsigned long) addr, size);
+
+	return virt_to_phys(addr);
+}
+
+/*
+ * DMA-API unmap_page hook: for device-to-CPU transfers, flush the dcache
+ * so stale cached lines don't mask data the device just wrote.
+ */
+static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction direction,
+		unsigned long attrs)
+{
+	BUG_ON(direction == DMA_NONE);
+
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return;
+
+	if (direction == DMA_TO_DEVICE)
+		return;
+
+	/*
+	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
+	 * simple map/unmap case. However, it IS necessary if
+	 * pci_dma_sync_single_* has been called and the buffer reused.
+	 */
+
+	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
+}
+
+/*
+ * DMA-API map_sg hook: fill in identity (phys == dma) addresses for each
+ * scatterlist entry and flush its dcache range unless the caller opted
+ * out.  Returns nents (no entries are ever merged).
+ */
+static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+		int nents, enum dma_data_direction direction,
+		unsigned long attrs)
+{
+	int i;
+	struct scatterlist *sg;
+
+	BUG_ON(direction == DMA_NONE);
+
+	for_each_sg(sglist, sg, nents, i) {
+		unsigned long vaddr = (unsigned long)sg_virt(sg);
+
+		sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
+		sg_dma_len(sg) = sg->length;
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
+		flush_kernel_dcache_range(vaddr, sg->length);
+	}
+	return nents;
+}
+
+/*
+ * DMA-API unmap_sg hook: flush each entry's cache range for
+ * device-to-CPU transfers; nothing to do for CPU-to-device or when the
+ * caller skips the sync.
+ */
+static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+		int nents, enum dma_data_direction direction,
+		unsigned long attrs)
+{
+	int i;
+	struct scatterlist *sg;
+
+	BUG_ON(direction == DMA_NONE);
+
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return;
+
+	if (direction == DMA_TO_DEVICE)
+		return;
+
+	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
+
+	for_each_sg(sglist, sg, nents, i)
+		flush_kernel_vmap_range(sg_virt(sg), sg->length);
+}
+
+/* Sync a single mapping for CPU access: flush the dcache over the range. */
+static void pa11_dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+
+	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
+			size);
+}
+
+/* Sync a single mapping for device access: same dcache flush as for-CPU. */
+static void pa11_dma_sync_single_for_device(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+
+	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
+			size);
+}
+
+/* Sync a scatterlist for CPU access: flush each entry's vmap range. */
+static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+{
+	int i;
+	struct scatterlist *sg;
+
+	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
+
+	for_each_sg(sglist, sg, nents, i)
+		flush_kernel_vmap_range(sg_virt(sg), sg->length);
+}
+
+/* Sync a scatterlist for device access: identical flush as the CPU case. */
+static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+{
+	int i;
+	struct scatterlist *sg;
+
+	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
+
+	for_each_sg(sglist, sg, nents, i)
+		flush_kernel_vmap_range(sg_virt(sg), sg->length);
+}
+
+/* dma_map_ops for PCXL/PCXL2 machines: coherent allocations come from the
+ * uncached pcxl dma area; streaming ops flush caches explicitly. */
+const struct dma_map_ops pcxl_dma_ops = {
+	.dma_supported =	pa11_dma_supported,
+	.alloc =		pa11_dma_alloc,
+	.free =			pa11_dma_free,
+	.map_page =		pa11_dma_map_page,
+	.unmap_page =		pa11_dma_unmap_page,
+	.map_sg =		pa11_dma_map_sg,
+	.unmap_sg =		pa11_dma_unmap_sg,
+	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
+	.sync_single_for_device = pa11_dma_sync_single_for_device,
+	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
+	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
+};
+
+/*
+ * alloc hook for PCX machines, which cannot provide coherent memory:
+ * only DMA_ATTR_NON_CONSISTENT requests are honored (plain cached
+ * pages); truly coherent requests fail with NULL.
+ */
+static void *pcx_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
+{
+	void *addr;
+
+	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
+		return NULL;
+
+	addr = (void *)__get_free_pages(flag, get_order(size));
+	if (addr)
+		*dma_handle = (dma_addr_t)virt_to_phys(addr);
+
+	return addr;
+}
+
+/*
+ * free hook for PCX machines: the buffer came straight from
+ * __get_free_pages() in pcx_dma_alloc(), so simply hand it back.
+ */
+static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t iova, unsigned long attrs)
+{
+	free_pages((unsigned long)vaddr, get_order(size));
+}
+
+/* dma_map_ops for PCX machines: non-consistent allocations only; the
+ * streaming map/sync hooks are shared with the PCXL implementation. */
+const struct dma_map_ops pcx_dma_ops = {
+	.dma_supported =	pa11_dma_supported,
+	.alloc =		pcx_dma_alloc,
+	.free =			pcx_dma_free,
+	.map_page =		pa11_dma_map_page,
+	.unmap_page =		pa11_dma_unmap_page,
+	.map_sg =		pa11_dma_map_sg,
+	.unmap_sg =		pa11_dma_unmap_sg,
+	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
+	.sync_single_for_device = pa11_dma_sync_single_for_device,
+	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
+	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
+};
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/pci.c b/src/kernel/linux/v4.14/arch/parisc/kernel/pci.c
new file mode 100644
index 0000000..13ee356
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/pci.c
@@ -0,0 +1,279 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1997, 1998 Ralf Baechle
+ * Copyright (C) 1999 SuSE GmbH
+ * Copyright (C) 1999-2001 Hewlett-Packard Company
+ * Copyright (C) 1999-2001 Grant Grundler
+ */
+#include <linux/eisa.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/superio.h>
+
+#define DEBUG_RESOURCES 0
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBGC(x...)	printk(KERN_DEBUG x)
+#else
+# define DBGC(x...)
+#endif
+
+
+#if DEBUG_RESOURCES
+#define DBG_RES(x...)	printk(KERN_DEBUG x)
+#else
+#define DBG_RES(x...)
+#endif
+
+/* To be used as: mdelay(pci_post_reset_delay);
+ *
+ * post_reset is the time the kernel should stall to prevent anyone from
+ * accessing the PCI bus once #RESET is de-asserted. 
+ * PCI spec somewhere says 1 second but with multi-PCI bus systems,
+ * this makes the boot time much longer than necessary.
+ * 20ms seems to work for all the HP PCI implementations to date.
+ *
+ * #define pci_post_reset_delay 50
+ */
+
+struct pci_port_ops *pci_port __read_mostly;
+struct pci_bios_ops *pci_bios __read_mostly;
+
+static int pci_hba_count __read_mostly;
+
+/* parisc_pci_hba used by pci_port->in/out() ops to lookup bus data.  */
+#define PCI_HBA_MAX 32
+static struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX] __read_mostly;
+
+
+/********************************************************************
+**
+** I/O port space support
+**
+*********************************************************************/
+
+/* EISA port numbers and PCI port numbers share the same interface.  Some
+ * machines have both EISA and PCI adapters installed.  Rather than turn
+ * pci_port into an array, we reserve bus 0 for EISA and call the EISA
+ * routines if the access is to a port on bus 0.  We don't want to fix
+ * EISA and ISA drivers which assume port space is <= 0xffff.
+ */
+
+/* On EISA-capable machines, bus number 0 addresses the EISA bus; these
+ * hooks divert port accesses there and compile away without CONFIG_EISA. */
+#ifdef CONFIG_EISA
+#define EISA_IN(size) if (EISA_bus && (b == 0)) return eisa_in##size(addr)
+#define EISA_OUT(size) if (EISA_bus && (b == 0)) return eisa_out##size(d, addr)
+#else
+#define EISA_IN(size)
+#define EISA_OUT(size)
+#endif
+
+/*
+ * Generate inb/inw/inl: route bus 0 to EISA when configured, otherwise
+ * look up the owning HBA from the port number and delegate to its
+ * pci_port->in* accessor.  Returns all-ones when no HBA is registered
+ * for that slot (mimics a PCI master abort).
+ */
+#define PCI_PORT_IN(type, size) \
+u##size in##type (int addr) \
+{ \
+	int b = PCI_PORT_HBA(addr); \
+	EISA_IN(size); \
+	if (!parisc_pci_hba[b]) return (u##size) -1; \
+	return pci_port->in##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr)); \
+} \
+EXPORT_SYMBOL(in##type);
+
+PCI_PORT_IN(b,  8)
+PCI_PORT_IN(w, 16)
+PCI_PORT_IN(l, 32)
+
+
+/*
+ * Generate outb/outw/outl: the mirror image of PCI_PORT_IN, silently
+ * dropping writes aimed at a slot with no registered HBA.
+ */
+#define PCI_PORT_OUT(type, size) \
+void out##type (u##size d, int addr) \
+{ \
+	int b = PCI_PORT_HBA(addr); \
+	EISA_OUT(size); \
+	if (!parisc_pci_hba[b]) return; \
+	pci_port->out##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr), d); \
+} \
+EXPORT_SYMBOL(out##type);
+
+PCI_PORT_OUT(b,  8)
+PCI_PORT_OUT(w, 16)
+PCI_PORT_OUT(l, 32)
+
+
+
+/*
+ * BIOS32 replacement.
+ */
+/*
+ * BIOS32-replacement init: run the bus-provider's init hook (set up by
+ * the HBA driver before this subsys_initcall fires) and establish the
+ * PCI cache line size.  Returns -1 if no provider registered.
+ */
+static int __init pcibios_init(void)
+{
+	if (!pci_bios)
+		return -1;
+
+	if (pci_bios->init) {
+		pci_bios->init();
+	} else {
+		printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
+	}
+
+	/* Set the CLS for PCI as early as possible. */
+	pci_cache_line_size = pci_dfl_cache_line_size;
+
+	return 0;
+}
+
+
+/* Called from pci_do_scan_bus() *after* walking a bus but before walking PPBs.
+ * Delegates to the registered bus provider's fixup hook.
+ * NOTE(review): unlike pcibios_init(), pci_bios is dereferenced here
+ * without a NULL check -- relies on a provider always being registered
+ * before any bus scan; confirm. */
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+	if (pci_bios->fixup_bus) {
+		pci_bios->fixup_bus(bus);
+	} else {
+		printk(KERN_WARNING "pci_bios != NULL but fixup_bus() is!\n");
+	}
+}
+
+
+/*
+ * Called by pci_set_master() - a driver interface.
+ *
+ * Legacy PDC guarantees to set:
+ *	Map Memory BAR's into PA IO space.
+ *	Map Expansion ROM BAR into one common PA IO space per bus.
+ *	Map IO BAR's into PCI IO space.
+ *	Command (see below)
+ *	Cache Line Size
+ *	Latency Timer
+ *	Interrupt Line
+ *	PPB: secondary latency timer, io/mmio base/limit,
+ *		bus numbers, bridge control
+ *
+ */
+/*
+ * Driver-facing pci_set_master() backend: if firmware (or a driver) has
+ * not already programmed a reasonable latency timer, set it to 0x80
+ * together with the cache line size in one 16-bit config write.
+ */
+void pcibios_set_master(struct pci_dev *dev)
+{
+	u8 lat;
+
+	/* If someone already mucked with this, don't touch it. */
+	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
+	if (lat >= 16) return;
+
+	/*
+	** HP generally has fewer devices on the bus than other architectures.
+	** upper byte is PCI_LATENCY_TIMER.
+	*/
+	pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
+			      (0x80 << 8) | pci_cache_line_size);
+}
+
+/*
+ * pcibios_init_bridge() initializes cache line and default latency
+ * for pci controllers and pci-pci bridges
+ */
+/*
+ * Initialize a PCI-PCI bridge at boot: program the secondary latency
+ * timer and enable parity/SERR/master-abort reporting in the bridge
+ * control register.  Non-bridge devices are ignored.
+ */
+void __init pcibios_init_bridge(struct pci_dev *dev)
+{
+	unsigned short bridge_ctl, bridge_ctl_new;
+
+	/* We deal only with pci controllers and pci-pci bridges. */
+	if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+		return;
+
+	/* PCI-PCI bridge - set the cache line and default latency
+	 * (32) for primary and secondary buses.
+	 */
+	pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
+
+	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
+
+	bridge_ctl_new = bridge_ctl | PCI_BRIDGE_CTL_PARITY |
+		PCI_BRIDGE_CTL_SERR | PCI_BRIDGE_CTL_MASTER_ABORT;
+	dev_info(&dev->dev, "Changing bridge control from 0x%08x to 0x%08x\n",
+		bridge_ctl, bridge_ctl_new);
+
+	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl_new);
+}
+
+/*
+ * pcibios_align_resource() is called every time the generic PCI code
+ * wants to generate a new address. While looking for
+ * an available address, each candidate is first "aligned" and
+ * then checked for availability until a match is found.
+ *
+ * Since we are just checking candidates, don't use any fields other
+ * than res->start.
+ */
+/*
+ * Round a candidate resource start address up to the larger of the
+ * requested alignment and the platform minimum for that resource type
+ * (PCIBIOS_MIN_IO for I/O, PCIBIOS_MIN_MEM otherwise).
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+				resource_size_t size, resource_size_t alignment)
+{
+	resource_size_t mask, align, start = res->start;
+
+	DBG_RES("pcibios_align_resource(%s, (%p) [%lx,%lx]/%x, 0x%lx, 0x%lx)\n",
+		pci_name(((struct pci_dev *) data)),
+		res->parent, res->start, res->end,
+		(int) res->flags, size, alignment);
+
+	/* If it's not IO, then it's gotta be MEM */
+	align = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
+
+	/* Align to largest of MIN or input size */
+	mask = max(alignment, align) - 1;
+	start += mask;
+	start &= ~mask;
+
+	return start;
+}
+
+/*
+ * A driver is enabling the device.  We make sure that all the appropriate
+ * bits are set to allow the device to operate as the driver is expecting.
+ * We enable the port IO and memory IO bits if the device has any BARs of
+ * that type, and we enable the PERR and SERR bits unconditionally.
+ * Drivers that do not need parity (eg graphics and possibly networking)
+ * can clear these bits if they want.
+ */
+/*
+ * Enable a device for a driver: turn on its BAR decode via
+ * pci_enable_resources(), then unconditionally enable SERR and parity
+ * error reporting in the command register.  Returns 0 or the negative
+ * error from pci_enable_resources().
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+	int err;
+	u16 cmd, old_cmd;
+
+	err = pci_enable_resources(dev, mask);
+	if (err < 0)
+		return err;
+
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	old_cmd = cmd;
+
+	cmd |= (PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+
+#if 0
+	/* If bridge/bus controller has FBB enabled, child must too. */
+	if (dev->bus->bridge_ctl & PCI_BRIDGE_CTL_FAST_BACK)
+		cmd |= PCI_COMMAND_FAST_BACK;
+#endif
+
+	/* Only touch hardware if something actually changed. */
+	if (cmd != old_cmd) {
+		dev_info(&dev->dev, "enabling SERR and PARITY (%04x -> %04x)\n",
+			old_cmd, cmd);
+		pci_write_config_word(dev, PCI_COMMAND, cmd);
+	}
+	return 0;
+}
+
+
+/* PA-RISC specific */
+/* PA-RISC specific: record a Host Bus Adapter so the in*/out* port
+ * accessors can route port numbers to it.  Capped at PCI_HBA_MAX.
+ * NOTE(review): no locking around pci_hba_count -- presumably only
+ * called from single-threaded driver init; confirm. */
+void pcibios_register_hba(struct pci_hba_data *hba)
+{
+	if (pci_hba_count >= PCI_HBA_MAX) {
+		printk(KERN_ERR "PCI: Too many Host Bus Adapters\n");
+		return;
+	}
+
+	parisc_pci_hba[pci_hba_count] = hba;
+	hba->hba_num = pci_hba_count++;
+}
+
+subsys_initcall(pcibios_init);
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/pdc_chassis.c b/src/kernel/linux/v4.14/arch/parisc/kernel/pdc_chassis.c
new file mode 100644
index 0000000..3e04242
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/pdc_chassis.c
@@ -0,0 +1,302 @@
+/* 
+ *    interfaces to Chassis Codes via PDC (firmware)
+ *
+ *    Copyright (C) 2002 Laurent Canet <canetl@esiee.fr>
+ *    Copyright (C) 2002-2006 Thibaut VARENE <varenet@parisc-linux.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License, version 2, as
+ *    published by the Free Software Foundation.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ *    TODO: poll chassis warns, trigger (configurable) machine shutdown when
+ *    		needed.
+ *    	    Find out how to get Chassis warnings out of PAT boxes?
+ */
+
+#undef PDC_CHASSIS_DEBUG
+#ifdef PDC_CHASSIS_DEBUG
+#define DPRINTK(fmt, args...)	printk(fmt, ## args)
+#else
+#define DPRINTK(fmt, args...)
+#endif
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
+#include <linux/cache.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/pdc_chassis.h>
+#include <asm/processor.h>
+#include <asm/pdc.h>
+#include <asm/pdcpat.h>
+
+#define PDC_CHASSIS_VER	"0.05"
+
+#ifdef CONFIG_PDC_CHASSIS
+static unsigned int pdc_chassis_enabled __read_mostly = 1;
+
+
+/**
+ * pdc_chassis_setup() - Enable/disable pdc_chassis code at boot time.
+ * @str: configuration param: 0 to disable chassis log
+ *
+ * Parses the "pdcchassis=" kernel command line option into
+ * pdc_chassis_enabled.  Always returns 1 (option consumed).
+ */
+ 
+static int __init pdc_chassis_setup(char *str)
+{
+	/*panic_timeout = simple_strtoul(str, NULL, 0);*/
+	get_option(&str, &pdc_chassis_enabled);
+	return 1;
+}
+__setup("pdcchassis=", pdc_chassis_setup);
+
+
+/** 
+ * pdc_chassis_checkold() - Checks for old PDC_CHASSIS compatibility
+ * 
+ * Currently, only E class and A180 are known to work with this.
+ * Inspired by Christoph Plattner
+ *
+ * NOTE: the whole body is compiled out (#if 0); the switch cases only
+ * preserve the list of known-compatible CPU HVERSIONs for reference.
+ */
+#if 0
+static void __init pdc_chassis_checkold(void)
+{
+	switch(CPU_HVERSION) {
+		case 0x480:		/* E25 */
+		case 0x481:		/* E35 */
+		case 0x482:		/* E45 */
+		case 0x483:		/* E55 */
+		case 0x516:		/* A180 */
+			break;
+
+		default:
+			break;
+	}
+	DPRINTK(KERN_DEBUG "%s: pdc_chassis_checkold(); pdc_chassis_old = %d\n", __FILE__, pdc_chassis_old);
+}
+#endif
+
+/**
+ * pdc_chassis_panic_event() - Called by the panic handler.
+ *
+ * As soon as a panic occurs, we should inform the PDC so the chassis
+ * log/LEDs reflect the crash state.  Always returns NOTIFY_DONE.
+ */
+
+static int pdc_chassis_panic_event(struct notifier_block *this,
+		        unsigned long event, void *ptr)
+{
+	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+		return NOTIFY_DONE;
+}   
+
+
+/* Highest priority: run before other panic notifiers. */
+static struct notifier_block pdc_chassis_panic_block = {
+	.notifier_call = pdc_chassis_panic_event,
+	.priority = INT_MAX,
+};
+
+
+/**
+ * pdc_chassis_reboot_event() - Called by the reboot handler.
+ *
+ * As soon as a reboot occurs, we should inform the PDC by sending the
+ * SHUTDOWN chassis code.  Always returns NOTIFY_DONE.
+ */
+
+static int pdc_chassis_reboot_event(struct notifier_block *this,
+		        unsigned long event, void *ptr)
+{
+	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
+		return NOTIFY_DONE;
+}   
+
+
+/* Highest priority: run before other reboot notifiers. */
+static struct notifier_block pdc_chassis_reboot_block = {
+	.notifier_call = pdc_chassis_reboot_event,
+	.priority = INT_MAX,
+};
+#endif /* CONFIG_PDC_CHASSIS */
+
+
+/**
+ * parisc_pdc_chassis_init() - Called at boot time.
+ *
+ * Registers the panic and reboot notifiers so chassis codes are sent
+ * on crash/shutdown.  No-op when CONFIG_PDC_CHASSIS is off or support
+ * was disabled on the command line ("pdcchassis=0").
+ */
+
+void __init parisc_pdc_chassis_init(void)
+{
+#ifdef CONFIG_PDC_CHASSIS
+	if (likely(pdc_chassis_enabled)) {
+		DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__);
+
+		/* Let see if we have something to handle... */
+		printk(KERN_INFO "Enabling %s chassis codes support v%s\n",
+				is_pdc_pat() ? "PDC_PAT" : "regular",
+				PDC_CHASSIS_VER);
+
+		/* initialize panic notifier chain */
+		atomic_notifier_chain_register(&panic_notifier_list,
+				&pdc_chassis_panic_block);
+
+		/* initialize reboot notifier chain */
+		register_reboot_notifier(&pdc_chassis_reboot_block);
+	}
+#endif /* CONFIG_PDC_CHASSIS */
+}
+
+
+/** 
+ * pdc_chassis_send_status() - Sends a predefined message to the chassis,
+ * and changes the front panel LEDs according to the new system state
+ * @message: one of the PDC_CHASSIS_DIRECT_* message codes
+ *
+ * Only machines with 64 bits PDC PAT and those reported in
+ * pdc_chassis_checkold() are supported atm.
+ * 
+ * returns 0 if no error, -1 if no supported PDC is present or invalid message,
+ * else returns the appropriate PDC error code.
+ * 
+ * For a list of predefined messages, see asm-parisc/pdc_chassis.h
+ */
+
+int pdc_chassis_send_status(int message)
+{
+	/* Maybe we should do that in an other way ? */
+	int retval = 0;
+#ifdef CONFIG_PDC_CHASSIS
+	if (likely(pdc_chassis_enabled)) {
+
+		DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message);
+
+#ifdef CONFIG_64BIT
+		/* 64-bit PAT firmware: translate to PDC PAT chassis log calls. */
+		if (is_pdc_pat()) {
+			switch(message) {
+				case PDC_CHASSIS_DIRECT_BSTART:
+					retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BSTART, PDC_CHASSIS_LSTATE_RUN_NORMAL);
+					break;
+
+				case PDC_CHASSIS_DIRECT_BCOMPLETE:
+					retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BCOMPLETE, PDC_CHASSIS_LSTATE_RUN_NORMAL);
+					break;
+
+				case PDC_CHASSIS_DIRECT_SHUTDOWN:
+					retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_SHUTDOWN, PDC_CHASSIS_LSTATE_NONOS);
+					break;
+
+				case PDC_CHASSIS_DIRECT_PANIC:
+					retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_PANIC, PDC_CHASSIS_LSTATE_RUN_CRASHREC);
+					break;
+
+				case PDC_CHASSIS_DIRECT_LPMC:
+					retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_LPMC, PDC_CHASSIS_LSTATE_RUN_SYSINT);
+					break;
+
+				case PDC_CHASSIS_DIRECT_HPMC:
+					retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_HPMC, PDC_CHASSIS_LSTATE_RUN_NCRIT);
+					break;
+
+				default:
+					retval = -1;
+			}
+		} else retval = -1;
+#else
+		/* 32-bit (non-PAT) firmware: use the chassis display call. */
+		if (1) {
+			switch (message) {
+				case PDC_CHASSIS_DIRECT_BSTART:
+					retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_INIT));
+					break;
+
+				case PDC_CHASSIS_DIRECT_BCOMPLETE:
+					retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_RUN));
+					break;
+
+				case PDC_CHASSIS_DIRECT_SHUTDOWN:
+					retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_SHUT));
+					break;
+
+				case PDC_CHASSIS_DIRECT_HPMC:
+				case PDC_CHASSIS_DIRECT_PANIC:
+					retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_FLT));
+					break;
+
+				case PDC_CHASSIS_DIRECT_LPMC:
+					retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_WARN));
+					break;
+
+				default:
+					retval = -1;
+			}
+		} else retval = -1;
+#endif /* CONFIG_64BIT */
+	}	/* if (pdc_chassis_enabled) */
+#endif /* CONFIG_PDC_CHASSIS */
+	return retval;
+}
+
+#ifdef CONFIG_PDC_CHASSIS_WARN
+#ifdef CONFIG_PROC_FS
+/* seq_file show handler for /proc/chassis: decode the firmware warn word. */
+static int pdc_chassis_warn_show(struct seq_file *m, void *v)
+{
+	unsigned long warn;
+	u32 warnreg;
+
+	if (pdc_chassis_warn(&warn) != PDC_OK)
+		return -EIO;
+
+	/* only the low 32 bits carry the warning register */
+	warnreg = (warn & 0xFFFFFFFF);
+
+	/* bits 31..24: chassis component failure code, 0 = none */
+	if ((warnreg >> 24) & 0xFF)
+		seq_printf(m, "Chassis component failure! (eg fan or PSU): 0x%.2x\n",
+			   (warnreg >> 24) & 0xFF);
+
+	/* low bits: battery / temperature threshold flags */
+	seq_printf(m, "Battery: %s\n", (warnreg & 0x04) ? "Low!" : "OK");
+	seq_printf(m, "Temp low: %s\n", (warnreg & 0x02) ? "Exceeded!" : "OK");
+	seq_printf(m, "Temp mid: %s\n", (warnreg & 0x01) ? "Exceeded!" : "OK");
+	return 0;
+}
+
+/* open handler: bind the seq_file single-show callback */
+static int pdc_chassis_warn_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pdc_chassis_warn_show, NULL);
+}
+
+static const struct file_operations pdc_chassis_warn_fops = {
+	.open		= pdc_chassis_warn_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/*
+ * Create /proc/chassis (mode 0400) if the firmware implements the
+ * PDC chassis-warning call; probe with a test call first.
+ */
+static int __init pdc_chassis_create_procfs(void)
+{
+	unsigned long test;
+	int ret;
+
+	ret = pdc_chassis_warn(&test);
+	if ((ret == PDC_BAD_PROC) || (ret == PDC_BAD_OPTION)) {
+		/* seems that some boxes (eg L1000) do not implement this */
+		printk(KERN_INFO "Chassis warnings not supported.\n");
+		return 0;
+	}
+
+	printk(KERN_INFO "Enabling PDC chassis warnings support v%s\n",
+			PDC_CHASSIS_VER);
+	proc_create("chassis", 0400, NULL, &pdc_chassis_warn_fops);
+	return 0;
+}
+
+__initcall(pdc_chassis_create_procfs);
+
+#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_PDC_CHASSIS_WARN */
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/pdc_cons.c b/src/kernel/linux/v4.14/arch/parisc/kernel/pdc_cons.c
new file mode 100644
index 0000000..10a5ae9
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/pdc_cons.c
@@ -0,0 +1,281 @@
+/* 
+ *    PDC Console support - ie use firmware to dump text via boot console
+ *
+ *    Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
+ *    Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
+ *    Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
+ *    Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
+ *    Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
+ *    Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
+ *    Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
+ *    Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
+ *    Copyright (C) 2001 Helge Deller <deller at parisc-linux.org>
+ *    Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
+ *    Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ *    Copyright (C) 2010 Guy Martin <gmsoft at tuxicoman.be>
+ *
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/*
+ *  The PDC console is a simple console, which can be used for debugging 
+ *  boot related problems on HP PA-RISC machines. It is also useful when no
+ *  other console works.
+ *
+ *  This code uses the ROM (=PDC) based functions to read and write characters
+ *  from and to PDC's boot path.
+ */
+
+/* Define EARLY_BOOTUP_DEBUG to debug kernel related boot problems. 
+ * On production kernels EARLY_BOOTUP_DEBUG should be undefined. */
+#define EARLY_BOOTUP_DEBUG
+
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/major.h>
+#include <linux/tty.h>
+#include <asm/page.h>		/* for PAGE0 */
+#include <asm/pdc.h>		/* for iodc_call() proto and friends */
+
+static DEFINE_SPINLOCK(pdc_console_lock);
+static struct console pdc_cons;
+
+/*
+ * Console write callback: push @count bytes of @s to firmware.
+ * pdc_iodc_print() may consume fewer bytes than asked, so loop until
+ * everything is out; the spinlock serializes firmware access.
+ */
+static void pdc_console_write(struct console *co, const char *s, unsigned count)
+{
+	int i = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_console_lock, flags);
+	do {
+		i += pdc_iodc_print(s + i, count - i);
+	} while (i < count);
+	spin_unlock_irqrestore(&pdc_console_lock, flags);
+}
+
+/* Poll firmware for one input character; serialized by pdc_console_lock. */
+int pdc_console_poll_key(struct console *co)
+{
+	int c;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_console_lock, flags);
+	c = pdc_iodc_getc();
+	spin_unlock_irqrestore(&pdc_console_lock, flags);
+
+	return c;
+}
+
+/* Console setup callback: nothing to configure for the PDC console. */
+static int pdc_console_setup(struct console *co, char *options)
+{
+	return 0;
+}
+
+#if defined(CONFIG_PDC_CONSOLE)
+#include <linux/vt_kern.h>
+#include <linux/tty_flip.h>
+
+#define PDC_CONS_POLL_DELAY (30 * HZ / 1000)
+
+static void pdc_console_poll(unsigned long unused);
+static DEFINE_TIMER(pdc_console_timer, pdc_console_poll, 0, 0);
+static struct tty_port tty_port;
+
+/* tty open: attach the tty to the port and start the input poll timer. */
+static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp)
+{
+	tty_port_tty_set(&tty_port, tty);
+	mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
+
+	return 0;
+}
+
+/* tty close: on last close, stop polling and detach the tty. */
+static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp)
+{
+	if (tty->count == 1) {
+		del_timer_sync(&pdc_console_timer);
+		tty_port_tty_set(&tty_port, NULL);
+	}
+}
+
+/* tty write: forward straight to the console write path (unbuffered). */
+static int pdc_console_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	pdc_console_write(NULL, buf, count);
+	return count;
+}
+
+static int pdc_console_tty_write_room(struct tty_struct *tty)
+{
+	return 32768; /* no limit, no buffer used */
+}
+
+static int pdc_console_tty_chars_in_buffer(struct tty_struct *tty)
+{
+	return 0; /* no buffer */
+}
+
+/* tty operations for the polled PDC console device (ttyB). */
+static const struct tty_operations pdc_console_tty_ops = {
+	.open = pdc_console_tty_open,
+	.close = pdc_console_tty_close,
+	.write = pdc_console_tty_write,
+	.write_room = pdc_console_tty_write_room,
+	.chars_in_buffer = pdc_console_tty_chars_in_buffer,
+};
+
+/* Timer callback: drain pending firmware input into the tty flip buffer. */
+static void pdc_console_poll(unsigned long unused)
+{
+	int data, count = 0;
+
+	while (1) {
+		data = pdc_console_poll_key(NULL);
+		if (data == -1)
+			break;
+		tty_insert_flip_char(&tty_port, data & 0xFF, TTY_NORMAL);
+		count ++;
+	}
+
+	if (count)
+		tty_flip_buffer_push(&tty_port);
+
+	/* keep polling as long as the console is still enabled */
+	if (pdc_cons.flags & CON_ENABLED)
+		mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
+}
+
+static struct tty_driver *pdc_console_tty_driver;
+
+/*
+ * Register the ttyB tty driver, but only if the PDC console is still
+ * registered (i.e. it was selected as the primary console); otherwise
+ * there is no point in creating the tty device.
+ */
+static int __init pdc_console_tty_driver_init(void)
+{
+	int err;
+
+	/* Check if the console driver is still registered.
+	 * It is unregistered if the pdc console was not selected as the
+	 * primary console. */
+
+	struct console *tmp;
+
+	console_lock();
+	for_each_console(tmp)
+		if (tmp == &pdc_cons)
+			break;
+	console_unlock();
+
+	if (!tmp) {
+		printk(KERN_INFO "PDC console driver not registered anymore, not creating %s\n", pdc_cons.name);
+		return -ENODEV;
+	}
+
+	/* switch from boot console to a regular, tty-backed console */
+	printk(KERN_INFO "The PDC console driver is still registered, removing CON_BOOT flag\n");
+	pdc_cons.flags &= ~CON_BOOT;
+
+	pdc_console_tty_driver = alloc_tty_driver(1);
+
+	if (!pdc_console_tty_driver)
+		return -ENOMEM;
+
+	tty_port_init(&tty_port);
+
+	pdc_console_tty_driver->driver_name = "pdc_cons";
+	pdc_console_tty_driver->name = "ttyB";
+	pdc_console_tty_driver->major = MUX_MAJOR;
+	pdc_console_tty_driver->minor_start = 0;
+	pdc_console_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
+	pdc_console_tty_driver->init_termios = tty_std_termios;
+	pdc_console_tty_driver->flags = TTY_DRIVER_REAL_RAW |
+		TTY_DRIVER_RESET_TERMIOS;
+	tty_set_operations(pdc_console_tty_driver, &pdc_console_tty_ops);
+	tty_port_link_device(&tty_port, pdc_console_tty_driver, 0);
+
+	err = tty_register_driver(pdc_console_tty_driver);
+	if (err) {
+		printk(KERN_ERR "Unable to register the PDC console TTY driver\n");
+		tty_port_destroy(&tty_port);
+		return err;
+	}
+
+	return 0;
+}
+device_initcall(pdc_console_tty_driver_init);
+
+/* Console .device callback: map the console to its tty driver/index. */
+static struct tty_driver * pdc_console_device (struct console *c, int *index)
+{
+	*index = c->index;
+	return pdc_console_tty_driver;
+}
+#else
+#define pdc_console_device NULL
+#endif
+
+/* The PDC boot console; CON_BOOT is cleared later if it stays primary. */
+static struct console pdc_cons = {
+	.name =		"ttyB",
+	.write =	pdc_console_write,
+	.device =	pdc_console_device,
+	.setup =	pdc_console_setup,
+	.flags =	CON_BOOT | CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+static int pdc_console_initialized;
+
+/* Register the PDC console exactly once (idempotent). */
+static void pdc_console_init_force(void)
+{
+	if (pdc_console_initialized)
+		return;
+	++pdc_console_initialized;
+	
+	/* If the console is duplex then copy the COUT parameters to CIN. */
+	if (PAGE0->mem_cons.cl_class == CL_DUPLEX)
+		memcpy(&PAGE0->mem_kbd, &PAGE0->mem_cons, sizeof(PAGE0->mem_cons));
+
+	/* register the pdc console */
+	register_console(&pdc_cons);
+}
+
+/* Boot-time entry point: register the PDC console when configured. */
+void __init pdc_console_init(void)
+{
+#if defined(EARLY_BOOTUP_DEBUG) || defined(CONFIG_PDC_CONSOLE)
+	pdc_console_init_force();
+#endif
+#ifdef EARLY_BOOTUP_DEBUG
+	printk(KERN_INFO "Initialized PDC Console for debugging.\n");
+#endif
+}
+
+
+/*
+ * Used for emergencies. Currently only used if an HPMC occurs. If an
+ * HPMC occurs, it is possible that the current console may not be
+ * properly initialised after the PDC IO reset. This routine unregisters
+ * all of the current consoles, reinitializes the pdc console and
+ * registers it.
+ */
+
+void pdc_console_restart(void)
+{
+	struct console *console;
+
+	/* nothing to do if the PDC console is already the active console */
+	if (pdc_console_initialized)
+		return;
+
+	/* If we've already seen the output, don't bother to print it again */
+	if (console_drivers != NULL)
+		pdc_cons.flags &= ~CON_PRINTBUFFER;
+
+	/* tear down every currently registered console */
+	while ((console = console_drivers) != NULL)
+		unregister_console(console_drivers);
+
+	/* force registering the pdc console */
+	pdc_console_init_force();
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/pdt.c b/src/kernel/linux/v4.14/arch/parisc/kernel/pdt.c
new file mode 100644
index 0000000..e07eb34
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/pdt.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *    Page Deallocation Table (PDT) support
+ *
+ *    The Page Deallocation Table (PDT) is maintained by firmware and holds a
+ *    list of memory addresses in which memory errors were detected.
+ *    The list contains both single-bit (correctable) and double-bit
+ *    (uncorrectable) errors.
+ *
+ *    Copyright 2017 by Helge Deller <deller@gmx.de>
+ *
+ *    possible future enhancements:
+ *    - add userspace interface via procfs or sysfs to clear PDT
+ */
+
+#include <linux/memblock.h>
+#include <linux/seq_file.h>
+#include <linux/kthread.h>
+#include <linux/initrd.h>
+
+#include <asm/pdc.h>
+#include <asm/pdcpat.h>
+#include <asm/sections.h>
+#include <asm/pgtable.h>
+
+enum pdt_access_type {
+	PDT_NONE,
+	PDT_PDC,
+	PDT_PAT_NEW,
+	PDT_PAT_CELL
+};
+
+static enum pdt_access_type pdt_type;
+
+/* PDT poll interval: 1 minute if errors, 5 minutes if everything OK. */
+#define PDT_POLL_INTERVAL_DEFAULT	(5*60*HZ)
+#define PDT_POLL_INTERVAL_SHORT		(1*60*HZ)
+static unsigned long pdt_poll_interval = PDT_POLL_INTERVAL_DEFAULT;
+
+/* global PDT status information */
+static struct pdc_mem_retinfo pdt_status;
+
+#define MAX_PDT_TABLE_SIZE	PAGE_SIZE
+#define MAX_PDT_ENTRIES		(MAX_PDT_TABLE_SIZE / sizeof(unsigned long))
+static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss;
+
+/*
+ * Constants for the pdt_entry format:
+ * A pdt_entry holds the physical address in bits 0-57, bits 58-61 are
+ * reserved, bit 62 is the perm bit and bit 63 is the error_type bit.
+ * The perm bit indicates whether the error have been verified as a permanent
+ * error (value of 1) or has not been verified, and may be transient (value
+ * of 0). The error_type bit indicates whether the error is a single bit error
+ * (value of 1) or a multiple bit error.
+ * On non-PAT machines phys_addr is encoded in bits 0-59 and error_type in bit
+ * 63. Those machines don't provide the perm bit.
+ */
+
+#define PDT_ADDR_PHYS_MASK	(pdt_type != PDT_PDC ? ~0x3f : ~0x0f)
+#define PDT_ADDR_PERM_ERR	(pdt_type != PDT_PDC ? 2UL : 0UL)
+#define PDT_ADDR_SINGLE_ERR	1UL
+
+/* report PDT entries via /proc/meminfo; silent when no PDT is available */
+void arch_report_meminfo(struct seq_file *m)
+{
+	if (pdt_type == PDT_NONE)
+		return;
+
+	seq_printf(m, "PDT_max_entries: %7lu\n",
+			pdt_status.pdt_size);
+	seq_printf(m, "PDT_cur_entries: %7lu\n",
+			pdt_status.pdt_entries);
+}
+
+/*
+ * Query PDT info via the newer PAT firmware call and fill the global
+ * pdt_status.  Returns the PDC status code, or PDC_BAD_PROC when not
+ * running on PAT firmware.
+ */
+static int get_info_pat_new(void)
+{
+	struct pdc_pat_mem_retinfo pat_rinfo;
+	int ret;
+
+	/* newer PAT machines like C8000 report info for all cells */
+	if (is_pdc_pat())
+		ret = pdc_pat_mem_pdt_info(&pat_rinfo);
+	else
+		return PDC_BAD_PROC;
+
+	pdt_status.pdt_size = pat_rinfo.max_pdt_entries;
+	pdt_status.pdt_entries = pat_rinfo.current_pdt_entries;
+	pdt_status.pdt_status = 0;
+	pdt_status.first_dbe_loc = pat_rinfo.first_dbe_loc;
+	pdt_status.good_mem = pat_rinfo.good_mem;
+
+	return ret;
+}
+
+/*
+ * Query PDT info via the older per-cell PAT firmware call and fill the
+ * global pdt_status.  Returns the PDC status code, or PDC_BAD_PROC when
+ * not running on PAT firmware.
+ */
+static int get_info_pat_cell(void)
+{
+	struct pdc_pat_mem_cell_pdt_retinfo cell_rinfo;
+	int ret;
+
+	/* older PAT machines like rp5470 report cell info only */
+	if (is_pdc_pat())
+		ret = pdc_pat_mem_pdt_cell_info(&cell_rinfo, parisc_cell_num);
+	else
+		return PDC_BAD_PROC;
+
+	pdt_status.pdt_size = cell_rinfo.max_pdt_entries;
+	pdt_status.pdt_entries = cell_rinfo.current_pdt_entries;
+	pdt_status.pdt_status = 0;
+	pdt_status.first_dbe_loc = cell_rinfo.first_dbe_loc;
+	pdt_status.good_mem = cell_rinfo.good_mem;
+
+	return ret;
+}
+
+/*
+ * Log one PDT entry (@pde) as a bad-memory warning, decoding the
+ * physical address and the perm/single-bit flag bits; on PAT machines
+ * the DIMM slot of the failing address is included in the message.
+ */
+static void report_mem_err(unsigned long pde)
+{
+	struct pdc_pat_mem_phys_mem_location loc;
+	unsigned long addr;
+	char dimm_txt[32];
+
+	addr = pde & PDT_ADDR_PHYS_MASK;
+
+	/* show DIMM slot description on PAT machines */
+	if (is_pdc_pat()) {
+		pdc_pat_mem_get_dimm_phys_location(&loc, addr);
+		sprintf(dimm_txt, "DIMM slot %02x, ", loc.dimm_slot);
+	} else
+		dimm_txt[0] = 0;
+
+	pr_warn("PDT: BAD MEMORY at 0x%08lx, %s%s%s-bit error.\n",
+		addr, dimm_txt,
+		pde & PDT_ADDR_PERM_ERR ? "permanent ":"",
+		pde & PDT_ADDR_SINGLE_ERR ? "single":"multi");
+}
+
+
+/*
+ * pdc_pdt_init()
+ *
+ * Initialize kernel PDT structures, read initial PDT table from firmware,
+ * report all current PDT entries and mark bad memory with memblock_reserve()
+ * to avoid that the kernel will use broken memory areas.
+ *
+ */
+void __init pdc_pdt_init(void)
+{
+	int ret, i;
+	unsigned long entries;
+	struct pdc_mem_read_pdt pdt_read_ret;
+
+	/* Probe firmware access methods from newest to oldest:
+	 * PAT (all cells) -> PAT (per cell) -> standard PDC. */
+	pdt_type = PDT_PAT_NEW;
+	ret = get_info_pat_new();
+
+	if (ret != PDC_OK) {
+		pdt_type = PDT_PAT_CELL;
+		ret = get_info_pat_cell();
+	}
+
+	if (ret != PDC_OK) {
+		pdt_type = PDT_PDC;
+		/* non-PAT machines provide the standard PDC call */
+		ret = pdc_mem_pdt_info(&pdt_status);
+	}
+
+	if (ret != PDC_OK) {
+		pdt_type = PDT_NONE;
+		pr_info("PDT: Firmware does not provide any page deallocation"
+			" information.\n");
+		return;
+	}
+
+	/* clamp to what fits in the static pdt_entry[] table */
+	entries = pdt_status.pdt_entries;
+	if (WARN_ON(entries > MAX_PDT_ENTRIES))
+		entries = pdt_status.pdt_entries = MAX_PDT_ENTRIES;
+
+	pr_info("PDT: type %s, size %lu, entries %lu, status %lu, dbe_loc 0x%lx,"
+		" good_mem %lu MB\n",
+			pdt_type == PDT_PDC ? __stringify(PDT_PDC) :
+			pdt_type == PDT_PAT_CELL ? __stringify(PDT_PAT_CELL)
+						 : __stringify(PDT_PAT_NEW),
+			pdt_status.pdt_size, pdt_status.pdt_entries,
+			pdt_status.pdt_status, pdt_status.first_dbe_loc,
+			pdt_status.good_mem / 1024 / 1024);
+
+	if (entries == 0) {
+		pr_info("PDT: Firmware reports all memory OK.\n");
+		return;
+	}
+
+	/* a double-bit error below _end means the kernel image is affected */
+	if (pdt_status.first_dbe_loc &&
+		pdt_status.first_dbe_loc <= __pa((unsigned long)&_end))
+		pr_crit("CRITICAL: Bad memory inside kernel image memory area!\n");
+
+	pr_warn("PDT: Firmware reports %lu entries of faulty memory:\n",
+		entries);
+
+	/* read the PDT itself, using the access method probed above */
+	if (pdt_type == PDT_PDC)
+		ret = pdc_mem_pdt_read_entries(&pdt_read_ret, pdt_entry);
+	else {
+#ifdef CONFIG_64BIT
+		struct pdc_pat_mem_read_pd_retinfo pat_pret;
+
+		if (pdt_type == PDT_PAT_CELL)
+			ret = pdc_pat_mem_read_cell_pdt(&pat_pret, pdt_entry,
+				MAX_PDT_ENTRIES);
+		else
+			ret = pdc_pat_mem_read_pd_pdt(&pat_pret, pdt_entry,
+				MAX_PDT_TABLE_SIZE, 0);
+#else
+		ret = PDC_BAD_PROC;
+#endif
+	}
+
+	if (ret != PDC_OK) {
+		pdt_type = PDT_NONE;
+		pr_warn("PDT: Get PDT entries failed with %d\n", ret);
+		return;
+	}
+
+	for (i = 0; i < pdt_status.pdt_entries; i++) {
+		unsigned long addr;
+
+		report_mem_err(pdt_entry[i]);
+
+		addr = pdt_entry[i] & PDT_ADDR_PHYS_MASK;
+		if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
+			addr >= initrd_start && addr < initrd_end)
+			pr_crit("CRITICAL: initrd possibly broken "
+				"due to bad memory!\n");
+
+		/* mark memory page bad */
+		memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE);
+	}
+}
+
+
+/*
+ * This is the PDT kernel thread main loop.
+ */
+
+static int pdt_mainloop(void *unused)
+{
+	struct pdc_mem_read_pdt pdt_read_ret;
+	struct pdc_pat_mem_read_pd_retinfo pat_pret __maybe_unused;
+	unsigned long old_num_entries;
+	unsigned long *bad_mem_ptr;
+	int num, ret;
+
+	/* Periodically re-query firmware for new PDT entries and hand
+	 * any newly reported bad pages to the memory-failure code. */
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		old_num_entries = pdt_status.pdt_entries;
+
+		schedule_timeout(pdt_poll_interval);
+		if (kthread_should_stop())
+			break;
+
+		/* Do we have new PDT entries? */
+		switch (pdt_type) {
+		case PDT_PAT_NEW:
+			ret = get_info_pat_new();
+			break;
+		case PDT_PAT_CELL:
+			ret = get_info_pat_cell();
+			break;
+		default:
+			ret = pdc_mem_pdt_info(&pdt_status);
+			break;
+		}
+
+		if (ret != PDC_OK) {
+			pr_warn("PDT: unexpected failure %d\n", ret);
+			return -EINVAL;
+		}
+
+		/* if no new PDT entries, just wait again */
+		num = pdt_status.pdt_entries - old_num_entries;
+		if (num <= 0)
+			continue;
+
+		/* decrease poll interval in case we found memory errors */
+		if (pdt_status.pdt_entries &&
+			pdt_poll_interval == PDT_POLL_INTERVAL_DEFAULT)
+			pdt_poll_interval = PDT_POLL_INTERVAL_SHORT;
+
+		/* limit entries to get */
+		if (num > MAX_PDT_ENTRIES) {
+			num = MAX_PDT_ENTRIES;
+			pdt_status.pdt_entries = old_num_entries + num;
+		}
+
+		/* get new entries */
+		switch (pdt_type) {
+#ifdef CONFIG_64BIT
+		case PDT_PAT_CELL:
+			if (pdt_status.pdt_entries > MAX_PDT_ENTRIES) {
+				pr_crit("PDT: too many entries.\n");
+				return -ENOMEM;
+			}
+			ret = pdc_pat_mem_read_cell_pdt(&pat_pret, pdt_entry,
+				MAX_PDT_ENTRIES);
+			bad_mem_ptr = &pdt_entry[old_num_entries];
+			break;
+		case PDT_PAT_NEW:
+			/* this call reads only the new tail of the PDT */
+			ret = pdc_pat_mem_read_pd_pdt(&pat_pret,
+				pdt_entry,
+				num * sizeof(unsigned long),
+				old_num_entries * sizeof(unsigned long));
+			bad_mem_ptr = &pdt_entry[0];
+			break;
+#endif
+		default:
+			ret = pdc_mem_pdt_read_entries(&pdt_read_ret,
+				pdt_entry);
+			bad_mem_ptr = &pdt_entry[old_num_entries];
+			break;
+		}
+
+		/* report and mark memory broken */
+		while (num--) {
+			unsigned long pde = *bad_mem_ptr++;
+
+			report_mem_err(pde);
+
+#ifdef CONFIG_MEMORY_FAILURE
+			/* permanent or multi-bit errors are hard-failed;
+			 * transient single-bit errors are soft-offlined */
+			if ((pde & PDT_ADDR_PERM_ERR) ||
+			    ((pde & PDT_ADDR_SINGLE_ERR) == 0))
+				memory_failure(pde >> PAGE_SHIFT, 0, 0);
+			else
+				soft_offline_page(
+					pfn_to_page(pde >> PAGE_SHIFT), 0);
+#else
+			pr_crit("PDT: memory error at 0x%lx ignored.\n"
+				"Rebuild kernel with CONFIG_MEMORY_FAILURE=y "
+				"for real handling.\n",
+				pde & PDT_ADDR_PHYS_MASK);
+#endif
+
+		}
+	}
+
+	return 0;
+}
+
+
+/* Start the "kpdtd" polling kthread, unless no PDT support was found. */
+static int __init pdt_initcall(void)
+{
+	struct task_struct *kpdtd_task;
+
+	if (pdt_type == PDT_NONE)
+		return -ENODEV;
+
+	kpdtd_task = kthread_create(pdt_mainloop, NULL, "kpdtd");
+	if (IS_ERR(kpdtd_task))
+		return PTR_ERR(kpdtd_task);
+
+	wake_up_process(kpdtd_task);
+
+	return 0;
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/perf.c b/src/kernel/linux/v4.14/arch/parisc/kernel/perf.c
new file mode 100644
index 0000000..0813359
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/perf.c
@@ -0,0 +1,851 @@
+/*
+ *  Parisc performance counters
+ *  Copyright (C) 2001 Randolph Chung <tausq@debian.org>
+ *
+ *  This code is derived, with permission, from HP/UX sources.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ *  Edited comment from original sources:
+ *
+ *  This driver programs the PCX-U/PCX-W performance counters
+ *  on the PA-RISC 2.0 chips.  The driver keeps all images now
+ *  internally to the kernel to hopefully eliminate the possibility
+ *  of a bad image halting the CPU.  Also, there are different
+ *  images for the PCX-W and later chips vs the PCX-U chips.
+ *
+ *  Only 1 process is allowed to access the driver at any time,
+ *  so the only protection that is needed is at open and close.
+ *  A variable "perf_enabled" is used to hold the state of the
+ *  driver.  The spinlock "perf_lock" is used to protect the
+ *  modification of the state during open/close operations so
+ *  multiple processes don't get into the driver simultaneously.
+ *
+ *  This driver accesses the processor directly vs going through
+ *  the PDC INTRIGUE calls.  This is done to eliminate bugs introduced
+ *  in various PDC revisions.  The code is much more maintainable
+ *  and reliable this way vs having to debug on every version of PDC
+ *  on every box.
+ */
+
+#include <linux/capability.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+
+#include <linux/uaccess.h>
+#include <asm/perf.h>
+#include <asm/parisc-device.h>
+#include <asm/processor.h>
+#include <asm/runway.h>
+#include <asm/io.h>		/* for __raw_read() */
+
+#include "perf_images.h"
+
+/* Largest number of 64-bit words held by any single RDR (RDR16 needs 24). */
+#define MAX_RDR_WORDS	24
+#define PERF_VERSION	2	/* derived from hpux's PI v2 interface */
+
+/* definition of RDR regs */
+struct rdr_tbl_ent {
+	uint16_t	width;		/* total width of the RDR in bits (0 = unimplemented) */
+	uint8_t		num_words;	/* 64-bit words needed to hold it */
+	uint8_t		write_control;	/* byte offset into bitmask_array; 0 = fully writable */
+};
+
+/* Counter interface of the boot CPU; determined in perf_init(). */
+static int perf_processor_interface __read_mostly = UNKNOWN_INTF;
+/* Non-zero while a process holds the device open (see perf_open/perf_release). */
+static int perf_enabled __read_mostly;
+static DEFINE_SPINLOCK(perf_lock);
+/* First CPU's device; used by perf_write_image() to map Runway registers. */
+struct parisc_device *cpu_device __read_mostly;
+
+/* RDRs to write for PCX-W; list is terminated by -1 */
+static const int perf_rdrs_W[] =
+	{ 0, 1, 4, 5, 6, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };
+
+/* RDRs to write for PCX-U; list is terminated by -1 */
+static const int perf_rdrs_U[] =
+	{ 0, 1, 4, 5, 6, 7, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };
+
+/* RDR register descriptions for PCX-W */
+/*
+ * RDR register descriptions for PCX-W.
+ * Each entry is { width in bits, 64-bit words needed, write_control },
+ * where a non-zero write_control is a byte offset into bitmask_array.
+ */
+static const struct rdr_tbl_ent perf_rdr_tbl_W[] = {
+	{ 19,	1,	8 },   /* RDR 0 */
+	{ 16,	1,	16 },  /* RDR 1 */
+	{ 72,	2,	0 },   /* RDR 2 */
+	{ 81,	2,	0 },   /* RDR 3 */
+	{ 328,	6,	0 },   /* RDR 4 */
+	{ 160,	3,	0 },   /* RDR 5 */
+	{ 336,	6,	0 },   /* RDR 6 */
+	{ 164,	3,	0 },   /* RDR 7 */
+	{ 0,	0,	0 },   /* RDR 8 */
+	{ 35,	1,	0 },   /* RDR 9 */
+	{ 6,	1,	0 },   /* RDR 10 */
+	{ 18,	1,	0 },   /* RDR 11 */
+	{ 13,	1,	0 },   /* RDR 12 */
+	{ 8,	1,	0 },   /* RDR 13 */
+	{ 8,	1,	0 },   /* RDR 14 */
+	{ 8,	1,	0 },   /* RDR 15 */
+	{ 1530,	24,	0 },   /* RDR 16 */
+	{ 16,	1,	0 },   /* RDR 17 */
+	{ 4,	1,	0 },   /* RDR 18 */
+	{ 0,	0,	0 },   /* RDR 19 */
+	{ 152,	3,	24 },  /* RDR 20 */
+	{ 152,	3,	24 },  /* RDR 21 */
+	{ 233,	4,	48 },  /* RDR 22 */
+	{ 233,	4,	48 },  /* RDR 23 */
+	{ 71,	2,	0 },   /* RDR 24 */
+	{ 71,	2,	0 },   /* RDR 25 */
+	{ 11,	1,	0 },   /* RDR 26 */
+	{ 18,	1,	0 },   /* RDR 27 */
+	{ 128,	2,	0 },   /* RDR 28 */
+	{ 0,	0,	0 },   /* RDR 29 */
+	{ 16,	1,	0 },   /* RDR 30 */
+	{ 16,	1,	0 },   /* RDR 31 */
+};
+
+/* RDR register descriptions for PCX-U */
+/*
+ * RDR register descriptions for PCX-U.
+ * Same field layout as perf_rdr_tbl_W: { width, num_words, write_control }.
+ * Width 0 marks an RDR that is not implemented on this CPU.
+ */
+static const struct rdr_tbl_ent perf_rdr_tbl_U[] = {
+	{ 19,	1,	8 },              /* RDR 0 */
+	{ 32,	1,	16 },             /* RDR 1 */
+	{ 20,	1,	0 },              /* RDR 2 */
+	{ 0,	0,	0 },              /* RDR 3 */
+	{ 344,	6,	0 },              /* RDR 4 */
+	{ 176,	3,	0 },              /* RDR 5 */
+	{ 336,	6,	0 },              /* RDR 6 */
+	{ 0,	0,	0 },              /* RDR 7 */
+	{ 0,	0,	0 },              /* RDR 8 */
+	{ 0,	0,	0 },              /* RDR 9 */
+	{ 28,	1,	0 },              /* RDR 10 */
+	{ 33,	1,	0 },              /* RDR 11 */
+	{ 0,	0,	0 },              /* RDR 12 */
+	{ 230,	4,	0 },              /* RDR 13 */
+	{ 32,	1,	0 },              /* RDR 14 */
+	{ 128,	2,	0 },              /* RDR 15 */
+	{ 1494,	24,	0 },              /* RDR 16 */
+	{ 18,	1,	0 },              /* RDR 17 */
+	{ 4,	1,	0 },              /* RDR 18 */
+	{ 0,	0,	0 },              /* RDR 19 */
+	{ 158,	3,	24 },             /* RDR 20 */
+	{ 158,	3,	24 },             /* RDR 21 */
+	{ 194,	4,	48 },             /* RDR 22 */
+	{ 194,	4,	48 },             /* RDR 23 */
+	{ 71,	2,	0 },              /* RDR 24 */
+	{ 71,	2,	0 },              /* RDR 25 */
+	{ 28,	1,	0 },              /* RDR 26 */
+	{ 33,	1,	0 },              /* RDR 27 */
+	{ 88,	2,	0 },              /* RDR 28 */
+	{ 32,	1,	0 },              /* RDR 29 */
+	{ 24,	1,	0 },              /* RDR 30 */
+	{ 16,	1,	0 },              /* RDR 31 */
+};
+
+/*
+ * A non-zero write_control in the above tables is a byte offset into
+ * this array.
+ */
+/* Indexed via rdr_tbl_ent.write_control >> 3 in perf_write_image(). */
+static const uint64_t perf_bitmasks[] = {
+	0x0000000000000000ul,     /* first dbl word must be zero */
+	0xfdffe00000000000ul,     /* RDR0 bitmask */
+	0x003f000000000000ul,     /* RDR1 bitmask */
+	0x00fffffffffffffful,     /* RDR20-RDR21 bitmask (152 bits) */
+	0xfffffffffffffffful,
+	0xfffffffc00000000ul,
+	0xfffffffffffffffful,     /* RDR22-RDR23 bitmask (233 bits) */
+	0xfffffffffffffffful,
+	0xfffffffffffffffcul,
+	0xff00000000000000ul
+};
+
+/*
+ * Write control bitmasks for Pa-8700 processor given
+ * some things have changed slightly.
+ */
+static const uint64_t perf_bitmasks_piranha[] = {
+	0x0000000000000000ul,     /* first dbl word must be zero */
+	0xfdffe00000000000ul,     /* RDR0 bitmask */
+	0x003f000000000000ul,     /* RDR1 bitmask */
+	0x00fffffffffffffful,     /* RDR20-RDR21 bitmask (158 bits) */
+	0xfffffffffffffffful,
+	0xfffffffc00000000ul,
+	0xfffffffffffffffful,     /* RDR22-RDR23 bitmask (210 bits) */
+	0xfffffffffffffffful,
+	0xfffffffffffffffful,
+	0xfffc000000000000ul
+};
+
+/* Chosen once in perf_init(): perf_bitmasks or perf_bitmasks_piranha. */
+static const uint64_t *bitmask_array;   /* array of bitmasks to use */
+
+/******************************************************************************
+ * Function Prototypes
+ *****************************************************************************/
+/* Forward declarations; see the function definitions below for details. */
+static int perf_config(uint32_t *image_ptr);
+static int perf_release(struct inode *inode, struct file *file);
+static int perf_open(struct inode *inode, struct file *file);
+static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
+static ssize_t perf_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos);
+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+static void perf_start_counters(void);
+static int perf_stop_counters(uint32_t *raddr);
+static const struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num);
+static int perf_rdr_read_ubuf(uint32_t	rdr_num, uint64_t *buffer);
+static int perf_rdr_clear(uint32_t rdr_num);
+static int perf_write_image(uint64_t *memaddr);
+static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer);
+
+/* External Assembly Routines (implemented in perf_asm.S) */
+extern uint64_t perf_rdr_shift_in_W (uint32_t rdr_num, uint16_t width);
+extern uint64_t perf_rdr_shift_in_U (uint32_t rdr_num, uint16_t width);
+extern void perf_rdr_shift_out_W (uint32_t rdr_num, uint64_t buffer);
+extern void perf_rdr_shift_out_U (uint32_t rdr_num, uint64_t buffer);
+extern void perf_intrigue_enable_perf_counters (void);
+extern void perf_intrigue_disable_perf_counters (void);
+
+/******************************************************************************
+ * Function Definitions
+ *****************************************************************************/
+
+
+/*
+ * configure:
+ *
+ * Configure the cpu with a given data image.  First turn off the counters,
+ * then download the image, then turn the counters back on.
+ */
+/*
+ * configure:
+ *
+ * Configure the cpu with a given data image.  First turn off the counters,
+ * then download the image, then turn the counters back on.
+ *
+ * Returns sizeof(uint32_t) on success (the byte count perf_write()
+ * passes back to userspace) or -EINVAL on any failure.
+ */
+static int perf_config(uint32_t *image_ptr)
+{
+	long error;
+	uint32_t raddr[4];	/* counter values read back and discarded */
+
+	/* Stop the counters*/
+	error = perf_stop_counters(raddr);
+	if (error != 0) {
+		printk("perf_config: perf_stop_counters = %ld\n", error);
+		return -EINVAL;
+	}
+
+/* NOTE(review): leftover debug printk, no KERN_ log level -- consider pr_debug */
+printk("Preparing to write image\n");
+	/* Write the image to the chip */
+	error = perf_write_image((uint64_t *)image_ptr);
+	if (error != 0) {
+		printk("perf_config: DOWNLOAD = %ld\n", error);
+		return -EINVAL;
+	}
+
+printk("Preparing to start counters\n");
+
+	/* Start the counters */
+	perf_start_counters();
+
+	return sizeof(uint32_t);
+}
+
+/*
+ * Open the device and initialize all of its memory.  The device is only
+ * opened once, but can be "queried" by multiple processes that know its
+ * file descriptor.
+ */
+/*
+ * Open the device.  Only one opener is allowed at a time; the
+ * perf_enabled flag (guarded by perf_lock) enforces this.  Other
+ * processes that know the file descriptor may still use it.
+ */
+static int perf_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+
+	spin_lock(&perf_lock);
+	if (perf_enabled)
+		ret = -EBUSY;		/* somebody already owns the device */
+	else
+		perf_enabled = 1;
+	spin_unlock(&perf_lock);
+
+	return ret;
+}
+
+/*
+ * Close the device.
+ */
+/*
+ * Close the device.
+ *
+ * Drops the single-owner flag so a subsequent perf_open() can succeed.
+ */
+static int perf_release(struct inode *inode, struct file *file)
+{
+	spin_lock(&perf_lock);
+	perf_enabled = 0;
+	spin_unlock(&perf_lock);
+
+	return 0;
+}
+
+/*
+ * Read does nothing for this driver
+ */
+/*
+ * Read does nothing for this driver -- always reports 0 bytes.
+ * Counter values are retrieved via the PA_PERF_OFF ioctl instead.
+ */
+static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos)
+{
+	return 0;
+}
+
+/*
+ * write:
+ *
+ * This routine downloads the image to the chip.  It must be
+ * called on the processor that the download should happen
+ * on.
+ */
+/*
+ * write:
+ *
+ * This routine downloads the image to the chip.  It must be
+ * called on the processor that the download should happen
+ * on.
+ *
+ * Userspace writes a single uint32_t selector: the upper 16 bits name
+ * the interface type (CUDA/ONYX), the lower 16 bits pick the image.
+ *
+ * Cleanup vs. the original: the 'image_size' local was assigned but
+ * never used, and the trailing 'return count' was unreachable (both
+ * branches above it return) -- both removed.
+ */
+static ssize_t perf_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	uint32_t image_type;
+	uint32_t interface_type;
+	uint32_t test;
+
+	/* Reject processors we carry no images for. */
+	if (perf_processor_interface != ONYX_INTF &&
+	    perf_processor_interface != CUDA_INTF)
+		return -EFAULT;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	/* Userspace must write exactly one 32-bit selector. */
+	if (count != sizeof(uint32_t))
+		return -EIO;
+
+	if (copy_from_user(&image_type, buf, sizeof(uint32_t)))
+		return -EFAULT;
+
+	/* Get the interface type and test type */
+	interface_type = (image_type >> 16) & 0xffff;
+	test           = (image_type & 0xffff);
+
+	/* Make sure everything makes sense */
+
+	/* First check the machine type is correct for
+	   the requested image */
+	if (((perf_processor_interface == CUDA_INTF) &&
+			(interface_type != CUDA_INTF)) ||
+	    ((perf_processor_interface == ONYX_INTF) &&
+			(interface_type != ONYX_INTF)))
+		return -EINVAL;
+
+	/* Next check to make sure the requested image
+	   is valid */
+	if (((interface_type == CUDA_INTF) &&
+			(test >= MAX_CUDA_IMAGES)) ||
+	    ((interface_type == ONYX_INTF) &&
+			(test >= MAX_ONYX_IMAGES)))
+		return -EINVAL;
+
+	/* Copy the image into the processor */
+	if (interface_type == CUDA_INTF)
+		return perf_config(cuda_images[test]);
+
+	return perf_config(onyx_images[test]);
+}
+
+/*
+ * Patch the images that need to know the IVA addresses.
+ */
+/*
+ * Patch the images that need to know the IVA addresses.
+ *
+ * NOTE(review): the entire body is compiled out by '#if 0' (see the
+ * FIXME below), so this function is currently a no-op.
+ */
+static void perf_patch_images(void)
+{
+#if 0 /* FIXME!! */
+/*
+ * NOTE:  this routine is VERY specific to the current TLB image.
+ * If the image is changed, this routine might also need to be changed.
+ */
+	extern void $i_itlb_miss_2_0();
+	extern void $i_dtlb_miss_2_0();
+	extern void PA2_0_iva();
+
+	/*
+	 * We can only use the lower 32-bits, the upper 32-bits should be 0
+	 * anyway given this is in the kernel
+	 */
+	uint32_t itlb_addr  = (uint32_t)&($i_itlb_miss_2_0);
+	uint32_t dtlb_addr  = (uint32_t)&($i_dtlb_miss_2_0);
+	uint32_t IVAaddress = (uint32_t)&PA2_0_iva;
+
+	if (perf_processor_interface == ONYX_INTF) {
+		/* clear last 2 bytes */
+		onyx_images[TLBMISS][15] &= 0xffffff00;
+		/* set 2 bytes */
+		onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
+		onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
+		onyx_images[TLBMISS][17] = itlb_addr;
+
+		/* clear last 2 bytes */
+		onyx_images[TLBHANDMISS][15] &= 0xffffff00;
+		/* set 2 bytes */
+		onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
+		onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
+		onyx_images[TLBHANDMISS][17] = itlb_addr;
+
+		/* clear last 2 bytes */
+		onyx_images[BIG_CPI][15] &= 0xffffff00;
+		/* set 2 bytes */
+		onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
+		onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
+		onyx_images[BIG_CPI][17] = itlb_addr;
+
+	    onyx_images[PANIC][15] &= 0xffffff00;  /* clear last 2 bytes */
+	 	onyx_images[PANIC][15] |= (0x000000ff&((IVAaddress) >> 24)); /* set 2 bytes */
+		onyx_images[PANIC][16] = (IVAaddress << 8)&0xffffff00;
+
+
+	} else if (perf_processor_interface == CUDA_INTF) {
+		/* Cuda interface */
+		cuda_images[TLBMISS][16] =
+			(cuda_images[TLBMISS][16]&0xffff0000) |
+			((dtlb_addr >> 8)&0x0000ffff);
+		cuda_images[TLBMISS][17] =
+			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
+		cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
+
+		cuda_images[TLBHANDMISS][16] =
+			(cuda_images[TLBHANDMISS][16]&0xffff0000) |
+			((dtlb_addr >> 8)&0x0000ffff);
+		cuda_images[TLBHANDMISS][17] =
+			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
+		cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
+
+		cuda_images[BIG_CPI][16] =
+			(cuda_images[BIG_CPI][16]&0xffff0000) |
+			((dtlb_addr >> 8)&0x0000ffff);
+		cuda_images[BIG_CPI][17] =
+			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
+		cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
+	} else {
+		/* Unknown type */
+	}
+#endif
+}
+
+
+/*
+ * ioctl routine
+ * All routines effect the processor that they are executed on.  Thus you
+ * must be running on the processor that you wish to change.
+ */
+
+/*
+ * ioctl routine
+ * All routines effect the processor that they are executed on.  Thus you
+ * must be running on the processor that you wish to change.
+ *
+ * Fix: the PA_PERF_VERSION destination pointer now carries the __user
+ * annotation, consistent with the copy_to_user() cast below; the case
+ * labels follow standard kernel switch indentation.
+ */
+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long error_start;
+	uint32_t raddr[4];
+	int error = 0;
+
+	switch (cmd) {
+	case PA_PERF_ON:
+		/* Start the counters */
+		perf_start_counters();
+		break;
+
+	case PA_PERF_OFF:
+		error_start = perf_stop_counters(raddr);
+		if (error_start != 0) {
+			printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n",
+				error_start);
+			error = -EFAULT;
+			break;
+		}
+
+		/* copy out the Counters */
+		if (copy_to_user((void __user *)arg, raddr,
+				sizeof(raddr)) != 0) {
+			error = -EFAULT;
+			break;
+		}
+		break;
+
+	case PA_PERF_VERSION:
+		/* Return the version # */
+		error = put_user(PERF_VERSION, (int __user *)arg);
+		break;
+
+	default:
+		error = -ENOTTY;
+	}
+
+	return error;
+}
+
+/* File operations for the perf misc device; open/release enforce
+ * single ownership, counters are driven through the ioctl interface. */
+static const struct file_operations perf_fops = {
+	.llseek = no_llseek,
+	.read = perf_read,
+	.write = perf_write,
+	.unlocked_ioctl = perf_ioctl,
+	.compat_ioctl = perf_ioctl,
+	.open = perf_open,
+	.release = perf_release
+};
+
+/* Misc char device node; minor number is assigned dynamically.
+ * Designated initializers replace the fragile positional form. */
+static struct miscdevice perf_dev = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= PA_PERF_DEV,
+	.fops	= &perf_fops,
+};
+
+/*
+ * Initialize the module
+ */
+/*
+ * Initialize the module: determine which counter interface this CPU
+ * implements, register the misc device and patch the counter images.
+ */
+static int __init perf_init(void)
+{
+	int ret;
+
+	/* Default write-control bitmasks; piranha-class CPUs override below. */
+	bitmask_array = perf_bitmasks;
+
+	switch (boot_cpu_data.cpu_type) {
+	case pcxu:
+	case pcxu_:
+		perf_processor_interface = ONYX_INTF;
+		break;
+	case pcxw2:
+	case mako:
+	case mako2:
+		bitmask_array = perf_bitmasks_piranha;
+		/* fall through */
+	case pcxw:
+	case pcxw_:
+		perf_processor_interface = CUDA_INTF;
+		break;
+	default:
+		perf_processor_interface = UNKNOWN_INTF;
+		printk("Performance monitoring counters not supported on this processor\n");
+		return -ENODEV;
+	}
+
+	ret = misc_register(&perf_dev);
+	if (ret) {
+		printk(KERN_ERR "Performance monitoring counters: "
+			"cannot register misc device.\n");
+		return ret;
+	}
+
+	/* Patch the images to match the system */
+	perf_patch_images();
+
+	/* TODO: this only lets us access the first cpu.. what to do for SMP? */
+	cpu_device = per_cpu(cpu_data, 0).dev;
+	printk("Performance monitoring counters enabled for %s\n",
+		per_cpu(cpu_data, 0).dev->name);
+
+	return 0;
+}
+device_initcall(perf_init);
+
+/*
+ * perf_start_counters(void)
+ *
+ * Start the counters.
+ */
+/*
+ * perf_start_counters(void)
+ *
+ * Start the counters by handing off to the assembly helper in
+ * perf_asm.S.
+ */
+static void perf_start_counters(void)
+{
+	/* Enable performance monitor counters */
+	perf_intrigue_enable_perf_counters();
+}
+
+/*
+ * perf_stop_counters
+ *
+ * Stop the performance counters and save counts
+ * in a per_processor array.
+ */
+/*
+ * perf_stop_counters
+ *
+ * Stop the performance counters and save counts
+ * in a per_processor array.
+ *
+ * raddr must hold 4 x uint32_t; it receives counters 0..3 (with each
+ * counter's sticky bit OR'd into bit 32 on the ONYX path).  Returns 0
+ * on success or -13 when the RDR read fails.
+ */
+static int perf_stop_counters(uint32_t *raddr)
+{
+	uint64_t userbuf[MAX_RDR_WORDS];
+
+	/* Disable performance counters */
+	perf_intrigue_disable_perf_counters();
+
+	if (perf_processor_interface == ONYX_INTF) {
+		uint64_t tmp64;
+		/*
+		 * Read the counters
+		 */
+		if (!perf_rdr_read_ubuf(16, userbuf))
+			return -13;	/* NOTE(review): magic -13 (== -EACCES?) kept as-is */
+
+		/* Counter0 is bits 1398 to 1429 */
+		tmp64 =  (userbuf[21] << 22) & 0x00000000ffc00000;
+		tmp64 |= (userbuf[22] >> 42) & 0x00000000003fffff;
+		/* OR sticky0 (bit 1430) to counter0 bit 32 */
+		tmp64 |= (userbuf[22] >> 10) & 0x0000000080000000;
+		raddr[0] = (uint32_t)tmp64;
+
+		/* Counter1 is bits 1431 to 1462 */
+		tmp64 =  (userbuf[22] >> 9) & 0x00000000ffffffff;
+		/* OR sticky1 (bit 1463) to counter1 bit 32 */
+		tmp64 |= (userbuf[22] << 23) & 0x0000000080000000;
+		raddr[1] = (uint32_t)tmp64;
+
+		/* Counter2 is bits 1464 to 1495 */
+		tmp64 =  (userbuf[22] << 24) & 0x00000000ff000000;
+		tmp64 |= (userbuf[23] >> 40) & 0x0000000000ffffff;
+		/* OR sticky2 (bit 1496) to counter2 bit 32 */
+		tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
+		raddr[2] = (uint32_t)tmp64;
+
+		/* Counter3 is bits 1497 to 1528 */
+		tmp64 =  (userbuf[23] >> 7) & 0x00000000ffffffff;
+		/* OR sticky3 (bit 1529) to counter3 bit 32 */
+		tmp64 |= (userbuf[23] << 25) & 0x0000000080000000;
+		raddr[3] = (uint32_t)tmp64;
+
+		/*
+		 * Zero out the counters
+		 */
+
+		/*
+		 * The counters and sticky-bits comprise the last 132 bits
+		 * (1398 - 1529) of RDR16 on a U chip.  We'll zero these
+		 * out the easy way: zero out last 10 bits of dword 21,
+		 * all of dword 22 and 58 bits (plus 6 don't care bits) of
+		 * dword 23.
+		 */
+		userbuf[21] &= 0xfffffffffffffc00ul;	/* 0 to last 10 bits */
+		userbuf[22] = 0;
+		userbuf[23] = 0;
+
+		/*
+		 * Write back the zeroed bytes + the image given
+		 * the read was destructive.
+		 */
+		perf_rdr_write(16, userbuf);
+	} else {
+
+		/*
+		 * Read RDR-15 which contains the counters and sticky bits
+		 */
+		if (!perf_rdr_read_ubuf(15, userbuf)) {
+			return -13;
+		}
+
+		/*
+		 * Clear out the counters
+		 */
+		perf_rdr_clear(15);
+
+		/*
+		 * Copy the counters: two 32-bit counters per 64-bit word.
+		 */
+		raddr[0] = (uint32_t)((userbuf[0] >> 32) & 0x00000000ffffffffUL);
+		raddr[1] = (uint32_t)(userbuf[0] & 0x00000000ffffffffUL);
+		raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
+		raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
+	}
+
+	return 0;
+}
+
+/*
+ * perf_rdr_get_entry
+ *
+ * Retrieve a pointer to the description of what this
+ * RDR contains.
+ */
+/*
+ * perf_rdr_get_entry
+ *
+ * Return the description (width/words/write-control) of the given RDR,
+ * picking the table that matches the detected processor interface.
+ * No bounds check -- callers pass RDR numbers 0..31.
+ */
+static const struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num)
+{
+	const struct rdr_tbl_ent *tbl;
+
+	tbl = (perf_processor_interface == ONYX_INTF)
+		? perf_rdr_tbl_U : perf_rdr_tbl_W;
+	return &tbl[rdr_num];
+}
+
+/*
+ * perf_rdr_read_ubuf
+ *
+ * Read the RDR value into the buffer specified.
+ */
+/*
+ * perf_rdr_read_ubuf
+ *
+ * Read the RDR value into the buffer specified.
+ *
+ * buffer must hold at least the RDR's num_words 64-bit words.
+ * Returns 0 when the RDR is unimplemented (width 0), 1 on success.
+ * The shift-in is destructive; callers that care write the data back.
+ */
+static int perf_rdr_read_ubuf(uint32_t	rdr_num, uint64_t *buffer)
+{
+	uint64_t	data, data_mask = 0;
+	uint32_t	width, xbits, i;
+	const struct rdr_tbl_ent *tentry;
+
+	tentry = perf_rdr_get_entry(rdr_num);
+	if ((width = tentry->width) == 0)
+		return 0;
+
+	/* Clear out buffer */
+	i = tentry->num_words;
+	while (i--) {
+		buffer[i] = 0;
+	}
+
+	/* Check for bits an even number of 64 */
+	/* xbits = leftover bits when width is not a multiple of 64 */
+	if ((xbits = width & 0x03f) != 0) {
+		data_mask = 1;
+		data_mask <<= (64 - xbits);
+		data_mask--;
+	}
+
+	/* Grab all of the data */
+	i = tentry->num_words;
+	while (i--) {
+
+		if (perf_processor_interface == ONYX_INTF) {
+			data = perf_rdr_shift_in_U(rdr_num, width);
+		} else {
+			data = perf_rdr_shift_in_W(rdr_num, width);
+		}
+		if (xbits) {
+			/* Splice the partial word across adjacent buffer entries */
+			buffer[i] |= (data << (64 - xbits));
+			if (i) {
+				buffer[i-1] |= ((data >> xbits) & data_mask);
+			}
+		} else {
+			buffer[i] = data;
+		}
+	}
+
+	return 1;
+}
+
+/*
+ * perf_rdr_clear
+ *
+ * Zero out the given RDR register
+ */
+/*
+ * perf_rdr_clear
+ *
+ * Zero out the given RDR register by shifting zero words through it.
+ * Returns -1 when the RDR is unimplemented (width 0), 0 otherwise.
+ */
+static int perf_rdr_clear(uint32_t	rdr_num)
+{
+	const struct rdr_tbl_ent *tentry = perf_rdr_get_entry(rdr_num);
+	int32_t n;
+
+	if (tentry->width == 0)
+		return -1;
+
+	for (n = tentry->num_words; n > 0; n--) {
+		if (perf_processor_interface == ONYX_INTF)
+			perf_rdr_shift_out_U(rdr_num, 0UL);
+		else
+			perf_rdr_shift_out_W(rdr_num, 0UL);
+	}
+
+	return 0;
+}
+
+
+/*
+ * perf_write_image
+ *
+ * Write the given image out to the processor
+ */
+/*
+ * perf_write_image
+ *
+ * Write the given image out to the processor: first into the RDRs
+ * (honoring each RDR's write-control bitmask), then into the Runway
+ * STATUS/DEBUG registers, which are not part of any RDR.
+ *
+ * Fix: the temporary Runway mapping is now released with iounmap()
+ * instead of leaking a fresh 4k mapping on every call.
+ */
+static int perf_write_image(uint64_t *memaddr)
+{
+	uint64_t buffer[MAX_RDR_WORDS];
+	uint64_t *bptr;
+	uint32_t dwords;
+	const uint32_t *intrigue_rdr;
+	const uint64_t *intrigue_bitmask;
+	uint64_t tmp64;
+	void __iomem *runway;
+	const struct rdr_tbl_ent *tentry;
+	int i;
+
+	/* Clear out counters */
+	if (perf_processor_interface == ONYX_INTF) {
+
+		perf_rdr_clear(16);
+
+		/* Toggle performance monitor */
+		perf_intrigue_enable_perf_counters();
+		perf_intrigue_disable_perf_counters();
+
+		intrigue_rdr = perf_rdrs_U;
+	} else {
+		perf_rdr_clear(15);
+		intrigue_rdr = perf_rdrs_W;
+	}
+
+	/* Write all RDRs */
+	while (*intrigue_rdr != -1) {
+		tentry = perf_rdr_get_entry(*intrigue_rdr);
+		perf_rdr_read_ubuf(*intrigue_rdr, buffer);
+		bptr   = &buffer[0];
+		dwords = tentry->num_words;
+		if (tentry->write_control) {
+			intrigue_bitmask = &bitmask_array[tentry->write_control >> 3];
+			while (dwords--) {
+				/* Take image bits only where the mask allows;
+				 * masked-off bits keep their current value. */
+				tmp64 = *intrigue_bitmask & *memaddr++;
+				tmp64 |= (~(*intrigue_bitmask++)) & *bptr;
+				*bptr++ = tmp64;
+			}
+		} else {
+			while (dwords--) {
+				*bptr++ = *memaddr++;
+			}
+		}
+
+		perf_rdr_write(*intrigue_rdr, buffer);
+		intrigue_rdr++;
+	}
+
+	/*
+	 * Now copy out the Runway stuff which is not in RDRs
+	 */
+
+	if (cpu_device == NULL) {
+		printk(KERN_ERR "write_image: cpu_device not yet initialized!\n");
+		return -1;
+	}
+
+	runway = ioremap_nocache(cpu_device->hpa.start, 4096);
+	if (!runway) {
+		pr_err("perf_write_image: ioremap failed!\n");
+		return -ENOMEM;
+	}
+
+	/* Merge intrigue bits into Runway STATUS 0 */
+	tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
+	__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
+		     runway + RUNWAY_STATUS);
+
+	/* Write RUNWAY DEBUG registers */
+	for (i = 0; i < 8; i++)
+		__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
+
+	/* Don't leak the temporary mapping. */
+	iounmap(runway);
+
+	return 0;
+}
+
+/*
+ * perf_rdr_write
+ *
+ * Write the given RDR register with the contents
+ * of the given buffer.
+ */
+/*
+ * perf_rdr_write
+ *
+ * Write the given RDR register with the contents
+ * of the given buffer.  Silently returns for an unimplemented RDR
+ * (width 0).
+ */
+static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer)
+{
+	const struct rdr_tbl_ent *tentry;
+	int32_t		i;
+
+/* NOTE(review): leftover debug printks below, no KERN_ log level */
+printk("perf_rdr_write\n");
+	tentry = perf_rdr_get_entry(rdr_num);
+	if (tentry->width == 0) { return; }
+
+	i = tentry->num_words;
+	while (i--) {
+		if (perf_processor_interface == ONYX_INTF) {
+			perf_rdr_shift_out_U(rdr_num, buffer[i]);
+		} else {
+			perf_rdr_shift_out_W(rdr_num, buffer[i]);
+		}
+	}
+printk("perf_rdr_write done\n");
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/perf_asm.S b/src/kernel/linux/v4.14/arch/parisc/kernel/perf_asm.S
new file mode 100644
index 0000000..fa6ea99
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/perf_asm.S
@@ -0,0 +1,1692 @@
+
+/*    low-level asm for "intrigue" (PA8500-8700 CPU perf counters)
+ * 
+ *    Copyright (C) 2001 Randolph Chung <tausq at parisc-linux.org>
+ *    Copyright (C) 2001 Hewlett-Packard (Grant Grundler)
+ * 
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ * 
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ * 
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <asm/assembly.h>
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+#ifdef CONFIG_64BIT
+	.level		2.0w
+#endif /* CONFIG_64BIT */
+
+#define MTDIAG_1(gr)    .word 0x14201840 + gr*0x10000
+#define MTDIAG_2(gr)    .word 0x14401840 + gr*0x10000
+#define MFDIAG_1(gr)    .word 0x142008A0 + gr
+#define MFDIAG_2(gr)    .word 0x144008A0 + gr
+#define STDIAG(dr)      .word 0x14000AA0 + dr*0x200000
+#define SFDIAG(dr)      .word 0x14000BA0 + dr*0x200000
+#define DR2_SLOW_RET    53
+
+
+;
+; Enable the performance counters
+;
+; The coprocessor only needs to be enabled when
+; starting/stopping the coprocessor with the pmenb/pmdis.
+;
+	.text
+
+; perf_intrigue_enable_perf_counters
+;
+; Temporarily turns on the performance coprocessor (CCR bit 0x20),
+; issues pmenb to start the monitor, then clears the bit again.
+; Only %r25 and %r26 are modified.
+ENTRY(perf_intrigue_enable_perf_counters)
+	.proc
+	.callinfo  frame=0,NO_CALLS
+	.entry
+
+	ldi     0x20,%r25                ; load up perfmon bit
+	mfctl   ccr,%r26                 ; get coprocessor register
+	or      %r25,%r26,%r26             ; set bit
+	mtctl   %r26,ccr                 ; turn on performance coprocessor
+	pmenb                           ; enable performance monitor
+	ssm     0,0                     ; dummy op to ensure completion
+	sync                            ; follow ERS
+	andcm   %r26,%r25,%r26             ; clear bit now 
+	mtctl   %r26,ccr                 ; turn off performance coprocessor
+	nop                             ; NOPs as specified in ERS
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bve    (%r2)
+	nop
+	.exit
+	.procend
+ENDPROC(perf_intrigue_enable_perf_counters)
+
+; perf_intrigue_disable_perf_counters
+;
+; Mirror of the enable routine: turns the coprocessor on, issues pmdis
+; to stop the monitor, and turns the coprocessor off in the branch
+; delay slot.  Only %r25 and %r26 are modified.
+ENTRY(perf_intrigue_disable_perf_counters)
+	.proc
+	.callinfo  frame=0,NO_CALLS
+	.entry
+	ldi     0x20,%r25                ; load up perfmon bit
+	mfctl   ccr,%r26                 ; get coprocessor register
+	or      %r25,%r26,%r26             ; set bit
+	mtctl   %r26,ccr                 ; turn on performance coprocessor
+	pmdis                           ; disable performance monitor
+	ssm     0,0                     ; dummy op to ensure completion
+	andcm   %r26,%r25,%r26             ; clear bit now 
+	bve    (%r2)
+	mtctl   %r26,ccr                 ; turn off performance coprocessor
+	.exit
+	.procend
+ENDPROC(perf_intrigue_disable_perf_counters)
+
+;***********************************************************************
+;*
+;* Name: perf_rdr_shift_in_W
+;*
+;* Description:
+;*	This routine shifts data in from the RDR in arg0 and returns
+;*	the result in ret0.  If the RDR is <= 64 bits in length, it
+;*	is shifted back in immediately.  This is to compensate
+;*	for RDR10 which has bits that preclude PDC stack operations
+;*	when they are in the wrong state.
+;*
+;* Arguments:
+;*	arg0 : rdr to be read
+;*	arg1 : bit length of rdr
+;*
+;* Returns:
+;*	ret0 = next 64 bits of rdr data from staging register
+;*
+;* Register usage:
+;*	arg0 : rdr to be read
+;*	arg1 : bit length of rdr
+;*	%r24  - original DR2 value
+;*	%r1   - scratch
+;*  %r29  - scratch
+;*
+;* Returns:
+;*	ret0 = RDR data (right justified)
+;*
+;***********************************************************************
+
+; perf_rdr_shift_in_W
+;
+; Dispatches through a blr jump table indexed by 4*rdr_num.  Each RDR's
+; read sequence below must remain EXACTLY 8 instructions long (see the
+; "branch to 8-instruction sequence" note) -- do not insert or remove
+; instructions inside a sequence.  Comments are safe; they emit no code.
+ENTRY(perf_rdr_shift_in_W)
+	.proc
+	.callinfo frame=0,NO_CALLS
+	.entry
+;
+; read(shift in) the RDR.
+;
+
+; NOTE: The PCX-W ERS states that DR2_SLOW_RET must be set before any
+; shifting is done, from or to, remote diagnose registers.
+;
+
+	depdi,z		1,DR2_SLOW_RET,1,%r29
+	MFDIAG_2	(24)
+	or		    %r24,%r29,%r29
+	MTDIAG_2	(29)			; set DR2_SLOW_RET
+
+	nop
+	nop
+	nop
+	nop
+
+;
+; Cacheline start (32-byte cacheline)
+;
+	nop
+	nop
+	nop
+	extrd,u		arg1,63,6,%r1	; setup shift amount by bits to move 
+
+	mtsar		%r1
+	shladd		arg0,2,%r0,%r1	; %r1 = 4 * RDR number
+	blr  		%r1,%r0		; branch to 8-instruction sequence
+	nop
+
+;
+; Cacheline start (32-byte cacheline)
+;
+
+	;
+	; RDR 0 sequence
+	;
+	SFDIAG		(0)
+	ssm		    0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)			; mtdiag %dr1, %r1 
+	STDIAG		(0)
+	ssm		    0,0
+	b,n         perf_rdr_shift_in_W_leave
+
+	;
+	; RDR 1 sequence
+	;
+	sync
+	ssm		    0,0
+	SFDIAG		(1)
+	ssm		    0,0
+	MFDIAG_1	(28)
+	ssm		    0,0
+	b,n         perf_rdr_shift_in_W_leave
+	nop
+
+	;
+	; RDR 2 read sequence
+	;
+	SFDIAG		(2)
+	ssm		    0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(2)
+	ssm		    0,0
+	b,n         perf_rdr_shift_in_W_leave
+
+	;
+	; RDR 3 read sequence
+	;
+	b,n         perf_rdr_shift_in_W_leave
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	;
+	; RDR 4 read sequence
+	;
+	sync
+	ssm		0,0
+	SFDIAG		(4)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_W_leave
+	ssm		0,0
+	nop
+
+	; 
+	; RDR 5 read sequence
+	;
+	sync
+	ssm		0,0
+	SFDIAG		(5)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 6 read sequence
+	;
+	sync
+	ssm		0,0
+	SFDIAG		(6)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 7 read sequence
+	;
+	b,n         perf_rdr_shift_in_W_leave
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	;
+	; RDR 8 read sequence
+	;
+	b,n         perf_rdr_shift_in_W_leave
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	;
+	; RDR 9 read sequence
+	;
+	b,n         perf_rdr_shift_in_W_leave
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	;
+	; RDR 10 read sequence
+	;
+	SFDIAG		(10)
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(10)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_W_leave
+
+	;
+	; RDR 11 read sequence
+	;
+	SFDIAG		(11)
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(11)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_W_leave
+
+	;
+	; RDR 12 read sequence
+	;
+	b,n         perf_rdr_shift_in_W_leave
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	;
+	; RDR 13 read sequence
+	;
+	sync
+	ssm		0,0
+	SFDIAG		(13)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 14 read sequence
+	;
+	SFDIAG		(14)
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(14)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_W_leave
+
+	;
+	; RDR 15 read sequence
+	;
+	sync
+	ssm		0,0
+	SFDIAG		(15)
+	ssm		0,0
+	MFDIAG_1	(28)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_W_leave
+	nop
+
+	;
+	; RDR 16 read sequence
+	;
+	sync
+	ssm		0,0
+	SFDIAG		(16)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 17 read sequence
+	;
+	SFDIAG		(17)
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(17)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_W_leave
+
+	;
+	; RDR 18 read sequence
+	;
+	SFDIAG		(18)
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(18)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_W_leave
+
+	;
+	; RDR 19 read sequence
+	;
+	b,n         perf_rdr_shift_in_W_leave
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	;
+	; RDR 20 read sequence
+	;
+	sync
+	ssm		0,0
+	SFDIAG		(20)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 21 read sequence
+	;
+	sync
+	ssm		0,0
+	SFDIAG		(21)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 22 read sequence
+	;
+	sync
+	ssm		0,0
+	SFDIAG		(22)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 23 read sequence
+	;
+	sync
+	ssm		0,0
+	SFDIAG		(23)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 24 read sequence
+	;
+	sync
+	ssm		0,0
+	SFDIAG		(24)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 25 read sequence
+	;
+	sync
+	ssm		0,0
+	SFDIAG		(25)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 26 read sequence
+	;
+	SFDIAG		(26)
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(26)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_W_leave
+
+	;
+	; RDR 27 read sequence
+	;
+	SFDIAG		(27)
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(27)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_W_leave
+
+	;
+	; RDR 28 read sequence
+	;
+	sync
+	ssm		0,0
+	SFDIAG		(28)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 29 read sequence
+	;
+	sync
+	ssm		0,0
+	SFDIAG		(29)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 30 read sequence
+	;
+	SFDIAG		(30)
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(30)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_W_leave
+
+	;
+	; RDR 31 read sequence
+	;
+	sync
+	ssm		0,0
+	SFDIAG		(31)
+	ssm		0,0
+	MFDIAG_1	(28)
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; Fallthrough
+	;
+
+perf_rdr_shift_in_W_leave:
+	bve		    (%r2)
+	.exit
+	MTDIAG_2	(24)			; restore DR2
+	.procend
+ENDPROC(perf_rdr_shift_in_W)
+
+
+;***********************************************************************
+;*
+;* Name: perf_rdr_shift_out_W
+;*
+;* Description:
+;*	This routine moves data to the RDR's.  The double-word that
+;*	arg1 points to is loaded and moved into the staging register.
+;*	Then the STDIAG instruction for the RDR # in arg0 is called
+;*	to move the data to the RDR.
+;*
+;* Arguments:
+;*	arg0 = rdr number
+;*	arg1 = 64-bit value to write
+;*	%r24 - DR2 | DR2_SLOW_RET
+;*	%r23 - original DR2 value
+;*
+;* Returns:
+;*	None
+;*
+;* Register usage:
+;*	%r1  - scratch (holds 4 * RDR number for the blr dispatch)
+;*
+;***********************************************************************
+
+ENTRY(perf_rdr_shift_out_W)
+	.proc
+	.callinfo frame=0,NO_CALLS
+	.entry
+;
+; NOTE: The PCX-W ERS states that DR2_SLOW_RET must be set before any
+; shifting is done, from or to, the remote diagnose registers.
+;
+
+	depdi,z		1,DR2_SLOW_RET,1,%r24
+	MFDIAG_2	(23)
+	or		%r24,%r23,%r24
+	MTDIAG_2	(24)		; set DR2_SLOW_RET
+	MTDIAG_1	(25)		; data to the staging register
+	shladd		arg0,2,%r0,%r1	; %r1 = 4 * RDR number
+	blr		    %r1,%r0	; branch to 8-instruction sequence
+	nop
+
+	;
+	; Branch table: one entry per RDR (0-31).  Each entry below must
+	; remain exactly 8 instructions (32 bytes) long, because the blr
+	; above computes the target as 4 * RDR number words past itself.
+	; Do not add, remove, or reorder instructions within an entry.
+	;
+
+	;
+	; RDR 0 write sequence
+	;
+	sync				; RDR 0 write sequence
+	ssm		0,0
+	STDIAG		(0)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 1 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(1)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 2 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(2)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 3 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(3)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 4 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(4)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 5 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(5)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 6 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(6)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 7 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(7)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 8 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(8)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 9 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(9)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 10 write sequence
+	; NOTE(review): writes both RDR 10 and RDR 26 — presumably these
+	; registers are paired/shadowed on PCX-W; confirm against the ERS.
+	;
+	sync
+	ssm		0,0
+	STDIAG		(10)
+	STDIAG		(26)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 11 write sequence
+	; NOTE(review): writes both RDR 11 and RDR 27 — same pairing as
+	; the RDR 10/26 entry above; confirm against the ERS.
+	;
+	sync
+	ssm		0,0
+	STDIAG		(11)
+	STDIAG		(27)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 12 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(12)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 13 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(13)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 14 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(14)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 15 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(15)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 16 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(16)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 17 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(17)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 18 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(18)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 19 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(19)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 20 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(20)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 21 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(21)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 22 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(22)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 23 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(23)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 24 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(24)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 25 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(25)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 26 write sequence
+	; NOTE(review): issues STDIAG (10) then STDIAG (26), mirroring the
+	; RDR 10 entry — presumably the paired registers must be written
+	; together; confirm against the ERS.
+	;
+	sync
+	ssm		0,0
+	STDIAG		(10)
+	STDIAG		(26)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 27 write sequence
+	; NOTE(review): issues STDIAG (11) then STDIAG (27), mirroring the
+	; RDR 11 entry above.
+	;
+	sync
+	ssm		0,0
+	STDIAG		(11)
+	STDIAG		(27)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	ssm		0,0
+	nop
+
+	;
+	; RDR 28 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(28)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 29 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(29)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 30 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(30)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+	;
+	; RDR 31 write sequence
+	;
+	sync
+	ssm		0,0
+	STDIAG		(31)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_W_leave
+	nop
+	ssm		0,0
+	nop
+
+perf_rdr_shift_out_W_leave:
+	bve		(%r2)
+	.exit
+	MTDIAG_2	(23)			; restore DR2
+	.procend
+ENDPROC(perf_rdr_shift_out_W)
+
+
+;***********************************************************************
+;*
+;* Name: perf_rdr_shift_in_U
+;*
+;* Description:
+;*	This routine shifts data in from the RDR in arg0 and returns
+;*	the result in ret0.  If the RDR is <= 64 bits in length, it
+;*	is shifted back immediately.  This is to compensate
+;*	for RDR10 which has bits that preclude PDC stack operations
+;*	when they are in the wrong state.
+;*
+;* Arguments:
+;*	arg0 : rdr to be read
+;*	arg1 : bit length of rdr
+;*
+;* Returns:
+;*	ret0 = next 64 bits of rdr data from staging register
+;*
+;* Register usage:
+;*	arg0 : rdr to be read
+;*	arg1 : bit length of rdr
+;*	%r24 - original DR2 value
+;*	%r29 - DR2 | DR2_SLOW_RET  (the code below builds this in %r29,
+;*	       not %r23 as older comments claimed)
+;*	%r1  - scratch
+;*
+;***********************************************************************
+
+ENTRY(perf_rdr_shift_in_U)
+	.proc
+	.callinfo frame=0,NO_CALLS
+	.entry
+
+; read(shift in) the RDR.
+;
+; NOTE: The PCX-U ERS states that DR2_SLOW_RET must be set before any
+; shifting is done, from or to, remote diagnose registers.
+
+	depdi,z		1,DR2_SLOW_RET,1,%r29
+	MFDIAG_2	(24)
+	or			%r24,%r29,%r29
+	MTDIAG_2	(29)			; set DR2_SLOW_RET
+
+	; padding nops keep the blr dispatch table below on its required
+	; 32-byte cacheline alignment
+	nop
+	nop
+	nop
+	nop
+
+;
+; Start of next 32-byte cacheline
+;
+	nop
+	nop
+	nop
+	extrd,u		arg1,63,6,%r1
+
+	mtsar		%r1
+	shladd		arg0,2,%r0,%r1	; %r1 = 4 * RDR number
+	blr 		%r1,%r0		; branch to 8-instruction sequence
+	nop
+
+;
+; Start of next 32-byte cacheline
+;
+; Branch table: one entry per RDR (0-31), each exactly 8 instructions
+; (32 bytes).  Do not add, remove, or reorder instructions in an entry.
+;
+	SFDIAG		(0)		; RDR 0 read sequence
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(0)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_U_leave
+
+	SFDIAG		(1)		; RDR 1 read sequence
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(1)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_U_leave
+
+	sync				; RDR 2 read sequence
+	ssm		0,0
+	SFDIAG		(4)		; NOTE(review): RDR 2 entry reads
+					; diag register 4, not 2 — looks
+					; intentional (HP-UX-derived magic)
+					; but verify against the PCX-U ERS
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_U_leave
+	ssm		0,0
+	nop
+
+	sync				; RDR 3 read sequence
+	ssm		0,0
+	SFDIAG		(3)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_U_leave
+	ssm		0,0
+	nop
+
+	sync				; RDR 4 read sequence
+	ssm		0,0
+	SFDIAG		(4)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_U_leave
+	ssm		0,0
+	nop
+
+	sync				; RDR 5 read sequence
+	ssm		0,0
+	SFDIAG		(5)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_U_leave
+	ssm		0,0
+	nop
+
+	sync				; RDR 6 read sequence
+	ssm		0,0
+	SFDIAG		(6)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_U_leave
+	ssm		0,0
+	nop
+
+	sync				; RDR 7 read sequence
+	ssm		0,0
+	SFDIAG		(7)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_U_leave
+	ssm		0,0
+	nop
+
+	; RDR 8: no read sequence implemented — bail out immediately
+	b,n         perf_rdr_shift_in_U_leave
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	SFDIAG		(9)		; RDR 9 read sequence
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(9)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_U_leave
+
+	SFDIAG		(10)		; RDR 10 read sequence
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(10)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_U_leave
+
+	SFDIAG		(11)		; RDR 11 read sequence
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(11)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_U_leave
+
+	SFDIAG		(12)		; RDR 12 read sequence
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(12)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_U_leave
+
+	SFDIAG		(13)		; RDR 13 read sequence
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(13)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_U_leave
+
+	SFDIAG		(14)		; RDR 14 read sequence
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(14)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_U_leave
+
+	SFDIAG		(15)		; RDR 15 read sequence
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(15)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_U_leave
+
+	sync				; RDR 16 read sequence
+	ssm		0,0
+	SFDIAG		(16)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_U_leave
+	ssm		0,0
+	nop
+
+	SFDIAG		(17)		; RDR 17 read sequence
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(17)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_U_leave
+
+	SFDIAG		(18)		; RDR 18 read sequence
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(18)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_U_leave
+
+	; RDR 19: no read sequence implemented — bail out immediately
+	b,n         perf_rdr_shift_in_U_leave
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	sync				; RDR 20 read sequence
+	ssm		0,0
+	SFDIAG		(20)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_U_leave
+	ssm		0,0
+	nop
+
+	sync				; RDR 21 read sequence
+	ssm		0,0
+	SFDIAG		(21)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_U_leave
+	ssm		0,0
+	nop
+
+	sync				; RDR 22 read sequence
+	ssm		0,0
+	SFDIAG		(22)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_U_leave
+	ssm		0,0
+	nop
+
+	sync				; RDR 23 read sequence
+	ssm		0,0
+	SFDIAG		(23)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_U_leave
+	ssm		0,0
+	nop
+
+	sync				; RDR 24 read sequence
+	ssm		0,0
+	SFDIAG		(24)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_U_leave
+	ssm		0,0
+	nop
+
+	sync				; RDR 25 read sequence
+	ssm		0,0
+	SFDIAG		(25)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_U_leave
+	ssm		0,0
+	nop
+
+	SFDIAG		(26)		; RDR 26 read sequence
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(26)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_U_leave
+
+	SFDIAG		(27)		; RDR 27 read sequence
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(27)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_U_leave
+
+	sync				; RDR 28 read sequence
+	ssm		0,0
+	SFDIAG		(28)
+	ssm		0,0
+	MFDIAG_1	(28)
+	b,n         perf_rdr_shift_in_U_leave
+	ssm		0,0
+	nop
+
+	; RDR 29: no read sequence implemented — bail out immediately
+	b,n         perf_rdr_shift_in_U_leave
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	SFDIAG		(30)		; RDR 30 read sequence
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(30)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_U_leave
+
+	SFDIAG		(31)		; RDR 31 read sequence
+	ssm		0,0
+	MFDIAG_1	(28)
+	shrpd		ret0,%r0,%sar,%r1
+	MTDIAG_1	(1)
+	STDIAG		(31)
+	ssm		0,0
+	b,n         perf_rdr_shift_in_U_leave
+	nop				; harmless: last table entry
+
+perf_rdr_shift_in_U_leave:
+	bve		    (%r2)
+	.exit
+	MTDIAG_2	(24)			; restore DR2
+	.procend
+ENDPROC(perf_rdr_shift_in_U)
+
+;***********************************************************************
+;*
+;* Name: perf_rdr_shift_out_U
+;*
+;* Description:
+;*	This routine moves data to the RDR's.  The double-word that
+;*	arg1 points to is loaded and moved into the staging register.
+;*	Then the STDIAG instruction for the RDR # in arg0 is called
+;*	to move the data to the RDR.
+;*
+;* Arguments:
+;*	arg0 = rdr target
+;*	arg1 = buffer pointer
+;*
+;* Returns:
+;*	None
+;*
+;* Register usage:
+;*	arg0 = rdr target
+;*	arg1 = buffer pointer
+;*	%r24 - DR2 | DR2_SLOW_RET
+;*	%r23 - original DR2 value
+;*
+;***********************************************************************
+
+ENTRY(perf_rdr_shift_out_U)
+	.proc
+	.callinfo frame=0,NO_CALLS
+	.entry
+
+;
+; NOTE: The PCX-U ERS states that DR2_SLOW_RET must be set before any
+; shifting is done, from or to, the remote diagnose registers.
+;
+
+	depdi,z		1,DR2_SLOW_RET,1,%r24
+	MFDIAG_2	(23)
+	or		%r24,%r23,%r24
+	MTDIAG_2	(24)		; set DR2_SLOW_RET
+
+	MTDIAG_1	(25)		; data to the staging register
+	shladd		arg0,2,%r0,%r1	; %r1 = 4 * RDR number
+	blr		%r1,%r0		; branch to 8-instruction sequence
+	nop
+
+;
+; 32-byte cacheline aligned
+;
+; Branch table: one entry per RDR (0-31), each exactly 8 instructions
+; (32 bytes).  Do not add, remove, or reorder instructions in an entry.
+;
+
+	sync				; RDR 0 write sequence
+	ssm		0,0
+	STDIAG		(0)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 1 write sequence
+	ssm		0,0
+	STDIAG		(1)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 2 write sequence
+	ssm		0,0
+	STDIAG		(2)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 3 write sequence
+	ssm		0,0
+	STDIAG		(3)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 4 write sequence
+	ssm		0,0
+	STDIAG		(4)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 5 write sequence
+	ssm		0,0
+	STDIAG		(5)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 6 write sequence
+	ssm		0,0
+	STDIAG		(6)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 7 write sequence
+	ssm		0,0
+	STDIAG		(7)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 8 write sequence
+	ssm		0,0
+	STDIAG		(8)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 9 write sequence
+	ssm		0,0
+	STDIAG		(9)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 10 write sequence
+	ssm		0,0
+	STDIAG		(10)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 11 write sequence
+	ssm		0,0
+	STDIAG		(11)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 12 write sequence
+	ssm		0,0
+	STDIAG		(12)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 13 write sequence
+	ssm		0,0
+	STDIAG		(13)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 14 write sequence
+	ssm		0,0
+	STDIAG		(14)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 15 write sequence
+	ssm		0,0
+	STDIAG		(15)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 16 write sequence
+	ssm		0,0
+	STDIAG		(16)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 17 write sequence
+	ssm		0,0
+	STDIAG		(17)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 18 write sequence
+	ssm		0,0
+	STDIAG		(18)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 19 write sequence
+	ssm		0,0
+	STDIAG		(19)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 20 write sequence
+	ssm		0,0
+	STDIAG		(20)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 21 write sequence
+	ssm		0,0
+	STDIAG		(21)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 22 write sequence
+	ssm		0,0
+	STDIAG		(22)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 23 write sequence
+	ssm		0,0
+	STDIAG		(23)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 24 write sequence
+	ssm		0,0
+	STDIAG		(24)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 25 write sequence
+	ssm		0,0
+	STDIAG		(25)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 26 write sequence
+	ssm		0,0
+	STDIAG		(26)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 27 write sequence
+	ssm		0,0
+	STDIAG		(27)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 28 write sequence
+	ssm		0,0
+	STDIAG		(28)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 29 write sequence
+	ssm		0,0
+	STDIAG		(29)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 30 write sequence
+	ssm		0,0
+	STDIAG		(30)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+	sync				; RDR 31 write sequence
+	ssm		0,0
+	STDIAG		(31)
+	ssm		0,0
+	b,n         perf_rdr_shift_out_U_leave
+	nop
+	ssm		0,0
+	nop
+
+perf_rdr_shift_out_U_leave:
+	bve		(%r2)
+	.exit
+	MTDIAG_2	(23)			; restore DR2
+	.procend
+ENDPROC(perf_rdr_shift_out_U)
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/perf_images.h b/src/kernel/linux/v4.14/arch/parisc/kernel/perf_images.h
new file mode 100644
index 0000000..7fef964
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/perf_images.h
@@ -0,0 +1,3138 @@
+/* 
+ *    Images for use with the Onyx (PCX-U) CPU interface
+ *
+ *    Copyright (C) 2001 Randolph Chung <tausq at parisc-linux.org>
+ *    Copyright (C) 2001 Hewlett-Packard (Grant Grundler)
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef PERF_IMAGES_H
+#define PERF_IMAGES_H
+
+/* Magic numbers taken without modification from HPUX stuff */
+
+#define PCXU_IMAGE_SIZE 584
+
+static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = {
+/*
+ * CPI:
+ *
+ * Counts the following:
+ *
+ * ctr0 : total cycles
+ * ctr1 : total cycles where nothing retired
+ * ctr2 : total instructions retired, including nullified
+ * ctr3 : total instructions retired, less nullified instructions
+ */
+         {
+         0x4c00c000, 0x00000000, 0x00060000, 0x00000000,
+         0xe0e0e0e0, 0x004e0004, 0x07ffffff, 0xffc01380,
+         0x0101ffff, 0xfffff104, 0xe000c07f, 0xfffffffc,
+         0x01380010, 0x1fffffff, 0xff000000, 0x00000000,
+         0x00000fff, 0xff00000f, 0xffff0000, 0x0fffff00,
+         0x000fffff, 0x00000000, 0x00000000, 0x00ffffff,
+         0xfffff000, 0x0000000f, 0xffffffff, 0xff000000,
+         0x0000ffff, 0xfffffff0, 0x00000000, 0x0fffffff,
+         0xffff0000, 0x00000000, 0x6fffffff, 0xffffffff,
+         0xfff55fff, 0xffffffff, 0xffffffff, 0xf0000000,
+         0xf0000030, 0x00003c00, 0x067f080c, 0x02019fc0,
+         0x02804067, 0xf0009030, 0x19fc002c, 0x40067f08,
+         0x0c12019f, 0xc0028440, 0x67f00091, 0x3019fc00,
+         0x2fc007ff, 0xf800f001, 0xfffe003c, 0x007fff80,
+         0x0f001fff, 0xe003c007, 0xfff800f0, 0x01fffe00,
+         0x3c007fff, 0x800f001f, 0xffe003c0, 0x07fff800,
+         0xf001fffe, 0x003c007f, 0xff800f00, 0x1fffe003,
+         0xc007fff8, 0x00f001ff, 0xfe003c00, 0x7fff800f,
+         0x001fffe0, 0x03c007ff, 0xf800f001, 0xfffe003c,
+         0x007fff80, 0x0f001fff, 0xe003c007, 0xfff800f0,
+         0x01fffe00, 0x3c007fff, 0x800f001f, 0xffe00000,
+         0x00000000, 0x00000000, 0x00000000, 0x00000000,
+         0x6fff0000, 0x00000000, 0x60000000, 0x00000000,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+         0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+         0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+         0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+         0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+         0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+         0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+         0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff},
+
+/* Bus utilization image (bus_util)
+ *
+ * ctr0 : counts address valid cycles
+ * ctr1 : counts data valid cycles
+ * ctr2 : counts overflow from counter 0
+ * ctr3 : counts overflow from counter 1
+ */
+         {
+         0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+         0xefefefef, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xff000000, 0x00000000,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffff0000, 0x00000000, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xf0000000,
+         0x0000000c, 0x00003c00, 0x07930000, 0x0041e4c0,
+         0x01002079, 0x3000800c, 0x1e4c0030, 0x00279300,
+         0x010049e4, 0xc0014022, 0x79300090, 0x0c9e4c00,
+         0x34004793, 0x00020051, 0xe4c00180, 0x24793000,
+         0xa00d1e4c, 0x00380067, 0x93000300, 0x59e4c001,
+         0xc0267930, 0x00b00d9e, 0x4c003fff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xfffffc00,
+         0x00000000, 0x00000000, 0x00000000, 0x00000000,
+         0xffff0000, 0x00000000, 0xf0000000, 0x00000000,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+         0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+         0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+         0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+         0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+         0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+         0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+         0x00100000, 0x00000000, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff },
+
+/*
+ * TLB counts (same as tlbStats image):
+ *
+ * Counts the following:
+ *
+ * ctr0: DTLB misses
+ * ctr1: ITLB misses
+ * ctr2: total cycles in the miss handlers
+ * ctr3: total cycles
+ */
+
+         {
+         0x0c00c000, 0x00000000, 0x00060000, 0x00000000,
+         0xe7e7e0e0, 0x004e0004, 0x07ffffff, 0xffc01380,
+         0x0101ffff, 0xfffff104, 0xe000c06a, 0xafffc85c,
+         0x01380010, 0x1fffffff, 0xff000000, 0x00000000,
+         0x01b9e000, 0x0001b8c0, 0x00000000, 0x0fffff00,
+         0x000fffff, 0x00000000, 0x00000000, 0x00400000,
+         0x00001000, 0x00000004, 0x00000000, 0x01000000,
+         0x0000ffff, 0xfffffff0, 0x00000000, 0x0fffffff,
+         0xffff0000, 0x00000000, 0x6fffffff, 0xffffffff,
+         0xfff55ff5, 0xffffffff, 0xffffffff, 0xf0000000,
+         0xf0000000, 0x00003c00, 0x01ff0001, 0x08007fc2,
+         0x02c1001f, 0xf0807100, 0x1bfc200c, 0x4806ff00,
+         0x03f001ff, 0xfe003c00, 0x7fff800f, 0x001fffe0,
+         0x03c007ff, 0xf800f001, 0xfffe003c, 0x007fff80,
+         0x0f001fff, 0xe003c007, 0xfff800f0, 0x01fffe00,
+         0x3c007fff, 0x800f001f, 0xffe003c0, 0x07fff800,
+         0xf001fffe, 0x003c007f, 0xff800f00, 0x1fffe003,
+         0xc007fff8, 0x00f001ff, 0xfe003c00, 0x7fff800f,
+         0x001fffe0, 0x03c007ff, 0xf800f001, 0xfffe003c,
+         0x007fff80, 0x0f001fff, 0xe003c007, 0xfff800f0,
+         0x01fffe00, 0x3c007fff, 0x800f001f, 0xffe00000,
+         0x00000000, 0x00000000, 0x00000000, 0x00000000,
+         0x6fff0000, 0x00000000, 0x60000000, 0x00000000,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+         0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+         0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+         0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+         0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+         0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+         0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+         0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff },
+
+/* tlbHandMiss
+ *
+ * ctr0: counts TLB misses 
+ * ctr1: counts dmisses inside tlb miss handlers 
+ * ctr2: counts cycles in the tlb miss handlers 
+ * ctr3: counts overflows of ctr2 
+ */
+{
+0x1c00c000,00000000,0x00060000,00000000,
+0xe7e7e0e0,0x004e0004,0x07ffffff,0xffc01380,
+0x0101ffff,0xfffff104,0xe000c06a,0xafffc85c,
+0x01380010,0x1fffffff,0xff000000,00000000,
+0x01b9e000,0x0001b8c0,00000000,0x0fffff00,
+0x000fffff,00000000,00000000,0x00400000,
+0x00001000,0x00000004,00000000,0x01000000,
+0x0000ffff,0xfffffff0,00000000,0x0fffffff,
+0xffff0000,00000000,0x6fffffff,0xffffffff,
+0xfff55ff5,0xffffffff,0xffffffff,0xf0000000,
+0xf0000000,0x00003c00,0x01fd0000,0x08007f42,
+0x0281001f,0xd080a100,0x19f42008,0x44067d08,
+0x0612019f,0x400084c0,0x67d00060,0x0047f400,
+0x042011fd,0x080b0404,0x7f4202c4,0x0167d080,
+0x311059f4,0x201c4816,0x7d000313,0x059f4001,
+0xfc007fff,0x800f001f,0xffe003c0,0x07fff800,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x6fff0000,00000000,0x60000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+/* branch_taken image (ptkn image)
+ *
+ * ctr0: overflow for ctr1
+ * ctr1: predicted taken branches, actually taken
+ * ctr2: all predicted taken branches (nullfied or not)
+ * ctr3: overflow for ctr2
+ */
+
+        {
+        0xcc01e000, 0x00000000, 0x00060000, 0x00000000,
+        0xa08080a0, 0xffffffff, 0xffffffff, 0xffffffff,
+        0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+        0xffffffff, 0xffffffff, 0xff000000, 0x00000000,
+        0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+        0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+        0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+        0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+        0xffff0000, 0x00000000, 0xffffffff, 0xffffffff,
+        0xffffffff, 0xffffffff, 0xffffffff, 0xf0000000,
+        0xf0000000, 0x00003c00, 0x04f90000, 0x02013e40,
+        0x0081004f, 0x90004060, 0x13e40018, 0x0024f900,
+        0x0802093e, 0x40028102, 0x4f9000c0, 0x6093e400,
+        0x380014f9, 0x00010205, 0x3e4000c1, 0x014f9000,
+        0x506053e4, 0x001c0034, 0xf9000902, 0x0d3e4002,
+        0xc1034f90, 0x00d060d3, 0xe4003fff, 0xffffffff,
+        0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+        0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+        0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+        0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+        0xffffffff, 0xffffffff, 0xffffffff, 0xfffffc00,
+        0x00000000, 0x00000000, 0x00000000, 0x00000000,
+        0xffff0000, 0x00000000, 0xf0000000, 0x00000000,
+        0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+        0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+        0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+        0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+        0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+        0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+        0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+        0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+        0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+        0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+        0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+        0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+        0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+        0xffffffff, 0xffffffff },
+
+/* branch_nottaken (pntkn image)
+ *
+ * ctr0: overflow for ctr1
+ * ctr1: counts branches predicted not-taken, but actually taken
+ * ctr2: counts all predictable branches predicted not-taken
+ * ctr3: overflow for ctr2
+ */
+{
+0xcc01e000,00000000,0x00060000,00000000,
+0xc0c0c0e0,0xffb1fffb,0xfff7ffff,0xffffffff,
+0xffffffff,0xfffffffb,0x1fffbfff,0x7fffffff,
+0xfcc7ffff,0xffdffffa,0x5f000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+
+/* imiss image
+ *
+ * ctr0 : counts imiss aligned on 0
+ * ctr1 : counts imiss aligned on 4
+ * ctr2 : counts imiss aligned on 8
+ * ctr3 : counts imiss aligned on C
+ */
+         {
+         0x0c00c000, 0x00000000, 0x00010000, 0x00000000,
+         0xe7ebedee, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xff000000, 0x00000000,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffff0000, 0x00000000, 0x6fffffff, 0xffffffff,
+         0xfff55fff, 0xffffffff, 0xffffffff, 0xf0000000,
+         0xf0000000, 0x00003c00, 0x007f0000, 0x01001fc0,
+         0x00408007, 0xf0002030, 0x01fc000c, 0x10007f00,
+         0x0405001f, 0xc0014180, 0x07f00060, 0x7001fc00,
+         0x1c20007f, 0x00080900, 0x1fc00242, 0x8007f000,
+         0xa0b001fc, 0x002c3000, 0x7f000c0d, 0x001fc003,
+         0x438007f0, 0x00e0f001, 0xfc003fff, 0xfffff800,
+         0xfffffffe, 0x003fffff, 0xff800fff, 0xffffe003,
+         0xfffffff8, 0x00ffffff, 0xfe003fff, 0xffff800f,
+         0xffffffe0, 0x03ffffff, 0xf800ffff, 0xfffe003f,
+         0xffffff80, 0x0fffffff, 0xe003ffff, 0xfff800ff,
+         0xfffffe00, 0x3fffffff, 0x800fffff, 0xffe00000,
+         0x00000000, 0x00000000, 0x00000000, 0x00000000,
+         0x6fff0000, 0x00000000, 0x60000000, 0x00000000,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+         0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+         0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+         0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+         0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+         0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+         0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+         0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff},
+
+/* dmiss image
+ * 
+ * ctr0 : counts cycles
+ * ctr1 : counts cycles where something retired
+ * ctr2 : counts dmisses
+ * ctr3 : (same as ctr2)
+ */
+         {
+         0x3c00c000, 0x00000000, 0x00060000, 0x00000000,
+         0xe0e0e0e0, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xff000000, 0x00000000,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffff0000, 0x00000000, 0x6fffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xf0000000,
+         0xf0000000, 0x00003c04, 0x007f0009, 0x02001fc0,
+         0x0280c007, 0xf000b040, 0x01fc0030, 0x14007f00,
+         0x0d06001f, 0xc00381c0, 0x07f000f0, 0x8001fc00,
+         0x2024007f, 0x00090a00, 0x1fc00282, 0xc007f000,
+         0xb0c001fc, 0x00303400, 0x7f000d0e, 0x001fc003,
+         0x83c007f0, 0x00f00001, 0xfc0023ff, 0xfffff800,
+         0xfffffffe, 0x003fffff, 0xff800fff, 0xffffe003,
+         0xfffffff8, 0x00ffffff, 0xfe003fff, 0xffff800f,
+         0xffffffe0, 0x03ffffff, 0xf800ffff, 0xfffe003f,
+         0xffffff80, 0x0fffffff, 0xe003ffff, 0xfff800ff,
+         0xfffffe00, 0x3fffffff, 0x800fffff, 0xffe00000,
+         0x00000000, 0x00000000, 0x00000000, 0x00000000,
+         0x6fff0000, 0x00000000, 0x60000000, 0x00000000,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+         0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+         0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+         0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+         0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+         0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+         0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+         0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+         0xffffffff, 0xffffffff },
+
+/* dcmiss 
+ *
+ * ctr0: counts store instructions retired 
+ * ctr1: counts load instructions retired
+ * ctr2: counts dmisses 
+ * ctr3: counts READ_SHARED_OR_PRIV and READ_PRIVATE transactions on Runway 
+ */
+{
+0x2c90c000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x6fffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf00000e8,0x00003c02,0x00bf0001,0x02002fc0,
+0x0080a00b,0xf0003040,0x02fc0010,0x1200bf00,
+0x0506002f,0xc00181a0,0x0bf00070,0x8002fc00,
+0x202200bf,0x00090a00,0x2fc00282,0xa00bf000,
+0xb0c002fc,0x00303200,0xbf000d0e,0x002fc003,
+0x83a00bf0,0x00ffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0x6fff0000,00000000,0x60000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0x55555555,0xd5555555,
+0x55555555,0x75555555,0x5e1ffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0xf8000000,00000000,
+00000000,00000000,0xf4000000,00000000,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+00000000,00000000,0x00ffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* big_cpi
+ *
+ * ctr0: counts total cycles 
+ * ctr1: counts overflows of ctr0 (for greater than 32-bit values) 
+ * ctr2: counts overflows of ctr3 (for greater than 32-bit values) 
+ * ctr3: counts unnullified instructions retired 
+ */
+{
+0x0c00c000,00000000,0x00060000,00000000,
+0xe7e7e0e0,0x004e0004,0x07ffffff,0xffc01380,
+0x0101ffff,0xfffff104,0xe000c06a,0xafffc85c,
+0x01380010,0x1fffffff,0xff000000,00000000,
+0x01b9e000,0x0001b8c0,00000000,0x0fffff00,
+0x000fffff,00000000,00000000,0x00400000,
+0x00001000,0x00000004,00000000,0x01000000,
+0x0000ffff,0xfffffff0,00000000,0x0fffffff,
+0xffff0000,00000000,0x6fffffff,0xffffffff,
+0xfff55ff5,0xffffffff,0xffffffff,0xf0000000,
+0xf0000010,0x00003c00,0x01760008,0x00025d80,
+0x02800417,0x6000c001,0x25d80038,0x04017600,
+0x0901025d,0x8002c044,0x176000d0,0x1125d800,
+0x3c2001f6,0x08080400,0x7d820203,0x001f6080,
+0x804027d8,0x20282009,0xf6080a0c,0x027d8202,
+0x81041f60,0x80c08107,0xd8203030,0x41f6080c,
+0x04127d82,0x0382049f,0x6080e0c1,0x27d82038,
+0x4006f608,0x081011bd,0x82030400,0xef6080a1,
+0x013bd820,0x384806f6,0x00081211,0xbd800304,
+0x80ef6000,0xa1213bd8,0x003bc007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x6fff0000,00000000,0x60000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* big_ls
+ *
+ * ctr0:counts the total number of cycles for which local_stall_A1 is asserted. 
+ * ctr1: is the overflow for counter 0. 
+ * ctr2: counts IFLUSH_AV 
+ * ctr3: is the overflow for counter 2. 
+ */
+{
+0x0c000000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x0fffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x00029408,0x02f50002,0x0800bd40,
+0x0202802f,0x5000a000,0x4bd40004,0x0812f500,
+0x030804bd,0x40024281,0x2f5000b0,0x010bd400,
+0x100842f5,0x00060810,0xbd400302,0x842f5000,
+0xe0014bd4,0x00140852,0xf5000708,0x14bd4003,
+0x42852f50,0x00ff001f,0xffe003c0,0x07fff800,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x0df70000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* br_abort
+ *
+ * ctr0: counts BRAD_STALLH 
+ * ctr1: counts ONE_QUAD 
+ * ctr2: counts BR0_ABRT 
+ * ctr3: counts BR1_ABRT
+ */
+{
+0x0c002000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x1fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x1a250000,00000000,0x10000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+/* isnt
+ *
+ * ctr0: counts the total number of cycles for which iside_notrans is asserted 
+ * ctr1: counts the number of times iside_notrans is asserted for 1-4 cycles 
+ * ctr2: counts the number of times iside_notrans is asserted for 5-7 cycles 
+ * ctr3: counts the number of times iside_notrans is asserted for > 7 cycles 
+ */
+{
+0x0c018000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xcfffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x00021c20,0x03ff0808,0x1800ffc4,
+0x0204003f,0xf0004280,0x0ffc6020,0x8003ff00,
+0x043800ff,0xc8020c00,0x3ff00044,0x800ffca0,
+0x210003ff,0x00045800,0xffcc0214,0x003ff000,
+0x26800ffc,0xe0218003,0xff000278,0x00ffd002,
+0x1c003ff0,0x0028800f,0xfd002200,0x03ff0001,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0xcdff0000,00000000,0xc0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+/* quadrant
+ *
+ * ctr0: Total number of instructions in quadrant 0 
+ * ctr1: Total number of instructions in quadrant 1 
+ * ctr2: Total number of instructions in quadrant 2 
+ * ctr3: Total number of instructions in quadrant 3 
+ * Works only with 32-bit
+ */
+
+   {
+   0x0c01e000,   0x00000000,   0x00060000,   0x00000000,
+   0xe0e0e0e0,   0x004e0004,   0x07ffffff,   0xffc01380,
+   0x0101ffff,   0xfffff004,   0xe000407f,   0xfffffffc,
+   0x01380010,   0x1fffffff,   0xff000000,   0x00000000,
+   0x00000fff,   0xff00000f,   0xffff0000,   0x0fffff00,
+   0x000fffff,   0x00000000,   0x00000000,   0x00ffffff,
+   0xffcff000,   0x0000040f,   0xfffffffc,   0xff000000,
+   0x0080ffff,   0xffffcff0,   0x0000000c,   0x0fffffff,
+   0xfcff0000,   0x00000000,   0xffffffff,   0xffffffff,
+   0xfff55ff5,   0x5fffffff,   0xffffffff,   0xf0000000,
+   0xf00000f0,   0x00003c00,   0x007f0000,   0x01001fc0,
+   0x00408007,   0xf0002030,   0x01fc000c,   0x10007f00,
+   0x0405001f,   0xc0014180,   0x07f00060,   0x7001fc00,
+   0x1c20007f,   0x00080900,   0x1fc00242,   0x8007f000,
+   0xa0b001fc,   0x002c3000,   0x7f000c0d,   0x001fc003,
+   0x438007f0,   0x00e0f001,   0xfc003fff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xfffffc00,
+   0x00000000,   0x00000000,   0x00000000,   0x00000000,
+   0xffff0000,   0x00000000,   0xf0000000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xfffffc00,   0x00000000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xfffffc00,   0x00000000,
+   0xffffffff,   0xffffffff,   0xf3ffffff,   0xffffffff,
+   0xfdffffff,   0xffffffff,   0xfe000000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xf3ffffff,   0xffffffff,
+   0xfdffffff,   0xffffffff,   0xfe000000,   0x00000000,
+   0xffffffff,   0xfffff9ff,   0xfe000000,   0x00000000,
+   0xffffffff,   0xfffff9ff,   0xfe000000,   0x00000000,
+   0x00030000,   0x00000000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff},
+
+/* rw_pdfet (READ_PRIV transactions)
+ *
+ * ctr0: counts address valid cycles 
+ * ctr1: counts *all* data valid cycles 
+ * ctr2: is the overflow from counter 0 
+ * ctr3: is the overflow from counter 1 
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x0000000c,0x00003c00,0x07930000,0x0041e4c0,
+0x01002079,0x3000800c,0x1e4c0030,0x00279300,
+0x010049e4,0xc0014022,0x79300090,0x0c9e4c00,
+0x34004793,0x00020051,0xe4c00180,0x24793000,
+0xa00d1e4c,0x00380067,0x93000300,0x59e4c001,
+0xc0267930,0x00b00d9e,0x4c003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0xf8000000,00000000,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff},
+
+/* rw_wdfet (WRITEBACKS)
+ *
+ * ctr0: counts address valid cycles 
+ * ctr1: counts *all* data valid cycles 
+ * ctr2: is the overflow from counter 0 
+ * ctr3: is the overflow from counter 1
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x0000000c,0x00003c00,0x07930000,0x0041e4c0,
+0x01002079,0x3000800c,0x1e4c0030,0x00279300,
+0x010049e4,0xc0014022,0x79300090,0x0c9e4c00,
+0x34004793,0x00020051,0xe4c00180,0x24793000,
+0xa00d1e4c,0x00380067,0x93000300,0x59e4c001,
+0xc0267930,0x00b00d9e,0x4c003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0x98000000,00000000,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff},
+
+/* shlib_cpi
+ *
+ * ctr0: Total number of instructions in quad 0 
+ * ctr1: Total number of CPU clock cycles in quad 0 
+ * ctr2: total instructions without nullified   
+ * ctr3: total number of CPU clock cycles 
+ */
+   {
+   0x0c01e000,   0x00000000,   0x00060000,   0x00000000,
+   0xe0e0e0e0,   0x004e0004,   0x07ffffff,   0xffc01380,
+   0x0101ffff,   0xfffff004,   0xe000407f,   0xfffffffc,
+   0x01380010,   0x1fffffff,   0xff000000,   0x00000000,
+   0x00000fff,   0xff00000f,   0xffffffff,   0xffffffff,
+   0xffffffff,   0x00000000,   0x00000000,   0x00ffffff,
+   0xffcff000,   0x0000000f,   0xfffffffc,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffff0000,   0x00000000,   0xffffffff,   0xffffffff,
+   0xfff77ff5,   0x7fffffff,   0xffffffff,   0xf0000000,
+   0xf00000a0,   0x00003c00,   0x01ff0005,   0x08007fc0,
+   0x03c1001f,   0xf08030c0,   0x07fc203c,   0x4001ff08,
+   0x0118007f,   0xc003c500,   0x1ff08031,   0xc007fc00,
+   0x3fffffff,   0xf800ffff,   0xfffe003f,   0xffffff80,
+   0x0fffffff,   0xe003ffff,   0xfff800ff,   0xfffffe00,
+   0x3fffffff,   0x800fffff,   0xffe003ff,   0xfffff800,
+   0xfffffffe,   0x003fffff,   0xff800fff,   0xffffe003,
+   0xfffffff8,   0x00ffffff,   0xfe003fff,   0xffff800f,
+   0xffffffe0,   0x03ffffff,   0xf800ffff,   0xfffe003f,
+   0xffffff80,   0x0fffffff,   0xe003ffff,   0xfff800ff,
+   0xfffffe00,   0x3fffffff,   0x800fffff,   0xffe00000,
+   0x00000000,   0x00000000,   0x00000000,   0x00000000,
+   0xffff0000,   0x00000000,   0xf0000000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xfffffc00,   0x00000000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xfffffc00,   0x00000000,
+   0xffffffff,   0xffffffff,   0xf3ffffff,   0xffffffff,
+   0xfdffffff,   0xffffffff,   0xfe000000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xf3ffffff,   0xffffffff,
+   0xfdffffff,   0xffffffff,   0xfe000000,   0x00000000,
+   0xffffffff,   0xfffff9ff,   0xfe000000,   0x00000000,
+   0xffffffff,   0xfffff9ff,   0xfe000000,   0x00000000,
+   0x00030000,   0x00000000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff},
+
+
+/* addr_inv_abort_alu
+ *
+ * ctr0: counts ABORT_ALU0L 
+ * ctr1: counts ABORT_ALU1L 
+ * ctr2: counts ADDR0_INVALID 
+ * ctr3: counts ADDR1_INVALID 
+ */
+
+{
+0x0c00c000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x6fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000d,0x01001fc0,
+0x03008007,0xf000f030,0x01fc0038,0x10007f00,
+0x0905001f,0xc0020180,0x07f000b0,0x7001fc00,
+0x2820007f,0x00050900,0x1fc00102,0x8007f000,
+0x70b001fc,0x00183000,0x7f00010d,0x001fc000,
+0x038007f0,0x0030f001,0xfc000bff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x65380000,00000000,0x60000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+
+
+/* brad_stall
+ *
+ * ctr0: counts the total number of cycles for which brad_stall is asserted 
+ * ctr1: counts the number of times brad_stall is asserted for 1-4 cycles 
+ * ctr2: counts the number of times brad_stall is asserted for 5-7 cycles 
+ * ctr3: counts the number of times brad_stall is asserted for > 7 cycles 
+ */
+{
+0x0c002000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x1fffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x00021c20,0x03ff0808,0x1800ffc4,
+0x0204003f,0xf0004280,0x0ffc6020,0x8003ff00,
+0x043800ff,0xc8020c00,0x3ff00044,0x800ffca0,
+0x210003ff,0x00045800,0xffcc0214,0x003ff000,
+0x26800ffc,0xe0218003,0xff000278,0x00ffd002,
+0x1c003ff0,0x0028800f,0xfd002200,0x03ff0001,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x1bff0000,00000000,0x10000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+/* cntl_in_pipel
+ *
+ * ctr0: counts the total number of cycles for which cntl_in_pipel is asserted 
+ * ctr1: counts the number of times cntl_in_pipel is asserted for 1-4 cycles 
+ * ctr2: counts the number of times cntl_in_pipel is asserted for 5-7 cycles 
+ * ctr3: counts the number of times cntl_in_pipel is asserted for > 7 cycles 
+ */
+{
+0x0c006000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x3fffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x00021c00,0x03ff0808,0x1000ffc4,
+0x0206003f,0xf0004200,0x0ffc6020,0xa003ff00,
+0x043000ff,0xc8020e00,0x3ff00044,0x000ffca0,
+0x212003ff,0x00045000,0xffcc0216,0x003ff000,
+0x26000ffc,0xe021a003,0xff000270,0x00ffd002,
+0x1e003ff0,0x0028000f,0xfd002220,0x03ff0001,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x3fff0000,00000000,0x30000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+
+/* dsnt_xfh
+ *
+ * ctr0: counts dside_notrans 
+ * ctr1: counts xfhang 
+ * ctr2: is the overflow for ctr0 
+ * ctr3: is the overflow for ctr1 
+ */
+{
+0x0c018000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xcfffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x00030000,0x01f30000,0x00087cc0,
+0x0040041f,0x30002001,0x87cc000c,0x1001f300,
+0x0404087c,0xc0014104,0x1f300060,0x4187cc00,
+0x1c2001f3,0x00080808,0x7cc00242,0x041f3000,
+0xa08187cc,0x002c3001,0xf3000c0c,0x087cc003,
+0x43041f30,0x00e0c187,0xcc003fc0,0x07fff800,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0xcb3f0000,00000000,0xc0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff }, 
+
+/* fet_sig1
+ *
+ * ctr0: counts ICORE_AV 
+ * ctr1: counts ITRANS_STALL 
+ * ctr2: counts SEL_PCQH 
+ * ctr3: counts OUT_OF_CONTEXT 
+ */
+{
+0x0c000000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x0fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x07c10000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+/* fet_sig2
+ *
+ * ctr0: counts ICORE_AV  
+ * ctr1: counts IRTN_AV 
+ * ctr2: counts ADDRESS_INC 
+ * ctr3: counts ADDRESS_DEC 
+ */
+{
+0x0c000000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x0fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x06930000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* g7_1
+ *
+ * ctr0: counts HIT_RETRY0 
+ * ctr1: counts HIT_RETRY1 
+ * ctr2: counts GO_TAG_E 
+ * ctr3: counts GO_TAG_O 
+ */
+{
+0x0c00e000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x7fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x71c10000,00000000,0x70000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* g7_2
+ *
+ * ctr0: counts HIT_DM0 
+ * ctr1: counts HIT_DM1 
+ * ctr2: counts GO_STORE_E 
+ * ctr3: counts GO_STORE_O 
+ */
+{
+0x0c00e000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x7fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x72930000,00000000,0x70000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* g7_3
+ *
+ * ctr0: counts HIT_DV0 
+ * ctr1: counts HIT_DV1 
+ * ctr2: counts STBYPT_E (load bypasses from store queue) 
+ * ctr3: counts STBYPT_O
+ */
+{
+0x0c00e000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x7fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f0002,0x01001fc0,
+0x00c08007,0xf0000030,0x01fc0004,0x10007f00,
+0x0605001f,0xc001c180,0x07f00040,0x7001fc00,
+0x1420007f,0x000a0900,0x1fc002c2,0x8007f000,
+0x80b001fc,0x00243000,0x7f000e0d,0x001fc003,
+0xc38007f0,0x00c0f001,0xfc0037ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x77250000,00000000,0x70000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* g7_4
+ *
+ * ctr0: counts HIT_DIRTY0 
+ * ctr1: counts HIT_DIRTY1 
+ * ctr2: counts CA_BYP_E (quick launch) 
+ * ctr3: counts CA_BYP_O 
+ */
+{
+0x0c00e000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x7fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x7bb70000,00000000,0x70000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+
+/* mpb_labort
+ *
+ * ctr0: counts L_ABORT_ALU0L
+ * ctr1: counts L_ABORT_ALU1L 
+ * ctr2: counts MPB0H 
+ * ctr3: counts MPB1H 
+ */
+{
+0x0c00c000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x6fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x605c0000,00000000,0x60000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* panic
+ *
+ * ctr0: is the overflow for counter 1 
+ * ctr1: counts traps and RFI's 
+ * ctr2: counts panic traps 
+ * ctr3: is the overflow for counter 2
+ */
+{
+0x0c002000,00000000,0x00060000,00000000,
+0xe7efe0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffffc,
+0x41380030,0x1aabfff2,0x17000000,00000000,
+0x01b80000,0x3effffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,00000000,0x00400000,
+0x00001fff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x1fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+0xb0000000,0x00012c04,0x05790804,0x14013e44,
+0x0008004f,0x90000040,0x15e46000,0xc0047920,
+0x004a003e,0x40011080,0x0f900024,0x4003e460,
+0x00c80479,0x00023301,0x1e400100,0x4157d080,
+0x514053f4,0x40048014,0xfd000104,0x055f4600,
+0x4c0147d2,0x0014a043,0xf4001508,0x10fd0003,
+0x44043f46,0x004c8147,0xd0003330,0x51f40014,
+0x04257908,0x0c14093e,0x44020802,0x4f900080,
+0x4095e460,0x20c02479,0x20084a08,0x3e400310,
+0x820f9000,0xa44083e4,0x6020c824,0x79000a33,
+0x091e4003,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x10400000,00000000,0x10000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* rare_inst
+ *
+ * ctr0: counts sync and syncdma instructions 
+ * ctr1: counts pxtlbx,x instructions 
+ * ctr2: counts ixtlbt instructions 
+ * ctr3: counts cycles 
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0x004e000c,0x000843fc,0x85c09380,
+0x0121ebfd,0xff217124,0xe0004000,0x943fc85f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xe00000e0,0x00003c00,0x007f0001,0x01001fc0,
+0x00408007,0xf0003030,0x01fc000c,0x10007f00,
+0x0505001f,0xc0014180,0x07f00070,0x7001fc00,
+0x1c20007f,0x00090900,0x1fc00242,0x8007f000,
+0xb0b001fc,0x002c3000,0x7f000d0d,0x001fc003,
+0x438007f0,0x00f0f001,0xfc003fff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* rw_dfet (for D-cache misses and writebacks)
+ *
+ * ctr0: counts address valid cycles 
+ * ctr1: counts *all* data valid cycles 
+ * ctr2: is the overflow from counter 0 
+ * ctr3: is the overflow from counter 1 
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x0000000c,0x00003c00,0x07930000,0x0041e4c0,
+0x01002079,0x3000800c,0x1e4c0030,0x00279300,
+0x010049e4,0xc0014022,0x79300090,0x0c9e4c00,
+0x34004793,0x00020051,0xe4c00180,0x24793000,
+0xa00d1e4c,0x00380067,0x93000300,0x59e4c001,
+0xc0267930,0x00b00d9e,0x4c003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0xf0000000,00000000,
+00000000,00000000,0x98000000,00000000,
+0xffffffff,0xffffffff,0x0fffffff,0xffffffff,
+00000000,00000000,0x00ffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* rw_ifet (I-cache misses -- actually dumb READ transactions)
+ *
+ * ctr0: counts address valid cycles 
+ * ctr1: counts *all* data valid cycles 
+ * ctr2: is the overflow from counter 0 
+ * ctr3: is the overflow from counter 1 
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x0000000c,0x00003c00,0x07930000,0x0041e4c0,
+0x01002079,0x3000800c,0x1e4c0030,0x00279300,
+0x010049e4,0xc0014022,0x79300090,0x0c9e4c00,
+0x34004793,0x00020051,0xe4c00180,0x24793000,
+0xa00d1e4c,0x00380067,0x93000300,0x59e4c001,
+0xc0267930,0x00b00d9e,0x4c003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0xd0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+0xffffffff,0xffffffff,00000000,00000000,
+0xffffffff,0xffffffff },
+
+
+/* rw_sdfet (READ_SHARED_OR_PRIVATE transactions)
+ *
+ * ctr0: counts address valid cycles
+ * ctr1: counts *all* data valid cycles 
+ * ctr2: is the overflow from counter 0 
+ * ctr3: is the overflow from counter 1 
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x0000000c,0x00003c00,0x07930000,0x0041e4c0,
+0x01002079,0x3000800c,0x1e4c0030,0x00279300,
+0x010049e4,0xc0014022,0x79300090,0x0c9e4c00,
+0x34004793,0x00020051,0xe4c00180,0x24793000,
+0xa00d1e4c,0x00380067,0x93000300,0x59e4c001,
+0xc0267930,0x00b00d9e,0x4c003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0xf4000000,00000000,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff },
+
+
+/* spec_ifet
+ *
+ * ICORE_AV fires for every request which the Instruction Fetch Unit sends
+ * to the Runway Interface Block.  Hence, this counts all I-misses, speculative
+ * or not, but does *not* include I-cache prefetches, which are generated by
+ * RIB.
+ * IRTN_AV fires twice for every I-cache miss returning from RIB to the IFU.
+ * It will not fire if a second I-cache miss is issued from the IFU to RIB
+ * before the first returns.  Therefore, if the IRTN_AV count is much less
+ * than 2x the ICORE_AV count, many speculative I-cache misses are occurring
+ * which are "discovered" to be incorrect fairly quickly.
+ * The ratio of I-cache miss transactions on Runway to the ICORE_AV count is
+ * a measure of the effectiveness of instruction prefetching.  This ratio
+ * should be between 1 and 2.  If it is close to 1, most prefetches are
+ * eventually called for by the IFU; if it is close to 2, almost no prefetches
+ * are useful and they are wasted bus traffic.
+ *
+ * ctr0: counts ICORE_AV 
+ * ctr1: counts IRTN_AV 
+ * ctr2: counts all non-coherent READ transactions on Runway. (TTYPE D0) 
+ *	This should be just I-cache miss and I-prefetch transactions.
+ * ctr3: counts total processor cycles 
+ */
+{
+0x0c000000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x0fffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x00000008,0x00030c00,0x01bf0001,0x00806fc0,
+0x00c1001b,0xf0005048,0x06fc001c,0x2001bf00,
+0x0908806f,0xc002c300,0x1bf000d0,0xc806fc00,
+0x3fffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0x06bf0000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00110000,00000000,0xd0ffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+0xffffffff,0xffffffff,00000000,00000000,
+0xffffffff,0xffffffff },
+
+/* st_cond0
+ *
+ * ctr0: is the overflow for ctr1
+ * ctr1: counts major ops 0C and 0E (fp ops, not fmac or fmpyadd) 
+ * ctr2: counts B,L (including long and push) and GATE (including nullified),
+ *	 predicted not-taken
+ * ctr3: is the overflow for ctr2 
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0e0c0e0,0xffffffff,0xffffffff,0xffc13380,
+0x0101ffff,0xffa1f057,0xe000407f,0xdfffc87f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_cond1
+ *
+ * ctr0: is the overflow for ctr1 
+ * ctr1: counts major ops 1x (most of the load/stores) 
+ * ctr2: counts CMPB (dw) predicted not-taken 
+ * ctr3: is the overflow for ctr2 
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0e0c0e0,0xffffffff,0xffffffff,0xffc01b80,
+0x0101ffff,0xffb7f03d,0xe000407f,0xffffc8ff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_cond2
+ *
+ * ctr0: is the overflow for ctr1 
+ * ctr1: counts major op 03 
+ * ctr2: counts CMPIB (dw) predicted not taken. 
+ * ctr3: is the overflow for ctr2 
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0e0c0e0,0xffffffff,0xffffffff,0xffc09780,
+0x0101ffff,0xff21f077,0xe000407f,0xffffc87f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_cond3
+ *
+ * ctr0: is the overflow for ctr1 
+ * ctr1: counts major ops 06 & 26 
+ * ctr2: counts BB, BVB, MOVB, MOVIB (incl. nullified) predicted not-taken 
+ * ctr3: is the overflow for ctr2 
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0e0c0e0,0xffffffff,0xffffffff,0xffc03780,
+0x0101ffff,0xff29f016,0xe000407f,0xffffe97f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_cond4
+ *
+ * ctr0: is the overflow for ctr1 
+ * ctr1: counts major op 2E 
+ * ctr2: counts CMPB, CMPIB, ADDB, ADDIB (incl. nullified) predicted not-taken 
+ * ctr3: is the overflow for ctr2 
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0e0c0e0,0xffffffff,0xffffffff,0xffc17780,
+0x0101ffff,0xff21f014,0xe000407f,0xffffe9ff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_unpred0
+ *
+ * ctr0: is the overflow for ctr1 
+ * ctr1: counts BE and BE,L 
+ * ctr2: counts BE and BE,L including nullified 
+ * ctr3: is the overflow for ctr2 
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0c0c0e0,0xffffffff,0xffffffff,0xffdf5bbf,
+0xffffffff,0xff25f7d6,0xefffffff,0xffffc97f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_unpred1
+ *
+ * ctr0: is the overflow for ctr1 
+ * ctr1: counts BLR, BV, BVE, BVE,L 
+ * ctr2: counts BLR, BV, BVE, BVE,L including nullified 
+ * ctr3: is the overflow for ctr2 
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0c0c0e0,0xffffffff,0xffffffff,0xffc15f80,
+0x0501ff7f,0xff21f057,0xe001407f,0xdfffc87f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* unpred
+ *
+ * ctr0: counts non-nullified unpredictable branches 
+ * ctr1: is the overflow for ctr0 
+ * ctr2: counts all unpredictable branches (nullified or not) 
+ * ctr3: is the overflow for ctr2 
+ */
+{
+0xcc01e000,00000000,0x00060000,00000000,
+0x20202020,0xff31ffff,0xfff7fffe,0x97ffcc7f,
+0xfffffdff,0xffa5fff3,0x1fffffff,0x7fffe97f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf00000a0,0x00003c00,0x02f50000,0x0004bd40,
+0x0040802f,0x50002020,0x4bd4000c,0x0042f500,
+0x040014bd,0x40014084,0x2f500060,0x214bd400,
+0x1c2002f5,0x00080804,0xbd400242,0x802f5000,
+0xa0a04bd4,0x002c2042,0xf5000c08,0x14bd4003,
+0x42842f50,0x00e0a14b,0xd4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+   
+
+/* go_store
+ *
+ * ctr0: Overflow for counter 2 
+ * ctr1: Overflow for counter 3 
+ * ctr2: count of GO_STORE_E signal 
+ * ctr3: count of GO_STORE_O signal 
+ */
+
+   {
+   0x0c00e000,   0x00000000,   0x00060000,   0x00000000,
+   0xe0e0e0e0,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffa5ffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xff000000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0x00000000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffff0000,   0x00000000,   0x7fffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xf0000000,
+   0x00000000,   0x0000c000,   0x067c0000,   0x01019f00,
+   0x00408067,   0xc0002030,   0x19f0000c,   0x000e7c00,
+   0x0401039f,   0x00014080,   0xe7c00060,   0x3039f000,
+   0x1c00167c,   0x00080105,   0x9f000240,   0x8167c000,
+   0xa03059f0,   0x002c001e,   0x7c000c01,   0x079f0003,
+   0x4081e7c0,   0x00e03079,   0xf0003fc0,   0x07fff800,
+   0xf001fffe,   0x003c007f,   0xff800f00,   0x1fffe003,
+   0xc007fff8,   0x00f001ff,   0xfe003c00,   0x7fff800f,
+   0x001fffe0,   0x03c007ff,   0xf800f001,   0xfffe003c,
+   0x007fff80,   0x0f001fff,   0xe003c007,   0xfff800f0,
+   0x01fffe00,   0x3c007fff,   0x800f001f,   0xffe00000,
+   0x00000000,   0x00000000,   0x00000000,   0x00000000,
+   0x70130000,   0x00000000,   0x70000000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xfffffc00,   0x00000000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xfffffc00,   0x00000000,
+   0xffffaaaa,   0xffffffff,   0xf3ffffff,   0xffffffff,
+   0xfdffffff,   0xffffffff,   0xfe000000,   0x00000000,
+   0xffffaaaa,   0xffffffff,   0xf3ffffff,   0xffffffff,
+   0xfdffffff,   0xffffffff,   0xfe000000,   0x00000000,
+   0xffffffff,   0xfffff9ff,   0xfe000000,   0x00000000,
+   0xffffffff,   0xfffff9ff,   0xfe000000,   0x00000000,
+   0x00030000,   0x00000000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff
+   },
+
+
+/* shlib_call
+ *
+ * ctr0: SharedLib call Depth1 
+ * ctr1: SharedLib call Depth2 
+ * ctr2: SharedLib call Depth3 
+ * ctr3: SharedLib call Depth>3 
+ */
+   {
+   0x0c01e000,   0x00000000,   0x00060000,   0x00000000,
+   0xe0e0e0e0,   0xc76fa005,   0x07dd7e9c,   0x87115b80,
+   0x01100200,   0x07200004,   0xe000407f,   0xfffffffc,
+   0x01380010,   0x1fffffff,   0xff000000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0x00000000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffff0000,   0x00000000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xf0000000,
+   0xf0000000,   0x00003c20,   0x01ff0808,   0x04007fc0,
+   0x0003001f,   0xf0000180,   0x07fc4010,   0x5001ff00,
+   0x001c007f,   0xc2000a00,   0x1ff18022,   0x4007fc20,
+   0x00b001ff,   0x10003800,   0x7fc8004d,   0x001ff100,
+   0x03c007fc,   0x60012001,   0xff280144,   0x007fc600,
+   0x13001ff2,   0x00058007,   0xfcc00550,   0x01ff2000,
+   0x5c007fca,   0x001a001f,   0xf3801640,   0x07fca001,
+   0xb001ff30,   0x0078007f,   0xd0005d00,   0x1ff30007,
+   0xc007fce0,   0x022001ff,   0x48018400,   0x7fce0023,
+   0x001ff400,   0x098007fd,   0x20065001,   0xff40009c,
+   0x007fd200,   0x3fffffff,   0x800fffff,   0xffe00000,
+   0x00000000,   0x00000000,   0x00000000,   0x00000000,
+   0xffff0000,   0x00000000,   0xf0000000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xfffffc00,   0x00000000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xfffffc00,   0x00000000,
+   0xffffffff,   0xffffffff,   0xf3ffffff,   0xffffffff,
+   0xfdffffff,   0xffffffff,   0xfe000000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xf3ffffff,   0xffffffff,
+   0xfdffffff,   0xffffffff,   0xfe000000,   0x00000000,
+   0xffffffff,   0xfffff9ff,   0xfe000000,   0x00000000,
+   0xffffffff,   0xfffff9ff,   0xfe000000,   0x00000000,
+   0x00030000,   0x00000000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff
+   }
+};
+/* Size in bytes of one PCXW perfmon counter image (144 32-bit words);
+ * used below to dimension each row of cuda_images[] as
+ * PCXW_IMAGE_SIZE/sizeof(uint32_t) entries. */
+#define PCXW_IMAGE_SIZE 576
+
+static uint32_t cuda_images[][PCXW_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = {
+/*
+ * CPI:     FROM CPI.IDF (Image 0)
+ *
+ * Counts the following:
+ *
+ * ctr0 : total cycles
+ * ctr1 : total cycles where nothing retired
+ * ctr2 : total instructions retired, including nullified
+ * ctr3 : total instructions retired, less nullified instructions
+ */
+   {
+   0x4c00c000,   0x00000000,   0x00060000,   0x00000000, 
+   0xe0e0e0e0,   0x00001fff,   0xfc00007f,   0xfff00001, 
+   0xffffc000,   0x07ffff00,   0x07ffffff,   0x6007ffff, 
+   0xff0007ff,   0xffff0007,   0xffffff00,   0x00000000, 
+   0x60f00000,   0x0fffff00,   0x000fffff,   0x00000fff, 
+   0xff00000f,   0xffff0000,   0x00000000,   0x00ffffff, 
+   0xfffff000,   0x0000000f,   0xffffffff,   0xff000000, 
+   0x0000ffff,   0xfffffff0,   0x00000000,   0x0fffffff, 
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000, 
+   0x00000000,   0x00000000,   0x00270000,   0x00000055, 
+   0x0200000e,   0x4d300000,   0x00000000,   0x0ff00002, 
+   0x70000000,   0x00000020,   0x0000e400,   0x00000ff0, 
+   0x00000000,   0x00000000,   0x00000055,   0xffffff00, 
+   0x00000000,   0x0000ff00,   0x00000000,   0x0f000000, 
+   0x0000055f,   0xfffff000,   0x00000000,   0x000ff000, 
+   0x00000000,   0x00000000,   0x000055ff,   0xffff0000, 
+   0x00000000,   0x00ff0000,   0x00000000,   0xf0000000, 
+   0x000055ff,   0xffff0000,   0x00000000,   0x00ff0000, 
+   0x00000000,   0x00000000,   0x00055fff,   0xfff00000, 
+   0x00000000,   0x0ff00000,   0x00000030,   0x00000000, 
+   0x00157fff,   0xffc00000,   0x034c0000,   0x00000000, 
+   0x03fc0000,   0x00000000,   0x6fff0000,   0x00000000, 
+   0x60000000,   0x00000000,   0x00ffffff,   0xff3fffff, 
+   0xffffffff,   0xffcfffff,   0xfff7fbfc,   0x00000000, 
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff, 
+   0xfff7fbfc,   0x00000000,   0xffffafff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffafff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0x00030000,   0x00000000, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+/* Bus utilization image   FROM BUS_UTIL.IDF (Image 1)
+ *
+ * ctr0 : counts address valid cycles
+ * ctr1 : counts data valid cycles
+ * ctr2 : counts overflow from counter 0
+ * ctr3 : counts overflow from counter 1
+ */
+         {
+	 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+	 0xefefefef, 0xffffffff, 0xffffffff, 0xffffffff,
+	 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+	 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+	 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+	 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+	 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+	 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+	 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+	 0x00000000, 0x00000000, 0x00001b00, 0xaa000000,
+	 0x00000001, 0x30700000, 0x00055aaf, 0xf0000000,
+	 0x01b00000, 0x00000000, 0x00001037, 0x00000000,
+	 0x55aaff00, 0x00c00000, 0x1b55aa00, 0x00000000,
+	 0x0001fff0, 0xcfffff00, 0x00000000, 0x0f0fffff,
+	 0xffffffff, 0xffffffff, 0x30ffff0c, 0xfffff000,
+	 0x00000000, 0x00ffffff, 0xffffffff, 0xfffffff3,
+	 0x0ffff0cf, 0xffff0000, 0x00000000, 0x00ffffff,
+	 0xffffffff, 0xfffffff3, 0x0ffff0cf, 0xffff0000,
+	 0x00000000, 0x0fffffff, 0xffffffff, 0xffffff30,
+	 0xfff70000, 0x000055aa, 0xff000000, 0x000006d5,
+	 0x40000000, 0x00000000, 0x731c0000, 0x000156ab,
+	 0xfc000000, 0x00000000, 0xffff0000, 0x00000000,
+	 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+	 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+	 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+	 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+	 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+	 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+	 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+	 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+	 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+	 0xfe000000, 0x00000000, 0x00100000, 0x00000000,
+	 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+	 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+	 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+	 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+   },
+
+/*
+ * TLB counts:    FROM TLBSTATS.IDF (Image 2)
+ *
+ * Counts the following:
+ *
+ * ctr0: DTLB misses
+ * ctr1: ITLB misses
+ * ctr2: total cycles in the miss handlers
+ * ctr3: total cycles
+ */
+
+   {
+   0x0c00c000,   0x00000000,   0x00060000,   0x00000000, 
+   0xe7e7e0e0,   0x00001fff,   0xfc00007f,   0xfff00001, 
+   0xfff00000,   0x07ffff00,   0x07ffffff,   0x6007ffff, 
+   0xa00007ff,   0xffff0007,   0xffffff00,   0x00000000, 
+   0x603001c1,   0xe0000001,   0xc0c00000,   0x00000fff, 
+   0xff00000f,   0xffff0000,   0x00000000,   0x00400000, 
+   0x00001000,   0x00000004,   0x00000000,   0x01000000, 
+   0x0000ffff,   0xfffffff0,   0x00000000,   0x0fffffff, 
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000, 
+   0x00000000,   0x00000000,   0x00800000,   0x00153f7f, 
+   0x55000000,   0xaf800000,   0xc0000000,   0x0403f240, 
+   0x00000000,   0x00001010,   0x00004700,   0x00000ff0, 
+   0x00000000,   0x00000000,   0x00000055,   0xffffff00, 
+   0x00000000,   0x0000ff00,   0x00000000,   0x0f000000, 
+   0x0000055f,   0xfffff000,   0x00000000,   0x000ff000, 
+   0x00000000,   0x00000000,   0x000055ff,   0xffff0000, 
+   0x00000000,   0x00ff0000,   0x00000000,   0xf0000000, 
+   0x000055ff,   0xffff0000,   0x00000000,   0x00ff0000, 
+   0x00000000,   0x00000000,   0x00055fff,   0xfff00000, 
+   0x00000000,   0x0ff00000,   0x00000000,   0x00000000, 
+   0x00157fff,   0xffc00000,   0x00000000,   0x3fc00000, 
+   0x00040000,   0x00000000,   0x6fff0000,   0x00000000, 
+   0x60000000,   0x00000000,   0x00ffffff,   0xff3fffff, 
+   0xffffffff,   0xffcfffff,   0xfff7fbfc,   0x00000000, 
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff, 
+   0xfff7fbfc,   0x00000000,   0xffffafff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffafff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0x00030000,   0x00000000, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+/* tlbhandler  FROM tlbHandMiss.idf (Image 3)
+ *
+ * ctr0: TLB misses
+ * ctr1: dmisses inside the TLB miss handler
+ * ctr2: cycles in the TLB miss handler
+ * ctr3: overflow of ctr2
+ */
+   {
+   0x1c00c000,   0x00000000,   0x00060000,   0x00000000, 
+   0xe7e7e0e0,   0x00001fff,   0xfc00007f,   0xfff00001, 
+   0xfff00000,   0x07ffff00,   0x07ffffff,   0x6007ffff, 
+   0xa00007ff,   0xffff0007,   0xffffff00,   0x00000000, 
+   0x603001c1,   0xe0000001,   0xc0c00000,   0x00000fff, 
+   0xff00000f,   0xffff0000,   0x00000000,   0x00400000, 
+   0x00001000,   0x00000004,   0x00000000,   0x01000000, 
+   0x0000ffff,   0xfffffff0,   0x00000000,   0x0fffffff, 
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000, 
+   0x00000000,   0x00000000,   0x006c0000,   0x01000054, 
+   0x02000002,   0xc3200000,   0xc00aa000,   0x0c03f240, 
+   0x00000000,   0x00001010,   0x000044f4,   0x00000c00, 
+   0xaa0000f0,   0x0f0000b0,   0x00005005,   0x0f5f0000, 
+   0x0001f000,   0x0000ff00,   0x00000000,   0x0f000000, 
+   0x0000055f,   0xfffff000,   0x00000000,   0x000ff000, 
+   0x00000000,   0x00000000,   0x000055ff,   0xffff0000, 
+   0x00000000,   0x00ff0000,   0x00000000,   0xf0000000, 
+   0x000055ff,   0xffff0000,   0x00000000,   0x00ff0000, 
+   0x00000000,   0x00000000,   0x00055fff,   0xfff00000, 
+   0x00000000,   0x0ff00a00,   0x000f0000,   0x24004000, 
+   0x15400001,   0x40c00003,   0x3da00000,   0x0002a800, 
+   0x00ff0000,   0x00000000,   0x6fff0000,   0x00000000, 
+   0x60000000,   0x00000000,   0x00ffffff,   0xff3fffff, 
+   0xffffffff,   0xffcfffff,   0xfff7fbfc,   0x00000000, 
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff, 
+   0xfff7fbfc,   0x00000000,   0xffffafff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffafff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0x00030000,   0x00000000, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+/* branch_taken image  FROM PTKN.IDF (Image 4)
+ *
+ * ctr0: mispredicted branches
+ * ctr1: predicted taken branches, actually taken
+ * ctr2: predicted taken branches (includes nullified)
+ * ctr3: all branches
+ */
+
+   {
+   0xcc01e000,   0x00000000,   0x00000000,   0x00000000, 
+   0xa08080a0,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xfffffeff,   0xfffeffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffff00,   0x00000000, 
+   0xf4ffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000, 
+   0x00000000,   0x00000000,   0xd22d0000,   0x00000000, 
+   0x0000000b,   0x46000000,   0x00000000,   0x0ffff900, 
+   0x90000000,   0x00000000,   0x0000907e,   0x00000000, 
+   0x000000ff,   0xff00bfdf,   0x03030303,   0x03030000, 
+   0x000dbfff,   0xffffff00,   0x00000000,   0x0f0fffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xfffff000, 
+   0x00000000,   0x00ffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0x00000000,   0xf0ffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffff0000, 
+   0x00000000,   0x0fffffff,   0xffffffff,   0xffffffff, 
+   0xffff5555,   0x55500000,   0x003f3ff0,   0x2766c000, 
+   0x00000000,   0x00000002,   0x67840000,   0x00000000, 
+   0x03fffc00,   0x00000000,   0xffff0000,   0x00000000, 
+   0xf0000000,   0x00000000,   0x00ffffff,   0xff3fffff, 
+   0xffffffff,   0xffcfffff,   0xfff7fbfc,   0x00000000, 
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff, 
+   0xfff7fbfc,   0x00000000,   0xffffffff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffffff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0x00030000,   0x00000000, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+/* branch_nottaken  FROM PNTKN.IDF (Image 5)
+ *
+ * ctr0: mispredicted branches
+ * ctr1: branches predicted not-taken, but actually taken
+ * ctr2: branches predicted not-taken (includes nullified)
+ * ctr3: all branches
+ */
+   {
+   0xcc01e000,   0x00000000,   0x00000000,   0x00000000, 
+   0xe0c0c0e0,   0xffffffff,   0xffffffff,   0xffefffff, 
+   0xffffbfff,   0xfffffeff,   0xfffeffff,   0xfffffeff, 
+   0xfffffffe,   0xffffffff,   0xffffff00,   0x00000000, 
+   0xf4ffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000, 
+   0x00000000,   0x00000000,   0xd22d0000,   0x00000000, 
+   0x0000000b,   0x46000000,   0x00000000,   0x0ffff900, 
+   0x90000000,   0x00000000,   0x0000907e,   0x00000000, 
+   0x000000ff,   0xff00bfdf,   0x03030303,   0x03030000, 
+   0x000dbfff,   0xffffff00,   0x00000000,   0x0f0fffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xfffff000, 
+   0x00000000,   0x00ffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0x00000000,   0xf0ffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffff0000, 
+   0x00000000,   0x0fffffff,   0xffffffff,   0xffffffff, 
+   0xffff5555,   0x55500000,   0x003f3ff0,   0x2766c000, 
+   0x00000000,   0x00000002,   0x67840000,   0x00000000, 
+   0x03fffc00,   0x00000000,   0xffff0000,   0x00000000, 
+   0xf0000000,   0x00000000,   0x00ffffff,   0xff3fffff, 
+   0xffffffff,   0xffcfffff,   0xfff7fbfc,   0x00000000, 
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff, 
+   0xfff7fbfc,   0x00000000,   0xffffffff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffffff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0x00030000,   0x00000000, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+   
+/* IMISS image (Image 6)
+ *
+ * ctr0 : icache misses for retired instructions
+ * ctr1 : total cycles
+ * ctr2 : dcache misses for retired instructions
+ * ctr3 : number of retired instructions
+ */
+   {
+   0x2801e000,   0x00000000,   0x00010000,   0x00000000, 
+   0x00001000,   0xffffffff,   0xffffffff,   0xfff00fff, 
+   0xfffa3fff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffff00,   0x00000000, 
+   0xf0ffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000, 
+   0x00000000,   0x00000000,   0xf2fdf0f0,   0xf0f0f0f0, 
+   0xffffffff,   0xf6c00000,   0x00000000,   0x0ff55800, 
+   0x90000000,   0x00000000,   0x0000b0ff,   0xfffffff0, 
+   0x00000003,   0x0100bfff,   0x3f3f3f3f,   0x3f3f5555, 
+   0x555fffff,   0xffffff00,   0x00000000,   0x000fffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xfffff000, 
+   0x00000000,   0x00ffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0x00000000,   0xf0ffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffff0000, 
+   0x00000000,   0x0fffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xfff00000,   0x000301b0,   0x2fefcfcf, 
+   0xcfcfcfcf,   0xd5555557,   0xf7b40000,   0x00000000, 
+   0x03c14000,   0x00000000,   0xffff0000,   0x00000000, 
+   0xf0000000,   0x00000000,   0x00ffffff,   0xff3fffff, 
+   0xffffffff,   0xffcfffff,   0xfff6fb7c,   0x00000000, 
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff, 
+   0xfff6fb7c,   0x00000000,   0xffff0fff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffff0fff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0x00130000,   0x00000000, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+/* DMISS image (Image 7)
+ *
+ * ctr0 : icache misses for retired instructions
+ * ctr1 : total cycles
+ * ctr2 : dcache misses for retired instructions
+ * ctr3 : number of retired instructions
+ */
+   {
+   0x2801e000,   0x00000000,   0x00010000,   0x00000000, 
+   0x00001000,   0xffffffff,   0xffffffff,   0xfff00fff, 
+   0xfffa3fff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffff00,   0x00000000, 
+   0xf0ffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000, 
+   0x00000000,   0x00000000,   0xf2fdf0f0,   0xf0f0f0f0, 
+   0xffffffff,   0xf6c00000,   0x00000000,   0x0ff55800, 
+   0x90000000,   0x00000000,   0x0000b0ff,   0xfffffff0, 
+   0x00000003,   0x0100bfff,   0x3f3f3f3f,   0x3f3f5555, 
+   0x555fffff,   0xffffff00,   0x00000000,   0x000fffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xfffff000, 
+   0x00000000,   0x00ffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0x00000000,   0xf0ffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffff0000, 
+   0x00000000,   0x0fffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xfff00000,   0x000301b0,   0x2fefcfcf, 
+   0xcfcfcfcf,   0xd5555557,   0xf7b40000,   0x00000000, 
+   0x03c14000,   0x00000000,   0xffff0000,   0x00000000, 
+   0xf0000000,   0x00000000,   0x00ffffff,   0xff3fffff, 
+   0xffffffff,   0xffcfffff,   0xfff6fb7c,   0x00000000, 
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff, 
+   0xfff6fb7c,   0x00000000,   0xffff0fff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffff0fff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0x00130000,   0x00000000, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+/* dmiss_access image    FROM DMISS_RATIO.IDF  (Image 8)
+ * 
+ * ctr0 : all loads and stores that retire (even lines)
+ * ctr1 : all loads and stores that retire (odd lines)
+ * ctr2 : dcache misses of retired loads/stores
+ * ctr3 : all READ_PRIV and READ_SHAR_OR_PRIV on Runway
+ *        (Speculative and Non-Speculative)
+ */
+   {
+   0x2d81e000,   0x00000000,   0x00000000,   0x00000000, 
+   0x10101010,   0x00ffffff,   0xa003ffff,   0xfe800fff, 
+   0xfffa003f,   0xffffe8ff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffff00,   0x00000000, 
+   0xf0ffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000, 
+   0x00000000,   0x00000000,   0xd2280a00,   0x00000000, 
+   0x0000000b,   0x46000000,   0x00000005,   0x555ff900, 
+   0x80200000,   0x00000000,   0x0000907e,   0x00000000, 
+   0x00005555,   0xff80bf8b,   0xab030303,   0x03030000, 
+   0x000dbfff,   0xffffff00,   0x00000000,   0x000fffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xfffff000, 
+   0x00000000,   0x00ffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0x00000000,   0xf0ffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffff0000, 
+   0x00000000,   0x0fffffff,   0xffffffff,   0xffffffff, 
+   0xffff5555,   0x55500000,   0x15153fe0,   0x27628880, 
+   0x00000000,   0x00000002,   0x67840000,   0x00000001, 
+   0x5557fc00,   0x00000000,   0xffff0000,   0x00000000, 
+   0xf0000000,   0x00000000,   0x00ffffff,   0xff3fffff, 
+   0xffffffff,   0xffcfffff,   0xfff6fb7c,   0x00000000, 
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff, 
+   0xfff6fb7c,   0x00000000,   0xffff0fff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffff0fff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0x00110000,   0x00000000, 
+   0xf4ffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xf8ffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0x00ffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0x00ffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+
+/* big_cpi image  (Image 9)
+ * 
+ * ctr0 : Total number of CPU clock cycles. 
+ * ctr1 : Unused 
+ * ctr2 : Unused
+ * ctr3 : Total number of Non-Nullified instructions retired. 
+ */
+   {
+   0x0c00c000,   0x00000000,   0x00060000,   0x00000000,
+   0xe7e7e0e0,   0x00001fff,   0xfc00007f,   0xfff00001,
+   0xfff00000,   0x07ffff00,   0x07ffffff,   0x6007ffff,
+   0xa00007ff,   0xffff0007,   0xffffff00,   0x00000000,
+   0x603001c1,   0xe0000001,   0xc0c00000,   0x00000fff,
+   0xff00000f,   0xffff0000,   0x00000000,   0x00400000,
+   0x00001000,   0x00000004,   0x00000000,   0x01000000,
+   0x0000ffff,   0xfffffff0,   0x00000000,   0x0fffffff,
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000,
+   0x00000000,   0x00000000,   0x00550005,   0x00220000,
+   0x0000000c,   0x71f00000,   0x00f00aa0,   0x0aaff000,
+   0x00005002,   0x20000000,   0x0000c413,   0x00000c0f,
+   0x00aa0000,   0xff00b600,   0x000500a0,   0x00000300,
+   0x000cc3f0,   0x0000c0f0,   0x0aa0000f,   0xff000000,
+   0x011000a0,   0x05503000,   0x00d03700,   0x00000f00,
+   0xaa005500,   0x00000000,   0x000055ff,   0xffff0000,
+   0x00000000,   0x00ff0000,   0x00000000,   0xf000aa00,
+   0x11000a00,   0x55000000,   0x0d037000,   0x00c0f00a,
+   0xa0055000,   0x0db00005,   0x5002a000,   0x00300000,
+   0xf40f0000,   0x0c0f00aa,   0x0000ff10,   0x27400000,
+   0x00008000,   0x00c00003,   0x037c0000,   0x003c02a8,
+   0x02abfc00,   0x00000000,   0x6fff0000,   0x00000000,
+   0x60000000,   0x00000000,   0x00ffffff,   0xff3fffff,
+   0xffffffff,   0xffcfffff,   0xfff7fbfc,   0x00000000,
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff,
+   0xfff7fbfc,   0x00000000,   0xffffafff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffafff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0x00030000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+/* big_ls image  (Image 10)
+ * 
+ * ctr0 : Total number of CPU clock cycles during which local_stall_A1 is asserted 
+ * ctr1 : Overflow of Counter 0 
+ * ctr2 : Total number of IFLUSH_AV 
+ * ctr3 : Overflow of Counter 2 
+ */
+   {
+   0x0c000000,   0x00000000,   0x00060000,   0x00000000,
+   0xefefefef,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffff00,   0x00000000,
+   0x00ffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffff0000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000,
+   0x00000000,   0x00000000,   0x28880001,   0x54000000,
+   0x00000004,   0xb6200000,   0x000aaaa0,   0x05555288,
+   0x80000010,   0x00000000,   0x0000486e,   0x00000000,
+   0xaaaa0055,   0x55002888,   0x00545401,   0x03030000,
+   0x0007b000,   0x0000ff00,   0x00000000,   0x05000000,
+   0x0000055f,   0xfffff000,   0x00000000,   0x000ff000,
+   0x00000000,   0x00000000,   0x000055ff,   0xffff0000,
+   0x00000000,   0x00ff0000,   0x00000000,   0x00000000,
+   0x000055ff,   0xffff0000,   0x00000000,   0x00ff0000,
+   0x00000000,   0xa0000000,   0x00055fff,   0xfff00000,
+   0x00aa0000,   0x05502a2a,   0x00151500,   0x0a220015,
+   0x40400000,   0x00000001,   0xe2980000,   0x0002aaa8,
+   0x01555400,   0x00000000,   0x0df70000,   0x00000000,
+   0x00000000,   0x00000000,   0x00ffffff,   0xff3fffff,
+   0xffffffff,   0xffcfffff,   0xfff7fbfc,   0x00000000,
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff,
+   0xfff7fbfc,   0x00000000,   0xffffffff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0x00030000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+/* br_abort image  (Image 12)
+ * 
+ * ctr0 : Total number of BRAD_STALLH
+ * ctr1 : Total number of ONE_QUAD
+ * ctr2 : Total number of BR0_ABRT
+ * ctr3 : Total number of BR1_ABRT
+ */
+
+   {
+   0x0c002000,   0x00000000,   0x00060000,   0x00000000,
+   0xe0e0e0e0,   0xffffffff,   0xffffffff,   0xff0fffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffff00,   0x00000000,
+   0x1077ffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffff0000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000,
+   0x00000000,   0x00000000,   0x551b0000,   0x00000000,
+   0x0000000c,   0xd4f00000,   0x00000000,   0x0ffff001,
+   0xb0000000,   0x00000000,   0x0000fd4c,   0x00000000,
+   0x000000ff,   0xff00ff1b,   0x00000000,   0x00000000,
+   0x0000d000,   0x0000ff00,   0x00000000,   0x0e0fffff,
+   0xffffffff,   0xfffff000,   0x00000000,   0x000ff000,
+   0x00000000,   0x00ffffff,   0xffffffff,   0xffff0000,
+   0x00000000,   0x00ff0000,   0x00000000,   0x00ffffff,
+   0xffffffff,   0xffff0000,   0x00000000,   0x00ff0000,
+   0x00000000,   0xffffffff,   0xffffffff,   0xfff00000,
+   0x00400000,   0x00000000,   0x00ffff00,   0x2a86c000,
+   0x00000000,   0x00000000,   0xf50c0000,   0x00000000,
+   0x03fffc00,   0x00000000,   0x1a250000,   0x00000000,
+   0x10000000,   0x00000000,   0x00ffffff,   0xff3fffff,
+   0xffffffff,   0xffcfffff,   0xfff7fbfc,   0x00000000,
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff,
+   0xfff7fbfc,   0x00000000,   0xffffafff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffafff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0x00030000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+
+/* isnt image  (Image 13)
+ * 
+ * ctr0 : Total number of cycles for which iside_notrans is asserted. 
+ * ctr1 : Total number of times iside_notrans is asserted for 1-4 cycles. 
+ * ctr2 : Total number of times iside_notrans is asserted for 5-7 cycles. 
+ * ctr3 : Total number of times iside_notrans is asserted for > 7 cycles. 
+ */
+
+   {
+   0x0c018000,   0x00000000,   0x00060000,   0x00000000,
+   0xefefefef,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffff00,   0x00000000,
+   0xc0ffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffff0000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000,
+   0x00000000,   0x00000000,   0x22000000,   0x000001bc,
+   0x10000006,   0x00900000,   0x50000000,   0x00055a20,
+   0x00000000,   0x00016060,   0x0000c021,   0x00000540,
+   0x00000000,   0x55002200,   0x00000000,   0x56bc4000,
+   0x00048000,   0x0000ff00,   0x00000000,   0x17000000,
+   0x0000055f,   0xfffff000,   0x00000000,   0x000ff000,
+   0x00000000,   0x00000000,   0x000055ff,   0xffff0000,
+   0x00000000,   0x00ff0000,   0x00000000,   0x00000000,
+   0x000055ff,   0xffff0000,   0x00000000,   0x00ff0000,
+   0x00000000,   0x80000000,   0x00015bf3,   0xf5500000,
+   0x02210000,   0x00100000,   0x00005500,   0x08800000,
+   0x00001545,   0x85000001,   0x80240000,   0x11000000,
+   0x00015400,   0x00000000,   0xcdff0000,   0x00000000,
+   0xc0000000,   0x00000000,   0x00ffffff,   0xff3fffff,
+   0xffffffff,   0xffcfffff,   0xfff7fbfc,   0x00000000,
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff,
+   0xfff7fbfc,   0x00000000,   0xffffffff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0x00030000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+/* quadrant image  (image 14)
+ * 
+ * ctr0 : Total number of instructions in quadrant 0. 
+ * ctr1 : Total number of instructions in quadrant 1. 
+ * ctr2 : Total number of instructions in quadrant 2. 
+ * ctr3 : Total number of instructions in quadrant 3. 
+ *
+ * Only works for 32-bit applications.
+ */
+
+   {
+   0x0c01e000,   0x00000000,   0x00060000,   0x00000000,
+   0xe0e0e0e0,   0x00001fff,   0xfc00007f,   0xfff00001,
+   0xffffc000,   0x07ffff00,   0x07ffffff,   0x0007ffff,
+   0xff0007ff,   0xffff0007,   0xffffff00,   0x00000000,
+   0xf0000000,   0x0fffff00,   0x000fffff,   0x00000fff,
+   0xff00000f,   0xffff0000,   0x00000000,   0x00ffffff,
+   0xffcff000,   0x0000040f,   0xfffffffc,   0xff000000,
+   0x0080ffff,   0xffffcff0,   0x0000000c,   0x0fffffff,
+   0xfcff0000,   0x00000000,   0x00000000,   0x00000000,
+   0x00000000,   0x00000000,   0x551b0000,   0x00000000,
+   0x00000003,   0x17000000,   0x00000000,   0x0ffff001,
+   0xb0000000,   0x00000000,   0x00000173,   0x00000000,
+   0x000000ff,   0xff00ff1b,   0x00000000,   0x00000000,
+   0x000f1ff0,   0xcfffff00,   0x00000000,   0x0f0fffff,
+   0xffffffff,   0xffffffff,   0x30ffff0c,   0xfffff000,
+   0x00000000,   0x00ffffff,   0xffffffff,   0xfffffff3,
+   0x0ffff0cf,   0xffff0000,   0x00000000,   0xf0ffffff,
+   0xffffffff,   0xfffffff3,   0x0ffff0cf,   0xffff0000,
+   0x00000000,   0x0fffffff,   0xffffffff,   0xffffff30,
+   0xff7f0000,   0x00000000,   0x00fffff0,   0x2a86c000,
+   0x00000000,   0x00000003,   0x05f00000,   0x00000000,
+   0x03fffc00,   0x00000000,   0xffff0000,   0x00000000,
+   0xf0000000,   0x00000000,   0x00ffffff,   0xff3fffff,
+   0xffffffff,   0xffcfffff,   0xfff7fbfc,   0x00000000,
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff,
+   0xfff7fbfc,   0x00000000,   0xffffffff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0x00030000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+/* rw_pdfet image (Image 15)
+ * 
+ * ctr0 : Total of all READ_PRIV address valid cycles. 
+ * ctr1 : Total of all READ_PRIV data valid cycles. 
+ * ctr2 : Overflow of Counter 0. 
+ * ctr3 : Overflow of Counter 1. 
+ */
+
+   {
+   0x0c01e000,   0x00000000,   0x00060000,   0x00000000,
+   0xefefefef,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffff00,   0x00000000,
+   0xf0ffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffff0000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000,
+   0x00000000,   0x00000000,   0x00001b00,   0xaa000000,
+   0x00000001,   0x30700000,   0x00055aaf,   0xf0000000,
+   0x01b00000,   0x00000000,   0x00001037,   0x00000000,
+   0x55aaff00,   0x00c00000,   0x1b55aa00,   0x00000000,
+   0x0001fff0,   0xcfffff00,   0x00000000,   0x0f0fffff,
+   0xffffffff,   0xffffffff,   0x30ffff0c,   0xfffff000,
+   0x00000000,   0x00ffffff,   0xffffffff,   0xfffffff3,
+   0x0ffff0cf,   0xffff0000,   0x00000000,   0x00ffffff,
+   0xffffffff,   0xfffffff3,   0x0ffff0cf,   0xffff0000,
+   0x00000000,   0x0fffffff,   0xffffffff,   0xffffff30,
+   0xfff70000,   0x000055aa,   0xff000000,   0x000006d5,
+   0x40000000,   0x00000000,   0x731c0000,   0x000156ab,
+   0xfc000000,   0x00000000,   0xffff0000,   0x00000000,
+   0xf0000000,   0x00000000,   0x00ffffff,   0xff3fffff,
+   0xffffffff,   0xffcfffff,   0xfff7fbfc,   0x00000000,
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff,
+   0xfff7fbfc,   0x00000000,   0xffffffff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0x00100000,   0x00000000,
+   0xf8000000,   0x00000000,   0x00000000,   0x00000000,
+   0x00000000,   0x00000000,   0xffffffff,   0xffffffff,
+   0x00ffffff,   0xffffffff,   0x00000000,   0x00000000,
+   0x00000000,   0x00000000,   0xffffffff,   0xffffffff,
+   },
+
+
+/* rw_wdfet image  (Image 16)
+ * 
+ * ctr0 : Counts total number of writeback transactions. 
+ * ctr1 : Total number of data valid Runway cycles. 
+ * ctr2 : Overflow of Counter 0. 
+ * ctr3 : Overflow of Counter 1. 
+ */
+
+   {
+   0x0c01e000,   0x00000000,   0x00060000,   0x00000000,
+   0xefefefef,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffff00,   0x00000000,
+   0xf0ffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffff0000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000,
+   0x00000000,   0x00000000,   0x00001b00,   0xaa000000,
+   0x00000001,   0x30700000,   0x00055aaf,   0xf0000000,
+   0x01b00000,   0x00000000,   0x00001037,   0x00000000,
+   0x55aaff00,   0x00c00000,   0x1b55aa00,   0x00000000,
+   0x0001fff0,   0xcfffff00,   0x00000000,   0x0f0fffff,
+   0xffffffff,   0xffffffff,   0x30ffff0c,   0xfffff000,
+   0x00000000,   0x00ffffff,   0xffffffff,   0xfffffff3,
+   0x0ffff0cf,   0xffff0000,   0x00000000,   0x00ffffff,
+   0xffffffff,   0xfffffff3,   0x0ffff0cf,   0xffff0000,
+   0x00000000,   0x0fffffff,   0xffffffff,   0xffffff30,
+   0xfff70000,   0x000055aa,   0xff000000,   0x000006d5,
+   0x40000000,   0x00000000,   0x731c0000,   0x000156ab,
+   0xfc000000,   0x00000000,   0xffff0000,   0x00000000,
+   0xf0000000,   0x00000000,   0x00ffffff,   0xff3fffff,
+   0xffffffff,   0xffcfffff,   0xfff7fbfc,   0x00000000,
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff,
+   0xfff7fbfc,   0x00000000,   0xffffffff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0x00100000,   0x00000000,
+   0x98000000,   0x00000000,   0x00000000,   0x00000000,
+   0x00000000,   0x00000000,   0xffffffff,   0xffffffff,
+   0x00ffffff,   0xffffffff,   0x00000000,   0x00000000,
+   0x00000000,   0x00000000,   0xffffffff,   0xffffffff,
+   },
+
+/* shlib_cpi image  (Image 17)
+ * 
+ * ctr0 : Total number of instructions in quadrant 0. 
+ * ctr1 : Total number of CPU clock cycles in quadrant 0. 
+ * ctr2 : Total number of Non-Nullified instructions retired. 
+ * ctr3 : Total number of CPU clock cycles. 
+ *
+ * Only works for 32-bit shared libraries.
+ */
+
+   {
+   0x0c01e000,   0x00000000,   0x00060000,   0x00000000,
+   0xe0e0e0e0,   0x00001fff,   0xfc00007f,   0xfff00001,
+   0xffffc000,   0x07ffff00,   0x07ffffff,   0x0007ffff,
+   0xff0007ff,   0xffff0007,   0xffffff00,   0x00000000,
+   0xf0150000,   0x0fffff00,   0x000fffff,   0xffffffff,
+   0xffffffff,   0xffff0000,   0x00000000,   0x00ffffff,
+   0xffcff000,   0x0000000f,   0xfffffffc,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000,
+   0x00000000,   0x00000000,   0x27000000,   0x00000055,
+   0x02000005,   0x7f500000,   0xc0000000,   0x000ff270,
+   0x00000000,   0x00000000,   0x00007700,   0x00000ff0,
+   0x00000000,   0x0000ffff,   0xffffffff,   0xffffff00,
+   0x00000000,   0x0000ff00,   0x00000000,   0x0f0fffff,
+   0xffffffff,   0xfffff000,   0x00000000,   0x000ff000,
+   0x00000000,   0x00ffffff,   0xffffffff,   0xffff0000,
+   0x00000000,   0x00ff0000,   0x00000000,   0xf0ffffff,
+   0xffffffff,   0xffff0000,   0x00000000,   0x00ff0000,
+   0x00000000,   0x0fffffff,   0xffffffff,   0xfff00000,
+   0x00000000,   0x0ff00000,   0x000000a0,   0x3fffffff,
+   0xffffffff,   0xffc00000,   0x03d40000,   0x20000000,
+   0x0003fc00,   0x00000000,   0xffff0000,   0x00000000,
+   0xf0000000,   0x00000000,   0x00ffffff,   0xff3fffff,
+   0xffffffff,   0xffcfffff,   0xfff7fbfc,   0x00000000,
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff,
+   0xfff7fbfc,   0x00000000,   0xffffffff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0x00030000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+/* flop image  (Image 18)
+ * 
+ * ctr0 : Total number of floating point instructions (opcode = 0xc). 
+ * ctr1 : Total number of floating point instructions (opcode = 0xe, 0x6, 0x2e, 0x26). 
+ * ctr2 : Unused
+ * ctr3 : Unused 
+ */
+
+   {
+   0x0001e000,   0x00000000,   0x00000000,   0x00000000,
+   0x00001010,   0x33ffffff,   0x006fffff,   0xfc5fffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffff00,   0x00000000,
+   0xf0ffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffff0000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000,
+   0x00000000,   0x00000000,   0xd22d0000,   0x00000000,
+   0x0000000b,   0x46000000,   0x00000000,   0x0ffff900,
+   0x90000000,   0x00000000,   0x0000907e,   0x00000000,
+   0x000000ff,   0xff00bfdf,   0x03030303,   0x03030000,
+   0x000dbfff,   0xffffff00,   0x00000000,   0x000fffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xfffff000,
+   0x00000000,   0x00ffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffff0000,   0x00000000,   0xf0ffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffff0000,
+   0x00000000,   0x0fffffff,   0xffffffff,   0xffffffff,
+   0xffff5555,   0x55500000,   0x003f3ff0,   0x2766c000,
+   0x00000000,   0x00000002,   0x67840000,   0x00000000,
+   0x03fffc00,   0x00000000,   0xffff0000,   0x00000000,
+   0xf0000000,   0x00000000,   0x00ffffff,   0xff3fffff,
+   0xffffffff,   0xffcfffff,   0xfff6fb7c,   0x00000000,
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff,
+   0xfff6fb7c,   0x00000000,   0xffff0fff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffff0fff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0x00130000,   0x00000000,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+/* cachemiss image    FROM I_D_MISSES.IDF  (Image 19)
+ *
+ * ctr0 : icache misses for retired instructions
+ * ctr1 : total cycles
+ * ctr2 : dcache misses for retired instructions
+ * ctr3 : number of retired instructions
+ */
+   {
+   0x2801e000,   0x00000000,   0x00010000,   0x00000000, 
+   0x00001000,   0xffffffff,   0xffffffff,   0xfff00fff, 
+   0xfffa3fff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffff00,   0x00000000, 
+   0xf0ffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000, 
+   0x00000000,   0x00000000,   0xf2fdf0f0,   0xf0f0f0f0, 
+   0xffffffff,   0xf6c00000,   0x00000000,   0x0ff55800, 
+   0x90000000,   0x00000000,   0x0000b0ff,   0xfffffff0, 
+   0x00000003,   0x0100bfff,   0x3f3f3f3f,   0x3f3f5555, 
+   0x555fffff,   0xffffff00,   0x00000000,   0x000fffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xfffff000, 
+   0x00000000,   0x00ffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0x00000000,   0xf0ffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffff0000, 
+   0x00000000,   0x0fffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xfff00000,   0x000301b0,   0x2fefcfcf, 
+   0xcfcfcfcf,   0xd5555557,   0xf7b40000,   0x00000000, 
+   0x03c14000,   0x00000000,   0xffff0000,   0x00000000, 
+   0xf0000000,   0x00000000,   0x00ffffff,   0xff3fffff, 
+   0xffffffff,   0xffcfffff,   0xfff6fb7c,   0x00000000, 
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff, 
+   0xfff6fb7c,   0x00000000,   0xffff0fff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffff0fff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0x00130000,   0x00000000, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+/* branch   FROM br_report3.idf 
+ *
+ * ctr0 : Total number of mispredicted branches. 
+ * ctr1 : Some Non-Nullified unpredictable branches. 
+ * ctr2 : Total number of branches (Nullified + Non-Nullified)
+ *        (Unpredicted+ Predicted Taken +Predicted Not Taken). 
+ *	  Total of All Branches.
+ * ctr3 : Remaining Non-Nullified unpredictable branches.
+ */
+   {
+   0x4001e000,   0x00000000,   0x00000000,   0x00000000, 
+   0x00000000,   0xffffffff,   0xff9fffff,   0xfe0fffff, 
+   0xffffbaff,   0xfdffc0ff,   0xfffdffff,   0xfffffeff, 
+   0xffffffff,   0xffffffff,   0xffffff00,   0x00000000, 
+   0xf4ffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000, 
+   0x00000000,   0x00000000,   0xd22d0000,   0x00000000, 
+   0x0000000b,   0x46000000,   0x00000000,   0x0ffff900, 
+   0x90000000,   0x00000000,   0x0000907e,   0x00000000, 
+   0x000000ff,   0xff00bfdf,   0x03030303,   0x03030000, 
+   0x000dbfff,   0xffffff00,   0x00000000,   0x000fffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xfffff000, 
+   0x00000000,   0x00ffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0x00000000,   0xf0ffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffff0000, 
+   0x00000000,   0x0fffffff,   0xffffffff,   0xffffffff, 
+   0xffff5555,   0x55500000,   0x003f3ff0,   0x2766c000, 
+   0x00000000,   0x00000002,   0x67840000,   0x00000000, 
+   0x03fffc00,   0x00000000,   0xffff0000,   0x00000000, 
+   0xf0000000,   0x00000000,   0x00ffffff,   0xff3fffff, 
+   0xffffffff,   0xffcfffff,   0xfff6fb7c,   0x00000000, 
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff, 
+   0xfff6fb7c,   0x00000000,   0xffff0fff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffff0fff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0x00130000,   0x00000000, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+/* crstack  FROM crs_report.idf
+ *
+ * ctr0: correctly predicted branches by the pop_latch
+ * ctr1: some procedure returns
+ * ctr2: all branches, (includes nullified)
+ * ctr3: remaining procedure returns
+ */
+   {
+   0x4001e000,   0x00000000,   0x00000000,   0x00000000, 
+   0x00000000,   0xffffffff,   0xffa10300,   0x000fffff, 
+   0xffffbaf8,   0x3000007f,   0xffffffff,   0xfffffeff, 
+   0xff7fffff,   0xffffffff,   0xffffff00,   0x00000000, 
+   0xf2ffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000, 
+   0x00000000,   0x00000000,   0xd22d0000,   0x00000000, 
+   0x0000000b,   0x46000000,   0x00000000,   0x0ffff900, 
+   0x90000000,   0x00000000,   0x0000907e,   0x00000000, 
+   0x000000ff,   0xff00bfdf,   0x03030303,   0x03030000, 
+   0x000dbfff,   0xffffff00,   0x00000000,   0x000fffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xfffff000, 
+   0x00000000,   0x00ffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffff0000,   0x00000000,   0xf0ffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffff0000, 
+   0x00000000,   0x0fffffff,   0xffffffff,   0xffffffff, 
+   0xffff5555,   0x55500000,   0x003f3ff0,   0x2766c000, 
+   0x00000000,   0x00000002,   0x67840000,   0x00000000, 
+   0x03fffc00,   0x00000000,   0xffff0000,   0x00000000, 
+   0xf0000000,   0x00000000,   0x00ffffff,   0xff3fffff, 
+   0xffffffff,   0xffcfffff,   0xfff6fb7c,   0x00000000, 
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff, 
+   0xfff6fb7c,   0x00000000,   0xffff0fff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffff0fff,   0xffffff3f, 
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc, 
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff, 
+   0xfe000000,   0x00000000,   0x00130000,   0x00000000, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff, 
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   },
+
+/* icache_report image 
+ * 
+ * ctr0 : Icache misses actually used by the core. 
+ * ctr1 : ICORE_AV (Icache misses the core THINKS it needs, including fetching down speculative paths). 
+ * ctr2 : READs on Runway (Icache misses that made it out to Runway, including
+ *	  prefetches).
+ * ctr3 : Prefetch returns (1x and 2x). 
+ */
+   {
+   0x00000000,   0x00000000,   0x00010000,   0x00000000,
+   0x00000000,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffff00,   0x00000000,
+   0x00ffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffff0000,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffff0000,   0x00000000,   0x00000000,   0x00000000,
+   0x00000000,   0x00000000,   0xd2002d00,   0x00000000,
+   0x0000000b,   0x46000000,   0x0000000f,   0xf00ff900,
+   0x00900000,   0x00000000,   0x0000907e,   0x00000000,
+   0x0000ff00,   0xff83bf03,   0xdf030303,   0x03030000,
+   0x000dbfff,   0xffffff00,   0x00000000,   0x000fffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xfffff000,
+   0x00000000,   0x00ffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffff0000,   0x00000000,   0x80ffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffff0000,
+   0x00000000,   0x4fffffff,   0xffffffff,   0xffffffff,
+   0xffff5555,   0x55500000,   0x3f003f80,   0x274026c0,
+   0x00000000,   0x00000002,   0x67840000,   0x00000003,
+   0xfc03fc00,   0x00000000,   0x0eff0000,   0x00000000,
+   0x00000000,   0x00000000,   0x00ffffff,   0xff3fffff,
+   0xffffffff,   0xffcfffff,   0xfff6fb7c,   0x00000000,
+   0x00ffffff,   0xff3fffff,   0xffffffff,   0xffcfffff,
+   0xfff6fb7c,   0x00000000,   0xffff0fff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffff0fff,   0xffffff3f,
+   0xffffffff,   0xffffff7f,   0xffffffff,   0xfffffefc,
+   0x00000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0xffffffff,   0xfffff9ff,
+   0xfe000000,   0x00000000,   0x00130000,   0x00000000,
+   0xd0ffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0x00ffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   0xffffffff,   0xffffffff,   0xffffffff,   0xffffffff,
+   
+   }
+
+};
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/process.c b/src/kernel/linux/v4.14/arch/parisc/kernel/process.c
new file mode 100644
index 0000000..77650dc
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/process.c
@@ -0,0 +1,327 @@
+/*
+ *    PARISC Architecture-dependent parts of process handling
+ *    based on the work for i386
+ *
+ *    Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
+ *    Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
+ *    Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
+ *    Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
+ *    Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
+ *    Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
+ *    Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com>
+ *    Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
+ *    Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
+ *    Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
+ *    Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
+ *    Copyright (C) 2001-2014 Helge Deller <deller@gmx.de>
+ *    Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ *
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <stdarg.h>
+
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/personality.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+#include <linux/rcupdate.h>
+#include <linux/random.h>
+#include <linux/nmi.h>
+
+#include <asm/io.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembly.h>
+#include <asm/pdc.h>
+#include <asm/pdc_chassis.h>
+#include <asm/pgalloc.h>
+#include <asm/unwind.h>
+#include <asm/sections.h>
+
+#define COMMAND_GLOBAL  F_EXTEND(0xfffe0030)
+#define CMD_RESET       5       /* reset any module */
+
+/*
+** The Wright Brothers and Gecko systems have a H/W problem
+** (Lasi...'nuf said) may cause a broadcast reset to lockup
+** the system. An HVERSION dependent PDC call was developed
+** to perform a "safe", platform specific broadcast reset instead
+** of kludging up all the code.
+**
+** Older machines which do not implement PDC_BROADCAST_RESET will
+** return (with an error) and the regular broadcast reset can be
+** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET
+** the PDC call will not return (the system will be reset).
+*/
+/* machine_restart - reboot the machine.  @cmd is the reboot command
+ * string from userspace; accepted for the generic reboot API but not
+ * used on parisc.  Tries firmware-assisted resets first, then falls
+ * back to a raw broadcast reset.  Does not return. */
+void machine_restart(char *cmd)
+{
+#ifdef FASTBOOT_SELFTEST_SUPPORT
+	/*
+	 ** If user has modified the Firmware Selftest Bitmap,
+	 ** run the tests specified in the bitmap after the
+	 ** system is rebooted w/PDC_DO_RESET.
+	 **
+	 ** ftc_bitmap = 0x1AUL "Skip destructive memory tests"
+	 **
+	 ** Using "directed resets" at each processor with the MEM_TOC
+	 ** vector cleared will also avoid running destructive
+	 ** memory self tests. (Not implemented yet)
+	 */
+	if (ftc_bitmap) {
+		pdc_do_firm_test_reset(ftc_bitmap);
+	}
+#endif
+	/* set up a new led state on systems shipped with a LED State panel */
+	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
+
+	/* "Normal" system reset */
+	pdc_do_reset();
+
+	/* Nope...box should reset with just CMD_RESET now */
+	gsc_writel(CMD_RESET, COMMAND_GLOBAL);
+
+	/* Wait for RESET to lay us to rest. */
+	while (1) ;
+
+}
+
+/* machine_halt - intentionally empty: the LED/chassis state is already
+ * updated via the reboot notifier chain before we get here. */
+void machine_halt(void)
+{
+	/*
+	** The LED/ChassisCodes are updated by the led_halt()
+	** function, called by the reboot notifier chain.
+	*/
+}
+
+void (*chassis_power_off)(void);
+
+/*
+ * This routine is called from sys_reboot to actually turn off the
+ * machine 
+ */
+/* machine_power_off - final power-off path; never returns.  Delegates
+ * to a chassis-specific handler if one registered, otherwise asks the
+ * user to press the physical button. */
+void machine_power_off(void)
+{
+	/* If there is a registered power off handler, call it. */
+	if (chassis_power_off)
+		chassis_power_off();
+
+	/* Put the soft power button back under hardware control.
+	 * If the user had already pressed the power button, the
+	 * following call will immediately power off. */
+	pdc_soft_power_button(0);
+
+	/* Signal imminent shutdown on the LED/chassis display. */
+	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
+
+	/* It seems we have no way to power the system off via
+	 * software. The user has to press the button himself. */
+
+	printk(KERN_EMERG "System shut down completed.\n"
+	       "Please power this system off now.");
+
+	/* prevent soft lockup/stalled CPU messages for endless loop. */
+	rcu_sysrq_start();
+	lockup_detector_soft_poweroff();
+	for (;;);
+}
+
+void (*pm_power_off)(void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);
+
+/* flush_thread - reset per-thread state on exec; currently a no-op. */
+void flush_thread(void)
+{
+	/* Only needs to handle fpu stuff or perf monitors.
+	** REVISIT: several arches implement a "lazy fpu state".
+	*/
+}
+
+/* release_thread - no architecture-specific per-task resources to free. */
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+/*
+ * Fill in the FPU structure for a core dump.
+ */
+
+/* Copy the saved FP register file from @regs into the dump buffer @r.
+ * Returns 1 when registers were copied, 0 when @regs is NULL. */
+int dump_fpu (struct pt_regs * regs, elf_fpregset_t *r)
+{
+	if (regs == NULL)
+		return 0;
+
+	memcpy(r, regs->fr, sizeof *r);
+	return 1;
+}
+
+/* As dump_fpu(), but reads the FP state saved in @tsk's thread struct.
+ * Always succeeds (returns 1). */
+int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
+{
+	memcpy(r, tsk->thread.regs.fr, sizeof(*r));
+	return 1;
+}
+
+/*
+ * Idle thread support
+ *
+ * Detect when running on QEMU with SeaBIOS PDC Firmware and let
+ * QEMU idle the host too.
+ */
+
+int running_on_qemu __read_mostly;
+EXPORT_SYMBOL(running_on_qemu);
+
+/* Offline this CPU: the special "or %r31,%r31,%r31" encoding is a nop
+ * on real hardware but a hint QEMU uses to offline the virtual CPU. */
+void __cpuidle arch_cpu_idle_dead(void)
+{
+	/* nop on real hardware, qemu will offline CPU. */
+	asm volatile("or %%r31,%%r31,%%r31\n":::);
+}
+
+/* Per-CPU idle entry; interrupts are re-enabled before idling as the
+ * generic idle loop expects. */
+void __cpuidle arch_cpu_idle(void)
+{
+	local_irq_enable();
+
+	/* nop on real hardware, qemu will idle sleep. */
+	asm volatile("or %%r10,%%r10,%%r10\n":::);
+}
+
+/* On real hardware the QEMU idle hint above is a plain nop, so fall
+ * back to the generic polling idle loop there. */
+static int __init parisc_idle_init(void)
+{
+	if (!running_on_qemu)
+		cpu_idle_poll_ctrl(1);
+
+	return 0;
+}
+arch_initcall(parisc_idle_init);
+
+/*
+ * Copy architecture-specific thread state.
+ *
+ * @clone_flags: clone(2) flags; CLONE_SETTLS is honoured below.
+ * @usp: new user stack pointer; for kernel threads it instead carries
+ *       the function to run (see the PF_KTHREAD branch).
+ * @kthread_arg: argument passed to a kernel thread's function.
+ * @p: the freshly-allocated child task.
+ *
+ * Returns 0 on success.
+ */
+int
+copy_thread(unsigned long clone_flags, unsigned long usp,
+	    unsigned long kthread_arg, struct task_struct *p)
+{
+	struct pt_regs *cregs = &(p->thread.regs);
+	void *stack = task_stack_page(p);
+
+	/* We have to use void * instead of a function pointer, because
+	 * function pointers aren't a pointer to the function on 64-bit.
+	 * Make them const so the compiler knows they live in .text */
+	extern void * const ret_from_kernel_thread;
+	extern void * const child_return;
+
+	if (unlikely(p->flags & PF_KTHREAD)) {
+		/* kernel thread */
+		memset(cregs, 0, sizeof(struct pt_regs));
+		if (!usp) /* idle thread */
+			return 0;
+		/* Must exit via ret_from_kernel_thread in order
+		 * to call schedule_tail()
+		 */
+		cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
+		cregs->kpc = (unsigned long) &ret_from_kernel_thread;
+		/*
+		 * Copy function and argument to be called from
+		 * ret_from_kernel_thread.
+		 */
+#ifdef CONFIG_64BIT
+		/* NOTE(review): on 64-bit the callee info is loaded from
+		 * fixed offsets at usp — presumably a function descriptor;
+		 * confirm the layout against entry.S. */
+		cregs->gr[27] = ((unsigned long *)usp)[3];
+		cregs->gr[26] = ((unsigned long *)usp)[2];
+#else
+		cregs->gr[26] = usp;
+#endif
+		cregs->gr[25] = kthread_arg;
+	} else {
+		/* user thread */
+		/* usp must be word aligned.  This also prevents users from
+		 * passing in the value 1 (which is the signal for a special
+		 * return for a kernel thread) */
+		if (usp) {
+			usp = ALIGN(usp, 4);
+			if (likely(usp))
+				cregs->gr[30] = usp;
+		}
+		cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
+		cregs->kpc = (unsigned long) &child_return;
+
+		/* Setup thread TLS area from the 4th parameter in clone */
+		if (clone_flags & CLONE_SETTLS)
+			cregs->cr27 = cregs->gr[23];
+	}
+
+	return 0;
+}
+
+/*
+ * get_wchan - find the address where a blocked task is sleeping.
+ * @p: the task to inspect.
+ *
+ * Unwinds @p's kernel stack until the first frame outside scheduler
+ * code and returns that instruction address.  Returns 0 when @p is
+ * NULL, current, runnable, when unwinding fails, or when no such
+ * frame is found within 16 steps.
+ */
+unsigned long
+get_wchan(struct task_struct *p)
+{
+	struct unwind_frame_info info;
+	unsigned long ip;
+	int count = 0;
+
+	if (!p || p == current || p->state == TASK_RUNNING)
+		return 0;
+
+	/*
+	 * These bracket the sleeping functions..
+	 */
+
+	unwind_frame_init_from_blocked_task(&info, p);
+	do {
+		if (unwind_once(&info) < 0)
+			return 0;
+		ip = info.ip;
+		if (!in_sched_functions(ip))
+			return ip;
+	} while (count++ < 16);
+	return 0;
+}
+
+#ifdef CONFIG_64BIT
+/* On 64-bit parisc a "function pointer" is really a pointer to an
+ * Elf64_Fdesc; return the actual entry address stored in it.  If the
+ * descriptor cannot be read safely, return @ptr unchanged. */
+void *dereference_function_descriptor(void *ptr)
+{
+	Elf64_Fdesc *desc = ptr;
+	void *p;
+
+	if (!probe_kernel_address(&desc->addr, p))
+		ptr = p;
+	return ptr;
+}
+#endif
+
+/* Page-aligned random offset used to randomize the heap start (ASLR). */
+static inline unsigned long brk_rnd(void)
+{
+	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
+}
+
+/* Pick a randomized brk base above mm->brk; fall back to the original
+ * brk if the randomized value wrapped below it. */
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+
+	if (ret < mm->brk)
+		return mm->brk;
+	return ret;
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/processor.c b/src/kernel/linux/v4.14/arch/parisc/kernel/processor.c
new file mode 100644
index 0000000..e120d63
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/processor.c
@@ -0,0 +1,454 @@
+/*
+ *    Initial setup-routines for HP 9000 based hardware.
+ *
+ *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
+ *    Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de>
+ *    Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
+ *    Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
+ *    Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
+ *    Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net>
+ *
+ *    Initial PA-RISC Version: 04-23-1999 by Helge Deller
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <asm/param.h>
+#include <asm/cache.h>
+#include <asm/hardware.h>	/* for register_parisc_driver() stuff */
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/pdc.h>
+#include <asm/pdcpat.h>
+#include <asm/irq.h>		/* for struct irq_region */
+#include <asm/parisc-device.h>
+
+struct system_cpuinfo_parisc boot_cpu_data __read_mostly;
+EXPORT_SYMBOL(boot_cpu_data);
+#ifdef CONFIG_PA8X00
+int _parisc_requires_coherency __read_mostly;
+EXPORT_SYMBOL(_parisc_requires_coherency);
+#endif
+
+DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
+
+/*
+**  	PARISC CPU driver - claim "device" and initialize CPU data structures.
+**
+** Consolidate per CPU initialization into (mostly) one module.
+** Monarch CPU will initialize boot_cpu_data which shouldn't
+** change once the system has booted.
+**
+** The callback *should* do per-instance initialization of
+** everything including the monarch. "Per CPU" init code in
+** setup.c:start_parisc() has migrated here and start_parisc()
+** will call register_parisc_driver(&cpu_driver) before calling do_inventory().
+**
+** The goal of consolidating CPU initialization into one place is
+** to make sure all CPUs get initialized the same way.
+** The code path not shared is how PDC hands control of the CPU to the OS.
+** The initialization of OS data structures is the same (done below).
+*/
+
+/**
+ * init_percpu_prof - enable/setup per cpu profiling hooks.
+ * @cpunum: The processor instance.
+ *
+ * FIXME: doesn't do much yet...
+ */
+static void
+init_percpu_prof(unsigned long cpunum)
+{
+}
+
+
+/**
+ * processor_probe - Determine if processor driver should claim this device.
+ * @dev: The device which has been found.
+ *
+ * Determine if processor driver should claim this chip (return 0) or not
+ * (return 1).  If so, initialize the chip and tell other partners in crime
+ * they have work to do.
+ */
+static int __init processor_probe(struct parisc_device *dev)
+{
+	unsigned long txn_addr;
+	unsigned long cpuid;
+	struct cpuinfo_parisc *p;
+	struct pdc_pat_cpu_num cpu_info = { };
+
+#ifdef CONFIG_SMP
+	if (num_online_cpus() >= nr_cpu_ids) {
+		printk(KERN_INFO "num_online_cpus() >= nr_cpu_ids\n");
+		return 1;
+	}
+#else
+	if (boot_cpu_data.cpu_count > 0) {
+		printk(KERN_INFO "CONFIG_SMP=n  ignoring additional CPUs\n");
+		return 1;
+	}
+#endif
+
+	/* logical CPU ID and update global counter
+	 * May get overwritten by PAT code.
+	 */
+	cpuid = boot_cpu_data.cpu_count;
+	txn_addr = dev->hpa.start;	/* for legacy PDC */
+	cpu_info.cpu_num = cpu_info.cpu_loc = cpuid;
+
+#ifdef CONFIG_64BIT
+	/* On PAT firmware, query the cell module for the real txn address
+	 * and the physical CPU number. */
+	if (is_pdc_pat()) {
+		ulong status;
+		unsigned long bytecnt;
+	        pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
+
+		pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
+		if (!pa_pdc_cell)
+			panic("couldn't allocate memory for PDC_PAT_CELL!");
+
+		status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc,
+			dev->mod_index, PA_VIEW, pa_pdc_cell);
+
+		BUG_ON(PDC_OK != status);
+
+		/* verify it's the same as what do_pat_inventory() found */
+		BUG_ON(dev->mod_info != pa_pdc_cell->mod_info);
+		BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location);
+
+		txn_addr = pa_pdc_cell->mod[0];   /* id_eid for IO sapic */
+
+		kfree(pa_pdc_cell);
+
+		/* get the cpu number */
+		status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);
+		BUG_ON(PDC_OK != status);
+
+		pr_info("Logical CPU #%lu is physical cpu #%lu at location "
+			"0x%lx with hpa %pa\n",
+			cpuid, cpu_info.cpu_num, cpu_info.cpu_loc,
+			&dev->hpa.start);
+
+#undef USE_PAT_CPUID
+#ifdef USE_PAT_CPUID
+/* We need contiguous numbers for cpuid. Firmware's notion
+ * of cpuid is for physical CPUs and we just don't care yet.
+ * We'll care when we need to query PAT PDC about a CPU *after*
+ * boot time (ie shutdown a CPU from an OS perspective).
+ */
+		if (cpu_info.cpu_num >= NR_CPUS) {
+			printk(KERN_WARNING "IGNORING CPU at %pa,"
+				" cpu_slot_id > NR_CPUS"
+				" (%ld > %d)\n",
+				&dev->hpa.start, cpu_info.cpu_num, NR_CPUS);
+			/* Ignore CPU since it will only crash */
+			boot_cpu_data.cpu_count--;
+			return 1;
+		} else {
+			cpuid = cpu_info.cpu_num;
+		}
+#endif
+	}
+#endif
+
+	p = &per_cpu(cpu_data, cpuid);
+	boot_cpu_data.cpu_count++;
+
+	/* initialize counters - CPU 0 gets it_value set in time_init() */
+	if (cpuid)
+		memset(p, 0, sizeof(struct cpuinfo_parisc));
+
+	p->loops_per_jiffy = loops_per_jiffy;
+	p->dev = dev;		/* Save IODC data in case we need it */
+	p->hpa = dev->hpa.start;	/* save CPU hpa */
+	p->cpuid = cpuid;	/* save CPU id */
+	p->txn_addr = txn_addr;	/* save CPU IRQ address */
+	p->cpu_num = cpu_info.cpu_num;
+	p->cpu_loc = cpu_info.cpu_loc;
+#ifdef CONFIG_SMP
+	/*
+	** FIXME: review if any other initialization is clobbered
+	**	  for boot_cpu by the above memset().
+	*/
+	init_percpu_prof(cpuid);
+#endif
+
+	/*
+	** CONFIG_SMP: init_smp_config() will attempt to get CPUs into
+	** OS control. RENDEZVOUS is the default state - see mem_set above.
+	**	p->state = STATE_RENDEZVOUS;
+	*/
+
+#if 0
+	/* CPU 0 IRQ table is statically allocated/initialized */
+	if (cpuid) {
+		struct irqaction actions[];
+
+		/*
+		** itimer and ipi IRQ handlers are statically initialized in
+		** arch/parisc/kernel/irq.c. ie Don't need to register them.
+		*/
+		actions = kmalloc(sizeof(struct irqaction)*MAX_CPU_IRQ, GFP_ATOMIC);
+		if (!actions) {
+			/* not getting its own table, share with monarch */
+			actions = cpu_irq_actions[0];
+		}
+
+		cpu_irq_actions[cpuid] = actions;
+	}
+#endif
+
+	/*
+	 * Bring this CPU up now! (ignore bootstrap cpuid == 0)
+	 */
+#ifdef CONFIG_SMP
+	if (cpuid) {
+		set_cpu_present(cpuid, true);
+		cpu_up(cpuid);
+	}
+#endif
+
+	return 0;
+}
+
+/**
+ * collect_boot_cpu_data - Fill the boot_cpu_data structure.
+ *
+ * This function collects and stores the generic processor information
+ * in the boot_cpu_data structure.  Each successfully-read PDC record
+ * is also mixed into the entropy pool via add_device_randomness().
+ */
+void __init collect_boot_cpu_data(void)
+{
+	unsigned long cr16_seed;
+
+	memset(&boot_cpu_data, 0, sizeof(boot_cpu_data));
+
+	/* seed the entropy pool with the CPU cycle counter */
+	cr16_seed = get_cycles();
+	add_device_randomness(&cr16_seed, sizeof(cr16_seed));
+
+	boot_cpu_data.cpu_hz = 100 * PAGE0->mem_10msec; /* Hz of this PARISC */
+
+	/* get CPU-Model Information... */
+	/* temporary alias so the model words can be printed compactly */
+#define p ((unsigned long *)&boot_cpu_data.pdc.model)
+	if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK) {
+		printk(KERN_INFO 
+			"model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+			p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
+
+		add_device_randomness(&boot_cpu_data.pdc.model,
+			sizeof(boot_cpu_data.pdc.model));
+	}
+#undef p
+
+	if (pdc_model_versions(&boot_cpu_data.pdc.versions, 0) == PDC_OK) {
+		printk(KERN_INFO "vers  %08lx\n", 
+			boot_cpu_data.pdc.versions);
+
+		add_device_randomness(&boot_cpu_data.pdc.versions,
+			sizeof(boot_cpu_data.pdc.versions));
+	}
+
+	if (pdc_model_cpuid(&boot_cpu_data.pdc.cpuid) == PDC_OK) {
+		printk(KERN_INFO "CPUID vers %ld rev %ld (0x%08lx)\n",
+			(boot_cpu_data.pdc.cpuid >> 5) & 127,
+			boot_cpu_data.pdc.cpuid & 31,
+			boot_cpu_data.pdc.cpuid);
+
+		add_device_randomness(&boot_cpu_data.pdc.cpuid,
+			sizeof(boot_cpu_data.pdc.cpuid));
+	}
+
+	if (pdc_model_capabilities(&boot_cpu_data.pdc.capabilities) == PDC_OK)
+		printk(KERN_INFO "capabilities 0x%lx\n",
+			boot_cpu_data.pdc.capabilities);
+
+	if (pdc_model_sysmodel(boot_cpu_data.pdc.sys_model_name) == PDC_OK)
+		printk(KERN_INFO "model %s\n",
+			boot_cpu_data.pdc.sys_model_name);
+
+	/* cache the h/s versions and derive the CPU type and names */
+	boot_cpu_data.hversion =  boot_cpu_data.pdc.model.hversion;
+	boot_cpu_data.sversion =  boot_cpu_data.pdc.model.sversion;
+
+	boot_cpu_data.cpu_type = parisc_get_cpu_type(boot_cpu_data.hversion);
+	boot_cpu_data.cpu_name = cpu_name_version[boot_cpu_data.cpu_type][0];
+	boot_cpu_data.family_name = cpu_name_version[boot_cpu_data.cpu_type][1];
+
+#ifdef CONFIG_PA8X00
+	/* only the mako/mako2 PA8800/PA8900 CPUs need coherent DMA */
+	_parisc_requires_coherency = (boot_cpu_data.cpu_type == mako) ||
+				(boot_cpu_data.cpu_type == mako2);
+#endif
+}
+
+
+/**
+ * init_per_cpu - Handle individual processor initializations.
+ * @cpunum: logical processor number.
+ *
+ * This function handles initialization for *every* CPU
+ * in the system:
+ *
+ * o Set "default" CPU width for trap handlers
+ *
+ * o Enable FP coprocessor
+ *   REVISIT: this could be done in the "code 22" trap handler.
+ *	(frowand's idea - that way we know which processes need FP
+ *	registers saved on the interrupt stack.)
+ *   NEWS FLASH: wide kernels need FP coprocessor enabled to handle
+ *	formatted printing of %lx for example (double divides I think)
+ *
+ * o Enable CPU profiling hooks.
+ *
+ * Returns the status of pdc_coproc_cfg() (negative on failure).
+ */
+int __init init_per_cpu(int cpunum)
+{
+	int ret;
+	struct pdc_coproc_cfg coproc_cfg;
+
+	set_firmware_width();
+	ret = pdc_coproc_cfg(&coproc_cfg);
+
+	if(ret >= 0 && coproc_cfg.ccr_functional) {
+		mtctl(coproc_cfg.ccr_functional, 10);  /* 10 == Coprocessor Control Reg */
+
+		/* FWIW, FP rev/model is a more accurate way to determine
+		** CPU type. CPU rev/model has some ambiguous cases.
+		*/
+		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
+
+		/* only announce FP details once, from the boot CPU */
+		if (cpunum == 0)
+			printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
+				cpunum, coproc_cfg.revision, coproc_cfg.model);
+
+		/*
+		** store status register to stack (hopefully aligned)
+		** and clear the T-bit.
+		*/
+		asm volatile ("fstd    %fr0,8(%sp)");
+
+	} else {
+		printk(KERN_WARNING  "WARNING: No FP CoProcessor?!"
+			" (coproc_cfg.ccr_functional == 0x%lx, expected 0xc0)\n"
+#ifdef CONFIG_64BIT
+			"Halting Machine - FP required\n"
+#endif
+			, coproc_cfg.ccr_functional);
+#ifdef CONFIG_64BIT
+		/* wide kernels cannot run without FP (see above) */
+		mdelay(100);	/* previous chars get pushed to console */
+		panic("FP CoProc not reported");
+#endif
+	}
+
+	/* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
+	init_percpu_prof(cpunum);
+
+	return ret;
+}
+
+/*
+ * Display CPU info for all CPUs.
+ * seq_file backend for /proc/cpuinfo; emits one record per online CPU.
+ */
+int
+show_cpuinfo (struct seq_file *m, void *v)
+{
+	unsigned long cpu;
+
+	for_each_online_cpu(cpu) {
+		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
+#ifdef CONFIG_SMP
+		/* skip slots that were never probed (hpa still zero) */
+		if (0 == cpuinfo->hpa)
+			continue;
+#endif
+		seq_printf(m, "processor\t: %lu\n"
+				"cpu family\t: PA-RISC %s\n",
+				 cpu, boot_cpu_data.family_name);
+
+		seq_printf(m, "cpu\t\t: %s\n",  boot_cpu_data.cpu_name );
+
+		/* cpu MHz */
+		/* NOTE(review): cpu_hz looks wider than int (set from
+		 * 100 * PAGE0->mem_10msec) yet is printed with %d —
+		 * verify the format specifier matches its type. */
+		seq_printf(m, "cpu MHz\t\t: %d.%06d\n",
+				 boot_cpu_data.cpu_hz / 1000000,
+				 boot_cpu_data.cpu_hz % 1000000  );
+
+		seq_printf(m, "capabilities\t:");
+		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32)
+			seq_puts(m, " os32");
+		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64)
+			seq_puts(m, " os64");
+		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)
+			seq_puts(m, " iopdir_fdc");
+		switch (boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) {
+		case PDC_MODEL_NVA_SUPPORTED:
+			seq_puts(m, " nva_supported");
+			break;
+		case PDC_MODEL_NVA_SLOW:
+			seq_puts(m, " nva_slow");
+			break;
+		case PDC_MODEL_NVA_UNSUPPORTED:
+			seq_puts(m, " needs_equivalent_aliasing");
+			break;
+		}
+		seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);
+
+		seq_printf(m, "model\t\t: %s\n"
+				"model name\t: %s\n",
+				 boot_cpu_data.pdc.sys_model_name,
+				 cpuinfo->dev ?
+				 cpuinfo->dev->name : "Unknown");
+
+		seq_printf(m, "hversion\t: 0x%08x\n"
+			        "sversion\t: 0x%08x\n",
+				 boot_cpu_data.hversion,
+				 boot_cpu_data.sversion );
+
+		/* print cachesize info */
+		show_cache_info(m);
+
+		seq_printf(m, "bogomips\t: %lu.%02lu\n",
+			     cpuinfo->loops_per_jiffy / (500000 / HZ),
+			     (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100);
+
+		seq_printf(m, "software id\t: %ld\n\n",
+				boot_cpu_data.pdc.model.sw_id);
+	}
+	return 0;
+}
+
+/* Match any native processor (NPROC) device, regardless of revision. */
+static const struct parisc_device_id processor_tbl[] __initconst = {
+	{ HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID },
+	{ 0, }
+};
+
+/* Driver glue: processor_probe() claims each CPU device found. */
+static struct parisc_driver cpu_driver __refdata = {
+	.name		= "CPU",
+	.id_table	= processor_tbl,
+	.probe		= processor_probe
+};
+
+/**
+ * processor_init - Processor initialization procedure.
+ *
+ * Register this driver so processor_probe() runs for each CPU device
+ * discovered during bus inventory.
+ */
+void __init processor_init(void)
+{
+	register_parisc_driver(&cpu_driver);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/ptrace.c b/src/kernel/linux/v4.14/arch/parisc/kernel/ptrace.c
new file mode 100644
index 0000000..f468a5b
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/ptrace.c
@@ -0,0 +1,704 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kernel support for the ptrace() and syscall tracing interfaces.
+ *
+ * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
+ * Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
+ * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
+ * Copyright (C) 2008-2016 Helge Deller <deller@gmx.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/user.h>
+#include <linux/personality.h>
+#include <linux/regset.h>
+#include <linux/security.h>
+#include <linux/seccomp.h>
+#include <linux/compat.h>
+#include <linux/signal.h>
+#include <linux/audit.h>
+
+#include <linux/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/asm-offsets.h>
+
+/* PSW bits we allow the debugger to modify */
+#define USER_PSW_BITS	(PSW_N | PSW_B | PSW_V | PSW_CB)
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+/*
+ * These are our native regset flavors.
+ */
+enum parisc_regset {
+	REGSET_GENERAL,
+	REGSET_FP
+};
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *task)
+{
+	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
+
+	/* make sure the trap bits are not set */
+	/* r = recovery counter trap, t = taken branch trap; h and l are
+	 * the remaining trap-enable bits cleared alongside them. */
+	pa_psw(task)->r = 0;
+	pa_psw(task)->t = 0;
+	pa_psw(task)->h = 0;
+	pa_psw(task)->l = 0;
+}
+
+/*
+ * The following functions are called by ptrace_resume() when
+ * enabling or disabling single/block tracing.
+ */
+/* Disable single-stepping: clears all step flags and PSW trap bits. */
+void user_disable_single_step(struct task_struct *task)
+{
+	ptrace_disable(task);
+}
+
+/* Arm single-step tracing for @task via the PSW recovery counter. */
+void user_enable_single_step(struct task_struct *task)
+{
+	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
+	set_tsk_thread_flag(task, TIF_SINGLESTEP);
+
+	/* Special case: the next instruction is nullified, so there is
+	 * nothing to step over — advance the IA queues manually and
+	 * report a synthetic SIGTRAP instead of arming any trap. */
+	if (pa_psw(task)->n) {
+		struct siginfo si;
+
+		/* Nullified, just crank over the queue. */
+		task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1];
+		task_regs(task)->iasq[0] = task_regs(task)->iasq[1];
+		task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4;
+		pa_psw(task)->n = 0;
+		pa_psw(task)->x = 0;
+		pa_psw(task)->y = 0;
+		pa_psw(task)->z = 0;
+		pa_psw(task)->b = 0;
+		ptrace_disable(task);
+		/* Don't wake up the task, but let the
+		   parent know something happened. */
+		si.si_code = TRAP_TRACE;
+		si.si_addr = (void __user *) (task_regs(task)->iaoq[0] & ~3);
+		si.si_signo = SIGTRAP;
+		si.si_errno = 0;
+		force_sig_info(SIGTRAP, &si, task);
+		/* notify_parent(task, SIGCHLD); */
+		return;
+	}
+
+	/* Enable recovery counter traps.  The recovery counter
+	 * itself will be set to zero on a task switch.  If the
+	 * task is suspended on a syscall then the syscall return
+	 * path will overwrite the recovery counter with a suitable
+	 * value such that it traps once back in user space.  We
+	 * disable interrupts in the tasks PSW here also, to avoid
+	 * interrupts while the recovery counter is decrementing.
+	 */
+	pa_psw(task)->r = 1;
+	pa_psw(task)->t = 0;
+	pa_psw(task)->h = 0;
+	pa_psw(task)->l = 0;
+}
+
+/* Arm block-step tracing: trap on the next taken branch rather than
+ * on every instruction (mutually exclusive with single-step). */
+void user_enable_block_step(struct task_struct *task)
+{
+	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+	set_tsk_thread_flag(task, TIF_BLOCKSTEP);
+
+	/* Enable taken branch trap. */
+	pa_psw(task)->r = 0;
+	pa_psw(task)->t = 1;
+	pa_psw(task)->h = 0;
+	pa_psw(task)->l = 0;
+}
+
+/* Architecture-specific ptrace requests.  Anything not handled here
+ * falls through to the generic ptrace_request().  Returns 0 on
+ * success, -EIO on bad offsets, or the generic handler's result. */
+long arch_ptrace(struct task_struct *child, long request,
+		 unsigned long addr, unsigned long data)
+{
+	unsigned long __user *datap = (unsigned long __user *)data;
+	unsigned long tmp;
+	long ret = -EIO;
+
+	switch (request) {
+
+	/* Read the word at location addr in the USER area.  For ptraced
+	   processes, the kernel saves all regs on a syscall. */
+	case PTRACE_PEEKUSR:
+		/* addr must be word-aligned and within pt_regs */
+		if ((addr & (sizeof(unsigned long)-1)) ||
+		     addr >= sizeof(struct pt_regs))
+			break;
+		tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
+		ret = put_user(tmp, datap);
+		break;
+
+	/* Write the word at location addr in the USER area.  This will need
+	   to change when the kernel no longer saves all regs on a syscall.
+	   FIXME.  There is a problem at the moment in that r3-r18 are only
+	   saved if the process is ptraced on syscall entry, and even then
+	   those values are overwritten by actual register values on syscall
+	   exit. */
+	case PTRACE_POKEUSR:
+		/* Some register values written here may be ignored in
+		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
+		 * r31/r31+4, and not with the values in pt_regs.
+		 */
+		if (addr == PT_PSW) {
+			/* Allow writing to Nullify, Divide-step-correction,
+			 * and carry/borrow bits.
+			 * BEWARE, if you set N, and then single step, it won't
+			 * stop on the nullified instruction.
+			 */
+			data &= USER_PSW_BITS;
+			task_regs(child)->gr[0] &= ~USER_PSW_BITS;
+			task_regs(child)->gr[0] |= data;
+			ret = 0;
+			break;
+		}
+
+		if ((addr & (sizeof(unsigned long)-1)) ||
+		     addr >= sizeof(struct pt_regs))
+			break;
+		if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
+			data |= 3; /* ensure userspace privilege */
+		}
+		/* only whitelisted registers may be written */
+		if ((addr >= PT_GR1 && addr <= PT_GR31) ||
+				addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
+				(addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
+				addr == PT_SAR) {
+			*(unsigned long *) ((char *) task_regs(child) + addr) = data;
+			ret = 0;
+		}
+		break;
+
+	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
+		return copy_regset_to_user(child,
+					   task_user_regset_view(current),
+					   REGSET_GENERAL,
+					   0, sizeof(struct user_regs_struct),
+					   datap);
+
+	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
+		return copy_regset_from_user(child,
+					     task_user_regset_view(current),
+					     REGSET_GENERAL,
+					     0, sizeof(struct user_regs_struct),
+					     datap);
+
+	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
+		return copy_regset_to_user(child,
+					   task_user_regset_view(current),
+					   REGSET_FP,
+					   0, sizeof(struct user_fp_struct),
+					   datap);
+
+	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
+		return copy_regset_from_user(child,
+					     task_user_regset_view(current),
+					     REGSET_FP,
+					     0, sizeof(struct user_fp_struct),
+					     datap);
+
+	default:
+		ret = ptrace_request(child, request, addr, data);
+		break;
+	}
+
+	return ret;
+}
+
+
+#ifdef CONFIG_COMPAT
+
+/* This function is needed to translate 32 bit pt_regs offsets in to
+ * 64 bit pt_regs offsets.  For example, a 32 bit gdb under a 64 bit kernel
+ * will request offset 12 if it wants gr3, but the lower 32 bits of
+ * the 64 bit kernels view of gr3 will be at offset 28 (3*8 + 4).
+ * This code relies on a 32 bit pt_regs being comprised of 32 bit values
+ * except for the fp registers which (a) are 64 bits, and (b) follow
+ * the gr registers at the start of pt_regs.  The 32 bit pt_regs should
+ * be half the size of the 64 bit pt_regs, plus 32*4 to allow for fr[]
+ * being 64 bit in both cases.
+ */
+
+/* Map a 32-bit pt_regs byte offset onto the 64-bit pt_regs layout
+ * (see the long comment above).  Returns sizeof(struct pt_regs) as an
+ * out-of-range sentinel for invalid offsets. */
+static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
+{
+	compat_ulong_t pos;
+
+	if (offset < 32*4)	/* gr[0..31] */
+		pos = offset * 2 + 4;	/* low half of the 64-bit slot */
+	else if (offset < 32*4+32*8)	/* fr[0] ... fr[31] */
+		pos = (offset - 32*4) + PT_FR0;	/* fr[] is 64-bit either way */
+	else if (offset < sizeof(struct pt_regs)/2 + 32*4) /* sr[0] ... ipsw */
+		pos = (offset - 32*4 - 32*8) * 2 + PT_SR0 + 4;
+	else
+		pos = sizeof(struct pt_regs);
+
+	return pos;
+}
+
+/* ptrace entry for 32-bit tracers on a 64-bit kernel: translate the
+ * 32-bit USER-area offsets via translate_usr_offset() and delegate
+ * everything else to compat_ptrace_request(). */
+long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+			compat_ulong_t addr, compat_ulong_t data)
+{
+	compat_uint_t tmp;
+	long ret = -EIO;
+
+	switch (request) {
+
+	case PTRACE_PEEKUSR:
+		if (addr & (sizeof(compat_uint_t)-1))
+			break;
+		addr = translate_usr_offset(addr);
+		if (addr >= sizeof(struct pt_regs))
+			break;
+
+		tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);
+		ret = put_user(tmp, (compat_uint_t *) (unsigned long) data);
+		break;
+
+	/* Write the word at location addr in the USER area.  This will need
+	   to change when the kernel no longer saves all regs on a syscall.
+	   FIXME.  There is a problem at the moment in that r3-r18 are only
+	   saved if the process is ptraced on syscall entry, and even then
+	   those values are overwritten by actual register values on syscall
+	   exit. */
+	case PTRACE_POKEUSR:
+		/* Some register values written here may be ignored in
+		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
+		 * r31/r31+4, and not with the values in pt_regs.
+		 */
+		if (addr == PT_PSW) {
+			/* Since PT_PSW==0, it is valid for 32 bit processes
+			 * under 64 bit kernels as well.
+			 */
+			ret = arch_ptrace(child, request, addr, data);
+		} else {
+			if (addr & (sizeof(compat_uint_t)-1))
+				break;
+			addr = translate_usr_offset(addr);
+			if (addr >= sizeof(struct pt_regs))
+				break;
+			if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
+				data |= 3; /* ensure userspace privilege */
+			}
+			if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
+				/* Special case, fp regs are 64 bits anyway */
+				*(__u32 *) ((char *) task_regs(child) + addr) = data;
+				ret = 0;
+			}
+			else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
+					addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 ||
+					addr == PT_SAR+4) {
+				/* Zero the top 32 bits */
+				*(__u32 *) ((char *) task_regs(child) + addr - 4) = 0;
+				*(__u32 *) ((char *) task_regs(child) + addr) = data;
+				ret = 0;
+			}
+		}
+		break;
+
+	default:
+		ret = compat_ptrace_request(child, request, addr, data);
+		break;
+	}
+
+	return ret;
+}
+#endif
+
+/*
+ * Syscall-entry hook: ptrace reporting, seccomp, tracepoints and audit,
+ * in that order.  Returns the syscall number to execute, or -1 when the
+ * tracer or seccomp vetoed the syscall.
+ */
+long do_syscall_trace_enter(struct pt_regs *regs)
+{
+	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+		int rc = tracehook_report_syscall_entry(regs);
+
+		/*
+		 * As tracesys_next does not set %r28 to -ENOSYS
+		 * when %r20 is set to -1, initialize it here.
+		 */
+		regs->gr[28] = -ENOSYS;
+
+		if (rc) {
+			/*
+			 * A nonzero return code from
+			 * tracehook_report_syscall_entry() tells us
+			 * to prevent the syscall execution.  Skip
+			 * the syscall call and the syscall restart handling.
+			 *
+			 * Note that the tracer may also just change
+			 * regs->gr[20] to an invalid syscall number,
+			 * that is handled by tracesys_next.
+			 */
+			regs->gr[20] = -1UL;
+			return -1;
+		}
+	}
+
+	/* Do the secure computing check after ptrace. */
+	if (secure_computing(NULL) == -1)
+		return -1;
+
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+		trace_sys_enter(regs, regs->gr[20]);
+#endif
+
+#ifdef CONFIG_64BIT
+	/* compat tasks audit only the low 32 bits of each argument */
+	if (!is_compat_task())
+		audit_syscall_entry(regs->gr[20], regs->gr[26], regs->gr[25],
+				    regs->gr[24], regs->gr[23]);
+	else
+#endif
+		audit_syscall_entry(regs->gr[20] & 0xffffffff,
+			regs->gr[26] & 0xffffffff,
+			regs->gr[25] & 0xffffffff,
+			regs->gr[24] & 0xffffffff,
+			regs->gr[23] & 0xffffffff);
+
+	/*
+	 * Sign extend the syscall number to 64bit since it may have been
+	 * modified by a compat ptrace call
+	 */
+	return (int) ((u32) regs->gr[20]);
+}
+
+/*
+ * Syscall-exit hook: emit audit/tracepoint events and notify the tracer
+ * when single-stepping, block-stepping or plain syscall tracing is on.
+ */
+void do_syscall_trace_exit(struct pt_regs *regs)
+{
+	int step;
+
+	/* either flavour of stepping counts */
+	step = test_thread_flag(TIF_SINGLESTEP);
+	if (!step)
+		step = test_thread_flag(TIF_BLOCKSTEP);
+
+	audit_syscall_exit(regs);
+
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+		trace_sys_exit(regs, regs->gr[20]);
+#endif
+
+	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall_exit(regs, step);
+}
+
+
+/*
+ * regset functions.
+ */
+
+/*
+ * Regset read of the FP registers: copy fr[0..31] (64 bits each) to a
+ * kernel or user buffer, then zero-fill any remainder of the request.
+ */
+static int fpr_get(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     void *kbuf, void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	__u64 *kp = kbuf;
+	__u64 __user *up = ubuf;
+	__u64 reg;
+
+	/* work in register-sized units */
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf) {
+		while (count > 0 && pos < ELF_NFPREG) {
+			*kp++ = regs->fr[pos++];
+			--count;
+		}
+	} else {
+		while (count > 0 && pos < ELF_NFPREG) {
+			if (__put_user(regs->fr[pos++], up++))
+				return -EFAULT;
+			--count;
+		}
+	}
+
+	/* back to byte units for the regset core helper */
+	kbuf = kp;
+	ubuf = up;
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+					ELF_NFPREG * sizeof(reg), -1);
+}
+
+/*
+ * Regset write of the FP registers: copy fr[0..31] from a kernel or
+ * user buffer into the saved pt_regs; trailing input is ignored.
+ */
+static int fpr_set(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	const __u64 *kp = kbuf;
+	const __u64 __user *up = ubuf;
+	__u64 val;
+
+	/* work in register-sized units */
+	pos /= sizeof(val);
+	count /= sizeof(val);
+
+	if (kbuf) {
+		while (count > 0 && pos < ELF_NFPREG) {
+			regs->fr[pos++] = *kp++;
+			--count;
+		}
+	} else {
+		while (count > 0 && pos < ELF_NFPREG) {
+			if (__get_user(val, up++))
+				return -EFAULT;
+			regs->fr[pos++] = val;
+			--count;
+		}
+	}
+
+	/* back to byte units for the regset core helper */
+	kbuf = kp;
+	ubuf = up;
+	pos *= sizeof(val);
+	count *= sizeof(val);
+	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+					 ELF_NFPREG * sizeof(val), -1);
+}
+
+#define RI(reg) (offsetof(struct user_regs_struct,reg) / sizeof(long))
+
+/*
+ * Read one register, identified by its index into struct
+ * user_regs_struct, from the task's saved pt_regs.  Control registers
+ * not saved in pt_regs are read live from the CPU via mfctl(); unknown
+ * indices read as 0.
+ */
+static unsigned long get_reg(struct pt_regs *regs, int num)
+{
+	switch (num) {
+	case RI(gr[0]) ... RI(gr[31]):	return regs->gr[num - RI(gr[0])];
+	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
+	case RI(iasq[0]):		return regs->iasq[0];
+	case RI(iasq[1]):		return regs->iasq[1];
+	case RI(iaoq[0]):		return regs->iaoq[0];
+	case RI(iaoq[1]):		return regs->iaoq[1];
+	case RI(sar):			return regs->sar;
+	case RI(iir):			return regs->iir;
+	case RI(isr):			return regs->isr;
+	case RI(ior):			return regs->ior;
+	case RI(ipsw):			return regs->ipsw;
+	case RI(cr27):			return regs->cr27;
+	/* live control-register reads (current CPU state, not saved state) */
+	case RI(cr0):			return mfctl(0);
+	case RI(cr24):			return mfctl(24);
+	case RI(cr25):			return mfctl(25);
+	case RI(cr26):			return mfctl(26);
+	case RI(cr28):			return mfctl(28);
+	case RI(cr29):			return mfctl(29);
+	case RI(cr30):			return mfctl(30);
+	case RI(cr31):			return mfctl(31);
+	case RI(cr8):			return mfctl(8);
+	case RI(cr9):			return mfctl(9);
+	case RI(cr12):			return mfctl(12);
+	case RI(cr13):			return mfctl(13);
+	case RI(cr10):			return mfctl(10);
+	case RI(cr15):			return mfctl(15);
+	default:			return 0;
+	}
+}
+
+/*
+ * Write one user_regs_struct register into the task's saved pt_regs.
+ * Only PSW (masked to user-modifiable bits), gr[1..31], iaoq[0..1]
+ * (forced to privilege level 3) and sar are writable; all other
+ * indices are silently ignored.
+ */
+static void set_reg(struct pt_regs *regs, int num, unsigned long val)
+{
+	switch (num) {
+	case RI(gr[0]): /*
+			 * PSW is in gr[0].
+			 * Allow writing to Nullify, Divide-step-correction,
+			 * and carry/borrow bits.
+			 * BEWARE, if you set N, and then single step, it won't
+			 * stop on the nullified instruction.
+			 */
+			val &= USER_PSW_BITS;
+			regs->gr[0] &= ~USER_PSW_BITS;
+			regs->gr[0] |= val;
+			return;
+	case RI(gr[1]) ... RI(gr[31]):
+			regs->gr[num - RI(gr[0])] = val;
+			return;
+	case RI(iaoq[0]):
+	case RI(iaoq[1]):
+			/* set 2 lowest bits to ensure userspace privilege: */
+			regs->iaoq[num - RI(iaoq[0])] = val | 3;
+			return;
+	case RI(sar):	regs->sar = val;
+			return;
+	default:	return;
+#if 0
+	/* do not allow to change any of the following registers (yet) */
+	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
+	case RI(iasq[0]):		return regs->iasq[0];
+	case RI(iasq[1]):		return regs->iasq[1];
+	case RI(iir):			return regs->iir;
+	case RI(isr):			return regs->isr;
+	case RI(ior):			return regs->ior;
+	case RI(ipsw):			return regs->ipsw;
+	case RI(cr27):			return regs->cr27;
+        case cr0, cr24, cr25, cr26, cr27, cr28, cr29, cr30, cr31;
+        case cr8, cr9, cr12, cr13, cr10, cr15;
+#endif
+	}
+}
+
+/*
+ * Regset read of the general registers, routed through get_reg() so
+ * live control registers are included; remainder is zero-filled.
+ */
+static int gpr_get(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     void *kbuf, void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	unsigned long *kp = kbuf;
+	unsigned long __user *up = ubuf;
+	unsigned long reg;
+
+	/* work in register-sized units */
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf) {
+		while (count > 0 && pos < ELF_NGREG) {
+			*kp++ = get_reg(regs, pos++);
+			--count;
+		}
+	} else {
+		while (count > 0 && pos < ELF_NGREG) {
+			if (__put_user(get_reg(regs, pos++), up++))
+				return -EFAULT;
+			--count;
+		}
+	}
+
+	/* back to byte units for the regset core helper */
+	kbuf = kp;
+	ubuf = up;
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+					ELF_NGREG * sizeof(reg), -1);
+}
+
+/*
+ * Regset write of the general registers, routed through set_reg() so
+ * read-only registers are filtered; trailing input is ignored.
+ */
+static int gpr_set(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	const unsigned long *kp = kbuf;
+	const unsigned long __user *up = ubuf;
+	unsigned long val;
+
+	/* work in register-sized units */
+	pos /= sizeof(val);
+	count /= sizeof(val);
+
+	if (kbuf) {
+		while (count > 0 && pos < ELF_NGREG) {
+			set_reg(regs, pos++, *kp++);
+			--count;
+		}
+	} else {
+		while (count > 0 && pos < ELF_NGREG) {
+			if (__get_user(val, up++))
+				return -EFAULT;
+			set_reg(regs, pos++, val);
+			--count;
+		}
+	}
+
+	/* back to byte units for the regset core helper */
+	kbuf = kp;
+	ubuf = up;
+	pos *= sizeof(val);
+	count *= sizeof(val);
+	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+					 ELF_NGREG * sizeof(val), -1);
+}
+
+/* Native regsets exposed via PTRACE_GETREGSET/SETREGSET and core dumps. */
+static const struct user_regset native_regsets[] = {
+	[REGSET_GENERAL] = {
+		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
+		.size = sizeof(long), .align = sizeof(long),
+		.get = gpr_get, .set = gpr_set
+	},
+	[REGSET_FP] = {
+		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
+		.size = sizeof(__u64), .align = sizeof(__u64),
+		.get = fpr_get, .set = fpr_set
+	}
+};
+
+/* Regset view for tasks matching the kernel's own word width. */
+static const struct user_regset_view user_parisc_native_view = {
+	.name = "parisc", .e_machine = ELF_ARCH, .ei_osabi = ELFOSABI_LINUX,
+	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
+};
+
+#ifdef CONFIG_64BIT
+#include <linux/compat.h>
+
+/*
+ * 32-bit compat regset read of the general registers: values come from
+ * get_reg() and are truncated to 32 bits; remainder is zero-filled.
+ */
+static int gpr32_get(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     void *kbuf, void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	compat_ulong_t *kp = kbuf;
+	compat_ulong_t __user *up = ubuf;
+	compat_ulong_t reg;
+
+	/* work in 32-bit register units */
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf) {
+		while (count > 0 && pos < ELF_NGREG) {
+			*kp++ = get_reg(regs, pos++);
+			--count;
+		}
+	} else {
+		while (count > 0 && pos < ELF_NGREG) {
+			if (__put_user((compat_ulong_t) get_reg(regs, pos++), up++))
+				return -EFAULT;
+			--count;
+		}
+	}
+
+	/* back to byte units for the regset core helper */
+	kbuf = kp;
+	ubuf = up;
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+					ELF_NGREG * sizeof(reg), -1);
+}
+
+/*
+ * 32-bit compat regset write of the general registers; values are
+ * zero-extended by set_reg(), trailing input is ignored.
+ */
+static int gpr32_set(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	const compat_ulong_t *kp = kbuf;
+	const compat_ulong_t __user *up = ubuf;
+	compat_ulong_t val;
+
+	/* work in 32-bit register units */
+	pos /= sizeof(val);
+	count /= sizeof(val);
+
+	if (kbuf) {
+		while (count > 0 && pos < ELF_NGREG) {
+			set_reg(regs, pos++, *kp++);
+			--count;
+		}
+	} else {
+		while (count > 0 && pos < ELF_NGREG) {
+			if (__get_user(val, up++))
+				return -EFAULT;
+			set_reg(regs, pos++, val);
+			--count;
+		}
+	}
+
+	/* back to byte units for the regset core helper */
+	kbuf = kp;
+	ubuf = up;
+	pos *= sizeof(val);
+	count *= sizeof(val);
+	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+					 ELF_NGREG * sizeof(val), -1);
+}
+
+/*
+ * These are the regset flavors matching the 32bit native set.
+ */
+/* Compat regsets: 32-bit GPR accessors; FP regs are 64-bit either way. */
+static const struct user_regset compat_regsets[] = {
+	[REGSET_GENERAL] = {
+		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
+		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
+		.get = gpr32_get, .set = gpr32_set
+	},
+	[REGSET_FP] = {
+		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
+		.size = sizeof(__u64), .align = sizeof(__u64),
+		.get = fpr_get, .set = fpr_set
+	}
+};
+
+/* Regset view presented to 32-bit tasks on a 64-bit kernel. */
+static const struct user_regset_view user_parisc_compat_view = {
+	.name = "parisc", .e_machine = EM_PARISC, .ei_osabi = ELFOSABI_LINUX,
+	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
+};
+#endif	/* CONFIG_64BIT */
+
+/* Select the regset view matching the tracee's ABI width. */
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+	/* the exported layouts must agree with the regset element counts */
+	BUILD_BUG_ON(sizeof(struct user_regs_struct)/sizeof(long) != ELF_NGREG);
+	BUILD_BUG_ON(sizeof(struct user_fp_struct)/sizeof(__u64) != ELF_NFPREG);
+#ifdef CONFIG_64BIT
+	if (is_compat_task())
+		return &user_parisc_compat_view;
+#endif
+	return &user_parisc_native_view;
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/real2.S b/src/kernel/linux/v4.14/arch/parisc/kernel/real2.S
new file mode 100644
index 0000000..cc99634
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/real2.S
@@ -0,0 +1,312 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com)
+ *
+ */
+
+#include <asm/pdc.h>
+#include <asm/psw.h>
+#include <asm/assembly.h>
+#include <asm/asm-offsets.h>
+
+#include <linux/linkage.h>
+
+
+	.section	.bss
+
+	.export pdc_result
+	.export pdc_result2
+	.align 8
+/* Scratch buffers that PDC firmware calls write their results into. */
+pdc_result:
+	.block	ASM_PDC_RESULT_SIZE
+pdc_result2:
+	.block	ASM_PDC_RESULT_SIZE
+
+	.export real_stack
+	.export real32_stack
+	.export real64_stack
+	.align	64
+/* Stack used while executing in real (physical-address) mode. */
+real_stack:
+real32_stack:
+real64_stack:
+	.block	8192
+
+#ifdef CONFIG_64BIT
+#  define REG_SZ 8
+#else
+#  define REG_SZ 4
+#endif
+
+/* number of control registers saved/restored around a real-mode call */
+#define N_SAVED_REGS 9
+
+/* save area used by save_control_regs/restore_control_regs below */
+save_cr_space:
+	.block	REG_SZ * N_SAVED_REGS
+save_cr_end:
+
+
+/************************ 32-bit real-mode calls ***********************/
+/* This can be called in both narrow and wide kernels */
+
+	.text
+
+	/* unsigned long real32_call_asm(unsigned int *sp,
+	 *		unsigned int *arg0p,
+	 *		unsigned int iodc_fn)
+	 *	sp is value of stack pointer to adopt before calling PDC (virt)
+	 *	arg0p points to where saved arg values may be found
+	 *	iodc_fn is the IODC function to call
+	 */
+
+/*
+ * Call a 32-bit IODC/PDC entry point from either a narrow or a wide
+ * kernel: save state, switch to the real-mode stack, drop to real mode
+ * (and narrow mode on 64-bit), call, then undo everything.
+ */
+ENTRY_CFI(real32_call_asm)
+	STREG	%rp, -RP_OFFSET(%sp)	/* save RP */
+#ifdef CONFIG_64BIT
+	callee_save
+	ldo	2*REG_SZ(%sp), %sp	/* room for a couple more saves */
+	STREG	%r27, -1*REG_SZ(%sp)
+	STREG	%r29, -2*REG_SZ(%sp)
+#endif
+	STREG	%sp, -REG_SZ(%arg0)	/* save SP on real-mode stack */
+	copy	%arg0, %sp		/* adopt the real-mode SP */
+
+	/* save iodc_fn */
+	copy	%arg2, %r31
+
+	/* load up the arg registers from the saved arg area */
+	/* 32-bit calling convention passes first 4 args in registers */
+	ldw	0(%arg1), %arg0		/* note overwriting arg0 */
+	ldw	-8(%arg1), %arg2
+	ldw	-12(%arg1), %arg3
+	ldw	-4(%arg1), %arg1	/* obviously must do this one last! */
+
+	tophys_r1  %sp
+
+	b,l	rfi_virt2real,%r2
+	nop
+
+	b,l	save_control_regs,%r2		/* modifies r1, r2, r28 */
+	nop
+
+#ifdef CONFIG_64BIT
+	rsm	PSW_SM_W, %r0		/* go narrow */
+#endif
+
+	/* call the 32-bit firmware entry point, returning to ric_ret */
+	load32	PA(ric_ret), %r2
+	bv	0(%r31)
+	nop
+ric_ret:
+#ifdef CONFIG_64BIT
+	ssm	PSW_SM_W, %r0		/* go wide */
+#endif
+	/* restore CRs before going virtual in case we page fault */
+	b,l	restore_control_regs, %r2	/* modifies r1, r2, r26 */
+	nop
+
+	b,l	rfi_real2virt,%r2
+	nop
+
+	tovirt_r1 %sp
+	LDREG	-REG_SZ(%sp), %sp	/* restore SP */
+#ifdef CONFIG_64BIT
+	LDREG	-1*REG_SZ(%sp), %r27
+	LDREG	-2*REG_SZ(%sp), %r29
+	ldo	-2*REG_SZ(%sp), %sp
+	callee_rest
+#endif
+	LDREG	-RP_OFFSET(%sp), %rp	/* restore RP */
+	bv	0(%rp)
+	nop
+ENDPROC_CFI(real32_call_asm)
+
+
+#  define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where)
+#  define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r
+
+	.text
+/* Push cr24-cr31 and cr15 into save_cr_space (clobbers r1, r2, r28). */
+ENTRY_CFI(save_control_regs)
+	load32	PA(save_cr_space), %r28
+	PUSH_CR(%cr24, %r28)
+	PUSH_CR(%cr25, %r28)
+	PUSH_CR(%cr26, %r28)
+	PUSH_CR(%cr27, %r28)
+	PUSH_CR(%cr28, %r28)
+	PUSH_CR(%cr29, %r28)
+	PUSH_CR(%cr30, %r28)
+	PUSH_CR(%cr31, %r28)
+	PUSH_CR(%cr15, %r28)
+	bv 0(%r2)
+	nop
+ENDPROC_CFI(save_control_regs)
+
+/* Pop the control registers back in reverse order (clobbers r1, r2, r26). */
+ENTRY_CFI(restore_control_regs)
+	load32	PA(save_cr_end), %r26
+	POP_CR(%cr15, %r26)
+	POP_CR(%cr31, %r26)
+	POP_CR(%cr30, %r26)
+	POP_CR(%cr29, %r26)
+	POP_CR(%cr28, %r26)
+	POP_CR(%cr27, %r26)
+	POP_CR(%cr26, %r26)
+	POP_CR(%cr25, %r26)
+	POP_CR(%cr24, %r26)
+	bv 0(%r2)
+	nop
+ENDPROC_CFI(restore_control_regs)
+
+/* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for
+ * more general-purpose use by the several places which need RFIs
+ */
+	.text
+	.align 128
+/* Switch the CPU from virtual to real (physical) addressing via rfi;
+ * returns through r2, which is translated to a physical address. */
+ENTRY_CFI(rfi_virt2real)
+#if !defined(BOOTLOADER)
+	/* switch to real mode... */
+	rsm		PSW_SM_I,%r0
+	load32		PA(rfi_v2r_1), %r1
+	nop
+	nop
+	nop
+	nop
+	nop
+	
+	rsm             PSW_SM_Q,%r0  /* disable Q & I bits to load iia queue */
+	mtctl		%r0, %cr17	/* Clear IIASQ tail */
+	mtctl		%r0, %cr17	/* Clear IIASQ head */
+	mtctl		%r1, %cr18	/* IIAOQ head */
+	ldo		4(%r1), %r1
+	mtctl		%r1, %cr18	/* IIAOQ tail */
+	load32          REAL_MODE_PSW, %r1
+	mtctl		%r1, %cr22
+	rfi
+	
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+rfi_v2r_1:
+	tophys_r1 %r2
+#endif /* !defined(BOOTLOADER) */
+	bv	0(%r2)
+	nop
+ENDPROC_CFI(rfi_virt2real)
+
+	.text
+	.align 128
+/* Switch the CPU back from real to virtual addressing via rfi;
+ * returns through r2, which is translated to a virtual address. */
+ENTRY_CFI(rfi_real2virt)
+#if !defined(BOOTLOADER)
+	rsm		PSW_SM_I,%r0
+	load32		(rfi_r2v_1), %r1
+	nop
+	nop
+	nop
+	nop
+	nop
+	
+	rsm             PSW_SM_Q,%r0    /* disable Q bit to load iia queue */
+	mtctl		%r0, %cr17	/* Clear IIASQ tail */
+	mtctl		%r0, %cr17	/* Clear IIASQ head */
+	mtctl		%r1, %cr18	/* IIAOQ head */
+	ldo		4(%r1), %r1
+	mtctl		%r1, %cr18	/* IIAOQ tail */
+	load32		KERNEL_PSW, %r1
+	mtctl		%r1, %cr22
+	rfi
+	
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+rfi_r2v_1:
+	tovirt_r1 %r2
+#endif /* !defined(BOOTLOADER) */
+	bv	0(%r2)
+	nop
+ENDPROC_CFI(rfi_real2virt)
+
+#ifdef CONFIG_64BIT
+
+/************************ 64-bit real-mode calls ***********************/
+/* This is only usable in wide kernels right now and will probably stay so */
+	.text
+	/* unsigned long real64_call_asm(unsigned long *sp,
+	 *		unsigned long *arg0p,
+	 *		unsigned long fn)
+	 *	sp is value of stack pointer to adopt before calling PDC (virt)
+	 *	arg0p points to where saved arg values may be found
+	 *	iodc_fn is the IODC function to call
+	 */
+/*
+ * Call a 64-bit firmware entry point from a wide kernel: save state,
+ * adopt the real-mode stack, drop to real mode, call, restore.
+ */
+ENTRY_CFI(real64_call_asm)
+	std	%rp, -0x10(%sp)		/* save RP */
+	std	%sp, -8(%arg0)		/* save SP on real-mode stack */
+	copy	%arg0, %sp		/* adopt the real-mode SP */
+
+	/* save fn */
+	copy	%arg2, %r31
+
+	/* set up the new ap */
+	ldo	64(%arg1), %r29
+
+	/* load up the arg registers from the saved arg area */
+	/* 64-bit calling convention passes first 8 args in registers */
+	ldd	0*REG_SZ(%arg1), %arg0		/* note overwriting arg0 */
+	ldd	2*REG_SZ(%arg1), %arg2
+	ldd	3*REG_SZ(%arg1), %arg3
+	ldd	4*REG_SZ(%arg1), %r22
+	ldd	5*REG_SZ(%arg1), %r21
+	ldd	6*REG_SZ(%arg1), %r20
+	ldd	7*REG_SZ(%arg1), %r19
+	ldd	1*REG_SZ(%arg1), %arg1		/* do this one last! */
+
+	tophys_r1 %sp
+
+	b,l	rfi_virt2real,%r2
+	nop
+
+	b,l	save_control_regs,%r2		/* modifies r1, r2, r28 */
+	nop
+
+	/* call the firmware entry point, returning to r64_ret */
+	load32	PA(r64_ret), %r2
+	bv	0(%r31)
+	nop
+r64_ret:
+	/* restore CRs before going virtual in case we page fault */
+	b,l	restore_control_regs, %r2	/* modifies r1, r2, r26 */
+	nop
+
+	b,l	rfi_real2virt,%r2
+	nop
+
+	tovirt_r1 %sp
+	ldd	-8(%sp), %sp		/* restore SP */
+	ldd	-0x10(%sp), %rp		/* restore RP */
+	bv	0(%rp)
+	nop
+ENDPROC_CFI(real64_call_asm)
+
+#endif
+
+	.text
+	/* http://lists.parisc-linux.org/hypermail/parisc-linux/10916.html
+	**	GCC 3.3 and later has a new function in libgcc.a for
+	**	comparing function pointers.
+	*/
+/* libgcc helper: return the function pointer (arg in r26) unchanged in
+ * r28; the copy executes in the branch delay slot. */
+ENTRY_CFI(__canonicalize_funcptr_for_compare)
+#ifdef CONFIG_64BIT
+	bve (%r2)
+#else
+	bv %r0(%r2)
+#endif
+	copy %r26,%r28
+ENDPROC_CFI(__canonicalize_funcptr_for_compare)
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/setup.c b/src/kernel/linux/v4.14/arch/parisc/kernel/setup.c
new file mode 100644
index 0000000..550f80a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/setup.c
@@ -0,0 +1,432 @@
+/*
+ *    Initial setup-routines for HP 9000 based hardware.
+ *
+ *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
+ *    Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de>
+ *    Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
+ *    Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
+ *    Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
+ *    Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net>
+ *
+ *    Initial PA-RISC Version: 04-23-1999 by Helge Deller
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/initrd.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/seq_file.h>
+#define PCI_DEBUG
+#include <linux/pci.h>
+#undef PCI_DEBUG
+#include <linux/proc_fs.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/sched/clock.h>
+#include <linux/start_kernel.h>
+
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/pdc.h>
+#include <asm/led.h>
+#include <asm/machdep.h>	/* for pa7300lc_init() proto */
+#include <asm/pdc_chassis.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/unwind.h>
+#include <asm/smp.h>
+
+static char __initdata command_line[COMMAND_LINE_SIZE];
+
+/* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */
+struct proc_dir_entry * proc_runway_root __read_mostly = NULL;
+struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
+struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL;
+
+#if !defined(CONFIG_PA20) && (defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA))
+int parisc_bus_is_phys __read_mostly = 1;	/* Assume no IOMMU is present */
+EXPORT_SYMBOL(parisc_bus_is_phys);
+#endif
+
+/*
+ * Collect the kernel command line (and optional initrd location)
+ * handed over by the boot loader via boot_args[], and publish it
+ * through *cmdline_p.
+ */
+void __init setup_cmdline(char **cmdline_p)
+{
+	extern unsigned int boot_args[];
+
+	/* boot_args[0] is free-mem start, boot_args[1] is ptr to command line */
+	if (boot_args[0] >= 64) {
+		strlcpy(boot_command_line, (char *)__va(boot_args[1]),
+			COMMAND_LINE_SIZE);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+		/* did palo pass us a ramdisk? */
+		if (boot_args[2] != 0) {
+			initrd_start = (unsigned long)__va(boot_args[2]);
+			initrd_end = (unsigned long)__va(boot_args[3]);
+		}
+#endif
+	} else {
+		/* called from hpux boot loader */
+		boot_command_line[0] = '\0';
+	}
+
+	strcpy(command_line, boot_command_line);
+	*cmdline_p = command_line;
+}
+
+#ifdef CONFIG_PA11
+/*
+ * Select the DMA operations matching the detected PA1.x CPU type.
+ * PA1.0 (pcx) machines are not supported at all.
+ */
+void __init dma_ops_init(void)
+{
+	switch (boot_cpu_data.cpu_type) {
+	case pcx:
+		/*
+		 * We've got way too many dependencies on 1.1 semantics
+		 * to support 1.0 boxes at this point.
+		 */
+		panic(	"PA-RISC Linux currently only supports machines that conform to\n"
+			"the PA-RISC 1.1 or 2.0 architecture specification.\n");
+
+	case pcxs:
+	case pcxt:
+		hppa_dma_ops = &pcx_dma_ops;
+		break;
+	case pcxl2:
+		/* pcxl2 needs extra init, then intentionally falls through
+		 * to share the pcxl DMA ops */
+		pa7300lc_init();
+	case pcxl: /* falls through */
+		hppa_dma_ops = &pcxl_dma_ops;
+		break;
+	default:
+		break;
+	}
+}
+#endif
+
+extern void collect_boot_cpu_data(void);
+
+/*
+ * Main architecture-specific boot-time setup: unwinder, per-CPU mode
+ * bits, firmware (PDC), command line, memory probing and paging.
+ * Ordering here is significant (e.g. PDC before memory inventory).
+ */
+void __init setup_arch(char **cmdline_p)
+{
+#ifdef CONFIG_64BIT
+	extern int parisc_narrow_firmware;
+#endif
+	unwind_init();
+
+	init_per_cpu(smp_processor_id());	/* Set Modes & Enable FP */
+
+#ifdef CONFIG_64BIT
+	printk(KERN_INFO "The 64-bit Kernel has started...\n");
+#else
+	printk(KERN_INFO "The 32-bit Kernel has started...\n");
+#endif
+
+	printk(KERN_INFO "Kernel default page size is %d KB. Huge pages ",
+		(int)(PAGE_SIZE / 1024));
+#ifdef CONFIG_HUGETLB_PAGE
+	printk(KERN_CONT "enabled with %d MB physical and %d MB virtual size",
+		 1 << (REAL_HPAGE_SHIFT - 20), 1 << (HPAGE_SHIFT - 20));
+#else
+	printk(KERN_CONT "disabled");
+#endif
+	printk(KERN_CONT ".\n");
+
+	/*
+	 * Check if initial kernel page mappings are sufficient.
+	 * panic early if not, else we may access kernel functions
+	 * and variables which can't be reached.
+	 */
+	if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
+		panic("KERNEL_INITIAL_ORDER too small!");
+
+	pdc_console_init();
+
+#ifdef CONFIG_64BIT
+	if(parisc_narrow_firmware) {
+		printk(KERN_INFO "Kernel is using PDC in 32-bit mode.\n");
+	}
+#endif
+	setup_pdc();
+	setup_cmdline(cmdline_p);
+	collect_boot_cpu_data();
+	do_memory_inventory();  /* probe for physical memory */
+	parisc_cache_init();
+	paging_init();
+
+#ifdef CONFIG_CHASSIS_LCD_LED
+	/* initialize the LCD/LED after boot_cpu_data is available ! */
+	led_init();		/* LCD/LED initialization */
+#endif
+
+#ifdef CONFIG_PA11
+	dma_ops_init();
+#endif
+
+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
+	conswitchp = &dummy_con;	/* we use do_take_over_console() later ! */
+#endif
+
+	/* sched_clock is based on cr16 which may drift between CPUs */
+	clear_sched_clock_stable();
+}
+
+/*
+ * Display CPU info for all CPUs.
+ * for parisc this is in processor.c
+ */
+extern int show_cpuinfo (struct seq_file *m, void *v);
+
+static void *
+c_start (struct seq_file *m, loff_t *pos)
+{
+	/* show_cpuinfo() prints everything for all CPUs in one go and
+	 * ignores the iterator value, so expose exactly one position
+	 * and report EOF for anything beyond it. */
+	if ((long)*pos >= 1)
+		return NULL;
+	return (void *)1;
+}
+
+static void *
+c_next (struct seq_file *m, void *v, loff_t *pos)
+{
+	/* advance, then let c_start() decide whether we're at EOF */
+	(*pos)++;
+	return c_start(m, pos);
+}
+
+/* Nothing to release: c_start() acquires no resources. */
+static void
+c_stop (struct seq_file *m, void *v)
+{
+}
+
+/* seq_file iterator for /proc/cpuinfo; show_cpuinfo() is in processor.c */
+const struct seq_operations cpuinfo_op = {
+	.start	= c_start,
+	.next	= c_next,
+	.stop	= c_stop,
+	.show	= show_cpuinfo
+};
+
+/*
+ * Create the /proc/bus/* directories matching the bus types this CPU
+ * family can have.  Can't run from setup_arch(): proc_mkdir() only
+ * works after proc_root_init() has been called by start_kernel().
+ */
+static void __init parisc_proc_mkdir(void)
+{
+	switch (boot_cpu_data.cpu_type) {
+	case pcxl:
+	case pcxl2:
+		if (!proc_gsc_root)
+			proc_gsc_root = proc_mkdir("bus/gsc", NULL);
+		break;
+	case pcxt_:
+	case pcxu:
+	case pcxu_:
+	case pcxw:
+	case pcxw_:
+	case pcxw2:
+		if (!proc_runway_root)
+			proc_runway_root = proc_mkdir("bus/runway", NULL);
+		break;
+	case mako:
+	case mako2:
+		if (!proc_mckinley_root)
+			proc_mckinley_root = proc_mkdir("bus/mckinley", NULL);
+		break;
+	default:
+		/* pcx, pcxs and pcxt have neither gsc nor runway,
+		 * so there is nothing to create for them */
+		break;
+	}
+}
+
+/* Fixed PA-RISC I/O address regions, claimed in parisc_init_resources(). */
+static struct resource central_bus = {
+	.name	= "Central Bus",
+	.start	= F_EXTEND(0xfff80000),
+	.end    = F_EXTEND(0xfffaffff),
+	.flags	= IORESOURCE_MEM,
+};
+
+static struct resource local_broadcast = {
+	.name	= "Local Broadcast",
+	.start	= F_EXTEND(0xfffb0000),
+	.end	= F_EXTEND(0xfffdffff),
+	.flags	= IORESOURCE_MEM,
+};
+
+static struct resource global_broadcast = {
+	.name	= "Global Broadcast",
+	.start	= F_EXTEND(0xfffe0000),
+	.end	= F_EXTEND(0xffffffff),
+	.flags	= IORESOURCE_MEM,
+};
+
+/*
+ * Claim the fixed PA-RISC bus regions (central bus, local and global
+ * broadcast) in the iomem resource tree.
+ *
+ * Returns 0 on success or the negative error from request_resource().
+ * Fix vs. original: the local_broadcast failure message was missing
+ * the space in "%s address space"; the three identical call sites are
+ * folded into one loop.
+ */
+static int __init parisc_init_resources(void)
+{
+	struct resource *const bus_res[] = {
+		&central_bus, &local_broadcast, &global_broadcast,
+	};
+	unsigned int i;
+	int result;
+
+	for (i = 0; i < ARRAY_SIZE(bus_res); i++) {
+		result = request_resource(&iomem_resource, bus_res[i]);
+		if (result < 0) {
+			printk(KERN_ERR
+			       "%s: failed to claim %s address space!\n",
+			       __FILE__, bus_res[i]->name);
+			return result;
+		}
+	}
+
+	return 0;
+}
+
+extern void gsc_init(void);
+extern void processor_init(void);
+extern void ccio_init(void);
+extern void hppb_init(void);
+extern void dino_init(void);
+extern void iosapic_init(void);
+extern void lba_init(void);
+extern void sba_init(void);
+extern void eisa_init(void);
+
+/*
+ * Late (arch_initcall) platform init: procfs dirs, resource claims,
+ * device inventory, chassis LEDs, cache timing, and bus/IOMMU driver
+ * bring-up in dependency order.
+ */
+static int __init parisc_init(void)
+{
+	u32 osid = (OS_ID_LINUX << 16);
+
+	parisc_proc_mkdir();
+	parisc_init_resources();
+	do_device_inventory();                  /* probe for hardware */
+
+	parisc_pdc_chassis_init();
+	
+	/* set up a new led state on systems shipped LED State panel */
+	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BSTART);
+
+	/* tell PDC we're Linux. Nevermind failure. */
+	pdc_stable_write(0x40, &osid, sizeof(osid));
+	
+	/* start with known state */
+	flush_cache_all_local();
+	flush_tlb_all_local(NULL);
+
+	processor_init();
+	/* NOTE: the pr_info() call is assembled from pieces across the
+	 * #ifdef below -- both variants share the trailing arguments */
+#ifdef CONFIG_SMP
+	pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n",
+		num_online_cpus(), num_present_cpus(),
+#else
+	pr_info("CPU(s): 1 x %s at %d.%06d MHz\n",
+#endif
+			boot_cpu_data.cpu_name,
+			boot_cpu_data.cpu_hz / 1000000,
+			boot_cpu_data.cpu_hz % 1000000	);
+
+	parisc_setup_cache_timing();
+
+	/* These are in a non-obvious order, will fix when we have an iotree */
+#if defined(CONFIG_IOSAPIC)
+	iosapic_init();
+#endif
+#if defined(CONFIG_IOMMU_SBA)
+	sba_init();
+#endif
+#if defined(CONFIG_PCI_LBA)
+	lba_init();
+#endif
+
+	/* CCIO before any potential subdevices */
+#if defined(CONFIG_IOMMU_CCIO)
+	ccio_init();
+#endif
+
+	/*
+	 * Need to register Asp & Wax before the EISA adapters for the IRQ
+	 * regions.  EISA must come before PCI to be sure it gets IRQ region
+	 * 0.
+	 */
+#if defined(CONFIG_GSC_LASI) || defined(CONFIG_GSC_WAX)
+	gsc_init();
+#endif
+#ifdef CONFIG_EISA
+	eisa_init();
+#endif
+
+#if defined(CONFIG_HPPB)
+	hppb_init();
+#endif
+
+#if defined(CONFIG_GSC_DINO)
+	dino_init();
+#endif
+
+#ifdef CONFIG_CHASSIS_LCD_LED
+	register_led_regions();	/* register LED port info in procfs */
+#endif
+
+	return 0;
+}
+
+/*
+ * Earliest C entry point for the boot CPU: detect QEMU, probe firmware
+ * width, enable the FPU coprocessor, then hand over to start_kernel().
+ * Does not return.
+ */
+void __init start_parisc(void)
+{
+	extern void early_trap_init(void);
+
+	int ret, cpunum;
+	struct pdc_coproc_cfg coproc_cfg;
+
+	/* check QEMU/SeaBIOS marker in PAGE0 */
+	/* (8 bytes: "SeaBIOS" plus its terminating NUL) */
+	running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
+
+	cpunum = smp_processor_id();
+
+	set_firmware_width_unlocked();
+
+	ret = pdc_coproc_cfg_unlocked(&coproc_cfg);
+	if (ret >= 0 && coproc_cfg.ccr_functional) {
+		/* enable the FP coprocessor via cr10 (CCR) */
+		mtctl(coproc_cfg.ccr_functional, 10);
+
+		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
+
+		/* touch the FPU once so its state is initialized */
+		asm volatile ("fstd	%fr0,8(%sp)");
+	} else {
+		panic("must have an fpu to boot linux");
+	}
+
+	early_trap_init(); /* initialize checksum of fault_vector */
+
+	start_kernel();
+	// not reached
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/signal.c b/src/kernel/linux/v4.14/arch/parisc/kernel/signal.c
new file mode 100644
index 0000000..f2a4038
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/signal.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  linux/arch/parisc/kernel/signal.c: Architecture-specific signal
+ *  handling support.
+ *
+ *  Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
+ *  Copyright (C) 2000 Linuxcare, Inc.
+ *
+ *  Based on the ia64, i386, and alpha versions.
+ *
+ *  Like the IA-64, we are a recent enough port (we are *starting*
+ *  with glibc2.2) that we do not need to support the old non-realtime
+ *  Linux signals.  Therefore we don't.
+ */
+
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
+#include <asm/ucontext.h>
+#include <asm/rt_sigframe.h>
+#include <linux/uaccess.h>
+#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_COMPAT
+#include "signal32.h"
+#endif
+
+#define DEBUG_SIG 0 
+#define DEBUG_SIG_LEVEL 2
+
+#if DEBUG_SIG
+#define DBG(LEVEL, ...) \
+        ((DEBUG_SIG_LEVEL >= LEVEL) \
+	? printk(__VA_ARGS__) : (void) 0)
+#else
+#define DBG(LEVEL, ...)
+#endif
+	
+/* gcc will complain if a pointer is cast to an integer of different
+ * size.  If you really need to do this (and we do for an ELF32 user
+ * application in an ELF64 kernel) then you have to do a cast to an
+ * integer of the same size first.  The A() macro accomplishes
+ * this. */
+#define A(__x)	((unsigned long)(__x))
+
+/*
+ * Do a signal return - restore sigcontext.
+ */
+
+/* Trampoline for calling rt_sigreturn() */
+#define INSN_LDI_R25_0	 0x34190000 /* ldi  0,%r25 (in_syscall=0) */
+#define INSN_LDI_R25_1	 0x34190002 /* ldi  1,%r25 (in_syscall=1) */
+#define INSN_LDI_R20	 0x3414015a /* ldi  __NR_rt_sigreturn,%r20 */
+#define INSN_BLE_SR2_R0  0xe4008200 /* be,l 0x100(%sr2,%r0),%sr0,%r31 */
+#define INSN_NOP	 0x08000240 /* nop */
+/* For debugging */
+#define INSN_DIE_HORRIBLY 0x68000ccc /* stw %r0,0x666(%sr0,%r0) */
+
+/*
+ * Copy the saved machine state (general registers, FP registers, the
+ * IAOQ/IASQ instruction-address queues and SAR) from the user-space
+ * sigcontext back into the kernel pt_regs.  Returns non-zero if any
+ * user access faulted.
+ */
+static long
+restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
+{
+	long err = 0;
+
+	err |= __copy_from_user(regs->gr, sc->sc_gr, sizeof(regs->gr));
+	err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr));
+	err |= __copy_from_user(regs->iaoq, sc->sc_iaoq, sizeof(regs->iaoq));
+	err |= __copy_from_user(regs->iasq, sc->sc_iasq, sizeof(regs->iasq));
+	err |= __get_user(regs->sar, &sc->sc_sar);
+	DBG(2,"restore_sigcontext: iaoq is %#lx / %#lx\n",
+			regs->iaoq[0],regs->iaoq[1]);
+	DBG(2,"restore_sigcontext: r28 is %ld\n", regs->gr[28]);
+	return err;
+}
+
+/*
+ * rt_sigreturn syscall: tear down the signal frame that setup_rt_frame()
+ * built on the user stack, restoring the blocked-signal mask, the saved
+ * machine state and the alternate-stack settings.  Handles both native
+ * and (on a 64-bit kernel) 32-bit compat frames.  On any fault while
+ * reading the frame the task is killed with SIGSEGV.
+ */
+void
+sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
+{
+	struct rt_sigframe __user *frame;
+	sigset_t set;
+	/* gr[30] is the user sp; mask off the low bit (used as a flag,
+	 * see the sigframe_size |= 1 in setup_rt_frame). */
+	unsigned long usp = (regs->gr[30] & ~(0x01UL));
+	unsigned long sigframe_size = PARISC_RT_SIGFRAME_SIZE;
+#ifdef CONFIG_64BIT
+	compat_sigset_t compat_set;
+	struct compat_rt_sigframe __user * compat_frame;
+	
+	if (is_compat_task())
+		sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
+#endif
+
+	current->restart_block.fn = do_no_restart_syscall;
+
+	/* Unwind the user stack to get the rt_sigframe structure. */
+	frame = (struct rt_sigframe __user *)
+		(usp - sigframe_size);
+	DBG(2,"sys_rt_sigreturn: frame is %p\n", frame);
+
+	regs->orig_r28 = 1; /* no restarts for sigreturn */
+
+#ifdef CONFIG_64BIT
+	compat_frame = (struct compat_rt_sigframe __user *)frame;
+	
+	if (is_compat_task()) {
+		DBG(2,"sys_rt_sigreturn: ELF32 process.\n");
+		/* Widen the 32-bit mask before applying it. */
+		if (__copy_from_user(&compat_set, &compat_frame->uc.uc_sigmask, sizeof(compat_set)))
+			goto give_sigsegv;
+		sigset_32to64(&set,&compat_set);
+	} else
+#endif
+	{
+		if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+			goto give_sigsegv;
+	}
+		
+	set_current_blocked(&set);
+
+	/* Good thing we saved the old gr[30], eh? */
+#ifdef CONFIG_64BIT
+	if (is_compat_task()) {
+		DBG(1,"sys_rt_sigreturn: compat_frame->uc.uc_mcontext 0x%p\n",
+				&compat_frame->uc.uc_mcontext);
+// FIXME: Load upper half from register file
+		if (restore_sigcontext32(&compat_frame->uc.uc_mcontext, 
+					&compat_frame->regs, regs))
+			goto give_sigsegv;
+		DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n", 
+				usp, &compat_frame->uc.uc_stack);
+		if (compat_restore_altstack(&compat_frame->uc.uc_stack))
+			goto give_sigsegv;
+	} else
+#endif
+	{
+		DBG(1,"sys_rt_sigreturn: frame->uc.uc_mcontext 0x%p\n",
+				&frame->uc.uc_mcontext);
+		if (restore_sigcontext(&frame->uc.uc_mcontext, regs))
+			goto give_sigsegv;
+		DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n", 
+				usp, &frame->uc.uc_stack);
+		if (restore_altstack(&frame->uc.uc_stack))
+			goto give_sigsegv;
+	}
+		
+
+
+	/* If we are on the syscall path IAOQ will not be restored, and
+	 * if we are on the interrupt path we must not corrupt gr31.
+	 */
+	if (in_syscall)
+		regs->gr[31] = regs->iaoq[0];
+#if DEBUG_SIG
+	DBG(1,"sys_rt_sigreturn: returning to %#lx, DUMPING REGS:\n", regs->iaoq[0]);
+	show_regs(regs);
+#endif
+	return;
+
+give_sigsegv:
+	DBG(1,"sys_rt_sigreturn: Sending SIGSEGV\n");
+	force_sig(SIGSEGV, current);
+	return;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+/*
+ * Pick the user stack address for the signal frame.  If SA_ONSTACK is
+ * requested and we are not already on the alternate stack, switch to
+ * the bottom of the alternate stack; PA-RISC stacks grow upward, so
+ * the frame is placed at (aligned) sas_ss_sp rather than at the top.
+ */
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
+{
+	/*FIXME: ELF32 vs. ELF64 has different frame_size, but since we
+	  don't use the parameter it doesn't matter */
+
+	DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n",
+			(unsigned long)ka, sp, frame_size);
+	
+	/* Align alternate stack and reserve 64 bytes for the signal
+	   handler's frame marker.  */
+	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
+		sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */
+
+	DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp);
+	return (void __user *) sp; /* Stacks grow up.  Fun. */
+}
+
+/*
+ * Save the current machine state into the user-space sigcontext.
+ * On the syscall path the real IAOQ is not available, so the return
+ * address in gr[31] (and gr[31]+4) is stored instead, with sr[3] as
+ * the space id.  Returns non-zero if any user access faulted.
+ */
+static long
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int in_syscall)
+		 
+{
+	unsigned long flags = 0;
+	long err = 0;
+
+	if (on_sig_stack((unsigned long) sc))
+		flags |= PARISC_SC_FLAG_ONSTACK;
+	if (in_syscall) {
+		flags |= PARISC_SC_FLAG_IN_SYSCALL;
+		/* regs->iaoq is undefined in the syscall return path */
+		err |= __put_user(regs->gr[31], &sc->sc_iaoq[0]);
+		err |= __put_user(regs->gr[31]+4, &sc->sc_iaoq[1]);
+		err |= __put_user(regs->sr[3], &sc->sc_iasq[0]);
+		err |= __put_user(regs->sr[3], &sc->sc_iasq[1]);
+		DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (in syscall)\n",
+			regs->gr[31], regs->gr[31]+4);
+	} else {
+		err |= __copy_to_user(sc->sc_iaoq, regs->iaoq, sizeof(regs->iaoq));
+		err |= __copy_to_user(sc->sc_iasq, regs->iasq, sizeof(regs->iasq));
+		DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (not in syscall)\n", 
+			regs->iaoq[0], regs->iaoq[1]);
+	}
+
+	err |= __put_user(flags, &sc->sc_flags);
+	err |= __copy_to_user(sc->sc_gr, regs->gr, sizeof(regs->gr));
+	err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr));
+	err |= __put_user(regs->sar, &sc->sc_sar);
+	DBG(1,"setup_sigcontext: r28 is %ld\n", regs->gr[28]);
+
+	return err;
+}
+
+/*
+ * Build the rt signal frame on the (upward-growing) user stack:
+ * siginfo, ucontext with saved machine state, and a sigreturn
+ * trampoline, then point the registers at the handler.  Handles both
+ * native and (64-bit kernel) compat frames, including resolving
+ * function-descriptor handler addresses.  Returns 0 or -EFAULT.
+ */
+static long
+setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
+	       int in_syscall)
+{
+	struct rt_sigframe __user *frame;
+	unsigned long rp, usp;
+	unsigned long haddr, sigframe_size;
+	unsigned long start, end;
+	int err = 0;
+#ifdef CONFIG_64BIT
+	struct compat_rt_sigframe __user * compat_frame;
+	compat_sigset_t compat_set;
+#endif
+	
+	/* Clear the low flag bit of the user sp (see sigframe_size |= 1). */
+	usp = (regs->gr[30] & ~(0x01UL));
+	/*FIXME: frame_size parameter is unused, remove it. */
+	frame = get_sigframe(&ksig->ka, usp, sizeof(*frame));
+
+	DBG(1,"SETUP_RT_FRAME: START\n");
+	/* NOTE(review): this DBG passes the siginfo struct itself (not its
+	 * address) to %p; harmless while DEBUG_SIG is 0, but would be wrong
+	 * if debugging were enabled. */
+	DBG(1,"setup_rt_frame: frame %p info %p\n", frame, ksig->info);
+
+	
+#ifdef CONFIG_64BIT
+
+	compat_frame = (struct compat_rt_sigframe __user *)frame;
+	
+	if (is_compat_task()) {
+		/* 32-bit task: write compat siginfo/ucontext and a narrowed mask. */
+		DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &compat_frame->info);
+		err |= copy_siginfo_to_user32(&compat_frame->info, &ksig->info);
+		err |= __compat_save_altstack( &compat_frame->uc.uc_stack, regs->gr[30]);
+		DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &compat_frame->uc);
+		DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &compat_frame->uc.uc_mcontext);
+		err |= setup_sigcontext32(&compat_frame->uc.uc_mcontext, 
+					&compat_frame->regs, regs, in_syscall);
+		sigset_64to32(&compat_set,set);
+		err |= __copy_to_user(&compat_frame->uc.uc_sigmask, &compat_set, sizeof(compat_set));
+	} else
+#endif
+	{	
+		DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &frame->info);
+		err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+		err |= __save_altstack(&frame->uc.uc_stack, regs->gr[30]);
+		DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &frame->uc);
+		DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &frame->uc.uc_mcontext);
+		err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, in_syscall);
+		/* FIXME: Should probably be converted as well for the compat case */
+		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+	}
+	
+	if (err)
+		return -EFAULT;
+
+	/* Set up to return from userspace.  If provided, use a stub
+	   already in userspace. The first words of tramp are used to
+	   save the previous sigrestartblock trampoline that might be
+	   on the stack. We start the sigreturn trampoline at 
+	   SIGRESTARTBLOCK_TRAMP+X. */
+	err |= __put_user(in_syscall ? INSN_LDI_R25_1 : INSN_LDI_R25_0,
+			&frame->tramp[SIGRESTARTBLOCK_TRAMP+0]);
+	err |= __put_user(INSN_LDI_R20, 
+			&frame->tramp[SIGRESTARTBLOCK_TRAMP+1]);
+	err |= __put_user(INSN_BLE_SR2_R0, 
+			&frame->tramp[SIGRESTARTBLOCK_TRAMP+2]);
+	err |= __put_user(INSN_NOP, &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]);
+
+#if DEBUG_SIG
+	/* Assert that we're flushing in the correct space... */
+	{
+		unsigned long sid;
+		asm ("mfsp %%sr3,%0" : "=r" (sid));
+		DBG(1,"setup_rt_frame: Flushing 64 bytes at space %#x offset %p\n",
+		       sid, frame->tramp);
+	}
+#endif
+
+	/* Make the freshly-written trampoline visible to instruction fetch. */
+	start = (unsigned long) &frame->tramp[0];
+	end = (unsigned long) &frame->tramp[TRAMP_SIZE];
+	flush_user_dcache_range_asm(start, end);
+	flush_user_icache_range_asm(start, end);
+
+	/* TRAMP Words 0-4, Length 5 = SIGRESTARTBLOCK_TRAMP
+	 * TRAMP Words 5-9, Length 4 = SIGRETURN_TRAMP
+	 * So the SIGRETURN_TRAMP is at the end of SIGRESTARTBLOCK_TRAMP
+	 */
+	rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP];
+
+	if (err)
+		return -EFAULT;
+
+	haddr = A(ksig->ka.sa.sa_handler);
+	/* The sa_handler may be a pointer to a function descriptor */
+#ifdef CONFIG_64BIT
+	if (is_compat_task()) {
+#endif
+		if (haddr & PA_PLABEL_FDESC) {
+			Elf32_Fdesc fdesc;
+			Elf32_Fdesc __user *ufdesc = (Elf32_Fdesc __user *)A(haddr & ~3);
+
+			err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc));
+
+			if (err)
+				return -EFAULT;
+
+			/* Real entry point plus the handler's global pointer. */
+			haddr = fdesc.addr;
+			regs->gr[19] = fdesc.gp;
+		}
+#ifdef CONFIG_64BIT
+	} else {
+		/* 64-bit handlers are always function descriptors. */
+		Elf64_Fdesc fdesc;
+		Elf64_Fdesc __user *ufdesc = (Elf64_Fdesc __user *)A(haddr & ~3);
+		
+		err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc));
+		
+		if (err)
+			return -EFAULT;
+		
+		haddr = fdesc.addr;
+		regs->gr[19] = fdesc.gp;
+		DBG(1,"setup_rt_frame: 64 bit signal, exe=%#lx, r19=%#lx, in_syscall=%d\n",
+		     haddr, regs->gr[19], in_syscall);
+	}
+#endif
+
+	/* The syscall return path will create IAOQ values from r31.
+	 */
+	sigframe_size = PARISC_RT_SIGFRAME_SIZE;
+#ifdef CONFIG_64BIT
+	if (is_compat_task())
+		sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
+#endif
+	if (in_syscall) {
+		regs->gr[31] = haddr;
+#ifdef CONFIG_64BIT
+		if (!test_thread_flag(TIF_32BIT))
+			/* Flag wide mode in the low bit of the new sp. */
+			sigframe_size |= 1;
+#endif
+	} else {
+		unsigned long psw = USER_PSW;
+#ifdef CONFIG_64BIT
+		if (!test_thread_flag(TIF_32BIT))
+			psw |= PSW_W;
+#endif
+
+		/* If we are singlestepping, arrange a trap to be delivered
+		   when we return to userspace. Note the semantics -- we
+		   should trap before the first insn in the handler is
+		   executed. Ref:
+			http://sources.redhat.com/ml/gdb/2004-11/msg00245.html
+		 */
+		if (pa_psw(current)->r) {
+			pa_psw(current)->r = 0;
+			psw |= PSW_R;
+			mtctl(-1, 0);
+		}
+
+		regs->gr[0] = psw;
+		/* NOTE(review): | 3 presumably sets the user privilege level
+		 * in the IAOQ low bits -- confirm against PA-RISC arch docs. */
+		regs->iaoq[0] = haddr | 3;
+		regs->iaoq[1] = regs->iaoq[0] + 4;
+	}
+
+	regs->gr[2]  = rp;                /* userland return pointer */
+	regs->gr[26] = ksig->sig;               /* signal number */
+	
+#ifdef CONFIG_64BIT
+	if (is_compat_task()) {
+		regs->gr[25] = A(&compat_frame->info); /* siginfo pointer */
+		regs->gr[24] = A(&compat_frame->uc);   /* ucontext pointer */
+	} else
+#endif
+	{		
+		regs->gr[25] = A(&frame->info); /* siginfo pointer */
+		regs->gr[24] = A(&frame->uc);   /* ucontext pointer */
+	}
+	
+	DBG(1,"setup_rt_frame: making sigreturn frame: %#lx + %#lx = %#lx\n",
+	       regs->gr[30], sigframe_size,
+	       regs->gr[30] + sigframe_size);
+	/* Raise the user stack pointer to make a proper call frame. */
+	regs->gr[30] = (A(frame) + sigframe_size);
+
+
+	DBG(1,"setup_rt_frame: sig deliver (%s,%d) frame=0x%p sp=%#lx iaoq=%#lx/%#lx rp=%#lx\n",
+	       current->comm, current->pid, frame, regs->gr[30],
+	       regs->iaoq[0], regs->iaoq[1], rp);
+
+	return 0;
+}
+
+/*
+ * OK, we're invoking a handler.
+ */	
+
+/*
+ * Deliver one signal: build the stack frame and let the generic code
+ * finish delivery (or force SIGSEGV on failure via signal_setup_done).
+ * Single-step/block-step flags are passed through so the debugger trap
+ * is re-armed after delivery.
+ */
+static void
+handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall)
+{
+	int ret;
+	sigset_t *oldset = sigmask_to_save();
+
+	/* NOTE(review): this DBG passes ksig->ka / ksig->info (structs) to
+	 * %p; compiled out while DEBUG_SIG is 0, but would need &ksig->ka
+	 * etc. if debugging were enabled. */
+	DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n",
+	       ksig->sig, ksig->ka, ksig->info, oldset, regs);
+	
+	/* Set up the stack frame */
+	ret = setup_rt_frame(ksig, oldset, regs, in_syscall);
+
+	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP) ||
+			  test_thread_flag(TIF_BLOCKSTEP));
+
+	DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
+		regs->gr[28]);
+}
+
+/*
+ * Check how the syscall number gets loaded into %r20 within
+ * the delay branch in userspace and adjust as needed.
+ */
+
+/*
+ * Rewind the syscall branch so the syscall is re-executed, and make
+ * sure the syscall number will be reloaded correctly by the delay-slot
+ * instruction (ldi, nop, or a register-to-register copy into %r20).
+ */
+static void check_syscallno_in_delay_branch(struct pt_regs *regs)
+{
+	u32 opcode, source_reg;
+	u32 __user *uaddr;
+	int err;
+
+	/* Usually we don't have to restore %r20 (the system call number)
+	 * because it gets loaded in the delay slot of the branch external
+	 * instruction via the ldi instruction.
+	 * In some cases a register-to-register copy instruction might have
+	 * been used instead, in which case we need to copy the syscall
+	 * number into the source register before returning to userspace.
+	 */
+
+	/* A syscall is just a branch, so all we have to do is fiddle the
+	 * return pointer so that the ble instruction gets executed again.
+	 */
+	regs->gr[31] -= 8; /* delayed branching */
+
+	/* Get assembler opcode of code in delay branch */
+	uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4);
+	err = get_user(opcode, uaddr);
+	if (err)
+		return;
+
+	/* Check if delay branch uses "ldi int,%r20" */
+	if ((opcode & 0xffff0000) == 0x34140000)
+		return;	/* everything ok, just return */
+
+	/* Check if delay branch uses "nop" */
+	if (opcode == INSN_NOP)
+		return;
+
+	/* Check if delay branch uses "copy %rX,%r20" */
+	if ((opcode & 0xffe0ffff) == 0x08000254) {
+		/* Put the syscall number back in the copy's source register
+		 * so re-executing the copy reloads %r20 with it. */
+		source_reg = (opcode >> 16) & 31;
+		regs->gr[source_reg] = regs->gr[20];
+		return;
+	}
+
+	pr_warn("syscall restart: %s (pid %d): unexpected opcode 0x%08x\n",
+		current->comm, task_pid_nr(current), opcode);
+}
+
+/*
+ * Decide, based on the syscall's return code in gr[28], whether the
+ * interrupted syscall should be restarted after this handler runs or
+ * should just fail with -EINTR.  Only acts once per syscall
+ * (orig_r28 guards against repeated restarts).
+ */
+static inline void
+syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
+{
+	if (regs->orig_r28)
+		return;
+	regs->orig_r28 = 1; /* no more restarts */
+	/* Check the return code */
+	switch (regs->gr[28]) {
+	case -ERESTART_RESTARTBLOCK:
+	case -ERESTARTNOHAND:
+		/* Never restarted when a handler runs. */
+		DBG(1,"ERESTARTNOHAND: returning -EINTR\n");
+		regs->gr[28] = -EINTR;
+		break;
+
+	case -ERESTARTSYS:
+		/* Restarted only if the handler asked for SA_RESTART. */
+		if (!(ka->sa.sa_flags & SA_RESTART)) {
+			DBG(1,"ERESTARTSYS: putting -EINTR\n");
+			regs->gr[28] = -EINTR;
+			break;
+		}
+		/* fallthrough */
+	case -ERESTARTNOINTR:
+		/* Always restarted: rewind the branch and fix up %r20. */
+		check_syscallno_in_delay_branch(regs);
+		break;
+	}
+}
+
+/*
+ * Restart an interrupted syscall when no handler was invoked.  For
+ * -ERESTART_RESTARTBLOCK a small trampoline is written onto the user
+ * stack that re-enters the kernel with __NR_restart_syscall; the other
+ * restart codes simply re-execute the original syscall branch.
+ */
+static inline void
+insert_restart_trampoline(struct pt_regs *regs)
+{
+	if (regs->orig_r28)
+		return;
+	regs->orig_r28 = 1; /* no more restarts */
+	switch(regs->gr[28]) {
+	case -ERESTART_RESTARTBLOCK: {
+		/* Restart the system call - no handlers present */
+		unsigned int *usp = (unsigned int *)regs->gr[30];
+		unsigned long start = (unsigned long) &usp[2];
+		unsigned long end  = (unsigned long) &usp[5];
+		long err = 0;
+
+		/* Setup a trampoline to restart the syscall
+		 * with __NR_restart_syscall
+		 *
+		 *  0: <return address (orig r31)>
+		 *  4: <2nd half for 64-bit>
+		 *  8: ldw 0(%sp), %r31
+		 * 12: be 0x100(%sr2, %r0)
+		 * 16: ldi __NR_restart_syscall, %r20
+		 */
+#ifdef CONFIG_64BIT
+		err |= put_user(regs->gr[31] >> 32, &usp[0]);
+		err |= put_user(regs->gr[31] & 0xffffffff, &usp[1]);
+		err |= put_user(0x0fc010df, &usp[2]);
+#else
+		err |= put_user(regs->gr[31], &usp[0]);
+		err |= put_user(0x0fc0109f, &usp[2]);
+#endif
+		err |= put_user(0xe0008200, &usp[3]);
+		err |= put_user(0x34140000, &usp[4]);
+
+		WARN_ON(err);
+
+		/* flush data/instruction cache for new insns */
+		flush_user_dcache_range_asm(start, end);
+		flush_user_icache_range_asm(start, end);
+
+		/* Resume execution at the trampoline (usp[2] onwards). */
+		regs->gr[31] = regs->gr[30] + 8;
+		return;
+	}
+	case -ERESTARTNOHAND:
+	case -ERESTARTSYS:
+	case -ERESTARTNOINTR:
+		/* With no handler these are always restarted directly. */
+		check_syscallno_in_delay_branch(regs);
+		return;
+	default:
+		break;
+	}
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * We need to be able to restore the syscall arguments (r21-r26) to
+ * restart syscalls.  Thus, the syscall path should save them in the
+ * pt_regs structure (it's okay to do so since they are caller-save
+ * registers).  As noted below, the syscall number gets restored for
+ * us due to the magic of delayed branching.
+ */
+/*
+ * Main signal-delivery entry, called on the return-to-userspace path.
+ * Dequeues a pending signal (if any), arranges syscall restart as
+ * needed, and either delivers the signal or restores the saved mask.
+ */
+asmlinkage void
+do_signal(struct pt_regs *regs, long in_syscall)
+{
+	struct ksignal ksig;
+
+	DBG(1,"\ndo_signal: regs=0x%p, sr7 %#lx, in_syscall=%d\n",
+	       regs, regs->sr[7], in_syscall);
+
+	if (get_signal(&ksig)) {
+		/* NOTE(review): 'signr' is not defined in this scope -- this
+		 * DBG would fail to compile if DEBUG_SIG were enabled at
+		 * level >= 3.  Harmless while DEBUG_SIG is 0. */
+		DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]);
+		/* Restart a system call if necessary. */
+		if (in_syscall)
+			syscall_restart(regs, &ksig.ka);
+
+		handle_signal(&ksig, regs, in_syscall);
+		return;
+	}
+
+	/* Did we come from a system call? */
+	if (in_syscall)
+		insert_restart_trampoline(regs);
+	
+	DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n", 
+		regs->gr[28]);
+
+	/* No signal delivered: put back the mask saved before sigsuspend etc. */
+	restore_saved_sigmask();
+}
+
+/*
+ * Called on return to userspace when TIF work is pending: deliver
+ * signals and run any queued resume notifications.
+ */
+void do_notify_resume(struct pt_regs *regs, long in_syscall)
+{
+	if (test_thread_flag(TIF_SIGPENDING))
+		do_signal(regs, in_syscall);
+
+	if (test_thread_flag(TIF_NOTIFY_RESUME)) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+	}
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/signal32.c b/src/kernel/linux/v4.14/arch/parisc/kernel/signal32.c
new file mode 100644
index 0000000..9e0cb6a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/signal32.c
@@ -0,0 +1,381 @@
+/*    Signal support for 32-bit kernel builds
+ *
+ *    Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org>
+ *    Copyright (C) 2006 Kyle McMartin <kyle at parisc-linux.org>
+ *
+ *    Code was mostly borrowed from kernel/signal.c.
+ *    See kernel/signal.c for additional Copyrights.
+ *
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/compat.h>
+#include <linux/module.h>
+#include <linux/unistd.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#include <linux/uaccess.h>
+
+#include "signal32.h"
+
+#define DEBUG_COMPAT_SIG 0 
+#define DEBUG_COMPAT_SIG_LEVEL 2
+
+#if DEBUG_COMPAT_SIG
+#define DBG(LEVEL, ...) \
+	((DEBUG_COMPAT_SIG_LEVEL >= LEVEL) \
+	? printk(__VA_ARGS__) : (void) 0)
+#else
+#define DBG(LEVEL, ...)
+#endif
+
+/*
+ * Widen a 32-bit (two-word) compat sigset into the native 64-bit
+ * sigset: the whole mask fits in sig[0] of the 64-bit set.
+ */
+inline void
+sigset_32to64(sigset_t *s64, compat_sigset_t *s32)
+{
+	s64->sig[0] = s32->sig[0] | ((unsigned long)s32->sig[1] << 32);
+}
+
+/*
+ * Narrow a native 64-bit sigset into the two 32-bit words of a compat
+ * sigset (inverse of sigset_32to64).
+ */
+inline void
+sigset_64to32(compat_sigset_t *s32, sigset_t *s64)
+{
+	s32->sig[0] = s64->sig[0] & 0xffffffffUL;
+	s32->sig[1] = (s64->sig[0] >> 32) & 0xffffffffUL;
+}
+
+/*
+ * Restore a 32-bit task's machine state on a 64-bit kernel.  Each
+ * 64-bit register is rebuilt from two halves: the lower 32 bits come
+ * from the user-visible compat sigcontext, the upper 32 bits from the
+ * hidden compat_regfile that setup_sigcontext32() wrote next to it.
+ * Returns non-zero if any user access faulted.
+ */
+long
+restore_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf,
+		struct pt_regs *regs)
+{
+	long err = 0;
+	compat_uint_t compat_reg;
+	compat_uint_t compat_regt;
+	int regn;
+	
+	/* When loading 32-bit values into 64-bit registers make
+	   sure to clear the upper 32-bits */
+	DBG(2,"restore_sigcontext32: PER_LINUX32 process\n");
+	DBG(2,"restore_sigcontext32: sc = 0x%p, rf = 0x%p, regs = 0x%p\n", sc, rf, regs);
+	DBG(2,"restore_sigcontext32: compat_sigcontext is %#lx bytes\n", sizeof(*sc));
+	for(regn=0; regn < 32; regn++){
+		err |= __get_user(compat_reg,&sc->sc_gr[regn]);
+		regs->gr[regn] = compat_reg;
+		/* Load upper half */
+		err |= __get_user(compat_regt,&rf->rf_gr[regn]);
+		regs->gr[regn] = ((u64)compat_regt << 32) | (u64)compat_reg;
+		DBG(3,"restore_sigcontext32: gr%02d = %#lx (%#x / %#x)\n", 
+				regn, regs->gr[regn], compat_regt, compat_reg);
+	}
+	DBG(2,"restore_sigcontext32: sc->sc_fr = 0x%p (%#lx)\n",sc->sc_fr, sizeof(sc->sc_fr));
+	/* XXX: BE WARNED FR's are 64-BIT! */
+	err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr));
+		
+	/* Better safe than sorry, pass __get_user two things of
+	   the same size and let gcc do the upward conversion to 
+	   64-bits */		
+	err |= __get_user(compat_reg, &sc->sc_iaoq[0]);
+	/* Load upper half */
+	err |= __get_user(compat_regt, &rf->rf_iaoq[0]);
+	regs->iaoq[0] = ((u64)compat_regt << 32) | (u64)compat_reg;
+	DBG(2,"restore_sigcontext32: upper half of iaoq[0] = %#lx\n", compat_regt);
+	DBG(2,"restore_sigcontext32: sc->sc_iaoq[0] = %p => %#x\n", 
+			&sc->sc_iaoq[0], compat_reg);
+
+	err |= __get_user(compat_reg, &sc->sc_iaoq[1]);
+	/* Load upper half */
+	err |= __get_user(compat_regt, &rf->rf_iaoq[1]);
+	regs->iaoq[1] = ((u64)compat_regt << 32) | (u64)compat_reg;
+	DBG(2,"restore_sigcontext32: upper half of iaoq[1] = %#lx\n", compat_regt);
+	DBG(2,"restore_sigcontext32: sc->sc_iaoq[1] = %p => %#x\n", 
+			&sc->sc_iaoq[1],compat_reg);	
+	DBG(2,"restore_sigcontext32: iaoq is %#lx / %#lx\n", 
+			regs->iaoq[0],regs->iaoq[1]);		
+		
+	err |= __get_user(compat_reg, &sc->sc_iasq[0]);
+	/* Load the upper half for iasq */
+	err |= __get_user(compat_regt, &rf->rf_iasq[0]);
+	regs->iasq[0] = ((u64)compat_regt << 32) | (u64)compat_reg;
+	DBG(2,"restore_sigcontext32: upper half of iasq[0] = %#lx\n", compat_regt);
+	
+	err |= __get_user(compat_reg, &sc->sc_iasq[1]);
+	/* Load the upper half for iasq */
+	err |= __get_user(compat_regt, &rf->rf_iasq[1]);
+	regs->iasq[1] = ((u64)compat_regt << 32) | (u64)compat_reg;
+	DBG(2,"restore_sigcontext32: upper half of iasq[1] = %#lx\n", compat_regt);
+	DBG(2,"restore_sigcontext32: iasq is %#lx / %#lx\n", 
+		regs->iasq[0],regs->iasq[1]);		
+
+	err |= __get_user(compat_reg, &sc->sc_sar);
+	/* Load the upper half for sar */
+	err |= __get_user(compat_regt, &rf->rf_sar);
+	regs->sar = ((u64)compat_regt << 32) | (u64)compat_reg;	
+	DBG(2,"restore_sigcontext32: upper_half & sar = %#lx\n", compat_regt);	
+	DBG(2,"restore_sigcontext32: sar is %#lx\n", regs->sar);		
+	DBG(2,"restore_sigcontext32: r28 is %ld\n", regs->gr[28]);
+	
+	return err;
+}
+
+/*
+ * Set up the sigcontext structure for this process.
+ * This is not an easy task if the kernel is 64-bit, it will require
+ * that we examine the process personality to determine if we need to
+ * truncate for a 32-bit userspace.
+ */
+/*
+ * Save the machine state for a 32-bit task on a 64-bit kernel.  The
+ * lower 32 bits of each 64-bit register go into the user-visible
+ * compat sigcontext; the upper 32 bits are stashed in the hidden
+ * compat_regfile so restore_sigcontext32() can rebuild full values.
+ * Returns non-zero if any user access faulted.
+ */
+long
+setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf, 
+		struct pt_regs *regs, int in_syscall)		 
+{
+	compat_int_t flags = 0;
+	long err = 0;
+	compat_uint_t compat_reg;
+	compat_uint_t compat_regb;
+	int regn;
+	
+	if (on_sig_stack((unsigned long) sc))
+		flags |= PARISC_SC_FLAG_ONSTACK;
+	
+	if (in_syscall) {
+		
+		DBG(1,"setup_sigcontext32: in_syscall\n");
+		
+		flags |= PARISC_SC_FLAG_IN_SYSCALL;
+		/* Truncate gr31 */
+		compat_reg = (compat_uint_t)(regs->gr[31]);
+		/* regs->iaoq is undefined in the syscall return path */
+		err |= __put_user(compat_reg, &sc->sc_iaoq[0]);
+		DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n",
+				&sc->sc_iaoq[0], compat_reg);
+		
+		/* Store upper half */
+		compat_reg = (compat_uint_t)(regs->gr[31] >> 32);
+		err |= __put_user(compat_reg, &rf->rf_iaoq[0]);
+		DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg);
+		
+		
+		compat_reg = (compat_uint_t)(regs->gr[31]+4);
+		err |= __put_user(compat_reg, &sc->sc_iaoq[1]);
+		DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n",
+				&sc->sc_iaoq[1], compat_reg);
+		/* Store upper half */
+		compat_reg = (compat_uint_t)((regs->gr[31]+4) >> 32);
+		err |= __put_user(compat_reg, &rf->rf_iaoq[1]);
+		DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg);
+		
+		/* Truncate sr3 */
+		compat_reg = (compat_uint_t)(regs->sr[3]);
+		err |= __put_user(compat_reg, &sc->sc_iasq[0]);
+		err |= __put_user(compat_reg, &sc->sc_iasq[1]);		
+		
+		/* Store upper half */
+		compat_reg = (compat_uint_t)(regs->sr[3] >> 32);
+		err |= __put_user(compat_reg, &rf->rf_iasq[0]);
+		err |= __put_user(compat_reg, &rf->rf_iasq[1]);		
+		
+		DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg);
+		DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg);		
+		DBG(1,"setup_sigcontext32: iaoq %#lx / %#lx\n",				
+			regs->gr[31], regs->gr[31]+4);
+		
+	} else {
+		
+		/* Not in a syscall: the real IAOQ/IASQ queues are valid. */
+		compat_reg = (compat_uint_t)(regs->iaoq[0]);
+		err |= __put_user(compat_reg, &sc->sc_iaoq[0]);
+		DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n",
+				&sc->sc_iaoq[0], compat_reg);
+		/* Store upper half */
+		compat_reg = (compat_uint_t)(regs->iaoq[0] >> 32);
+		err |= __put_user(compat_reg, &rf->rf_iaoq[0]);	
+		DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg);
+		
+		compat_reg = (compat_uint_t)(regs->iaoq[1]);
+		err |= __put_user(compat_reg, &sc->sc_iaoq[1]);
+		DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n",
+				&sc->sc_iaoq[1], compat_reg);
+		/* Store upper half */
+		compat_reg = (compat_uint_t)(regs->iaoq[1] >> 32);
+		err |= __put_user(compat_reg, &rf->rf_iaoq[1]);
+		DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg);
+		
+		
+		compat_reg = (compat_uint_t)(regs->iasq[0]);
+		err |= __put_user(compat_reg, &sc->sc_iasq[0]);
+		DBG(2,"setup_sigcontext32: sc->sc_iasq[0] = %p <= %#x\n",
+				&sc->sc_iasq[0], compat_reg);
+		/* Store upper half */
+		compat_reg = (compat_uint_t)(regs->iasq[0] >> 32);
+		err |= __put_user(compat_reg, &rf->rf_iasq[0]);
+		DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg);
+		
+		
+		compat_reg = (compat_uint_t)(regs->iasq[1]);
+		err |= __put_user(compat_reg, &sc->sc_iasq[1]);
+		DBG(2,"setup_sigcontext32: sc->sc_iasq[1] = %p <= %#x\n",
+				&sc->sc_iasq[1], compat_reg);
+		/* Store upper half */
+		compat_reg = (compat_uint_t)(regs->iasq[1] >> 32);
+		err |= __put_user(compat_reg, &rf->rf_iasq[1]);
+		DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg);
+
+		/* Print out the IAOQ for debugging */		
+		DBG(1,"setup_sigcontext32: ia0q %#lx / %#lx\n", 
+			regs->iaoq[0], regs->iaoq[1]);
+	}
+
+	err |= __put_user(flags, &sc->sc_flags);
+	
+	DBG(1,"setup_sigcontext32: Truncating general registers.\n");
+	
+	for(regn=0; regn < 32; regn++){
+		/* Truncate a general register */
+		compat_reg = (compat_uint_t)(regs->gr[regn]);
+		err |= __put_user(compat_reg, &sc->sc_gr[regn]);
+		/* Store upper half */
+		compat_regb = (compat_uint_t)(regs->gr[regn] >> 32);
+		err |= __put_user(compat_regb, &rf->rf_gr[regn]);
+
+		/* DEBUG: Write out the "upper / lower" register data */
+		DBG(2,"setup_sigcontext32: gr%02d = %#x / %#x\n", regn, 
+				compat_regb, compat_reg);
+	}
+	
+	/* Copy the floating point registers (same size)
+	   XXX: BE WARNED FR's are 64-BIT! */	
+	DBG(1,"setup_sigcontext32: Copying from regs to sc, "
+	      "sc->sc_fr size = %#lx, regs->fr size = %#lx\n",
+		sizeof(regs->fr), sizeof(sc->sc_fr));
+	err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr));
+
+	compat_reg = (compat_uint_t)(regs->sar);
+	err |= __put_user(compat_reg, &sc->sc_sar);
+	DBG(2,"setup_sigcontext32: sar is %#x\n", compat_reg);
+	/* Store upper half */
+	compat_reg = (compat_uint_t)(regs->sar >> 32);
+	err |= __put_user(compat_reg, &rf->rf_sar);	
+	DBG(2,"setup_sigcontext32: upper half sar = %#x\n", compat_reg);
+	DBG(1,"setup_sigcontext32: r28 is %ld\n", regs->gr[28]);
+
+	return err;
+}
+
+/*
+ * Convert a 32-bit user-space compat_siginfo_t into the kernel's
+ * native siginfo_t, copying only the union member selected by
+ * siginfo_layout().  Returns -EFAULT / non-zero on fault.
+ */
+int
+copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
+{
+	compat_uptr_t addr;
+	int err;
+
+	if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
+		return -EFAULT;
+
+	err = __get_user(to->si_signo, &from->si_signo);
+	err |= __get_user(to->si_errno, &from->si_errno);
+	err |= __get_user(to->si_code, &from->si_code);
+
+	/* Negative si_code: opaque payload, copy the raw pad bytes. */
+	if (to->si_code < 0)
+		err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
+	else {
+		switch (siginfo_layout(to->si_signo, to->si_code)) {
+		      case SIL_CHLD:
+			err |= __get_user(to->si_utime, &from->si_utime);
+			err |= __get_user(to->si_stime, &from->si_stime);
+			err |= __get_user(to->si_status, &from->si_status);
+			/* fallthrough: SIL_CHLD also carries pid/uid */
+		      default:
+		      case SIL_KILL:
+			err |= __get_user(to->si_pid, &from->si_pid);
+			err |= __get_user(to->si_uid, &from->si_uid);
+			break;
+		      case SIL_FAULT:
+			err |= __get_user(addr, &from->si_addr);
+			to->si_addr = compat_ptr(addr);
+			break;
+		      case SIL_POLL:
+			err |= __get_user(to->si_band, &from->si_band);
+			err |= __get_user(to->si_fd, &from->si_fd);
+			break;
+		      case SIL_RT:
+			err |= __get_user(to->si_pid, &from->si_pid);
+			err |= __get_user(to->si_uid, &from->si_uid);
+			err |= __get_user(to->si_int, &from->si_int);
+			break;
+		}
+	}
+	return err;
+}
+
+/*
+ * Convert the kernel's native siginfo_t into a 32-bit user-space
+ * compat_siginfo_t, narrowing pointers/ints and copying only the
+ * union member selected by siginfo_layout().  Returns -EFAULT /
+ * non-zero on fault.
+ */
+int
+copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from)
+{
+	compat_uptr_t addr;
+	compat_int_t val;
+	int err;
+
+	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
+		return -EFAULT;
+
+	/* If you change siginfo_t structure, please be sure
+	   this code is fixed accordingly.
+	   It should never copy any pad contained in the structure
+	   to avoid security leaks, but must copy the generic
+	   3 ints plus the relevant union member.
+	   This routine must convert siginfo from 64bit to 32bit as well
+	   at the same time.  */
+	err = __put_user(from->si_signo, &to->si_signo);
+	err |= __put_user(from->si_errno, &to->si_errno);
+	err |= __put_user(from->si_code, &to->si_code);
+	/* Negative si_code: opaque payload, copy the raw pad bytes. */
+	if (from->si_code < 0)
+		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
+	else {
+		switch (siginfo_layout(from->si_signo, from->si_code)) {
+		case SIL_CHLD:
+			err |= __put_user(from->si_utime, &to->si_utime);
+			err |= __put_user(from->si_stime, &to->si_stime);
+			err |= __put_user(from->si_status, &to->si_status);
+			/* fallthrough: SIL_CHLD also carries pid/uid */
+		case SIL_KILL:
+			err |= __put_user(from->si_pid, &to->si_pid);
+			err |= __put_user(from->si_uid, &to->si_uid);
+			break;
+		case SIL_FAULT:
+			addr = ptr_to_compat(from->si_addr);
+			err |= __put_user(addr, &to->si_addr);
+			break;
+		case SIL_POLL:
+			err |= __put_user(from->si_band, &to->si_band);
+			err |= __put_user(from->si_fd, &to->si_fd);
+			break;
+		case SIL_TIMER:
+			err |= __put_user(from->si_tid, &to->si_tid);
+			err |= __put_user(from->si_overrun, &to->si_overrun);
+			val = (compat_int_t)from->si_int;
+			err |= __put_user(val, &to->si_int);
+			break;
+		case SIL_RT:
+			err |= __put_user(from->si_uid, &to->si_uid);
+			err |= __put_user(from->si_pid, &to->si_pid);
+			val = (compat_int_t)from->si_int;
+			err |= __put_user(val, &to->si_int);
+			break;
+		case SIL_SYS:
+			err |= __put_user(ptr_to_compat(from->si_call_addr), &to->si_call_addr);
+			err |= __put_user(from->si_syscall, &to->si_syscall);
+			err |= __put_user(from->si_arch, &to->si_arch);
+			break;
+		}
+	}
+	return err;
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/signal32.h b/src/kernel/linux/v4.14/arch/parisc/kernel/signal32.h
new file mode 100644
index 0000000..af51d4c
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/signal32.h
@@ -0,0 +1,91 @@
+/* 
+ *    Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org>
+ *    Copyright (C) 2003 Carlos O'Donell <carlos at parisc-linux.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef _PARISC64_KERNEL_SIGNAL32_H
+#define _PARISC64_KERNEL_SIGNAL32_H
+
+#include <linux/compat.h>
+
+/* 32-bit ucontext as seen from an 64-bit kernel */
+struct compat_ucontext {
+        compat_uint_t uc_flags;
+        compat_uptr_t uc_link;
+        compat_stack_t uc_stack;        /* struct compat_sigaltstack (12 bytes)*/
+        /* FIXME: Pad out to get uc_mcontext to start at an 8-byte aligned boundary */
+        compat_uint_t pad[1];
+        struct compat_sigcontext uc_mcontext;
+        compat_sigset_t uc_sigmask;     /* mask last for extensibility */
+};
+
+/* ELF32 signal handling */
+
+int copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from);
+int copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from);
+
+/* In a deft move of uber-hackery, we decide to carry the top half of all
+ * 64-bit registers in a non-portable, non-ABI, hidden structure.
+ * Userspace can read the hidden structure if it *wants* but is never
+ * guaranteed to be in the same place. In fact the uc_sigmask from the
+ * ucontext_t structure may push the hidden register file downwards
+ */
+struct compat_regfile {
+        /* Upper half of all the 64-bit registers that were truncated
+           on a copy to a 32-bit userspace */
+        compat_int_t rf_gr[32];
+        compat_int_t rf_iasq[2];
+        compat_int_t rf_iaoq[2];
+        compat_int_t rf_sar;
+};
+
+#define COMPAT_SIGRETURN_TRAMP 4
+#define COMPAT_SIGRESTARTBLOCK_TRAMP 5
+#define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + \
+				COMPAT_SIGRESTARTBLOCK_TRAMP)
+
+struct compat_rt_sigframe {
+        /* XXX: Must match trampoline size in arch/parisc/kernel/signal.c
+                Secondary to that it must protect the ERESTART_RESTARTBLOCK
+                trampoline we left on the stack (we were bad and didn't
+                change sp so we could run really fast.) */
+        compat_uint_t tramp[COMPAT_TRAMP_SIZE];
+        compat_siginfo_t info;
+        struct compat_ucontext uc;
+        /* Hidden location of truncated registers, *must* be last. */
+        struct compat_regfile regs;
+};
+
+/*
+ * The 32-bit ABI wants at least 48 bytes for a function call frame:
+ * 16 bytes for arg0-arg3, and 32 bytes for magic (the only part of
+ * which Linux/parisc uses is sp-20 for the saved return pointer...)
+ * Then, the stack pointer must be rounded to a cache line (64 bytes).
+ */
+#define SIGFRAME32              64
+#define FUNCTIONCALLFRAME32     48
+#define PARISC_RT_SIGFRAME_SIZE32 (((sizeof(struct compat_rt_sigframe) + FUNCTIONCALLFRAME32) + SIGFRAME32) & -SIGFRAME32)
+
+void sigset_32to64(sigset_t *s64, compat_sigset_t *s32);
+void sigset_64to32(compat_sigset_t *s32, sigset_t *s64);
+long restore_sigcontext32(struct compat_sigcontext __user *sc, 
+		struct compat_regfile __user *rf,
+		struct pt_regs *regs);
+long setup_sigcontext32(struct compat_sigcontext __user *sc, 
+		struct compat_regfile __user *rf,
+		struct pt_regs *regs, int in_syscall);
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/smp.c b/src/kernel/linux/v4.14/arch/parisc/kernel/smp.c
new file mode 100644
index 0000000..ab4d558
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/smp.c
@@ -0,0 +1,425 @@
+/*
+** SMP Support
+**
+** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
+** 
+** Lots of stuff stolen from arch/alpha/kernel/smp.c
+** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
+**
+** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
+** -grant (1/12/2001)
+**
+**	This program is free software; you can redistribute it and/or modify
+**	it under the terms of the GNU General Public License as published by
+**      the Free Software Foundation; either version 2 of the License, or
+**      (at your option) any later version.
+*/
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched/mm.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel_stat.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/ftrace.h>
+#include <linux/cpu.h>
+
+#include <linux/atomic.h>
+#include <asm/current.h>
+#include <asm/delay.h>
+#include <asm/tlbflush.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/unistd.h>
+#include <asm/cacheflush.h>
+
+#undef DEBUG_SMP
+#ifdef DEBUG_SMP
+static int smp_debug_lvl = 0;
+#define smp_debug(lvl, printargs...)		\
+		if (lvl >= smp_debug_lvl)	\
+			printk(printargs);
+#else
+#define smp_debug(lvl, ...)	do { } while(0)
+#endif /* DEBUG_SMP */
+
+volatile struct task_struct *smp_init_current_idle_task;
+
+/* track which CPU is booting */
+static volatile int cpu_now_booting;
+
+static int parisc_max_cpus = 1;
+
+static DEFINE_PER_CPU(spinlock_t, ipi_lock);
+
+enum ipi_message_type {
+	IPI_NOP=0,
+	IPI_RESCHEDULE=1,
+	IPI_CALL_FUNC,
+	IPI_CPU_START,
+	IPI_CPU_STOP,
+	IPI_CPU_TEST
+};
+
+
+/********** SMP inter processor interrupt and communication routines */
+
+#undef PER_CPU_IRQ_REGION
+#ifdef PER_CPU_IRQ_REGION
+/* XXX REVISIT Ignore for now.
+**    *May* need this "hook" to register IPI handler
+**    once we have perCPU ExtIntr switch tables.
+*/
+static void
+ipi_init(int cpuid)
+{
+#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region
+
+	if(cpu_online(cpuid) )
+	{
+		switch_to_idle_task(current);
+	}
+
+	return;
+}
+#endif
+
+
+/*
+** Take the calling CPU out of the online mask and park it forever
+** with interrupts disabled.
+**
+** REVISIT : redirect I/O Interrupts to another CPU?
+** REVISIT : does PM *know* this CPU isn't available?
+*/
+static void
+halt_processor(void)
+{
+	set_cpu_online(smp_processor_id(), false);
+	local_irq_disable();
+	while (1)
+		continue;	/* spin; this CPU never returns */
+}
+
+
+/*
+ * Per-CPU IPI interrupt handler.
+ *
+ * Repeatedly snapshots and clears this CPU's pending_ipi bitmask under
+ * the per-CPU ipi_lock, then dispatches every operation whose bit was
+ * set.  Returns IRQ_HANDLED once no further bits are pending, or
+ * IRQ_NONE if an unknown IPI number is encountered.
+ */
+irqreturn_t __irq_entry
+ipi_interrupt(int irq, void *dev_id) 
+{
+	int this_cpu = smp_processor_id();
+	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
+	unsigned long ops;
+	unsigned long flags;
+
+	for (;;) {
+		/* Atomically grab and clear the pending-op mask. */
+		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
+		spin_lock_irqsave(lock, flags);
+		ops = p->pending_ipi;
+		p->pending_ipi = 0;
+		spin_unlock_irqrestore(lock, flags);
+
+		mb(); /* Order bit clearing and data access. */
+
+		if (!ops)
+		    break;
+
+		while (ops) {
+			/* ffz(~ops) yields the lowest set bit number. */
+			unsigned long which = ffz(~ops);
+
+			ops &= ~(1 << which);
+
+			switch (which) {
+			case IPI_NOP:
+				smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
+				break;
+				
+			case IPI_RESCHEDULE:
+				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
+				inc_irq_stat(irq_resched_count);
+				scheduler_ipi();
+				break;
+
+			case IPI_CALL_FUNC:
+				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
+				generic_smp_call_function_interrupt();
+				break;
+
+			case IPI_CPU_START:
+				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
+				break;
+
+			case IPI_CPU_STOP:
+				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
+				halt_processor();
+				break;
+
+			case IPI_CPU_TEST:
+				smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
+				break;
+
+			default:
+				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
+					this_cpu, which);
+				return IRQ_NONE;
+			} /* Switch */
+		/* let in any pending interrupts */
+		local_irq_enable();
+		local_irq_disable();
+		} /* while (ops) */
+	}
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * Post IPI operation 'op' to 'cpu': set the corresponding bit in the
+ * target's pending_ipi mask under its ipi_lock, then write to the
+ * target's hpa to raise the external interrupt that runs
+ * ipi_interrupt() there.
+ */
+static inline void
+ipi_send(int cpu, enum ipi_message_type op)
+{
+	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
+	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+	p->pending_ipi |= 1 << op;
+	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
+	spin_unlock_irqrestore(lock, flags);
+}
+
+/* Send IPI operation 'op' to every CPU in 'mask'. */
+static void
+send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
+{
+	int cpu;
+
+	for_each_cpu(cpu, mask)
+		ipi_send(cpu, op);
+}
+
+/* Send IPI operation 'op' to a single CPU; dest must be a real CPU id. */
+static inline void
+send_IPI_single(int dest_cpu, enum ipi_message_type op)
+{
+	BUG_ON(dest_cpu == NO_PROC_ID);
+
+	ipi_send(dest_cpu, op);
+}
+
+/* Send IPI operation 'op' to every online CPU except the caller's. */
+static inline void
+send_IPI_allbutself(enum ipi_message_type op)
+{
+	int i;
+	
+	for_each_online_cpu(i) {
+		if (i != smp_processor_id())
+			send_IPI_single(i, op);
+	}
+}
+
+
+/* Stop all other CPUs (they halt in ipi_interrupt -> halt_processor). */
+inline void 
+smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }
+
+/* Kick 'cpu' so its scheduler re-evaluates runnable tasks. */
+void 
+smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
+
+/* Send a no-op IPI to all other CPUs (used as a barrier/kick). */
+void
+smp_send_all_nop(void)
+{
+	send_IPI_allbutself(IPI_NOP);
+}
+
+/* smp_call_function backend: interrupt every CPU in 'mask'. */
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	send_IPI_mask(mask, IPI_CALL_FUNC);
+}
+
+/* smp_call_function_single backend: interrupt one CPU. */
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_IPI_single(cpu, IPI_CALL_FUNC);
+}
+
+/*
+ * Called by secondaries to update state and initialize CPU registers.
+ * Halts the machine if the CPU is somehow already marked online
+ * (double initialization), otherwise marks it online and sets up its
+ * MM, IRQs and interval timer.
+ */
+static void __init
+smp_cpu_init(int cpunum)
+{
+	extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */
+	extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */
+
+	/* Set modes and Enable floating point coprocessor */
+	init_per_cpu(cpunum);
+
+	disable_sr_hashing();
+
+	mb();
+
+	/* Well, support 2.4 linux scheme as well. */
+	if (cpu_online(cpunum))	{
+		extern void machine_halt(void); /* arch/parisc.../process.c */
+
+		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
+		machine_halt();
+	}
+
+	notify_cpu_starting(cpunum);
+
+	set_cpu_online(cpunum, true);
+
+	/* Initialise the idle task for this CPU */
+	mmgrab(&init_mm);
+	current->active_mm = &init_mm;
+	BUG_ON(current->mm);
+	enter_lazy_tlb(&init_mm, current);
+
+	init_IRQ();   /* make sure no IRQs are enabled or pending */
+	start_cpu_itimer();
+}
+
+
+/*
+ * Slaves start using C here. Indirectly called from smp_slave_stext.
+ * Do what start_kernel() and main() do for boot strap processor (aka monarch)
+ *
+ * Never returns: ends in the idle loop via cpu_startup_entry().
+ */
+void __init smp_callin(void)
+{
+	/* cpu_now_booting was published by smp_boot_one_cpu(). */
+	int slave_id = cpu_now_booting;
+
+	smp_cpu_init(slave_id);
+	preempt_disable();
+
+	flush_cache_all_local(); /* start with known state */
+	flush_tlb_all_local(NULL);
+
+	local_irq_enable();  /* Interrupts have been off until now */
+
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+
+	/* NOTREACHED */
+	panic("smp_callin() AAAAaaaaahhhh....\n");
+}
+
+/*
+ * Bring one cpu online.
+ *
+ * Publishes the booting CPU id and its idle task, releases the CPU
+ * from its PDC rendezvous loop, then polls up to ~1 second for it to
+ * mark itself online.  Returns 0 on success, -1 if the CPU got stuck.
+ */
+int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
+{
+	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
+	long timeout;
+
+	task_thread_info(idle)->cpu = cpuid;
+
+	/* Let _start know what logical CPU we're booting
+	** (offset into init_tasks[],cpu_data[])
+	*/
+	cpu_now_booting = cpuid;
+
+	/* 
+	** boot strap code needs to know the task address since
+	** it also contains the process stack.
+	*/
+	smp_init_current_idle_task = idle ;
+	mb();
+
+	printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);
+
+	/*
+	** This gets PDC to release the CPU from a very tight loop.
+	**
+	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
+	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which 
+	** is executed after receiving the rendezvous signal (an interrupt to 
+	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the 
+	** contents of memory are valid."
+	*/
+	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
+	mb();
+
+	/* 
+	 * OK, wait a bit for that CPU to finish staggering about. 
+	 * Slave will set a bit when it reaches smp_cpu_init().
+	 * Once the "monarch CPU" sees the bit change, it can move on.
+	 */
+	for (timeout = 0; timeout < 10000; timeout++) {
+		if(cpu_online(cpuid)) {
+			/* Which implies Slave has started up */
+			cpu_now_booting = 0;
+			smp_init_current_idle_task = NULL;
+			goto alive ;
+		}
+		udelay(100);
+		barrier();
+	}
+	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
+	return -1;
+
+alive:
+	/* Remember the Slave data */
+	smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
+		cpuid, timeout * 100);
+	return 0;
+}
+
+/* Mark the bootstrap (monarch) CPU online and present at early boot. */
+void __init smp_prepare_boot_cpu(void)
+{
+	int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;
+
+	/* Setup BSP mappings */
+	printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
+
+	set_cpu_online(bootstrap_processor, true);
+	set_cpu_present(bootstrap_processor, true);
+}
+
+
+
+/*
+** inventory.c:do_inventory() hasn't yet been run and thus we
+** don't 'discover' the additional CPUs until later.
+**
+** Initializes the per-CPU IPI locks and records the max_cpus cap
+** consulted later by __cpu_up().
+*/
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		spin_lock_init(&per_cpu(ipi_lock, cpu));
+
+	init_cpu_present(cpumask_of(0));
+
+	parisc_max_cpus = max_cpus;
+	if (!max_cpus)
+		printk(KERN_INFO "SMP mode deactivated.\n");
+}
+
+
+/* Hotplug hook run once all secondary CPUs are up; nothing to do here. */
+void smp_cpus_done(unsigned int cpu_max)
+{
+}
+
+
+/*
+ * CPU-hotplug entry point: boot secondary 'cpu' (if within the
+ * parisc_max_cpus cap) and report whether it came online.
+ */
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+	if (cpu != 0 && cpu < parisc_max_cpus && smp_boot_one_cpu(cpu, tidle))
+		return -ENOSYS;
+
+	return cpu_online(cpu) ? 0 : -ENOSYS;
+}
+
+#ifdef CONFIG_PROC_FS
+/* /proc profiling multiplier hook: not supported on parisc. */
+int setup_profiling_timer(unsigned int multiplier)
+{
+	return -EINVAL;
+}
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/stacktrace.c b/src/kernel/linux/v4.14/arch/parisc/kernel/stacktrace.c
new file mode 100644
index 0000000..2fe914c
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/stacktrace.c
@@ -0,0 +1,63 @@
+/*
+ * Stack trace management functions
+ *
+ *  Copyright (C) 2009 Helge Deller <deller@gmx.de>
+ *  based on arch/x86/kernel/stacktrace.c by Ingo Molnar <mingo@redhat.com>
+ *  and parisc unwind functions by Randolph Chung <tausq@debian.org>
+ *
+ *  TODO: Userspace stacktrace (CONFIG_USER_STACKTRACE_SUPPORT)
+ */
+#include <linux/module.h>
+#include <linux/stacktrace.h>
+
+#include <asm/unwind.h>
+
+/*
+ * Walk the kernel stack of 'task' with the parisc unwinder and record
+ * up to trace->max_entries kernel-text addresses into trace->entries.
+ */
+static void dump_trace(struct task_struct *task, struct stack_trace *trace)
+{
+	struct unwind_frame_info info;
+
+	/* initialize unwind info */
+	if (task == current) {
+		unsigned long sp;
+		struct pt_regs r;
+HERE:
+		/* %r30 is the stack pointer; capture it for the fake regs. */
+		asm volatile ("copy %%r30, %0" : "=r"(sp));
+		memset(&r, 0, sizeof(struct pt_regs));
+		/* Seed a synthetic frame at the HERE label so unwinding
+		   starts from this very function. */
+		r.iaoq[0] = (unsigned long)&&HERE;
+		r.gr[2] = (unsigned long)__builtin_return_address(0);
+		r.gr[30] = sp;
+		unwind_frame_init(&info, task, &r);
+	} else {
+		unwind_frame_init_from_blocked_task(&info, task);
+	}
+
+	/* unwind stack and save entries in stack_trace struct */
+	trace->nr_entries = 0;
+	while (trace->nr_entries < trace->max_entries) {
+		if (unwind_once(&info) < 0 || info.ip == 0)
+			break;
+
+		if (__kernel_text_address(info.ip))
+			trace->entries[trace->nr_entries++] = info.ip;
+	}
+}
+
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ * The trace is terminated with ULONG_MAX if space remains.
+ */
+void save_stack_trace(struct stack_trace *trace)
+{
+	dump_trace(current, trace);
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+/* As save_stack_trace(), but for an arbitrary (blocked) task 'tsk'. */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+	dump_trace(tsk, trace);
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/sys_parisc.c b/src/kernel/linux/v4.14/arch/parisc/kernel/sys_parisc.c
new file mode 100644
index 0000000..378a754
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/sys_parisc.c
@@ -0,0 +1,387 @@
+
+/*
+ *    PARISC specific syscalls
+ *
+ *    Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
+ *    Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
+ *    Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
+ *    Copyright (C) 1999-2014 Helge Deller <deller@gmx.de>
+ *
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/uaccess.h>
+#include <asm/elf.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/linkage.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
+#include <linux/shm.h>
+#include <linux/syscalls.h>
+#include <linux/utsname.h>
+#include <linux/personality.h>
+#include <linux/random.h>
+
+/* we construct an artificial offset for the mapping based on the physical
+ * address of the kernel mapping variable */
+#define GET_LAST_MMAP(filp)		\
+	(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
+#define SET_LAST_MMAP(filp, val)	\
+	 { /* nothing */ }
+
+/* Page index of 'last_mmap' within its SHM_COLOUR-sized colour bin. */
+static int get_offset(unsigned int last_mmap)
+{
+	return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
+}
+
+/* Byte offset (already shifted by PAGE_SHIFT) that a mapping at page
+   'pgoff' must keep relative to a SHM_COLOUR boundary to share cache
+   colour with the first mapping of the object. */
+static unsigned long shared_align_offset(unsigned int last_mmap,
+					 unsigned long pgoff)
+{
+	return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
+}
+
+/*
+ * Round 'addr' up to a SHM_COLOUR boundary and add the colour offset
+ * needed so this mapping aliases correctly with the first mapping of
+ * the object (identified by last_mmap) at page offset pgoff.
+ *
+ * Bug fix: shared_align_offset() already returns a byte offset (it
+ * shifts by PAGE_SHIFT internally), so it must not be shifted by
+ * PAGE_SHIFT a second time here.  The double shift pushed every bit
+ * above the (SHM_COLOUR-1) mask, making 'off' always 0 and silently
+ * defeating cache colouring for shared mappings.
+ */
+static inline unsigned long COLOR_ALIGN(unsigned long addr,
+			 unsigned int last_mmap, unsigned long pgoff)
+{
+	unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
+	unsigned long off  = (SHM_COLOUR-1) &
+		shared_align_offset(last_mmap, pgoff);
+
+	return base + off;
+}
+
+/*
+ * Top of mmap area (just below the process stack).
+ *
+ * Returns STACK_TOP minus the capped stack rlimit plus room for stack
+ * randomization, page-aligned.
+ */
+
+static unsigned long mmap_upper_limit(void)
+{
+	unsigned long stack_base;
+
+	/* Limit stack size - see setup_arg_pages() in fs/exec.c */
+	stack_base = rlimit_max(RLIMIT_STACK);
+	if (stack_base > STACK_SIZE_MAX)
+		stack_base = STACK_SIZE_MAX;
+
+	/* Add space for stack randomization. */
+	stack_base += (STACK_RND_MASK << PAGE_SHIFT);
+
+	return PAGE_ALIGN(STACK_TOP - stack_base);
+}
+
+
+/*
+ * Bottom-up unmapped-area search honouring parisc cache colouring:
+ * shared/file mappings must land at an address whose colour offset
+ * matches earlier mappings of the same object (tracked via last_mmap).
+ * Returns the chosen address or a negative errno.
+ */
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma, *prev;
+	unsigned long task_size = TASK_SIZE;
+	int do_color_align, last_mmap;
+	struct vm_unmapped_area_info info;
+
+	if (len > task_size)
+		return -ENOMEM;
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = 1;
+	last_mmap = GET_LAST_MMAP(filp);
+
+	if (flags & MAP_FIXED) {
+		/* MAP_FIXED with the wrong colour would alias badly. */
+		if ((flags & MAP_SHARED) && last_mmap &&
+		    (addr - shared_align_offset(last_mmap, pgoff))
+				& (SHM_COLOUR - 1))
+			return -EINVAL;
+		goto found_addr;
+	}
+
+	if (addr) {
+		if (do_color_align && last_mmap)
+			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma_prev(mm, addr, &prev);
+		if (task_size - len >= addr &&
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
+			goto found_addr;
+	}
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_legacy_base;
+	info.high_limit = mmap_upper_limit();
+	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+	info.align_offset = shared_align_offset(last_mmap, pgoff);
+	addr = vm_unmapped_area(&info);
+
+found_addr:
+	/* Record the colour of the first mapping of this object. */
+	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
+		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
+
+	return addr;
+}
+
+/*
+ * Top-down variant of arch_get_unmapped_area() with the same cache
+ * colouring rules; falls back to the bottom-up search if the top-down
+ * search fails.
+ */
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			  const unsigned long len, const unsigned long pgoff,
+			  const unsigned long flags)
+{
+	struct vm_area_struct *vma, *prev;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	int do_color_align, last_mmap;
+	struct vm_unmapped_area_info info;
+
+#ifdef CONFIG_64BIT
+	/* This should only ever run for 32-bit processes.  */
+	BUG_ON(!test_thread_flag(TIF_32BIT));
+#endif
+
+	/* requested length too big for entire address space */
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = 1;
+	last_mmap = GET_LAST_MMAP(filp);
+
+	if (flags & MAP_FIXED) {
+		/* MAP_FIXED with the wrong colour would alias badly. */
+		if ((flags & MAP_SHARED) && last_mmap &&
+		    (addr - shared_align_offset(last_mmap, pgoff))
+			& (SHM_COLOUR - 1))
+			return -EINVAL;
+		goto found_addr;
+	}
+
+	/* requesting a specific address */
+	if (addr) {
+		if (do_color_align && last_mmap)
+			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma_prev(mm, addr, &prev);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
+			goto found_addr;
+	}
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+	info.align_offset = shared_align_offset(last_mmap, pgoff);
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		goto found_addr;
+	VM_BUG_ON(addr != -ENOMEM);
+
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+
+found_addr:
+	/* Record the colour of the first mapping of this object. */
+	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
+		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
+
+	return addr;
+}
+
+/* Decide whether this process should use the legacy bottom-up layout. */
+static int mmap_is_legacy(void)
+{
+	if (current->personality & ADDR_COMPAT_LAYOUT)
+		return 1;
+
+	/*
+	 * Unlike most architectures, the parisc stack grows upwards,
+	 * so an unlimited RLIMIT_STACK is no reason to force the legacy
+	 * memory layout here (other arches check
+	 * rlimit(RLIMIT_STACK) == RLIM_INFINITY at this point).
+	 */
+	return sysctl_legacy_va_layout;
+}
+
+/* Per-process mmap randomization offset; zero unless PF_RANDOMIZE. */
+static unsigned long mmap_rnd(void)
+{
+	unsigned long rnd = 0;
+
+	if (current->flags & PF_RANDOMIZE)
+		rnd = get_random_int() & MMAP_RND_MASK;
+
+	return rnd << PAGE_SHIFT;
+}
+
+/* Unconditional random mmap offset used by the generic ELF loader. */
+unsigned long arch_mmap_rnd(void)
+{
+	unsigned long rnd = get_random_int() & MMAP_RND_MASK;
+
+	return rnd << PAGE_SHIFT;
+}
+
+/* Base of the bottom-up mmap area, plus randomization if enabled. */
+static unsigned long mmap_legacy_base(void)
+{
+	return TASK_UNMAPPED_BASE + mmap_rnd();
+}
+
+/*
+ * This function, called very early during the creation of a new
+ * process VM image, sets up which VM layout function to use:
+ * legacy (bottom-up) or default (top-down).
+ */
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+	mm->mmap_legacy_base = mmap_legacy_base();
+	mm->mmap_base = mmap_upper_limit();
+
+	if (mmap_is_legacy()) {
+		mm->mmap_base = mm->mmap_legacy_base;
+		mm->get_unmapped_area = arch_get_unmapped_area;
+	} else {
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+	}
+}
+
+
+/* mmap2(2): offset is given in 4096-byte units regardless of PAGE_SIZE. */
+asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
+	unsigned long prot, unsigned long flags, unsigned long fd,
+	unsigned long pgoff)
+{
+	/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
+	   we have. */
+	return sys_mmap_pgoff(addr, len, prot, flags, fd,
+			      pgoff >> (PAGE_SHIFT - 12));
+}
+
+/*
+ * Old-style mmap(2) taking a byte offset.  The offset must be
+ * page-aligned; anything else is rejected with -EINVAL.
+ */
+asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
+		unsigned long prot, unsigned long flags, unsigned long fd,
+		unsigned long offset)
+{
+	if (offset & ~PAGE_MASK)
+		return -EINVAL;
+
+	return sys_mmap_pgoff(addr, len, prot, flags, fd,
+			      offset >> PAGE_SHIFT);
+}
+
+/* Wrappers for the broken 32-bit ABI: 64-bit arguments arrive split
+   across two 32-bit registers (high word first). */
+
+#ifdef CONFIG_64BIT
+/* Reassemble the split 64-bit length and forward to sys_truncate(). */
+asmlinkage long parisc_truncate64(const char __user * path,
+					unsigned int high, unsigned int low)
+{
+	return sys_truncate(path, (long)high << 32 | low);
+}
+
+/* Reassemble the split 64-bit length and forward to sys_ftruncate(). */
+asmlinkage long parisc_ftruncate64(unsigned int fd,
+					unsigned int high, unsigned int low)
+{
+	return sys_ftruncate(fd, (long)high << 32 | low);
+}
+
+/* stubs for the benefit of the syscall_table since truncate64 and truncate 
+ * are identical on LP64 */
+asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
+{
+	return sys_truncate(path, length);
+}
+asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
+{
+	return sys_ftruncate(fd, length);
+}
+asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	return sys_fcntl(fd, cmd, arg);
+}
+#else
+
+/* 32-bit kernel: combine the halves into a loff_t for sys_truncate64(). */
+asmlinkage long parisc_truncate64(const char __user * path,
+					unsigned int high, unsigned int low)
+{
+	return sys_truncate64(path, (loff_t)high << 32 | low);
+}
+
+/* 32-bit kernel: combine the halves into a loff_t for sys_ftruncate64(). */
+asmlinkage long parisc_ftruncate64(unsigned int fd,
+					unsigned int high, unsigned int low)
+{
+	return sys_ftruncate64(fd, (loff_t)high << 32 | low);
+}
+#endif
+
+/*
+ * The wrappers below reassemble 64-bit file offsets/lengths that the
+ * 32-bit ABI delivers as (high, low) register pairs, then forward to
+ * the corresponding generic syscall.
+ */
+asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
+					unsigned int high, unsigned int low)
+{
+	return sys_pread64(fd, buf, count, (loff_t)high << 32 | low);
+}
+
+asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
+			size_t count, unsigned int high, unsigned int low)
+{
+	return sys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
+}
+
+asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
+		                    size_t count)
+{
+	return sys_readahead(fd, (loff_t)high << 32 | low, count);
+}
+
+asmlinkage long parisc_fadvise64_64(int fd,
+			unsigned int high_off, unsigned int low_off,
+			unsigned int high_len, unsigned int low_len, int advice)
+{
+	return sys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
+			(loff_t)high_len << 32 | low_len, advice);
+}
+
+asmlinkage long parisc_sync_file_range(int fd,
+			u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes,
+			unsigned int flags)
+{
+	return sys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off,
+			(loff_t)hi_nbytes << 32 | lo_nbytes, flags);
+}
+
+asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
+				u32 lenhi, u32 lenlo)
+{
+        return sys_fallocate(fd, mode, ((u64)offhi << 32) | offlo,
+                             ((u64)lenhi << 32) | lenlo);
+}
+
+/*
+ * personality(2) wrapper: a PER_LINUX32 process asking for PER_LINUX
+ * is kept on PER_LINUX32, and PER_LINUX32 results are reported back
+ * to userspace as PER_LINUX.
+ */
+long parisc_personality(unsigned long personality)
+{
+	long err;
+
+	if (personality(current->personality) == PER_LINUX32
+	    && personality(personality) == PER_LINUX)
+		personality = (personality & ~PER_MASK) | PER_LINUX32;
+
+	err = sys_personality(personality);
+	if (personality(err) == PER_LINUX32)
+		err = (err & ~PER_MASK) | PER_LINUX;
+
+	return err;
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/sys_parisc32.c b/src/kernel/linux/v4.14/arch/parisc/kernel/sys_parisc32.c
new file mode 100644
index 0000000..2a12a54
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/sys_parisc32.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sys_parisc32.c: Conversion between 32bit and 64bit native syscalls.
+ *
+ * Copyright (C) 2000-2001 Hewlett Packard Company
+ * Copyright (C) 2000 John Marvin
+ * Copyright (C) 2001 Matthew Wilcox
+ * Copyright (C) 2014 Helge Deller <deller@gmx.de>
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * environment. Based heavily on sys_ia32.c and sys_sparc32.c.
+ */
+
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+
+
+/*
+ * Catch-all entry for 32-bit syscalls that have no 64-bit
+ * implementation: log the offending syscall number (r20) and fail
+ * with -ENOSYS.
+ */
+asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
+	int r22, int r21, int r20)
+{
+	printk(KERN_ERR "%s(%d): Unimplemented 32 on 64 syscall #%d!\n", 
+		current->comm, current->pid, r20);
+	return -ENOSYS;
+}
+
+/* fanotify_mark(2) compat wrapper: mask arrives split in two 32-bit
+   words (mask0 = low word, mask1 = high word on this ABI). */
+asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
+	compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
+	const char  __user * pathname)
+{
+	return sys_fanotify_mark(fanotify_fd, flags,
+			((__u64)mask1 << 32) | mask0,
+			 dfd, pathname);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/syscall.S b/src/kernel/linux/v4.14/arch/parisc/kernel/syscall.S
new file mode 100644
index 0000000..0cf379a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/syscall.S
@@ -0,0 +1,957 @@
+/* 
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ * 
+ * System call entry code / Linux gateway page
+ * Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
+ * Licensed under the GNU GPL.
+ * thanks to Philipp Rumpf, Mike Shaver and various others
+ * sorry about the wall, puffin..
+ */
+
+/*
+How does the Linux gateway page on PA-RISC work?
+------------------------------------------------
+The Linux gateway page on PA-RISC is "special".
+It actually has PAGE_GATEWAY bits set (this is linux terminology; in parisc
+terminology it's Execute, promote to PL0) in the page map.  So anything
+executing on this page executes with kernel level privilege (there's more to it
+than that: to have this happen, you also have to use a branch with a ,gate
+completer to activate the privilege promotion).  The upshot is that everything
+that runs on the gateway page runs at kernel privilege but with the current
+user process address space (although you have access to kernel space via %sr2).
+For the 0x100 syscall entry, we redo the space registers to point to the kernel
+address space (preserving the user address space in %sr3), move to wide mode if
+required, save the user registers and branch into the kernel syscall entry
+point.  For all the other functions, we execute at kernel privilege but don't
+flip address spaces. The basic upshot of this is that these code snippets are
+executed atomically (because the kernel can't be pre-empted) and they may
+perform architecturally forbidden (to PL3) operations (like setting control
+registers).
+*/
+
+
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/page.h>
+#include <asm/psw.h>
+#include <asm/thread_info.h>
+#include <asm/assembly.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+
+#include <linux/linkage.h>
+
+	/* We fill the empty parts of the gateway page with
+ 	 * something that will kill the kernel or a
+ 	 * userspace application.
+	 */
+#define KILL_INSN	break	0,0	/* break trap: faults immediately if executed */
+
+	.level          PA_ASM_LEVEL
+
+	.text
+
+	.import syscall_exit,code
+	.import syscall_exit_rfi,code
+
+	/* Linux gateway page is aliased to virtual page 0 in the kernel
+	 * address space. Since it is a gateway page it cannot be
+	 * dereferenced, so null pointers will still fault. We start
+	 * the actual entry point at 0x100. We put break instructions
+	 * at the beginning of the page to trap null indirect function
+	 * pointers.
+	 */
+
+	.align PAGE_SIZE	/* gateway page must begin on its own page */
+ENTRY(linux_gateway_page)
+
+        /* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
+	.rept 44
+	KILL_INSN
+	.endr
+
+	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
+	/* Light-weight-syscall entry must always be located at 0xb0 */
+	/* WARNING: Keep this number updated with table size changes */
+#define __NR_lws_entries (3)	/* number of valid slots in lws_table */
+
+lws_entry:
+	gate	lws_start, %r0		/* increase privilege */
+	depi	3, 31, 2, %r31		/* Ensure we return into user mode. */
+
+	/* Fill from 0xb8 to 0xe0 */
+	.rept 10
+	KILL_INSN
+	.endr
+
+	/* This function MUST be located at 0xe0 for glibc's threading 
+	mechanism to work. DO NOT MOVE THIS CODE EVER! */
+set_thread_pointer:	/* store TLS pointer (arg0) into %cr27 */
+	gate	.+8, %r0		/* increase privilege */
+	depi	3, 31, 2, %r31		/* Ensure we return into user mode. */
+	be	0(%sr7,%r31)		/* return to user space */
+	mtctl	%r26, %cr27		/* move arg0 to the control register */
+
+	/* Increase the chance of trapping if random jumps occur to this
+	address, fill from 0xf0 to 0x100 */
+	.rept 4
+	KILL_INSN
+	.endr
+
+/* This address must remain fixed at 0x100 for glibc's syscalls to work */
+	.align LINUX_GATEWAY_ADDR
+linux_gateway_entry:
+	gate	.+8, %r0			/* become privileged */
+	mtsp	%r0,%sr4			/* get kernel space into sr4 */
+	mtsp	%r0,%sr5			/* get kernel space into sr5 */
+	mtsp	%r0,%sr6			/* get kernel space into sr6 */
+
+#ifdef CONFIG_64BIT
+	/* for now we can *always* set the W bit on entry to the syscall
+	 * since we don't support wide userland processes.  We could
+	 * also save the current SM other than in r0 and restore it on
+	 * exit from the syscall, and also use that value to know
+	 * whether to do narrow or wide syscalls. -PB
+	 */
+	ssm	PSW_SM_W, %r1
+	extrd,u	%r1,PSW_W_BIT,1,%r1
+	/* sp must be aligned on 4, so deposit the W bit setting into
+	 * the bottom of sp temporarily */
+	or,ev	%r1,%r30,%r30
+	b,n	1f
+	/* The top halves of argument registers must be cleared on syscall
+	 * entry from narrow executable.
+	 */
+	depdi	0, 31, 32, %r26
+	depdi	0, 31, 32, %r25
+	depdi	0, 31, 32, %r24
+	depdi	0, 31, 32, %r23
+	depdi	0, 31, 32, %r22
+	depdi	0, 31, 32, %r21
+1:	
+#endif
+
+	/* We use a rsm/ssm pair to prevent sr3 from being clobbered
+	 * by external interrupts.
+	 */
+	mfsp    %sr7,%r1                        /* save user sr7 */
+	rsm	PSW_SM_I, %r0			/* disable interrupts */
+	mtsp    %r1,%sr3                        /* and store it in sr3 */
+
+	mfctl   %cr30,%r1
+	xor     %r1,%r30,%r30                   /* ye olde xor trick */
+	xor     %r1,%r30,%r1
+	xor     %r1,%r30,%r30
+	
+	ldo     THREAD_SZ_ALGN+FRAME_SIZE(%r30),%r30  /* set up kernel stack */
+
+	/* N.B.: It is critical that we don't set sr7 to 0 until r30
+	 *       contains a valid kernel stack pointer. It is also
+	 *       critical that we don't start using the kernel stack
+	 *       until after sr7 has been set to 0.
+	 */
+
+	mtsp	%r0,%sr7			/* get kernel space into sr7 */
+	ssm	PSW_SM_I, %r0			/* enable interrupts */
+	STREGM	%r1,FRAME_SIZE(%r30)		/* save r1 (usp) here for now */
+	mfctl	%cr30,%r1			/* get task ptr in %r1 */
+	LDREG	TI_TASK(%r1),%r1
+
+	/* Save some registers for sigcontext and potential task
+	   switch (see entry.S for the details of which ones are
+	   saved/restored).  TASK_PT_PSW is zeroed so we can see whether
+	   a process is on a syscall or not.  For an interrupt the real
+	   PSW value is stored.  This is needed for gdb and sys_ptrace. */
+	STREG	%r0,  TASK_PT_PSW(%r1)
+	STREG	%r2,  TASK_PT_GR2(%r1)		/* preserve rp */
+	STREG	%r19, TASK_PT_GR19(%r1)
+
+	LDREGM	-FRAME_SIZE(%r30), %r2		/* get users sp back */
+#ifdef CONFIG_64BIT
+	extrd,u	%r2,63,1,%r19			/* W hidden in bottom bit */
+#if 0
+	xor	%r19,%r2,%r2			/* clear bottom bit */
+	depd,z	%r19,1,1,%r19
+	std	%r19,TASK_PT_PSW(%r1)
+#endif
+#endif
+	STREG	%r2,  TASK_PT_GR30(%r1)		/* ... and save it */
+	
+	STREG	%r20, TASK_PT_GR20(%r1)		/* Syscall number */
+	STREG	%r21, TASK_PT_GR21(%r1)
+	STREG	%r22, TASK_PT_GR22(%r1)
+	STREG	%r23, TASK_PT_GR23(%r1)		/* 4th argument */
+	STREG	%r24, TASK_PT_GR24(%r1)		/* 3rd argument */
+	STREG	%r25, TASK_PT_GR25(%r1)		/* 2nd argument */
+	STREG	%r26, TASK_PT_GR26(%r1)	 	/* 1st argument */
+	STREG	%r27, TASK_PT_GR27(%r1)		/* user dp */
+	STREG   %r28, TASK_PT_GR28(%r1)         /* return value 0 */
+	STREG   %r0, TASK_PT_ORIG_R28(%r1)      /* don't prohibit restarts */
+	STREG	%r29, TASK_PT_GR29(%r1)		/* return value 1 */
+	STREG	%r31, TASK_PT_GR31(%r1)		/* preserve syscall return ptr */
+	
+	ldo	TASK_PT_FR0(%r1), %r27		/* save fpregs from the kernel */
+	save_fp	%r27				/* or potential task switch  */
+
+	mfctl	%cr11, %r27			/* i.e. SAR */
+	STREG	%r27, TASK_PT_SAR(%r1)
+
+	loadgp
+
+#ifdef CONFIG_64BIT
+	ldo	-16(%r30),%r29			/* Reference param save area */
+	copy	%r19,%r2			/* W bit back to r2 */
+#else
+	/* no need to save these on stack in wide mode because the first 8
+	 * args are passed in registers */
+	stw     %r22, -52(%r30)                 /* 5th argument */
+	stw     %r21, -56(%r30)                 /* 6th argument */
+#endif
+
+	/* Are we being ptraced? */
+	mfctl	%cr30, %r1
+	LDREG	TI_FLAGS(%r1),%r1
+	ldi	_TIF_SYSCALL_TRACE_MASK, %r19
+	and,COND(=) %r1, %r19, %r0		/* nullify the branch when no trace flags are set */
+	b,n	.Ltracesys
+	
+	/* Note!  We cannot use the syscall table that is mapped
+	nearby since the gateway page is mapped execute-only. */
+
+#ifdef CONFIG_64BIT
+	ldil	L%sys_call_table, %r1
+	or,=	%r2,%r2,%r2
+	addil	L%(sys_call_table64-sys_call_table), %r1
+	ldo	R%sys_call_table(%r1), %r19
+	or,=	%r2,%r2,%r2
+	ldo	R%sys_call_table64(%r1), %r19
+#else
+	ldil	L%sys_call_table, %r1
+	ldo     R%sys_call_table(%r1), %r19
+#endif	
+	comiclr,>>	__NR_Linux_syscalls, %r20, %r0	/* nullify the branch when the syscall number is in range */
+	b,n	.Lsyscall_nosys
+	
+	LDREGX  %r20(%r19), %r19	/* fetch handler address: table base indexed by syscall number */
+
+	/* If this is a sys_rt_sigreturn call, and the signal was received
+	 * when not in_syscall, then we want to return via syscall_exit_rfi,
+	 * not syscall_exit.  Signal no. in r20, in_syscall in r25 (see
+	 * trampoline code in signal.c).
+	 */
+	ldi	__NR_rt_sigreturn,%r2
+	comb,=	%r2,%r20,.Lrt_sigreturn
+.Lin_syscall:
+	ldil	L%syscall_exit,%r2
+	be      0(%sr7,%r19)
+	ldo	R%syscall_exit(%r2),%r2
+.Lrt_sigreturn:
+	comib,<> 0,%r25,.Lin_syscall
+	ldil	L%syscall_exit_rfi,%r2
+	be      0(%sr7,%r19)
+	ldo	R%syscall_exit_rfi(%r2),%r2
+
+	/* Note!  Because we are not running where we were linked, any
+	calls to functions external to this file must be indirect.  To
+	be safe, we apply the opposite rule to functions within this
+	file, with local labels given to them to ensure correctness. */
+	
+.Lsyscall_nosys:
+syscall_nosys:
+	ldil	L%syscall_exit,%r1
+	be	R%syscall_exit(%sr7,%r1)
+	ldo	-ENOSYS(%r0),%r28		   /* set errno */
+
+
+/* Warning! This trace code is a virtual duplicate of the code above so be
+ * sure to maintain both! */
+.Ltracesys:
+tracesys:
+	/* Need to save more registers so the debugger can see where we
+	 * are.  This saves only the lower 8 bits of PSW, so that the C
+	 * bit is still clear on syscalls, and the D bit is set if this
+	 * full register save path has been executed.  We check the D
+	 * bit on syscall_return_rfi to determine which registers to
+	 * restore.  An interrupt results in a full PSW saved with the
+	 * C bit set, a non-traced syscall entry results in C and D clear
+	 * in the saved PSW.
+	 */
+	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
+	LDREG	TI_TASK(%r1), %r1
+	ssm	0,%r2		/* ssm with empty mask: read current PSW into %r2 */
+	STREG	%r2,TASK_PT_PSW(%r1)		/* Lower 8 bits only!! */
+	mfsp	%sr0,%r2
+	STREG	%r2,TASK_PT_SR0(%r1)
+	mfsp	%sr1,%r2
+	STREG	%r2,TASK_PT_SR1(%r1)
+	mfsp	%sr2,%r2
+	STREG	%r2,TASK_PT_SR2(%r1)
+	mfsp	%sr3,%r2
+	STREG	%r2,TASK_PT_SR3(%r1)
+	STREG	%r2,TASK_PT_SR4(%r1)
+	STREG	%r2,TASK_PT_SR5(%r1)
+	STREG	%r2,TASK_PT_SR6(%r1)
+	STREG	%r2,TASK_PT_SR7(%r1)
+	STREG	%r2,TASK_PT_IASQ0(%r1)
+	STREG	%r2,TASK_PT_IASQ1(%r1)
+	LDREG	TASK_PT_GR31(%r1),%r2
+	STREG	%r2,TASK_PT_IAOQ0(%r1)
+	ldo	4(%r2),%r2
+	STREG	%r2,TASK_PT_IAOQ1(%r1)
+	ldo	TASK_REGS(%r1),%r2
+	/* reg_save %r2 */
+	STREG	%r3,PT_GR3(%r2)
+	STREG	%r4,PT_GR4(%r2)
+	STREG	%r5,PT_GR5(%r2)
+	STREG	%r6,PT_GR6(%r2)
+	STREG	%r7,PT_GR7(%r2)
+	STREG	%r8,PT_GR8(%r2)
+	STREG	%r9,PT_GR9(%r2)
+	STREG	%r10,PT_GR10(%r2)
+	STREG	%r11,PT_GR11(%r2)
+	STREG	%r12,PT_GR12(%r2)
+	STREG	%r13,PT_GR13(%r2)
+	STREG	%r14,PT_GR14(%r2)
+	STREG	%r15,PT_GR15(%r2)
+	STREG	%r16,PT_GR16(%r2)
+	STREG	%r17,PT_GR17(%r2)
+	STREG	%r18,PT_GR18(%r2)
+	/* Finished saving things for the debugger */
+
+	copy	%r2,%r26
+	ldil	L%do_syscall_trace_enter,%r1
+	ldil	L%tracesys_next,%r2
+	be	R%do_syscall_trace_enter(%sr7,%r1)
+	ldo	R%tracesys_next(%r2),%r2
+	
+tracesys_next:
+	/* do_syscall_trace_enter either returned the syscallno, or -1L,
+	 *  so we skip restoring the PT_GR20 below, since we pulled it from
+	 *  task->thread.regs.gr[20] above.
+	 */
+	copy	%ret0,%r20
+	ldil	L%sys_call_table,%r1
+	ldo     R%sys_call_table(%r1), %r19
+
+	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
+	LDREG	TI_TASK(%r1), %r1
+	LDREG   TASK_PT_GR28(%r1), %r28		/* Restore return value */
+	LDREG   TASK_PT_GR26(%r1), %r26		/* Restore the users args */
+	LDREG   TASK_PT_GR25(%r1), %r25
+	LDREG   TASK_PT_GR24(%r1), %r24
+	LDREG   TASK_PT_GR23(%r1), %r23
+	LDREG   TASK_PT_GR22(%r1), %r22
+	LDREG   TASK_PT_GR21(%r1), %r21
+#ifdef CONFIG_64BIT
+	ldo	-16(%r30),%r29			/* Reference param save area */
+#else
+	stw     %r22, -52(%r30)                 /* 5th argument */
+	stw     %r21, -56(%r30)                 /* 6th argument */
+#endif
+
+	cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
+	comiclr,>>	__NR_Linux_syscalls, %r20, %r0	/* nullify the branch when the syscall number is in range */
+	b,n	.Ltracesys_nosys
+
+	LDREGX  %r20(%r19), %r19	/* fetch handler address: table base indexed by syscall number */
+
+	/* If this is a sys_rt_sigreturn call, and the signal was received
+	 * when not in_syscall, then we want to return via syscall_exit_rfi,
+	 * not syscall_exit.  Signal no. in r20, in_syscall in r25 (see
+	 * trampoline code in signal.c).
+	 */
+	ldi	__NR_rt_sigreturn,%r2
+	comb,=	%r2,%r20,.Ltrace_rt_sigreturn
+.Ltrace_in_syscall:
+	ldil	L%tracesys_exit,%r2
+	be      0(%sr7,%r19)
+	ldo	R%tracesys_exit(%r2),%r2
+
+.Ltracesys_nosys:
+	ldo	-ENOSYS(%r0),%r28		/* set errno */
+
+	/* Do *not* call this function on the gateway page, because it
+	makes a direct call to syscall_trace. */
+	
+tracesys_exit:
+	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
+	LDREG	TI_TASK(%r1), %r1
+#ifdef CONFIG_64BIT
+	ldo	-16(%r30),%r29			/* Reference param save area */
+#endif
+	ldo	TASK_REGS(%r1),%r26
+	BL	do_syscall_trace_exit,%r2
+	STREG   %r28,TASK_PT_GR28(%r1)          /* save return value now */
+	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
+	LDREG	TI_TASK(%r1), %r1
+	LDREG   TASK_PT_GR28(%r1), %r28		/* Restore return val. */
+
+	ldil	L%syscall_exit,%r1
+	be,n	R%syscall_exit(%sr7,%r1)
+
+.Ltrace_rt_sigreturn:
+	comib,<> 0,%r25,.Ltrace_in_syscall
+	ldil	L%tracesys_sigexit,%r2
+	be      0(%sr7,%r19)
+	ldo	R%tracesys_sigexit(%r2),%r2
+
+tracesys_sigexit:
+	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
+	LDREG	TI_TASK(%r1), %r1
+#ifdef CONFIG_64BIT
+	ldo	-16(%r30),%r29			/* Reference param save area */
+#endif
+	BL	do_syscall_trace_exit,%r2
+	ldo	TASK_REGS(%r1),%r26
+
+	ldil	L%syscall_exit_rfi,%r1
+	be,n	R%syscall_exit_rfi(%sr7,%r1)
+
+
+	/*********************************************************
+		32/64-bit Light-Weight-Syscall ABI
+
+		* - Indicates a hint for userspace inline asm
+		implementations.
+
+		Syscall number (caller-saves)
+	        - %r20
+	        * In asm clobber.
+
+		Argument registers (caller-saves)
+	        - %r26, %r25, %r24, %r23, %r22
+	        * In asm input.
+
+		Return registers (caller-saves)
+	        - %r28 (return), %r21 (errno)
+	        * In asm output.
+
+		Caller-saves registers
+	        - %r1, %r27, %r29
+	        - %r2 (return pointer)
+	        - %r31 (ble link register)
+	        * In asm clobber.
+
+		Callee-saves registers
+	        - %r3-%r18
+	        - %r30 (stack pointer)
+	        * Not in asm clobber.
+
+		If userspace is 32-bit:
+		Callee-saves registers
+	        - %r19 (32-bit PIC register)
+
+		Differences from 32-bit calling convention:
+		- Syscall number in %r20
+		- Additional argument register %r22 (arg4)
+		- Callee-saves %r19.
+
+		If userspace is 64-bit:
+		Callee-saves registers
+		- %r27 (64-bit PIC register)
+
+		Differences from 64-bit calling convention:
+		- Syscall number in %r20
+		- Additional argument register %r22 (arg4)
+		- Callee-saves %r27.
+
+		Error codes returned by entry path:
+
+		ENOSYS - r20 was an invalid LWS number.
+
+	*********************************************************/
+lws_start:
+
+#ifdef CONFIG_64BIT
+	/* FIXME: If we are a 64-bit kernel just
+	 *        turn this on unconditionally.
+	 */
+	ssm	PSW_SM_W, %r1
+	extrd,u	%r1,PSW_W_BIT,1,%r1
+	/* sp must be aligned on 4, so deposit the W bit setting into
+	 * the bottom of sp temporarily */
+	or,ev	%r1,%r30,%r30
+
+	/* Clip LWS number to a 32-bit value always */
+	depdi	0, 31, 32, %r20
+#endif	
+
+        /* Is the lws entry number valid? */
+	comiclr,>>	__NR_lws_entries, %r20, %r0	/* nullify the branch when the LWS number is in range */
+	b,n	lws_exit_nosys
+
+	/* Load table start */
+	ldil	L%lws_table, %r1
+	ldo	R%lws_table(%r1), %r28	/* Scratch use of r28 */
+	LDREGX	%r20(%sr2,r28), %r21	/* Scratch use of r21 */
+
+	/* Jump to lws, lws table pointers already relocated */
+	be,n	0(%sr2,%r21)
+
+lws_exit_nosys:
+	ldo	-ENOSYS(%r0),%r21		   /* set errno */
+	/* Fall through: Return to userspace */
+
+lws_exit:
+#ifdef CONFIG_64BIT
+	/* decide whether to reset the wide mode bit
+	 *
+	 * For a syscall, the W bit is stored in the lowest bit
+	 * of sp.  Extract it and reset W if it is zero */
+	extrd,u,*<>	%r30,63,1,%r1
+	rsm	PSW_SM_W, %r0
+	/* now reset the lowest bit of sp if it was set */
+	xor	%r30,%r1,%r30
+#endif
+	be,n	0(%sr7, %r31)	/* return to userspace via the ble link register */
+
+
+	
+	/***************************************************
+		Implementing 32bit CAS as an atomic operation:
+
+		%r26 - Address to examine
+		%r25 - Old value to check (old)
+		%r24 - New value to set (new)
+		%r28 - Return prev through this register.
+		%r21 - Kernel error code
+
+		If debugging is DISabled:
+
+		%r21 has the following meanings:
+
+		EAGAIN - CAS is busy, ldcw failed, try again.
+		EFAULT - Read or write failed.		
+
+		If debugging is enabled:
+
+		EDEADLOCK - CAS called recursively.
+		EAGAIN && r28 == 1 - CAS is busy. Lock contended.
+		EAGAIN && r28 == 2 - CAS is busy. ldcw failed.
+		EFAULT - Read or write failed.
+
+		Scratch: r20, r28, r1
+
+	****************************************************/
+
+	/* Do not enable LWS debugging */
+#define ENABLE_LWS_DEBUG 0 
+
+	/* ELF64 Process entry path */
+lws_compare_and_swap64:
+#ifdef CONFIG_64BIT
+	b,n	lws_compare_and_swap
+#else
+	/* If we are not a 64-bit kernel, then we don't
+	 * have 64-bit input registers, and calling
+	 * the 64-bit LWS CAS returns ENOSYS.
+	 */
+	b,n	lws_exit_nosys
+#endif
+
+	/* ELF32 Process entry path */
+lws_compare_and_swap32:
+#ifdef CONFIG_64BIT
+	/* Clip all the input registers */
+	depdi	0, 31, 32, %r26
+	depdi	0, 31, 32, %r25
+	depdi	0, 31, 32, %r24
+#endif
+
+lws_compare_and_swap:
+	/* Load start of lock table */
+	ldil	L%lws_lock_start, %r20
+	ldo	R%lws_lock_start(%r20), %r28
+
+	/* Extract four bits from r26 and hash lock (Bits 4-7) */
+	extru  %r26, 27, 4, %r20
+
+	/* Find lock to use, the hash is either one of 0 to
+	   15, multiplied by 16 (keep it 16-byte aligned)
+	   and add to the lock table offset. */
+	shlw	%r20, 4, %r20
+	add	%r20, %r28, %r20
+
+# if ENABLE_LWS_DEBUG
+	/*	
+		DEBUG, check for deadlock! 
+		If the thread register values are the same
+		then we were the one that locked it last and
+		this is a recursive call that will deadlock.
+		We *must* give up this call and fail.
+	*/
+	ldw	4(%sr2,%r20), %r28			/* Load thread register */
+	/* WARNING: If cr27 cycles to the same value we have problems */
+	mfctl	%cr27, %r21				/* Get current thread register */
+	cmpb,<>,n	%r21, %r28, cas_lock		/* Called recursive? */
+	b	lws_exit				/* Return error! */
+	ldo	-EDEADLOCK(%r0), %r21
+cas_lock:
+	cmpb,=,n	%r0, %r28, cas_nocontend	/* Is nobody using it? */
+	ldo	1(%r0), %r28				/* 1st case */
+	b	lws_exit				/* Contended... */
+	ldo	-EAGAIN(%r0), %r21			/* Spin in userspace */
+cas_nocontend:
+# endif
+/* ENABLE_LWS_DEBUG */
+
+	rsm	PSW_SM_I, %r0				/* Disable interrupts */
+	/* COW breaks can cause contention on UP systems */
+	LDCW	0(%sr2,%r20), %r28			/* Try to acquire the lock */
+	cmpb,<>,n	%r0, %r28, cas_action		/* Did we get it? */
+cas_wouldblock:
+	ldo	2(%r0), %r28				/* 2nd case */
+	ssm	PSW_SM_I, %r0				/* re-enable interrupts before failing */
+	b	lws_exit				/* Contended... */
+	ldo	-EAGAIN(%r0), %r21			/* Spin in userspace */
+
+	/*
+		prev = *addr;
+		if ( prev == old )
+		  *addr = new;
+		return prev;
+	*/
+
+	/* NOTES:
+		This all works because intr_do_signal
+		and schedule both check the return iasq
+		and see that we are on the kernel page
+		so this process is never scheduled off
+		or is ever sent any signal of any sort,
+		thus it is wholly atomic from userspace's
+		perspective
+	*/
+cas_action:
+#if defined CONFIG_SMP && ENABLE_LWS_DEBUG
+	/* DEBUG */
+	mfctl	%cr27, %r1
+	stw	%r1, 4(%sr2,%r20)
+#endif
+	/* The load and store could fail */
+1:	ldw	0(%r26), %r28
+	sub,<>	%r28, %r25, %r0		/* nullify the store unless *addr == old */
+2:	stw	%r24, 0(%r26)
+	/* Free lock */
+	sync
+	stw	%r20, 0(%sr2,%r20)	/* nonzero store releases the hashed lock */
+#if ENABLE_LWS_DEBUG
+	/* Clear thread register indicator */
+	stw	%r0, 4(%sr2,%r20)
+#endif
+	/* Enable interrupts */
+	ssm	PSW_SM_I, %r0
+	/* Return to userspace, set no error */
+	b	lws_exit
+	copy	%r0, %r21
+
+3:		
+	/* Error occurred on load or store */
+	/* Free lock */
+	sync
+	stw	%r20, 0(%sr2,%r20)	/* nonzero store releases the hashed lock */
+#if ENABLE_LWS_DEBUG
+	stw	%r0, 4(%sr2,%r20)
+#endif
+	ssm	PSW_SM_I, %r0
+	b	lws_exit
+	ldo	-EFAULT(%r0),%r21	/* set errno */
+	nop
+	nop
+	nop
+	nop
+
+	/* Two exception table entries, one for the load,
+	   the other for the store. Either return -EFAULT.
+	   Each of the entries must be relocated. */
+	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 3b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
+
+
+	/***************************************************
+		New CAS implementation which uses pointers and variable size
+		information. The value pointed by old and new MUST NOT change
+		while performing CAS. The lock only protects the value at %r26.
+
+		%r26 - Address to examine
+		%r25 - Pointer to the value to check (old)
+		%r24 - Pointer to the value to set (new)
+		%r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
+		%r28 - Return non-zero on failure
+		%r21 - Kernel error code
+
+		%r21 has the following meanings:
+
+		EAGAIN - CAS is busy, ldcw failed, try again.
+		EFAULT - Read or write failed.
+
+		Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)
+
+	****************************************************/
+
+	/* ELF32 Process entry path */
+lws_compare_and_swap_2:
+#ifdef CONFIG_64BIT
+	/* Clip the input registers. We don't need to clip %r23 as we
+	   only use it for word operations */
+	depdi	0, 31, 32, %r26
+	depdi	0, 31, 32, %r25
+	depdi	0, 31, 32, %r24
+#endif
+
+	/* Check the validity of the size pointer */
+	subi,>>= 3, %r23, %r0	/* nullify the branch when size code is 0..3 */
+	b,n	lws_exit_nosys
+
+	/* Jump to the functions which will load the old and new values into
+	   registers depending on their size */
+	shlw	%r23, 2, %r29	/* %r29 = size code * 4: offset into the branch table below */
+	blr	%r29, %r0
+	nop
+
+	/* 8bit load */
+4:	ldb	0(%r25), %r25
+	b	cas2_lock_start
+5:	ldb	0(%r24), %r24
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	/* 16bit load */
+6:	ldh	0(%r25), %r25
+	b	cas2_lock_start
+7:	ldh	0(%r24), %r24
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	/* 32bit load */
+8:	ldw	0(%r25), %r25
+	b	cas2_lock_start
+9:	ldw	0(%r24), %r24
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	/* 64bit load */
+#ifdef CONFIG_64BIT
+10:	ldd	0(%r25), %r25
+11:	ldd	0(%r24), %r24
+#else
+	/* Load old value into r22/r23 - high/low */
+10:	ldw	0(%r25), %r22
+11:	ldw	4(%r25), %r23
+	/* Load new value into fr4 for atomic store later */
+12:	flddx	0(%r24), %fr4
+#endif
+
+cas2_lock_start:
+	/* Load start of lock table */
+	ldil	L%lws_lock_start, %r20
+	ldo	R%lws_lock_start(%r20), %r28
+
+	/* Extract four bits from r26 and hash lock (Bits 4-7) */
+	extru  %r26, 27, 4, %r20
+
+	/* Find lock to use, the hash is either one of 0 to
+	   15, multiplied by 16 (keep it 16-byte aligned)
+	   and add to the lock table offset. */
+	shlw	%r20, 4, %r20
+	add	%r20, %r28, %r20
+
+	rsm	PSW_SM_I, %r0			/* Disable interrupts */
+	/* COW breaks can cause contention on UP systems */
+	LDCW	0(%sr2,%r20), %r28		/* Try to acquire the lock */
+	cmpb,<>,n	%r0, %r28, cas2_action	/* Did we get it? */
+cas2_wouldblock:
+	ldo	2(%r0), %r28			/* 2nd case */
+	ssm	PSW_SM_I, %r0			/* re-enable interrupts before failing */
+	b	lws_exit			/* Contended... */
+	ldo	-EAGAIN(%r0), %r21		/* Spin in userspace */
+
+	/*
+		prev = *addr;
+		if ( prev == old )
+		  *addr = new;
+		return prev;
+	*/
+
+	/* NOTES:
+		This all works because intr_do_signal
+		and schedule both check the return iasq
+		and see that we are on the kernel page
+		so this process is never scheduled off
+		or is ever sent any signal of any sort,
+		thus it is wholly atomic from userspace's
+		perspective
+	*/
+cas2_action:
+	/* Jump to the correct function */
+	blr	%r29, %r0
+	/* Set %r28 as non-zero for now */
+	ldo	1(%r0),%r28
+
+	/* 8bit CAS */
+13:	ldb	0(%r26), %r29
+	sub,=	%r29, %r25, %r0		/* nullify the exit unless *addr != old */
+	b,n	cas2_end
+14:	stb	%r24, 0(%r26)
+	b	cas2_end
+	copy	%r0, %r28		/* success: clear failure flag */
+	nop
+	nop
+
+	/* 16bit CAS */
+15:	ldh	0(%r26), %r29
+	sub,=	%r29, %r25, %r0
+	b,n	cas2_end
+16:	sth	%r24, 0(%r26)
+	b	cas2_end
+	copy	%r0, %r28
+	nop
+	nop
+
+	/* 32bit CAS */
+17:	ldw	0(%r26), %r29
+	sub,=	%r29, %r25, %r0
+	b,n	cas2_end
+18:	stw	%r24, 0(%r26)
+	b	cas2_end
+	copy	%r0, %r28
+	nop
+	nop
+
+	/* 64bit CAS */
+#ifdef CONFIG_64BIT
+19:	ldd	0(%r26), %r29
+	sub,*=	%r29, %r25, %r0
+	b,n	cas2_end
+20:	std	%r24, 0(%r26)
+	copy	%r0, %r28
+#else
+	/* Compare first word */
+19:	ldw	0(%r26), %r29
+	sub,=	%r29, %r22, %r0
+	b,n	cas2_end
+	/* Compare second word */
+20:	ldw	4(%r26), %r29
+	sub,=	%r29, %r23, %r0
+	b,n	cas2_end
+	/* Perform the store */
+21:	fstdx	%fr4, 0(%r26)
+	copy	%r0, %r28
+#endif
+
+cas2_end:
+	/* Free lock */
+	sync
+	stw	%r20, 0(%sr2,%r20)	/* nonzero store releases the hashed lock */
+	/* Enable interrupts */
+	ssm	PSW_SM_I, %r0
+	/* Return to userspace, set no error */
+	b	lws_exit
+	copy	%r0, %r21
+
+22:
+	/* Error occurred on load or store */
+	/* Free lock */
+	sync
+	stw	%r20, 0(%sr2,%r20)
+	ssm	PSW_SM_I, %r0
+	ldo	1(%r0),%r28
+	b	lws_exit
+	ldo	-EFAULT(%r0),%r21	/* set errno */
+	nop
+	nop
+	nop
+
+	/* Exception table entries, for the load and store, return EFAULT.
+	   Each of the entries must be relocated. */
+	ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page)
+#ifndef CONFIG_64BIT
+	ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page)
+#endif
+
+	/* Make sure nothing else is placed on this page */
+	.align PAGE_SIZE
+END(linux_gateway_page)
+ENTRY(end_linux_gateway_page)
+
+	/* Relocate symbols assuming linux_gateway_page is mapped
+	   to virtual address 0x0 */
+
+#define LWS_ENTRY(_name_) ASM_ULONG_INSN (lws_##_name_ - linux_gateway_page)	/* offset of handler relative to page start */
+
+	.section .rodata,"a"
+
+	.align 8
+	/* Light-weight-syscall table */
+	/* Start of lws table. */
+ENTRY(lws_table)
+	LWS_ENTRY(compare_and_swap32)		/* 0 - ELF32 Atomic 32bit CAS */
+	LWS_ENTRY(compare_and_swap64)		/* 1 - ELF64 Atomic 32bit CAS */
+	LWS_ENTRY(compare_and_swap_2)		/* 2 - ELF32 Atomic 64bit CAS */
+END(lws_table)
+	/* End of lws table */
+
+	.align 8
+ENTRY(sys_call_table)
+	.export sys_call_table,data
+#include "syscall_table.S"
+END(sys_call_table)
+
+#ifdef CONFIG_64BIT
+	.align 8
+ENTRY(sys_call_table64)
+#define SYSCALL_TABLE_64BIT
+#include "syscall_table.S"
+END(sys_call_table64)
+#endif
+
+	/*
+		All light-weight-syscall atomic operations 
+		will use this set of locks 
+
+		NOTE: The lws_lock_start symbol must be
+		at least 16-byte aligned for safe use
+		with ldcw.
+	*/
+	.section .data
+	.align	L1_CACHE_BYTES
+ENTRY(lws_lock_start)
+	/* lws locks */
+	.rept 16
+	/* Keep locks aligned at 16-bytes */
+	.word 1		/* 1 = unlocked; LDCW reads it and writes zero to acquire */
+	.word 0 
+	.word 0
+	.word 0
+	.endr
+END(lws_lock_start)
+	.previous
+
+.end
+
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/syscall_table.S b/src/kernel/linux/v4.14/arch/parisc/kernel/syscall_table.S
new file mode 100644
index 0000000..6308749
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/syscall_table.S
@@ -0,0 +1,458 @@
+/*    System Call Table
+ *
+ *    Copyright (C) 1999-2004 Matthew Wilcox <willy at parisc-linux.org>
+ *    Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
+ *    Copyright (C) 2000 Alan Modra <amodra at parisc-linux.org>
+ *    Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
+ *    Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
+ *    Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
+ *    Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
+ *    Copyright (C) 2000 Grant Grundler <grundler at parisc-linux.org>
+ *    Copyright (C) 2001 Richard Hirst <rhirst with parisc-linux.org>
+ *    Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
+ *    Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org>
+ *    Copyright (C) 2000-2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
+ *    Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ *    Copyright (C) 2005-2006 Kyle McMartin <kyle at parisc-linux.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#if defined(CONFIG_64BIT) && !defined(SYSCALL_TABLE_64BIT)
+/* Use ENTRY_SAME for 32-bit syscalls which are the same on wide and
+ * narrow palinux.  Use ENTRY_DIFF for those where a 32-bit specific
+ * implementation is required on wide palinux.  Use ENTRY_COMP where
+ * the compatibility layer has a useful 32-bit implementation.
+ */
+#define ENTRY_SAME(_name_) .dword sys_##_name_
+#define ENTRY_DIFF(_name_) .dword sys32_##_name_
+#define ENTRY_UHOH(_name_) .dword sys32_##unimplemented
+#define ENTRY_OURS(_name_) .dword parisc_##_name_
+#define ENTRY_COMP(_name_) .dword compat_sys_##_name_
+#elif defined(CONFIG_64BIT) && defined(SYSCALL_TABLE_64BIT)
+/* Native 64-bit table: every variant resolves to the plain sys_*
+ * entry point (note ENTRY_OURS too -- no parisc_* wrapper here). */
+#define ENTRY_SAME(_name_) .dword sys_##_name_
+#define ENTRY_DIFF(_name_) .dword sys_##_name_
+#define ENTRY_UHOH(_name_) .dword sys_##_name_
+#define ENTRY_OURS(_name_) .dword sys_##_name_
+#define ENTRY_COMP(_name_) .dword sys_##_name_
+#else
+/* 32-bit kernel: 32-bit table slots (.word), no compat layer. */
+#define ENTRY_SAME(_name_) .word sys_##_name_
+#define ENTRY_DIFF(_name_) .word sys_##_name_
+#define ENTRY_UHOH(_name_) .word sys_##_name_
+#define ENTRY_OURS(_name_) .word parisc_##_name_
+#define ENTRY_COMP(_name_) .word sys_##_name_
+#endif
+
+90:	ENTRY_SAME(restart_syscall)	/* 0 */
+91:	ENTRY_SAME(exit)
+	ENTRY_SAME(fork_wrapper)
+	ENTRY_SAME(read)
+	ENTRY_SAME(write)
+	ENTRY_COMP(open)		/* 5 */
+	ENTRY_SAME(close)
+	ENTRY_SAME(waitpid)
+	ENTRY_SAME(creat)
+	ENTRY_SAME(link)
+	ENTRY_SAME(unlink)		/* 10 */
+	ENTRY_COMP(execve)
+	ENTRY_SAME(chdir)
+	/* See comments in kernel/time.c!!! Maybe we don't need this? */
+	ENTRY_COMP(time)
+	ENTRY_SAME(mknod)
+	ENTRY_SAME(chmod)		/* 15 */
+	ENTRY_SAME(lchown)
+	ENTRY_SAME(socket)
+	/* struct stat is MAYBE identical wide and narrow ?? */
+	ENTRY_COMP(newstat)
+	ENTRY_COMP(lseek)
+	ENTRY_SAME(getpid)		/* 20 */
+	/* the 'void * data' parameter may need re-packing in wide */
+	ENTRY_COMP(mount)
+	/* concerned about struct sockaddr in wide/narrow */
+	/* ---> I think sockaddr is OK unless the compiler packs the struct */
+	/*      differently to align the char array */
+	ENTRY_SAME(bind)
+	ENTRY_SAME(setuid)
+	ENTRY_SAME(getuid)
+	ENTRY_COMP(stime)		/* 25 */
+	ENTRY_COMP(ptrace)
+	ENTRY_SAME(alarm)
+	/* see stat comment */
+	ENTRY_COMP(newfstat)
+	ENTRY_SAME(pause)
+	/* struct utimbuf uses time_t which might vary */
+	ENTRY_COMP(utime)		/* 30 */
+	/* struct sockaddr... */
+	ENTRY_SAME(connect)
+	ENTRY_SAME(listen)
+	ENTRY_SAME(access)
+	ENTRY_SAME(nice)
+	/* struct sockaddr... */
+	ENTRY_SAME(accept)		/* 35 */
+	ENTRY_SAME(sync)
+	ENTRY_SAME(kill)
+	ENTRY_SAME(rename)
+	ENTRY_SAME(mkdir)
+	ENTRY_SAME(rmdir)		/* 40 */
+	ENTRY_SAME(dup)
+	ENTRY_SAME(pipe)
+	ENTRY_COMP(times)
+	/* struct sockaddr... */
+	ENTRY_SAME(getsockname)
+	/* it seems possible brk() could return a >4G pointer... */
+	ENTRY_SAME(brk)			/* 45 */
+	ENTRY_SAME(setgid)
+	ENTRY_SAME(getgid)
+	ENTRY_SAME(signal)
+	ENTRY_SAME(geteuid)
+	ENTRY_SAME(getegid)		/* 50 */
+	ENTRY_SAME(acct)
+	ENTRY_SAME(umount)
+	/* struct sockaddr... */
+	ENTRY_SAME(getpeername)
+	ENTRY_COMP(ioctl)
+	ENTRY_COMP(fcntl)		/* 55 */
+	ENTRY_SAME(socketpair)
+	ENTRY_SAME(setpgid)
+	ENTRY_SAME(send)
+	ENTRY_SAME(newuname)
+	ENTRY_SAME(umask)		/* 60 */
+	ENTRY_SAME(chroot)
+	ENTRY_COMP(ustat)
+	ENTRY_SAME(dup2)
+	ENTRY_SAME(getppid)
+	ENTRY_SAME(getpgrp)		/* 65 */
+	ENTRY_SAME(setsid)
+	ENTRY_SAME(pivot_root)
+	/* I don't like this */
+	ENTRY_UHOH(sgetmask)
+	ENTRY_UHOH(ssetmask)
+	ENTRY_SAME(setreuid)		/* 70 */
+	ENTRY_SAME(setregid)
+	ENTRY_SAME(mincore)
+	ENTRY_COMP(sigpending)
+	ENTRY_SAME(sethostname)
+	/* Following 3 have linux-common-code structs containing longs -( */
+	ENTRY_COMP(setrlimit)		/* 75 */
+	ENTRY_COMP(getrlimit)
+	ENTRY_COMP(getrusage)
+	/* struct timeval and timezone are maybe?? consistent wide and narrow */
+	ENTRY_COMP(gettimeofday)
+	ENTRY_COMP(settimeofday)
+	ENTRY_SAME(getgroups)		/* 80 */
+	ENTRY_SAME(setgroups)
+	/* struct socketaddr... */
+	ENTRY_SAME(sendto)
+	ENTRY_SAME(symlink)
+	/* see stat comment */
+	ENTRY_COMP(newlstat)
+	ENTRY_SAME(readlink)		/* 85 */
+	ENTRY_SAME(ni_syscall)	/* was uselib */
+	ENTRY_SAME(swapon)
+	ENTRY_SAME(reboot)
+	ENTRY_SAME(mmap2)
+	ENTRY_SAME(mmap)		/* 90 */
+	ENTRY_SAME(munmap)
+	ENTRY_COMP(truncate)
+	ENTRY_COMP(ftruncate)
+	ENTRY_SAME(fchmod)
+	ENTRY_SAME(fchown)		/* 95 */
+	ENTRY_SAME(getpriority)
+	ENTRY_SAME(setpriority)
+	ENTRY_SAME(recv)
+	ENTRY_COMP(statfs)
+	ENTRY_COMP(fstatfs)		/* 100 */
+	ENTRY_SAME(stat64)
+	ENTRY_SAME(ni_syscall)	/* was socketcall */
+	ENTRY_SAME(syslog)
+	/* even though manpage says struct timeval contains longs, ours has
+	 * time_t and suseconds_t -- both of which are safe wide/narrow */
+	ENTRY_COMP(setitimer)
+	ENTRY_COMP(getitimer)		/* 105 */
+	ENTRY_SAME(capget)
+	ENTRY_SAME(capset)
+	ENTRY_OURS(pread64)
+	ENTRY_OURS(pwrite64)
+	ENTRY_SAME(getcwd)		/* 110 */
+	ENTRY_SAME(vhangup)
+	ENTRY_SAME(fstat64)
+	ENTRY_SAME(vfork_wrapper)
+	/* struct rusage contains longs... */
+	ENTRY_COMP(wait4)
+	ENTRY_SAME(swapoff)		/* 115 */
+	ENTRY_COMP(sysinfo)
+	ENTRY_SAME(shutdown)
+	ENTRY_SAME(fsync)
+	ENTRY_SAME(madvise)
+	ENTRY_SAME(clone_wrapper)	/* 120 */
+	ENTRY_SAME(setdomainname)
+	ENTRY_COMP(sendfile)
+	/* struct sockaddr... */
+	ENTRY_SAME(recvfrom)
+	/* struct timex contains longs */
+	ENTRY_COMP(adjtimex)
+	ENTRY_SAME(mprotect)		/* 125 */
+	/* old_sigset_t forced to 32 bits.  Beware glibc sigset_t */
+	ENTRY_COMP(sigprocmask)
+	ENTRY_SAME(ni_syscall)	/* create_module */
+	ENTRY_SAME(init_module)
+	ENTRY_SAME(delete_module)
+	ENTRY_SAME(ni_syscall)		/* 130: get_kernel_syms */
+	/* time_t inside struct dqblk */
+	ENTRY_SAME(quotactl)
+	ENTRY_SAME(getpgid)
+	ENTRY_SAME(fchdir)
+	ENTRY_SAME(bdflush)
+	ENTRY_SAME(sysfs)		/* 135 */
+	ENTRY_OURS(personality)
+	ENTRY_SAME(ni_syscall)	/* for afs_syscall */
+	ENTRY_SAME(setfsuid)
+	ENTRY_SAME(setfsgid)
+	/* I think this might work */
+	ENTRY_SAME(llseek)		/* 140 */
+	ENTRY_COMP(getdents)
+	/* it is POSSIBLE that select will be OK because even though fd_set
+	 * contains longs, the macros and sizes are clever. */
+	ENTRY_COMP(select)
+	ENTRY_SAME(flock)
+	ENTRY_SAME(msync)
+	/* struct iovec contains pointers */
+	ENTRY_COMP(readv)		/* 145 */
+	ENTRY_COMP(writev)
+	ENTRY_SAME(getsid)
+	ENTRY_SAME(fdatasync)
+	/* struct __sysctl_args is a mess */
+	ENTRY_COMP(sysctl)
+	ENTRY_SAME(mlock)		/* 150 */
+	ENTRY_SAME(munlock)
+	ENTRY_SAME(mlockall)
+	ENTRY_SAME(munlockall)
+	/* struct sched_param is ok for now */
+	ENTRY_SAME(sched_setparam)
+	ENTRY_SAME(sched_getparam)	/* 155 */
+	ENTRY_SAME(sched_setscheduler)
+	ENTRY_SAME(sched_getscheduler)
+	ENTRY_SAME(sched_yield)
+	ENTRY_SAME(sched_get_priority_max)
+	ENTRY_SAME(sched_get_priority_min)	/* 160 */
+	ENTRY_COMP(sched_rr_get_interval)
+	ENTRY_COMP(nanosleep)
+	ENTRY_SAME(mremap)
+	ENTRY_SAME(setresuid)
+	ENTRY_SAME(getresuid)		/* 165 */
+	ENTRY_COMP(sigaltstack)
+	ENTRY_SAME(ni_syscall)		/* query_module */
+	ENTRY_SAME(poll)
+	/* structs contain pointers and an in_addr... */
+	ENTRY_SAME(ni_syscall)		/* was nfsservctl */
+	ENTRY_SAME(setresgid)		/* 170 */
+	ENTRY_SAME(getresgid)
+	ENTRY_SAME(prctl)
+	/* signals need a careful review */
+	ENTRY_SAME(rt_sigreturn_wrapper)
+	ENTRY_COMP(rt_sigaction)
+	ENTRY_COMP(rt_sigprocmask)	/* 175 */
+	ENTRY_COMP(rt_sigpending)
+	ENTRY_COMP(rt_sigtimedwait)
+	/* even though the struct siginfo_t is different, it appears like
+	 * all the paths use values which should be same wide and narrow.
+	 * Also the struct is padded to 128 bytes which means we don't have
+	 * to worry about faulting trying to copy in a larger 64-bit
+	 * struct from a 32-bit user-space app.
+	 */
+	ENTRY_COMP(rt_sigqueueinfo)
+	ENTRY_COMP(rt_sigsuspend)
+	ENTRY_SAME(chown)		/* 180 */
+	/* setsockopt() used by iptables: SO_SET_REPLACE/SO_SET_ADD_COUNTERS */
+	ENTRY_COMP(setsockopt)
+	ENTRY_COMP(getsockopt)
+	ENTRY_COMP(sendmsg)
+	ENTRY_COMP(recvmsg)
+	ENTRY_SAME(semop)		/* 185 */
+	ENTRY_SAME(semget)
+	ENTRY_COMP(semctl)
+	ENTRY_COMP(msgsnd)
+	ENTRY_COMP(msgrcv)
+	ENTRY_SAME(msgget)		/* 190 */
+	ENTRY_COMP(msgctl)
+	ENTRY_COMP(shmat)
+	ENTRY_SAME(shmdt)
+	ENTRY_SAME(shmget)
+	ENTRY_COMP(shmctl)		/* 195 */
+	ENTRY_SAME(ni_syscall)		/* streams1 */
+	ENTRY_SAME(ni_syscall)		/* streams2 */
+	ENTRY_SAME(lstat64)
+	ENTRY_OURS(truncate64)
+	ENTRY_OURS(ftruncate64)		/* 200 */
+	ENTRY_SAME(getdents64)
+	ENTRY_COMP(fcntl64)
+	ENTRY_SAME(ni_syscall)	/* attrctl -- dead */
+	ENTRY_SAME(ni_syscall)	/* acl_get -- dead */
+	ENTRY_SAME(ni_syscall)		/* 205 (acl_set -- dead) */
+	ENTRY_SAME(gettid)
+	ENTRY_OURS(readahead)
+	ENTRY_SAME(tkill)
+	ENTRY_COMP(sendfile64)
+	ENTRY_COMP(futex)		/* 210 */
+	ENTRY_COMP(sched_setaffinity)
+	ENTRY_COMP(sched_getaffinity)
+	ENTRY_SAME(ni_syscall)	/* set_thread_area */
+	ENTRY_SAME(ni_syscall)	/* get_thread_area */
+	ENTRY_COMP(io_setup)		/* 215 */
+	ENTRY_SAME(io_destroy)
+	ENTRY_COMP(io_getevents)
+	ENTRY_COMP(io_submit)
+	ENTRY_SAME(io_cancel)
+	ENTRY_SAME(ni_syscall)		/* 220: was alloc_hugepages */
+	ENTRY_SAME(ni_syscall)		/* was free_hugepages */
+	ENTRY_SAME(exit_group)
+	ENTRY_COMP(lookup_dcookie)
+	ENTRY_SAME(epoll_create)
+	ENTRY_SAME(epoll_ctl)		/* 225 */
+	ENTRY_SAME(epoll_wait)
+ 	ENTRY_SAME(remap_file_pages)
+	ENTRY_COMP(semtimedop)
+	ENTRY_COMP(mq_open)
+	ENTRY_SAME(mq_unlink)		/* 230 */
+	ENTRY_COMP(mq_timedsend)
+	ENTRY_COMP(mq_timedreceive)
+	ENTRY_COMP(mq_notify)
+	ENTRY_COMP(mq_getsetattr)
+	ENTRY_COMP(waitid)		/* 235 */
+	ENTRY_OURS(fadvise64_64)
+	ENTRY_SAME(set_tid_address)
+	ENTRY_SAME(setxattr)
+	ENTRY_SAME(lsetxattr)
+	ENTRY_SAME(fsetxattr)		/* 240 */
+	ENTRY_SAME(getxattr)
+	ENTRY_SAME(lgetxattr)
+	ENTRY_SAME(fgetxattr)
+	ENTRY_SAME(listxattr)
+	ENTRY_SAME(llistxattr)		/* 245 */
+	ENTRY_SAME(flistxattr)
+	ENTRY_SAME(removexattr)
+	ENTRY_SAME(lremovexattr)
+	ENTRY_SAME(fremovexattr)
+	ENTRY_COMP(timer_create)	/* 250 */
+	ENTRY_COMP(timer_settime)
+	ENTRY_COMP(timer_gettime)
+	ENTRY_SAME(timer_getoverrun)
+	ENTRY_SAME(timer_delete)
+	ENTRY_COMP(clock_settime)	/* 255 */
+	ENTRY_COMP(clock_gettime)
+	ENTRY_COMP(clock_getres)
+	ENTRY_COMP(clock_nanosleep)
+	ENTRY_SAME(tgkill)
+	ENTRY_COMP(mbind)		/* 260 */
+	ENTRY_COMP(get_mempolicy)
+	ENTRY_COMP(set_mempolicy)
+	ENTRY_SAME(ni_syscall)	/* 263: reserved for vserver */
+	ENTRY_SAME(add_key)
+	ENTRY_SAME(request_key)		/* 265 */
+	ENTRY_COMP(keyctl)
+	ENTRY_SAME(ioprio_set)
+	ENTRY_SAME(ioprio_get)
+	ENTRY_SAME(inotify_init)
+	ENTRY_SAME(inotify_add_watch)	/* 270 */
+	ENTRY_SAME(inotify_rm_watch)
+	ENTRY_SAME(migrate_pages)
+	ENTRY_COMP(pselect6)
+	ENTRY_COMP(ppoll)
+	ENTRY_COMP(openat)		/* 275 */
+	ENTRY_SAME(mkdirat)
+	ENTRY_SAME(mknodat)
+	ENTRY_SAME(fchownat)
+	ENTRY_COMP(futimesat)
+	ENTRY_SAME(fstatat64)		/* 280 */
+	ENTRY_SAME(unlinkat)
+	ENTRY_SAME(renameat)
+	ENTRY_SAME(linkat)
+	ENTRY_SAME(symlinkat)
+	ENTRY_SAME(readlinkat)		/* 285 */
+	ENTRY_SAME(fchmodat)
+	ENTRY_SAME(faccessat)
+	ENTRY_SAME(unshare)
+	ENTRY_COMP(set_robust_list)
+	ENTRY_COMP(get_robust_list)	/* 290 */
+	ENTRY_SAME(splice)
+	ENTRY_OURS(sync_file_range)
+	ENTRY_SAME(tee)
+	ENTRY_COMP(vmsplice)
+	ENTRY_COMP(move_pages)		/* 295 */
+	ENTRY_SAME(getcpu)
+	ENTRY_COMP(epoll_pwait)
+	ENTRY_COMP(statfs64)
+	ENTRY_COMP(fstatfs64)
+	ENTRY_COMP(kexec_load)		/* 300 */
+	ENTRY_COMP(utimensat)
+	ENTRY_COMP(signalfd)
+	ENTRY_SAME(ni_syscall)		/* was timerfd */
+	ENTRY_SAME(eventfd)
+	ENTRY_OURS(fallocate)		/* 305 */
+	ENTRY_SAME(timerfd_create)
+	ENTRY_COMP(timerfd_settime)
+	ENTRY_COMP(timerfd_gettime)
+	ENTRY_COMP(signalfd4)
+	ENTRY_SAME(eventfd2)		/* 310 */
+	ENTRY_SAME(epoll_create1)
+	ENTRY_SAME(dup3)
+	ENTRY_SAME(pipe2)
+	ENTRY_SAME(inotify_init1)
+	ENTRY_COMP(preadv)		/* 315 */
+	ENTRY_COMP(pwritev)
+	ENTRY_COMP(rt_tgsigqueueinfo)
+	ENTRY_SAME(perf_event_open)
+	ENTRY_COMP(recvmmsg)
+	ENTRY_SAME(accept4)		/* 320 */
+	ENTRY_SAME(prlimit64)
+	ENTRY_SAME(fanotify_init)
+	ENTRY_DIFF(fanotify_mark)
+	ENTRY_COMP(clock_adjtime)
+	ENTRY_SAME(name_to_handle_at)	/* 325 */
+	ENTRY_COMP(open_by_handle_at)
+	ENTRY_SAME(syncfs)
+	ENTRY_SAME(setns)
+	ENTRY_COMP(sendmmsg)
+	ENTRY_COMP(process_vm_readv)	/* 330 */
+	ENTRY_COMP(process_vm_writev)
+	ENTRY_SAME(kcmp)
+	ENTRY_SAME(finit_module)
+	ENTRY_SAME(sched_setattr)
+	ENTRY_SAME(sched_getattr)	/* 335 */
+	ENTRY_COMP(utimes)
+	ENTRY_SAME(renameat2)
+	ENTRY_SAME(seccomp)
+	ENTRY_SAME(getrandom)
+	ENTRY_SAME(memfd_create)	/* 340 */
+	ENTRY_SAME(bpf)
+	ENTRY_COMP(execveat)
+	ENTRY_SAME(membarrier)
+	ENTRY_SAME(userfaultfd)
+	ENTRY_SAME(mlock2)		/* 345 */
+	ENTRY_SAME(copy_file_range)
+	ENTRY_COMP(preadv2)
+	ENTRY_COMP(pwritev2)
+	ENTRY_SAME(statx)
+
+
+.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
+.error "size of syscall table does not fit value of __NR_Linux_syscalls"
+.endif
+
+#undef ENTRY_SAME
+#undef ENTRY_DIFF
+#undef ENTRY_UHOH
+#undef ENTRY_COMP
+#undef ENTRY_OURS
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/time.c b/src/kernel/linux/v4.14/arch/parisc/kernel/time.c
new file mode 100644
index 0000000..42a8732
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/time.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  linux/arch/parisc/kernel/time.c
+ *
+ *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
+ *  Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King
+ *  Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
+ *
+ * 1994-07-02  Alan Modra
+ *             fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1998-12-20  Updated NTP code according to technical memorandum Jan '96
+ *             "A Kernel Model for Precision Timekeeping" by Dave Mills
+ */
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/sched.h>
+#include <linux/sched/clock.h>
+#include <linux/sched_clock.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/profile.h>
+#include <linux/clocksource.h>
+#include <linux/platform_device.h>
+#include <linux/ftrace.h>
+
+#include <linux/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/param.h>
+#include <asm/pdc.h>
+#include <asm/led.h>
+
+#include <linux/timex.h>
+
+static unsigned long clocktick __read_mostly;	/* timer cycles per tick */
+
+/*
+ * We keep time on PA-RISC Linux by using the Interval Timer which is
+ * a pair of registers; one is read-only and one is write-only; both
+ * accessed through CR16.  The read-only register is 32 or 64 bits wide,
+ * and increments by 1 every CPU clock tick.  The architecture only
+ * guarantees us a rate between 0.5 and 2, but all implementations use a
+ * rate of 1.  The write-only register is 32-bits wide.  When the lowest
+ * 32 bits of the read-only register compare equal to the write-only
+ * register, it raises a maskable external interrupt.  Each processor has
+ * an Interval Timer of its own and they are not synchronised.  
+ *
+ * We want to generate an interrupt every 1/HZ seconds.  So we program
+ * CR16 to interrupt every @clocktick cycles.  The it_value in cpu_data
+ * is programmed with the intended time of the next tick.  We can be
+ * held off for an arbitrarily long period of time by interrupts being
+ * disabled, so we may miss one or more ticks.
+ */
+/*
+ * timer_interrupt - per-CPU clock tick handler driven by the CR16
+ * interval timer (see the CR16 model described in the comment above).
+ * Accounts every tick that elapsed since the last interrupt, then
+ * re-arms CR16 for the next tick.  Always returns IRQ_HANDLED.
+ */
+irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
+{
+	unsigned long now;
+	unsigned long next_tick;
+	unsigned long ticks_elapsed = 0;
+	unsigned int cpu = smp_processor_id();
+	struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
+
+	/* gcc can optimize for "read-only" case with a local clocktick */
+	unsigned long cpt = clocktick;
+
+	profile_tick(CPU_PROFILING);
+
+	/* Initialize next_tick to the old expected tick time. */
+	next_tick = cpuinfo->it_value;
+
+	/* Calculate how many ticks have elapsed. */
+	now = mfctl(16);
+	do {
+		++ticks_elapsed;
+		next_tick += cpt;
+	} while (next_tick - now > cpt);
+
+	/* Store (in CR16 cycles) up to when we are accounting right now. */
+	cpuinfo->it_value = next_tick;
+
+	/* Go do system house keeping.  Only the boot CPU advances xtime. */
+	if (cpu == 0)
+		xtime_update(ticks_elapsed);
+
+	update_process_times(user_mode(get_irq_regs()));
+
+	/* Skip clockticks on purpose if we know we would miss those.
+	 * The new CR16 must be "later" than current CR16 otherwise
+	 * itimer would not fire until CR16 wrapped - e.g 4 seconds
+	 * later on a 1GHz processor. We'll account for the missed
+	 * ticks on the next timer interrupt.
+	 * We want IT to fire modulo clocktick even if we miss/skip some.
+	 * But those interrupts don't in fact get delivered that regularly.
+	 *
+	 * "next_tick - now" will always give the difference regardless
+	 * if one or the other wrapped. If "now" is "bigger" we'll end up
+	 * with a very large unsigned number.
+	 */
+	now = mfctl(16);
+	while (next_tick - now > cpt)
+		next_tick += cpt;
+
+	/* Program the IT when to deliver the next interrupt.
+	 * Only bottom 32-bits of next_tick are writable in CR16!
+	 * Timer interrupt will be delivered at least a few hundred cycles
+	 * after the IT fires, so if we are too close (<= 8000 cycles) to the
+	 * next cycle, simply skip it.
+	 */
+	if (next_tick - now <= 8000)
+		next_tick += cpt;
+	mtctl(next_tick, 16);
+
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * profile_pc - program counter to attribute a profiling hit to.
+ * If the saved PSW has the nullify bit (PSW_N) set, back up one word
+ * to the instruction that was actually executing.  Under SMP, hits
+ * inside lock functions are credited to the caller (return pointer
+ * in gr[2]) instead of the lock primitive itself.
+ */
+unsigned long profile_pc(struct pt_regs *regs)
+{
+	unsigned long pc = instruction_pointer(regs);
+
+	if (regs->gr[0] & PSW_N)
+		pc -= 4;
+
+#ifdef CONFIG_SMP
+	if (in_lock_functions(pc))
+		pc = regs->gr[2];
+#endif
+
+	return pc;
+}
+EXPORT_SYMBOL(profile_pc);
+
+
+/* clock source code */
+
+/* Clocksource read hook: return the current CR16 cycle count. */
+static u64 notrace read_cr16(struct clocksource *cs)
+{
+	return get_cycles();
+}
+
+/* CR16 cycle counter exposed as a clocksource; the rate is supplied
+ * later via clocksource_register_hz() in init_cr16_clocksource(). */
+static struct clocksource clocksource_cr16 = {
+	.name			= "cr16",
+	.rating			= 300,
+	.read			= read_cr16,
+	.mask			= CLOCKSOURCE_MASK(BITS_PER_LONG),
+	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+/*
+ * Arm this CPU's interval timer for its first tick, one clocktick
+ * from now, and record the programmed deadline in per-CPU it_value
+ * (which timer_interrupt() later uses as the expected tick time).
+ */
+void __init start_cpu_itimer(void)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long next_tick = mfctl(16) + clocktick;
+
+	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */
+
+	per_cpu(cpu_data, cpu).it_value = next_tick;
+}
+
+#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
+/* Read the firmware (PDC) time-of-day clock into @tm.
+ * Returns -EOPNOTSUPP when the PDC call fails. */
+static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
+{
+	struct pdc_tod tod_data;
+
+	memset(tm, 0, sizeof(*tm));
+	if (pdc_tod_read(&tod_data) < 0)
+		return -EOPNOTSUPP;
+
+	/* we treat tod_sec as unsigned, so this can work until year 2106 */
+	rtc_time64_to_tm(tod_data.tod_sec, tm);
+	return rtc_valid_tm(tm);
+}
+
+/* Write @tm back to the PDC time-of-day clock (microseconds = 0). */
+static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
+{
+	time64_t secs = rtc_tm_to_time64(tm);
+
+	if (pdc_tod_set(secs, 0) < 0)
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+static const struct rtc_class_ops rtc_generic_ops = {
+	.read_time = rtc_generic_get_time,
+	.set_time = rtc_generic_set_time,
+};
+
+/* Register an "rtc-generic" platform device backed by the PDC TOD ops. */
+static int __init rtc_init(void)
+{
+	struct platform_device *pdev;
+
+	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
+					     &rtc_generic_ops,
+					     sizeof(rtc_generic_ops));
+
+	return PTR_ERR_OR_ZERO(pdev);
+}
+device_initcall(rtc_init);
+#endif
+
+/*
+ * Boot-time wall clock, read from the PDC time-of-day firmware call.
+ * On failure, logs an error and reports time zero.
+ */
+void read_persistent_clock64(struct timespec64 *ts)
+{
+	static struct pdc_tod tod_data;
+	if (pdc_tod_read(&tod_data) == 0) {
+		ts->tv_sec = tod_data.tod_sec;
+		ts->tv_nsec = tod_data.tod_usec * 1000;
+	} else {
+		printk(KERN_ERR "Error reading tod clock\n");
+	        ts->tv_sec = 0;
+		ts->tv_nsec = 0;
+	}
+}
+
+
+/* sched_clock read hook: raw CR16 cycle count (rate is given to
+ * sched_clock_register() in time_init()). */
+static u64 notrace read_cr16_sched_clock(void)
+{
+	return get_cycles();
+}
+
+
+/*
+ * timer interrupt and sched_clock() initialization
+ */
+
+/*
+ * time_init - boot-time timer setup: derive the CR16 cycles-per-tick
+ * value from firmware's mem_10msec calibration, start CPU 0's interval
+ * timer, and register CR16 as the sched_clock source.
+ */
+void __init time_init(void)
+{
+	unsigned long cr16_hz;
+
+	clocktick = (100 * PAGE0->mem_10msec) / HZ;
+	start_cpu_itimer();	/* get CPU 0 started */
+
+	cr16_hz = 100 * PAGE0->mem_10msec;  /* Hz */
+
+	/* register as sched_clock source */
+	sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
+}
+
+/*
+ * Register CR16 with the clocksource framework.  On multi-socket SMP
+ * the per-CPU CR16 counters may drift apart, so the source is demoted
+ * to unstable unless every other online CPU shares CPU 0's reported
+ * physical location (cpu_loc) or we are running under QEMU.
+ */
+static int __init init_cr16_clocksource(void)
+{
+	/*
+	 * The cr16 interval timers are not synchronized across CPUs on
+	 * different sockets, so mark them unstable and lower rating on
+	 * multi-socket SMP systems.
+	 */
+	if (num_online_cpus() > 1 && !running_on_qemu) {
+		int cpu;
+		unsigned long cpu0_loc;
+		cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
+
+		for_each_online_cpu(cpu) {
+			if (cpu == 0)
+				continue;
+			/* same reported location as CPU 0: presumably the
+			 * same socket, so cr16 stays usable for this CPU */
+			if ((cpu0_loc != 0) &&
+			    (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
+				continue;
+
+			clocksource_cr16.name = "cr16_unstable";
+			clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+			clocksource_cr16.rating = 0;
+			break;
+		}
+	}
+
+	/* XXX: We may want to mark sched_clock stable here if cr16 clocks are
+	 *	in sync:
+	 *	(clocksource_cr16.flags == CLOCK_SOURCE_IS_CONTINUOUS) */
+
+	/* register at clocksource framework */
+	clocksource_register_hz(&clocksource_cr16,
+		100 * PAGE0->mem_10msec);
+
+	return 0;
+}
+
+device_initcall(init_cr16_clocksource);
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/topology.c b/src/kernel/linux/v4.14/arch/parisc/kernel/topology.c
new file mode 100644
index 0000000..f515938
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/topology.c
@@ -0,0 +1,37 @@
+/*
+ * arch/parisc/kernel/topology.c - Populate sysfs with topology information
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/cache.h>
+
+/* One sysfs 'struct cpu' device per possible CPU. */
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+/* Register every present CPU with the sysfs topology core at boot. */
+static int __init topology_init(void)
+{
+	int num;
+
+	for_each_present_cpu(num) {
+		register_cpu(&per_cpu(cpu_devices, num), num);
+	}
+	return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/traps.c b/src/kernel/linux/v4.14/arch/parisc/kernel/traps.c
new file mode 100644
index 0000000..9a898d6
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/traps.c
@@ -0,0 +1,872 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  linux/arch/parisc/traps.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
+ */
+
+/*
+ * 'Traps.c' handles hardware traps and faults after we have saved some
+ * state in 'asm.s'.
+ */
+
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/console.h>
+#include <linux/bug.h>
+#include <linux/ratelimit.h>
+#include <linux/uaccess.h>
+
+#include <asm/assembly.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/unaligned.h>
+#include <linux/atomic.h>
+#include <asm/smp.h>
+#include <asm/pdc.h>
+#include <asm/pdc_chassis.h>
+#include <asm/unwind.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+#include "../math-emu/math-emu.h"	/* for handle_fpe() */
+
+static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
+	struct pt_regs *regs);
+
+/*
+ * Render the low @nbits bits of @x into @buf as '0'/'1' characters,
+ * most-significant bit first, NUL-terminated.  @buf must hold at
+ * least nbits + 1 bytes.  Returns @nbits.
+ */
+static int printbinary(char *buf, unsigned long x, int nbits)
+{
+	unsigned long mask = 1UL << (nbits - 1);
+	while (mask != 0) {
+		*buf++ = (mask & x ? '1' : '0');
+		mask >>= 1;
+	}
+	*buf = '\0';
+
+	return nbits;
+}
+
+/* RFMT: printk format for a native-width register value. */
+#ifdef CONFIG_64BIT
+#define RFMT "%016lx"
+#else
+#define RFMT "%08lx"
+#endif
+#define FFMT "%016llx"	/* fpregs are 64-bit always */
+
+/* Print four consecutive registers (r)[x] .. (r)[x+3] on one line,
+ * prefixed with log level @lvl and register-set name @f. */
+#define PRINTREGS(lvl,r,f,fmt,x)	\
+	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
+		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
+		(r)[(x)+2], (r)[(x)+3])
+
+/* Dump the PSW (as a labelled bit string) and all 32 general registers. */
+static void print_gr(char *level, struct pt_regs *regs)
+{
+	int i;
+	char buf[64];
+
+	printk("%s\n", level);
+	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
+	printbinary(buf, regs->gr[0], 32);
+	printk("%sPSW: %s %s\n", level, buf, print_tainted());
+
+	for (i = 0; i < 32; i += 4)
+		PRINTREGS(level, regs->gr, "r", RFMT, i);
+}
+
+/* Dump FPSR/FPER1 (read live from fr0 via fstd) and all 32 saved FP
+ * registers from @regs. */
+static void print_fr(char *level, struct pt_regs *regs)
+{
+	int i;
+	char buf[64];
+	struct { u32 sw[2]; } s;
+
+	/* FR are 64bit everywhere. Need to use asm to get the content
+	 * of fpsr/fper1, and we assume that we won't have a FP Identify
+	 * in our way, otherwise we're screwed.
+	 * The fldd is used to restore the T-bit if there was one, as the
+	 * store clears it anyway.
+	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
+	asm volatile ("fstd %%fr0,0(%1)	\n\t"
+		      "fldd 0(%1),%%fr0	\n\t"
+		      : "=m" (s) : "r" (&s) : "r0");
+
+	printk("%s\n", level);
+	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
+	printbinary(buf, s.sw[0], 32);
+	printk("%sFPSR: %s\n", level, buf);
+	printk("%sFPER1: %08x\n", level, s.sw[1]);
+
+	/* here we'll print fr0 again, tho it'll be meaningless */
+	for (i = 0; i < 32; i += 4)
+		PRINTREGS(level, regs->fr, "fr", FFMT, i);
+}
+
+/*
+ * Full register dump.  User-mode state is logged at KERN_DEBUG (with
+ * FP registers and raw IAOQ/RP values); kernel-mode state is logged at
+ * KERN_CRIT with symbolized addresses plus a stack backtrace.
+ */
+void show_regs(struct pt_regs *regs)
+{
+	int i, user;
+	char *level;
+	unsigned long cr30, cr31;
+
+	user = user_mode(regs);
+	level = user ? KERN_DEBUG : KERN_CRIT;
+
+	show_regs_print_info(level);
+
+	print_gr(level, regs);
+
+	for (i = 0; i < 8; i += 4)
+		PRINTREGS(level, regs->sr, "sr", RFMT, i);
+
+	/* FP state is only dumped for user-mode faults */
+	if (user)
+		print_fr(level, regs);
+
+	cr30 = mfctl(30);
+	cr31 = mfctl(31);
+	printk("%s\n", level);
+	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
+	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
+	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
+	       level, regs->iir, regs->isr, regs->ior);
+	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
+	       level, current_thread_info()->cpu, cr30, cr31);
+	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
+
+	if (user) {
+		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
+		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
+		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
+	} else {
+		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
+		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
+		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
+
+		parisc_show_stack(current, NULL, regs);
+	}
+}
+
+static DEFINE_RATELIMIT_STATE(_hppa_rs,
+	DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+
+/* Rate-limited fault logging: prints @fmt and dumps @regs only when
+ * @critical is set or show_unhandled_signals is enabled, and the
+ * shared ratelimit state permits it. */
+#define parisc_printk_ratelimited(critical, regs, fmt, ...)	{	      \
+	if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
+		printk(fmt, ##__VA_ARGS__);				      \
+		show_regs(regs);					      \
+	}								      \
+}
+
+
+/* Walk @info with the unwinder and print up to 16 frames whose IP
+ * lies in kernel text; stops early when unwinding fails or IP is 0. */
+static void do_show_stack(struct unwind_frame_info *info)
+{
+	int i = 1;
+
+	printk(KERN_CRIT "Backtrace:\n");
+	while (i <= 16) {
+		if (unwind_once(info) < 0 || info->ip == 0)
+			break;
+
+		if (__kernel_text_address(info->ip)) {
+			printk(KERN_CRIT " [<" RFMT ">] %pS\n",
+				info->ip, (void *) info->ip);
+			i++;
+		}
+	}
+	printk(KERN_CRIT "\n");
+}
+
+/*
+ * Print a backtrace for @task.  With @regs supplied, unwind from the
+ * trap frame.  Otherwise, for the current task, synthesize a starting
+ * pt_regs from this function's own PC/SP/RP; for a blocked task, start
+ * from its saved switch state.  (@sp is accepted but unused.)
+ */
+static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
+	struct pt_regs *regs)
+{
+	struct unwind_frame_info info;
+	struct task_struct *t;
+
+	t = task ? task : current;
+	if (regs) {
+		unwind_frame_init(&info, t, regs);
+		goto show_stack;
+	}
+
+	if (t == current) {
+		unsigned long sp;
+
+/* Capture the live stack pointer and this code address (&&HERE) so the
+ * unwinder can start from the frame we are standing in right now. */
+HERE:
+		asm volatile ("copy %%r30, %0" : "=r"(sp));
+		{
+			struct pt_regs r;
+
+			memset(&r, 0, sizeof(struct pt_regs));
+			r.iaoq[0] = (unsigned long)&&HERE;
+			r.gr[2] = (unsigned long)__builtin_return_address(0);
+			r.gr[30] = sp;
+
+			unwind_frame_init(&info, current, &r);
+		}
+	} else {
+		unwind_frame_init_from_blocked_task(&info, t);
+	}
+
+show_stack:
+	do_show_stack(&info);
+}
+
+/* Generic show_stack() entry point: backtrace @t (or current) without
+ * a trap frame. */
+void show_stack(struct task_struct *t, unsigned long *sp)
+{
+	return parisc_show_stack(t, sp, NULL);
+}
+
+/* BUG() break instructions may appear anywhere in kernel text, so
+ * every address is considered a valid bug address on parisc. */
+int is_valid_bugaddr(unsigned long iaoq)
+{
+	return 1;
+}
+
+/*
+ * die_if_kernel - common fatal-trap reporting.
+ * User-mode faults: rate-limited log (silently ignored when @err is 0)
+ * and return.  Kernel-mode faults: print an oops banner, restore the
+ * PDC console if nothing else is registered, dump registers and stack,
+ * taint the kernel, then panic() (in interrupt / panic_on_oops) or
+ * terminate via do_exit(SIGSEGV).
+ */
+void die_if_kernel(char *str, struct pt_regs *regs, long err)
+{
+	if (user_mode(regs)) {
+		if (err == 0)
+			return; /* STFU */
+
+		parisc_printk_ratelimited(1, regs,
+			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
+			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
+
+		return;
+	}
+
+	oops_in_progress = 1;
+
+	oops_enter();
+
+	/* Amuse the user in a SPARC fashion */
+	if (err) printk(KERN_CRIT
+			"      _______________________________ \n"
+			"     < Your System ate a SPARC! Gah! >\n"
+			"      ------------------------------- \n"
+			"             \\   ^__^\n"
+			"                 (__)\\       )\\/\\\n"
+			"                  U  ||----w |\n"
+			"                     ||     ||\n");
+	
+	/* unlock the pdc lock if necessary */
+	pdc_emergency_unlock();
+
+	/* maybe the kernel hasn't booted very far yet and hasn't been able 
+	 * to initialize the serial or STI console. In that case we should 
+	 * re-enable the pdc console, so that the user will be able to 
+	 * identify the problem. */
+	if (!console_drivers)
+		pdc_console_restart();
+	
+	if (err)
+		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
+			current->comm, task_pid_nr(current), str, err);
+
+	/* Wot's wrong wif bein' racy?  Re-entry here means we died while
+	 * dying; spin forever instead of recursing. */
+	if (current->thread.flags & PARISC_KERNEL_DEATH) {
+		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
+		local_irq_enable();
+		while (1);
+	}
+	current->thread.flags |= PARISC_KERNEL_DEATH;
+
+	show_regs(regs);
+	dump_stack();
+	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+
+	if (in_interrupt())
+		panic("Fatal exception in interrupt");
+
+	if (panic_on_oops)
+		panic("Fatal exception");
+
+	oops_exit();
+	do_exit(SIGSEGV);
+}
+
+/* gdb uses break 4,8 */
+#define GDB_BREAK_INSN 0x10004
+
+/* Deliver SIGTRAP with si_code @wot and the word-aligned faulting PC
+ * to the current task. */
+static void handle_gdb_break(struct pt_regs *regs, int wot)
+{
+	struct siginfo si;
+
+	si.si_signo = SIGTRAP;
+	si.si_errno = 0;
+	si.si_code = wot;
+	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
+	force_sig_info(SIGTRAP, &si, current);
+}
+
+/*
+ * Break instruction trap (trap #9).  Kernel-mode BUG()/WARN() breaks
+ * are routed through report_bug(); everything else raises SIGTRAP.
+ */
+static void handle_break(struct pt_regs *regs)
+{
+	unsigned iir = regs->iir;
+
+	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
+		/* check if a BUG() or WARN() trapped here.  */
+		enum bug_trap_type tt;
+		tt = report_bug(regs->iaoq[0] & ~3, regs);
+		if (tt == BUG_TRAP_TYPE_WARN) {
+			regs->iaoq[0] += 4;
+			regs->iaoq[1] += 4;
+			return; /* return to next instruction when WARN_ON().  */
+		}
+		die_if_kernel("Unknown kernel breakpoint", regs,
+			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
+	}
+
+	if (unlikely(iir != GDB_BREAK_INSN))
+		parisc_printk_ratelimited(0, regs,
+			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
+			iir & 31, (iir>>13) & ((1<<13)-1),
+			task_pid_nr(current), current->comm);
+
+	/* send standard GDB signal */
+	handle_gdb_break(regs, TRAP_BRKPT);
+}
+
+/* Default handler: just log the trap number/CPU and dump registers. */
+static void default_trap(int code, struct pt_regs *regs)
+{
+	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
+	show_regs(regs);
+}
+
+/* Low-priority machine check hook; defaults to default_trap(). */
+void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
+
+
+/*
+ * Rebuild a struct pt_regs from the Processor Internal Memory (PIM)
+ * data that firmware saved at HPMC time, using the wide (PA2.0) or
+ * narrow (PA1.1) PIM layout depending on the boot CPU type.
+ */
+void transfer_pim_to_trap_frame(struct pt_regs *regs)
+{
+    register int i;
+    extern unsigned int hpmc_pim_data[];
+    struct pdc_hpmc_pim_11 *pim_narrow;
+    struct pdc_hpmc_pim_20 *pim_wide;
+
+    if (boot_cpu_data.cpu_type >= pcxu) {
+
+	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
+
+	/*
+	 * Note: The following code will probably generate a
+	 * bunch of truncation error warnings from the compiler.
+	 * Could be handled with an ifdef, but perhaps there
+	 * is a better way.
+	 */
+
+	/* gr[0] holds the PSW, which PIM keeps in cr[22]. */
+	regs->gr[0] = pim_wide->cr[22];
+
+	for (i = 1; i < 32; i++)
+	    regs->gr[i] = pim_wide->gr[i];
+
+	for (i = 0; i < 32; i++)
+	    regs->fr[i] = pim_wide->fr[i];
+
+	for (i = 0; i < 8; i++)
+	    regs->sr[i] = pim_wide->sr[i];
+
+	regs->iasq[0] = pim_wide->cr[17];
+	regs->iasq[1] = pim_wide->iasq_back;
+	regs->iaoq[0] = pim_wide->cr[18];
+	regs->iaoq[1] = pim_wide->iaoq_back;
+
+	regs->sar  = pim_wide->cr[11];
+	regs->iir  = pim_wide->cr[19];
+	regs->isr  = pim_wide->cr[20];
+	regs->ior  = pim_wide->cr[21];
+    }
+    else {
+	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
+
+	regs->gr[0] = pim_narrow->cr[22];
+
+	for (i = 1; i < 32; i++)
+	    regs->gr[i] = pim_narrow->gr[i];
+
+	for (i = 0; i < 32; i++)
+	    regs->fr[i] = pim_narrow->fr[i];
+
+	for (i = 0; i < 8; i++)
+	    regs->sr[i] = pim_narrow->sr[i];
+
+	regs->iasq[0] = pim_narrow->cr[17];
+	regs->iasq[1] = pim_narrow->iasq_back;
+	regs->iaoq[0] = pim_narrow->cr[18];
+	regs->iaoq[1] = pim_narrow->iaoq_back;
+
+	regs->sar  = pim_narrow->cr[11];
+	regs->iir  = pim_narrow->cr[19];
+	regs->isr  = pim_narrow->cr[20];
+	regs->ior  = pim_narrow->cr[21];
+    }
+
+    /*
+     * The following fields only have meaning if we came through
+     * another path. So just zero them here.
+     */
+
+    regs->ksp = 0;
+    regs->kpc = 0;
+    regs->orig_r28 = 0;
+}
+
+
+/*
+ * This routine is called as a last resort when everything else
+ * has gone clearly wrong. We get called for faults in kernel space,
+ * and HPMC's.
+ */
+void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
+{
+	static DEFINE_SPINLOCK(terminate_lock);
+
+	oops_in_progress = 1;
+
+	set_eiem(0);
+	local_irq_disable();
+	spin_lock(&terminate_lock);
+
+	/* unlock the pdc lock if necessary */
+	pdc_emergency_unlock();
+
+	/* restart pdc console if necessary */
+	if (!console_drivers)
+		pdc_console_restart();
+
+	/* Not all paths will gutter the processor... */
+	switch(code){
+
+	case 1:
+		/* HPMC: pull the register state out of the firmware PIM. */
+		transfer_pim_to_trap_frame(regs);
+		break;
+
+	default:
+		/* Fall through */
+		break;
+
+	}
+	    
+	{
+		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
+		struct unwind_frame_info info;
+		unwind_frame_init(&info, current, regs);
+		do_show_stack(&info);
+	}
+
+	printk("\n");
+	pr_crit("%s: Code=%d (%s) regs=%p (Addr=" RFMT ")\n",
+		msg, code, trap_name(code), regs, offset);
+	show_regs(regs);
+
+	spin_unlock(&terminate_lock);
+
+	/* put soft power button back under hardware control;
+	 * if the user had pressed it once at any time, the 
+	 * system will shut down immediately right here. */
+	pdc_soft_power_button(0);
+	
+	/* Call kernel panic() so reboot timeouts work properly 
+	 * FIXME: This function should be on the list of
+	 * panic notifiers, and we should call panic
+	 * directly from the location that we wish. 
+	 * e.g. We should not call panic from
+	 * parisc_terminate, but rather the other way around.
+	 * This hack works, prints the panic message twice,
+	 * and it enables reboot timers!
+	 */
+	panic(msg);
+}
+
+/*
+ * Main interruption dispatcher, called from the low-level trap entry
+ * code.  @code is the hardware interruption number.  Traps that are
+ * not fully handled in the switch below fall through to
+ * do_page_fault() with fault_address/fault_space filled in.
+ */
+void notrace handle_interruption(int code, struct pt_regs *regs)
+{
+	unsigned long fault_address = 0;
+	unsigned long fault_space = 0;
+	struct siginfo si;
+
+	if (code == 1)
+	    pdc_console_restart();  /* switch back to pdc if HPMC */
+	else
+	    local_irq_enable();
+
+	/* Security check:
+	 * If the priority level is still user, and the
+	 * faulting space is not equal to the active space
+	 * then the user is attempting something in a space
+	 * that does not belong to them. Kill the process.
+	 *
+	 * This is normally the situation when the user
+	 * attempts to jump into the kernel space at the
+	 * wrong offset, be it at the gateway page or a
+	 * random location.
+	 *
+	 * We cannot normally signal the process because it
+	 * could *be* on the gateway page, and processes
+	 * executing on the gateway page can't have signals
+	 * delivered.
+	 * 
+	 * We merely readjust the address into the users
+	 * space, at a destination address of zero, and
+	 * allow processing to continue.
+	 */
+	if (((unsigned long)regs->iaoq[0] & 3) &&
+	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) { 
+		/* Kill the user process later */
+		regs->iaoq[0] = 0 | 3;
+		regs->iaoq[1] = regs->iaoq[0] + 4;
+		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
+		regs->gr[0] &= ~PSW_B;
+		return;
+	}
+	
+#if 0
+	printk(KERN_CRIT "Interruption # %d\n", code);
+#endif
+
+	switch(code) {
+
+	case  1:
+		/* High-priority machine check (HPMC) */
+		
+		/* set up a new led state on systems shipped with a LED State panel */
+		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
+
+		parisc_terminate("High Priority Machine Check (HPMC)",
+				regs, code, 0);
+		/* NOT REACHED */
+		
+	case  2:
+		/* Power failure interrupt */
+		printk(KERN_CRIT "Power failure interrupt !\n");
+		return;
+
+	case  3:
+		/* Recovery counter trap */
+		regs->gr[0] &= ~PSW_R;
+		if (user_space(regs))
+			handle_gdb_break(regs, TRAP_TRACE);
+		/* else this must be the start of a syscall - just let it run */
+		return;
+
+	case  5:
+		/* Low-priority machine check */
+		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
+		
+		flush_cache_all();
+		flush_tlb_all();
+		cpu_lpmc(5, regs);
+		return;
+
+	case  6:
+		/* Instruction TLB miss fault/Instruction page fault */
+		fault_address = regs->iaoq[0];
+		fault_space   = regs->iasq[0];
+		break;
+
+	case  8:
+		/* Illegal instruction trap */
+		die_if_kernel("Illegal instruction", regs, code);
+		si.si_code = ILL_ILLOPC;
+		goto give_sigill;
+
+	case  9:
+		/* Break instruction trap */
+		handle_break(regs);
+		return;
+
+	case 10:
+		/* Privileged operation trap */
+		die_if_kernel("Privileged operation", regs, code);
+		si.si_code = ILL_PRVOPC;
+		goto give_sigill;
+
+	case 11:
+		/* Privileged register trap */
+		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
+
+			/* This is a MFCTL cr26/cr27 to gr instruction.
+			 * PCXS traps on this, so we need to emulate it.
+			 */
+
+			if (regs->iir & 0x00200000)
+				regs->gr[regs->iir & 0x1f] = mfctl(27);
+			else
+				regs->gr[regs->iir & 0x1f] = mfctl(26);
+
+			/* advance past the emulated instruction */
+			regs->iaoq[0] = regs->iaoq[1];
+			regs->iaoq[1] += 4;
+			regs->iasq[0] = regs->iasq[1];
+			return;
+		}
+
+		die_if_kernel("Privileged register usage", regs, code);
+		si.si_code = ILL_PRVREG;
+	give_sigill:
+		si.si_signo = SIGILL;
+		si.si_errno = 0;
+		si.si_addr = (void __user *) regs->iaoq[0];
+		force_sig_info(SIGILL, &si, current);
+		return;
+
+	case 12:
+		/* Overflow Trap, let the userland signal handler do the cleanup */
+		si.si_signo = SIGFPE;
+		si.si_code = FPE_INTOVF;
+		si.si_addr = (void __user *) regs->iaoq[0];
+		force_sig_info(SIGFPE, &si, current);
+		return;
+		
+	case 13:
+		/* Conditional Trap
+		   The condition succeeds in an instruction which traps
+		   on condition  */
+		if(user_mode(regs)){
+			si.si_signo = SIGFPE;
+			/* Set to zero, and let the userspace app figure it out from
+			   the insn pointed to by si_addr */
+			si.si_code = 0;
+			si.si_addr = (void __user *) regs->iaoq[0];
+			force_sig_info(SIGFPE, &si, current);
+			return;
+		} 
+		/* The kernel doesn't want to handle condition codes */
+		break;
+		
+	case 14:
+		/* Assist Exception Trap, i.e. floating point exception. */
+		die_if_kernel("Floating point exception", regs, 0); /* quiet */
+		__inc_irq_stat(irq_fpassist_count);
+		handle_fpe(regs);
+		return;
+
+	case 15:
+		/* Data TLB miss fault/Data page fault */
+		/* Fall through */
+	case 16:
+		/* Non-access instruction TLB miss fault */
+		/* The instruction TLB entry needed for the target address of the FIC
+		   is absent, and hardware can't find it, so we get to cleanup */
+		/* Fall through */
+	case 17:
+		/* Non-access data TLB miss fault/Non-access data page fault */
+		/* FIXME: 
+			 Still need to add slow path emulation code here!
+			 If the insn used a non-shadow register, then the tlb
+			 handlers could not have their side-effect (e.g. probe
+			 writing to a target register) emulated since rfir would
+			 erase the changes to said register. Instead we have to
+			 setup everything, call this function we are in, and emulate
+			 by hand. Technically we need to emulate:
+			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
+		*/
+		fault_address = regs->ior;
+		fault_space = regs->isr;
+		break;
+
+	case 18:
+		/* PCXS only -- later cpu's split this into types 26,27 & 28 */
+		/* Check for unaligned access */
+		if (check_unaligned(regs)) {
+			handle_unaligned(regs);
+			return;
+		}
+		/* Fall Through */
+	case 26: 
+		/* PCXL: Data memory access rights trap */
+		fault_address = regs->ior;
+		fault_space   = regs->isr;
+		break;
+
+	case 19:
+		/* Data memory break trap */
+		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
+		/* fall thru */
+	case 21:
+		/* Page reference trap */
+		handle_gdb_break(regs, TRAP_HWBKPT);
+		return;
+
+	case 25:
+		/* Taken branch trap */
+		regs->gr[0] &= ~PSW_T;
+		if (user_space(regs))
+			handle_gdb_break(regs, TRAP_BRANCH);
+		/* else this must be the start of a syscall - just let it
+		 * run.
+		 */
+		return;
+
+	case  7:  
+		/* Instruction access rights */
+		/* PCXL: Instruction memory protection trap */
+
+		/*
+		 * This could be caused by either: 1) a process attempting
+		 * to execute within a vma that does not have execute
+		 * permission, or 2) an access rights violation caused by a
+		 * flush only translation set up by ptep_get_and_clear().
+		 * So we check the vma permissions to differentiate the two.
+		 * If the vma indicates we have execute permission, then
+		 * the cause is the latter one. In this case, we need to
+		 * call do_page_fault() to fix the problem.
+		 */
+
+		if (user_mode(regs)) {
+			struct vm_area_struct *vma;
+
+			down_read(&current->mm->mmap_sem);
+			vma = find_vma(current->mm,regs->iaoq[0]);
+			if (vma && (regs->iaoq[0] >= vma->vm_start)
+				&& (vma->vm_flags & VM_EXEC)) {
+
+				fault_address = regs->iaoq[0];
+				fault_space = regs->iasq[0];
+
+				up_read(&current->mm->mmap_sem);
+				break; /* call do_page_fault() */
+			}
+			up_read(&current->mm->mmap_sem);
+		}
+		/* Fall Through */
+	case 27: 
+		/* Data memory protection ID trap */
+		if (code == 27 && !user_mode(regs) &&
+			fixup_exception(regs))
+			return;
+
+		die_if_kernel("Protection id trap", regs, code);
+		si.si_code = SEGV_MAPERR;
+		si.si_signo = SIGSEGV;
+		si.si_errno = 0;
+		if (code == 7)
+		    si.si_addr = (void __user *) regs->iaoq[0];
+		else
+		    si.si_addr = (void __user *) regs->ior;
+		force_sig_info(SIGSEGV, &si, current);
+		return;
+
+	case 28: 
+		/* Unaligned data reference trap */
+		handle_unaligned(regs);
+		return;
+
+	default:
+		if (user_mode(regs)) {
+			parisc_printk_ratelimited(0, regs, KERN_DEBUG
+				"handle_interruption() pid=%d command='%s'\n",
+				task_pid_nr(current), current->comm);
+			/* SIGBUS, for lack of a better one. */
+			si.si_signo = SIGBUS;
+			si.si_code = BUS_OBJERR;
+			si.si_errno = 0;
+			si.si_addr = (void __user *) regs->ior;
+			force_sig_info(SIGBUS, &si, current);
+			return;
+		}
+		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+		
+		parisc_terminate("Unexpected interruption", regs, code, 0);
+		/* NOT REACHED */
+	}
+
+	if (user_mode(regs)) {
+	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
+		parisc_printk_ratelimited(0, regs, KERN_DEBUG
+				"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
+				code, fault_space,
+				task_pid_nr(current), current->comm);
+		si.si_signo = SIGSEGV;
+		si.si_errno = 0;
+		si.si_code = SEGV_MAPERR;
+		si.si_addr = (void __user *) regs->ior;
+		force_sig_info(SIGSEGV, &si, current);
+		return;
+	    }
+	}
+	else {
+
+	    /*
+	     * The kernel should never fault on its own address space,
+	     * unless pagefault_disable() was called before.
+	     */
+
+	    if (fault_space == 0 && !faulthandler_disabled())
+	    {
+		/* Clean up and return if in exception table. */
+		if (fixup_exception(regs))
+			return;
+		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+		parisc_terminate("Kernel Fault", regs, code, fault_address);
+	    }
+	}
+
+	do_page_fault(regs, code, fault_address);
+}
+
+
+/*
+ * Install the interruption vector table at @iva: zero the first eight
+ * words, hook the firmware PDCE_CHECK instruction, then record the
+ * HPMC handler address, length and checksum words.
+ */
+void __init initialize_ivt(const void *iva)
+{
+	extern u32 os_hpmc_size;
+	extern const u32 os_hpmc[];
+
+	int i;
+	u32 check = 0;
+	u32 *ivap;
+	u32 *hpmcp;
+	u32 length, instr;
+
+	/* head.S plants this magic string at the IVT location */
+	if (strcmp((const char *)iva, "cows can fly"))
+		panic("IVT invalid");
+
+	ivap = (u32 *)iva;
+
+	for (i = 0; i < 8; i++)
+	    *ivap++ = 0;
+
+	/*
+	 * Use PDC_INSTR firmware function to get instruction that invokes
+	 * PDCE_CHECK in HPMC handler.  See programming note at page 1-31 of
+	 * the PA 1.1 Firmware Architecture document.
+	 */
+	if (pdc_instr(&instr) == PDC_OK)
+		ivap[0] = instr;
+
+	/* Setup IVA and compute checksum for HPMC handler */
+	ivap[6] = (u32)__pa(os_hpmc);
+	length = os_hpmc_size;
+	ivap[7] = length;
+
+	hpmcp = (u32 *)os_hpmc;
+
+	for (i=0; i<length/4; i++)
+	    check += *hpmcp++;
+
+	for (i=0; i<8; i++)
+	    check += ivap[i];
+
+	/* the checksum word makes the eight IVT words sum to zero */
+	ivap[5] = -check;
+}
+	
+
+/* early_trap_init() is called before we set up kernel mappings and
+ * write-protect the kernel */
+void  __init early_trap_init(void)
+{
+	extern const void fault_vector_20;
+
+#ifndef CONFIG_64BIT
+	/* 32-bit kernels also carry the PA1.1 fault vector */
+	extern const void fault_vector_11;
+	initialize_ivt(&fault_vector_11);
+#endif
+
+	initialize_ivt(&fault_vector_20);
+}
+
+void __init trap_init(void)
+{
+	/* Nothing to do: the IVT was already set up in early_trap_init(). */
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/unaligned.c b/src/kernel/linux/v4.14/arch/parisc/kernel/unaligned.c
new file mode 100644
index 0000000..e36f7b7
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/unaligned.c
@@ -0,0 +1,765 @@
+/*
+ *    Unaligned memory access handler
+ *
+ *    Copyright (C) 2001 Randolph Chung <tausq@debian.org>
+ *    Significantly tweaked by LaMont Jones <lamont@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/debug.h>
+#include <linux/signal.h>
+#include <linux/ratelimit.h>
+#include <linux/uaccess.h>
+#include <asm/hardirq.h>
+#include <asm/traps.h>
+
+/* #define DEBUG_UNALIGNED 1 */
+
+#ifdef DEBUG_UNALIGNED
+#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
+#else
+#define DPRINTF(fmt, args...)
+#endif
+
+#ifdef CONFIG_64BIT
+#define RFMT "%016lx"
+#else
+#define RFMT "%08lx"
+#endif
+
+#define FIXUP_BRANCH(lbl) \
+	"\tldil L%%" #lbl ", %%r1\n"			\
+	"\tldo R%%" #lbl "(%%r1), %%r1\n"		\
+	"\tbv,n %%r0(%%r1)\n"
+/* If you use FIXUP_BRANCH, then you must list this clobber */
+#define FIXUP_BRANCH_CLOBBER "r1"
+
+/* 1111 1100 0000 0000 0001 0011 1100 0000 */
+#define OPCODE1(a,b,c)	((a)<<26|(b)<<12|(c)<<6) 
+#define OPCODE2(a,b)	((a)<<26|(b)<<1)
+#define OPCODE3(a,b)	((a)<<26|(b)<<2)
+#define OPCODE4(a)	((a)<<26)
+#define OPCODE1_MASK	OPCODE1(0x3f,1,0xf)
+#define OPCODE2_MASK 	OPCODE2(0x3f,1)
+#define OPCODE3_MASK	OPCODE3(0x3f,1)
+#define OPCODE4_MASK    OPCODE4(0x3f)
+
+/* skip LDB - never unaligned (index) */
+#define OPCODE_LDH_I	OPCODE1(0x03,0,0x1)
+#define OPCODE_LDW_I	OPCODE1(0x03,0,0x2)
+#define OPCODE_LDD_I	OPCODE1(0x03,0,0x3)
+#define OPCODE_LDDA_I	OPCODE1(0x03,0,0x4)
+#define OPCODE_LDCD_I	OPCODE1(0x03,0,0x5)
+#define OPCODE_LDWA_I	OPCODE1(0x03,0,0x6)
+#define OPCODE_LDCW_I	OPCODE1(0x03,0,0x7)
+/* skip LDB - never unaligned (short) */
+#define OPCODE_LDH_S	OPCODE1(0x03,1,0x1)
+#define OPCODE_LDW_S	OPCODE1(0x03,1,0x2)
+#define OPCODE_LDD_S	OPCODE1(0x03,1,0x3)
+#define OPCODE_LDDA_S	OPCODE1(0x03,1,0x4)
+#define OPCODE_LDCD_S	OPCODE1(0x03,1,0x5)
+#define OPCODE_LDWA_S	OPCODE1(0x03,1,0x6)
+#define OPCODE_LDCW_S	OPCODE1(0x03,1,0x7)
+/* skip STB - never unaligned */
+#define OPCODE_STH	OPCODE1(0x03,1,0x9)
+#define OPCODE_STW	OPCODE1(0x03,1,0xa)
+#define OPCODE_STD	OPCODE1(0x03,1,0xb)
+/* skip STBY - never unaligned */
+/* skip STDBY - never unaligned */
+#define OPCODE_STWA	OPCODE1(0x03,1,0xe)
+#define OPCODE_STDA	OPCODE1(0x03,1,0xf)
+
+#define OPCODE_FLDWX	OPCODE1(0x09,0,0x0)
+#define OPCODE_FLDWXR	OPCODE1(0x09,0,0x1)
+#define OPCODE_FSTWX	OPCODE1(0x09,0,0x8)
+#define OPCODE_FSTWXR	OPCODE1(0x09,0,0x9)
+#define OPCODE_FLDWS	OPCODE1(0x09,1,0x0)
+#define OPCODE_FLDWSR	OPCODE1(0x09,1,0x1)
+#define OPCODE_FSTWS	OPCODE1(0x09,1,0x8)
+#define OPCODE_FSTWSR	OPCODE1(0x09,1,0x9)
+#define OPCODE_FLDDX	OPCODE1(0x0b,0,0x0)
+#define OPCODE_FSTDX	OPCODE1(0x0b,0,0x8)
+#define OPCODE_FLDDS	OPCODE1(0x0b,1,0x0)
+#define OPCODE_FSTDS	OPCODE1(0x0b,1,0x8)
+
+#define OPCODE_LDD_L	OPCODE2(0x14,0)
+#define OPCODE_FLDD_L	OPCODE2(0x14,1)
+#define OPCODE_STD_L	OPCODE2(0x1c,0)
+#define OPCODE_FSTD_L	OPCODE2(0x1c,1)
+
+#define OPCODE_LDW_M	OPCODE3(0x17,1)
+#define OPCODE_FLDW_L	OPCODE3(0x17,0)
+#define OPCODE_FSTW_L	OPCODE3(0x1f,0)
+#define OPCODE_STW_M	OPCODE3(0x1f,1)
+
+#define OPCODE_LDH_L    OPCODE4(0x11)
+#define OPCODE_LDW_L    OPCODE4(0x12)
+#define OPCODE_LDWM     OPCODE4(0x13)
+#define OPCODE_STH_L    OPCODE4(0x19)
+#define OPCODE_STW_L    OPCODE4(0x1A)
+#define OPCODE_STWM     OPCODE4(0x1B)
+
+#define MAJOR_OP(i) (((i)>>26)&0x3f)
+#define R1(i) (((i)>>21)&0x1f)
+#define R2(i) (((i)>>16)&0x1f)
+#define R3(i) ((i)&0x1f)
+#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
+#define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
+#define IM5_2(i) IM((i)>>16,5)
+#define IM5_3(i) IM((i),5)
+#define IM14(i) IM((i),14)
+
+#define ERR_NOTHANDLED	-1
+#define ERR_PAGEFAULT	-2
+
+int unaligned_enabled __read_mostly = 1;
+
+/*
+ * Emulate an unaligned 2-byte load from regs->isr:regs->ior into
+ * general register @toreg (writes to gr[0] are discarded).
+ * Returns 0 on success, -2 when one of the byte loads faulted.
+ */
+static int emulate_ldh(struct pt_regs *regs, int toreg)
+{
+	unsigned long saddr = regs->ior;
+	unsigned long val = 0;
+	int ret;
+
+	DPRINTF("load " RFMT ":" RFMT " to r%d for 2 bytes\n", 
+		regs->isr, regs->ior, toreg);
+
+	__asm__ __volatile__  (
+"	mtsp	%4, %%sr1\n"
+"1:	ldbs	0(%%sr1,%3), %%r20\n"
+"2:	ldbs	1(%%sr1,%3), %0\n"
+"	depw	%%r20, 23, 24, %0\n"
+"	copy	%%r0, %1\n"
+"3:	\n"
+"	.section .fixup,\"ax\"\n"
+"4:	ldi	-2, %1\n"
+	FIXUP_BRANCH(3b)
+"	.previous\n"
+	ASM_EXCEPTIONTABLE_ENTRY(1b, 4b)
+	ASM_EXCEPTIONTABLE_ENTRY(2b, 4b)
+	: "=r" (val), "=r" (ret)
+	: "0" (val), "r" (saddr), "r" (regs->isr)
+	: "r20", FIXUP_BRANCH_CLOBBER );
+
+	DPRINTF("val = 0x" RFMT "\n", val);
+
+	if (toreg)
+		regs->gr[toreg] = val;
+
+	return ret;
+}
+
+/*
+ * Emulate an unaligned 4-byte load from regs->isr:regs->ior.  When
+ * @flop the 32-bit result goes into FP register slot @toreg,
+ * otherwise into gr[toreg] (gr[0] writes discarded).
+ * Returns 0 on success, -2 when a word load faulted.
+ */
+static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
+{
+	unsigned long saddr = regs->ior;
+	unsigned long val = 0;
+	int ret;
+
+	DPRINTF("load " RFMT ":" RFMT " to r%d for 4 bytes\n", 
+		regs->isr, regs->ior, toreg);
+
+	__asm__ __volatile__  (
+"	zdep	%3,28,2,%%r19\n"		/* r19=(ofs&3)*8 */
+"	mtsp	%4, %%sr1\n"
+"	depw	%%r0,31,2,%3\n"
+"1:	ldw	0(%%sr1,%3),%0\n"
+"2:	ldw	4(%%sr1,%3),%%r20\n"
+"	subi	32,%%r19,%%r19\n"
+"	mtctl	%%r19,11\n"
+"	vshd	%0,%%r20,%0\n"
+"	copy	%%r0, %1\n"
+"3:	\n"
+"	.section .fixup,\"ax\"\n"
+"4:	ldi	-2, %1\n"
+	FIXUP_BRANCH(3b)
+"	.previous\n"
+	ASM_EXCEPTIONTABLE_ENTRY(1b, 4b)
+	ASM_EXCEPTIONTABLE_ENTRY(2b, 4b)
+	: "=r" (val), "=r" (ret)
+	: "0" (val), "r" (saddr), "r" (regs->isr)
+	: "r19", "r20", FIXUP_BRANCH_CLOBBER );
+
+	DPRINTF("val = 0x" RFMT "\n", val);
+
+	if (flop)
+		((__u32*)(regs->fr))[toreg] = val;
+	else if (toreg)
+		regs->gr[toreg] = val;
+
+	return ret;
+}
+/*
+ * Emulate an unaligned 8-byte load from regs->isr:regs->ior.  When
+ * @flop the result goes into fr[toreg], otherwise into gr[toreg]
+ * (gr[0] writes discarded).  On PA2.0 two doubleword loads are
+ * combined; on PA1.1 three word loads are shifted together.  A
+ * 64-bit GR load on a 32-bit kernel is rejected with -1.
+ * Returns 0 on success, -2 when a load faulted.
+ */
+static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
+{
+	unsigned long saddr = regs->ior;
+	__u64 val = 0;
+	int ret;
+
+	DPRINTF("load " RFMT ":" RFMT " to r%d for 8 bytes\n", 
+		regs->isr, regs->ior, toreg);
+#ifdef CONFIG_PA20
+
+#ifndef CONFIG_64BIT
+	if (!flop)
+		return -1;
+#endif
+	__asm__ __volatile__  (
+"	depd,z	%3,60,3,%%r19\n"		/* r19=(ofs&7)*8 */
+"	mtsp	%4, %%sr1\n"
+"	depd	%%r0,63,3,%3\n"
+"1:	ldd	0(%%sr1,%3),%0\n"
+"2:	ldd	8(%%sr1,%3),%%r20\n"
+"	subi	64,%%r19,%%r19\n"
+"	mtsar	%%r19\n"
+"	shrpd	%0,%%r20,%%sar,%0\n"
+"	copy	%%r0, %1\n"
+"3:	\n"
+"	.section .fixup,\"ax\"\n"
+"4:	ldi	-2, %1\n"
+	FIXUP_BRANCH(3b)
+"	.previous\n"
+	ASM_EXCEPTIONTABLE_ENTRY(1b,4b)
+	ASM_EXCEPTIONTABLE_ENTRY(2b,4b)
+	: "=r" (val), "=r" (ret)
+	: "0" (val), "r" (saddr), "r" (regs->isr)
+	: "r19", "r20", FIXUP_BRANCH_CLOBBER );
+#else
+    {
+	unsigned long valh=0,vall=0;
+	__asm__ __volatile__  (
+"	zdep	%5,29,2,%%r19\n"		/* r19=(ofs&3)*8 */
+"	mtsp	%6, %%sr1\n"
+"	dep	%%r0,31,2,%5\n"
+"1:	ldw	0(%%sr1,%5),%0\n"
+"2:	ldw	4(%%sr1,%5),%1\n"
+"3:	ldw	8(%%sr1,%5),%%r20\n"
+"	subi	32,%%r19,%%r19\n"
+"	mtsar	%%r19\n"
+"	vshd	%0,%1,%0\n"
+"	vshd	%1,%%r20,%1\n"
+"	copy	%%r0, %2\n"
+"4:	\n"
+"	.section .fixup,\"ax\"\n"
+"5:	ldi	-2, %2\n"
+	FIXUP_BRANCH(4b)
+"	.previous\n"
+	ASM_EXCEPTIONTABLE_ENTRY(1b,5b)
+	ASM_EXCEPTIONTABLE_ENTRY(2b,5b)
+	ASM_EXCEPTIONTABLE_ENTRY(3b,5b)
+	: "=r" (valh), "=r" (vall), "=r" (ret)
+	: "0" (valh), "1" (vall), "r" (saddr), "r" (regs->isr)
+	: "r19", "r20", FIXUP_BRANCH_CLOBBER );
+	val=((__u64)valh<<32)|(__u64)vall;
+    }
+#endif
+
+	DPRINTF("val = 0x%llx\n", val);
+
+	if (flop)
+		regs->fr[toreg] = val;
+	else if (toreg)
+		regs->gr[toreg] = val;
+
+	return ret;
+}
+
+/*
+ * Emulate an unaligned 2-byte store of gr[frreg] (gr[0] reads as 0)
+ * to regs->isr:regs->ior as two byte stores.
+ * Returns 0 on success, -2 when a store faulted.
+ */
+static int emulate_sth(struct pt_regs *regs, int frreg)
+{
+	unsigned long val = regs->gr[frreg];
+	int ret;
+
+	if (!frreg)
+		val = 0;
+
+	DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 2 bytes\n", frreg, 
+		val, regs->isr, regs->ior);
+
+	__asm__ __volatile__ (
+"	mtsp %3, %%sr1\n"
+"	extrw,u %1, 23, 8, %%r19\n"
+"1:	stb %1, 1(%%sr1, %2)\n"
+"2:	stb %%r19, 0(%%sr1, %2)\n"
+"	copy	%%r0, %0\n"
+"3:	\n"
+"	.section .fixup,\"ax\"\n"
+"4:	ldi	-2, %0\n"
+	FIXUP_BRANCH(3b)
+"	.previous\n"
+	ASM_EXCEPTIONTABLE_ENTRY(1b,4b)
+	ASM_EXCEPTIONTABLE_ENTRY(2b,4b)
+	: "=r" (ret)
+	: "r" (val), "r" (regs->ior), "r" (regs->isr)
+	: "r19", FIXUP_BRANCH_CLOBBER );
+
+	return ret;
+}
+
+/*
+ * Emulate an unaligned 4-byte store of gr[frreg] (or the 32-bit FP
+ * register slot @frreg when @flop; gr[0] reads as 0) to
+ * regs->isr:regs->ior, done as a read-modify-write of the two words
+ * spanning the target address.
+ * Returns 0 on success, -2 when a load/store faulted.
+ */
+static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
+{
+	unsigned long val;
+	int ret;
+
+	if (flop)
+		val = ((__u32*)(regs->fr))[frreg];
+	else if (frreg)
+		val = regs->gr[frreg];
+	else
+		val = 0;
+
+	DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 4 bytes\n", frreg, 
+		val, regs->isr, regs->ior);
+
+
+	__asm__ __volatile__ (
+"	mtsp %3, %%sr1\n"
+"	zdep	%2, 28, 2, %%r19\n"
+"	dep	%%r0, 31, 2, %2\n"
+"	mtsar	%%r19\n"
+"	depwi,z	-2, %%sar, 32, %%r19\n"
+"1:	ldw	0(%%sr1,%2),%%r20\n"
+"2:	ldw	4(%%sr1,%2),%%r21\n"
+"	vshd	%%r0, %1, %%r22\n"
+"	vshd	%1, %%r0, %%r1\n"
+"	and	%%r20, %%r19, %%r20\n"
+"	andcm	%%r21, %%r19, %%r21\n"
+"	or	%%r22, %%r20, %%r20\n"
+"	or	%%r1, %%r21, %%r21\n"
+"	stw	%%r20,0(%%sr1,%2)\n"
+"	stw	%%r21,4(%%sr1,%2)\n"
+"	copy	%%r0, %0\n"
+"3:	\n"
+"	.section .fixup,\"ax\"\n"
+"4:	ldi	-2, %0\n"
+	FIXUP_BRANCH(3b)
+"	.previous\n"
+	ASM_EXCEPTIONTABLE_ENTRY(1b,4b)
+	ASM_EXCEPTIONTABLE_ENTRY(2b,4b)
+	: "=r" (ret)
+	: "r" (val), "r" (regs->ior), "r" (regs->isr)
+	: "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
+
+	/* Propagate the fixup status like emulate_sth()/emulate_std() do;
+	 * the old "return 0;" reported faulting stores as successful. */
+	return ret;
+}
+/*
+ * Emulate an unaligned 8-byte store of fr[frreg] (when @flop) or
+ * gr[frreg] (gr[0] reads as 0) to regs->isr:regs->ior.  On PA2.0 this
+ * is a read-modify-write of the two doublewords spanning the target;
+ * on PA1.1 the value is split into two word halves and merged into
+ * the three words spanning the target.  A 64-bit GR store on a
+ * 32-bit kernel is rejected with -1.
+ * Returns 0 on success, -2 when a load/store faulted.
+ */
+static int emulate_std(struct pt_regs *regs, int frreg, int flop)
+{
+	__u64 val;
+	int ret;
+
+	if (flop)
+		val = regs->fr[frreg];
+	else if (frreg)
+		val = regs->gr[frreg];
+	else
+		val = 0;
+
+	DPRINTF("store r%d (0x%016llx) to " RFMT ":" RFMT " for 8 bytes\n", frreg, 
+		val,  regs->isr, regs->ior);
+
+#ifdef CONFIG_PA20
+#ifndef CONFIG_64BIT
+	if (!flop)
+		return -1;
+#endif
+	__asm__ __volatile__ (
+"	mtsp %3, %%sr1\n"
+"	depd,z	%2, 60, 3, %%r19\n"
+"	depd	%%r0, 63, 3, %2\n"
+"	mtsar	%%r19\n"
+"	depdi,z	-2, %%sar, 64, %%r19\n"
+"1:	ldd	0(%%sr1,%2),%%r20\n"
+"2:	ldd	8(%%sr1,%2),%%r21\n"
+"	shrpd	%%r0, %1, %%sar, %%r22\n"
+"	shrpd	%1, %%r0, %%sar, %%r1\n"
+"	and	%%r20, %%r19, %%r20\n"
+"	andcm	%%r21, %%r19, %%r21\n"
+"	or	%%r22, %%r20, %%r20\n"
+"	or	%%r1, %%r21, %%r21\n"
+"3:	std	%%r20,0(%%sr1,%2)\n"
+"4:	std	%%r21,8(%%sr1,%2)\n"
+"	copy	%%r0, %0\n"
+"5:	\n"
+"	.section .fixup,\"ax\"\n"
+"6:	ldi	-2, %0\n"
+	FIXUP_BRANCH(5b)
+"	.previous\n"
+	ASM_EXCEPTIONTABLE_ENTRY(1b,6b)
+	ASM_EXCEPTIONTABLE_ENTRY(2b,6b)
+	ASM_EXCEPTIONTABLE_ENTRY(3b,6b)
+	ASM_EXCEPTIONTABLE_ENTRY(4b,6b)
+	: "=r" (ret)
+	: "r" (val), "r" (regs->ior), "r" (regs->isr)
+	: "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
+#else
+    {
+	unsigned long valh=(val>>32),vall=(val&0xffffffffl);
+	__asm__ __volatile__ (
+	/* FIX: compute the shift from the address (%3), not from the
+	 * low value half (%2), and word-align the address itself. */
+"	mtsp	%4, %%sr1\n"
+"	zdep	%3, 29, 2, %%r19\n"
+"	dep	%%r0, 31, 2, %3\n"
+"	mtsar	%%r19\n"
+"	zvdepi	-2, 32, %%r19\n"
+"1:	ldw	0(%%sr1,%3),%%r20\n"
+"2:	ldw	8(%%sr1,%3),%%r21\n"
+"	vshd	%1, %2, %%r1\n"
+"	vshd	%%r0, %1, %1\n"
+"	vshd	%2, %%r0, %2\n"
+"	and	%%r20, %%r19, %%r20\n"
+"	andcm	%%r21, %%r19, %%r21\n"
+"	or	%1, %%r20, %1\n"
+"	or	%2, %%r21, %2\n"
+	/* FIX: the first store used %1 (value) as the base register;
+	 * all three stores must go through the address in %3. */
+"3:	stw	%1,0(%%sr1,%3)\n"
+"4:	stw	%%r1,4(%%sr1,%3)\n"
+"5:	stw	%2,8(%%sr1,%3)\n"
+"	copy	%%r0, %0\n"
+"6:	\n"
+"	.section .fixup,\"ax\"\n"
+"7:	ldi	-2, %0\n"
+	FIXUP_BRANCH(6b)
+"	.previous\n"
+	ASM_EXCEPTIONTABLE_ENTRY(1b,7b)
+	ASM_EXCEPTIONTABLE_ENTRY(2b,7b)
+	ASM_EXCEPTIONTABLE_ENTRY(3b,7b)
+	ASM_EXCEPTIONTABLE_ENTRY(4b,7b)
+	ASM_EXCEPTIONTABLE_ENTRY(5b,7b)
+	: "=r" (ret)
+	: "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr)
+	: "r19", "r20", "r21", "r1", FIXUP_BRANCH_CLOBBER );
+    }
+#endif
+
+	return ret;
+}
+
+/*
+ * Top-level unaligned access handler: decode regs->iir, emulate the
+ * access, apply base-register modification, and deliver SIGBUS/SIGSEGV
+ * when the access cannot be emulated.
+ */
+void handle_unaligned(struct pt_regs *regs)
+{
+	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+	unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0;
+	int modify = 0;
+	int ret = ERR_NOTHANDLED;
+	struct siginfo si;
+	register int flop=0;	/* true if this is a flop */
+
+	__inc_irq_stat(irq_unaligned_count);
+
+	/* log a message with pacing */
+	if (user_mode(regs)) {
+		if (current->thread.flags & PARISC_UAC_SIGBUS) {
+			goto force_sigbus;
+		}
+
+		if (!(current->thread.flags & PARISC_UAC_NOPRINT) &&
+			__ratelimit(&ratelimit)) {
+			char buf[256];
+			sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n",
+				current->comm, task_pid_nr(current), regs->ior, regs->iaoq[0]);
+			printk(KERN_WARNING "%s", buf);
+#ifdef DEBUG_UNALIGNED
+			show_regs(regs);
+#endif		
+		}
+
+		if (!unaligned_enabled)
+			goto force_sigbus;
+	}
+
+	/* handle modification - OK, it's ugly, see the instruction manual */
+	switch (MAJOR_OP(regs->iir))
+	{
+	case 0x03:
+	case 0x09:
+	case 0x0b:
+		if (regs->iir&0x20)
+		{
+			modify = 1;
+			if (regs->iir&0x1000)		/* short loads */
+				if (regs->iir&0x200)
+					newbase += IM5_3(regs->iir);
+				else
+					newbase += IM5_2(regs->iir);
+			else if (regs->iir&0x2000)	/* scaled indexed */
+			{
+				int shift=0;
+				switch (regs->iir & OPCODE1_MASK)
+				{
+				case OPCODE_LDH_I:
+					shift= 1; break;
+				case OPCODE_LDW_I:
+					shift= 2; break;
+				case OPCODE_LDD_I:
+				case OPCODE_LDDA_I:
+					shift= 3; break;
+				}
+				newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0)<<shift;
+			} else				/* simple indexed */
+				newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0);
+		}
+		break;
+	case 0x13:
+	case 0x1b:
+		modify = 1;
+		newbase += IM14(regs->iir);
+		break;
+	case 0x14:
+	case 0x1c:
+		if (regs->iir&8)
+		{
+			modify = 1;
+			newbase += IM14(regs->iir&~0xe);
+		}
+		break;
+	case 0x16:
+	case 0x1e:
+		modify = 1;
+		newbase += IM14(regs->iir&6);
+		break;
+	case 0x17:
+	case 0x1f:
+		if (regs->iir&4)
+		{
+			modify = 1;
+			newbase += IM14(regs->iir&~4);
+		}
+		break;
+	}
+
+	/* TODO: make this cleaner... */
+	switch (regs->iir & OPCODE1_MASK)
+	{
+	case OPCODE_LDH_I:
+	case OPCODE_LDH_S:
+		ret = emulate_ldh(regs, R3(regs->iir));
+		break;
+
+	case OPCODE_LDW_I:
+	case OPCODE_LDWA_I:
+	case OPCODE_LDW_S:
+	case OPCODE_LDWA_S:
+		ret = emulate_ldw(regs, R3(regs->iir),0);
+		break;
+
+	case OPCODE_STH:
+		ret = emulate_sth(regs, R2(regs->iir));
+		break;
+
+	case OPCODE_STW:
+	case OPCODE_STWA:
+		ret = emulate_stw(regs, R2(regs->iir),0);
+		break;
+
+#ifdef CONFIG_PA20
+	case OPCODE_LDD_I:
+	case OPCODE_LDDA_I:
+	case OPCODE_LDD_S:
+	case OPCODE_LDDA_S:
+		ret = emulate_ldd(regs, R3(regs->iir),0);
+		break;
+
+	case OPCODE_STD:
+	case OPCODE_STDA:
+		ret = emulate_std(regs, R2(regs->iir),0);
+		break;
+#endif
+
+	case OPCODE_FLDWX:
+	case OPCODE_FLDWS:
+	case OPCODE_FLDWXR:
+	case OPCODE_FLDWSR:
+		flop=1;
+		ret = emulate_ldw(regs,FR3(regs->iir),1);
+		break;
+
+	case OPCODE_FLDDX:
+	case OPCODE_FLDDS:
+		flop=1;
+		ret = emulate_ldd(regs,R3(regs->iir),1);
+		break;
+
+	case OPCODE_FSTWX:
+	case OPCODE_FSTWS:
+	case OPCODE_FSTWXR:
+	case OPCODE_FSTWSR:
+		flop=1;
+		ret = emulate_stw(regs,FR3(regs->iir),1);
+		break;
+
+	case OPCODE_FSTDX:
+	case OPCODE_FSTDS:
+		flop=1;
+		ret = emulate_std(regs,R3(regs->iir),1);
+		break;
+
+	case OPCODE_LDCD_I:
+	case OPCODE_LDCW_I:
+	case OPCODE_LDCD_S:
+	case OPCODE_LDCW_S:
+		ret = ERR_NOTHANDLED;	/* "undefined", but lets kill them. */
+		break;
+	}
+#ifdef CONFIG_PA20
+	switch (regs->iir & OPCODE2_MASK)
+	{
+	case OPCODE_FLDD_L:
+		flop=1;
+		ret = emulate_ldd(regs,R2(regs->iir),1);
+		break;
+	case OPCODE_FSTD_L:
+		flop=1;
+		ret = emulate_std(regs, R2(regs->iir),1);
+		break;
+	case OPCODE_LDD_L:
+		ret = emulate_ldd(regs, R2(regs->iir),0);
+		break;
+	case OPCODE_STD_L:
+		ret = emulate_std(regs, R2(regs->iir),0);
+		break;
+	}
+#endif
+	switch (regs->iir & OPCODE3_MASK)
+	{
+	case OPCODE_FLDW_L:
+		flop=1;
+		/* FIX: fldw is a floating-point load, so the flop argument
+		 * must be 1 (it was swapped with OPCODE_LDW_M below). */
+		ret = emulate_ldw(regs, R2(regs->iir), 1);
+		break;
+	case OPCODE_LDW_M:
+		/* FIX: ldw,m targets a general register, so flop is 0. */
+		ret = emulate_ldw(regs, R2(regs->iir), 0);
+		break;
+
+	case OPCODE_FSTW_L:
+		flop=1;
+		ret = emulate_stw(regs, R2(regs->iir),1);
+		break;
+	case OPCODE_STW_M:
+		ret = emulate_stw(regs, R2(regs->iir),0);
+		break;
+	}
+	switch (regs->iir & OPCODE4_MASK)
+	{
+	case OPCODE_LDH_L:
+		ret = emulate_ldh(regs, R2(regs->iir));
+		break;
+	case OPCODE_LDW_L:
+	case OPCODE_LDWM:
+		ret = emulate_ldw(regs, R2(regs->iir),0);
+		break;
+	case OPCODE_STH_L:
+		ret = emulate_sth(regs, R2(regs->iir));
+		break;
+	case OPCODE_STW_L:
+	case OPCODE_STWM:
+		ret = emulate_stw(regs, R2(regs->iir),0);
+		break;
+	}
+
+	if (ret == 0 && modify && R1(regs->iir))
+		regs->gr[R1(regs->iir)] = newbase;
+
+
+	if (ret == ERR_NOTHANDLED)
+		printk(KERN_CRIT "Not-handled unaligned insn 0x%08lx\n", regs->iir);
+
+	DPRINTF("ret = %d\n", ret);
+
+	if (ret)
+	{
+		/*
+		 * The unaligned handler failed.
+		 * If we were called by __get_user() or __put_user() jump
+		 * to it's exception fixup handler instead of crashing.
+		 */
+		if (!user_mode(regs) && fixup_exception(regs))
+			return;
+
+		printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
+		die_if_kernel("Unaligned data reference", regs, 28);
+
+		if (ret == ERR_PAGEFAULT)
+		{
+			si.si_signo = SIGSEGV;
+			si.si_errno = 0;
+			si.si_code = SEGV_MAPERR;
+			si.si_addr = (void __user *)regs->ior;
+			force_sig_info(SIGSEGV, &si, current);
+		}
+		else
+		{
+force_sigbus:
+			/* couldn't handle it ... */
+			si.si_signo = SIGBUS;
+			si.si_errno = 0;
+			si.si_code = BUS_ADRALN;
+			si.si_addr = (void __user *)regs->ior;
+			force_sig_info(SIGBUS, &si, current);
+		}
+		
+		return;
+	}
+
+	/* else we handled it, let life go on. */
+	regs->gr[0]|=PSW_N;
+}
+
+/*
+ * Decide whether the trapping access was unaligned.  Only PA1.1
+ * encodings are decoded because this helper is used solely on PCXS
+ * processors.  Returns non-zero when the effective address in
+ * regs->ior breaks the natural alignment of the decoded access width.
+ */
+int
+check_unaligned(struct pt_regs *regs)
+{
+	unsigned long mask = 0UL;
+
+	/* Width from the indexed/short-displacement load-store group. */
+	switch (regs->iir & OPCODE1_MASK) {
+	case OPCODE_LDH_I:
+	case OPCODE_LDH_S:
+	case OPCODE_STH:
+		mask = 1UL;
+		break;
+
+	case OPCODE_LDW_I:
+	case OPCODE_LDWA_I:
+	case OPCODE_LDW_S:
+	case OPCODE_LDWA_S:
+	case OPCODE_STW:
+	case OPCODE_STWA:
+		mask = 3UL;
+		break;
+
+	default:
+		/* Otherwise try the long-displacement encodings. */
+		switch (regs->iir & OPCODE4_MASK) {
+		case OPCODE_LDH_L:
+		case OPCODE_STH_L:
+			mask = 1UL;
+			break;
+		case OPCODE_LDW_L:
+		case OPCODE_LDWM:
+		case OPCODE_STW_L:
+		case OPCODE_STWM:
+			mask = 3UL;
+			break;
+		}
+		break;
+	}
+
+	return (int)(regs->ior & mask);
+}
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/unwind.c b/src/kernel/linux/v4.14/arch/parisc/kernel/unwind.c
new file mode 100644
index 0000000..5a65798
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/unwind.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kernel unwinding support
+ *
+ * (c) 2002-2004 Randolph Chung <tausq@debian.org>
+ *
+ * Derived partially from the IA64 implementation. The PA-RISC
+ * Runtime Architecture Document is also a useful reference to
+ * understand what is happening here
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/kallsyms.h>
+#include <linux/sort.h>
+#include <linux/sched.h>
+
+#include <linux/uaccess.h>
+#include <asm/assembly.h>
+#include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
+
+#include <asm/unwind.h>
+
+/* #define DEBUG 1 */
+#ifdef DEBUG
+#define dbg(x...) printk(x)
+#else
+#define dbg(x...)
+#endif
+
+#define KERNEL_START (KERNEL_BINARY_TEXT_START)
+
+extern struct unwind_table_entry __start___unwind[];
+extern struct unwind_table_entry __stop___unwind[];
+
+static DEFINE_SPINLOCK(unwind_lock);
+/*
+ * the kernel unwind block is not dynamically allocated so that
+ * we can call unwind_init as early in the bootup process as 
+ * possible (before the slab allocator is initialized)
+ */
+static struct unwind_table kernel_unwind_table __read_mostly;
+static LIST_HEAD(unwind_tables);
+
+/*
+ * Binary-search one sorted unwind table for the entry whose
+ * [region_start, region_end] range contains addr; NULL if none.
+ * NOTE(review): callers only pass addr within [table->start, table->end],
+ * which prevents the unsigned hi/mid indices from underflowing at mid==0.
+ */
+static inline const struct unwind_table_entry *
+find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
+{
+	const struct unwind_table_entry *e = NULL;
+	unsigned long lo, hi, mid;
+
+	lo = 0; 
+	hi = table->length - 1; 
+	
+	while (lo <= hi) {
+		mid = (hi - lo) / 2 + lo;	/* overflow-safe midpoint */
+		e = &table->table[mid];
+		if (addr < e->region_start)
+			hi = mid - 1;
+		else if (addr > e->region_end)
+			lo = mid + 1;
+		else
+			return e;
+	}
+
+	return NULL;
+}
+
+/*
+ * Look up the unwind entry covering addr: first in the static kernel
+ * table (lock-free), then in the module tables under unwind_lock.
+ */
+static const struct unwind_table_entry *
+find_unwind_entry(unsigned long addr)
+{
+	struct unwind_table *table;
+	const struct unwind_table_entry *e = NULL;
+
+	if (addr >= kernel_unwind_table.start && 
+	    addr <= kernel_unwind_table.end)
+		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
+	else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&unwind_lock, flags);
+		list_for_each_entry(table, &unwind_tables, list) {
+			if (addr >= table->start && 
+			    addr <= table->end)
+				e = find_unwind_entry_in_table(table, addr);
+			if (e) {
+				/* Move-to-front to exploit common traces */
+				list_move(&table->list, &unwind_tables);
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&unwind_lock, flags);
+	}
+
+	return e;
+}
+
+/*
+ * Initialize a table descriptor over [table_start, table_end) and
+ * relocate every entry's region_start/region_end from section-relative
+ * offsets to absolute addresses by adding base_addr.  Entries are
+ * expected to already be sorted; out-of-order pairs are only warned
+ * about, not fixed.
+ */
+static void
+unwind_table_init(struct unwind_table *table, const char *name,
+		  unsigned long base_addr, unsigned long gp,
+		  void *table_start, void *table_end)
+{
+	struct unwind_table_entry *start = table_start;
+	struct unwind_table_entry *end = 
+		(struct unwind_table_entry *)table_end - 1;
+
+	table->name = name;
+	table->base_addr = base_addr;
+	table->gp = gp;
+	table->start = base_addr + start->region_start;
+	table->end = base_addr + end->region_end;
+	table->table = (struct unwind_table_entry *)table_start;
+	table->length = end - start + 1;
+	INIT_LIST_HEAD(&table->list);
+
+	for (; start <= end; start++) {
+		if (start < end && 
+		    start->region_end > (start+1)->region_start) {
+			printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
+		}
+
+		start->region_start += base_addr;
+		start->region_end += base_addr;
+	}
+}
+
+/*
+ * sort() comparator ordering unwind entries by region_start.
+ * Use explicit comparisons rather than subtraction: region_start is an
+ * unsigned long, so the old "a - b" form truncated to int and could
+ * report the wrong ordering whenever two addresses differ by more than
+ * INT_MAX (and its implicit conversion was implementation-defined).
+ */
+static int cmp_unwind_table_entry(const void *a, const void *b)
+{
+	unsigned long ra = ((const struct unwind_table_entry *)a)->region_start;
+	unsigned long rb = ((const struct unwind_table_entry *)b)->region_start;
+
+	return (ra > rb) - (ra < rb);
+}
+
+/*
+ * Sort a module's unwind entries by region_start so that the binary
+ * search in find_unwind_entry_in_table() works.
+ */
+static void
+unwind_table_sort(struct unwind_table_entry *start,
+		  struct unwind_table_entry *finish)
+{
+	sort(start, finish - start, sizeof(struct unwind_table_entry),
+	     cmp_unwind_table_entry, NULL);
+}
+
+/*
+ * Register an unwind table for a newly loaded module.  Sorts the raw
+ * entries in place, allocates a descriptor, relocates the entries via
+ * unwind_table_init() and links the table into unwind_tables.
+ * Returns the descriptor (caller passes it to unwind_table_remove()
+ * on unload) or NULL on allocation failure.
+ * NOTE(review): GFP_USER for a kernel-internal descriptor looks odd --
+ * GFP_KERNEL would be the usual choice; confirm against callers.
+ */
+struct unwind_table *
+unwind_table_add(const char *name, unsigned long base_addr, 
+		 unsigned long gp,
+                 void *start, void *end)
+{
+	struct unwind_table *table;
+	unsigned long flags;
+	struct unwind_table_entry *s = (struct unwind_table_entry *)start;
+	struct unwind_table_entry *e = (struct unwind_table_entry *)end;
+
+	unwind_table_sort(s, e);
+
+	table = kmalloc(sizeof(struct unwind_table), GFP_USER);
+	if (table == NULL)
+		return NULL;
+	unwind_table_init(table, name, base_addr, gp, start, end);
+	spin_lock_irqsave(&unwind_lock, flags);
+	list_add_tail(&table->list, &unwind_tables);
+	spin_unlock_irqrestore(&unwind_lock, flags);
+
+	return table;
+}
+
+/* Unlink and free a module unwind table added by unwind_table_add(). */
+void unwind_table_remove(struct unwind_table *table)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&unwind_lock, flags);
+	list_del(&table->list);
+	spin_unlock_irqrestore(&unwind_lock, flags);
+
+	kfree(table);
+}
+
+/* Called from setup_arch to import the kernel unwind info */
+int __init unwind_init(void)
+{
+	long start, stop;
+	register unsigned long gp __asm__ ("r27");
+
+	start = (long)&__start___unwind[0];
+	stop = (long)&__stop___unwind[0];
+
+	printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n", 
+	    start, stop,
+	    (stop - start) / sizeof(struct unwind_table_entry));
+
+	unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
+			  gp, 
+			  &__start___unwind[0], &__stop___unwind[0]);
+#if 0
+	{
+		int i;
+		for (i = 0; i < 10; i++)
+		{
+			printk("region 0x%x-0x%x\n", 
+				__start___unwind[i].region_start, 
+				__start___unwind[i].region_end);
+		}
+	}
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_64BIT
+#define get_func_addr(fptr) fptr[2]
+#else
+#define get_func_addr(fptr) fptr[0]
+#endif
+
+/*
+ * Special-case unwinding through handle_interruption(): its caller's
+ * state lives in a struct pt_regs saved on the stack, not in a normal
+ * call frame.  Returns 1 (and fills prev_sp/prev_ip from those regs)
+ * when pc is handle_interruption's entry point, 0 otherwise.
+ */
+static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
+{
+	extern void handle_interruption(int, struct pt_regs *);
+	static unsigned long *hi = (unsigned long *)&handle_interruption;
+
+	if (pc == get_func_addr(hi)) {
+		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
+		dbg("Unwinding through handle_interruption()\n");
+		info->prev_sp = regs->gr[30];
+		info->prev_ip = regs->iaoq[0];
+
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Compute info->prev_sp / info->prev_ip for the frame at info->ip:
+ * either precisely, by replaying the prologue described by the unwind
+ * table entry, or heuristically (no entry found) by scanning the stack
+ * for a plausible kernel-text return address.  prev_* of 0 means the
+ * unwind cannot continue.
+ */
+static void unwind_frame_regs(struct unwind_frame_info *info)
+{
+	const struct unwind_table_entry *e;
+	unsigned long npc;
+	unsigned int insn;
+	long frame_size = 0;
+	int looking_for_rp, rpoffset = 0;
+
+	e = find_unwind_entry(info->ip);
+	if (e == NULL) {
+		unsigned long sp;
+
+		dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
+
+#ifdef CONFIG_KALLSYMS
+		/* Handle some frequent special cases.... */
+		{
+			char symname[KSYM_NAME_LEN];
+			char *modname;
+
+			kallsyms_lookup(info->ip, NULL, NULL, &modname,
+				symname);
+
+			dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
+
+			if (strcmp(symname, "_switch_to_ret") == 0) {
+				/* context-switch return: fixed-size callee-save frame */
+				info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
+				info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
+				dbg("_switch_to_ret @ %lx - setting "
+				    "prev_sp=%lx prev_ip=%lx\n", 
+				    info->ip, info->prev_sp, 
+				    info->prev_ip);
+				return;
+			} else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
+				   strcmp(symname, "syscall_exit") == 0) {
+				/* bottom of a kernel stack: stop unwinding */
+				info->prev_ip = info->prev_sp = 0;
+				return;
+			}
+		}
+#endif
+
+		/* Since we are doing the unwinding blind, we don't know if
+		   we are adjusting the stack correctly or extracting the rp
+		   correctly. The rp is checked to see if it belongs to the
+		   kernel text section, if not we assume we don't have a 
+		   correct stack frame and we continue to unwind the stack.
+		   This is not quite correct, and will fail for loadable
+		   modules. */
+		sp = info->sp & ~63;
+		do {
+			unsigned long tmp;
+
+			/* step down one 64-byte-aligned frame at a time */
+			info->prev_sp = sp - 64;
+			info->prev_ip = 0;
+
+			/* The stack is at the end inside the thread_union
+			 * struct. If we reach data, we have reached the
+			 * beginning of the stack and should stop unwinding. */
+			if (info->prev_sp >= (unsigned long) task_thread_info(info->t) &&
+			    info->prev_sp < ((unsigned long) task_thread_info(info->t)
+						+ THREAD_SZ_ALGN)) {
+				info->prev_sp = 0;
+				break;
+			}
+
+			if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET))) 
+				break;
+			info->prev_ip = tmp;
+			sp = info->prev_sp;
+		} while (!kernel_text_address(info->prev_ip));
+
+		info->rp = 0;
+
+		dbg("analyzing func @ %lx with no unwind info, setting "
+		    "prev_sp=%lx prev_ip=%lx\n", info->ip, 
+		    info->prev_sp, info->prev_ip);
+	} else {
+		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
+		    "Save_RP = %d, Millicode = %d size = %u\n", 
+		    e->region_start, e->region_end, e->Save_SP, e->Save_RP, 
+		    e->Millicode, e->Total_frame_size);
+
+		looking_for_rp = e->Save_RP;
+
+		/* Scan the prologue (up to the faulting ip) for the
+		 * stack-adjust and save-rp instructions. */
+		for (npc = e->region_start; 
+		     (frame_size < (e->Total_frame_size << 3) || 
+		      looking_for_rp) && 
+		     npc < info->ip; 
+		     npc += 4) {
+
+			insn = *(unsigned int *)npc;
+
+			if ((insn & 0xffffc001) == 0x37de0000 ||
+			    (insn & 0xffe00001) == 0x6fc00000) {
+				/* ldo X(sp), sp, or stwm X,D(sp) */
+				frame_size += (insn & 0x3fff) >> 1;
+				dbg("analyzing func @ %lx, insn=%08x @ "
+				    "%lx, frame_size = %ld\n", info->ip,
+				    insn, npc, frame_size);
+			} else if ((insn & 0xffe00009) == 0x73c00008) {
+				/* std,ma X,D(sp) */
+				frame_size += ((insn >> 4) & 0x3ff) << 3;
+				dbg("analyzing func @ %lx, insn=%08x @ "
+				    "%lx, frame_size = %ld\n", info->ip,
+				    insn, npc, frame_size);
+			} else if (insn == 0x6bc23fd9) { 
+				/* stw rp,-20(sp) */
+				rpoffset = 20;
+				looking_for_rp = 0;
+				dbg("analyzing func @ %lx, insn=stw rp,"
+				    "-20(sp) @ %lx\n", info->ip, npc);
+			} else if (insn == 0x0fc212c1) {
+				/* std rp,-16(sr0,sp) */
+				rpoffset = 16;
+				looking_for_rp = 0;
+				dbg("analyzing func @ %lx, insn=std rp,"
+				    "-16(sp) @ %lx\n", info->ip, npc);
+			}
+		}
+
+		/* clamp to the table's declared frame size (in 8-byte units) */
+		if (frame_size > e->Total_frame_size << 3)
+			frame_size = e->Total_frame_size << 3;
+
+		if (!unwind_special(info, e->region_start, frame_size)) {
+			info->prev_sp = info->sp - frame_size;
+			if (e->Millicode)
+				info->rp = info->r31;
+			else if (rpoffset)
+				info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
+			info->prev_ip = info->rp;
+			info->rp = 0;
+		}
+
+		dbg("analyzing func @ %lx, setting prev_sp=%lx "
+		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp, 
+		    info->prev_ip, npc);
+	}
+}
+
+/*
+ * Seed an unwind from the register state in regs: sp from gr[30],
+ * ip from the front of the instruction-address queue, rp from gr[2]
+ * and the millicode return register r31 from gr[31].
+ */
+void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t, 
+		       struct pt_regs *regs)
+{
+	memset(info, 0, sizeof(struct unwind_frame_info));
+	info->t = t;
+	info->sp = regs->gr[30];
+	info->ip = regs->iaoq[0];
+	info->rp = regs->gr[2];
+	info->r31 = regs->gr[31];
+
+	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n", 
+	    t ? (int)t->pid : -1, info->sp, info->ip);
+}
+
+/*
+ * Seed an unwind for a sleeping task: copy its saved regs and patch in
+ * the kernel sp/pc it blocked at.  A scratch copy is used so the
+ * task's own register save area is not modified.
+ * NOTE(review): on kmalloc failure this returns with *info untouched
+ * (uninitialized in most callers) -- verify callers tolerate that.
+ */
+void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
+{
+	struct pt_regs *r = &t->thread.regs;
+	struct pt_regs *r2;
+
+	r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
+	if (!r2)
+		return;
+	*r2 = *r;
+	r2->gr[30] = r->ksp;
+	r2->iaoq[0] = r->kpc;
+	unwind_frame_init(info, t, r2);
+	kfree(r2);
+}
+
+/* Seed an unwind of the current task from a live register snapshot. */
+void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
+{
+	unwind_frame_init(info, current, regs);
+}
+
+/*
+ * Step the unwind state one frame up the call chain.
+ * Returns 0 on success (sp/ip now describe the caller) or -1 when no
+ * further frame could be determined.
+ */
+int unwind_once(struct unwind_frame_info *next_frame)
+{
+	unwind_frame_regs(next_frame);
+
+	if (next_frame->prev_sp == 0 ||
+	    next_frame->prev_ip == 0)
+		return -1;
+
+	/* promote the computed previous frame to the current one */
+	next_frame->sp = next_frame->prev_sp;
+	next_frame->ip = next_frame->prev_ip;
+	next_frame->prev_sp = 0;
+	next_frame->prev_ip = 0;
+
+	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n", 
+	    next_frame->t ? (int)next_frame->t->pid : -1, 
+	    next_frame->sp, next_frame->ip);
+
+	return 0;
+}
+
+/*
+ * Unwind until a userspace frame is reached.  Kernel text is 4-byte
+ * aligned, so an ip with either low bit set (the privilege-level bits)
+ * marks a user address; stops on that or on unwind failure.
+ */
+int unwind_to_user(struct unwind_frame_info *info)
+{
+	int ret;
+	
+	do {
+		ret = unwind_once(info);
+	} while (!ret && !(info->ip & 3));
+
+	return ret;
+}
+
+/*
+ * Return the kernel-text return address `level` call frames above the
+ * caller of this function, or 0 if the unwind fails or leaves kernel
+ * text first.
+ */
+unsigned long return_address(unsigned int level)
+{
+	struct unwind_frame_info info;
+	struct pt_regs r;
+	unsigned long sp;
+
+	/* initialize unwind info */
+	asm volatile ("copy %%r30, %0" : "=r"(sp));
+	memset(&r, 0, sizeof(struct pt_regs));
+	r.iaoq[0] = (unsigned long) current_text_addr();
+	r.gr[2] = (unsigned long) __builtin_return_address(0);
+	r.gr[30] = sp;
+	unwind_frame_init(&info, current, &r);
+
+	/* unwind stack: one extra step skips this function's own frame */
+	++level;
+	do {
+		if (unwind_once(&info) < 0 || info.ip == 0)
+			return 0;
+		if (!kernel_text_address(info.ip))
+			return 0;
+	} while (info.ip && level--);
+
+	return info.ip;
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/kernel/vmlinux.lds.S b/src/kernel/linux/v4.14/arch/parisc/kernel/vmlinux.lds.S
new file mode 100644
index 0000000..159a2ec
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/kernel/vmlinux.lds.S
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*    Kernel link layout for various "sections"
+ *
+ *    Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
+ *    Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
+ *    Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
+ *    Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
+ *    Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ *    Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
+ *    Copyright (C) 2006-2013 Helge Deller <deller@gmx.de>
+ */
+
+/*
+ * Put page table entries (swapper_pg_dir) as the first thing in .bss. This
+ * will ensure that it has .bss alignment (PAGE_SIZE).
+ */
+#define BSS_FIRST_SECTIONS	*(.data..vm0.pmd) \
+				*(.data..vm0.pgd) \
+				*(.data..vm0.pte)
+
+#include <asm-generic/vmlinux.lds.h>
+
+/* needed for the processor specific cache alignment size */	
+#include <asm/cache.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+	
+/* ld script to make hppa Linux kernel */
+#ifndef CONFIG_64BIT
+OUTPUT_FORMAT("elf32-hppa-linux")
+OUTPUT_ARCH(hppa)
+#else
+OUTPUT_FORMAT("elf64-hppa-linux")
+OUTPUT_ARCH(hppa:hppa2.0w)
+#endif
+
+ENTRY(parisc_kernel_start)
+#ifndef CONFIG_64BIT
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
+SECTIONS
+{
+	. = KERNEL_BINARY_TEXT_START;
+
+	/* Init text/data comes first so the whole huge-page-aligned
+	 * region can be freed after boot. */
+	__init_begin = .;
+	HEAD_TEXT_SECTION
+	INIT_TEXT_SECTION(8)
+
+	. = ALIGN(PAGE_SIZE);
+	INIT_DATA_SECTION(PAGE_SIZE)
+	/* we have to discard exit text and such at runtime, not link time */
+	.exit.text :
+	{
+		EXIT_TEXT
+	}
+	.exit.data :
+	{
+		EXIT_DATA
+	}
+	PERCPU_SECTION(8)
+	. = ALIGN(HUGEPAGE_SIZE);
+	__init_end = .;
+	/* freed after init ends here */
+
+	_text = .;		/* Text and read-only data */
+	_stext = .;
+	.text ALIGN(PAGE_SIZE) : {
+		TEXT_TEXT
+		SCHED_TEXT
+		CPUIDLE_TEXT
+		LOCK_TEXT
+		KPROBES_TEXT
+		IRQENTRY_TEXT
+		SOFTIRQENTRY_TEXT
+		*(.text.do_softirq)
+		*(.text.sys_exit)
+		*(.text.do_sigaltstack)
+		*(.text.do_fork)
+		*(.text.div)
+		*($$*)			/* millicode routines */
+		*(.text.*)
+		*(.fixup)
+		*(.lock.text)		/* out-of-line lock text */
+		*(.gnu.warning)
+	}
+	. = ALIGN(PAGE_SIZE);
+	_etext = .;
+	/* End of text section */
+
+	/* Start of data section */
+	_sdata = .;
+
+	/* Architecturally we need to keep __gp below 0x1000000 and thus
+	 * in front of RO_DATA_SECTION() which stores lots of tracepoint
+	 * and ftrace symbols. */
+#ifdef CONFIG_64BIT
+	. = ALIGN(16);
+	/* Linkage tables */
+	.opd : {
+		*(.opd)
+	} PROVIDE (__gp = .);
+	.plt : {
+		*(.plt)
+	}
+	.dlt : {
+		*(.dlt)
+	}
+#endif
+
+	RO_DATA_SECTION(8)
+
+	/* RO because of BUILDTIME_EXTABLE_SORT */
+	EXCEPTION_TABLE(8)
+	NOTES
+
+	/* unwind info: bounds consumed by arch/parisc/kernel/unwind.c */
+	.PARISC.unwind : {
+		__start___unwind = .;
+		*(.PARISC.unwind)
+		__stop___unwind = .;
+	}
+
+	/* writeable */
+	/* Make sure this is page aligned so
+	 * that we can properly leave these
+	 * as writable
+	 */
+	. = ALIGN(HUGEPAGE_SIZE);
+	data_start = .;
+
+	/* Data */
+	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE)
+
+	/* PA-RISC locks requires 16-byte alignment */
+	. = ALIGN(16);
+	.data..lock_aligned : {
+		*(.data..lock_aligned)
+	}
+
+	/* End of data section */
+	_edata = .;
+
+	/* BSS */
+	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
+
+	. = ALIGN(HUGEPAGE_SIZE);
+	_end = . ;
+
+	STABS_DEBUG
+	.note 0 : { *(.note) }
+
+	/* Sections to be discarded */
+	DISCARDS
+	/DISCARD/ : {
+#ifdef CONFIG_64BIT
+		/* temporary hack until binutils is fixed to not emit these
+	 	 * for static binaries
+		 */
+		*(.interp)
+		*(.dynsym)
+		*(.dynstr)
+		*(.dynamic)
+		*(.hash)
+		*(.gnu.hash)
+#endif
+	}
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/lib/Makefile b/src/kernel/linux/v4.14/arch/parisc/lib/Makefile
new file mode 100644
index 0000000..f2dac4d
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/lib/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for parisc-specific library files
+#
+
+# lib-y objects go into lib.a and are linked only when referenced;
+# obj-y objects are always linked into the kernel image.
+lib-y	:= lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
+	   ucmpdi2.o delay.o
+
+obj-y	:= iomap.o
diff --git a/src/kernel/linux/v4.14/arch/parisc/lib/bitops.c b/src/kernel/linux/v4.14/arch/parisc/lib/bitops.c
new file mode 100644
index 0000000..2e4d1f0
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/lib/bitops.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bitops.c: atomic operations which got too long to be inlined all over
+ *      the place.
+ * 
+ * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
+ * Copyright 2000 Grant Grundler (grundler@cup.hp.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+
+#ifdef CONFIG_SMP
+/* Hashed spinlock pool serializing the atomic-op emulation on SMP:
+ * an operand address hashes to one of ATOMIC_HASH_SIZE locks. */
+arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+	[0 ... (ATOMIC_HASH_SIZE-1)]  = __ARCH_SPIN_LOCK_UNLOCKED
+};
+#endif
+
+#ifdef CONFIG_64BIT
+/* Exchange *ptr with x under the hashed lock; returns the old value. */
+unsigned long __xchg64(unsigned long x, unsigned long *ptr)
+{
+	unsigned long temp, flags;
+
+	_atomic_spin_lock_irqsave(ptr, flags);
+	temp = *ptr;
+	*ptr = x;
+	_atomic_spin_unlock_irqrestore(ptr, flags);
+	return temp;
+}
+#endif
+
+/* 32-bit exchange under the hashed lock; returns the (sign-extended)
+ * old value. */
+unsigned long __xchg32(int x, int *ptr)
+{
+	unsigned long flags;
+	long temp;
+
+	_atomic_spin_lock_irqsave(ptr, flags);
+	temp = (long) *ptr;	/* XXX - sign extension wanted? */
+	*ptr = x;
+	_atomic_spin_unlock_irqrestore(ptr, flags);
+	return (unsigned long)temp;
+}
+
+
+/*
+ * 8-bit exchange: store x into *ptr and return the previous byte,
+ * widened to long (sign extension follows char's signedness, exactly
+ * as before), all under the hashed atomic lock for *ptr.
+ */
+unsigned long __xchg8(char x, char *ptr)
+{
+	unsigned long flags;
+	long previous;
+
+	_atomic_spin_lock_irqsave(ptr, flags);
+	previous = (long)*ptr;	/* XXX - sign extension wanted? */
+	*ptr = x;
+	_atomic_spin_unlock_irqrestore(ptr, flags);
+
+	return (unsigned long)previous;
+}
+
+
+/* Compare-and-swap: set *ptr to new only if it equals old; returns the
+ * value observed (== old on success). */
+u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
+{
+	unsigned long flags;
+	u64 prev;
+
+	_atomic_spin_lock_irqsave(ptr, flags);
+	if ((prev = *ptr) == old)
+		*ptr = new;
+	_atomic_spin_unlock_irqrestore(ptr, flags);
+	return prev;
+}
+
+/* 32-bit compare-and-swap under the hashed lock; returns the value
+ * observed (== old on success). */
+unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
+{
+	unsigned long flags;
+	unsigned int prev;
+
+	_atomic_spin_lock_irqsave(ptr, flags);
+	if ((prev = *ptr) == old)
+		*ptr = new;
+	_atomic_spin_unlock_irqrestore(ptr, flags);
+	return (unsigned long)prev;
+}
+
+/* 8-bit compare-and-swap under the hashed lock; returns the value
+ * observed (== old on success). */
+u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
+{
+	unsigned long flags;
+	u8 prev;
+
+	_atomic_spin_lock_irqsave(ptr, flags);
+	if ((prev = *ptr) == old)
+		*ptr = new;
+	_atomic_spin_unlock_irqrestore(ptr, flags);
+	return prev;
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/lib/checksum.c b/src/kernel/linux/v4.14/arch/parisc/lib/checksum.c
new file mode 100644
index 0000000..ba6384d
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/lib/checksum.c
@@ -0,0 +1,149 @@
+/*
+ * INET		An implementation of the TCP/IP protocol suite for the LINUX
+ *		operating system.  INET is implemented using the  BSD Socket
+ *		interface as the means of communication with the user level.
+ *
+ *		MIPS specific IP/TCP/UDP checksumming routines
+ *
+ * Authors:	Ralf Baechle, <ralf@waldorf-gmbh.de>
+ *		Lots of code moved from tcp.c and ip.c; see those files
+ *		for more names.
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <net/checksum.h>
+#include <asm/byteorder.h>
+#include <asm/string.h>
+#include <linux/uaccess.h>
+
+/* addc(_t, _r): _t += _r with end-around carry -- the second `addc`
+ * with %r0 folds the carry bit back into the sum, as required for
+ * one's-complement checksumming. */
+#define addc(_t,_r)                     \
+	__asm__ __volatile__ (          \
+"       add             %0, %1, %0\n"   \
+"       addc            %0, %%r0, %0\n" \
+	: "=r"(_t)                      \
+	: "r"(_r), "0"(_t));
+
+/*
+ * Fold a 32-bit one's-complement accumulator down to 16 bits.
+ * Two folds are needed: the first can itself produce a carry into
+ * bit 16, which the second fold absorbs.
+ */
+static inline unsigned short from32to16(unsigned int x)
+{
+	unsigned int folded = (x >> 16) + (x & 0xffff);
+
+	folded = (folded >> 16) + (folded & 0xffff);
+	return (unsigned short)folded;
+}
+
+/*
+ * Core one's-complement sum over buff[0..len).  Works in progressively
+ * wider chunks (byte to align, halfword, unrolled 32-bit words) and
+ * byte-swaps the result at the end if buff started on an odd address,
+ * so the caller-visible checksum is position-independent.
+ */
+static inline unsigned int do_csum(const unsigned char * buff, int len)
+{
+	int odd, count;
+	unsigned int result = 0;
+
+	if (len <= 0)
+		goto out;
+	odd = 1 & (unsigned long) buff;
+	if (odd) {
+		/* leading byte to reach halfword alignment */
+		result = be16_to_cpu(*buff);
+		len--;
+		buff++;
+	}
+	count = len >> 1;		/* nr of 16-bit words.. */
+	if (count) {
+		if (2 & (unsigned long) buff) {
+			/* leading halfword to reach word alignment */
+			result += *(unsigned short *) buff;
+			count--;
+			len -= 2;
+			buff += 2;
+		}
+		count >>= 1;		/* nr of 32-bit words.. */
+		if (count) {
+			/* 4-way unrolled word loop using carry-folding addc */
+			while (count >= 4) {
+				unsigned int r1, r2, r3, r4;
+				r1 = *(unsigned int *)(buff + 0);
+				r2 = *(unsigned int *)(buff + 4);
+				r3 = *(unsigned int *)(buff + 8);
+				r4 = *(unsigned int *)(buff + 12);
+				addc(result, r1);
+				addc(result, r2);
+				addc(result, r3);
+				addc(result, r4);
+				count -= 4;
+				buff += 16;
+			}
+			while (count) {
+				unsigned int w = *(unsigned int *) buff;
+				count--;
+				buff += 4;
+				addc(result, w);
+			}
+			result = (result & 0xffff) + (result >> 16);
+		}
+		if (len & 2) {
+			/* trailing halfword */
+			result += *(unsigned short *) buff;
+			buff += 2;
+		}
+	}
+	if (len & 1)
+		result += le16_to_cpu(*buff);
+	result = from32to16(result);
+	if (odd)
+		/* undo the byte shift introduced by the odd start */
+		result = swab16(result);
+out:
+	return result;
+}
+
+/*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+/*
+ * why bother folding?
+ */
+/* Sum the buffer and fold the caller's partial sum in with end-around
+ * carry; result is already folded to 16 bits. */
+__wsum csum_partial(const void *buff, int len, __wsum sum)
+{
+	unsigned int result = do_csum(buff, len);
+	addc(result, sum);
+	return (__force __wsum)from32to16(result);
+}
+
+EXPORT_SYMBOL(csum_partial);
+
+/*
+ * copy while checksumming, otherwise like csum_partial
+ */
+/*
+ * Checksum src then copy it to dst (two passes over src rather than a
+ * fused copy-and-sum loop; functionally equivalent, just slower).
+ */
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				       int len, __wsum sum)
+{
+	/*
+	 * It's 2:30 am and I don't feel like doing it real ...
+	 * This is lots slower than the real thing (tm)
+	 */
+	sum = csum_partial(src, len, sum);
+	memcpy(dst, src, len);
+
+	return sum;
+}
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+
+/*
+ * Copy from userspace and compute checksum.  If we catch an exception
+ * then zero the rest of the buffer.
+ */
+/*
+ * Copy len bytes from userspace into dst and checksum the result.
+ * On a partial copy the uncopied tail of dst is zero-filled (so the
+ * checksum is still computed over len defined bytes) and *err_ptr is
+ * set to -EFAULT.
+ */
+__wsum csum_partial_copy_from_user(const void __user *src,
+					void *dst, int len,
+					__wsum sum, int *err_ptr)
+{
+	int missing;
+
+	missing = copy_from_user(dst, src, len);
+	if (missing) {
+		memset(dst + len - missing, 0, missing);
+		*err_ptr = -EFAULT;
+	}
+		
+	return csum_partial(dst, len, sum);
+}
+EXPORT_SYMBOL(csum_partial_copy_from_user);
diff --git a/src/kernel/linux/v4.14/arch/parisc/lib/delay.c b/src/kernel/linux/v4.14/arch/parisc/lib/delay.c
new file mode 100644
index 0000000..7eab4bb
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/lib/delay.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *	Precise Delay Loops for parisc
+ *
+ *	based on code by:
+ *	Copyright (C) 1993 Linus Torvalds
+ *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ *	Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
+ *
+ *	parisc implementation:
+ *	Copyright (C) 2013 Helge Deller <deller@gmx.de>
+ */
+
+
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/init.h>
+
+#include <asm/processor.h>
+#include <asm/delay.h>
+
+#include <asm/special_insns.h>    /* for mfctl() */
+#include <asm/processor.h> /* for boot_cpu_data */
+
+/* CR16 based delay: */
+static void __cr16_delay(unsigned long __loops)
+{
+	/*
+	 * Note: Due to unsigned math, cr16 rollovers shouldn't be
+	 * a problem here. However, on 32 bit, we need to make sure
+	 * we don't pass in too big a value. The current default
+	 * value of MAX_UDELAY_MS should help prevent this.
+	 */
+	u32 bclock, now, loops = __loops;
+	int cpu;
+
+	preempt_disable();
+	cpu = smp_processor_id();
+	bclock = mfctl(16);
+	for (;;) {
+		now = mfctl(16);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		asm volatile("	nop\n");
+		barrier();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since CR16's are per-cpu we need to calculate
+		 * that. The delay must guarantee that we wait "at
+		 * least" the amount of time. Being moved to another
+		 * CPU could make the wait longer but we just need to
+		 * make sure we waited long enough. Rebalance the
+		 * counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			bclock = mfctl(16);
+		}
+	}
+	preempt_enable();
+}
+
+
+/* Delay for usecs microseconds: scale to CR16 ticks by the boot CPU's
+ * clock rate and spin in __cr16_delay(). */
+void __udelay(unsigned long usecs)
+{
+	__cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL));
+}
+EXPORT_SYMBOL(__udelay);
diff --git a/src/kernel/linux/v4.14/arch/parisc/lib/io.c b/src/kernel/linux/v4.14/arch/parisc/lib/io.c
new file mode 100644
index 0000000..7c00496
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/lib/io.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch/parisc/lib/io.c
+ *
+ * Copyright (c) Matthew Wilcox 2001 for Hewlett-Packard
+ * Copyright (c) Randolph Chung 2001 <tausq@debian.org>
+ *
+ * IO accessing functions which shouldn't be inlined because they're too big
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+/* Copies a block of memory to a device in an efficient manner.
+ * Assumes the device can cope with 32-bit transfers.  If it can't,
+ * don't use this function.
+ */
+/* Copies a block of memory to a device in an efficient manner.
+ * Assumes the device can cope with 32-bit transfers.  If it can't,
+ * don't use this function.
+ */
+void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
+{
+	/* src and dst can only be co-aligned to a word boundary if their
+	 * low two bits agree; otherwise fall back to byte copies. */
+	if (((unsigned long)dst & 3) != ((unsigned long)src & 3))
+		goto bytecopy;
+	/* Align dst to a word boundary.  The count check is essential:
+	 * without it a small count could go negative here, and the
+	 * trailing byte loop would then copy (almost) forever. */
+	while (count && ((unsigned long)dst & 3)) {
+		writeb(*(char *)src, dst++);
+		src++;
+		count--;
+	}
+	while (count > 3) {
+		__raw_writel(*(u32 *)src, dst);
+		src += 4;
+		dst += 4;
+		count -= 4;
+	}
+ bytecopy:
+	/* "> 0" guards against any negative count reaching this loop */
+	while (count-- > 0) {
+		writeb(*(char *)src, dst++);
+		src++;
+	}
+}
+
+/*
+** Copies a block of memory from a device in an efficient manner.
+** Assumes the device can cope with 32-bit transfers.  If it can't,
+** don't use this function.
+**
+** CR16 counts on C3000 reading 256 bytes from Symbios 896 RAM:
+**	27341/64    = 427 cyc per int
+**	61311/128   = 478 cyc per short
+**	122637/256  = 479 cyc per byte
+** Ergo bus latencies dominant (not transfer size).
+**      Minimize total number of transfers at cost of CPU cycles.
+**	TODO: only look at src alignment and adjust the stores to dest.
+*/
+/* Copy count bytes from device memory at src to dst, preferring the
+ * widest transfers the mutual alignment of src/dst permits (see the
+ * rationale block comment above). */
+void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
+{
+	/* first compare alignment of src/dst */ 
+	if ( (((unsigned long)dst ^ (unsigned long)src) & 1) || (count < 2) )
+		goto bytecopy;
+
+	if ( (((unsigned long)dst ^ (unsigned long)src) & 2) || (count < 4) )
+		goto shortcopy;
+
+	/* Then check for misaligned start address */
+	if ((unsigned long)src & 1) {
+		*(u8 *)dst = readb(src);
+		src++;
+		dst++;
+		count--;
+		if (count < 2) goto bytecopy;
+	}
+
+	if ((unsigned long)src & 2) {
+		*(u16 *)dst = __raw_readw(src);
+		src += 2;
+		dst += 2;
+		count -= 2;
+	}
+
+	/* main word loop */
+	while (count > 3) {
+		*(u32 *)dst = __raw_readl(src);
+		dst += 4;
+		src += 4;
+		count -= 4;
+	}
+
+ shortcopy:
+	while (count > 1) {
+		*(u16 *)dst = __raw_readw(src);
+		src += 2;
+		dst += 2;
+		count -= 2;
+	}
+
+ bytecopy:
+	while (count--) {
+		*(char *)dst = readb(src);
+		src++;
+		dst++;
+	}
+}
+
+/* Sets a block of memory on a device to a given value.
+ * Assumes the device can cope with 32-bit transfers.  If it can't,
+ * don't use this function.
+ */
+/* Sets a block of memory on a device to a given value.
+ * Assumes the device can cope with 32-bit transfers.  If it can't,
+ * don't use this function.
+ */
+void memset_io(volatile void __iomem *addr, unsigned char val, int count)
+{
+	/* replicate val into all four byte lanes for the word stores */
+	u32 val32 = (val << 24) | (val << 16) | (val << 8) | val;
+
+	/* Align to a word boundary, but never write more than count
+	 * bytes: without the count check, an unaligned addr with a
+	 * count smaller than the alignment gap would write past the
+	 * requested region and leave count negative. */
+	while (count && ((unsigned long)addr & 3)) {
+		writeb(val, addr++);
+		count--;
+	}
+	while (count > 3) {
+		__raw_writel(val32, addr);
+		addr += 4;
+		count -= 4;
+	}
+	/* "> 0" guards against any negative count reaching this loop */
+	while (count-- > 0) {
+		writeb(val, addr++);
+	}
+}
+
+/*
+ * Read COUNT 8-bit bytes from port PORT into memory starting at
+ * SRC.
+ */
+/* Read count bytes from I/O port `port` into dst: byte reads until dst
+ * is word aligned, then pack four port bytes per 32-bit store (MSB
+ * first), then byte reads for the tail. */
+void insb (unsigned long port, void *dst, unsigned long count)
+{
+	unsigned char *p;
+
+	p = (unsigned char *)dst;
+
+	while (((unsigned long)p) & 0x3) {
+		if (!count)
+			return;
+		count--;
+		*p = inb(port);
+		p++;
+	}
+
+	while (count >= 4) {
+		unsigned int w;
+		count -= 4;
+		w = inb(port) << 24;
+		w |= inb(port) << 16;
+		w |= inb(port) << 8;
+		w |= inb(port);
+		*(unsigned int *) p = w;
+		p += 4;
+	}
+
+	while (count) {
+		--count;
+		*p = inb(port);
+		p++;
+	}
+}
+
+
+/*
+ * Read COUNT 16-bit words from port PORT into memory starting at
+ * SRC.  SRC must be at least short aligned.  This is used by the
+ * IDE driver to read disk sectors.  Performance is important, but
+ * the interfaces seems to be slow: just using the inlined version
+ * of the inw() breaks things.
+ */
+/* Read count 16-bit words from `port` into dst, handling all three
+ * possible buffer alignments.  Where possible two port words are
+ * packed into one 32-bit store; for odd (byte-aligned) buffers the
+ * previous word's low byte is carried over and merged with the next
+ * word's high byte. */
+void insw (unsigned long port, void *dst, unsigned long count)
+{
+	unsigned int l = 0, l2;
+	unsigned char *p;
+
+	p = (unsigned char *)dst;
+	
+	if (!count)
+		return;
+	
+	switch (((unsigned long)p) & 0x3)
+	{
+	 case 0x00:			/* Buffer 32-bit aligned */
+		while (count>=2) {
+			
+			count -= 2;
+			l = cpu_to_le16(inw(port)) << 16;
+			l |= cpu_to_le16(inw(port));
+			*(unsigned int *)p = l;
+			p += 4;
+		}
+		if (count) {
+			*(unsigned short *)p = cpu_to_le16(inw(port));
+		}
+		break;
+	
+	 case 0x02:			/* Buffer 16-bit aligned */
+		/* one halfword store brings the buffer to word alignment */
+		*(unsigned short *)p = cpu_to_le16(inw(port));
+		p += 2;
+		count--;
+		while (count>=2) {
+			
+			count -= 2;
+			l = cpu_to_le16(inw(port)) << 16;
+			l |= cpu_to_le16(inw(port));
+			*(unsigned int *)p = l;
+			p += 4;
+		}
+		if (count) {
+			*(unsigned short *)p = cpu_to_le16(inw(port));
+		}
+		break;
+		
+	 case 0x01:			/* Buffer 8-bit aligned */
+	 case 0x03:
+		/* I don't bother with 32bit transfers
+		 * in this case, 16bit will have to do -- DE */
+		--count;
+		
+		l = cpu_to_le16(inw(port));
+		*p = l >> 8;
+		p++;
+		while (count--)
+		{
+			/* merge carried low byte with next word's high byte */
+			l2 = cpu_to_le16(inw(port));
+			*(unsigned short *)p = (l & 0xff) << 8 | (l2 >> 8);
+			p += 2;
+			l = l2;
+		}
+		*p = l & 0xff;	/* flush the final carried byte */
+		break;
+	}
+}
+
+
+
+/*
+ * Read COUNT 32-bit words from port PORT into memory starting at
+ * SRC. Now works with any alignment in SRC. Performance is important,
+ * but the interfaces seems to be slow: just using the inlined version
+ * of the inl() breaks things.
+ */
+/* Read count 32-bit words from `port` into dst at any alignment.  For
+ * misaligned buffers each stored word is assembled from the carried
+ * remainder of the previous port word plus the leading bytes of the
+ * next one. */
+void insl (unsigned long port, void *dst, unsigned long count)
+{
+	unsigned int l = 0, l2;
+	unsigned char *p;
+
+	p = (unsigned char *)dst;
+	
+	if (!count)
+		return;
+	
+	switch (((unsigned long) dst) & 0x3)
+	{
+	 case 0x00:			/* Buffer 32-bit aligned */
+		while (count--)
+		{
+			*(unsigned int *)p = cpu_to_le32(inl(port));
+			p += 4;
+		}
+		break;
+	
+	 case 0x02:			/* Buffer 16-bit aligned */
+		--count;
+		
+		/* leading halfword, then carry 16 bits between words */
+		l = cpu_to_le32(inl(port));
+		*(unsigned short *)p = l >> 16;
+		p += 2;
+		
+		while (count--)
+		{
+			l2 = cpu_to_le32(inl(port));
+			*(unsigned int *)p = (l & 0xffff) << 16 | (l2 >> 16);
+			p += 4;
+			l = l2;
+		}
+		*(unsigned short *)p = l & 0xffff;
+		break;
+	 case 0x01:			/* Buffer 8-bit aligned */
+		--count;
+		
+		/* leading byte + halfword, then carry 8 bits between words */
+		l = cpu_to_le32(inl(port));
+		*(unsigned char *)p = l >> 24;
+		p++;
+		*(unsigned short *)p = (l >> 8) & 0xffff;
+		p += 2;
+		while (count--)
+		{
+			l2 = cpu_to_le32(inl(port));
+			*(unsigned int *)p = (l & 0xff) << 24 | (l2 >> 8);
+			p += 4;
+			l = l2;
+		}
+		*p = l & 0xff;
+		break;
+	 case 0x03:			/* Buffer 8-bit aligned */
+		--count;
+		
+		/* leading byte only, then carry 24 bits between words */
+		l = cpu_to_le32(inl(port));
+		*p = l >> 24;
+		p++;
+		while (count--)
+		{
+			l2 = cpu_to_le32(inl(port));
+			*(unsigned int *)p = (l & 0xffffff) << 8 | l2 >> 24;
+			p += 4;
+			l = l2;
+		}
+		*(unsigned short *)p = (l >> 8) & 0xffff;
+		p += 2;
+		*p = l & 0xff;
+		break;
+	}
+}
+
+
+/*
+ * Like insb but in the opposite direction.
+ * Don't worry as much about doing aligned memory transfers:
+ * doing byte reads the "slow" way isn't nearly as slow as
+ * doing byte writes the slow way (no r-m-w cycle).
+ */
+void outsb(unsigned long port, const void * src, unsigned long count)
+{
+	const unsigned char *p;
+
+	p = (const unsigned char *)src;
+	while (count) {
+		count--;
+		outb(*p, port);
+		p++;
+	}
+}
+
+/*
+ * Like insw but in the opposite direction.  This is used by the IDE
+ * driver to write disk sectors.  Performance is important, but the
+ * interface seems to be slow: just using the inlined version of the
+ * outw() breaks things.
+ */
+void outsw (unsigned long port, const void *src, unsigned long count)
+{
+	unsigned int l = 0, l2;
+	const unsigned char *p;
+
+	p = (const unsigned char *)src;
+	
+	if (!count)
+		return;
+	
+	switch (((unsigned long)p) & 0x3)
+	{
+	 case 0x00:			/* Buffer 32-bit aligned */
+		while (count>=2) {
+			count -= 2;
+			l = *(unsigned int *)p;
+			p += 4;
+			outw(le16_to_cpu(l >> 16), port);
+			outw(le16_to_cpu(l & 0xffff), port);
+		}
+		if (count) {
+			outw(le16_to_cpu(*(unsigned short*)p), port);
+		}
+		break;
+	
+	 case 0x02:			/* Buffer 16-bit aligned */
+		
+		outw(le16_to_cpu(*(unsigned short*)p), port);
+		p += 2;
+		count--;
+		
+		while (count>=2) {
+			count -= 2;
+			l = *(unsigned int *)p;
+			p += 4;
+			outw(le16_to_cpu(l >> 16), port);
+			outw(le16_to_cpu(l & 0xffff), port);
+		}
+		if (count) {
+			outw(le16_to_cpu(*(unsigned short *)p), port);
+		}
+		break;
+		
+	 case 0x01:			/* Buffer 8-bit aligned */	
+		/* I don't bother with 32bit transfers
+		 * in this case, 16bit will have to do -- DE */
+		
+		l  = *p << 8;
+		p++;
+		count--;
+		while (count)
+		{
+			count--;
+			l2 = *(unsigned short *)p;
+			p += 2;
+			outw(le16_to_cpu(l | l2 >> 8), port);
+		        l = l2 << 8;
+		}
+		l2 = *(unsigned char *)p;
+		outw (le16_to_cpu(l | l2>>8), port);
+		break;
+	
+	}
+}
+
+
+/*
+ * Like insl but in the opposite direction.  This is used by the IDE
+ * driver to write disk sectors.  Works with any alignment in SRC.
+ * Performance is important, but the interface seems to be slow:
+ * just using the inlined version of the outl() breaks things.
+ */
+void outsl (unsigned long port, const void *src, unsigned long count)
+{
+	unsigned int l = 0, l2;
+	const unsigned char *p;
+
+	p = (const unsigned char *)src;
+	
+	if (!count)
+		return;
+	
+	switch (((unsigned long)p) & 0x3)
+	{
+	 case 0x00:			/* Buffer 32-bit aligned */
+		while (count--)
+		{
+			outl(le32_to_cpu(*(unsigned int *)p), port);
+			p += 4;
+		}
+		break;
+	
+	 case 0x02:			/* Buffer 16-bit aligned */
+		--count;
+		
+		l = *(unsigned short *)p;
+		p += 2;
+		
+		while (count--)
+		{
+			l2 = *(unsigned int *)p;
+			p += 4;
+			outl (le32_to_cpu(l << 16 | l2 >> 16), port);
+			l = l2;
+		}
+		l2 = *(unsigned short *)p;
+		outl (le32_to_cpu(l << 16 | l2), port);
+		break;
+	 case 0x01:			/* Buffer 8-bit aligned */
+		--count;
+
+		l = *p << 24;
+		p++;
+		l |= *(unsigned short *)p << 8;
+		p += 2;
+
+		while (count--)
+		{
+			l2 = *(unsigned int *)p;
+			p += 4;
+			outl (le32_to_cpu(l | l2 >> 24), port);
+			l = l2 << 8;
+		}
+		l2 = *p;
+		outl (le32_to_cpu(l | l2), port);
+		break;
+	 case 0x03:			/* Buffer 8-bit aligned */
+		--count;
+		
+		l = *p << 24;
+		p++;
+
+		while (count--)
+		{
+			l2 = *(unsigned int *)p;
+			p += 4;
+			outl (le32_to_cpu(l | l2 >> 8), port);
+			l = l2 << 24;
+		}
+		l2 = *(unsigned short *)p << 16;
+		p += 2;
+		l2 |= *p;
+		outl (le32_to_cpu(l | l2), port);
+		break;
+	}
+}
+
+EXPORT_SYMBOL(insb);
+EXPORT_SYMBOL(insw);
+EXPORT_SYMBOL(insl);
+EXPORT_SYMBOL(outsb);
+EXPORT_SYMBOL(outsw);
+EXPORT_SYMBOL(outsl);
diff --git a/src/kernel/linux/v4.14/arch/parisc/lib/iomap.c b/src/kernel/linux/v4.14/arch/parisc/lib/iomap.c
new file mode 100644
index 0000000..4b19e6e
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/lib/iomap.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * iomap.c - Implement iomap interface for PA-RISC
+ * Copyright (c) 2004 Matthew Wilcox
+ */
+
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/export.h>
+#include <asm/io.h>
+
+/*
+ * The iomap space on 32-bit PA-RISC is intended to look like this:
+ * 00000000-7fffffff virtual mapped IO
+ * 80000000-8fffffff ISA/EISA port space that can't be virtually mapped
+ * 90000000-9fffffff Dino port space
+ * a0000000-afffffff Astro port space
+ * b0000000-bfffffff PAT port space
+ * c0000000-cfffffff non-swapped memory IO
+ * f0000000-ffffffff legacy IO memory pointers
+ *
+ * For the moment, here's what it looks like:
+ * 80000000-8fffffff All ISA/EISA port space
+ * f0000000-ffffffff legacy IO memory pointers
+ *
+ * On 64-bit, everything is extended, so:
+ * 8000000000000000-8fffffffffffffff All ISA/EISA port space
+ * f000000000000000-ffffffffffffffff legacy IO memory pointers
+ */
+
+/*
+ * Technically, this should be 'if (VMALLOC_START < addr < VMALLOC_END),
+ * but that's slow and we know it'll be within the first 2GB.
+ */
+#ifdef CONFIG_64BIT
+#define INDIRECT_ADDR(addr)	(((unsigned long)(addr) & 1UL<<63) != 0)
+#define ADDR_TO_REGION(addr)    (((unsigned long)addr >> 60) & 7)
+#define IOPORT_MAP_BASE		(8UL << 60)
+#else
+#define INDIRECT_ADDR(addr)     (((unsigned long)(addr) & 1UL<<31) != 0)
+#define ADDR_TO_REGION(addr)    (((unsigned long)addr >> 28) & 7)
+#define IOPORT_MAP_BASE		(8UL << 28)
+#endif
+
+struct iomap_ops {
+	unsigned int (*read8)(void __iomem *);
+	unsigned int (*read16)(void __iomem *);
+	unsigned int (*read16be)(void __iomem *);
+	unsigned int (*read32)(void __iomem *);
+	unsigned int (*read32be)(void __iomem *);
+	void (*write8)(u8, void __iomem *);
+	void (*write16)(u16, void __iomem *);
+	void (*write16be)(u16, void __iomem *);
+	void (*write32)(u32, void __iomem *);
+	void (*write32be)(u32, void __iomem *);
+	void (*read8r)(void __iomem *, void *, unsigned long);
+	void (*read16r)(void __iomem *, void *, unsigned long);
+	void (*read32r)(void __iomem *, void *, unsigned long);
+	void (*write8r)(void __iomem *, const void *, unsigned long);
+	void (*write16r)(void __iomem *, const void *, unsigned long);
+	void (*write32r)(void __iomem *, const void *, unsigned long);
+};
+
+/* Generic ioport ops.  To be replaced later by specific dino/elroy/wax code */
+
+#define ADDR2PORT(addr) ((unsigned long __force)(addr) & 0xffffff)
+
+static unsigned int ioport_read8(void __iomem *addr)
+{
+	return inb(ADDR2PORT(addr));
+}
+
+static unsigned int ioport_read16(void __iomem *addr)
+{
+	return inw(ADDR2PORT(addr));
+}
+
+static unsigned int ioport_read32(void __iomem *addr)
+{
+	return inl(ADDR2PORT(addr));
+}
+
+static void ioport_write8(u8 datum, void __iomem *addr)
+{
+	outb(datum, ADDR2PORT(addr));
+}
+
+static void ioport_write16(u16 datum, void __iomem *addr)
+{
+	outw(datum, ADDR2PORT(addr));
+}
+
+static void ioport_write32(u32 datum, void __iomem *addr)
+{
+	outl(datum, ADDR2PORT(addr));
+}
+
+static void ioport_read8r(void __iomem *addr, void *dst, unsigned long count)
+{
+	insb(ADDR2PORT(addr), dst, count);
+}
+
+static void ioport_read16r(void __iomem *addr, void *dst, unsigned long count)
+{
+	insw(ADDR2PORT(addr), dst, count);
+}
+
+static void ioport_read32r(void __iomem *addr, void *dst, unsigned long count)
+{
+	insl(ADDR2PORT(addr), dst, count);
+}
+
+static void ioport_write8r(void __iomem *addr, const void *s, unsigned long n)
+{
+	outsb(ADDR2PORT(addr), s, n);
+}
+
+static void ioport_write16r(void __iomem *addr, const void *s, unsigned long n)
+{
+	outsw(ADDR2PORT(addr), s, n);
+}
+
+static void ioport_write32r(void __iomem *addr, const void *s, unsigned long n)
+{
+	outsl(ADDR2PORT(addr), s, n);
+}
+
+static const struct iomap_ops ioport_ops = {
+	.read8 = ioport_read8,
+	.read16 = ioport_read16,
+	.read16be = ioport_read16,
+	.read32 = ioport_read32,
+	.read32be = ioport_read32,
+	.write8 = ioport_write8,
+	.write16 = ioport_write16,
+	.write16be = ioport_write16,
+	.write32 = ioport_write32,
+	.write32be = ioport_write32,
+	.read8r = ioport_read8r,
+	.read16r = ioport_read16r,
+	.read32r = ioport_read32r,
+	.write8r = ioport_write8r,
+	.write16r = ioport_write16r,
+	.write32r = ioport_write32r,
+};
+
+/* Legacy I/O memory ops */
+
+static unsigned int iomem_read8(void __iomem *addr)
+{
+	return readb(addr);
+}
+
+static unsigned int iomem_read16(void __iomem *addr)
+{
+	return readw(addr);
+}
+
+static unsigned int iomem_read16be(void __iomem *addr)
+{
+	return __raw_readw(addr);
+}
+
+static unsigned int iomem_read32(void __iomem *addr)
+{
+	return readl(addr);
+}
+
+static unsigned int iomem_read32be(void __iomem *addr)
+{
+	return __raw_readl(addr);
+}
+
+static void iomem_write8(u8 datum, void __iomem *addr)
+{
+	writeb(datum, addr);
+}
+
+static void iomem_write16(u16 datum, void __iomem *addr)
+{
+	writew(datum, addr);
+}
+
+static void iomem_write16be(u16 datum, void __iomem *addr)
+{
+	__raw_writew(datum, addr);
+}
+
+static void iomem_write32(u32 datum, void __iomem *addr)
+{
+	writel(datum, addr);
+}
+
+static void iomem_write32be(u32 datum, void __iomem *addr)
+{
+	__raw_writel(datum, addr);
+}
+
+static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count)
+{
+	while (count--) {
+		*(u8 *)dst = __raw_readb(addr);
+		dst++;
+	}
+}
+
+static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count)
+{
+	while (count--) {
+		*(u16 *)dst = __raw_readw(addr);
+		dst += 2;
+	}
+}
+
+static void iomem_read32r(void __iomem *addr, void *dst, unsigned long count)
+{
+	while (count--) {
+		*(u32 *)dst = __raw_readl(addr);
+		dst += 4;
+	}
+}
+
+static void iomem_write8r(void __iomem *addr, const void *s, unsigned long n)
+{
+	while (n--) {
+		__raw_writeb(*(u8 *)s, addr);
+		s++;
+	}
+}
+
+static void iomem_write16r(void __iomem *addr, const void *s, unsigned long n)
+{
+	while (n--) {
+		__raw_writew(*(u16 *)s, addr);
+		s += 2;
+	}
+}
+
+static void iomem_write32r(void __iomem *addr, const void *s, unsigned long n)
+{
+	while (n--) {
+		__raw_writel(*(u32 *)s, addr);
+		s += 4;
+	}
+}
+
+static const struct iomap_ops iomem_ops = {
+	.read8 = iomem_read8,
+	.read16 = iomem_read16,
+	.read16be = iomem_read16be,
+	.read32 = iomem_read32,
+	.read32be = iomem_read32be,
+	.write8 = iomem_write8,
+	.write16 = iomem_write16,
+	.write16be = iomem_write16be,
+	.write32 = iomem_write32,
+	.write32be = iomem_write32be,
+	.read8r = iomem_read8r,
+	.read16r = iomem_read16r,
+	.read32r = iomem_read32r,
+	.write8r = iomem_write8r,
+	.write16r = iomem_write16r,
+	.write32r = iomem_write32r,
+};
+
+static const struct iomap_ops *iomap_ops[8] = {
+	[0] = &ioport_ops,
+	[7] = &iomem_ops
+};
+
+
+unsigned int ioread8(void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr)))
+		return iomap_ops[ADDR_TO_REGION(addr)]->read8(addr);
+	return *((u8 *)addr);
+}
+
+unsigned int ioread16(void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr)))
+		return iomap_ops[ADDR_TO_REGION(addr)]->read16(addr);
+	return le16_to_cpup((u16 *)addr);
+}
+
+unsigned int ioread16be(void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr)))
+		return iomap_ops[ADDR_TO_REGION(addr)]->read16be(addr);
+	return *((u16 *)addr);
+}
+
+unsigned int ioread32(void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr)))
+		return iomap_ops[ADDR_TO_REGION(addr)]->read32(addr);
+	return le32_to_cpup((u32 *)addr);
+}
+
+unsigned int ioread32be(void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr)))
+		return iomap_ops[ADDR_TO_REGION(addr)]->read32be(addr);
+	return *((u32 *)addr);
+}
+
+void iowrite8(u8 datum, void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write8(datum, addr);
+	} else {
+		*((u8 *)addr) = datum;
+	}
+}
+
+void iowrite16(u16 datum, void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write16(datum, addr);
+	} else {
+		*((u16 *)addr) = cpu_to_le16(datum);
+	}
+}
+
+void iowrite16be(u16 datum, void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write16be(datum, addr);
+	} else {
+		*((u16 *)addr) = datum;
+	}
+}
+
+void iowrite32(u32 datum, void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write32(datum, addr);
+	} else {
+		*((u32 *)addr) = cpu_to_le32(datum);
+	}
+}
+
+void iowrite32be(u32 datum, void __iomem *addr)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write32be(datum, addr);
+	} else {
+		*((u32 *)addr) = datum;
+	}
+}
+
+/* Repeating interfaces */
+
+void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->read8r(addr, dst, count);
+	} else {
+		while (count--) {
+			*(u8 *)dst = *(u8 *)addr;
+			dst++;
+		}
+	}
+}
+
+void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->read16r(addr, dst, count);
+	} else {
+		while (count--) {
+			*(u16 *)dst = *(u16 *)addr;
+			dst += 2;
+		}
+	}
+}
+
+void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->read32r(addr, dst, count);
+	} else {
+		while (count--) {
+			*(u32 *)dst = *(u32 *)addr;
+			dst += 4;
+		}
+	}
+}
+
+void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write8r(addr, src, count);
+	} else {
+		while (count--) {
+			*(u8 *)addr = *(u8 *)src;
+			src++;
+		}
+	}
+}
+
+void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write16r(addr, src, count);
+	} else {
+		while (count--) {
+			*(u16 *)addr = *(u16 *)src;
+			src += 2;
+		}
+	}
+}
+
+void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+	if (unlikely(INDIRECT_ADDR(addr))) {
+		iomap_ops[ADDR_TO_REGION(addr)]->write32r(addr, src, count);
+	} else {
+		while (count--) {
+			*(u32 *)addr = *(u32 *)src;
+			src += 4;
+		}
+	}
+}
+
+/* Mapping interfaces */
+
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+	return (void __iomem *)(IOPORT_MAP_BASE | port);
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+	if (!INDIRECT_ADDR(addr)) {
+		iounmap(addr);
+	}
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+	if (!INDIRECT_ADDR(addr)) {
+		iounmap(addr);
+	}
+}
+
+EXPORT_SYMBOL(ioread8);
+EXPORT_SYMBOL(ioread16);
+EXPORT_SYMBOL(ioread16be);
+EXPORT_SYMBOL(ioread32);
+EXPORT_SYMBOL(ioread32be);
+EXPORT_SYMBOL(iowrite8);
+EXPORT_SYMBOL(iowrite16);
+EXPORT_SYMBOL(iowrite16be);
+EXPORT_SYMBOL(iowrite32);
+EXPORT_SYMBOL(iowrite32be);
+EXPORT_SYMBOL(ioread8_rep);
+EXPORT_SYMBOL(ioread16_rep);
+EXPORT_SYMBOL(ioread32_rep);
+EXPORT_SYMBOL(iowrite8_rep);
+EXPORT_SYMBOL(iowrite16_rep);
+EXPORT_SYMBOL(iowrite32_rep);
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+EXPORT_SYMBOL(pci_iounmap);
diff --git a/src/kernel/linux/v4.14/arch/parisc/lib/lusercopy.S b/src/kernel/linux/v4.14/arch/parisc/lib/lusercopy.S
new file mode 100644
index 0000000..d4fe198
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/lib/lusercopy.S
@@ -0,0 +1,447 @@
+/*
+ *    User Space Access Routines
+ *
+ *    Copyright (C) 2000-2002 Hewlett-Packard (John Marvin)
+ *    Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
+ *    Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
+ *    Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
+ *    Copyright (C) 2017 Helge Deller <deller@gmx.de>
+ *    Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
+ *
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/*
+ * These routines still have plenty of room for optimization
+ * (word & doubleword load/store, dual issue, store hints, etc.).
+ */
+
+/*
+ * The following routines assume that space register 3 (sr3) contains
+ * the space id associated with the current users address space.
+ */
+
+
+	.text
+	
+#include <asm/assembly.h>
+#include <asm/errno.h>
+#include <linux/linkage.h>
+
+	/*
+	 * get_sr gets the appropriate space value into
+	 * sr1 for kernel/user space access, depending
+	 * on the flag stored in the task structure.
+	 */
+
+	.macro  get_sr
+	mfctl       %cr30,%r1
+	ldw         TI_SEGMENT(%r1),%r22
+	mfsp        %sr3,%r1
+	or,<>       %r22,%r0,%r0
+	copy        %r0,%r1
+	mtsp        %r1,%sr1
+	.endm
+
+	/*
+	 * unsigned long lclear_user(void *to, unsigned long n)
+	 *
+	 * Returns 0 for success.
+	 * otherwise, returns number of bytes not transferred.
+	 */
+
+ENTRY_CFI(lclear_user)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+	comib,=,n   0,%r25,$lclu_done
+	get_sr
+$lclu_loop:
+	addib,<>    -1,%r25,$lclu_loop
+1:      stbs,ma     %r0,1(%sr1,%r26)
+
+$lclu_done:
+	bv          %r0(%r2)
+	copy        %r25,%r28
+
+2:	b           $lclu_done
+	ldo         1(%r25),%r25
+
+	ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
+
+	.exit
+ENDPROC_CFI(lclear_user)
+
+
+	.procend
+
+	/*
+	 * long lstrnlen_user(char *s, long n)
+	 *
+	 * Returns 0 if exception before zero byte or reaching N,
+	 *         N+1 if N would be exceeded,
+	 *         else strlen + 1 (i.e. includes zero byte).
+	 */
+
+ENTRY_CFI(lstrnlen_user)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+	comib,=     0,%r25,$lslen_nzero
+	copy	    %r26,%r24
+	get_sr
+1:      ldbs,ma     1(%sr1,%r26),%r1
+$lslen_loop:
+	comib,=,n   0,%r1,$lslen_done
+	addib,<>    -1,%r25,$lslen_loop
+2:      ldbs,ma     1(%sr1,%r26),%r1
+$lslen_done:
+	bv          %r0(%r2)
+	sub	    %r26,%r24,%r28
+	.exit
+
+$lslen_nzero:
+	b           $lslen_done
+	ldo         1(%r26),%r26 /* special case for N == 0 */
+
+3:      b	    $lslen_done
+	copy        %r24,%r26    /* reset r26 so 0 is returned on fault */
+
+	ASM_EXCEPTIONTABLE_ENTRY(1b,3b)
+	ASM_EXCEPTIONTABLE_ENTRY(2b,3b)
+
+ENDPROC_CFI(lstrnlen_user)
+
+	.procend
+
+
+
+/*
+ * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+ *
+ * Inputs:
+ * - sr1 already contains space of source region
+ * - sr2 already contains space of destination region
+ *
+ * Returns:
+ * - number of bytes that could not be copied.
+ *   On success, this will be zero.
+ *
+ * This code is based on a C-implementation of a copy routine written by
+ * Randolph Chung, which in turn was derived from the glibc.
+ *
+ * Several strategies are tried to try to get the best performance for various
+ * conditions. In the optimal case, we copy by loops that copy 32- or 16-bytes
+ * at a time using general registers.  Unaligned copies are handled either by
+ * aligning the destination and then using shift-and-write method, or in a few
+ * cases by falling back to a byte-at-a-time copy.
+ *
+ * Testing with various alignments and buffer sizes shows that this code is
+ * often >10x faster than a simple byte-at-a-time copy, even for strangely
+ * aligned operands. It is interesting to note that the glibc version of memcpy
+ * (written in C) is actually quite fast already. This routine is able to beat
+ * it by 30-40% for aligned copies because of the loop unrolling, but in some
+ * cases the glibc version is still slightly faster. This lends more
+ * credibility that gcc can generate very good code as long as we are careful.
+ *
+ * Possible optimizations:
+ * - add cache prefetching
+ * - try not to use the post-increment address modifiers; they may create
+ *   additional interlocks. Assumption is that those were only efficient on old
+ *   machines (pre PA8000 processors)
+ */
+
+	dst = arg0
+	src = arg1
+	len = arg2
+	end = arg3
+	t1  = r19
+	t2  = r20
+	t3  = r21
+	t4  = r22
+	srcspc = sr1
+	dstspc = sr2
+
+	t0 = r1
+	a1 = t1
+	a2 = t2
+	a3 = t3
+	a0 = t4
+
+	save_src = ret0
+	save_dst = ret1
+	save_len = r31
+
+ENTRY_CFI(pa_memcpy)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	/* Last destination address */
+	add	dst,len,end
+
+	/* short copy with less than 16 bytes? */
+	cmpib,COND(>>=),n 15,len,.Lbyte_loop
+
+	/* same alignment? */
+	xor	src,dst,t0
+	extru	t0,31,2,t1
+	cmpib,<>,n  0,t1,.Lunaligned_copy
+
+#ifdef CONFIG_64BIT
+	/* only do 64-bit copies if we can get aligned. */
+	extru	t0,31,3,t1
+	cmpib,<>,n  0,t1,.Lalign_loop32
+
+	/* loop until we are 64-bit aligned */
+.Lalign_loop64:
+	extru	dst,31,3,t1
+	cmpib,=,n	0,t1,.Lcopy_loop_16_start
+20:	ldb,ma	1(srcspc,src),t1
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lalign_loop64
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_loop_16_start:
+	ldi	31,t0
+.Lcopy_loop_16:
+	cmpb,COND(>>=),n t0,len,.Lword_loop
+
+10:	ldd	0(srcspc,src),t1
+11:	ldd	8(srcspc,src),t2
+	ldo	16(src),src
+12:	std,ma	t1,8(dstspc,dst)
+13:	std,ma	t2,8(dstspc,dst)
+14:	ldd	0(srcspc,src),t1
+15:	ldd	8(srcspc,src),t2
+	ldo	16(src),src
+16:	std,ma	t1,8(dstspc,dst)
+17:	std,ma	t2,8(dstspc,dst)
+
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+	b	.Lcopy_loop_16
+	ldo	-32(len),len
+
+.Lword_loop:
+	cmpib,COND(>>=),n 3,len,.Lbyte_loop
+20:	ldw,ma	4(srcspc,src),t1
+21:	stw,ma	t1,4(dstspc,dst)
+	b	.Lword_loop
+	ldo	-4(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+#endif /* CONFIG_64BIT */
+
+	/* loop until we are 32-bit aligned */
+.Lalign_loop32:
+	extru	dst,31,2,t1
+	cmpib,=,n	0,t1,.Lcopy_loop_8
+20:	ldb,ma	1(srcspc,src),t1
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lalign_loop32
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+
+.Lcopy_loop_8:
+	cmpib,COND(>>=),n 15,len,.Lbyte_loop
+
+10:	ldw	0(srcspc,src),t1
+11:	ldw	4(srcspc,src),t2
+12:	stw,ma	t1,4(dstspc,dst)
+13:	stw,ma	t2,4(dstspc,dst)
+14:	ldw	8(srcspc,src),t1
+15:	ldw	12(srcspc,src),t2
+	ldo	16(src),src
+16:	stw,ma	t1,4(dstspc,dst)
+17:	stw,ma	t2,4(dstspc,dst)
+
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+	b	.Lcopy_loop_8
+	ldo	-16(len),len
+
+.Lbyte_loop:
+	cmpclr,COND(<>) len,%r0,%r0
+	b,n	.Lcopy_done
+20:	ldb	0(srcspc,src),t1
+	ldo	1(src),src
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lbyte_loop
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_done:
+	bv	%r0(%r2)
+	sub	end,dst,ret0
+
+
+	/* src and dst are not aligned the same way. */
+	/* need to go the hard way */
+.Lunaligned_copy:
+	/* align until dst is 32bit-word-aligned */
+	extru	dst,31,2,t1
+	cmpib,=,n	0,t1,.Lcopy_dstaligned
+20:	ldb	0(srcspc,src),t1
+	ldo	1(src),src
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lunaligned_copy
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_dstaligned:
+
+	/* store src, dst and len in safe place */
+	copy	src,save_src
+	copy	dst,save_dst
+	copy	len,save_len
+
+	/* len now needs to give the number of words to copy */
+	SHRREG	len,2,len
+
+	/*
+	 * Copy from a not-aligned src to an aligned dst using shifts.
+	 * Handles 4 words per loop.
+	 */
+
+	depw,z src,28,2,t0
+	subi 32,t0,t0
+	mtsar t0
+	extru len,31,2,t0
+	cmpib,= 2,t0,.Lcase2
+	/* Make src aligned by rounding it down.  */
+	depi 0,31,2,src
+
+	cmpiclr,<> 3,t0,%r0
+	b,n .Lcase3
+	cmpiclr,<> 1,t0,%r0
+	b,n .Lcase1
+.Lcase0:
+	cmpb,COND(=) %r0,len,.Lcda_finish
+	nop
+
+1:	ldw,ma 4(srcspc,src), a3
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a0
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	b,n .Ldo3
+.Lcase1:
+1:	ldw,ma 4(srcspc,src), a2
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a3
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	ldo -1(len),len
+	cmpb,COND(=),n %r0,len,.Ldo0
+.Ldo4:
+1:	ldw,ma 4(srcspc,src), a0
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a2, a3, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo3:
+1:	ldw,ma 4(srcspc,src), a1
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a3, a0, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo2:
+1:	ldw,ma 4(srcspc,src), a2
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a0, a1, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo1:
+1:	ldw,ma 4(srcspc,src), a3
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a1, a2, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+	ldo -4(len),len
+	cmpb,COND(<>) %r0,len,.Ldo4
+	nop
+.Ldo0:
+	shrpw a2, a3, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+
+.Lcda_rdfault:
+.Lcda_finish:
+	/* calculate new src, dst and len and jump to byte-copy loop */
+	sub	dst,save_dst,t0
+	add	save_src,t0,src
+	b	.Lbyte_loop
+	sub	save_len,t0,len
+
+.Lcase3:
+1:	ldw,ma 4(srcspc,src), a0
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a1
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	b .Ldo2
+	ldo 1(len),len
+.Lcase2:
+1:	ldw,ma 4(srcspc,src), a1
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a2
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	b .Ldo1
+	ldo 2(len),len
+
+
+	/* fault exception fixup handlers: */
+#ifdef CONFIG_64BIT
+.Lcopy16_fault:
+	b	.Lcopy_done
+10:	std,ma	t1,8(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+#endif
+
+.Lcopy8_fault:
+	b	.Lcopy_done
+10:	stw,ma	t1,4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+
+	.exit
+ENDPROC_CFI(pa_memcpy)
+	.procend
+
+	.end
diff --git a/src/kernel/linux/v4.14/arch/parisc/lib/memcpy.c b/src/kernel/linux/v4.14/arch/parisc/lib/memcpy.c
new file mode 100644
index 0000000..865a7f7
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/lib/memcpy.c
@@ -0,0 +1,84 @@
+/*
+ *    Optimized memory copy routines.
+ *
+ *    Copyright (C) 2004 Randolph Chung <tausq@debian.org>
+ *    Copyright (C) 2013-2017 Helge Deller <deller@gmx.de>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Portions derived from the GNU C Library
+ *    Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/uaccess.h>
+
+#define get_user_space() (uaccess_kernel() ? 0 : mfsp(3))
+#define get_kernel_space() (0)
+
+/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
+extern unsigned long pa_memcpy(void *dst, const void *src,
+				unsigned long len);
+
+unsigned long raw_copy_to_user(void __user *dst, const void *src,
+			       unsigned long len)
+{
+	mtsp(get_kernel_space(), 1);
+	mtsp(get_user_space(), 2);
+	return pa_memcpy((void __force *)dst, src, len);
+}
+EXPORT_SYMBOL(raw_copy_to_user);
+
+unsigned long raw_copy_from_user(void *dst, const void __user *src,
+			       unsigned long len)
+{
+	mtsp(get_user_space(), 1);
+	mtsp(get_kernel_space(), 2);
+	return pa_memcpy(dst, (void __force *)src, len);
+}
+EXPORT_SYMBOL(raw_copy_from_user);
+
+unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long len)
+{
+	mtsp(get_user_space(), 1);
+	mtsp(get_user_space(), 2);
+	return pa_memcpy((void __force *)dst, (void __force *)src, len);
+}
+
+
+void * memcpy(void * dst,const void *src, size_t count)
+{
+	mtsp(get_kernel_space(), 1);
+	mtsp(get_kernel_space(), 2);
+	pa_memcpy(dst, src, count);
+	return dst;
+}
+
+EXPORT_SYMBOL(raw_copy_in_user);
+EXPORT_SYMBOL(memcpy);
+
+long probe_kernel_read(void *dst, const void *src, size_t size)
+{
+	unsigned long addr = (unsigned long)src;
+
+	if (addr < PAGE_SIZE)
+		return -EFAULT;
+
+	/* check for I/O space F_EXTEND(0xfff00000) access as well? */
+
+	return __probe_kernel_read(dst, src, size);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/lib/memset.c b/src/kernel/linux/v4.14/arch/parisc/lib/memset.c
new file mode 100644
index 0000000..1d7929b
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/lib/memset.c
@@ -0,0 +1,91 @@
+/* Copyright (C) 1991, 1997 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Slight modifications for pa-risc linux - Paul Bame <bame@debian.org> */
+
+#include <linux/types.h>
+#include <asm/string.h>
+
+#define OPSIZ (BITS_PER_LONG/8)
+typedef unsigned long op_t;
+
+void *
+memset (void *dstpp, int sc, size_t len)
+{
+  unsigned int c = sc;
+  long int dstp = (long int) dstpp;
+
+  if (len >= 8)
+    {
+      size_t xlen;
+      op_t cccc;
+
+      cccc = (unsigned char) c;
+      cccc |= cccc << 8;
+      cccc |= cccc << 16;
+      if (OPSIZ > 4)
+	/* Do the shift in two steps to avoid warning if long has 32 bits.  */
+	cccc |= (cccc << 16) << 16;
+
+      /* There are at least some bytes to set.
+	 No need to test for LEN == 0 in this alignment loop.  */
+      while (dstp % OPSIZ != 0)
+	{
+	  ((unsigned char *) dstp)[0] = c;
+	  dstp += 1;
+	  len -= 1;
+	}
+
+      /* Write 8 `op_t' per iteration until less than 8 `op_t' remain.  */
+      xlen = len / (OPSIZ * 8);
+      while (xlen > 0)
+	{
+	  ((op_t *) dstp)[0] = cccc;
+	  ((op_t *) dstp)[1] = cccc;
+	  ((op_t *) dstp)[2] = cccc;
+	  ((op_t *) dstp)[3] = cccc;
+	  ((op_t *) dstp)[4] = cccc;
+	  ((op_t *) dstp)[5] = cccc;
+	  ((op_t *) dstp)[6] = cccc;
+	  ((op_t *) dstp)[7] = cccc;
+	  dstp += 8 * OPSIZ;
+	  xlen -= 1;
+	}
+      len %= OPSIZ * 8;
+
+      /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain.  */
+      xlen = len / OPSIZ;
+      while (xlen > 0)
+	{
+	  ((op_t *) dstp)[0] = cccc;
+	  dstp += OPSIZ;
+	  xlen -= 1;
+	}
+      len %= OPSIZ;
+    }
+
+  /* Write the last few bytes.  */
+  while (len > 0)
+    {
+      ((unsigned char *) dstp)[0] = c;
+      dstp += 1;
+      len -= 1;
+    }
+
+  return dstpp;
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/lib/ucmpdi2.c b/src/kernel/linux/v4.14/arch/parisc/lib/ucmpdi2.c
new file mode 100644
index 0000000..8e6014a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/lib/ucmpdi2.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+
+union ull_union {
+	unsigned long long ull;
+	struct {
+		unsigned int high;
+		unsigned int low;
+	} ui;
+};
+
+int __ucmpdi2(unsigned long long a, unsigned long long b)
+{
+	union ull_union au = {.ull = a};
+	union ull_union bu = {.ull = b};
+
+	if (au.ui.high < bu.ui.high)
+		return 0;
+	else if (au.ui.high > bu.ui.high)
+		return 2;
+	if (au.ui.low < bu.ui.low)
+		return 0;
+	else if (au.ui.low > bu.ui.low)
+		return 2;
+	return 1;
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/Makefile b/src/kernel/linux/v4.14/arch/parisc/math-emu/Makefile
new file mode 100644
index 0000000..b6c4b25
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux/parisc floating point code
+#
+
+# See arch/parisc/math-emu/README
+ccflags-y := -Wno-parentheses -Wno-implicit-function-declaration \
+	-Wno-uninitialized -Wno-strict-prototypes -Wno-return-type \
+	-Wno-implicit-int
+
+obj-y	 := frnd.o driver.o decode_exc.o fpudispatch.o denormal.o \
+		dfmpy.o sfmpy.o sfsqrt.o dfsqrt.o dfadd.o fmpyfadd.o \
+		sfadd.o dfsub.o sfsub.o fcnvfxt.o fcnvff.o fcnvxf.o \
+		fcnvfx.o fcnvuf.o fcnvfu.o fcnvfut.o dfdiv.o sfdiv.o \
+		dfrem.o sfrem.o dfcmp.o sfcmp.o
+
+# Math emulation code beyond the FRND is required for 712/80i and
+# other very old or stripped-down PA-RISC CPUs -- not currently supported
+
+obj-$(CONFIG_MATH_EMULATION)	+= unimplemented-math-emulation.o
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/README b/src/kernel/linux/v4.14/arch/parisc/math-emu/README
new file mode 100644
index 0000000..1a0124e
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/README
@@ -0,0 +1,11 @@
+All files except driver.c are snapshots from the HP-UX kernel.  They've
+been modified as little as possible.  Even though they don't fit the
+Linux coding style, please leave them in their funny format just in case
+someone in the future, with access to HP-UX source code, is generous
+enough to update our copies with later changes from HP-UX -- it'll
+make their 'diff' job easier if our code is relatively unmodified.
+
+Required Disclaimer: Hewlett-Packard makes no implied or expressed
+warranties about this code nor any promises to maintain or test it
+in any way.  This copy of this snapshot is no longer the property
+of Hewlett-Packard.
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/cnv_float.h b/src/kernel/linux/v4.14/arch/parisc/math-emu/cnv_float.h
new file mode 100644
index 0000000..b0db611
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/cnv_float.h
@@ -0,0 +1,376 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifdef __NO_PA_HDRS
+    PA header file -- do not include this header file for non-PA builds.
+#endif
+
+/*
+ * Some more constants
+ */
+#define SGL_FX_MAX_EXP 30
+#define DBL_FX_MAX_EXP 62
+#define QUAD_FX_MAX_EXP 126
+
+#define Dintp1(object) (object)
+#define Dintp2(object) (object)
+
+#define Duintp1(object) (object)
+#define Duintp2(object) (object)
+
+#define Qintp0(object) (object)
+#define Qintp1(object) (object)
+#define Qintp2(object) (object)
+#define Qintp3(object) (object)
+
+
+/*
+ * These macros will be used specifically by the convert instructions.
+ *
+ *
+ * Single format macros
+ */
+
+#define Sgl_to_dbl_exponent(src_exponent,dest)			\
+    Deposit_dexponent(dest,src_exponent+(DBL_BIAS-SGL_BIAS))
+
+#define Sgl_to_dbl_mantissa(src_mantissa,destA,destB)	\
+    Deposit_dmantissap1(destA,src_mantissa>>3);		\
+    Dmantissap2(destB) = src_mantissa << 29
+
+#define Sgl_isinexact_to_fix(sgl_value,exponent)	\
+    ((exponent < (SGL_P - 1)) ?				\
+     (Sall(sgl_value) << (SGL_EXP_LENGTH + 1 + exponent)) : FALSE)
+
+#define Int_isinexact_to_sgl(int_value)	((int_value << 33 - SGL_EXP_LENGTH) != 0)
+
+#define Sgl_roundnearest_from_int(int_value,sgl_value)			\
+    if (int_value & 1<<(SGL_EXP_LENGTH - 2))   /* round bit */		\
+	if (((int_value << 34 - SGL_EXP_LENGTH) != 0) || Slow(sgl_value)) \
+		Sall(sgl_value)++
+
+#define Dint_isinexact_to_sgl(dint_valueA,dint_valueB)		\
+    (((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) != 0) || Dintp2(dint_valueB))
+
+#define Sgl_roundnearest_from_dint(dint_valueA,dint_valueB,sgl_value)	\
+    if (Dintp1(dint_valueA) & 1<<(SGL_EXP_LENGTH - 2)) 			\
+	if (((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) != 0) ||	\
+    	Dintp2(dint_valueB) || Slow(sgl_value)) Sall(sgl_value)++
+
+#define Dint_isinexact_to_dbl(dint_value) 	\
+    (Dintp2(dint_value) << 33 - DBL_EXP_LENGTH)
+
+#define Dbl_roundnearest_from_dint(dint_opndB,dbl_opndA,dbl_opndB) 	\
+    if (Dintp2(dint_opndB) & 1<<(DBL_EXP_LENGTH - 2))			\
+       if ((Dintp2(dint_opndB) << 34 - DBL_EXP_LENGTH) || Dlowp2(dbl_opndB))  \
+          if ((++Dallp2(dbl_opndB))==0) Dallp1(dbl_opndA)++
+
+#define Sgl_isone_roundbit(sgl_value,exponent)			\
+    ((Sall(sgl_value) << (SGL_EXP_LENGTH + 1 + exponent)) >> 31)
+
+#define Sgl_isone_stickybit(sgl_value,exponent)		\
+    (exponent < (SGL_P - 2) ?				\
+     Sall(sgl_value) << (SGL_EXP_LENGTH + 2 + exponent) : FALSE)
+
+
+/* 
+ * Double format macros
+ */
+
+#define Dbl_to_sgl_exponent(src_exponent,dest)			\
+    dest = src_exponent + (SGL_BIAS - DBL_BIAS)
+
+#define Dbl_to_sgl_mantissa(srcA,srcB,dest,inexact,guard,sticky,odd)	\
+    Shiftdouble(Dmantissap1(srcA),Dmantissap2(srcB),29,dest); 	\
+    guard = Dbit3p2(srcB);					\
+    sticky = Dallp2(srcB)<<4;					\
+    inexact = guard | sticky;					\
+    odd = Dbit2p2(srcB)
+
+#define Dbl_to_sgl_denormalized(srcA,srcB,exp,dest,inexact,guard,sticky,odd,tiny) \
+    Deposit_dexponent(srcA,1);						\
+    tiny = TRUE;							\
+    if (exp >= -2) {							\
+	if (exp == 0) {							\
+	    inexact = Dallp2(srcB) << 3;				\
+	    guard = inexact >> 31;					\
+	    sticky = inexact << 1;					\
+	    Shiftdouble(Dmantissap1(srcA),Dmantissap2(srcB),29,dest);	\
+	    odd = dest << 31;						\
+	    if (inexact) {						\
+		switch(Rounding_mode()) {				\
+		    case ROUNDPLUS:					\
+			if (Dbl_iszero_sign(srcA)) {			\
+			    dest++;					\
+			    if (Sgl_isone_hidden(dest))	\
+				tiny = FALSE;				\
+			    dest--;					\
+			}						\
+			break;						\
+		    case ROUNDMINUS:					\
+			if (Dbl_isone_sign(srcA)) {			\
+			    dest++;					\
+			    if (Sgl_isone_hidden(dest))	\
+				tiny = FALSE;				\
+			    dest--;					\
+			}						\
+			break;						\
+		    case ROUNDNEAREST:					\
+			if (guard && (sticky || odd)) {			\
+			    dest++;					\
+			    if (Sgl_isone_hidden(dest))	\
+				tiny = FALSE;				\
+			    dest--;					\
+			}						\
+			break;						\
+		}							\
+	    }								\
+		/* shift right by one to get correct result */		\
+		guard = odd;						\
+		sticky = inexact;					\
+		inexact |= guard;					\
+		dest >>= 1;						\
+    		Deposit_dsign(srcA,0);					\
+    	        Shiftdouble(Dallp1(srcA),Dallp2(srcB),30,dest);		\
+	        odd = dest << 31;					\
+	}								\
+	else {								\
+    	    inexact = Dallp2(srcB) << (2 + exp);			\
+    	    guard = inexact >> 31;					\
+    	    sticky = inexact << 1; 					\
+    	    Deposit_dsign(srcA,0);					\
+    	    if (exp == -2) dest = Dallp1(srcA);				\
+    	    else Variable_shift_double(Dallp1(srcA),Dallp2(srcB),30-exp,dest); \
+    	    odd = dest << 31;						\
+	}								\
+    }									\
+    else {								\
+    	Deposit_dsign(srcA,0);						\
+    	if (exp > (1 - SGL_P)) {					\
+    	    dest = Dallp1(srcA) >> (- 2 - exp);				\
+    	    inexact = Dallp1(srcA) << (34 + exp);			\
+    	    guard = inexact >> 31;					\
+    	    sticky = (inexact << 1) | Dallp2(srcB);			\
+    	    inexact |= Dallp2(srcB); 					\
+    	    odd = dest << 31;						\
+    	}								\
+    	else {								\
+    	    dest = 0;							\
+    	    inexact = Dallp1(srcA) | Dallp2(srcB);			\
+    	    if (exp == (1 - SGL_P)) {					\
+    	    	guard = Dhidden(srcA);					\
+    	    	sticky = Dmantissap1(srcA) | Dallp2(srcB); 		\
+    	    }								\
+    	    else {							\
+    	    	guard = 0;						\
+    	    	sticky = inexact;					\
+    	    }								\
+    	    odd = 0;							\
+    	}								\
+    }									\
+    exp = 0
+
+#define Dbl_isinexact_to_fix(dbl_valueA,dbl_valueB,exponent)		\
+    (exponent < (DBL_P-33) ? 						\
+     Dallp2(dbl_valueB) || Dallp1(dbl_valueA) << (DBL_EXP_LENGTH+1+exponent) : \
+     (exponent < (DBL_P-1) ? Dallp2(dbl_valueB) << (exponent + (33-DBL_P)) :   \
+      FALSE))
+
+#define Dbl_isoverflow_to_int(exponent,dbl_valueA,dbl_valueB)		\
+    ((exponent > SGL_FX_MAX_EXP + 1) || Dsign(dbl_valueA)==0 ||		\
+     Dmantissap1(dbl_valueA)!=0 || (Dallp2(dbl_valueB)>>21)!=0 ) 
+
+#define Dbl_isone_roundbit(dbl_valueA,dbl_valueB,exponent)              \
+    ((exponent < (DBL_P - 33) ?						\
+      Dallp1(dbl_valueA) >> ((30 - DBL_EXP_LENGTH) - exponent) :	\
+      Dallp2(dbl_valueB) >> ((DBL_P - 2) - exponent)) & 1)
+
+#define Dbl_isone_stickybit(dbl_valueA,dbl_valueB,exponent)		\
+    (exponent < (DBL_P-34) ? 						\
+     (Dallp2(dbl_valueB) || Dallp1(dbl_valueA)<<(DBL_EXP_LENGTH+2+exponent)) : \
+     (exponent<(DBL_P-2) ? (Dallp2(dbl_valueB) << (exponent + (34-DBL_P))) : \
+      FALSE))
+
+
+/* Int macros */
+
+#define Int_from_sgl_mantissa(sgl_value,exponent)	\
+    Sall(sgl_value) = 				\
+    	(unsigned)(Sall(sgl_value) << SGL_EXP_LENGTH)>>(31 - exponent)
+
+#define Int_from_dbl_mantissa(dbl_valueA,dbl_valueB,exponent)	\
+    Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),22,Dallp1(dbl_valueA)); \
+    if (exponent < 31) Dallp1(dbl_valueA) >>= 30 - exponent;	\
+    else Dallp1(dbl_valueA) <<= 1
+
+#define Int_negate(int_value) int_value = -int_value
+
+
+/* Dint macros */
+
+#define Dint_from_sgl_mantissa(sgl_value,exponent,dresultA,dresultB)	\
+    {Sall(sgl_value) <<= SGL_EXP_LENGTH;  /*  left-justify  */		\
+    if (exponent <= 31) {						\
+    	Dintp1(dresultA) = 0;						\
+    	Dintp2(dresultB) = (unsigned)Sall(sgl_value) >> (31 - exponent); \
+    }									\
+    else {								\
+    	Dintp1(dresultA) = Sall(sgl_value) >> (63 - exponent);		\
+    	Dintp2(dresultB) = Sall(sgl_value) << (exponent - 31);		\
+    }}
+
+
+#define Dint_from_dbl_mantissa(dbl_valueA,dbl_valueB,exponent,destA,destB) \
+    {if (exponent < 32) {						\
+    	Dintp1(destA) = 0;						\
+    	if (exponent <= 20)						\
+    	    Dintp2(destB) = Dallp1(dbl_valueA) >> 20-exponent;		\
+    	else Variable_shift_double(Dallp1(dbl_valueA),Dallp2(dbl_valueB), \
+	     52-exponent,Dintp2(destB));					\
+    }									\
+    else {								\
+    	if (exponent <= 52) {						\
+    	    Dintp1(destA) = Dallp1(dbl_valueA) >> 52-exponent;		\
+	    if (exponent == 52) Dintp2(destB) = Dallp2(dbl_valueB);	\
+	    else Variable_shift_double(Dallp1(dbl_valueA),Dallp2(dbl_valueB), \
+	    52-exponent,Dintp2(destB));					\
+        }								\
+    	else {								\
+    	    Variable_shift_double(Dallp1(dbl_valueA),Dallp2(dbl_valueB), \
+	    84-exponent,Dintp1(destA));					\
+    	    Dintp2(destB) = Dallp2(dbl_valueB) << exponent-52;		\
+    	}								\
+    }}
+
+#define Dint_setzero(dresultA,dresultB) 	\
+    Dintp1(dresultA) = 0; 	\
+    Dintp2(dresultB) = 0
+
+#define Dint_setone_sign(dresultA,dresultB)		\
+    Dintp1(dresultA) = ~Dintp1(dresultA);		\
+    if ((Dintp2(dresultB) = -Dintp2(dresultB)) == 0) Dintp1(dresultA)++
+
+#define Dint_set_minint(dresultA,dresultB)		\
+    Dintp1(dresultA) = (unsigned int)1<<31;		\
+    Dintp2(dresultB) = 0
+
+#define Dint_isone_lowp2(dresultB)  (Dintp2(dresultB) & 01)
+
+#define Dint_increment(dresultA,dresultB) 		\
+    if ((++Dintp2(dresultB))==0) Dintp1(dresultA)++
+
+#define Dint_decrement(dresultA,dresultB) 		\
+    if ((Dintp2(dresultB)--)==0) Dintp1(dresultA)--
+
+#define Dint_negate(dresultA,dresultB)			\
+    Dintp1(dresultA) = ~Dintp1(dresultA);		\
+    if ((Dintp2(dresultB) = -Dintp2(dresultB))==0) Dintp1(dresultA)++
+
+#define Dint_copyfromptr(src,destA,destB) \
+     Dintp1(destA) = src->wd0;		\
+     Dintp2(destB) = src->wd1
+#define Dint_copytoptr(srcA,srcB,dest)	\
+    dest->wd0 = Dintp1(srcA);		\
+    dest->wd1 = Dintp2(srcB)
+
+
+/* other macros  */
+
+#define Find_ms_one_bit(value, position)	\
+    {						\
+	int var;				\
+	for (var=8; var >=1; var >>= 1) {	\
+	    if (value >> 32 - position)		\
+		position -= var;		\
+		else position += var;		\
+	}					\
+	if ((value >> 32 - position) == 0)	\
+	    position--;				\
+	else position -= 2;			\
+    }
+
+
+/*
+ * Unsigned int macros
+ */
+#define Duint_copyfromptr(src,destA,destB) \
+    Dint_copyfromptr(src,destA,destB)
+#define Duint_copytoptr(srcA,srcB,dest)	\
+    Dint_copytoptr(srcA,srcB,dest)
+
+#define Suint_isinexact_to_sgl(int_value) \
+    (int_value << 32 - SGL_EXP_LENGTH)
+
+#define Sgl_roundnearest_from_suint(suint_value,sgl_value)		\
+    if (suint_value & 1<<(SGL_EXP_LENGTH - 1))   /* round bit */	\
+    	if ((suint_value << 33 - SGL_EXP_LENGTH) || Slow(sgl_value))	\
+		Sall(sgl_value)++
+
+#define Duint_isinexact_to_sgl(duint_valueA,duint_valueB)	\
+    ((Duintp1(duint_valueA) << 32 - SGL_EXP_LENGTH) || Duintp2(duint_valueB))
+
+#define Sgl_roundnearest_from_duint(duint_valueA,duint_valueB,sgl_value) \
+    if (Duintp1(duint_valueA) & 1<<(SGL_EXP_LENGTH - 1))		\
+    	if ((Duintp1(duint_valueA) << 33 - SGL_EXP_LENGTH) ||		\
+    	Duintp2(duint_valueB) || Slow(sgl_value)) Sall(sgl_value)++
+
+#define Duint_isinexact_to_dbl(duint_value) 	\
+    (Duintp2(duint_value) << 32 - DBL_EXP_LENGTH)
+
+#define Dbl_roundnearest_from_duint(duint_opndB,dbl_opndA,dbl_opndB) 	\
+    if (Duintp2(duint_opndB) & 1<<(DBL_EXP_LENGTH - 1))			\
+       if ((Duintp2(duint_opndB) << 33 - DBL_EXP_LENGTH) || Dlowp2(dbl_opndB)) \
+          if ((++Dallp2(dbl_opndB))==0) Dallp1(dbl_opndA)++
+
+#define Suint_from_sgl_mantissa(src,exponent,result)	\
+    Sall(result) = (unsigned)(Sall(src) << SGL_EXP_LENGTH)>>(31 - exponent)
+
+#define Sgl_isinexact_to_unsigned(sgl_value,exponent)	\
+    Sgl_isinexact_to_fix(sgl_value,exponent)
+
+#define Duint_from_sgl_mantissa(sgl_value,exponent,dresultA,dresultB)	\
+  {unsigned int val = Sall(sgl_value) << SGL_EXP_LENGTH;		\
+    if (exponent <= 31) {						\
+	Dintp1(dresultA) = 0;						\
+	Dintp2(dresultB) = val >> (31 - exponent);			\
+    }									\
+    else {								\
+	Dintp1(dresultA) = val >> (63 - exponent);			\
+	Dintp2(dresultB) = exponent <= 62 ? val << (exponent - 31) : 0;	\
+    }									\
+  }
+
+#define Duint_setzero(dresultA,dresultB) 	\
+    Dint_setzero(dresultA,dresultB)
+
+#define Duint_increment(dresultA,dresultB) Dint_increment(dresultA,dresultB) 
+
+#define Duint_isone_lowp2(dresultB)  Dint_isone_lowp2(dresultB)
+
+#define Suint_from_dbl_mantissa(srcA,srcB,exponent,dest) \
+    Shiftdouble(Dallp1(srcA),Dallp2(srcB),21,dest); \
+    dest = (unsigned)dest >> 31 - exponent
+
+#define Dbl_isinexact_to_unsigned(dbl_valueA,dbl_valueB,exponent) \
+    Dbl_isinexact_to_fix(dbl_valueA,dbl_valueB,exponent)
+
+#define Duint_from_dbl_mantissa(dbl_valueA,dbl_valueB,exponent,destA,destB) \
+    Dint_from_dbl_mantissa(dbl_valueA,dbl_valueB,exponent,destA,destB) 
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/dbl_float.h b/src/kernel/linux/v4.14/arch/parisc/math-emu/dbl_float.h
new file mode 100644
index 0000000..0c2fa9a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/dbl_float.h
@@ -0,0 +1,847 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifdef __NO_PA_HDRS
+    PA header file -- do not include this header file for non-PA builds.
+#endif
+
+/* 32-bit word grabbing functions */
+#define Dbl_firstword(value) Dallp1(value)
+#define Dbl_secondword(value) Dallp2(value)
+#define Dbl_thirdword(value) dummy_location
+#define Dbl_fourthword(value) dummy_location
+
+#define Dbl_sign(object) Dsign(object)
+#define Dbl_exponent(object) Dexponent(object)
+#define Dbl_signexponent(object) Dsignexponent(object)
+#define Dbl_mantissap1(object) Dmantissap1(object)
+#define Dbl_mantissap2(object) Dmantissap2(object)
+#define Dbl_exponentmantissap1(object) Dexponentmantissap1(object)
+#define Dbl_allp1(object) Dallp1(object)
+#define Dbl_allp2(object) Dallp2(object)
+
+/* dbl_and_signs ANDs the sign bits of each argument and puts the result
+ * into the first argument. dbl_or_signs ors those same sign bits */
+#define Dbl_and_signs( src1dst, src2)		\
+    Dallp1(src1dst) = (Dallp1(src2)|~((unsigned int)1<<31)) & Dallp1(src1dst)
+#define Dbl_or_signs( src1dst, src2)		\
+    Dallp1(src1dst) = (Dallp1(src2)&((unsigned int)1<<31)) | Dallp1(src1dst)
+
+/* The hidden bit is always the low bit of the exponent */
+#define Dbl_clear_exponent_set_hidden(srcdst) Deposit_dexponent(srcdst,1)
+#define Dbl_clear_signexponent_set_hidden(srcdst) \
+    Deposit_dsignexponent(srcdst,1)
+#define Dbl_clear_sign(srcdst) Dallp1(srcdst) &= ~((unsigned int)1<<31)
+#define Dbl_clear_signexponent(srcdst) \
+    Dallp1(srcdst) &= Dmantissap1((unsigned int)-1)
+
+/* Exponent field for doubles has already been cleared and may be
+ * included in the shift.  Here we need to generate two double width
+ * variable shifts.  The insignificant bits can be ignored.
+ *      MTSAR f(varamount)
+ *      VSHD	srcdst.high,srcdst.low => srcdst.low
+ *	VSHD	0,srcdst.high => srcdst.high 
+ * This is very difficult to model with C expressions since the shift amount
+ * could exceed 32.  */
+/* varamount must be less than 64 */
+#define Dbl_rightshift(srcdstA, srcdstB, varamount)			\
+    {if((varamount) >= 32) {						\
+        Dallp2(srcdstB) = Dallp1(srcdstA) >> (varamount-32);		\
+        Dallp1(srcdstA)=0;						\
+    }									\
+    else if(varamount > 0) {						\
+	Variable_shift_double(Dallp1(srcdstA), Dallp2(srcdstB), 	\
+	  (varamount), Dallp2(srcdstB));				\
+	Dallp1(srcdstA) >>= varamount;					\
+    } }
+/* varamount must be less than 64 */
+#define Dbl_rightshift_exponentmantissa(srcdstA, srcdstB, varamount)	\
+    {if((varamount) >= 32) {						\
+        Dallp2(srcdstB) = Dexponentmantissap1(srcdstA) >> (varamount-32); \
+	Dallp1(srcdstA) &= ((unsigned int)1<<31);  /* clear expmant field */ \
+    }									\
+    else if(varamount > 0) {						\
+	Variable_shift_double(Dexponentmantissap1(srcdstA), Dallp2(srcdstB), \
+	(varamount), Dallp2(srcdstB));					\
+	Deposit_dexponentmantissap1(srcdstA,				\
+	    (Dexponentmantissap1(srcdstA)>>varamount));			\
+    } }
+/* varamount must be less than 64 */
+#define Dbl_leftshift(srcdstA, srcdstB, varamount)			\
+    {if((varamount) >= 32) {						\
+	Dallp1(srcdstA) = Dallp2(srcdstB) << (varamount-32);		\
+	Dallp2(srcdstB)=0;						\
+    }									\
+    else {								\
+	if ((varamount) > 0) {						\
+	    Dallp1(srcdstA) = (Dallp1(srcdstA) << (varamount)) |	\
+		(Dallp2(srcdstB) >> (32-(varamount)));			\
+	    Dallp2(srcdstB) <<= varamount;				\
+	}								\
+    } }
+#define Dbl_leftshiftby1_withextent(lefta,leftb,right,resulta,resultb)	\
+    Shiftdouble(Dallp1(lefta), Dallp2(leftb), 31, Dallp1(resulta));	\
+    Shiftdouble(Dallp2(leftb), Extall(right), 31, Dallp2(resultb)) 
+    
+#define Dbl_rightshiftby1_withextent(leftb,right,dst)		\
+    Extall(dst) = (Dallp2(leftb) << 31) | ((unsigned int)Extall(right) >> 1) | \
+		  Extlow(right)
+
+#define Dbl_arithrightshiftby1(srcdstA,srcdstB)			\
+    Shiftdouble(Dallp1(srcdstA),Dallp2(srcdstB),1,Dallp2(srcdstB));\
+    Dallp1(srcdstA) = (int)Dallp1(srcdstA) >> 1
+   
+/* Sign extend the sign bit with an integer destination */
+#define Dbl_signextendedsign(value)  Dsignedsign(value)
+
+#define Dbl_isone_hidden(dbl_value) (Is_dhidden(dbl_value)!=0)
+/* Singles and doubles may include the sign and exponent fields.  The
+ * hidden bit and the hidden overflow must be included. */
+#define Dbl_increment(dbl_valueA,dbl_valueB) \
+    if( (Dallp2(dbl_valueB) += 1) == 0 )  Dallp1(dbl_valueA) += 1
+#define Dbl_increment_mantissa(dbl_valueA,dbl_valueB) \
+    if( (Dmantissap2(dbl_valueB) += 1) == 0 )  \
+    Deposit_dmantissap1(dbl_valueA,dbl_valueA+1)
+#define Dbl_decrement(dbl_valueA,dbl_valueB) \
+    if( Dallp2(dbl_valueB) == 0 )  Dallp1(dbl_valueA) -= 1; \
+    Dallp2(dbl_valueB) -= 1
+
+#define Dbl_isone_sign(dbl_value) (Is_dsign(dbl_value)!=0)
+#define Dbl_isone_hiddenoverflow(dbl_value) (Is_dhiddenoverflow(dbl_value)!=0)
+#define Dbl_isone_lowmantissap1(dbl_valueA) (Is_dlowp1(dbl_valueA)!=0)
+#define Dbl_isone_lowmantissap2(dbl_valueB) (Is_dlowp2(dbl_valueB)!=0)
+#define Dbl_isone_signaling(dbl_value) (Is_dsignaling(dbl_value)!=0)
+#define Dbl_is_signalingnan(dbl_value) (Dsignalingnan(dbl_value)==0xfff)
+#define Dbl_isnotzero(dbl_valueA,dbl_valueB) \
+    (Dallp1(dbl_valueA) || Dallp2(dbl_valueB))
+#define Dbl_isnotzero_hiddenhigh7mantissa(dbl_value) \
+    (Dhiddenhigh7mantissa(dbl_value)!=0)
+#define Dbl_isnotzero_exponent(dbl_value) (Dexponent(dbl_value)!=0)
+#define Dbl_isnotzero_mantissa(dbl_valueA,dbl_valueB) \
+    (Dmantissap1(dbl_valueA) || Dmantissap2(dbl_valueB))
+#define Dbl_isnotzero_mantissap1(dbl_valueA) (Dmantissap1(dbl_valueA)!=0)
+#define Dbl_isnotzero_mantissap2(dbl_valueB) (Dmantissap2(dbl_valueB)!=0)
+#define Dbl_isnotzero_exponentmantissa(dbl_valueA,dbl_valueB) \
+    (Dexponentmantissap1(dbl_valueA) || Dmantissap2(dbl_valueB))
+#define Dbl_isnotzero_low4p2(dbl_value) (Dlow4p2(dbl_value)!=0)
+#define Dbl_iszero(dbl_valueA,dbl_valueB) (Dallp1(dbl_valueA)==0 && \
+    Dallp2(dbl_valueB)==0)
+#define Dbl_iszero_allp1(dbl_value) (Dallp1(dbl_value)==0)
+#define Dbl_iszero_allp2(dbl_value) (Dallp2(dbl_value)==0)
+#define Dbl_iszero_hidden(dbl_value) (Is_dhidden(dbl_value)==0)
+#define Dbl_iszero_hiddenoverflow(dbl_value) (Is_dhiddenoverflow(dbl_value)==0)
+#define Dbl_iszero_hiddenhigh3mantissa(dbl_value) \
+    (Dhiddenhigh3mantissa(dbl_value)==0)
+#define Dbl_iszero_hiddenhigh7mantissa(dbl_value) \
+    (Dhiddenhigh7mantissa(dbl_value)==0)
+#define Dbl_iszero_sign(dbl_value) (Is_dsign(dbl_value)==0)
+#define Dbl_iszero_exponent(dbl_value) (Dexponent(dbl_value)==0)
+#define Dbl_iszero_mantissa(dbl_valueA,dbl_valueB) \
+    (Dmantissap1(dbl_valueA)==0 && Dmantissap2(dbl_valueB)==0)
+#define Dbl_iszero_exponentmantissa(dbl_valueA,dbl_valueB) \
+    (Dexponentmantissap1(dbl_valueA)==0 && Dmantissap2(dbl_valueB)==0)
+#define Dbl_isinfinity_exponent(dbl_value)		\
+    (Dexponent(dbl_value)==DBL_INFINITY_EXPONENT)
+#define Dbl_isnotinfinity_exponent(dbl_value)		\
+    (Dexponent(dbl_value)!=DBL_INFINITY_EXPONENT)
+#define Dbl_isinfinity(dbl_valueA,dbl_valueB)			\
+    (Dexponent(dbl_valueA)==DBL_INFINITY_EXPONENT &&	\
+    Dmantissap1(dbl_valueA)==0 && Dmantissap2(dbl_valueB)==0)
+#define Dbl_isnan(dbl_valueA,dbl_valueB)		\
+    (Dexponent(dbl_valueA)==DBL_INFINITY_EXPONENT &&	\
+    (Dmantissap1(dbl_valueA)!=0 || Dmantissap2(dbl_valueB)!=0))
+#define Dbl_isnotnan(dbl_valueA,dbl_valueB)		\
+    (Dexponent(dbl_valueA)!=DBL_INFINITY_EXPONENT ||	\
+    (Dmantissap1(dbl_valueA)==0 && Dmantissap2(dbl_valueB)==0))
+
+#define Dbl_islessthan(dbl_op1a,dbl_op1b,dbl_op2a,dbl_op2b)	\
+    (Dallp1(dbl_op1a) < Dallp1(dbl_op2a) ||			\
+     (Dallp1(dbl_op1a) == Dallp1(dbl_op2a) &&			\
+      Dallp2(dbl_op1b) < Dallp2(dbl_op2b)))
+#define Dbl_isgreaterthan(dbl_op1a,dbl_op1b,dbl_op2a,dbl_op2b)	\
+    (Dallp1(dbl_op1a) > Dallp1(dbl_op2a) ||			\
+     (Dallp1(dbl_op1a) == Dallp1(dbl_op2a) &&			\
+      Dallp2(dbl_op1b) > Dallp2(dbl_op2b)))
+#define Dbl_isnotlessthan(dbl_op1a,dbl_op1b,dbl_op2a,dbl_op2b)	\
+    (Dallp1(dbl_op1a) > Dallp1(dbl_op2a) ||			\
+     (Dallp1(dbl_op1a) == Dallp1(dbl_op2a) &&			\
+      Dallp2(dbl_op1b) >= Dallp2(dbl_op2b)))
+#define Dbl_isnotgreaterthan(dbl_op1a,dbl_op1b,dbl_op2a,dbl_op2b) \
+    (Dallp1(dbl_op1a) < Dallp1(dbl_op2a) ||			\
+     (Dallp1(dbl_op1a) == Dallp1(dbl_op2a) &&			\
+      Dallp2(dbl_op1b) <= Dallp2(dbl_op2b)))
+#define Dbl_isequal(dbl_op1a,dbl_op1b,dbl_op2a,dbl_op2b)	\
+     ((Dallp1(dbl_op1a) == Dallp1(dbl_op2a)) &&			\
+      (Dallp2(dbl_op1b) == Dallp2(dbl_op2b)))
+
+#define Dbl_leftshiftby8(dbl_valueA,dbl_valueB) \
+    Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),24,Dallp1(dbl_valueA)); \
+    Dallp2(dbl_valueB) <<= 8
+#define Dbl_leftshiftby7(dbl_valueA,dbl_valueB) \
+    Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),25,Dallp1(dbl_valueA)); \
+    Dallp2(dbl_valueB) <<= 7
+#define Dbl_leftshiftby4(dbl_valueA,dbl_valueB) \
+    Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),28,Dallp1(dbl_valueA)); \
+    Dallp2(dbl_valueB) <<= 4
+#define Dbl_leftshiftby3(dbl_valueA,dbl_valueB) \
+    Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),29,Dallp1(dbl_valueA)); \
+    Dallp2(dbl_valueB) <<= 3
+#define Dbl_leftshiftby2(dbl_valueA,dbl_valueB) \
+    Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),30,Dallp1(dbl_valueA)); \
+    Dallp2(dbl_valueB) <<= 2
+#define Dbl_leftshiftby1(dbl_valueA,dbl_valueB) \
+    Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),31,Dallp1(dbl_valueA)); \
+    Dallp2(dbl_valueB) <<= 1
+
+#define Dbl_rightshiftby8(dbl_valueA,dbl_valueB) \
+    Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),8,Dallp2(dbl_valueB)); \
+    Dallp1(dbl_valueA) >>= 8
+#define Dbl_rightshiftby4(dbl_valueA,dbl_valueB) \
+    Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),4,Dallp2(dbl_valueB)); \
+    Dallp1(dbl_valueA) >>= 4
+#define Dbl_rightshiftby2(dbl_valueA,dbl_valueB) \
+    Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),2,Dallp2(dbl_valueB)); \
+    Dallp1(dbl_valueA) >>= 2
+#define Dbl_rightshiftby1(dbl_valueA,dbl_valueB) \
+    Shiftdouble(Dallp1(dbl_valueA),Dallp2(dbl_valueB),1,Dallp2(dbl_valueB)); \
+    Dallp1(dbl_valueA) >>= 1
+    
+/* This magnitude comparison uses the signless first words and
+ * the regular part2 words.  The comparison is graphically:
+ *
+ *       1st greater?  -------------
+ *                                 |
+ *       1st less?-----------------+---------
+ *                                 |        |
+ *       2nd greater or equal----->|        |
+ *                               False     True
+ */
+#define Dbl_ismagnitudeless(leftB,rightB,signlessleft,signlessright)	\
+      ((signlessleft <= signlessright) &&				\
+       ( (signlessleft < signlessright) || (Dallp2(leftB)<Dallp2(rightB)) ))
+    
+#define Dbl_copytoint_exponentmantissap1(src,dest) \
+    dest = Dexponentmantissap1(src)
+
+/* A quiet NaN has the high mantissa bit clear and at least one other bit
+ * set (in this case the adjacent bit). */
+#define Dbl_set_quiet(dbl_value) Deposit_dhigh2mantissa(dbl_value,1)
+#define Dbl_set_exponent(dbl_value, exp) Deposit_dexponent(dbl_value,exp)
+
+#define Dbl_set_mantissa(desta,destb,valuea,valueb)	\
+    Deposit_dmantissap1(desta,valuea);			\
+    Dmantissap2(destb) = Dmantissap2(valueb)
+#define Dbl_set_mantissap1(desta,valuea)		\
+    Deposit_dmantissap1(desta,valuea)
+#define Dbl_set_mantissap2(destb,valueb)		\
+    Dmantissap2(destb) = Dmantissap2(valueb)
+
+#define Dbl_set_exponentmantissa(desta,destb,valuea,valueb)	\
+    Deposit_dexponentmantissap1(desta,valuea);			\
+    Dmantissap2(destb) = Dmantissap2(valueb)
+#define Dbl_set_exponentmantissap1(dest,value)			\
+    Deposit_dexponentmantissap1(dest,value)
+
+#define Dbl_copyfromptr(src,desta,destb) \
+    Dallp1(desta) = src->wd0;		\
+    Dallp2(destb) = src->wd1 
+#define Dbl_copytoptr(srca,srcb,dest)	\
+    dest->wd0 = Dallp1(srca);		\
+    dest->wd1 = Dallp2(srcb)
+
+/*  An infinity is represented with the max exponent and a zero mantissa */
+#define Dbl_setinfinity_exponent(dbl_value) \
+    Deposit_dexponent(dbl_value,DBL_INFINITY_EXPONENT)
+#define Dbl_setinfinity_exponentmantissa(dbl_valueA,dbl_valueB)	\
+    Deposit_dexponentmantissap1(dbl_valueA, 			\
+    (DBL_INFINITY_EXPONENT << (32-(1+DBL_EXP_LENGTH))));	\
+    Dmantissap2(dbl_valueB) = 0
+#define Dbl_setinfinitypositive(dbl_valueA,dbl_valueB)		\
+    Dallp1(dbl_valueA) 						\
+        = (DBL_INFINITY_EXPONENT << (32-(1+DBL_EXP_LENGTH)));	\
+    Dmantissap2(dbl_valueB) = 0
+#define Dbl_setinfinitynegative(dbl_valueA,dbl_valueB)		\
+    Dallp1(dbl_valueA) = ((unsigned int)1<<31) |		\
+         (DBL_INFINITY_EXPONENT << (32-(1+DBL_EXP_LENGTH)));	\
+    Dmantissap2(dbl_valueB) = 0
+#define Dbl_setinfinity(dbl_valueA,dbl_valueB,sign)		\
+    Dallp1(dbl_valueA) = ((unsigned int)sign << 31) | 		\
+	(DBL_INFINITY_EXPONENT << (32-(1+DBL_EXP_LENGTH)));	\
+    Dmantissap2(dbl_valueB) = 0
+
+/*
+ * Field setters for the two-word (64-bit) double format.  The two words
+ * are passed separately (dbl_valueA = word containing sign/exponent/high
+ * mantissa, dbl_valueB = low mantissa word).  All of these mutate their
+ * argument(s) in place.
+ */
+#define Dbl_sethigh4bits(dbl_value, extsign) Deposit_dhigh4p1(dbl_value,extsign)
+#define Dbl_set_sign(dbl_value,sign) Deposit_dsign(dbl_value,sign)
+#define Dbl_invert_sign(dbl_value) Deposit_dsign(dbl_value,~Dsign(dbl_value))
+#define Dbl_setone_sign(dbl_value) Deposit_dsign(dbl_value,1)
+#define Dbl_setone_lowmantissap2(dbl_value) Deposit_dlowp2(dbl_value,1)
+#define Dbl_setzero_sign(dbl_value) Dallp1(dbl_value) &= 0x7fffffff
+#define Dbl_setzero_exponent(dbl_value) 		\
+    Dallp1(dbl_value) &= 0x800fffff
+#define Dbl_setzero_mantissa(dbl_valueA,dbl_valueB)	\
+    Dallp1(dbl_valueA) &= 0xfff00000; 			\
+    Dallp2(dbl_valueB) = 0
+#define Dbl_setzero_mantissap1(dbl_value) Dallp1(dbl_value) &= 0xfff00000
+#define Dbl_setzero_mantissap2(dbl_value) Dallp2(dbl_value) = 0
+#define Dbl_setzero_exponentmantissa(dbl_valueA,dbl_valueB)	\
+    Dallp1(dbl_valueA) &= 0x80000000;		\
+    Dallp2(dbl_valueB) = 0
+#define Dbl_setzero_exponentmantissap1(dbl_valueA)	\
+    Dallp1(dbl_valueA) &= 0x80000000
+#define Dbl_setzero(dbl_valueA,dbl_valueB) \
+    Dallp1(dbl_valueA) = 0; Dallp2(dbl_valueB) = 0
+#define Dbl_setzerop1(dbl_value) Dallp1(dbl_value) = 0
+#define Dbl_setzerop2(dbl_value) Dallp2(dbl_value) = 0
+#define Dbl_setnegativezero(dbl_value) \
+    Dallp1(dbl_value) = (unsigned int)1 << 31; Dallp2(dbl_value) = 0
+#define Dbl_setnegativezerop1(dbl_value) Dallp1(dbl_value) = (unsigned int)1<<31
+
+/* Use the following macro for both overflow & underflow conditions */
+/*
+ * "ovfl"/"unfl" expand to a bare operator so Dbl_setwrapped_exponent can
+ * subtract (overflow) or add (underflow) the wrap bias with one macro.
+ */
+#define ovfl -
+#define unfl +
+#define Dbl_setwrapped_exponent(dbl_value,exponent,op) \
+    Deposit_dexponent(dbl_value,(exponent op DBL_WRAP))
+
+/*
+ * Build the largest-magnitude finite double: maximum biased exponent with
+ * an all-ones mantissa (optionally with the sign bit set).
+ */
+#define Dbl_setlargestpositive(dbl_valueA,dbl_valueB) 			\
+    Dallp1(dbl_valueA) = ((DBL_EMAX+DBL_BIAS) << (32-(1+DBL_EXP_LENGTH))) \
+			| ((1<<(32-(1+DBL_EXP_LENGTH))) - 1 );		\
+    Dallp2(dbl_valueB) = 0xFFFFFFFF
+#define Dbl_setlargestnegative(dbl_valueA,dbl_valueB) 			\
+    Dallp1(dbl_valueA) = ((DBL_EMAX+DBL_BIAS) << (32-(1+DBL_EXP_LENGTH))) \
+			| ((1<<(32-(1+DBL_EXP_LENGTH))) - 1 )		\
+			| ((unsigned int)1<<31);			\
+    Dallp2(dbl_valueB) = 0xFFFFFFFF
+#define Dbl_setlargest_exponentmantissa(dbl_valueA,dbl_valueB)		\
+    Deposit_dexponentmantissap1(dbl_valueA,				\
+	(((DBL_EMAX+DBL_BIAS) << (32-(1+DBL_EXP_LENGTH)))		\
+			| ((1<<(32-(1+DBL_EXP_LENGTH))) - 1 )));	\
+    Dallp2(dbl_valueB) = 0xFFFFFFFF
+
+#define Dbl_setnegativeinfinity(dbl_valueA,dbl_valueB) 			\
+    Dallp1(dbl_valueA) = ((1<<DBL_EXP_LENGTH) | DBL_INFINITY_EXPONENT) 	\
+			 << (32-(1+DBL_EXP_LENGTH)) ; 			\
+    Dallp2(dbl_valueB) = 0
+#define Dbl_setlargest(dbl_valueA,dbl_valueB,sign)			\
+    Dallp1(dbl_valueA) = ((unsigned int)sign << 31) |			\
+         ((DBL_EMAX+DBL_BIAS) << (32-(1+DBL_EXP_LENGTH))) |	 	\
+	 ((1 << (32-(1+DBL_EXP_LENGTH))) - 1 );				\
+    Dallp2(dbl_valueB) = 0xFFFFFFFF
+    
+
+/* The high bit is always zero so arithmetic or logical shifts will work. */
+/*
+ * Right-align the 64-bit operand (srcdstA:srcdstB) by "shift" bits.  The
+ * bits shifted out land in "extent"; any bit lost entirely below the
+ * extension word sets the extension's low (sticky) bit so that inexact
+ * rounding still sees it.  Note: 64 - (shift) isolates exactly the bits
+ * of the low word that fall off the end when 32 < shift < 64.
+ */
+#define Dbl_right_align(srcdstA,srcdstB,shift,extent)			\
+    if( shift >= 32 ) 							\
+	{								\
+	/* Big shift requires examining the portion shift off 		\
+	the end to properly set inexact.  */				\
+	if(shift < 64)							\
+	    {								\
+	    if(shift > 32)						\
+		{							\
+	        Variable_shift_double(Dallp1(srcdstA),Dallp2(srcdstB),	\
+		 shift-32, Extall(extent));				\
+	        if(Dallp2(srcdstB) << 64 - (shift)) Ext_setone_low(extent); \
+	        }							\
+	    else Extall(extent) = Dallp2(srcdstB);			\
+	    Dallp2(srcdstB) = Dallp1(srcdstA) >> (shift - 32);		\
+	    }								\
+	else								\
+	    {								\
+	    Extall(extent) = Dallp1(srcdstA);				\
+	    if(Dallp2(srcdstB)) Ext_setone_low(extent);			\
+	    Dallp2(srcdstB) = 0;					\
+	    }								\
+	Dallp1(srcdstA) = 0;						\
+	}								\
+    else								\
+	{								\
+	/* Small alignment is simpler.  Extension is easily set. */	\
+	if (shift > 0)							\
+	    {								\
+	    Extall(extent) = Dallp2(srcdstB) << 32 - (shift);		\
+	    Variable_shift_double(Dallp1(srcdstA),Dallp2(srcdstB),shift, \
+	     Dallp2(srcdstB));						\
+	    Dallp1(srcdstA) >>= shift;					\
+	    }								\
+	else Extall(extent) = 0;					\
+	}
+
+/* 
+ * Here we need to shift the result right to correct for an overshift
+ * (due to the exponent becoming negative) during normalization.
+ */
+/* Caller guarantees 0 < shift < 32; the discarded bits go to "extent". */
+#define Dbl_fix_overshift(srcdstA,srcdstB,shift,extent)			\
+	    Extall(extent) = Dallp2(srcdstB) << 32 - (shift);		\
+	    Dallp2(srcdstB) = (Dallp1(srcdstA) << 32 - (shift)) |	\
+		(Dallp2(srcdstB) >> (shift));				\
+	    Dallp1(srcdstA) = Dallp1(srcdstA) >> shift
+
+#define Dbl_hiddenhigh3mantissa(dbl_value) Dhiddenhigh3mantissa(dbl_value)
+#define Dbl_hidden(dbl_value) Dhidden(dbl_value)
+#define Dbl_lowmantissap2(dbl_value) Dlowp2(dbl_value)
+
+/* The left argument is never smaller than the right argument */
+/*
+ * 64-bit subtract with manual borrow propagation.  NOTE: the borrow is
+ * applied by decrementing the *left* operand's high word, so "lefta" is
+ * clobbered even when result words are distinct variables.
+ */
+#define Dbl_subtract(lefta,leftb,righta,rightb,resulta,resultb)			\
+    if( Dallp2(rightb) > Dallp2(leftb) ) Dallp1(lefta)--;	\
+    Dallp2(resultb) = Dallp2(leftb) - Dallp2(rightb);		\
+    Dallp1(resulta) = Dallp1(lefta) - Dallp1(righta)
+
+/* Subtract right augmented with extension from left augmented with zeros and
+ * store into result and extension. */
+/* 0-Extall(extent) negates the extension; a nonzero result borrows 1. */
+#define Dbl_subtract_withextension(lefta,leftb,righta,rightb,extent,resulta,resultb)	\
+    Dbl_subtract(lefta,leftb,righta,rightb,resulta,resultb);		\
+    if( (Extall(extent) = 0-Extall(extent)) )				\
+        {								\
+        if((Dallp2(resultb)--) == 0) Dallp1(resulta)--;			\
+        }
+
+#define Dbl_addition(lefta,leftb,righta,rightb,resulta,resultb)		\
+    /* If the sum of the low words is less than either source, then	\
+     * an overflow into the next word occurred. */			\
+    Dallp1(resulta) = Dallp1(lefta) + Dallp1(righta);			\
+    if((Dallp2(resultb) = Dallp2(leftb) + Dallp2(rightb)) < Dallp2(rightb)) \
+	Dallp1(resulta)++
+
+#define Dbl_xortointp1(left,right,result)			\
+    result = Dallp1(left) XOR Dallp1(right)
+
+#define Dbl_xorfromintp1(left,right,result)			\
+    Dallp1(result) = left XOR Dallp1(right)
+
+/* Classic XOR swap of the two low mantissa words; no temporary needed. */
+#define Dbl_swap_lower(left,right)				\
+    Dallp2(left)  = Dallp2(left) XOR Dallp2(right);		\
+    Dallp2(right) = Dallp2(left) XOR Dallp2(right);		\
+    Dallp2(left)  = Dallp2(left) XOR Dallp2(right)
+
+/* Need to Initialize */
+/* Quiet NaN: max exponent + 1, quiet bit (second-highest mantissa bit) set. */
+#define Dbl_makequietnan(desta,destb)					\
+    Dallp1(desta) = ((DBL_EMAX+DBL_BIAS)+1)<< (32-(1+DBL_EXP_LENGTH))	\
+                 | (1<<(32-(1+DBL_EXP_LENGTH+2)));			\
+    Dallp2(destb) = 0
+/* Signaling NaN: max exponent + 1, highest mantissa bit set. */
+#define Dbl_makesignalingnan(desta,destb)				\
+    Dallp1(desta) = ((DBL_EMAX+DBL_BIAS)+1)<< (32-(1+DBL_EXP_LENGTH))	\
+                 | (1<<(32-(1+DBL_EXP_LENGTH+1)));			\
+    Dallp2(destb) = 0
+
+/*
+ * Left-shift until the hidden bit is set, decrementing "exponent" by the
+ * total shift count.  Shifts in chunks of 8, then 4, then 1 for speed.
+ */
+#define Dbl_normalize(dbl_opndA,dbl_opndB,exponent)			\
+	while(Dbl_iszero_hiddenhigh7mantissa(dbl_opndA)) {		\
+		Dbl_leftshiftby8(dbl_opndA,dbl_opndB);			\
+		exponent -= 8;						\
+	}								\
+	if(Dbl_iszero_hiddenhigh3mantissa(dbl_opndA)) {			\
+		Dbl_leftshiftby4(dbl_opndA,dbl_opndB);			\
+		exponent -= 4;						\
+	}								\
+	while(Dbl_iszero_hidden(dbl_opndA)) {				\
+		Dbl_leftshiftby1(dbl_opndA,dbl_opndB);			\
+		exponent -= 1;						\
+	}
+
+/*
+ * 64-bit add/subtract emulating the hardware ADD/ADDC (SUB/SUBB) pairs.
+ * The carry test reads the raw words before the Dallp-wrapped updates, so
+ * argument expressions must be side-effect free.
+ */
+#define Twoword_add(src1dstA,src1dstB,src2A,src2B)		\
+	/* 							\
+	 * want this macro to generate:				\
+	 *	ADD	src1dstB,src2B,src1dstB;		\
+	 *	ADDC	src1dstA,src2A,src1dstA;		\
+	 */							\
+	if ((src1dstB) + (src2B) < (src1dstB)) Dallp1(src1dstA)++; \
+	Dallp1(src1dstA) += (src2A);				\
+	Dallp2(src1dstB) += (src2B)
+
+#define Twoword_subtract(src1dstA,src1dstB,src2A,src2B)		\
+	/* 							\
+	 * want this macro to generate:				\
+	 *	SUB	src1dstB,src2B,src1dstB;		\
+	 *	SUBB	src1dstA,src2A,src1dstA;		\
+	 */							\
+	if ((src1dstB) < (src2B)) Dallp1(src1dstA)--;		\
+	Dallp1(src1dstA) -= (src2A);				\
+	Dallp2(src1dstB) -= (src2B)
+
+/*
+ * IEEE-754 overflow default result: infinity or the largest finite value,
+ * chosen per the current rounding direction and the result's sign.
+ */
+#define Dbl_setoverflow(resultA,resultB)				\
+	/* set result to infinity or largest number */			\
+	switch (Rounding_mode()) {					\
+		case ROUNDPLUS:						\
+			if (Dbl_isone_sign(resultA)) {			\
+				Dbl_setlargestnegative(resultA,resultB); \
+			}						\
+			else {						\
+				Dbl_setinfinitypositive(resultA,resultB); \
+			}						\
+			break;						\
+		case ROUNDMINUS:					\
+			if (Dbl_iszero_sign(resultA)) {			\
+				Dbl_setlargestpositive(resultA,resultB); \
+			}						\
+			else {						\
+				Dbl_setinfinitynegative(resultA,resultB); \
+			}						\
+			break;						\
+		case ROUNDNEAREST:					\
+			Dbl_setinfinity_exponentmantissa(resultA,resultB); \
+			break;						\
+		case ROUNDZERO:						\
+			Dbl_setlargest_exponentmantissa(resultA,resultB); \
+	}
+
+/*
+ * Denormalize a wrapped-underflow result: restore the hidden bit, then
+ * shift right by (1 - exponent) bits, capturing the guard bit and folding
+ * every lower lost bit into "sticky".  Three cases by shift distance:
+ * within the low word (exponent >= -31), exactly a word (== -32), and
+ * beyond a word.  Sets "inexact" from guard|sticky.
+ */
+#define Dbl_denormalize(opndp1,opndp2,exponent,guard,sticky,inexact)	\
+    Dbl_clear_signexponent_set_hidden(opndp1);				\
+    if (exponent >= (1-DBL_P)) {					\
+	if (exponent >= -31) {						\
+	    guard = (Dallp2(opndp2) >> -exponent) & 1;			\
+	    if (exponent < 0) sticky |= Dallp2(opndp2) << (32+exponent); \
+	    if (exponent > -31) {					\
+		Variable_shift_double(opndp1,opndp2,1-exponent,opndp2);	\
+		Dallp1(opndp1) >>= 1-exponent;				\
+	    }								\
+	    else {							\
+		Dallp2(opndp2) = Dallp1(opndp1);			\
+		Dbl_setzerop1(opndp1);					\
+	    }								\
+	}								\
+	else {								\
+	    guard = (Dallp1(opndp1) >> -32-exponent) & 1;		\
+	    if (exponent == -32) sticky |= Dallp2(opndp2);		\
+	    else sticky |= (Dallp2(opndp2) | Dallp1(opndp1) << 64+exponent); \
+	    Dallp2(opndp2) = Dallp1(opndp1) >> -31-exponent;		\
+	    Dbl_setzerop1(opndp1);					\
+	}								\
+	inexact = guard | sticky;					\
+    }									\
+    else {								\
+	guard = 0;							\
+	sticky |= (Dallp1(opndp1) | Dallp2(opndp2));			\
+	Dbl_setzero(opndp1,opndp2);					\
+	inexact = sticky;						\
+    }
+
+/* 
+ * The fused multiply add instructions requires a double extended format,
+ * with 106 bits of mantissa.
+ */
+#define DBLEXT_THRESHOLD 106
+
+/* Clear all four words of a double-extended (quad-word) value. */
+#define Dblext_setzero(valA,valB,valC,valD)	\
+    Dextallp1(valA) = 0; Dextallp2(valB) = 0;	\
+    Dextallp3(valC) = 0; Dextallp4(valD) = 0
+
+
+/*
+ * Predicates on the four-word double-extended format.  Each pN argument is
+ * the Nth word of the value and is examined with the matching DextallpN /
+ * Dext* accessor.  (Dblext_isnotzero_mantissap4 previously read its word
+ * through Dextallp3 — a copy-paste slip from the p3 macro above; it must
+ * use the part-4 accessor like every other *p4 macro in this file.)
+ */
+#define Dblext_isnotzero_mantissap3(valC) (Dextallp3(valC)!=0)
+#define Dblext_isnotzero_mantissap4(valD) (Dextallp4(valD)!=0)
+#define Dblext_isone_lowp2(val) (Dextlowp2(val)!=0)
+#define Dblext_isone_highp3(val) (Dexthighp3(val)!=0)
+#define Dblext_isnotzero_low31p3(val) (Dextlow31p3(val)!=0)
+#define Dblext_iszero(valA,valB,valC,valD) (Dextallp1(valA)==0 && \
+    Dextallp2(valB)==0 && Dextallp3(valC)==0 && Dextallp4(valD)==0)
+
+/*
+ * Copy a four-word double-extended value.  Each destination word N takes
+ * source word N.  (All four source reads previously went through
+ * Dextallp4; every other quad-word macro here — Dblext_setzero,
+ * Dblext_swap_lower, Dblext_iszero — pairs word N with accessor N, so the
+ * part-matched accessors are used for consistency.)
+ */
+#define Dblext_copy(srca,srcb,srcc,srcd,desta,destb,destc,destd) \
+    Dextallp1(desta) = Dextallp1(srca);	\
+    Dextallp2(destb) = Dextallp2(srcb);	\
+    Dextallp3(destc) = Dextallp3(srcc);	\
+    Dextallp4(destd) = Dextallp4(srcd)
+
+/* XOR-swap the three low words (p2..p4) of two double-extended values. */
+#define Dblext_swap_lower(leftp2,leftp3,leftp4,rightp2,rightp3,rightp4)  \
+    Dextallp2(leftp2)  = Dextallp2(leftp2) XOR Dextallp2(rightp2);  \
+    Dextallp2(rightp2) = Dextallp2(leftp2) XOR Dextallp2(rightp2);  \
+    Dextallp2(leftp2)  = Dextallp2(leftp2) XOR Dextallp2(rightp2);  \
+    Dextallp3(leftp3)  = Dextallp3(leftp3) XOR Dextallp3(rightp3);  \
+    Dextallp3(rightp3) = Dextallp3(leftp3) XOR Dextallp3(rightp3);  \
+    Dextallp3(leftp3)  = Dextallp3(leftp3) XOR Dextallp3(rightp3);  \
+    Dextallp4(leftp4)  = Dextallp4(leftp4) XOR Dextallp4(rightp4);  \
+    Dextallp4(rightp4) = Dextallp4(leftp4) XOR Dextallp4(rightp4);  \
+    Dextallp4(leftp4)  = Dextallp4(leftp4) XOR Dextallp4(rightp4)
+
+/* Set the sticky (lowest mantissa) bit of word 4. */
+#define Dblext_setone_lowmantissap4(dbl_value) Deposit_dextlowp4(dbl_value,1)
+
+/* The high bit is always zero so arithmetic or logical shifts will work. */
+/*
+ * Right-align a four-word (double-extended) value by "shift" bits.  The
+ * words lost off the low end are folded into "sticky"; any nonzero sticky
+ * sets the low mantissa bit of word 4 so rounding still sees inexactness.
+ *
+ * The bits of a word that fall off the end of a shift by "shiftamt" are
+ * isolated by "word << (32 - shiftamt)".  Cases 1-3 previously shifted by
+ * (31 - shiftamt), which also keeps one bit that actually survives the
+ * alignment and so could set sticky spuriously; case 0 here and the
+ * identical case structure in Dblext_denormalize below both use
+ * (32 - shiftamt), which is the correct amount.
+ */
+#define Dblext_right_align(srcdstA,srcdstB,srcdstC,srcdstD,shift) \
+  {int shiftamt, sticky;						\
+    shiftamt = shift % 32;						\
+    sticky = 0;								\
+    switch (shift/32) {							\
+     case 0: if (shiftamt > 0) {					\
+	        sticky = Dextallp4(srcdstD) << 32 - (shiftamt); 	\
+                Variable_shift_double(Dextallp3(srcdstC),		\
+		 Dextallp4(srcdstD),shiftamt,Dextallp4(srcdstD));	\
+                Variable_shift_double(Dextallp2(srcdstB),		\
+		 Dextallp3(srcdstC),shiftamt,Dextallp3(srcdstC));	\
+                Variable_shift_double(Dextallp1(srcdstA),		\
+		 Dextallp2(srcdstB),shiftamt,Dextallp2(srcdstB));	\
+	        Dextallp1(srcdstA) >>= shiftamt;			\
+	     }								\
+	     break;							\
+     case 1: if (shiftamt > 0) {					\
+                sticky = (Dextallp3(srcdstC) << 32 - (shiftamt)) |	\
+			 Dextallp4(srcdstD);				\
+                Variable_shift_double(Dextallp2(srcdstB),		\
+		 Dextallp3(srcdstC),shiftamt,Dextallp4(srcdstD));	\
+                Variable_shift_double(Dextallp1(srcdstA),		\
+		 Dextallp2(srcdstB),shiftamt,Dextallp3(srcdstC));	\
+	     }								\
+	     else {							\
+		sticky = Dextallp4(srcdstD);				\
+		Dextallp4(srcdstD) = Dextallp3(srcdstC);		\
+		Dextallp3(srcdstC) = Dextallp2(srcdstB);		\
+	     }								\
+	     Dextallp2(srcdstB) = Dextallp1(srcdstA) >> shiftamt;	\
+	     Dextallp1(srcdstA) = 0;					\
+	     break;							\
+     case 2: if (shiftamt > 0) {					\
+                sticky = (Dextallp2(srcdstB) << 32 - (shiftamt)) |	\
+			 Dextallp3(srcdstC) | Dextallp4(srcdstD);	\
+                Variable_shift_double(Dextallp1(srcdstA),		\
+		 Dextallp2(srcdstB),shiftamt,Dextallp4(srcdstD));	\
+	     }								\
+	     else {							\
+		sticky = Dextallp3(srcdstC) | Dextallp4(srcdstD);	\
+		Dextallp4(srcdstD) = Dextallp2(srcdstB);		\
+	     }								\
+	     Dextallp3(srcdstC) = Dextallp1(srcdstA) >> shiftamt;	\
+	     Dextallp1(srcdstA) = Dextallp2(srcdstB) = 0;		\
+	     break;							\
+     case 3: if (shiftamt > 0) {					\
+                sticky = (Dextallp1(srcdstA) << 32 - (shiftamt)) |	\
+			 Dextallp2(srcdstB) | Dextallp3(srcdstC) |	\
+			 Dextallp4(srcdstD);				\
+	     }								\
+	     else {							\
+		sticky = Dextallp2(srcdstB) | Dextallp3(srcdstC) |	\
+		    Dextallp4(srcdstD);					\
+	     }								\
+	     Dextallp4(srcdstD) = Dextallp1(srcdstA) >> shiftamt;	\
+	     Dextallp1(srcdstA) = Dextallp2(srcdstB) = 0;		\
+	     Dextallp3(srcdstC) = 0;					\
+	     break;							\
+    }									\
+    if (sticky) Dblext_setone_lowmantissap4(srcdstD);			\
+  }
+
+/* The left argument is never smaller than the right argument */
+/*
+ * Four-word subtract with rippling borrow.  NOTE: borrows are applied by
+ * decrementing the *left* operand's words in place, so lefta/leftb/leftc
+ * are clobbered.  The post-decrement "== 0" tests cascade a borrow when
+ * the decremented word was already zero.
+ */
+#define Dblext_subtract(lefta,leftb,leftc,leftd,righta,rightb,rightc,rightd,resulta,resultb,resultc,resultd) \
+    if( Dextallp4(rightd) > Dextallp4(leftd) ) 			\
+	if( (Dextallp3(leftc)--) == 0)				\
+	    if( (Dextallp2(leftb)--) == 0) Dextallp1(lefta)--;	\
+    Dextallp4(resultd) = Dextallp4(leftd) - Dextallp4(rightd);	\
+    if( Dextallp3(rightc) > Dextallp3(leftc) ) 			\
+        if( (Dextallp2(leftb)--) == 0) Dextallp1(lefta)--;	\
+    Dextallp3(resultc) = Dextallp3(leftc) - Dextallp3(rightc);	\
+    if( Dextallp2(rightb) > Dextallp2(leftb) ) Dextallp1(lefta)--; \
+    Dextallp2(resultb) = Dextallp2(leftb) - Dextallp2(rightb);	\
+    Dextallp1(resulta) = Dextallp1(lefta) - Dextallp1(righta)
+
+/*
+ * Four-word add with rippling carry.  A carry out of word N is detected
+ * by the sum being smaller than an addend; when a carry-in of 1 is also
+ * present the "<=" form is used, since sum == addend then also implies
+ * a carry out.
+ */
+#define Dblext_addition(lefta,leftb,leftc,leftd,righta,rightb,rightc,rightd,resulta,resultb,resultc,resultd) \
+    /* If the sum of the low words is less than either source, then \
+     * an overflow into the next word occurred. */ \
+    if ((Dextallp4(resultd) = Dextallp4(leftd)+Dextallp4(rightd)) < \
+	Dextallp4(rightd)) \
+	if((Dextallp3(resultc) = Dextallp3(leftc)+Dextallp3(rightc)+1) <= \
+	    Dextallp3(rightc)) \
+	    if((Dextallp2(resultb) = Dextallp2(leftb)+Dextallp2(rightb)+1) \
+	        <= Dextallp2(rightb))  \
+		    Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta)+1; \
+	    else Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta); \
+	else \
+	    if ((Dextallp2(resultb) = Dextallp2(leftb)+Dextallp2(rightb)) < \
+	        Dextallp2(rightb)) \
+		    Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta)+1; \
+	    else Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta); \
+    else \
+	if ((Dextallp3(resultc) = Dextallp3(leftc)+Dextallp3(rightc)) < \
+	    Dextallp3(rightc))  \
+	    if ((Dextallp2(resultb) = Dextallp2(leftb)+Dextallp2(rightb)+1) \
+	        <= Dextallp2(rightb)) \
+		    Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta)+1; \
+	    else Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta); \
+	else \
+	    if ((Dextallp2(resultb) = Dextallp2(leftb)+Dextallp2(rightb)) < \
+	        Dextallp2(rightb)) \
+		    Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta)+1; \
+	    else Dextallp1(resulta) = Dextallp1(lefta)+Dextallp1(righta)
+
+
+/*
+ * Fixed-distance shifts across the four-word value.  Each Shiftdouble
+ * funnels bits from the higher word into the lower; the top word is
+ * finished with a plain shift.  The arithmetic variant sign-extends the
+ * top word via a signed cast.
+ */
+#define Dblext_arithrightshiftby1(srcdstA,srcdstB,srcdstC,srcdstD)	\
+    Shiftdouble(Dextallp3(srcdstC),Dextallp4(srcdstD),1,Dextallp4(srcdstD)); \
+    Shiftdouble(Dextallp2(srcdstB),Dextallp3(srcdstC),1,Dextallp3(srcdstC)); \
+    Shiftdouble(Dextallp1(srcdstA),Dextallp2(srcdstB),1,Dextallp2(srcdstB)); \
+    Dextallp1(srcdstA) = (int)Dextallp1(srcdstA) >> 1
+   
+#define Dblext_leftshiftby8(valA,valB,valC,valD) \
+    Shiftdouble(Dextallp1(valA),Dextallp2(valB),24,Dextallp1(valA)); \
+    Shiftdouble(Dextallp2(valB),Dextallp3(valC),24,Dextallp2(valB)); \
+    Shiftdouble(Dextallp3(valC),Dextallp4(valD),24,Dextallp3(valC)); \
+    Dextallp4(valD) <<= 8
+#define Dblext_leftshiftby4(valA,valB,valC,valD) \
+    Shiftdouble(Dextallp1(valA),Dextallp2(valB),28,Dextallp1(valA)); \
+    Shiftdouble(Dextallp2(valB),Dextallp3(valC),28,Dextallp2(valB)); \
+    Shiftdouble(Dextallp3(valC),Dextallp4(valD),28,Dextallp3(valC)); \
+    Dextallp4(valD) <<= 4
+#define Dblext_leftshiftby3(valA,valB,valC,valD) \
+    Shiftdouble(Dextallp1(valA),Dextallp2(valB),29,Dextallp1(valA)); \
+    Shiftdouble(Dextallp2(valB),Dextallp3(valC),29,Dextallp2(valB)); \
+    Shiftdouble(Dextallp3(valC),Dextallp4(valD),29,Dextallp3(valC)); \
+    Dextallp4(valD) <<= 3
+#define Dblext_leftshiftby2(valA,valB,valC,valD) \
+    Shiftdouble(Dextallp1(valA),Dextallp2(valB),30,Dextallp1(valA)); \
+    Shiftdouble(Dextallp2(valB),Dextallp3(valC),30,Dextallp2(valB)); \
+    Shiftdouble(Dextallp3(valC),Dextallp4(valD),30,Dextallp3(valC)); \
+    Dextallp4(valD) <<= 2
+#define Dblext_leftshiftby1(valA,valB,valC,valD) \
+    Shiftdouble(Dextallp1(valA),Dextallp2(valB),31,Dextallp1(valA)); \
+    Shiftdouble(Dextallp2(valB),Dextallp3(valC),31,Dextallp2(valB)); \
+    Shiftdouble(Dextallp3(valC),Dextallp4(valD),31,Dextallp3(valC)); \
+    Dextallp4(valD) <<= 1
+
+#define Dblext_rightshiftby4(valueA,valueB,valueC,valueD) \
+    Shiftdouble(Dextallp3(valueC),Dextallp4(valueD),4,Dextallp4(valueD)); \
+    Shiftdouble(Dextallp2(valueB),Dextallp3(valueC),4,Dextallp3(valueC)); \
+    Shiftdouble(Dextallp1(valueA),Dextallp2(valueB),4,Dextallp2(valueB)); \
+    Dextallp1(valueA) >>= 4
+#define Dblext_rightshiftby1(valueA,valueB,valueC,valueD) \
+    Shiftdouble(Dextallp3(valueC),Dextallp4(valueD),1,Dextallp4(valueD)); \
+    Shiftdouble(Dextallp2(valueB),Dextallp3(valueC),1,Dextallp3(valueC)); \
+    Shiftdouble(Dextallp1(valueA),Dextallp2(valueB),1,Dextallp2(valueB)); \
+    Dextallp1(valueA) >>= 1
+
+/*
+ * Double-extended aliases: operations that only touch the upper two words
+ * are identical to their plain-double counterparts, so forward to those.
+ */
+#define Dblext_xortointp1(left,right,result) Dbl_xortointp1(left,right,result)
+
+#define Dblext_xorfromintp1(left,right,result) \
+	Dbl_xorfromintp1(left,right,result)
+
+#define Dblext_copytoint_exponentmantissap1(src,dest) \
+	Dbl_copytoint_exponentmantissap1(src,dest)
+
+#define Dblext_ismagnitudeless(leftB,rightB,signlessleft,signlessright) \
+	Dbl_ismagnitudeless(leftB,rightB,signlessleft,signlessright)
+
+/* Widen a double into the quad format: low two words are zero-filled. */
+#define Dbl_copyto_dblext(src1,src2,dest1,dest2,dest3,dest4) \
+	Dextallp1(dest1) = Dallp1(src1); Dextallp2(dest2) = Dallp2(src2); \
+	Dextallp3(dest3) = 0; Dextallp4(dest4) = 0
+
+#define Dblext_set_sign(dbl_value,sign)  Dbl_set_sign(dbl_value,sign)  
+#define Dblext_clear_signexponent_set_hidden(srcdst) \
+	Dbl_clear_signexponent_set_hidden(srcdst) 
+#define Dblext_clear_signexponent(srcdst) Dbl_clear_signexponent(srcdst) 
+#define Dblext_clear_sign(srcdst) Dbl_clear_sign(srcdst) 
+#define Dblext_isone_hidden(dbl_value) Dbl_isone_hidden(dbl_value) 
+
+/*
+ * The Fourword_add() macro assumes that integers are 4 bytes in size.
+ * It will break if this is not the case.
+ */
+
+/*
+ * 128-bit add emulating an ADD/ADDC chain.  Operands are raw unsigned
+ * ints (no Dallp accessors) and the src1dst words are updated in place.
+ * A carry-in of 1 uses the "<=" comparison: with the extra 1 added,
+ * sum == addend also implies a carry out.
+ */
+#define Fourword_add(src1dstA,src1dstB,src1dstC,src1dstD,src2A,src2B,src2C,src2D) \
+	/* 								\
+	 * want this macro to generate:					\
+	 *	ADD	src1dstD,src2D,src1dstD;			\
+	 *	ADDC	src1dstC,src2C,src1dstC;			\
+	 *	ADDC	src1dstB,src2B,src1dstB;			\
+	 *	ADDC	src1dstA,src2A,src1dstA;			\
+	 */								\
+	if ((unsigned int)(src1dstD += (src2D)) < (unsigned int)(src2D)) { \
+	   if ((unsigned int)(src1dstC += (src2C) + 1) <=		\
+	       (unsigned int)(src2C)) {					\
+	     if ((unsigned int)(src1dstB += (src2B) + 1) <=		\
+		 (unsigned int)(src2B)) src1dstA++;			\
+	   }								\
+	   else if ((unsigned int)(src1dstB += (src2B)) < 		\
+		    (unsigned int)(src2B)) src1dstA++;			\
+	}								\
+	else {								\
+	   if ((unsigned int)(src1dstC += (src2C)) <			\
+	       (unsigned int)(src2C)) {					\
+	      if ((unsigned int)(src1dstB += (src2B) + 1) <=		\
+		  (unsigned int)(src2B)) src1dstA++;			\
+	   }								\
+	   else if ((unsigned int)(src1dstB += (src2B)) <		\
+		    (unsigned int)(src2B)) src1dstA++;			\
+	}								\
+	src1dstA += (src2A)
+
+/*
+ * Denormalize a double-extended result whose exponent underflowed.
+ * First decides tininess "after rounding": if rounding in the current
+ * mode would carry into the hidden bit (checked by a trial increment
+ * that is immediately undone), the value is not tiny.  Then restores
+ * the hidden bit and shifts right by (1 - exponent), dispatching on
+ * how many whole words the shift crosses; all wholly-lost bits are
+ * folded into "sticky", which ends up in the low bit of word 4.
+ */
+#define Dblext_denormalize(opndp1,opndp2,opndp3,opndp4,exponent,is_tiny) \
+  {int shiftamt, sticky;						\
+    is_tiny = TRUE;							\
+    if (exponent == 0 && (Dextallp3(opndp3) || Dextallp4(opndp4))) {	\
+	switch (Rounding_mode()) {					\
+	case ROUNDPLUS:							\
+		if (Dbl_iszero_sign(opndp1)) {				\
+			Dbl_increment(opndp1,opndp2);			\
+			if (Dbl_isone_hiddenoverflow(opndp1))		\
+				is_tiny = FALSE;			\
+			Dbl_decrement(opndp1,opndp2);			\
+		}							\
+		break;							\
+	case ROUNDMINUS:						\
+		if (Dbl_isone_sign(opndp1)) {				\
+			Dbl_increment(opndp1,opndp2);			\
+			if (Dbl_isone_hiddenoverflow(opndp1))		\
+				is_tiny = FALSE;			\
+			Dbl_decrement(opndp1,opndp2);			\
+		}							\
+		break;							\
+	case ROUNDNEAREST:						\
+		if (Dblext_isone_highp3(opndp3) &&			\
+		    (Dblext_isone_lowp2(opndp2) || 			\
+		     Dblext_isnotzero_low31p3(opndp3)))	{		\
+			Dbl_increment(opndp1,opndp2);			\
+			if (Dbl_isone_hiddenoverflow(opndp1))		\
+				is_tiny = FALSE;			\
+			Dbl_decrement(opndp1,opndp2);			\
+		}							\
+		break;							\
+	}								\
+    }									\
+    Dblext_clear_signexponent_set_hidden(opndp1);			\
+    if (exponent >= (1-QUAD_P)) {					\
+	shiftamt = (1-exponent) % 32;					\
+	switch((1-exponent)/32) {					\
+	  case 0: sticky = Dextallp4(opndp4) << 32-(shiftamt);		\
+		  Variableshiftdouble(opndp3,opndp4,shiftamt,opndp4);	\
+		  Variableshiftdouble(opndp2,opndp3,shiftamt,opndp3);	\
+		  Variableshiftdouble(opndp1,opndp2,shiftamt,opndp2);	\
+		  Dextallp1(opndp1) >>= shiftamt;			\
+		  break;						\
+	  case 1: sticky = (Dextallp3(opndp3) << 32-(shiftamt)) | 	\
+			   Dextallp4(opndp4);				\
+		  Variableshiftdouble(opndp2,opndp3,shiftamt,opndp4);	\
+		  Variableshiftdouble(opndp1,opndp2,shiftamt,opndp3);	\
+		  Dextallp2(opndp2) = Dextallp1(opndp1) >> shiftamt;	\
+		  Dextallp1(opndp1) = 0;				\
+		  break;						\
+	  case 2: sticky = (Dextallp2(opndp2) << 32-(shiftamt)) |	\
+			    Dextallp3(opndp3) | Dextallp4(opndp4);	\
+		  Variableshiftdouble(opndp1,opndp2,shiftamt,opndp4);	\
+		  Dextallp3(opndp3) = Dextallp1(opndp1) >> shiftamt;	\
+		  Dextallp1(opndp1) = Dextallp2(opndp2) = 0;		\
+		  break;						\
+	  case 3: sticky = (Dextallp1(opndp1) << 32-(shiftamt)) |	\
+		  	Dextallp2(opndp2) | Dextallp3(opndp3) | 	\
+			Dextallp4(opndp4);				\
+		  Dextallp4(opndp4) = Dextallp1(opndp1) >> shiftamt;	\
+		  Dextallp1(opndp1) = Dextallp2(opndp2) = 0;		\
+		  Dextallp3(opndp3) = 0;				\
+		  break;						\
+	}								\
+    }									\
+    else {								\
+	sticky = Dextallp1(opndp1) | Dextallp2(opndp2) |		\
+		 Dextallp3(opndp3) | Dextallp4(opndp4);			\
+	Dblext_setzero(opndp1,opndp2,opndp3,opndp4);			\
+    }									\
+    if (sticky) Dblext_setone_lowmantissap4(opndp4);			\
+    exponent = 0;							\
+  }
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/decode_exc.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/decode_exc.c
new file mode 100644
index 0000000..04e550e
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/decode_exc.c
@@ -0,0 +1,370 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/fp/decode_exc.c		$ Revision: $
+ *
+ *  Purpose:
+ *	<<please update with a synopsis of the functionality provided by this file>>
+ *
+ *  External Interfaces:
+ *	<<the following list was autogenerated, please review>>
+ *	decode_fpu(Fpu_register, trap_counts)
+ *
+ *  Internal Interfaces:
+ *	<<please update>>
+ *
+ *  Theory:
+ *	<<please update with a overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+#include <linux/kernel.h>
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+/* #include "types.h" */
+#include <asm/signal.h>
+#include <asm/siginfo.h>
+/* #include <machine/sys/mdep_private.h> */
+
+#undef Fpustatus_register
+#define Fpustatus_register Fpu_register[0]
+
+/* General definitions */
+#define DOESTRAP 1
+#define NOTRAP 0
+/* Pack a signal number and si_code into one return value for the caller. */
+#define SIGNALCODE(signal, code) ((signal) << 24 | (code))
+/* PA bit numbering is big-endian: bit 2 is 1<<(31-2) = 1<<29. */
+#define copropbit	1<<31-2	/* bit position 2 */
+#define opclass		9	/* bits 21 & 22 */
+#define fmt		11	/* bits 19 & 20 */
+#define df		13	/* bits 17 & 18 */
+#define twobits		3	/* mask low-order 2 bits */
+#define fivebits	31	/* mask low-order 5 bits */
+#define MAX_EXCP_REG	7	/* number of exception registers to check */
+
+/* Exception register definitions */
+#define Excp_type(index) Exceptiontype(Fpu_register[index])
+#define Excp_instr(index) Instructionfield(Fpu_register[index])
+#define Clear_excp_register(index) Allexception(Fpu_register[index]) = 0
+/* Format field location depends on opclass (df for opclass 1, else fmt). */
+#define Excp_format() \
+    (current_ir >> ((current_ir>>opclass & twobits)==1 ? df : fmt) & twobits)
+
+/* Miscellaneous definitions */
+/* FP register file viewed as 32-bit words: register "index" starts at word index*2. */
+#define Fpu_sgl(index) Fpu_register[index*2]
+
+#define Fpu_dblp1(index) Fpu_register[index*2]
+#define Fpu_dblp2(index) Fpu_register[(index*2)+1]
+
+#define Fpu_quadp1(index) Fpu_register[index*2]
+#define Fpu_quadp2(index) Fpu_register[(index*2)+1]
+#define Fpu_quadp3(index) Fpu_register[(index*2)+2]
+#define Fpu_quadp4(index) Fpu_register[(index*2)+3]
+
+/* Single precision floating-point definitions */
+#ifndef Sgl_decrement
+# define Sgl_decrement(sgl_value) Sall(sgl_value)--
+#endif
+
+/* Double precision floating-point definitions */
+#ifndef Dbl_decrement
+# define Dbl_decrement(dbl_valuep1,dbl_valuep2) \
+    if ((Dallp2(dbl_valuep2)--) == 0) Dallp1(dbl_valuep1)-- 
+#endif
+
+
+/*
+ * Snapshot the accrued-flag bits (top 5 bits) into aflags and OR the
+ * caller-saved bflags back into the status word; called on every exit
+ * path to restore the flags masked off at entry to decode_fpu().
+ */
+#define update_trap_counts(Fpu_register, aflags, bflags, trap_counts) {	\
+	aflags=(Fpu_register[0])>>27;	/* assumes zero fill. 32 bit */	\
+	Fpu_register[0] |= bflags;					\
+}
+
+/*
+ * Decode a pending FPU exception and either emulate the faulting
+ * instruction or translate the exception into a signal/si_code pair
+ * (packed by SIGNALCODE) for the trap handler.  Returns NOTRAP when
+ * everything was handled by emulation/default results.
+ *
+ * The accrued exception flags (top 5 bits of Fpu_register[0]) are saved
+ * in bflags at entry and restored via update_trap_counts() on every exit
+ * path — exactly once per path; restoring them early would expose stale
+ * flags to the emulation code below.
+ */
+u_int
+decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
+{
+    unsigned int current_ir, excp;
+    int target, exception_index = 1;
+    boolean inexact;
+    unsigned int aflags;
+    unsigned int bflags;
+    unsigned int excptype;
+
+
+    /* Keep stats on how many floating point exceptions (based on type)
+     * that happen.  Want to keep this overhead low, but still provide
+     * some information to the customer.  All exits from this routine
+     * need to restore Fpu_register[0]
+    */
+
+    bflags=(Fpu_register[0] & 0xf8000000);
+    Fpu_register[0] &= 0x07ffffff;
+
+    /* exception_index is used to index the exception register queue.  It
+     *   always points at the last register that contains a valid exception.  A
+     *   zero value implies no exceptions (also the initialized value).  Setting
+     *   the T-bit resets the exception_index to zero.
+     */
+
+    /*
+     * Check for reserved-op exception.  A reserved-op exception does not 
+     * set any exception registers nor does it set the T-bit.  If the T-bit
+     * is not set then a reserved-op exception occurred.
+     *
+     * At some point, we may want to report reserved op exceptions as
+     * illegal instructions.
+     */
+    
+    if (!Is_tbit_set()) {
+	update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+	return SIGNALCODE(SIGILL, ILL_COPROC);
+    }
+
+    /* 
+     * Is a coprocessor op. 
+     *
+     * Now we need to determine what type of exception occurred.
+     */
+    for (exception_index=1; exception_index<=MAX_EXCP_REG; exception_index++) {
+	current_ir = Excp_instr(exception_index);
+	  /*
+	   * On PA89: there are 5 different unimplemented exception
+	   * codes: 0x1, 0x9, 0xb, 0x3, and 0x23.  PA-RISC 2.0 adds
+	   * another, 0x2b.  Only these have the low order bit set.
+	   */
+	excptype = Excp_type(exception_index);
+	if (excptype & UNIMPLEMENTEDEXCEPTION) {
+		/*
+		 * Clear T-bit and exception register so that
+		 * we can tell if a trap really occurs while 
+		 * emulating the instruction.
+		 */
+		Clear_tbit();
+		Clear_excp_register(exception_index);
+		/*
+		 * Now emulate this instruction.  If a trap occurs,
+		 * fpudispatch will return a non-zero number 
+		 */
+		excp = fpudispatch(current_ir,excptype,0,Fpu_register);
+		/* accumulate the status flags, don't lose them as in hpux */
+		if (excp) {
+			/*
+			 * We now need to make sure that the T-bit and the
+			 * exception register contain the correct values
+			 * before continuing.
+			 */
+			/*
+			 * Set t-bit since it might still be needed for a
+			 * subsequent real trap (I don't understand fully -PB)
+			 */
+			Set_tbit();
+			/* some of the following code uses
+			 * Excp_type(exception_index) so fix that up */
+			Set_exceptiontype_and_instr_field(excp,current_ir,
+			 Fpu_register[exception_index]);
+			if (excp == UNIMPLEMENTEDEXCEPTION) {
+				/*
+			 	 * it is really unimplemented, so restore the
+			 	 * TIMEX extended unimplemented exception code
+			 	 */
+				excp = excptype;
+				update_trap_counts(Fpu_register, aflags, bflags, 
+					   trap_counts);
+				return SIGNALCODE(SIGILL, ILL_COPROC);
+			}
+			/* some of the following code uses excptype, so
+			 * fix that up too */
+			excptype = excp;
+		}
+		/* handle exceptions other than the real UNIMPLEMENTED the
+		 * same way as if the hardware had caused them */
+		if (excp == NOEXCEPTION)
+			/* For now use 'break', should technically be 'continue' */
+			break;
+	}
+
+	  /*
+	   * In PA89, the underflow exception has been extended to encode
+	   * additional information.  The exception looks like pp01x0,
+	   * where x is 1 if inexact and pp represent the inexact bit (I)
+	   * and the round away bit (RA)
+	   */
+	if (excptype & UNDERFLOWEXCEPTION) {
+		/* check for underflow trap enabled */
+		if (Is_underflowtrap_enabled()) {
+			update_trap_counts(Fpu_register, aflags, bflags, 
+					   trap_counts);
+			return SIGNALCODE(SIGFPE, FPE_FLTUND);
+		} else {
+		    /*
+		     * Isn't a real trap; we need to 
+		     * return the default value.
+		     */
+		    target = current_ir & fivebits;
+#ifndef lint
+		    if (Ibit(Fpu_register[exception_index])) inexact = TRUE;
+		    else inexact = FALSE;
+#endif
+		    switch (Excp_format()) {
+		      case SGL:
+		        /*
+		         * If ra (round-away) is set, will 
+		         * want to undo the rounding done
+		         * by the hardware.
+		         */
+		        if (Rabit(Fpu_register[exception_index])) 
+				Sgl_decrement(Fpu_sgl(target));
+
+			/* now denormalize */
+			sgl_denormalize(&Fpu_sgl(target),&inexact,Rounding_mode());
+		    	break;
+		      case DBL:
+		    	/*
+		    	 * If ra (round-away) is set, will 
+		    	 * want to undo the rounding done
+		    	 * by the hardware.
+		    	 */
+		    	if (Rabit(Fpu_register[exception_index])) 
+				Dbl_decrement(Fpu_dblp1(target),Fpu_dblp2(target));
+
+			/* now denormalize */
+			dbl_denormalize(&Fpu_dblp1(target),&Fpu_dblp2(target),
+			  &inexact,Rounding_mode());
+		    	break;
+		    }
+		    if (inexact) Set_underflowflag();
+		    /* 
+		     * Underflow can generate an inexact
+		     * exception.  If inexact trap is enabled,
+		     * want to do an inexact trap, otherwise 
+		     * set inexact flag.
+		     */
+		    if (inexact && Is_inexacttrap_enabled()) {
+		    	/*
+		    	 * Set exception field of exception register
+		    	 * to inexact, parm field to zero.
+			 * Underflow bit should be cleared.
+		    	 */
+		    	Set_exceptiontype(Fpu_register[exception_index],
+			 INEXACTEXCEPTION);
+			Set_parmfield(Fpu_register[exception_index],0);
+			update_trap_counts(Fpu_register, aflags, bflags, 
+					   trap_counts);
+			return SIGNALCODE(SIGFPE, FPE_FLTRES);
+		    }
+		    else {
+		    	/*
+		    	 * Exception register needs to be cleared.  
+			 * Inexact flag needs to be set if inexact.
+		    	 */
+		    	Clear_excp_register(exception_index);
+		    	if (inexact) Set_inexactflag();
+		    }
+		}
+		continue;
+	}
+	switch(Excp_type(exception_index)) {
+	  case OVERFLOWEXCEPTION:
+	  case OVERFLOWEXCEPTION | INEXACTEXCEPTION:
+		/* check for overflow trap enabled */
+		if (Is_overflowtrap_enabled()) {
+			update_trap_counts(Fpu_register, aflags, bflags, 
+					   trap_counts);
+			return SIGNALCODE(SIGFPE, FPE_FLTOVF);
+		} else {
+			/*
+			 * Isn't a real trap; we need to 
+			 * return the default value.
+			 */
+			target = current_ir & fivebits;
+			switch (Excp_format()) {
+			  case SGL: 
+				Sgl_setoverflow(Fpu_sgl(target));
+				break;
+			  case DBL:
+				Dbl_setoverflow(Fpu_dblp1(target),Fpu_dblp2(target));
+				break;
+			}
+			Set_overflowflag();
+			/* 
+			 * Overflow always generates an inexact
+			 * exception.  If inexact trap is enabled,
+			 * want to do an inexact trap, otherwise 
+			 * set inexact flag.
+			 */
+			if (Is_inexacttrap_enabled()) {
+				/*
+				 * Set exception field of exception
+				 * register to inexact.  Overflow
+				 * bit should be cleared.
+				 */
+				Set_exceptiontype(Fpu_register[exception_index],
+				 INEXACTEXCEPTION);
+				update_trap_counts(Fpu_register, aflags, bflags,
+					   trap_counts);
+				return SIGNALCODE(SIGFPE, FPE_FLTRES);
+			}
+			else {
+				/*
+				 * Exception register needs to be cleared.  
+				 * Inexact flag needs to be set.
+				 */
+				Clear_excp_register(exception_index);
+				Set_inexactflag();
+			}
+		}
+		break;
+	  case INVALIDEXCEPTION:
+	  case OPC_2E_INVALIDEXCEPTION:
+		update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+		return SIGNALCODE(SIGFPE, FPE_FLTINV);
+	  case DIVISIONBYZEROEXCEPTION:
+		update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+		Clear_excp_register(exception_index);
+	  	return SIGNALCODE(SIGFPE, FPE_FLTDIV);
+	  case INEXACTEXCEPTION:
+		update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+		return SIGNALCODE(SIGFPE, FPE_FLTRES);
+	  default:
+		update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+		printk("%s(%d) Unknown FPU exception 0x%x\n", __FILE__,
+			__LINE__, Excp_type(exception_index));
+		return SIGNALCODE(SIGILL, ILL_COPROC);
+	  case NOEXCEPTION:	/* no exception */
+		/*
+		 * Clear exception register in case 
+		 * other fields are non-zero.
+		 */
+		Clear_excp_register(exception_index);
+		break;
+	}
+    }
+    /*
+     * No real exceptions occurred.
+     */
+    Clear_tbit();
+    update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+    return(NOTRAP);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/denormal.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/denormal.c
new file mode 100644
index 0000000..60687e1
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/denormal.c
@@ -0,0 +1,135 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/fp/denormal.c		$ Revision: $
+ *
+ *  Purpose:
+ *	<<please update with a synopsis of the functionality provided by this file>>
+ *
+ *  External Interfaces:
+ *	<<the following list was autogenerated, please review>>
+ *	dbl_denormalize(dbl_opndp1,dbl_opndp2,inexactflag,rmode)
+ *	sgl_denormalize(sgl_opnd,inexactflag,rmode)
+ *
+ *  Internal Interfaces:
+ *	<<please update>>
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "hppa.h"
+#include <linux/kernel.h>
+/* #include <machine/sys/mdep_private.h> */
+
+#undef Fpustatus_register
+#define Fpustatus_register Fpu_register[0]
+
+/*
+ * sgl_denormalize: turn the single-precision operand at *sgl_opnd, whose
+ * exponent was wrapped by SGL_WRAP (result below the normal range), into
+ * a correctly rounded denormalized number.
+ *
+ * *inexactflag seeds the sticky bit on entry and reports on exit whether
+ * any precision was lost.  rmode selects the rounding direction
+ * (ROUNDPLUS / ROUNDMINUS / ROUNDNEAREST; ROUNDZERO truncates, so it
+ * needs no explicit case below).
+ */
+void
+sgl_denormalize(unsigned int *sgl_opnd, boolean *inexactflag, int rmode)
+{
+	unsigned int opnd;
+	int sign, exponent;
+	boolean guardbit = FALSE, stickybit, inexact;
+
+	opnd = *sgl_opnd;
+	stickybit = *inexactflag;
+        /* Remove the wrap bias to recover the true (tiny) exponent */
+        exponent = Sgl_exponent(opnd) - SGL_WRAP;
+        sign = Sgl_sign(opnd);
+	/* Right-shift the mantissa into denormal position; fills in
+	 * guardbit/stickybit and the overall inexact status. */
+	Sgl_denormalize(opnd,exponent,guardbit,stickybit,inexact);
+	if (inexact) {
+	    switch (rmode) {
+	      case ROUNDPLUS:
+		/* toward +inf: round positive results up in magnitude */
+		if (sign == 0) {
+			Sgl_increment(opnd);
+		}
+		break;
+	      case ROUNDMINUS:
+		/* toward -inf: round negative results up in magnitude */
+		if (sign != 0) {
+			Sgl_increment(opnd);
+		}
+		break;
+	      case ROUNDNEAREST:
+		/* nearest-even: round up when more than 1/2 ulp remains,
+		 * or exactly 1/2 ulp and the low mantissa bit is odd */
+		if (guardbit && (stickybit || 
+		       Sgl_isone_lowmantissa(opnd))) {
+			   Sgl_increment(opnd);
+		}
+		break;
+	    }
+	}
+	/* Reinstall the original sign before storing the result */
+	Sgl_set_sign(opnd,sign);
+	*sgl_opnd = opnd;
+	*inexactflag = inexact;
+	return;
+}
+
+/*
+ * dbl_denormalize: double-precision counterpart of sgl_denormalize().
+ * The operand occupies two words (*dbl_opndp1 holds sign/exponent and
+ * the mantissa high part, *dbl_opndp2 the mantissa low part).  The
+ * wrapped exponent is unbiased with DBL_WRAP, the mantissa is shifted
+ * into denormal position, and the result is rounded per rmode.
+ *
+ * *inexactflag seeds the sticky bit on entry and reports precision loss
+ * on exit.  ROUNDZERO (truncate) needs no explicit case below.
+ */
+void
+dbl_denormalize(unsigned int *dbl_opndp1,
+	unsigned int * dbl_opndp2,
+	boolean *inexactflag,
+	int rmode)
+{
+	unsigned int opndp1, opndp2;
+	int sign, exponent;
+	boolean guardbit = FALSE, stickybit, inexact;
+
+	opndp1 = *dbl_opndp1;
+	opndp2 = *dbl_opndp2;
+	stickybit = *inexactflag;
+	/* Remove the wrap bias to recover the true (tiny) exponent */
+	exponent = Dbl_exponent(opndp1) - DBL_WRAP;
+	sign = Dbl_sign(opndp1);
+	/* Right-shift the two-word mantissa into denormal position;
+	 * fills in guardbit/stickybit and the overall inexact status. */
+	Dbl_denormalize(opndp1,opndp2,exponent,guardbit,stickybit,inexact);
+	if (inexact) {
+	    switch (rmode) {
+	      case ROUNDPLUS:
+		/* toward +inf: round positive results up in magnitude */
+		if (sign == 0) {
+			Dbl_increment(opndp1,opndp2);
+		}
+		break;
+	      case ROUNDMINUS:
+		/* toward -inf: round negative results up in magnitude */
+		if (sign != 0) {
+			Dbl_increment(opndp1,opndp2);
+		}
+		break;
+	      case ROUNDNEAREST:
+		/* nearest-even: round up when more than 1/2 ulp remains,
+		 * or exactly 1/2 ulp and the low mantissa bit is odd */
+		if (guardbit && (stickybit || 
+		       Dbl_isone_lowmantissap2(opndp2))) {
+			   Dbl_increment(opndp1,opndp2);
+		}
+		break;
+	    }
+	}
+	/* Reinstall the original sign before storing the result */
+	Dbl_set_sign(opndp1,sign);
+	*dbl_opndp1 = opndp1;
+	*dbl_opndp2 = opndp2;
+	*inexactflag = inexact;
+	return;
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/dfadd.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/dfadd.c
new file mode 100644
index 0000000..d37e2d2
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/dfadd.c
@@ -0,0 +1,524 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/dfadd.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Double_add: add two double precision values.
+ *
+ *  External Interfaces:
+ *	dbl_fadd(leftptr, rightptr, dstptr, status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "dbl_float.h"
+
+/*
+ * Double_add: add two double precision values.
+ *
+ * Computes *dstptr = *leftptr + *rightptr with IEEE 754 semantics:
+ * NaN propagation (signaling NaNs raise invalid), signed infinities
+ * and zeros, denormalized operands/results, and rounding per the
+ * current rounding mode.
+ *
+ * Returns NOEXCEPTION on a clean result, or an exception code
+ * (INVALIDEXCEPTION, UNDERFLOWEXCEPTION, OVERFLOWEXCEPTION — the
+ * overflow case possibly or'ed with INEXACTEXCEPTION) when the
+ * corresponding trap is enabled; with traps disabled the matching
+ * sticky flag is raised via the Set_*flag macros instead.
+ *
+ * Fix vs. baseline: explicit `int` return type (implicit int is
+ * invalid since C99), explicit `break` before ROUNDZERO, and braces
+ * on the nested inexact/trap conditionals to remove the dangling-else
+ * hazard.  No behavioral change.
+ */
+int
+dbl_fadd(
+    dbl_floating_point *leftptr,
+    dbl_floating_point *rightptr,
+    dbl_floating_point *dstptr,
+    unsigned int *status)
+{
+    register unsigned int signless_upper_left, signless_upper_right, save;
+    register unsigned int leftp1, leftp2, rightp1, rightp2, extent;
+    register unsigned int resultp1 = 0, resultp2 = 0;
+
+    register int result_exponent, right_exponent, diff_exponent;
+    register int sign_save, jumpsize;
+    register boolean inexact = FALSE;
+    register boolean underflowtrap;
+
+    /* Create local copies of the numbers */
+    Dbl_copyfromptr(leftptr,leftp1,leftp2);
+    Dbl_copyfromptr(rightptr,rightp1,rightp2);
+
+    /* A zero "save" helps discover equal operands (for later),  *
+     * and is used in swapping operands (if needed).             */
+    Dbl_xortointp1(leftp1,rightp1,/*to*/save);
+
+    /*
+     * check first operand for NaN's or infinity
+     */
+    if ((result_exponent = Dbl_exponent(leftp1)) == DBL_INFINITY_EXPONENT)
+	{
+	if (Dbl_iszero_mantissa(leftp1,leftp2))
+	    {
+	    if (Dbl_isnotnan(rightp1,rightp2))
+		{
+		if (Dbl_isinfinity(rightp1,rightp2) && save!=0)
+		    {
+		    /*
+		     * invalid since operands are opposite signed infinity's
+		     */
+		    if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                    Set_invalidflag();
+                    Dbl_makequietnan(resultp1,resultp2);
+		    Dbl_copytoptr(resultp1,resultp2,dstptr);
+		    return(NOEXCEPTION);
+		    }
+		/*
+		 * return infinity
+		 */
+		Dbl_copytoptr(leftp1,leftp2,dstptr);
+		return(NOEXCEPTION);
+		}
+	    }
+	else
+	    {
+            /*
+             * is NaN; signaling or quiet?
+             */
+            if (Dbl_isone_signaling(leftp1))
+		{
+               	/* trap if INVALIDTRAP enabled */
+		if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+        	/* make NaN quiet */
+        	Set_invalidflag();
+        	Dbl_set_quiet(leftp1);
+        	}
+	    /*
+	     * is second operand a signaling NaN?
+	     */
+	    else if (Dbl_is_signalingnan(rightp1))
+		{
+        	/* trap if INVALIDTRAP enabled */
+               	if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+		/* make NaN quiet */
+		Set_invalidflag();
+		Dbl_set_quiet(rightp1);
+		Dbl_copytoptr(rightp1,rightp2,dstptr);
+		return(NOEXCEPTION);
+		}
+	    /*
+	     * return quiet NaN
+	     */
+	    Dbl_copytoptr(leftp1,leftp2,dstptr);
+	    return(NOEXCEPTION);
+	    }
+	} /* End left NaN or Infinity processing */
+    /*
+     * check second operand for NaN's or infinity
+     */
+    if (Dbl_isinfinity_exponent(rightp1))
+	{
+	if (Dbl_iszero_mantissa(rightp1,rightp2))
+	    {
+	    /* return infinity */
+	    Dbl_copytoptr(rightp1,rightp2,dstptr);
+	    return(NOEXCEPTION);
+	    }
+        /*
+         * is NaN; signaling or quiet?
+         */
+        if (Dbl_isone_signaling(rightp1))
+	    {
+            /* trap if INVALIDTRAP enabled */
+	    if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+	    /* make NaN quiet */
+	    Set_invalidflag();
+	    Dbl_set_quiet(rightp1);
+	    }
+	/*
+	 * return quiet NaN
+	 */
+	Dbl_copytoptr(rightp1,rightp2,dstptr);
+	return(NOEXCEPTION);
+    	} /* End right NaN or Infinity processing */
+
+    /* Invariant: Must be dealing with finite numbers */
+
+    /* Compare operands by removing the sign */
+    Dbl_copytoint_exponentmantissap1(leftp1,signless_upper_left);
+    Dbl_copytoint_exponentmantissap1(rightp1,signless_upper_right);
+
+    /* sign difference selects add or sub operation. */
+    if(Dbl_ismagnitudeless(leftp2,rightp2,signless_upper_left,signless_upper_right))
+	{
+	/* Set the left operand to the larger one by XOR swap *
+	 *  First finish the first word using "save"          */
+	Dbl_xorfromintp1(save,rightp1,/*to*/rightp1);
+	Dbl_xorfromintp1(save,leftp1,/*to*/leftp1);
+     	Dbl_swap_lower(leftp2,rightp2);
+	result_exponent = Dbl_exponent(leftp1);
+	}
+    /* Invariant:  left is not smaller than right. */
+
+    if((right_exponent = Dbl_exponent(rightp1)) == 0)
+        {
+	/* Denormalized operands.  First look for zeroes */
+	if(Dbl_iszero_mantissa(rightp1,rightp2))
+	    {
+	    /* right is zero */
+	    if(Dbl_iszero_exponentmantissa(leftp1,leftp2))
+		{
+		/* Both operands are zeros */
+		if(Is_rounding_mode(ROUNDMINUS))
+		    {
+		    Dbl_or_signs(leftp1,/*with*/rightp1);
+		    }
+		else
+		    {
+		    Dbl_and_signs(leftp1,/*with*/rightp1);
+		    }
+		}
+	    else
+		{
+		/* Left is not a zero and must be the result.  Trapped
+		 * underflows are signaled if left is denormalized.  Result
+		 * is always exact. */
+		if( (result_exponent == 0) && Is_underflowtrap_enabled() )
+		    {
+		    /* need to normalize results mantissa */
+	    	    sign_save = Dbl_signextendedsign(leftp1);
+		    Dbl_leftshiftby1(leftp1,leftp2);
+		    Dbl_normalize(leftp1,leftp2,result_exponent);
+		    Dbl_set_sign(leftp1,/*using*/sign_save);
+                    Dbl_setwrapped_exponent(leftp1,result_exponent,unfl);
+		    Dbl_copytoptr(leftp1,leftp2,dstptr);
+		    /* inexact = FALSE */
+		    return(UNDERFLOWEXCEPTION);
+		    }
+		}
+	    Dbl_copytoptr(leftp1,leftp2,dstptr);
+	    return(NOEXCEPTION);
+	    }
+
+	/* Neither are zeroes */
+	Dbl_clear_sign(rightp1);	/* Exponent is already cleared */
+	if(result_exponent == 0 )
+	    {
+	    /* Both operands are denormalized.  The result must be exact
+	     * and is simply calculated.  A sum could become normalized and a
+	     * difference could cancel to a true zero. */
+	    if( (/*signed*/int) save < 0 )
+		{
+		Dbl_subtract(leftp1,leftp2,/*minus*/rightp1,rightp2,
+		/*into*/resultp1,resultp2);
+		if(Dbl_iszero_mantissa(resultp1,resultp2))
+		    {
+		    if(Is_rounding_mode(ROUNDMINUS))
+			{
+			Dbl_setone_sign(resultp1);
+			}
+		    else
+			{
+			Dbl_setzero_sign(resultp1);
+			}
+		    Dbl_copytoptr(resultp1,resultp2,dstptr);
+		    return(NOEXCEPTION);
+		    }
+		}
+	    else
+		{
+		Dbl_addition(leftp1,leftp2,rightp1,rightp2,
+		/*into*/resultp1,resultp2);
+		if(Dbl_isone_hidden(resultp1))
+		    {
+		    Dbl_copytoptr(resultp1,resultp2,dstptr);
+		    return(NOEXCEPTION);
+		    }
+		}
+	    if(Is_underflowtrap_enabled())
+		{
+		/* need to normalize result */
+	    	sign_save = Dbl_signextendedsign(resultp1);
+		Dbl_leftshiftby1(resultp1,resultp2);
+		Dbl_normalize(resultp1,resultp2,result_exponent);
+		Dbl_set_sign(resultp1,/*using*/sign_save);
+                Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
+	        Dbl_copytoptr(resultp1,resultp2,dstptr);
+		/* inexact = FALSE */
+	        return(UNDERFLOWEXCEPTION);
+		}
+	    Dbl_copytoptr(resultp1,resultp2,dstptr);
+	    return(NOEXCEPTION);
+	    }
+	right_exponent = 1;	/* Set exponent to reflect different bias
+				 * with denormalized numbers. */
+	}
+    else
+	{
+	Dbl_clear_signexponent_set_hidden(rightp1);
+	}
+    Dbl_clear_exponent_set_hidden(leftp1);
+    diff_exponent = result_exponent - right_exponent;
+
+    /*
+     * Special case alignment of operands that would force alignment
+     * beyond the extent of the extension.  A further optimization
+     * could special case this but only reduces the path length for this
+     * infrequent case.
+     */
+    if(diff_exponent > DBL_THRESHOLD)
+	{
+	diff_exponent = DBL_THRESHOLD;
+	}
+
+    /* Align right operand by shifting to right */
+    Dbl_right_align(/*operand*/rightp1,rightp2,/*shifted by*/diff_exponent,
+    /*and lower to*/extent);
+
+    /* Treat sum and difference of the operands separately. */
+    if( (/*signed*/int) save < 0 )
+	{
+	/*
+	 * Difference of the two operands.  There can be no overflow.  A
+	 * borrow can occur out of the hidden bit and force a post
+	 * normalization phase.
+	 */
+	Dbl_subtract_withextension(leftp1,leftp2,/*minus*/rightp1,rightp2,
+	/*with*/extent,/*into*/resultp1,resultp2);
+	if(Dbl_iszero_hidden(resultp1))
+	    {
+	    /* Handle normalization */
+	    /* A straightforward algorithm would now shift the result
+	     * and extension left until the hidden bit becomes one.  Not
+	     * all of the extension bits need participate in the shift.
+	     * Only the two most significant bits (round and guard) are
+	     * needed.  If only a single shift is needed then the guard
+	     * bit becomes a significant low order bit and the extension
+	     * must participate in the rounding.  If more than a single
+	     * shift is needed, then all bits to the right of the guard
+	     * bit are zeros, and the guard bit may or may not be zero. */
+	    sign_save = Dbl_signextendedsign(resultp1);
+            Dbl_leftshiftby1_withextent(resultp1,resultp2,extent,resultp1,resultp2);
+
+            /* Need to check for a zero result.  The sign and exponent
+	     * fields have already been zeroed.  The more efficient test
+	     * of the full object can be used.
+	     */
+    	    if(Dbl_iszero(resultp1,resultp2))
+		/* Must have been "x-x" or "x+(-x)". */
+		{
+		if(Is_rounding_mode(ROUNDMINUS)) Dbl_setone_sign(resultp1);
+		Dbl_copytoptr(resultp1,resultp2,dstptr);
+		return(NOEXCEPTION);
+		}
+	    result_exponent--;
+	    /* Look to see if normalization is finished. */
+	    if(Dbl_isone_hidden(resultp1))
+		{
+		if(result_exponent==0)
+		    {
+		    /* Denormalized, exponent should be zero.  Left operand *
+		     * was normalized, so extent (guard, round) was zero    */
+		    goto underflow;
+		    }
+		else
+		    {
+		    /* No further normalization is needed. */
+		    Dbl_set_sign(resultp1,/*using*/sign_save);
+	    	    Ext_leftshiftby1(extent);
+		    goto round;
+		    }
+		}
+
+	    /* Check for denormalized, exponent should be zero.  Left    *
+	     * operand was normalized, so extent (guard, round) was zero */
+	    if(!(underflowtrap = Is_underflowtrap_enabled()) &&
+	       result_exponent==0) goto underflow;
+
+	    /* Shift extension to complete one bit of normalization and
+	     * update exponent. */
+	    Ext_leftshiftby1(extent);
+
+	    /* Discover first one bit to determine shift amount.  Use a
+	     * modified binary search.  We have already shifted the result
+	     * one position right and still not found a one so the remainder
+	     * of the extension must be zero and simplifies rounding. */
+	    /* Scan bytes */
+	    while(Dbl_iszero_hiddenhigh7mantissa(resultp1))
+		{
+		Dbl_leftshiftby8(resultp1,resultp2);
+		if((result_exponent -= 8) <= 0  && !underflowtrap)
+		    goto underflow;
+		}
+	    /* Now narrow it down to the nibble */
+	    if(Dbl_iszero_hiddenhigh3mantissa(resultp1))
+		{
+		/* The lower nibble contains the normalizing one */
+		Dbl_leftshiftby4(resultp1,resultp2);
+		if((result_exponent -= 4) <= 0 && !underflowtrap)
+		    goto underflow;
+		}
+	    /* Select case where first bit is set (already normalized)
+	     * otherwise select the proper shift. */
+	    if((jumpsize = Dbl_hiddenhigh3mantissa(resultp1)) > 7)
+		{
+		/* Already normalized */
+		if(result_exponent <= 0) goto underflow;
+		Dbl_set_sign(resultp1,/*using*/sign_save);
+		Dbl_set_exponent(resultp1,/*using*/result_exponent);
+		Dbl_copytoptr(resultp1,resultp2,dstptr);
+		return(NOEXCEPTION);
+		}
+	    Dbl_sethigh4bits(resultp1,/*using*/sign_save);
+	    switch(jumpsize)
+		{
+		case 1:
+		    {
+		    Dbl_leftshiftby3(resultp1,resultp2);
+		    result_exponent -= 3;
+		    break;
+		    }
+		case 2:
+		case 3:
+		    {
+		    Dbl_leftshiftby2(resultp1,resultp2);
+		    result_exponent -= 2;
+		    break;
+		    }
+		case 4:
+		case 5:
+		case 6:
+		case 7:
+		    {
+		    Dbl_leftshiftby1(resultp1,resultp2);
+		    result_exponent -= 1;
+		    break;
+		    }
+		}
+	    if(result_exponent > 0)
+		{
+		Dbl_set_exponent(resultp1,/*using*/result_exponent);
+		Dbl_copytoptr(resultp1,resultp2,dstptr);
+		return(NOEXCEPTION); 	/* Sign bit is already set */
+		}
+	    /* Fixup potential underflows */
+	  underflow:
+	    if(Is_underflowtrap_enabled())
+		{
+		Dbl_set_sign(resultp1,sign_save);
+                Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
+		Dbl_copytoptr(resultp1,resultp2,dstptr);
+		/* inexact = FALSE */
+		return(UNDERFLOWEXCEPTION);
+		}
+	    /*
+	     * Since we cannot get an inexact denormalized result,
+	     * we can now return.
+	     */
+	    Dbl_fix_overshift(resultp1,resultp2,(1-result_exponent),extent);
+	    Dbl_clear_signexponent(resultp1);
+	    Dbl_set_sign(resultp1,sign_save);
+	    Dbl_copytoptr(resultp1,resultp2,dstptr);
+	    return(NOEXCEPTION);
+	    } /* end if(hidden...)... */
+	/* Fall through and round */
+	} /* end if(save < 0)... */
+    else
+	{
+	/* Add magnitudes */
+	Dbl_addition(leftp1,leftp2,rightp1,rightp2,/*to*/resultp1,resultp2);
+	if(Dbl_isone_hiddenoverflow(resultp1))
+	    {
+	    /* Prenormalization required. */
+	    Dbl_rightshiftby1_withextent(resultp2,extent,extent);
+	    Dbl_arithrightshiftby1(resultp1,resultp2);
+	    result_exponent++;
+	    } /* end if hiddenoverflow... */
+	} /* end else ...add magnitudes... */
+
+    /* Round the result.  If the extension is all zeros,then the result is
+     * exact.  Otherwise round in the correct direction.  No underflow is
+     * possible. If a postnormalization is necessary, then the mantissa is
+     * all zeros so no shift is needed. */
+  round:
+    if(Ext_isnotzero(extent))
+	{
+	inexact = TRUE;
+	switch(Rounding_mode())
+	    {
+	    case ROUNDNEAREST: /* The default. */
+	    if(Ext_isone_sign(extent))
+		{
+		/* at least 1/2 ulp */
+		if(Ext_isnotzero_lower(extent)  ||
+		  Dbl_isone_lowmantissap2(resultp2))
+		    {
+		    /* either exactly half way and odd or more than 1/2ulp */
+		    Dbl_increment(resultp1,resultp2);
+		    }
+		}
+	    break;
+
+	    case ROUNDPLUS:
+	    if(Dbl_iszero_sign(resultp1))
+		{
+		/* Round up positive results */
+		Dbl_increment(resultp1,resultp2);
+		}
+	    break;
+
+	    case ROUNDMINUS:
+	    if(Dbl_isone_sign(resultp1))
+		{
+		/* Round down negative results */
+		Dbl_increment(resultp1,resultp2);
+		}
+	    break;	/* was an implicit fall-through into ROUNDZERO (no-op) */
+
+	    case ROUNDZERO:;
+	    /* truncate is simple */
+	    } /* end switch... */
+	if(Dbl_isone_hiddenoverflow(resultp1)) result_exponent++;
+	}
+    if(result_exponent == DBL_INFINITY_EXPONENT)
+        {
+        /* Overflow */
+        if(Is_overflowtrap_enabled())
+	    {
+	    Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl);
+	    Dbl_copytoptr(resultp1,resultp2,dstptr);
+	    if (inexact) {
+		if (Is_inexacttrap_enabled())
+			return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+		else
+			Set_inexactflag();
+	    }
+	    return(OVERFLOWEXCEPTION);
+	    }
+        else
+	    {
+	    inexact = TRUE;
+	    Set_overflowflag();
+	    Dbl_setoverflow(resultp1,resultp2);
+	    }
+	}
+    else Dbl_set_exponent(resultp1,result_exponent);
+    Dbl_copytoptr(resultp1,resultp2,dstptr);
+    if(inexact) {
+	if(Is_inexacttrap_enabled())
+	    return(INEXACTEXCEPTION);
+	else
+	    Set_inexactflag();
+    }
+    return(NOEXCEPTION);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/dfcmp.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/dfcmp.c
new file mode 100644
index 0000000..5952126
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/dfcmp.c
@@ -0,0 +1,181 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/dfcmp.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	dbl_cmp: compare two values
+ *
+ *  External Interfaces:
+ *	dbl_fcmp(leftptr, rightptr, cond, status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+
+#include "float.h"
+#include "dbl_float.h"
+    
+/*
+ * dbl_cmp: compare two values
+ *
+ * Compares *leftptr against *rightptr and records the truth of the
+ * predicate "cond" in the status C-bit via Set_status_cbit().  NaNs
+ * compare unordered; a signaling NaN (or a quiet NaN with the
+ * exception bit of cond set) raises the invalid exception.  Returns
+ * INVALIDEXCEPTION when that trap is enabled, NOEXCEPTION otherwise.
+ */
+int
+dbl_fcmp (dbl_floating_point * leftptr, dbl_floating_point * rightptr,
+	  unsigned int cond, unsigned int *status)
+                                           
+                       /* The predicate to be tested */
+                         
+    {
+    register unsigned int leftp1, leftp2, rightp1, rightp2;
+    register int xorresult;
+        
+    /* Create local copies of the numbers */
+    Dbl_copyfromptr(leftptr,leftp1,leftp2);
+    Dbl_copyfromptr(rightptr,rightp1,rightp2);
+    /*
+     * Test for NaN
+     */
+    if(    (Dbl_exponent(leftp1) == DBL_INFINITY_EXPONENT)
+        || (Dbl_exponent(rightp1) == DBL_INFINITY_EXPONENT) )
+	{
+	/* Check if a NaN is involved.  Signal an invalid exception when 
+	 * comparing a signaling NaN or when comparing quiet NaNs and the
+	 * low bit of the condition is set */
+        if( ((Dbl_exponent(leftp1) == DBL_INFINITY_EXPONENT)
+	    && Dbl_isnotzero_mantissa(leftp1,leftp2) 
+	    && (Exception(cond) || Dbl_isone_signaling(leftp1)))
+	   ||
+	    ((Dbl_exponent(rightp1) == DBL_INFINITY_EXPONENT)
+	    && Dbl_isnotzero_mantissa(rightp1,rightp2) 
+	    && (Exception(cond) || Dbl_isone_signaling(rightp1))) )
+	    {
+	    if( Is_invalidtrap_enabled() ) {
+	    	Set_status_cbit(Unordered(cond));
+		return(INVALIDEXCEPTION);
+	    }
+	    else Set_invalidflag();
+	    Set_status_cbit(Unordered(cond));
+	    return(NOEXCEPTION);
+	    }
+	/* All the exceptional conditions are handled, now special case
+	   NaN compares */
+        else if( ((Dbl_exponent(leftp1) == DBL_INFINITY_EXPONENT)
+	    && Dbl_isnotzero_mantissa(leftp1,leftp2))
+	   ||
+	    ((Dbl_exponent(rightp1) == DBL_INFINITY_EXPONENT)
+	    && Dbl_isnotzero_mantissa(rightp1,rightp2)) )
+	    {
+	    /* NaNs always compare unordered. */
+	    Set_status_cbit(Unordered(cond));
+	    return(NOEXCEPTION);
+	    }
+	/* infinities will drop down to the normal compare mechanisms */
+	}
+    /* First compare for unequal signs => less or greater or
+     * special equal case.  xorresult < 0 iff the sign bits differ. */
+    Dbl_xortointp1(leftp1,rightp1,xorresult);
+    if( xorresult < 0 )
+        {
+        /* left negative => less, left positive => greater.
+         * equal is possible if both operands are zeros (+0 == -0). */
+        if( Dbl_iszero_exponentmantissa(leftp1,leftp2) 
+	  && Dbl_iszero_exponentmantissa(rightp1,rightp2) )
+            {
+	    Set_status_cbit(Equal(cond));
+	    }
+	else if( Dbl_isone_sign(leftp1) )
+	    {
+	    Set_status_cbit(Lessthan(cond));
+	    }
+	else
+	    {
+	    Set_status_cbit(Greaterthan(cond));
+	    }
+        }
+    /* Signs are the same.  Treat negative numbers separately
+     * from the positives because of the reversed sense.  */
+    else if(Dbl_isequal(leftp1,leftp2,rightp1,rightp2))
+        {
+        Set_status_cbit(Equal(cond));
+        }
+    else if( Dbl_iszero_sign(leftp1) )
+        {
+        /* Positive compare: IEEE ordering matches integer ordering
+         * of the raw words when both operands are non-negative. */
+	if( Dbl_allp1(leftp1) < Dbl_allp1(rightp1) )
+	    {
+	    Set_status_cbit(Lessthan(cond));
+	    }
+	else if( Dbl_allp1(leftp1) > Dbl_allp1(rightp1) )
+	    {
+	    Set_status_cbit(Greaterthan(cond));
+	    }
+	else
+	    {
+	    /* Equal first parts.  Now we must use unsigned compares to
+	     * resolve the two possibilities. */
+	    if( Dbl_allp2(leftp2) < Dbl_allp2(rightp2) )
+		{
+		Set_status_cbit(Lessthan(cond));
+		}
+	    else 
+		{
+		Set_status_cbit(Greaterthan(cond));
+		}
+	    }
+	}
+    else
+        {
+        /* Negative compare.  Signed or unsigned compares
+         * both work the same.  That distinction is only
+         * important when the sign bits differ.  Note the reversed
+         * sense: a larger raw word is a more negative value. */
+	if( Dbl_allp1(leftp1) > Dbl_allp1(rightp1) )
+	    {
+	    Set_status_cbit(Lessthan(cond));
+	    }
+	else if( Dbl_allp1(leftp1) < Dbl_allp1(rightp1) )
+	    {
+	    Set_status_cbit(Greaterthan(cond));
+	    }
+	else
+	    {
+	    /* Equal first parts.  Now we must use unsigned compares to
+	     * resolve the two possibilities. */
+	    if( Dbl_allp2(leftp2) > Dbl_allp2(rightp2) )
+		{
+		Set_status_cbit(Lessthan(cond));
+		}
+	    else 
+		{
+		Set_status_cbit(Greaterthan(cond));
+		}
+	    }
+        }
+	return(NOEXCEPTION);
+    }
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/dfdiv.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/dfdiv.c
new file mode 100644
index 0000000..d7d4bec
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/dfdiv.c
@@ -0,0 +1,400 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/dfdiv.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Double Precision Floating-point Divide
+ *
+ *  External Interfaces:
+ *	dbl_fdiv(srcptr1,srcptr2,dstptr,status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "dbl_float.h"
+
+/*
+ *  Double Precision Floating-point Divide
+ */
+
+int
+dbl_fdiv (dbl_floating_point * srcptr1, dbl_floating_point * srcptr2,
+	  dbl_floating_point * dstptr, unsigned int *status)
+{
+	/*
+	 * Emulated IEEE double-precision divide: *dstptr = *srcptr1 / *srcptr2.
+	 * Each double is manipulated as two 32-bit words: p1 holds the sign,
+	 * exponent and high mantissa bits, p2 the low mantissa bits.
+	 * Returns an exception code (NOEXCEPTION, INVALIDEXCEPTION,
+	 * DIVISIONBYZEROEXCEPTION, OVERFLOWEXCEPTION, UNDERFLOWEXCEPTION,
+	 * possibly OR'ed with INEXACTEXCEPTION) for the caller to deliver;
+	 * sticky status flags are recorded via the Set_*flag macros
+	 * (presumably into *status — confirm against float.h).
+	 */
+	register unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2;
+	register unsigned int opnd3p1, opnd3p2, resultp1, resultp2;
+	register int dest_exponent, count;
+	register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
+	boolean is_tiny;
+
+	Dbl_copyfromptr(srcptr1,opnd1p1,opnd1p2);
+	Dbl_copyfromptr(srcptr2,opnd2p1,opnd2p2);
+	/* 
+	 * set sign bit of result 
+	 */
+	if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1)) 
+		Dbl_setnegativezerop1(resultp1);  
+	else Dbl_setzerop1(resultp1);
+	/*
+	 * check first operand for NaN's or infinity
+	 */
+	if (Dbl_isinfinity_exponent(opnd1p1)) {
+		if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+			if (Dbl_isnotnan(opnd2p1,opnd2p2)) {
+				if (Dbl_isinfinity(opnd2p1,opnd2p2)) {
+					/* 
+					 * invalid since both operands 
+					 * are infinity 
+					 */
+					if (Is_invalidtrap_enabled())
+                                		return(INVALIDEXCEPTION);
+                                	Set_invalidflag();
+                                	Dbl_makequietnan(resultp1,resultp2);
+					Dbl_copytoptr(resultp1,resultp2,dstptr);
+					return(NOEXCEPTION);
+				}
+				/*
+			 	 * return infinity
+			 	 */
+				Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+				Dbl_copytoptr(resultp1,resultp2,dstptr);
+				return(NOEXCEPTION);
+			}
+		}
+		else {
+                	/*
+                 	 * is NaN; signaling or quiet?
+                 	 */
+                	if (Dbl_isone_signaling(opnd1p1)) {
+                        	/* trap if INVALIDTRAP enabled */
+                        	if (Is_invalidtrap_enabled())
+                            		return(INVALIDEXCEPTION);
+                        	/* make NaN quiet */
+                        	Set_invalidflag();
+                        	Dbl_set_quiet(opnd1p1);
+                	}
+			/* 
+			 * is second operand a signaling NaN? 
+			 */
+			else if (Dbl_is_signalingnan(opnd2p1)) {
+                        	/* trap if INVALIDTRAP enabled */
+                        	if (Is_invalidtrap_enabled())
+                            		return(INVALIDEXCEPTION);
+                        	/* make NaN quiet */
+                        	Set_invalidflag();
+                        	Dbl_set_quiet(opnd2p1);
+				Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+                		return(NOEXCEPTION);
+			}
+                	/*
+                 	 * return quiet NaN
+                 	 */
+			Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
+                	return(NOEXCEPTION);
+		}
+	}
+	/*
+	 * check second operand for NaN's or infinity
+	 */
+	if (Dbl_isinfinity_exponent(opnd2p1)) {
+		if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
+			/*
+			 * return zero (finite / infinity)
+			 */
+			Dbl_setzero_exponentmantissa(resultp1,resultp2);
+			Dbl_copytoptr(resultp1,resultp2,dstptr);
+			return(NOEXCEPTION);
+		}
+                /*
+                 * is NaN; signaling or quiet?
+                 */
+                if (Dbl_isone_signaling(opnd2p1)) {
+                        /* trap if INVALIDTRAP enabled */
+                        if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                        /* make NaN quiet */
+                        Set_invalidflag();
+                        Dbl_set_quiet(opnd2p1);
+                }
+                /*
+                 * return quiet NaN
+                 */
+		Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+                return(NOEXCEPTION);
+	}
+        /*
+         * check for division by zero
+         */
+        if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
+                if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
+                        /* invalid since both operands are zero */
+                        if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                        Set_invalidflag();
+                        Dbl_makequietnan(resultp1,resultp2);
+                        Dbl_copytoptr(resultp1,resultp2,dstptr);
+                        return(NOEXCEPTION);
+                }
+                /* finite nonzero / zero: signed infinity */
+                if (Is_divisionbyzerotrap_enabled())
+                       	return(DIVISIONBYZEROEXCEPTION);
+                Set_divisionbyzeroflag();
+                Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+                Dbl_copytoptr(resultp1,resultp2,dstptr);
+                return(NOEXCEPTION);
+        }
+	/*
+	 * Generate exponent 
+	 */
+	dest_exponent = Dbl_exponent(opnd1p1) - Dbl_exponent(opnd2p1) + DBL_BIAS;
+
+	/*
+	 * Generate mantissa
+	 */
+	if (Dbl_isnotzero_exponent(opnd1p1)) {
+		/* set hidden bit */
+		Dbl_clear_signexponent_set_hidden(opnd1p1);
+	}
+	else {
+		/* check for zero */
+		if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+			Dbl_setzero_exponentmantissa(resultp1,resultp2);
+			Dbl_copytoptr(resultp1,resultp2,dstptr);
+			return(NOEXCEPTION);
+		}
+                /* is denormalized, want to normalize */
+                Dbl_clear_signexponent(opnd1p1);
+                Dbl_leftshiftby1(opnd1p1,opnd1p2);
+		/* NOTE(review): Dbl_normalize appears to adjust dest_exponent
+		 * in place while shifting — confirm against dbl_float.h */
+		Dbl_normalize(opnd1p1,opnd1p2,dest_exponent);
+	}
+	/* opnd2 needs to have hidden bit set with msb in hidden bit */
+	if (Dbl_isnotzero_exponent(opnd2p1)) {
+		Dbl_clear_signexponent_set_hidden(opnd2p1);
+	}
+	else {
+                /* is denormalized; want to normalize */
+                Dbl_clear_signexponent(opnd2p1);
+                Dbl_leftshiftby1(opnd2p1,opnd2p2);
+                /* shift in chunks of 8, then 4, then 1 until the hidden
+                 * bit position is occupied, bumping the exponent to match */
+                while (Dbl_iszero_hiddenhigh7mantissa(opnd2p1)) {
+                        dest_exponent+=8;
+                        Dbl_leftshiftby8(opnd2p1,opnd2p2);
+                }
+                if (Dbl_iszero_hiddenhigh3mantissa(opnd2p1)) {
+                        dest_exponent+=4;
+                        Dbl_leftshiftby4(opnd2p1,opnd2p2);
+                }
+                while (Dbl_iszero_hidden(opnd2p1)) {
+                        dest_exponent++;
+                        Dbl_leftshiftby1(opnd2p1,opnd2p2);
+                }
+	}
+
+	/* Divide the source mantissas */
+
+	/* 
+	 * A non-restoring divide algorithm is used: one quotient bit is
+	 * produced per iteration in opnd3, while opnd1 carries the running
+	 * (possibly negative) partial remainder.  The loop exits early if
+	 * the remainder becomes exactly zero.
+	 */
+	Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
+	Dbl_setzero(opnd3p1,opnd3p2);
+	for (count=1; count <= DBL_P && (opnd1p1 || opnd1p2); count++) {
+		Dbl_leftshiftby1(opnd1p1,opnd1p2);
+		Dbl_leftshiftby1(opnd3p1,opnd3p2);
+		if (Dbl_iszero_sign(opnd1p1)) {
+			/* remainder non-negative: quotient bit is 1 */
+			Dbl_setone_lowmantissap2(opnd3p2);
+			Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
+		}
+		else {
+			/* remainder negative: add divisor back (non-restoring) */
+			Twoword_add(opnd1p1, opnd1p2, opnd2p1, opnd2p2);
+		}
+	}
+	if (count <= DBL_P) {
+		/* early exit: remainder hit zero, so the quotient is exact.
+		 * guardbit/stickybit stay FALSE and no rounding occurs. */
+		Dbl_leftshiftby1(opnd3p1,opnd3p2);
+		Dbl_setone_lowmantissap2(opnd3p2);
+		Dbl_leftshift(opnd3p1,opnd3p2,(DBL_P-count));
+		if (Dbl_iszero_hidden(opnd3p1)) {
+			Dbl_leftshiftby1(opnd3p1,opnd3p2);
+			dest_exponent--;
+		}
+	}
+	else {
+		if (Dbl_iszero_hidden(opnd3p1)) {
+			/* need to get one more bit of result */
+			Dbl_leftshiftby1(opnd1p1,opnd1p2);
+			Dbl_leftshiftby1(opnd3p1,opnd3p2);
+			if (Dbl_iszero_sign(opnd1p1)) {
+				Dbl_setone_lowmantissap2(opnd3p2);
+				Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
+			}
+			else {
+				Twoword_add(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
+			}
+			dest_exponent--;
+		}
+		/* guard bit = next quotient bit; sticky = any nonzero
+		 * remainder beyond the guard position */
+		if (Dbl_iszero_sign(opnd1p1)) guardbit = TRUE;
+		stickybit = Dbl_allp1(opnd1p1) || Dbl_allp2(opnd1p2);
+	}
+	inexact = guardbit | stickybit;
+
+	/* 
+	 * round result 
+	 */
+	if (inexact && (dest_exponent > 0 || Is_underflowtrap_enabled())) {
+		Dbl_clear_signexponent(opnd3p1);
+		switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				if (Dbl_iszero_sign(resultp1)) 
+					Dbl_increment(opnd3p1,opnd3p2);
+				break;
+			case ROUNDMINUS: 
+				if (Dbl_isone_sign(resultp1)) 
+					Dbl_increment(opnd3p1,opnd3p2);
+				break;
+			case ROUNDNEAREST:
+				/* round-to-nearest-even: bump on guard set and
+				 * (sticky set or current lsb odd) */
+				if (guardbit && (stickybit || 
+				    Dbl_isone_lowmantissap2(opnd3p2))) {
+			      		Dbl_increment(opnd3p1,opnd3p2);
+				}
+		}
+		/* rounding may carry out of the mantissa */
+		if (Dbl_isone_hidden(opnd3p1)) dest_exponent++;
+	}
+	Dbl_set_mantissa(resultp1,resultp2,opnd3p1,opnd3p2);
+
+        /* 
+         * Test for overflow
+         */
+	if (dest_exponent >= DBL_INFINITY_EXPONENT) {
+                /* trap if OVERFLOWTRAP enabled */
+                if (Is_overflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+                        Dbl_setwrapped_exponent(resultp1,dest_exponent,ovfl);
+                        Dbl_copytoptr(resultp1,resultp2,dstptr);
+                        if (inexact) 
+                            if (Is_inexacttrap_enabled())
+                                return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+                            else Set_inexactflag();
+                        return(OVERFLOWEXCEPTION);
+                }
+		Set_overflowflag();
+                /* set result to infinity or largest number */
+		Dbl_setoverflow(resultp1,resultp2);
+		inexact = TRUE;
+	}
+        /* 
+         * Test for underflow
+         */
+	else if (dest_exponent <= 0) {
+                /* trap if UNDERFLOWTRAP enabled */
+                if (Is_underflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+                        Dbl_setwrapped_exponent(resultp1,dest_exponent,unfl);
+                        Dbl_copytoptr(resultp1,resultp2,dstptr);
+                        if (inexact) 
+                            if (Is_inexacttrap_enabled())
+                                return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
+                            else Set_inexactflag();
+                        return(UNDERFLOWEXCEPTION);
+                }
+
+		/* Determine if should set underflow flag: result is "tiny"
+		 * unless rounding would have carried it back up to the
+		 * smallest normalized value (increment, test, undo). */
+		is_tiny = TRUE;
+		if (dest_exponent == 0 && inexact) {
+			switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				if (Dbl_iszero_sign(resultp1)) {
+					Dbl_increment(opnd3p1,opnd3p2);
+					if (Dbl_isone_hiddenoverflow(opnd3p1))
+                			    is_tiny = FALSE;
+					Dbl_decrement(opnd3p1,opnd3p2);
+				}
+				break;
+			case ROUNDMINUS: 
+				if (Dbl_isone_sign(resultp1)) {
+					Dbl_increment(opnd3p1,opnd3p2);
+					if (Dbl_isone_hiddenoverflow(opnd3p1))
+                			    is_tiny = FALSE;
+					Dbl_decrement(opnd3p1,opnd3p2);
+				}
+				break;
+			case ROUNDNEAREST:
+				if (guardbit && (stickybit || 
+				    Dbl_isone_lowmantissap2(opnd3p2))) {
+				      	Dbl_increment(opnd3p1,opnd3p2);
+					if (Dbl_isone_hiddenoverflow(opnd3p1))
+                			    is_tiny = FALSE;
+					Dbl_decrement(opnd3p1,opnd3p2);
+				}
+				break;
+			}
+		}
+
+                /*
+                 * denormalize result or set to signed zero
+                 */
+		stickybit = inexact;
+		/* NOTE(review): Dbl_denormalize appears to update guardbit,
+		 * stickybit and inexact in place for the shifted-out bits —
+		 * confirm against dbl_float.h */
+		Dbl_denormalize(opnd3p1,opnd3p2,dest_exponent,guardbit,
+		 stickybit,inexact);
+
+		/* return rounded number */ 
+		if (inexact) {
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+				if (Dbl_iszero_sign(resultp1)) {
+					Dbl_increment(opnd3p1,opnd3p2);
+				}
+				break;
+			case ROUNDMINUS: 
+				if (Dbl_isone_sign(resultp1)) {
+					Dbl_increment(opnd3p1,opnd3p2);
+				}
+				break;
+			case ROUNDNEAREST:
+				if (guardbit && (stickybit || 
+				    Dbl_isone_lowmantissap2(opnd3p2))) {
+			      		Dbl_increment(opnd3p1,opnd3p2);
+				}
+				break;
+			}
+                	if (is_tiny) Set_underflowflag();
+                }
+		Dbl_set_exponentmantissa(resultp1,resultp2,opnd3p1,opnd3p2);
+	}
+	else Dbl_set_exponent(resultp1,dest_exponent);
+	Dbl_copytoptr(resultp1,resultp2,dstptr);
+
+	/* check for inexact */
+	if (inexact) {
+		if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+		else Set_inexactflag();
+	}
+	return(NOEXCEPTION);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/dfmpy.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/dfmpy.c
new file mode 100644
index 0000000..4380f5a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/dfmpy.c
@@ -0,0 +1,394 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/dfmpy.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Double Precision Floating-point Multiply
+ *
+ *  External Interfaces:
+ *	dbl_fmpy(srcptr1,srcptr2,dstptr,status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "dbl_float.h"
+
+/*
+ *  Double Precision Floating-point Multiply
+ */
+
+int
+dbl_fmpy(
+	    dbl_floating_point *srcptr1,
+	    dbl_floating_point *srcptr2,
+	    dbl_floating_point *dstptr,
+	    unsigned int *status)
+{
+	/*
+	 * Emulated IEEE double-precision multiply: *dstptr = *srcptr1 * *srcptr2.
+	 * Operands are handled as 32-bit word pairs (p1 = sign/exponent/high
+	 * mantissa, p2 = low mantissa).  The product mantissa is built with a
+	 * shift-and-add loop, 4 multiplier bits per iteration.  Returns an
+	 * exception code (NOEXCEPTION, INVALIDEXCEPTION, OVERFLOWEXCEPTION,
+	 * UNDERFLOWEXCEPTION, possibly OR'ed with INEXACTEXCEPTION); sticky
+	 * status flags are recorded via the Set_*flag macros.
+	 */
+	register unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2;
+	register unsigned int opnd3p1, opnd3p2, resultp1, resultp2;
+	register int dest_exponent, count;
+	register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
+	boolean is_tiny;
+
+	Dbl_copyfromptr(srcptr1,opnd1p1,opnd1p2);
+	Dbl_copyfromptr(srcptr2,opnd2p1,opnd2p2);
+
+	/* 
+	 * set sign bit of result 
+	 */
+	if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1)) 
+		Dbl_setnegativezerop1(resultp1); 
+	else Dbl_setzerop1(resultp1);
+	/*
+	 * check first operand for NaN's or infinity
+	 */
+	if (Dbl_isinfinity_exponent(opnd1p1)) {
+		if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+			if (Dbl_isnotnan(opnd2p1,opnd2p2)) {
+				if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
+					/* 
+					 * invalid since operands are infinity 
+					 * and zero 
+					 */
+					if (Is_invalidtrap_enabled())
+                                		return(INVALIDEXCEPTION);
+                                	Set_invalidflag();
+                                	Dbl_makequietnan(resultp1,resultp2);
+					Dbl_copytoptr(resultp1,resultp2,dstptr);
+					return(NOEXCEPTION);
+				}
+				/*
+			 	 * return infinity
+			 	 */
+				Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+				Dbl_copytoptr(resultp1,resultp2,dstptr);
+				return(NOEXCEPTION);
+			}
+		}
+		else {
+                	/*
+                 	 * is NaN; signaling or quiet?
+                 	 */
+                	if (Dbl_isone_signaling(opnd1p1)) {
+                        	/* trap if INVALIDTRAP enabled */
+                        	if (Is_invalidtrap_enabled()) 
+                            		return(INVALIDEXCEPTION);
+                        	/* make NaN quiet */
+                        	Set_invalidflag();
+                        	Dbl_set_quiet(opnd1p1);
+                	}
+			/* 
+			 * is second operand a signaling NaN? 
+			 */
+			else if (Dbl_is_signalingnan(opnd2p1)) {
+                        	/* trap if INVALIDTRAP enabled */
+                        	if (Is_invalidtrap_enabled())
+                            		return(INVALIDEXCEPTION);
+                        	/* make NaN quiet */
+                        	Set_invalidflag();
+                        	Dbl_set_quiet(opnd2p1);
+				Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+                		return(NOEXCEPTION);
+			}
+                	/*
+                 	 * return quiet NaN
+                 	 */
+			Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
+                	return(NOEXCEPTION);
+		}
+	}
+	/*
+	 * check second operand for NaN's or infinity
+	 */
+	if (Dbl_isinfinity_exponent(opnd2p1)) {
+		if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
+			if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
+				/* invalid since operands are zero & infinity */
+				if (Is_invalidtrap_enabled())
+                                	return(INVALIDEXCEPTION);
+                                Set_invalidflag();
+                                Dbl_makequietnan(opnd2p1,opnd2p2);
+				Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+				return(NOEXCEPTION);
+			}
+			/*
+			 * return infinity
+			 */
+			Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+			Dbl_copytoptr(resultp1,resultp2,dstptr);
+			return(NOEXCEPTION);
+		}
+                /*
+                 * is NaN; signaling or quiet?
+                 */
+                if (Dbl_isone_signaling(opnd2p1)) {
+                        /* trap if INVALIDTRAP enabled */
+                        if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                        /* make NaN quiet */
+                        Set_invalidflag();
+                        Dbl_set_quiet(opnd2p1);
+                }
+                /*
+                 * return quiet NaN
+                 */
+		Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+                return(NOEXCEPTION);
+	}
+	/*
+	 * Generate exponent: biased exponents add, so one bias is removed
+	 */
+	dest_exponent = Dbl_exponent(opnd1p1) + Dbl_exponent(opnd2p1) -DBL_BIAS;
+
+	/*
+	 * Generate mantissa
+	 */
+	if (Dbl_isnotzero_exponent(opnd1p1)) {
+		/* set hidden bit */
+		Dbl_clear_signexponent_set_hidden(opnd1p1);
+	}
+	else {
+		/* check for zero */
+		if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+			Dbl_setzero_exponentmantissa(resultp1,resultp2);
+			Dbl_copytoptr(resultp1,resultp2,dstptr);
+			return(NOEXCEPTION);
+		}
+                /* is denormalized, adjust exponent */
+                Dbl_clear_signexponent(opnd1p1);
+                Dbl_leftshiftby1(opnd1p1,opnd1p2);
+		/* NOTE(review): Dbl_normalize appears to adjust dest_exponent
+		 * in place while shifting — confirm against dbl_float.h */
+		Dbl_normalize(opnd1p1,opnd1p2,dest_exponent);
+	}
+	/* opnd2 needs to have hidden bit set with msb in hidden bit */
+	if (Dbl_isnotzero_exponent(opnd2p1)) {
+		Dbl_clear_signexponent_set_hidden(opnd2p1);
+	}
+	else {
+		/* check for zero */
+		if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
+			Dbl_setzero_exponentmantissa(resultp1,resultp2);
+			Dbl_copytoptr(resultp1,resultp2,dstptr);
+			return(NOEXCEPTION);
+		}
+                /* is denormalized; want to normalize */
+                Dbl_clear_signexponent(opnd2p1);
+                Dbl_leftshiftby1(opnd2p1,opnd2p2);
+		Dbl_normalize(opnd2p1,opnd2p2,dest_exponent);
+	}
+
+	/* Multiply two source mantissas together */
+
+	/* make room for guard bits */
+	Dbl_leftshiftby7(opnd2p1,opnd2p2);
+	Dbl_setzero(opnd3p1,opnd3p2);
+        /* 
+         * Four bits at a time are inspected in each loop, and a 
+         * simple shift and add multiply algorithm is used.  The product
+         * accumulates in opnd3; bits shifted off its low end fold into
+         * stickybit so inexactness is not lost.
+         */ 
+	for (count=1;count<=DBL_P;count+=4) {
+		stickybit |= Dlow4p2(opnd3p2);
+		Dbl_rightshiftby4(opnd3p1,opnd3p2);
+		/* each Dbit2Xp2 test adds opnd2 shifted to match that
+		 * multiplier bit's weight (3, 2, 1, 0) */
+		if (Dbit28p2(opnd1p2)) {
+	 		/* Twoword_add should be an ADDC followed by an ADD. */
+                        Twoword_add(opnd3p1, opnd3p2, opnd2p1<<3 | opnd2p2>>29, 
+				    opnd2p2<<3);
+		}
+		if (Dbit29p2(opnd1p2)) {
+                        Twoword_add(opnd3p1, opnd3p2, opnd2p1<<2 | opnd2p2>>30, 
+				    opnd2p2<<2);
+		}
+		if (Dbit30p2(opnd1p2)) {
+                        Twoword_add(opnd3p1, opnd3p2, opnd2p1<<1 | opnd2p2>>31,
+				    opnd2p2<<1);
+		}
+		if (Dbit31p2(opnd1p2)) {
+                        Twoword_add(opnd3p1, opnd3p2, opnd2p1, opnd2p2);
+		}
+		Dbl_rightshiftby4(opnd1p1,opnd1p2);
+	}
+	/* product of two mantissas in [1,2) lies in [1,4): at most one
+	 * normalization step in either direction is expected here */
+	if (Dbit3p1(opnd3p1)==0) {
+		Dbl_leftshiftby1(opnd3p1,opnd3p2);
+	}
+	else {
+		/* result mantissa >= 2. */
+		dest_exponent++;
+	}
+	/* check for denormalized result */
+	while (Dbit3p1(opnd3p1)==0) {
+		Dbl_leftshiftby1(opnd3p1,opnd3p2);
+		dest_exponent--;
+	}
+	/*
+	 * check for guard, sticky and inexact bits 
+	 */
+	stickybit |= Dallp2(opnd3p2) << 25;
+	guardbit = (Dallp2(opnd3p2) << 24) >> 31;
+	inexact = guardbit | stickybit;
+
+	/* align result mantissa */
+	Dbl_rightshiftby8(opnd3p1,opnd3p2);
+
+	/* 
+	 * round result 
+	 */
+	if (inexact && (dest_exponent>0 || Is_underflowtrap_enabled())) {
+		Dbl_clear_signexponent(opnd3p1);
+		switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				if (Dbl_iszero_sign(resultp1)) 
+					Dbl_increment(opnd3p1,opnd3p2);
+				break;
+			case ROUNDMINUS: 
+				if (Dbl_isone_sign(resultp1)) 
+					Dbl_increment(opnd3p1,opnd3p2);
+				break;
+			case ROUNDNEAREST:
+				/* round-to-nearest-even */
+				if (guardbit) {
+			   	if (stickybit || Dbl_isone_lowmantissap2(opnd3p2))
+			      	Dbl_increment(opnd3p1,opnd3p2);
+				}
+		}
+		/* rounding may carry out of the mantissa */
+		if (Dbl_isone_hidden(opnd3p1)) dest_exponent++;
+	}
+	Dbl_set_mantissa(resultp1,resultp2,opnd3p1,opnd3p2);
+
+        /* 
+         * Test for overflow
+         */
+	if (dest_exponent >= DBL_INFINITY_EXPONENT) {
+                /* trap if OVERFLOWTRAP enabled */
+                if (Is_overflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+			Dbl_setwrapped_exponent(resultp1,dest_exponent,ovfl);
+			Dbl_copytoptr(resultp1,resultp2,dstptr);
+			if (inexact) 
+			    if (Is_inexacttrap_enabled())
+				return (OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+			    else Set_inexactflag();
+			return (OVERFLOWEXCEPTION);
+                }
+		inexact = TRUE;
+		Set_overflowflag();
+                /* set result to infinity or largest number */
+		Dbl_setoverflow(resultp1,resultp2);
+	}
+        /* 
+         * Test for underflow
+         */
+	else if (dest_exponent <= 0) {
+                /* trap if UNDERFLOWTRAP enabled */
+                if (Is_underflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+			Dbl_setwrapped_exponent(resultp1,dest_exponent,unfl);
+			Dbl_copytoptr(resultp1,resultp2,dstptr);
+			if (inexact) 
+			    if (Is_inexacttrap_enabled())
+				return (UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
+			    else Set_inexactflag();
+			return (UNDERFLOWEXCEPTION);
+                }
+
+		/* Determine if should set underflow flag: tiny unless
+		 * rounding would carry back up to the smallest normal
+		 * (increment, test hidden-bit overflow, undo) */
+		is_tiny = TRUE;
+		if (dest_exponent == 0 && inexact) {
+			switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				if (Dbl_iszero_sign(resultp1)) {
+					Dbl_increment(opnd3p1,opnd3p2);
+					if (Dbl_isone_hiddenoverflow(opnd3p1))
+                			    is_tiny = FALSE;
+					Dbl_decrement(opnd3p1,opnd3p2);
+				}
+				break;
+			case ROUNDMINUS: 
+				if (Dbl_isone_sign(resultp1)) {
+					Dbl_increment(opnd3p1,opnd3p2);
+					if (Dbl_isone_hiddenoverflow(opnd3p1))
+                			    is_tiny = FALSE;
+					Dbl_decrement(opnd3p1,opnd3p2);
+				}
+				break;
+			case ROUNDNEAREST:
+				if (guardbit && (stickybit || 
+				    Dbl_isone_lowmantissap2(opnd3p2))) {
+				      	Dbl_increment(opnd3p1,opnd3p2);
+					if (Dbl_isone_hiddenoverflow(opnd3p1))
+                			    is_tiny = FALSE;
+					Dbl_decrement(opnd3p1,opnd3p2);
+				}
+				break;
+			}
+		}
+
+		/*
+		 * denormalize result or set to signed zero
+		 */
+		stickybit = inexact;
+		/* NOTE(review): Dbl_denormalize appears to update guardbit,
+		 * stickybit and inexact in place for the shifted-out bits —
+		 * confirm against dbl_float.h */
+		Dbl_denormalize(opnd3p1,opnd3p2,dest_exponent,guardbit,
+		 stickybit,inexact);
+
+		/* return zero or smallest number */
+		if (inexact) {
+			switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				if (Dbl_iszero_sign(resultp1)) {
+					Dbl_increment(opnd3p1,opnd3p2);
+				}
+				break;
+			case ROUNDMINUS: 
+				if (Dbl_isone_sign(resultp1)) {
+					Dbl_increment(opnd3p1,opnd3p2);
+				}
+				break;
+			case ROUNDNEAREST:
+				if (guardbit && (stickybit || 
+				    Dbl_isone_lowmantissap2(opnd3p2))) {
+			      		Dbl_increment(opnd3p1,opnd3p2);
+				}
+				break;
+			}
+                	if (is_tiny) Set_underflowflag();
+		}
+		Dbl_set_exponentmantissa(resultp1,resultp2,opnd3p1,opnd3p2);
+	}
+	else Dbl_set_exponent(resultp1,dest_exponent);
+	/* check for inexact */
+	Dbl_copytoptr(resultp1,resultp2,dstptr);
+	if (inexact) {
+		if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+		else Set_inexactflag();
+	}
+	return(NOEXCEPTION);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/dfrem.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/dfrem.c
new file mode 100644
index 0000000..b983785
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/dfrem.c
@@ -0,0 +1,297 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/dfrem.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Double Precision Floating-point Remainder
+ *
+ *  External Interfaces:
+ *	dbl_frem(srcptr1,srcptr2,dstptr,status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+
+#include "float.h"
+#include "dbl_float.h"
+
+/*
+ *  Double Precision Floating-point Remainder
+ */
+
+int
+dbl_frem (dbl_floating_point * srcptr1, dbl_floating_point * srcptr2,
+	  dbl_floating_point * dstptr, unsigned int *status)
+{
+	/*
+	 * Emulated IEEE double-precision remainder:
+	 *   *dstptr = opnd1 - n*opnd2, with n the integer nearest
+	 *   opnd1/opnd2 (ties resolved via the roundup flag below).
+	 * The remainder is computed by iterative shift-and-subtract; it is
+	 * always exact, so no inexact handling is needed.  Returns an
+	 * exception code (NOEXCEPTION, INVALIDEXCEPTION,
+	 * UNDERFLOWEXCEPTION); sticky flags via the Set_*flag macros.
+	 */
+	register unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2;
+	register unsigned int resultp1, resultp2;
+	register int opnd1_exponent, opnd2_exponent, dest_exponent, stepcount;
+	register boolean roundup = FALSE;
+
+	Dbl_copyfromptr(srcptr1,opnd1p1,opnd1p2);
+	Dbl_copyfromptr(srcptr2,opnd2p1,opnd2p2);
+	/*
+	 * check first operand for NaN's or infinity
+	 */
+	if ((opnd1_exponent = Dbl_exponent(opnd1p1)) == DBL_INFINITY_EXPONENT) {
+		if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+			if (Dbl_isnotnan(opnd2p1,opnd2p2)) {
+				/* invalid since first operand is infinity */
+				if (Is_invalidtrap_enabled()) 
+                                	return(INVALIDEXCEPTION);
+                                Set_invalidflag();
+                                Dbl_makequietnan(resultp1,resultp2);
+				Dbl_copytoptr(resultp1,resultp2,dstptr);
+				return(NOEXCEPTION);
+			}
+		}
+		else {
+                	/*
+                 	 * is NaN; signaling or quiet?
+                 	 */
+                	if (Dbl_isone_signaling(opnd1p1)) {
+                        	/* trap if INVALIDTRAP enabled */
+                        	if (Is_invalidtrap_enabled()) 
+                            		return(INVALIDEXCEPTION);
+                        	/* make NaN quiet */
+                        	Set_invalidflag();
+                        	Dbl_set_quiet(opnd1p1);
+                	}
+			/* 
+			 * is second operand a signaling NaN? 
+			 */
+			else if (Dbl_is_signalingnan(opnd2p1)) {
+                        	/* trap if INVALIDTRAP enabled */
+                        	if (Is_invalidtrap_enabled()) 
+                            		return(INVALIDEXCEPTION);
+                        	/* make NaN quiet */
+                        	Set_invalidflag();
+                        	Dbl_set_quiet(opnd2p1);
+				Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+                		return(NOEXCEPTION);
+			}
+                	/*
+                 	 * return quiet NaN
+                 	 */
+			Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
+                	return(NOEXCEPTION);
+		}
+	} 
+	/*
+	 * check second operand for NaN's or infinity
+	 */
+	if ((opnd2_exponent = Dbl_exponent(opnd2p1)) == DBL_INFINITY_EXPONENT) {
+		if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
+			/*
+			 * return first operand (x REM inf == x)
+			 */
+			Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
+			return(NOEXCEPTION);
+		}
+                /*
+                 * is NaN; signaling or quiet?
+                 */
+                if (Dbl_isone_signaling(opnd2p1)) {
+                        /* trap if INVALIDTRAP enabled */
+                        if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                        /* make NaN quiet */
+                        Set_invalidflag();
+                        Dbl_set_quiet(opnd2p1);
+                }
+                /*
+                 * return quiet NaN
+                 */
+		Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+                return(NOEXCEPTION);
+	}
+	/*
+	 * check second operand for zero
+	 */
+	if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
+		/* invalid since second operand is zero */
+		if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                Set_invalidflag();
+                Dbl_makequietnan(resultp1,resultp2);
+		Dbl_copytoptr(resultp1,resultp2,dstptr);
+		return(NOEXCEPTION);
+	}
+
+	/* 
+	 * get sign of result: the remainder carries opnd1's sign
+	 * (resultp1 keeps opnd1's whole word; exponent/mantissa are
+	 * overwritten later)
+	 */
+	resultp1 = opnd1p1;  
+
+	/* 
+	 * check for denormalized operands
+	 */
+	if (opnd1_exponent == 0) {
+		/* check for zero */
+		if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+			Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
+			return(NOEXCEPTION);
+		}
+		/* normalize, then continue */
+		opnd1_exponent = 1;
+		Dbl_normalize(opnd1p1,opnd1p2,opnd1_exponent);
+	}
+	else {
+		Dbl_clear_signexponent_set_hidden(opnd1p1);
+	}
+	if (opnd2_exponent == 0) {
+		/* normalize, then continue */
+		opnd2_exponent = 1;
+		Dbl_normalize(opnd2p1,opnd2p2,opnd2_exponent);
+	}
+	else {
+		Dbl_clear_signexponent_set_hidden(opnd2p1);
+	}
+
+	/* find result exponent and divide step loop count */
+	dest_exponent = opnd2_exponent - 1;
+	stepcount = opnd1_exponent - opnd2_exponent;
+
+	/*
+	 * check for opnd1/opnd2 < 1
+	 */
+	if (stepcount < 0) {
+		/*
+		 * check for opnd1/opnd2 > 1/2
+		 *
+		 * In this case n will round to 1, so 
+		 *    r = opnd1 - opnd2 
+		 */
+		if (stepcount == -1 && 
+		    Dbl_isgreaterthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
+			/* set sign: flipping all of p1 inverts the sign bit
+			 * (exponent/mantissa bits are rewritten below) */
+			Dbl_allp1(resultp1) = ~Dbl_allp1(resultp1);
+			/* align opnd2 with opnd1 */
+			Dbl_leftshiftby1(opnd2p1,opnd2p2); 
+			Dbl_subtract(opnd2p1,opnd2p2,opnd1p1,opnd1p2,
+			 opnd2p1,opnd2p2);
+			/* now normalize */
+                	while (Dbl_iszero_hidden(opnd2p1)) {
+                        	Dbl_leftshiftby1(opnd2p1,opnd2p2);
+                        	dest_exponent--;
+			}
+			Dbl_set_exponentmantissa(resultp1,resultp2,opnd2p1,opnd2p2);
+			goto testforunderflow;
+		}
+		/*
+		 * opnd1/opnd2 <= 1/2
+		 *
+		 * In this case n will round to zero, so 
+		 *    r = opnd1
+		 */
+		Dbl_set_exponentmantissa(resultp1,resultp2,opnd1p1,opnd1p2);
+		dest_exponent = opnd1_exponent;
+		goto testforunderflow;
+	}
+
+	/*
+	 * Generate result
+	 *
+	 * Do iterative subtract until remainder is less than operand 2.
+	 * The loop also exits early if the remainder reaches exactly zero.
+	 */
+	while (stepcount-- > 0 && (Dbl_allp1(opnd1p1) || Dbl_allp2(opnd1p2))) {
+		if (Dbl_isnotlessthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
+			Dbl_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2,opnd1p1,opnd1p2);
+		}
+		Dbl_leftshiftby1(opnd1p1,opnd1p2);
+	}
+	/*
+	 * Do last subtract, then determine which way to round if remainder 
+	 * is exactly 1/2 of opnd2 
+	 */
+	if (Dbl_isnotlessthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
+		Dbl_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2,opnd1p1,opnd1p2);
+		roundup = TRUE;
+	}
+	if (stepcount > 0 || Dbl_iszero(opnd1p1,opnd1p2)) {
+		/* division is exact, remainder is zero */
+		Dbl_setzero_exponentmantissa(resultp1,resultp2);
+		Dbl_copytoptr(resultp1,resultp2,dstptr);
+		return(NOEXCEPTION);
+	}
+
+	/* 
+	 * Check for cases where opnd1/opnd2 < n 
+	 *
+	 * In this case the result's sign will be opposite that of
+	 * opnd1.  The mantissa also needs some correction.
+	 */
+	Dbl_leftshiftby1(opnd1p1,opnd1p2);
+	if (Dbl_isgreaterthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
+		Dbl_invert_sign(resultp1);
+		Dbl_leftshiftby1(opnd2p1,opnd2p2);
+		Dbl_subtract(opnd2p1,opnd2p2,opnd1p1,opnd1p2,opnd1p1,opnd1p2);
+	}
+	/* check for remainder being exactly 1/2 of opnd2: round-to-even
+	 * tie, broken by whether the last step already rounded up */
+	else if (Dbl_isequal(opnd1p1,opnd1p2,opnd2p1,opnd2p2) && roundup) { 
+		Dbl_invert_sign(resultp1);
+	}
+
+	/* normalize result's mantissa */
+        while (Dbl_iszero_hidden(opnd1p1)) {
+                dest_exponent--;
+                Dbl_leftshiftby1(opnd1p1,opnd1p2);
+        }
+	Dbl_set_exponentmantissa(resultp1,resultp2,opnd1p1,opnd1p2);
+
+        /* 
+         * Test for underflow
+         */
+    testforunderflow:
+	if (dest_exponent <= 0) {
+                /* trap if UNDERFLOWTRAP enabled */
+                if (Is_underflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+                        Dbl_setwrapped_exponent(resultp1,dest_exponent,unfl);
+			/* frem is always exact */
+			Dbl_copytoptr(resultp1,resultp2,dstptr);
+			return(UNDERFLOWEXCEPTION);
+                }
+                /*
+                 * denormalize result or set to signed zero
+                 */
+                if (dest_exponent >= (1 - DBL_P)) {
+			Dbl_rightshift_exponentmantissa(resultp1,resultp2,
+			 1-dest_exponent);
+                }
+                else {
+			Dbl_setzero_exponentmantissa(resultp1,resultp2);
+		}
+	}
+	else Dbl_set_exponent(resultp1,dest_exponent);
+	Dbl_copytoptr(resultp1,resultp2,dstptr);
+	return(NOEXCEPTION);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/dfsqrt.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/dfsqrt.c
new file mode 100644
index 0000000..9542c6d
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/dfsqrt.c
@@ -0,0 +1,195 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/dfsqrt.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Double Floating-point Square Root
+ *
+ *  External Interfaces:
+ *	dbl_fsqrt(srcptr,nullptr,dstptr,status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "dbl_float.h"
+
+/*
+ *  Double Floating-point Square Root
+ */
+
+/*ARGSUSED*/
+unsigned int
+dbl_fsqrt(
+	    dbl_floating_point *srcptr,
+	    unsigned int *nullptr,
+	    dbl_floating_point *dstptr,
+	    unsigned int *status)
+{
+	register unsigned int srcp1, srcp2, resultp1, resultp2;
+	register unsigned int newbitp1, newbitp2, sump1, sump2;
+	register int src_exponent;
+	register boolean guardbit = FALSE, even_exponent;
+
+	Dbl_copyfromptr(srcptr,srcp1,srcp2);
+        /*
+         * check source operand for NaN or infinity
+         */
+        if ((src_exponent = Dbl_exponent(srcp1)) == DBL_INFINITY_EXPONENT) {
+                /*
+                 * is signaling NaN?
+                 */
+                if (Dbl_isone_signaling(srcp1)) {
+                        /* trap if INVALIDTRAP enabled */
+                        if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                        /* make NaN quiet */
+                        Set_invalidflag();
+                        Dbl_set_quiet(srcp1);
+                }
+                /*
+                 * Return quiet NaN or positive infinity.
+		 *  Fall through to negative test if negative infinity.
+                 */
+		if (Dbl_iszero_sign(srcp1) || 
+		    Dbl_isnotzero_mantissa(srcp1,srcp2)) {
+                	Dbl_copytoptr(srcp1,srcp2,dstptr);
+                	return(NOEXCEPTION);
+		}
+        }
+
+        /*
+         * check for zero source operand (sqrt(+-0) == +-0, exact)
+         */
+	if (Dbl_iszero_exponentmantissa(srcp1,srcp2)) {
+		Dbl_copytoptr(srcp1,srcp2,dstptr);
+		return(NOEXCEPTION);
+	}
+
+        /*
+         * check for negative source operand 
+         */
+	if (Dbl_isone_sign(srcp1)) {
+		/* trap if INVALIDTRAP enabled */
+		if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+		/* sqrt of a negative number is invalid: deliver a quiet NaN */
+		Set_invalidflag();
+		Dbl_makequietnan(srcp1,srcp2);
+		Dbl_copytoptr(srcp1,srcp2,dstptr);
+		return(NOEXCEPTION);
+	}
+
+	/*
+	 * Generate result
+	 */
+	if (src_exponent > 0) {
+		even_exponent = Dbl_hidden(srcp1);
+		Dbl_clear_signexponent_set_hidden(srcp1);
+	}
+	else {
+		/* normalize operand */
+		Dbl_clear_signexponent(srcp1);
+		src_exponent++;
+		Dbl_normalize(srcp1,srcp2,src_exponent);
+		even_exponent = src_exponent & 1;
+	}
+	if (even_exponent) {
+		/* exponent is even */
+		/* pre-double the mantissa so that halving the exponent below yields a correctly scaled mantissa for this parity */
+		Dbl_leftshiftby1(srcp1,srcp2);
+	}
+	/*
+	 * Bit-serial restoring square root: for each result bit, high to
+	 * low, tentatively set it (sum); keep it and subtract when
+	 * sum <= remaining radicand, then shift the radicand left by one.
+	 * The loop ends early (exact result) if the remainder hits zero.
+	 */
+	Dbl_setzero(resultp1,resultp2);
+	Dbl_allp1(newbitp1) = 1 << (DBL_P - 32);
+	Dbl_setzero_mantissap2(newbitp2);
+	while (Dbl_isnotzero(newbitp1,newbitp2) && Dbl_isnotzero(srcp1,srcp2)) {
+		Dbl_addition(resultp1,resultp2,newbitp1,newbitp2,sump1,sump2);
+		if(Dbl_isnotgreaterthan(sump1,sump2,srcp1,srcp2)) {
+			Dbl_leftshiftby1(newbitp1,newbitp2);
+			/* update result */
+			Dbl_addition(resultp1,resultp2,newbitp1,newbitp2,
+			 resultp1,resultp2);  
+			Dbl_subtract(srcp1,srcp2,sump1,sump2,srcp1,srcp2);
+			Dbl_rightshiftby2(newbitp1,newbitp2);
+		}
+		else {
+			Dbl_rightshiftby1(newbitp1,newbitp2);
+		}
+		Dbl_leftshiftby1(srcp1,srcp2);
+	}
+	/* correct exponent for pre-shift */
+	if (even_exponent) {
+		Dbl_rightshiftby1(resultp1,resultp2);
+	}
+
+	/* check for inexact (nonzero remainder means the root is not exact) */
+	if (Dbl_isnotzero(srcp1,srcp2)) {
+		if (!even_exponent && Dbl_islessthan(resultp1,resultp2,srcp1,srcp2)) {
+			Dbl_increment(resultp1,resultp2);
+		}
+		guardbit = Dbl_lowmantissap2(resultp2);
+		Dbl_rightshiftby1(resultp1,resultp2);
+
+		/*  now round result  */
+		switch (Rounding_mode()) {
+		case ROUNDPLUS:
+		     Dbl_increment(resultp1,resultp2);
+		     break;
+		case ROUNDNEAREST:
+		     /* stickybit is always true, so guardbit 
+		      * is enough to determine rounding */
+		     if (guardbit) {
+			    Dbl_increment(resultp1,resultp2);
+		     }
+		     break;
+		}
+		/* increment result exponent by 1 if mantissa overflowed */
+		if (Dbl_isone_hiddenoverflow(resultp1)) src_exponent+=2;
+
+		if (Is_inexacttrap_enabled()) {
+			Dbl_set_exponent(resultp1,
+			 ((src_exponent-DBL_BIAS)>>1)+DBL_BIAS);
+			Dbl_copytoptr(resultp1,resultp2,dstptr);
+			return(INEXACTEXCEPTION);
+		}
+		else Set_inexactflag();
+	}
+	else {
+		Dbl_rightshiftby1(resultp1,resultp2);
+	}
+	Dbl_set_exponent(resultp1,((src_exponent-DBL_BIAS)>>1)+DBL_BIAS);
+	Dbl_copytoptr(resultp1,resultp2,dstptr);
+	return(NOEXCEPTION);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/dfsub.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/dfsub.c
new file mode 100644
index 0000000..2e8b5a7
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/dfsub.c
@@ -0,0 +1,526 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/dfsub.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Double_subtract: subtract two double precision values.
+ *
+ *  External Interfaces:
+ *	dbl_fsub(leftptr, rightptr, dstptr, status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "dbl_float.h"
+
+/*
+ * Double_subtract: subtract two double precision values.
+ */
+int
+dbl_fsub(
+	    dbl_floating_point *leftptr,
+	    dbl_floating_point *rightptr,
+	    dbl_floating_point *dstptr,
+	    unsigned int *status)
+    {
+    register unsigned int signless_upper_left, signless_upper_right, save;
+    register unsigned int leftp1, leftp2, rightp1, rightp2, extent;
+    register unsigned int resultp1 = 0, resultp2 = 0;
+    
+    register int result_exponent, right_exponent, diff_exponent;
+    register int sign_save, jumpsize;
+    register boolean inexact = FALSE, underflowtrap;
+        
+    /* Create local copies of the numbers */
+    Dbl_copyfromptr(leftptr,leftp1,leftp2);
+    Dbl_copyfromptr(rightptr,rightp1,rightp2);
+
+    /* A zero "save" helps discover equal operands (for later),  *
+     * and is used in swapping operands (if needed).             */
+    Dbl_xortointp1(leftp1,rightp1,/*to*/save);
+
+    /*
+     * check first operand for NaN's or infinity
+     */
+    if ((result_exponent = Dbl_exponent(leftp1)) == DBL_INFINITY_EXPONENT)
+	{
+	if (Dbl_iszero_mantissa(leftp1,leftp2)) 
+	    {
+	    if (Dbl_isnotnan(rightp1,rightp2)) 
+		{
+		if (Dbl_isinfinity(rightp1,rightp2) && save==0) 
+		    {
+		    /* 
+		     * invalid since operands are same signed infinity's
+		     */
+		    if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                    Set_invalidflag();
+                    Dbl_makequietnan(resultp1,resultp2);
+		    Dbl_copytoptr(resultp1,resultp2,dstptr);
+		    return(NOEXCEPTION);
+		    }
+		/*
+	 	 * return infinity
+	 	 */
+		Dbl_copytoptr(leftp1,leftp2,dstptr);
+		return(NOEXCEPTION);
+		}
+	    }
+	else 
+	    {
+            /*
+             * is NaN; signaling or quiet?
+             */
+            if (Dbl_isone_signaling(leftp1)) 
+		{
+               	/* trap if INVALIDTRAP enabled */
+		if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+        	/* make NaN quiet */
+        	Set_invalidflag();
+        	Dbl_set_quiet(leftp1);
+        	}
+	    /* 
+	     * is second operand a signaling NaN? 
+	     */
+	    else if (Dbl_is_signalingnan(rightp1)) 
+		{
+        	/* trap if INVALIDTRAP enabled */
+               	if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+		/* make NaN quiet */
+		Set_invalidflag();
+		Dbl_set_quiet(rightp1);
+		Dbl_copytoptr(rightp1,rightp2,dstptr);
+		return(NOEXCEPTION);
+		}
+	    /*
+ 	     * return quiet NaN
+ 	     */
+	    Dbl_copytoptr(leftp1,leftp2,dstptr);
+ 	    return(NOEXCEPTION);
+	    }
+	} /* End left NaN or Infinity processing */
+    /*
+     * check second operand for NaN's or infinity
+     */
+    if (Dbl_isinfinity_exponent(rightp1)) 
+	{
+	if (Dbl_iszero_mantissa(rightp1,rightp2)) 
+	    {
+	    /* return (negated, since this is a subtract) infinity */
+	    Dbl_invert_sign(rightp1);
+	    Dbl_copytoptr(rightp1,rightp2,dstptr);
+	    return(NOEXCEPTION);
+	    }
+        /*
+         * is NaN; signaling or quiet?
+         */
+        if (Dbl_isone_signaling(rightp1)) 
+	    {
+            /* trap if INVALIDTRAP enabled */
+	    if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+	    /* make NaN quiet */
+	    Set_invalidflag();
+	    Dbl_set_quiet(rightp1);
+	    }
+	/*
+	 * return quiet NaN
+ 	 */
+	Dbl_copytoptr(rightp1,rightp2,dstptr);
+	return(NOEXCEPTION);
+    	} /* End right NaN or Infinity processing */
+
+    /* Invariant: Must be dealing with finite numbers */
+
+    /* Compare operands by removing the sign */
+    Dbl_copytoint_exponentmantissap1(leftp1,signless_upper_left);
+    Dbl_copytoint_exponentmantissap1(rightp1,signless_upper_right);
+
+    /* sign difference selects add or sub operation. */
+    if(Dbl_ismagnitudeless(leftp2,rightp2,signless_upper_left,signless_upper_right))
+	{
+	/* Set the left operand to the larger one by XOR swap *
+	 *  First finish the first word using "save"          */
+	Dbl_xorfromintp1(save,rightp1,/*to*/rightp1);
+	Dbl_xorfromintp1(save,leftp1,/*to*/leftp1);
+     	Dbl_swap_lower(leftp2,rightp2);
+	result_exponent = Dbl_exponent(leftp1);
+	Dbl_invert_sign(leftp1);
+	}
+    /* Invariant:  left is not smaller than right. */ 
+
+    if((right_exponent = Dbl_exponent(rightp1)) == 0)
+        {
+	/* Denormalized operands.  First look for zeroes */
+	if(Dbl_iszero_mantissa(rightp1,rightp2)) 
+	    {
+	    /* right is zero */
+	    if(Dbl_iszero_exponentmantissa(leftp1,leftp2))
+		{
+		/* Both operands are zeros */
+		Dbl_invert_sign(rightp1);
+		if(Is_rounding_mode(ROUNDMINUS))
+		    {
+		    Dbl_or_signs(leftp1,/*with*/rightp1);
+		    }
+		else
+		    {
+		    Dbl_and_signs(leftp1,/*with*/rightp1);
+		    }
+		}
+	    else 
+		{
+		/* Left is not a zero and must be the result.  Trapped
+		 * underflows are signaled if left is denormalized.  Result
+		 * is always exact. */
+		if( (result_exponent == 0) && Is_underflowtrap_enabled() )
+		    {
+		    /* need to normalize results mantissa */
+	    	    sign_save = Dbl_signextendedsign(leftp1);
+		    Dbl_leftshiftby1(leftp1,leftp2);
+		    Dbl_normalize(leftp1,leftp2,result_exponent);
+		    Dbl_set_sign(leftp1,/*using*/sign_save);
+                    Dbl_setwrapped_exponent(leftp1,result_exponent,unfl);
+		    Dbl_copytoptr(leftp1,leftp2,dstptr);
+		    /* inexact = FALSE */
+		    return(UNDERFLOWEXCEPTION);
+		    }
+		}
+	    Dbl_copytoptr(leftp1,leftp2,dstptr);
+	    return(NOEXCEPTION);
+	    }
+
+	/* Neither are zeroes */
+	Dbl_clear_sign(rightp1);	/* Exponent is already cleared */
+	if(result_exponent == 0 )
+	    {
+	    /* Both operands are denormalized.  The result must be exact
+	     * and is simply calculated.  A sum could become normalized and a
+	     * difference could cancel to a true zero. */
+	    if( (/*signed*/int) save >= 0 )
+		{
+		Dbl_subtract(leftp1,leftp2,/*minus*/rightp1,rightp2,
+		 /*into*/resultp1,resultp2);
+		if(Dbl_iszero_mantissa(resultp1,resultp2))
+		    {
+		    if(Is_rounding_mode(ROUNDMINUS))
+			{
+			Dbl_setone_sign(resultp1);
+			}
+		    else
+			{
+			Dbl_setzero_sign(resultp1);
+			}
+		    Dbl_copytoptr(resultp1,resultp2,dstptr);
+		    return(NOEXCEPTION);
+		    }
+		}
+	    else
+		{
+		Dbl_addition(leftp1,leftp2,rightp1,rightp2,
+		 /*into*/resultp1,resultp2);
+		if(Dbl_isone_hidden(resultp1))
+		    {
+		    Dbl_copytoptr(resultp1,resultp2,dstptr);
+		    return(NOEXCEPTION);
+		    }
+		}
+	    if(Is_underflowtrap_enabled())
+		{
+		/* need to normalize result */
+	    	sign_save = Dbl_signextendedsign(resultp1);
+		Dbl_leftshiftby1(resultp1,resultp2);
+		Dbl_normalize(resultp1,resultp2,result_exponent);
+		Dbl_set_sign(resultp1,/*using*/sign_save);
+                Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
+		Dbl_copytoptr(resultp1,resultp2,dstptr);
+		/* inexact = FALSE */
+		return(UNDERFLOWEXCEPTION);
+		}
+	    Dbl_copytoptr(resultp1,resultp2,dstptr);
+	    return(NOEXCEPTION);
+	    }
+	right_exponent = 1;	/* Set exponent to reflect different bias
+				 * with denormalized numbers. */
+	}
+    else
+	{
+	Dbl_clear_signexponent_set_hidden(rightp1);
+	}
+    Dbl_clear_exponent_set_hidden(leftp1);
+    diff_exponent = result_exponent - right_exponent;
+
+    /* 
+     * Special case alignment of operands that would force alignment 
+     * beyond the extent of the extension.  A further optimization
+     * could special case this but only reduces the path length for this
+     * infrequent case.
+     */
+    if(diff_exponent > DBL_THRESHOLD)
+	{
+	diff_exponent = DBL_THRESHOLD;
+	}
+    
+    /* Align right operand by shifting to right */
+    Dbl_right_align(/*operand*/rightp1,rightp2,/*shifted by*/diff_exponent,
+     /*and lower to*/extent);
+
+    /* Treat sum and difference of the operands separately. */
+    if( (/*signed*/int) save >= 0 )
+	{
+	/*
+	 * Difference of the two operands.  There can be no overflow.  A
+	 * borrow can occur out of the hidden bit and force a post
+	 * normalization phase.
+	 */
+	Dbl_subtract_withextension(leftp1,leftp2,/*minus*/rightp1,rightp2,
+	 /*with*/extent,/*into*/resultp1,resultp2);
+	if(Dbl_iszero_hidden(resultp1))
+	    {
+	    /* Handle normalization */
+	    /* A straight forward algorithm would now shift the result
+	     * and extension left until the hidden bit becomes one.  Not
+	     * all of the extension bits need participate in the shift.
+	     * Only the two most significant bits (round and guard) are
+	     * needed.  If only a single shift is needed then the guard
+	     * bit becomes a significant low order bit and the extension
+	     * must participate in the rounding.  If more than a single 
+	     * shift is needed, then all bits to the right of the guard 
+	     * bit are zeros, and the guard bit may or may not be zero. */
+	    sign_save = Dbl_signextendedsign(resultp1);
+            Dbl_leftshiftby1_withextent(resultp1,resultp2,extent,resultp1,resultp2);
+
+            /* Need to check for a zero result.  The sign and exponent
+	     * fields have already been zeroed.  The more efficient test
+	     * of the full object can be used.
+	     */
+    	    if(Dbl_iszero(resultp1,resultp2))
+		/* Must have been "x-x" or "x+(-x)". */
+		{
+		if(Is_rounding_mode(ROUNDMINUS)) Dbl_setone_sign(resultp1);
+		Dbl_copytoptr(resultp1,resultp2,dstptr);
+		return(NOEXCEPTION);
+		}
+	    result_exponent--;
+	    /* Look to see if normalization is finished. */
+	    if(Dbl_isone_hidden(resultp1))
+		{
+		if(result_exponent==0)
+		    {
+		    /* Denormalized, exponent should be zero.  Left operand *
+		     * was normalized, so extent (guard, round) was zero    */
+		    goto underflow;
+		    }
+		else
+		    {
+		    /* No further normalization is needed. */
+		    Dbl_set_sign(resultp1,/*using*/sign_save);
+	    	    Ext_leftshiftby1(extent);
+		    goto round;
+		    }
+		}
+
+	    /* Check for denormalized, exponent should be zero.  Left    *
+	     * operand was normalized, so extent (guard, round) was zero */
+	    if(!(underflowtrap = Is_underflowtrap_enabled()) &&
+	       result_exponent==0) goto underflow;
+
+	    /* Shift extension to complete one bit of normalization and
+	     * update exponent. */
+	    Ext_leftshiftby1(extent);
+
+	    /* Discover first one bit to determine shift amount.  Use a
+	     * modified binary search.  We have already shifted the result
+	     * one position right and still not found a one so the remainder
+	     * of the extension must be zero and simplifies rounding. */
+	    /* Scan bytes */
+	    while(Dbl_iszero_hiddenhigh7mantissa(resultp1))
+		{
+		Dbl_leftshiftby8(resultp1,resultp2);
+		if((result_exponent -= 8) <= 0  && !underflowtrap)
+		    goto underflow;
+		}
+	    /* Now narrow it down to the nibble */
+	    if(Dbl_iszero_hiddenhigh3mantissa(resultp1))
+		{
+		/* The lower nibble contains the normalizing one */
+		Dbl_leftshiftby4(resultp1,resultp2);
+		if((result_exponent -= 4) <= 0 && !underflowtrap)
+		    goto underflow;
+		}
+	    /* Select case where first bit is set (already normalized)
+	     * otherwise select the proper shift. */
+	    if((jumpsize = Dbl_hiddenhigh3mantissa(resultp1)) > 7)
+		{
+		/* Already normalized */
+		if(result_exponent <= 0) goto underflow;
+		Dbl_set_sign(resultp1,/*using*/sign_save);
+		Dbl_set_exponent(resultp1,/*using*/result_exponent);
+		Dbl_copytoptr(resultp1,resultp2,dstptr);
+		return(NOEXCEPTION);
+		}
+	    Dbl_sethigh4bits(resultp1,/*using*/sign_save);
+	    switch(jumpsize) 
+		{
+		case 1:
+		    {
+		    Dbl_leftshiftby3(resultp1,resultp2);
+		    result_exponent -= 3;
+		    break;
+		    }
+		case 2:
+		case 3:
+		    {
+		    Dbl_leftshiftby2(resultp1,resultp2);
+		    result_exponent -= 2;
+		    break;
+		    }
+		case 4:
+		case 5:
+		case 6:
+		case 7:
+		    {
+		    Dbl_leftshiftby1(resultp1,resultp2);
+		    result_exponent -= 1;
+		    break;
+		    }
+		}
+	    if(result_exponent > 0) 
+		{
+		Dbl_set_exponent(resultp1,/*using*/result_exponent);
+		Dbl_copytoptr(resultp1,resultp2,dstptr);
+		return(NOEXCEPTION);		/* Sign bit is already set */
+		}
+	    /* Fixup potential underflows */
+	  underflow:
+	    if(Is_underflowtrap_enabled())
+		{
+		Dbl_set_sign(resultp1,sign_save);
+                Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
+		Dbl_copytoptr(resultp1,resultp2,dstptr);
+		/* inexact = FALSE */
+		return(UNDERFLOWEXCEPTION);
+		}
+	    /* 
+	     * Since we cannot get an inexact denormalized result,
+	     * we can now return.
+	     */
+	    Dbl_fix_overshift(resultp1,resultp2,(1-result_exponent),extent);
+	    Dbl_clear_signexponent(resultp1);
+	    Dbl_set_sign(resultp1,sign_save);
+	    Dbl_copytoptr(resultp1,resultp2,dstptr);
+	    return(NOEXCEPTION);
+	    } /* end if(hidden...)... */
+	/* Fall through and round */
+	} /* end if(save >= 0)... */
+    else 
+	{
+	/* Add magnitudes (signs differ, so the subtract is an effective add) */
+	Dbl_addition(leftp1,leftp2,rightp1,rightp2,/*to*/resultp1,resultp2);
+	if(Dbl_isone_hiddenoverflow(resultp1))
+	    {
+	    /* Prenormalization required. */
+	    Dbl_rightshiftby1_withextent(resultp2,extent,extent);
+	    Dbl_arithrightshiftby1(resultp1,resultp2);
+	    result_exponent++;
+	    } /* end if hiddenoverflow... */
+	} /* end else ...subtract magnitudes... */
+    
+    /* Round the result.  If the extension is all zeros, then the result is
+     * exact.  Otherwise round in the correct direction.  No underflow is
+     * possible. If a postnormalization is necessary, then the mantissa is
+     * all zeros so no shift is needed. */
+  round:
+    if(Ext_isnotzero(extent))
+	{
+	inexact = TRUE;
+	switch(Rounding_mode())
+	    {
+	    case ROUNDNEAREST: /* The default. */
+	    if(Ext_isone_sign(extent))
+		{
+		/* at least 1/2 ulp */
+		if(Ext_isnotzero_lower(extent)  ||
+		  Dbl_isone_lowmantissap2(resultp2))
+		    {
+		    /* either exactly half way and odd or more than 1/2ulp */
+		    Dbl_increment(resultp1,resultp2);
+		    }
+		}
+	    break;
+
+	    case ROUNDPLUS:
+	    if(Dbl_iszero_sign(resultp1))
+		{
+		/* Round up positive results */
+		Dbl_increment(resultp1,resultp2);
+		}
+	    break;
+	    
+	    case ROUNDMINUS:
+	    if(Dbl_isone_sign(resultp1))
+		{
+		/* Round down negative results */
+		Dbl_increment(resultp1,resultp2);
+		}
+	    /* fall through to ROUNDZERO: truncate, nothing more to do */
+	    case ROUNDZERO:;
+	    /* truncate is simple */
+	    } /* end switch... */
+	if(Dbl_isone_hiddenoverflow(resultp1)) result_exponent++;
+	}
+    if(result_exponent == DBL_INFINITY_EXPONENT)
+        {
+        /* Overflow */
+        if(Is_overflowtrap_enabled())
+	    {
+	    Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl);
+	    Dbl_copytoptr(resultp1,resultp2,dstptr);
+	    if (inexact)
+	    if (Is_inexacttrap_enabled())
+		return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+		else Set_inexactflag();
+	    return(OVERFLOWEXCEPTION);
+	    }
+        else
+	    {
+	    inexact = TRUE;
+	    Set_overflowflag();
+	    Dbl_setoverflow(resultp1,resultp2);
+	    }
+	}
+    else Dbl_set_exponent(resultp1,result_exponent);
+    Dbl_copytoptr(resultp1,resultp2,dstptr);
+    if(inexact) 
+	if(Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+	else Set_inexactflag();
+    return(NOEXCEPTION);
+    }
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/driver.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/driver.c
new file mode 100644
index 0000000..2fb59d2
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/driver.c
@@ -0,0 +1,129 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ *  linux/arch/math-emu/driver.c.c
+ *
+ *	decodes and dispatches unimplemented FPU instructions
+ *
+ *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
+ *  Copyright (C) 2001	      Hewlett-Packard <bame@debian.org>
+ */
+
+#include <linux/sched/signal.h>
+
+#include "float.h"
+#include "math-emu.h"
+
+
+#define fptpos 31
+#define fpr1pos 10
+#define extru(r,pos,len) (((r) >> (31-(pos))) & (( 1 << (len)) - 1))
+
+#define FPUDEBUG 0
+
+/* Format of the floating-point exception registers. */
+struct exc_reg {
+	unsigned int exception : 6;
+	unsigned int ei : 26;
+};
+
+/* Macros for grabbing bits of the instruction format from the 'ei'
+   field above. */
+/* Major opcode 0c and 0e */
+#define FP0CE_UID(i) (((i) >> 6) & 3)
+#define FP0CE_CLASS(i) (((i) >> 9) & 3)
+#define FP0CE_SUBOP(i) (((i) >> 13) & 7)
+#define FP0CE_SUBOP1(i) (((i) >> 15) & 7) /* Class 1 subopcode */
+#define FP0C_FORMAT(i) (((i) >> 11) & 3)
+#define FP0E_FORMAT(i) (((i) >> 11) & 1)
+
+/* Major opcode 0c, uid 2 (performance monitoring) */
+#define FPPM_SUBOP(i) (((i) >> 9) & 0x1f)
+
+/* Major opcode 2e (fused operations).   */
+#define FP2E_SUBOP(i)  (((i) >> 5) & 1)
+#define FP2E_FORMAT(i) (((i) >> 11) & 1)
+
+/* Major opcode 26 (FMPYSUB) */
+/* Major opcode 06 (FMPYADD) */
+#define FPx6_FORMAT(i) ((i) & 0x1f)
+
+/* Flags and enable bits of the status word. */
+#define FPSW_FLAGS(w) ((w) >> 27)
+#define FPSW_ENABLE(w) ((w) & 0x1f)
+#define FPSW_V (1<<4)
+#define FPSW_Z (1<<3)
+#define FPSW_O (1<<2)
+#define FPSW_U (1<<1)
+#define FPSW_I (1<<0)
+
+/* Handle a floating point exception.  Return zero if the faulting
+   instruction can be completed successfully, -1 if a signal was sent. */
+int
+handle_fpe(struct pt_regs *regs)
+{
+	extern void printbinary(unsigned long x, int nbits);
+	struct siginfo si;	/* NOTE(review): only 4 fields set below; consider zero-filling first -- verify */
+	unsigned int orig_sw, sw;
+	int signalcode;
+	/* need an intermediate copy of float regs because the FPU emulation
+	 * code expects an artificial last entry (frcopy[32], zeroed below)
+	 *
+	 * also, the passed in fr registers contain one word that defines
+	 * the fpu type; the fpu type information is constructed 
+	 * inside the emulation code
+	 */
+	__u64 frcopy[36];
+
+	memcpy(frcopy, regs->fr, sizeof regs->fr);
+	frcopy[32] = 0;
+
+	memcpy(&orig_sw, frcopy, sizeof(orig_sw));
+
+	if (FPUDEBUG) {
+		printk(KERN_DEBUG "FP VZOUICxxxxCQCQCQCQCQCRMxxTDVZOUI ->\n   ");
+		printbinary(orig_sw, 32);
+		printk(KERN_DEBUG "\n");
+	}
+
+	signalcode = decode_fpu(frcopy, 0x666);
+
+	/* Status word = FR0L. */
+	memcpy(&sw, frcopy, sizeof(sw));
+	if (FPUDEBUG) {
+		printk(KERN_DEBUG "VZOUICxxxxCQCQCQCQCQCRMxxTDVZOUI decode_fpu returns %d|0x%x\n",
+			signalcode >> 24, signalcode & 0xffffff);
+		printbinary(sw, 32);
+		printk(KERN_DEBUG "\n");
+	}
+
+	/* write the (possibly modified) register file back to the task */
+	memcpy(regs->fr, frcopy, sizeof regs->fr);
+	if (signalcode != 0) {
+	    si.si_signo = signalcode >> 24;
+	    si.si_errno = 0;
+	    si.si_code = signalcode & 0xffffff;
+	    si.si_addr = (void __user *) regs->iaoq[0];
+	    force_sig_info(si.si_signo, &si, current);
+	    return -1;
+	}
+
+	return signalcode ? -1 : 0;	/* signalcode == 0 here, so this returns 0 */
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvff.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvff.c
new file mode 100644
index 0000000..76c063f
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvff.c
@@ -0,0 +1,309 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/fcnvff.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Single Floating-point to Double Floating-point
+ *	Double Floating-point to Single Floating-point
+ *
+ *  External Interfaces:
+ *	dbl_to_sgl_fcnvff(srcptr,nullptr,dstptr,status)
+ *	sgl_to_dbl_fcnvff(srcptr,nullptr,dstptr,status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/*
+ *  Single Floating-point to Double Floating-point (widening, always exact)
+ */
+/*ARGSUSED*/
+int
+sgl_to_dbl_fcnvff(
+	    sgl_floating_point *srcptr,
+	    unsigned int *nullptr,
+	    dbl_floating_point *dstptr,
+	    unsigned int *status)
+{
+	register unsigned int src, resultp1, resultp2;
+	register int src_exponent;
+
+	src = *srcptr;
+	src_exponent = Sgl_exponent(src);
+	Dbl_allp1(resultp1) = Sgl_all(src);  /* set sign of result */
+	/* 
+ 	 * Test for NaN or infinity
+ 	 */
+	if (src_exponent == SGL_INFINITY_EXPONENT) {
+		/*
+		 * determine if NaN or infinity
+		 */
+		if (Sgl_iszero_mantissa(src)) {
+			/*
+			 * is infinity; want to return double infinity
+			 */
+			Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+			Dbl_copytoptr(resultp1,resultp2,dstptr);
+			return(NOEXCEPTION);
+		}
+		else {
+			/* 
+			 * is NaN; signaling or quiet?
+			 */
+			if (Sgl_isone_signaling(src)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled())
+					return(INVALIDEXCEPTION);
+				/* make NaN quiet */
+				else {
+					Set_invalidflag();
+					Sgl_set_quiet(src);
+				}
+			}
+			/* 
+			 * NaN is quiet, return as double NaN 
+			 */
+			Dbl_setinfinity_exponent(resultp1);
+			Sgl_to_dbl_mantissa(src,resultp1,resultp2);
+			Dbl_copytoptr(resultp1,resultp2,dstptr);
+			return(NOEXCEPTION);
+		}
+	}
+	/* 
+ 	 * Test for zero or denormalized
+ 	 */
+	if (src_exponent == 0) {
+		/*
+		 * determine if zero or denormalized
+		 */
+		if (Sgl_isnotzero_mantissa(src)) {
+			/*
+			 * is denormalized; normalize it, since every single
+			 * denormal is representable as a double normal
+			 */
+			Sgl_clear_signexponent(src);
+			Sgl_leftshiftby1(src);
+			Sgl_normalize(src,src_exponent);
+			Sgl_to_dbl_exponent(src_exponent,resultp1);
+			Sgl_to_dbl_mantissa(src,resultp1,resultp2);
+		}
+		else {
+			Dbl_setzero_exponentmantissa(resultp1,resultp2);
+		}
+		Dbl_copytoptr(resultp1,resultp2,dstptr);
+		return(NOEXCEPTION);
+	}
+	/*
+	 * No special cases, just complete the conversion
+	 */
+	Sgl_to_dbl_exponent(src_exponent, resultp1);
+	Sgl_to_dbl_mantissa(Sgl_mantissa(src), resultp1,resultp2);
+	Dbl_copytoptr(resultp1,resultp2,dstptr);
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Double Floating-point to Single Floating-point 
+ *
+ *  Narrow the IEEE double at *srcptr to an IEEE single at *dstptr,
+ *  applying the current rounding mode from the status register.
+ *  Handles NaN quieting, infinity, overflow/underflow (wrapping the
+ *  exponent bias when the matching trap is enabled) and denormalized
+ *  results.  Returns NOEXCEPTION or an exception code for the
+ *  emulation trap dispatcher; "nullptr" is an unused operand slot
+ *  required by the dispatch-table signature (hence ARGSUSED).
+ */
+/*ARGSUSED*/
+int
+dbl_to_sgl_fcnvff(
+		    dbl_floating_point *srcptr,
+		    unsigned int *nullptr,
+		    sgl_floating_point *dstptr,
+		    unsigned int *status)
+{
+        register unsigned int srcp1, srcp2, result;
+        register int src_exponent, dest_exponent, dest_mantissa;
+        register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
+	register boolean lsb_odd = FALSE;
+	boolean is_tiny;
+
+	Dbl_copyfromptr(srcptr,srcp1,srcp2);
+        src_exponent = Dbl_exponent(srcp1);
+	Sgl_all(result) = Dbl_allp1(srcp1);  /* set sign of result */
+        /* 
+         * Test for NaN or infinity
+         */
+        if (src_exponent == DBL_INFINITY_EXPONENT) {
+                /*
+                 * determine if NaN or infinity
+                 */
+                if (Dbl_iszero_mantissa(srcp1,srcp2)) {
+                        /*
+                         * is infinity; want to return single infinity
+                         */
+                        Sgl_setinfinity_exponentmantissa(result);
+                        *dstptr = result;
+                        return(NOEXCEPTION);
+                }
+                /* 
+                 * is NaN; signaling or quiet?
+                 */
+                if (Dbl_isone_signaling(srcp1)) {
+                        /* trap if INVALIDTRAP enabled */
+                        if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                        else {
+				Set_invalidflag();
+                        	/* make NaN quiet */
+                        	Dbl_set_quiet(srcp1);
+			}
+                }
+                /* 
+                 * NaN is quiet, return as single NaN 
+                 */
+                Sgl_setinfinity_exponent(result);
+		/* keep the high-order NaN payload bits that fit in a single */
+		Sgl_set_mantissa(result,Dallp1(srcp1)<<3 | Dallp2(srcp2)>>29);
+		/* payload bits all lost: force quiet bit so result stays a NaN,
+		 * not an infinity */
+		if (Sgl_iszero_mantissa(result)) Sgl_set_quiet(result);
+                *dstptr = result;
+                return(NOEXCEPTION);
+        }
+        /*
+         * Generate result
+         */
+        Dbl_to_sgl_exponent(src_exponent,dest_exponent);
+	if (dest_exponent > 0) {
+        	Dbl_to_sgl_mantissa(srcp1,srcp2,dest_mantissa,inexact,guardbit, 
+		stickybit,lsb_odd);
+	}
+	else {
+		/* result would be denormal (or zero) in single precision */
+		if (Dbl_iszero_exponentmantissa(srcp1,srcp2)){
+			Sgl_setzero_exponentmantissa(result);
+			*dstptr = result;
+			return(NOEXCEPTION);
+		}
+                if (Is_underflowtrap_enabled()) {
+			Dbl_to_sgl_mantissa(srcp1,srcp2,dest_mantissa,inexact,
+			guardbit,stickybit,lsb_odd);
+                }
+		else {
+			/* compute result, determine inexact info,
+			 * and set Underflowflag if appropriate
+			 */
+			Dbl_to_sgl_denormalized(srcp1,srcp2,dest_exponent,
+			dest_mantissa,inexact,guardbit,stickybit,lsb_odd,
+			is_tiny);
+		}
+	}
+        /* 
+         * Now round result if not exact
+         */
+        if (inexact) {
+                switch (Rounding_mode()) {
+                        case ROUNDPLUS: 
+                                if (Sgl_iszero_sign(result)) dest_mantissa++;
+                                break;
+                        case ROUNDMINUS: 
+                                if (Sgl_isone_sign(result)) dest_mantissa++;
+                                break;
+                        case ROUNDNEAREST:
+                                /* round-to-nearest-even: bump only when the
+                                 * guard bit is set and either sticky or the
+                                 * result lsb is odd */
+                                if (guardbit) {
+                                   if (stickybit || lsb_odd) dest_mantissa++;
+                                   }
+                }
+        }
+        Sgl_set_exponentmantissa(result,dest_mantissa);
+
+        /*
+         * check for mantissa overflow after rounding
+         */
+        if ((dest_exponent>0 || Is_underflowtrap_enabled()) && 
+	    Sgl_isone_hidden(result)) dest_exponent++;
+
+        /* 
+         * Test for overflow
+         */
+        if (dest_exponent >= SGL_INFINITY_EXPONENT) {
+                /* trap if OVERFLOWTRAP enabled */
+                if (Is_overflowtrap_enabled()) {
+                        /* 
+                         * Check for gross overflow
+                         */
+                        if (dest_exponent >= SGL_INFINITY_EXPONENT+SGL_WRAP) 
+                        	return(UNIMPLEMENTEDEXCEPTION);
+                        
+                        /*
+                         * Adjust bias of result
+                         */
+			Sgl_setwrapped_exponent(result,dest_exponent,ovfl);
+			*dstptr = result;
+			if (inexact) 
+			    if (Is_inexacttrap_enabled())
+				return(OVERFLOWEXCEPTION|INEXACTEXCEPTION);
+			    else Set_inexactflag();
+                        return(OVERFLOWEXCEPTION);
+                }
+                Set_overflowflag();
+		/* overflow is inexact by definition */
+		inexact = TRUE;
+		/* set result to infinity or largest number */
+		Sgl_setoverflow(result);
+        }
+        /* 
+         * Test for underflow
+         */
+        else if (dest_exponent <= 0) {
+                /* trap if UNDERFLOWTRAP enabled */
+                if (Is_underflowtrap_enabled()) {
+                        /* 
+                         * Check for gross underflow
+                         */
+                        if (dest_exponent <= -(SGL_WRAP))
+                        	return(UNIMPLEMENTEDEXCEPTION);
+                        /*
+                         * Adjust bias of result
+                         */
+			Sgl_setwrapped_exponent(result,dest_exponent,unfl);
+			*dstptr = result;
+			if (inexact) 
+			    if (Is_inexacttrap_enabled())
+				return(UNDERFLOWEXCEPTION|INEXACTEXCEPTION);
+			    else Set_inexactflag();
+                        return(UNDERFLOWEXCEPTION);
+                }
+                 /* 
+                  * result is denormalized or signed zero
+                  */
+               if (inexact && is_tiny) Set_underflowflag();
+
+        }
+	else Sgl_set_exponent(result,dest_exponent);
+	*dstptr = result;
+        /* 
+         * Trap if inexact trap is enabled
+         */
+        if (inexact)
+        	if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+        	else Set_inexactflag();
+        return(NOEXCEPTION);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvfu.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvfu.c
new file mode 100644
index 0000000..7e85655
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvfu.c
@@ -0,0 +1,536 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/fcnvfu.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Floating-point to Unsigned Fixed-point Converts
+ *
+ *  External Interfaces:
+ *	dbl_to_dbl_fcnvfu(srcptr,nullptr,dstptr,status)
+ *	dbl_to_sgl_fcnvfu(srcptr,nullptr,dstptr,status)
+ *	sgl_to_dbl_fcnvfu(srcptr,nullptr,dstptr,status)
+ *	sgl_to_sgl_fcnvfu(srcptr,nullptr,dstptr,status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/************************************************************************
+ *  Floating-point to Unsigned Fixed-point Converts			*
+ ************************************************************************/
+
+/*
+ *  Single Floating-point to Single Unsigned Fixed 
+ *
+ *  Convert the single at *srcptr to a 32-bit unsigned integer at
+ *  *dstptr under the current rounding mode.  Values too large for 32
+ *  bits, and negative values that cannot round to zero, raise the
+ *  invalid exception (trapped or flagged, with a saturated/zero
+ *  result); discarded fraction bits raise inexact.  "nullptr" is an
+ *  unused operand slot required by the dispatch-table signature.
+ */
+/*ARGSUSED*/
+int
+sgl_to_sgl_fcnvfu(
+			sgl_floating_point *srcptr,
+			unsigned int *nullptr,
+			unsigned int *dstptr,
+			unsigned int *status)
+{
+	register unsigned int src, result;
+	register int src_exponent;
+	register boolean inexact = FALSE;
+
+	src = *srcptr;
+	/* unbiased exponent; >= 0 means |src| >= 1.0 */
+	src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+	/* 
+	 * Test for overflow
+	 * (NaN and infinity inputs presumably also take this path, since
+	 * their biased exponent is maximal — TODO confirm SGL_FX_MAX_EXP)
+	 */
+	if (src_exponent > SGL_FX_MAX_EXP + 1) {
+		if (Sgl_isone_sign(src)) {
+			result = 0;
+		} else {
+			result = 0xffffffff;
+		}
+		if (Is_invalidtrap_enabled()) {
+			return(INVALIDEXCEPTION);
+		}
+		Set_invalidflag();
+		*dstptr = result;
+		return(NOEXCEPTION);
+	}
+	/*
+	 * Generate result
+	 */
+	if (src_exponent >= 0) {
+		/* 
+		 * Check sign.
+		 * If negative, trap unimplemented.
+		 */
+		if (Sgl_isone_sign(src)) {
+			result = 0;
+			if (Is_invalidtrap_enabled()) {
+				return(INVALIDEXCEPTION);
+			}
+			Set_invalidflag();
+			*dstptr = result;
+			return(NOEXCEPTION);
+		}
+		Sgl_clear_signexponent_set_hidden(src);
+		Suint_from_sgl_mantissa(src,src_exponent,result);
+
+		/* check for inexact */
+		if (Sgl_isinexact_to_unsigned(src,src_exponent)) {
+			inexact = TRUE;
+			/*  round result  */
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+				result++;
+				break;
+			case ROUNDMINUS: /* never negative */
+				break;
+			case ROUNDNEAREST:
+				/* round-to-nearest-even on the discarded bits */
+				if (Sgl_isone_roundbit(src,src_exponent) &&
+				    (Sgl_isone_stickybit(src,src_exponent) ||
+				     (result & 1))) {
+			     		result++;
+				}
+				break;
+			}
+		}
+	} else {
+		/* |src| < 1: integer part is zero; outcome depends on
+		 * rounding mode and sign */
+		result = 0;
+
+		/* check for inexact */
+		if (Sgl_isnotzero_exponentmantissa(src)) {
+			inexact = TRUE;
+			/*  round result  */
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+				if (Sgl_iszero_sign(src)) {
+					result++;
+				}
+				break;
+			case ROUNDMINUS:
+				/* negative value rounded toward -inf cannot
+				 * be represented unsigned: invalid */
+				if (Sgl_isone_sign(src)) {
+					result = 0;
+					if (Is_invalidtrap_enabled()) {
+						return(INVALIDEXCEPTION);
+					}
+					Set_invalidflag();
+					inexact = FALSE;
+				}
+				break;
+			case ROUNDNEAREST:
+				/* exponent -1 means 0.5 <= |src| < 1,
+				 * and a nonzero mantissa breaks the tie
+				 * away from zero */
+				if (src_exponent == -1 &&
+				    Sgl_isnotzero_mantissa(src)) {
+					if (Sgl_isone_sign(src)) {
+						result = 0;
+						if (Is_invalidtrap_enabled()) {
+							return(INVALIDEXCEPTION);
+						}
+						Set_invalidflag();
+						inexact = FALSE;
+					}
+			      		else result++;
+				}
+				break;
+			}
+		}
+	}
+	*dstptr = result;
+	if (inexact) {
+		if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+		else Set_inexactflag();
+	}
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Single Floating-point to Double Unsigned Fixed 
+ *
+ *  Convert the single at *srcptr to a 64-bit unsigned integer (two
+ *  32-bit words, resultp1 high / resultp2 low) stored through dstptr,
+ *  under the current rounding mode.  Overflowing and unrepresentable
+ *  negative inputs raise invalid; discarded fraction bits raise
+ *  inexact.  "nullptr" is an unused operand slot required by the
+ *  dispatch-table signature.
+ */
+/*ARGSUSED*/
+int
+sgl_to_dbl_fcnvfu(
+		    sgl_floating_point *srcptr,
+		    unsigned int *nullptr,
+		    dbl_unsigned *dstptr,
+		    unsigned int *status)
+{
+	register int src_exponent;
+	register unsigned int src, resultp1, resultp2;
+	register boolean inexact = FALSE;
+
+	src = *srcptr;
+	/* unbiased exponent; >= 0 means |src| >= 1.0 */
+	src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+	/* 
+	 * Test for overflow
+	 */
+	if (src_exponent > DBL_FX_MAX_EXP + 1) {
+		if (Sgl_isone_sign(src)) {
+			resultp1 = resultp2 = 0;
+		} else {
+			resultp1 = resultp2 = 0xffffffff;
+		}
+		if (Is_invalidtrap_enabled()) {
+			return(INVALIDEXCEPTION);
+		}
+		Set_invalidflag();
+    		Duint_copytoptr(resultp1,resultp2,dstptr);
+		return(NOEXCEPTION);
+	}
+	/*
+	 * Generate result
+	 */
+	if (src_exponent >= 0) {
+		/* 
+		 * Check sign.
+		 * If negative, trap unimplemented.
+		 */
+		if (Sgl_isone_sign(src)) {
+			resultp1 = resultp2 = 0;
+			if (Is_invalidtrap_enabled()) {
+				return(INVALIDEXCEPTION);
+			}
+			Set_invalidflag();
+    			Duint_copytoptr(resultp1,resultp2,dstptr);
+			return(NOEXCEPTION);
+		}
+		Sgl_clear_signexponent_set_hidden(src);
+		Duint_from_sgl_mantissa(src,src_exponent,resultp1,resultp2);
+
+		/* check for inexact */
+		if (Sgl_isinexact_to_unsigned(src,src_exponent)) {
+			inexact = TRUE;
+			/*  round result  */
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+				Duint_increment(resultp1,resultp2);
+				break;
+			case ROUNDMINUS: /* never negative */
+				break;
+			case ROUNDNEAREST:
+				/* round-to-nearest-even on the discarded bits */
+				if (Sgl_isone_roundbit(src,src_exponent) &&
+				    (Sgl_isone_stickybit(src,src_exponent) || 
+				     Duint_isone_lowp2(resultp2))) {
+					Duint_increment(resultp1,resultp2);
+				}
+				break;
+			}
+		}
+	} else {
+		/* |src| < 1: integer part is zero; outcome depends on
+		 * rounding mode and sign */
+		Duint_setzero(resultp1,resultp2);
+
+		/* check for inexact */
+		if (Sgl_isnotzero_exponentmantissa(src)) {
+			inexact = TRUE;
+			/*  round result  */
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+				if (Sgl_iszero_sign(src)) {
+					Duint_increment(resultp1,resultp2);
+				}
+				break;
+			case ROUNDMINUS:
+				/* negative value rounded toward -inf cannot
+				 * be represented unsigned: invalid */
+				if (Sgl_isone_sign(src)) {
+					resultp1 = resultp2 = 0;
+					if (Is_invalidtrap_enabled()) {
+						return(INVALIDEXCEPTION);
+					}
+					Set_invalidflag();
+					inexact = FALSE;
+				}
+				break;
+			case ROUNDNEAREST:
+				/* exponent -1 means 0.5 <= |src| < 1;
+				 * nonzero mantissa rounds away from zero */
+				if (src_exponent == -1 &&
+				    Sgl_isnotzero_mantissa(src)) {
+					if (Sgl_isone_sign(src)) {
+						resultp1 = 0;
+						resultp2 = 0;
+						if (Is_invalidtrap_enabled()) {
+							return(INVALIDEXCEPTION);
+						}
+						Set_invalidflag();
+						inexact = FALSE;
+					}
+					else Duint_increment(resultp1,resultp2);
+				}
+			}
+		}
+	}
+	Duint_copytoptr(resultp1,resultp2,dstptr);
+	if (inexact) {
+		if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+		else Set_inexactflag();
+	}
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Double Floating-point to Single Unsigned Fixed 
+ *
+ *  Convert the double at *srcptr (two words, srcp1 high / srcp2 low)
+ *  to a 32-bit unsigned integer at *dstptr under the current rounding
+ *  mode.  Overflowing and unrepresentable negative inputs raise
+ *  invalid; discarded fraction bits raise inexact.  Unlike the
+ *  single-source variant, rounding up here can carry out of 32 bits,
+ *  so there is a second overflow check after the increment.
+ */
+/*ARGSUSED*/
+int
+dbl_to_sgl_fcnvfu (dbl_floating_point * srcptr, unsigned int *nullptr,
+		   unsigned int *dstptr, unsigned int *status)
+{
+	register unsigned int srcp1, srcp2, result;
+	register int src_exponent;
+	register boolean inexact = FALSE;
+
+	Dbl_copyfromptr(srcptr,srcp1,srcp2);
+	/* unbiased exponent; >= 0 means |src| >= 1.0 */
+	src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+	/* 
+	 * Test for overflow
+	 */
+	if (src_exponent > SGL_FX_MAX_EXP + 1) {
+		if (Dbl_isone_sign(srcp1)) {
+			result = 0;
+		} else {
+			result = 0xffffffff;
+		}
+		if (Is_invalidtrap_enabled()) {
+			return(INVALIDEXCEPTION);
+		}
+		Set_invalidflag();
+		*dstptr = result;
+		return(NOEXCEPTION);
+	}
+	/*
+	 * Generate result
+	 */
+	if (src_exponent >= 0) {
+		/* 
+		 * Check sign.
+		 * If negative, trap unimplemented.
+		 */
+		if (Dbl_isone_sign(srcp1)) {
+			result = 0;
+			if (Is_invalidtrap_enabled()) {
+				return(INVALIDEXCEPTION);
+			}
+			Set_invalidflag();
+			*dstptr = result;
+			return(NOEXCEPTION);
+		}
+		Dbl_clear_signexponent_set_hidden(srcp1);
+		Suint_from_dbl_mantissa(srcp1,srcp2,src_exponent,result);
+
+		/* check for inexact */
+		if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) {
+			inexact = TRUE;
+			/*  round result  */
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+			     result++;
+			     break;
+			case ROUNDMINUS: /* never negative */
+			     break;
+			case ROUNDNEAREST:
+			     /* round-to-nearest-even on the discarded bits */
+			     if(Dbl_isone_roundbit(srcp1,srcp2,src_exponent) &&
+				(Dbl_isone_stickybit(srcp1,srcp2,src_exponent)||
+				 result&1))
+				   result++;
+			     break;
+			}
+			/* check for overflow */
+			/* the increment above can wrap 0xffffffff to 0:
+			 * saturate and raise invalid */
+			if (result == 0) {
+				result = 0xffffffff;
+				if (Is_invalidtrap_enabled()) {
+					return(INVALIDEXCEPTION);
+				}
+				Set_invalidflag();
+				*dstptr = result;
+				return(NOEXCEPTION);
+			}
+		}
+	} else {
+		/* |src| < 1: integer part is zero; outcome depends on
+		 * rounding mode and sign */
+		result = 0;
+
+		/* check for inexact */
+		if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+			inexact = TRUE;
+			/*  round result  */
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+				if (Dbl_iszero_sign(srcp1)) result++;
+				break;
+			case ROUNDMINUS:
+				/* negative value rounded toward -inf cannot
+				 * be represented unsigned: invalid */
+				if (Dbl_isone_sign(srcp1)) {
+					result = 0;
+					if (Is_invalidtrap_enabled()) {
+						return(INVALIDEXCEPTION);
+					}
+					Set_invalidflag();
+					inexact = FALSE;
+				}
+				break;
+			case ROUNDNEAREST:
+				/* exponent -1 means 0.5 <= |src| < 1;
+				 * nonzero mantissa rounds away from zero */
+				if (src_exponent == -1 &&
+				    Dbl_isnotzero_mantissa(srcp1,srcp2))
+					if (Dbl_isone_sign(srcp1)) {
+						result = 0;
+						if (Is_invalidtrap_enabled()) {
+							return(INVALIDEXCEPTION);
+						}
+						Set_invalidflag();
+						inexact = FALSE;
+					}
+					else result++;
+			}
+		}
+	}
+	*dstptr = result;
+	if (inexact) {
+		if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+		else Set_inexactflag();
+	}
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Double Floating-point to Double Unsigned Fixed 
+ *
+ *  Convert the double at *srcptr to a 64-bit unsigned integer (two
+ *  32-bit words, resultp1 high / resultp2 low) stored through dstptr,
+ *  under the current rounding mode.  Overflowing and unrepresentable
+ *  negative inputs raise invalid; discarded fraction bits raise
+ *  inexact.  "nullptr" is an unused operand slot required by the
+ *  dispatch-table signature.
+ */
+/*ARGSUSED*/
+int
+dbl_to_dbl_fcnvfu (dbl_floating_point * srcptr, unsigned int *nullptr,
+		   dbl_unsigned * dstptr, unsigned int *status)
+{
+	register int src_exponent;
+	register unsigned int srcp1, srcp2, resultp1, resultp2;
+	register boolean inexact = FALSE;
+
+	Dbl_copyfromptr(srcptr,srcp1,srcp2);
+	/* unbiased exponent; >= 0 means |src| >= 1.0 */
+	src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+	/* 
+	 * Test for overflow
+	 */
+	if (src_exponent > DBL_FX_MAX_EXP + 1) {
+		if (Dbl_isone_sign(srcp1)) {
+			resultp1 = resultp2 = 0;
+		} else {
+			resultp1 = resultp2 = 0xffffffff;
+		}
+		if (Is_invalidtrap_enabled()) {
+			return(INVALIDEXCEPTION);
+		}
+		Set_invalidflag();
+    		Duint_copytoptr(resultp1,resultp2,dstptr);
+		return(NOEXCEPTION);
+	}
+ 
+	/*
+	 * Generate result
+	 */
+	if (src_exponent >= 0) {
+		/* 
+		 * Check sign.
+		 * If negative, trap unimplemented.
+		 */
+		if (Dbl_isone_sign(srcp1)) {
+			resultp1 = resultp2 = 0;
+			if (Is_invalidtrap_enabled()) {
+				return(INVALIDEXCEPTION);
+			}
+			Set_invalidflag();
+    			Duint_copytoptr(resultp1,resultp2,dstptr);
+			return(NOEXCEPTION);
+		}
+		Dbl_clear_signexponent_set_hidden(srcp1);
+		Duint_from_dbl_mantissa(srcp1,srcp2,src_exponent,resultp1,
+		  resultp2);
+
+		/* check for inexact */
+		if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) {
+			inexact = TRUE;
+			/*  round result  */
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+				Duint_increment(resultp1,resultp2);
+				break;
+			case ROUNDMINUS: /* never negative */
+				break;
+			case ROUNDNEAREST:
+				/* round-to-nearest-even on the discarded bits */
+				if(Dbl_isone_roundbit(srcp1,srcp2,src_exponent))
+				  if(Dbl_isone_stickybit(srcp1,srcp2,src_exponent) || 
+				     Duint_isone_lowp2(resultp2))
+					Duint_increment(resultp1,resultp2);
+			} 
+		}
+	} else {
+		/* |src| < 1: integer part is zero; outcome depends on
+		 * rounding mode and sign */
+		Duint_setzero(resultp1,resultp2);
+
+		/* check for inexact */
+		if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+			inexact = TRUE;
+			/*  round result  */
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+				if (Dbl_iszero_sign(srcp1)) {
+					Duint_increment(resultp1,resultp2);
+				}
+				break;
+			case ROUNDMINUS:
+				/* negative value rounded toward -inf cannot
+				 * be represented unsigned: invalid */
+				if (Dbl_isone_sign(srcp1)) {
+					resultp1 = resultp2 = 0;
+					if (Is_invalidtrap_enabled()) {
+						return(INVALIDEXCEPTION);
+					}
+					Set_invalidflag();
+					inexact = FALSE;
+				}
+				break;
+			case ROUNDNEAREST:
+				/* exponent -1 means 0.5 <= |src| < 1;
+				 * nonzero mantissa rounds away from zero */
+				if (src_exponent == -1 &&
+				    Dbl_isnotzero_mantissa(srcp1,srcp2))
+					if (Dbl_iszero_sign(srcp1)) {
+						Duint_increment(resultp1,resultp2);
+					} else {
+						resultp1 = 0;
+						resultp2 = 0;
+						if (Is_invalidtrap_enabled()) {
+							return(INVALIDEXCEPTION);
+						}
+						Set_invalidflag();
+						inexact = FALSE;
+					}
+			}
+		}
+	}
+	Duint_copytoptr(resultp1,resultp2,dstptr);
+	if (inexact) {
+		if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+		else Set_inexactflag();
+	}
+	return(NOEXCEPTION);
+}
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvfut.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvfut.c
new file mode 100644
index 0000000..4176a44
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvfut.c
@@ -0,0 +1,332 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/fcnvfut.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Floating-point to Unsigned Fixed-point Converts with Truncation
+ *
+ *  External Interfaces:
+ *	dbl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status)
+ *	dbl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status)
+ *	sgl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status)
+ *	sgl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/************************************************************************
+ *  Floating-point to Unsigned Fixed-point Converts with Truncation	*
+ ************************************************************************/
+
+/*
+ *  Convert single floating-point to single fixed-point format
+ *  with truncated result
+ *
+ *  Round-toward-zero variant of sgl_to_sgl_fcnvfu: the fraction is
+ *  simply discarded, independent of the rounding mode.  Out-of-range
+ *  and negative inputs with |src| >= 1 raise invalid (trapped, or
+ *  flagged with a saturated/zero result); discarded nonzero fraction
+ *  bits raise inexact.  "nullptr" is an unused operand slot required
+ *  by the dispatch-table signature.
+ */
+/*ARGSUSED*/
+int
+sgl_to_sgl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr,
+		    unsigned int *dstptr, unsigned int *status)
+{
+	register unsigned int src, result;
+	register int src_exponent;
+
+	src = *srcptr;
+	/* unbiased exponent; >= 0 means |src| >= 1.0 */
+	src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+	/* 
+	 * Test for overflow
+	 */
+	if (src_exponent > SGL_FX_MAX_EXP + 1) {
+		if (Sgl_isone_sign(src)) {
+			result = 0;
+		} else {
+			result = 0xffffffff;
+		}
+		if (Is_invalidtrap_enabled()) {
+			return(INVALIDEXCEPTION);
+		}
+		Set_invalidflag();
+		*dstptr = result;
+		return(NOEXCEPTION);
+	}
+	/*
+	 * Generate result
+	 */
+	if (src_exponent >= 0) {
+		/* 
+		 * Check sign.
+		 * If negative, trap unimplemented.
+		 */
+		if (Sgl_isone_sign(src)) {
+			result = 0;
+			if (Is_invalidtrap_enabled()) {
+				return(INVALIDEXCEPTION);
+			}
+			Set_invalidflag();
+			*dstptr = result;
+			return(NOEXCEPTION);
+		}
+		Sgl_clear_signexponent_set_hidden(src);
+		Suint_from_sgl_mantissa(src,src_exponent,result);
+		*dstptr = result;
+
+		/* check for inexact */
+		if (Sgl_isinexact_to_unsigned(src,src_exponent)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	else {
+		/* |src| < 1 truncates to zero, inexact if src != +/-0 */
+		*dstptr = 0;
+
+		/* check for inexact */
+		if (Sgl_isnotzero_exponentmantissa(src)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Single Floating-point to Double Unsigned Fixed 
+ *
+ *  Truncating (round-toward-zero) conversion of the single at *srcptr
+ *  to a 64-bit unsigned integer (resultp1 high / resultp2 low) stored
+ *  through dstptr.  Out-of-range and negative inputs with |src| >= 1
+ *  raise invalid; discarded nonzero fraction bits raise inexact.
+ */
+/*ARGSUSED*/
+int
+sgl_to_dbl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr,
+		    dbl_unsigned * dstptr, unsigned int *status)
+{
+	register int src_exponent;
+	register unsigned int src, resultp1, resultp2;
+
+	src = *srcptr;
+	/* unbiased exponent; >= 0 means |src| >= 1.0 */
+	src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+	/* 
+	 * Test for overflow
+	 */
+	if (src_exponent > DBL_FX_MAX_EXP + 1) {
+		if (Sgl_isone_sign(src)) {
+			resultp1 = resultp2 = 0;
+		} else {
+			resultp1 = resultp2 = 0xffffffff;
+		}
+		if (Is_invalidtrap_enabled()) {
+			return(INVALIDEXCEPTION);
+		}
+		Set_invalidflag();
+    		Duint_copytoptr(resultp1,resultp2,dstptr);
+		return(NOEXCEPTION);
+	}
+	/*
+	 * Generate result
+	 */
+	if (src_exponent >= 0) {
+		/* 
+		 * Check sign.
+		 * If negative, trap unimplemented.
+		 */
+		if (Sgl_isone_sign(src)) {
+			resultp1 = resultp2 = 0;
+			if (Is_invalidtrap_enabled()) {
+				return(INVALIDEXCEPTION);
+			}
+			Set_invalidflag();
+    			Duint_copytoptr(resultp1,resultp2,dstptr);
+			return(NOEXCEPTION);
+		}
+		Sgl_clear_signexponent_set_hidden(src);
+		Duint_from_sgl_mantissa(src,src_exponent,resultp1,resultp2);
+		Duint_copytoptr(resultp1,resultp2,dstptr);
+
+		/* check for inexact */
+		if (Sgl_isinexact_to_unsigned(src,src_exponent)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	else {
+		/* |src| < 1 truncates to zero, inexact if src != +/-0 */
+		Duint_setzero(resultp1,resultp2);
+		Duint_copytoptr(resultp1,resultp2,dstptr);
+
+		/* check for inexact */
+		if (Sgl_isnotzero_exponentmantissa(src)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Double Floating-point to Single Unsigned Fixed 
+ *
+ *  Truncating (round-toward-zero) conversion of the double at *srcptr
+ *  (srcp1 high / srcp2 low) to a 32-bit unsigned integer at *dstptr.
+ *  Out-of-range and negative inputs with |src| >= 1 raise invalid;
+ *  discarded nonzero fraction bits raise inexact.
+ */
+/*ARGSUSED*/
+int
+dbl_to_sgl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr,
+		    unsigned int *dstptr, unsigned int *status)
+{
+	register unsigned int srcp1, srcp2, result;
+	register int src_exponent;
+
+	Dbl_copyfromptr(srcptr,srcp1,srcp2);
+	/* unbiased exponent; >= 0 means |src| >= 1.0 */
+	src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+	/* 
+	 * Test for overflow
+	 */
+	if (src_exponent > SGL_FX_MAX_EXP + 1) {
+		if (Dbl_isone_sign(srcp1)) {
+			result = 0;
+		} else {
+			result = 0xffffffff;
+		}
+		if (Is_invalidtrap_enabled()) {
+			return(INVALIDEXCEPTION);
+		}
+		Set_invalidflag();
+		*dstptr = result;
+		return(NOEXCEPTION);
+	}
+	/*
+	 * Generate result
+	 */
+	if (src_exponent >= 0) {
+		/* 
+		 * Check sign.
+		 * If negative, trap unimplemented.
+		 */
+		if (Dbl_isone_sign(srcp1)) {
+			result = 0;
+			if (Is_invalidtrap_enabled()) {
+				return(INVALIDEXCEPTION);
+			}
+			Set_invalidflag();
+			*dstptr = result;
+			return(NOEXCEPTION);
+		}
+		Dbl_clear_signexponent_set_hidden(srcp1);
+		Suint_from_dbl_mantissa(srcp1,srcp2,src_exponent,result);
+		*dstptr = result;
+
+		/* check for inexact */
+		if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	else {
+		/* |src| < 1 truncates to zero, inexact if src != +/-0 */
+		*dstptr = 0;
+
+		/* check for inexact */
+		if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Double Floating-point to Double Unsigned Fixed 
+ *
+ *  Truncating (round-toward-zero) conversion of the double at *srcptr
+ *  to a 64-bit unsigned integer (resultp1 high / resultp2 low) stored
+ *  through dstptr.  Out-of-range and negative inputs with |src| >= 1
+ *  raise invalid; discarded nonzero fraction bits raise inexact.
+ */
+/*ARGSUSED*/
+int
+dbl_to_dbl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr,
+		    dbl_unsigned * dstptr, unsigned int *status)
+{
+	register int src_exponent;
+	register unsigned int srcp1, srcp2, resultp1, resultp2;
+
+	Dbl_copyfromptr(srcptr,srcp1,srcp2);
+	/* unbiased exponent; >= 0 means |src| >= 1.0 */
+	src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+	/* 
+	 * Test for overflow
+	 */
+	if (src_exponent > DBL_FX_MAX_EXP + 1) {
+		if (Dbl_isone_sign(srcp1)) {
+			resultp1 = resultp2 = 0;
+		} else {
+			resultp1 = resultp2 = 0xffffffff;
+		}
+		if (Is_invalidtrap_enabled()) {
+			return(INVALIDEXCEPTION);
+		}
+		Set_invalidflag();
+    		Duint_copytoptr(resultp1,resultp2,dstptr);
+		return(NOEXCEPTION);
+	}
+	/*
+	 * Generate result
+	 */
+	if (src_exponent >= 0) {
+		/* 
+		 * Check sign.
+		 * If negative, trap unimplemented.
+		 */
+		if (Dbl_isone_sign(srcp1)) {
+			resultp1 = resultp2 = 0;
+			if (Is_invalidtrap_enabled()) {
+				return(INVALIDEXCEPTION);
+			}
+			Set_invalidflag();
+    			Duint_copytoptr(resultp1,resultp2,dstptr);
+			return(NOEXCEPTION);
+		}
+		Dbl_clear_signexponent_set_hidden(srcp1);
+		Duint_from_dbl_mantissa(srcp1,srcp2,src_exponent,
+		  resultp1,resultp2);
+		Duint_copytoptr(resultp1,resultp2,dstptr);
+
+		/* check for inexact */
+		if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	else {
+		/* |src| < 1 truncates to zero, inexact if src != +/-0 */
+		Duint_setzero(resultp1,resultp2);
+		Duint_copytoptr(resultp1,resultp2,dstptr);
+
+		/* check for inexact */
+		if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	return(NOEXCEPTION);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvfx.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvfx.c
new file mode 100644
index 0000000..d6475bd
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvfx.c
@@ -0,0 +1,501 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/fcnvfx.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Single Floating-point to Single Fixed-point
+ *	Single Floating-point to Double Fixed-point 
+ *	Double Floating-point to Single Fixed-point 
+ *	Double Floating-point to Double Fixed-point 
+ *
+ *  External Interfaces:
+ *	dbl_to_dbl_fcnvfx(srcptr,nullptr,dstptr,status)
+ *	dbl_to_sgl_fcnvfx(srcptr,nullptr,dstptr,status)
+ *	sgl_to_dbl_fcnvfx(srcptr,nullptr,dstptr,status)
+ *	sgl_to_sgl_fcnvfx(srcptr,nullptr,dstptr,status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/*
+ *  Single Floating-point to Single Fixed-point (rounds per status word)
+ */
+/*ARGSUSED*/
+int
+sgl_to_sgl_fcnvfx(
+		    sgl_floating_point *srcptr,
+		    sgl_floating_point *nullptr,
+		    int *dstptr,
+		    sgl_floating_point *status)
+{
+	register unsigned int src, temp;
+	register int src_exponent, result;
+	register boolean inexact = FALSE;
+
+	src = *srcptr;
+	src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+	/*
+	 * Overflow: magnitude too large for a 32-bit signed integer
+	 */
+	if (src_exponent > SGL_FX_MAX_EXP) {
+		/* every overflow except an exact MININT raises invalid */
+		if ((src_exponent > SGL_FX_MAX_EXP + 1) || 
+		Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
+                        if (Sgl_iszero_sign(src)) result = 0x7fffffff;
+                        else result = 0x80000000; 
+
+	                if (Is_invalidtrap_enabled()) {
+                            return(INVALIDEXCEPTION);
+                        }
+                        Set_invalidflag();
+			*dstptr = result;
+			return(NOEXCEPTION);
+       		}
+	}
+	/*
+	 * Generate result, rounding per the status-word rounding mode
+	 */
+	if (src_exponent >= 0) {
+		temp = src;
+		Sgl_clear_signexponent_set_hidden(temp);
+		Int_from_sgl_mantissa(temp,src_exponent);
+		if (Sgl_isone_sign(src))  result = -Sgl_all(temp);
+		else result = Sgl_all(temp);
+
+		/* inexact if any fraction bits are lost by the conversion */
+		if (Sgl_isinexact_to_fix(src,src_exponent)) {
+			inexact = TRUE;
+			/*  round the truncated result  */
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+			     if (Sgl_iszero_sign(src)) result++;
+			     break;
+			case ROUNDMINUS:
+			     if (Sgl_isone_sign(src)) result--;
+			     break;
+			case ROUNDNEAREST:
+			     if (Sgl_isone_roundbit(src,src_exponent)) {
+			        if (Sgl_isone_stickybit(src,src_exponent) 
+				|| (Sgl_isone_lowmantissa(temp)))
+			           if (Sgl_iszero_sign(src)) result++;
+			           else result--;
+			     }
+			} 
+		}
+	}
+	else {
+		result = 0;
+
+		/* a nonzero source smaller than one is always inexact */
+		if (Sgl_isnotzero_exponentmantissa(src)) {
+			inexact = TRUE;
+			/*  round the zero result  */
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+			     if (Sgl_iszero_sign(src)) result++;
+			     break;
+			case ROUNDMINUS:
+			     if (Sgl_isone_sign(src)) result--;
+			     break;
+			case ROUNDNEAREST:
+			     if (src_exponent == -1)
+			        if (Sgl_isnotzero_mantissa(src))
+			           if (Sgl_iszero_sign(src)) result++;
+			           else result--;
+			} 
+		}
+	}
+	*dstptr = result;
+	if (inexact) {
+		if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+		else Set_inexactflag();
+	}
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Single Floating-point to Double (64-bit) Fixed-point 
+ */
+/*ARGSUSED*/
+int
+sgl_to_dbl_fcnvfx(
+		sgl_floating_point *srcptr,
+		unsigned int *nullptr,
+		dbl_integer *dstptr,
+		unsigned int *status)
+{
+	register int src_exponent, resultp1;
+	register unsigned int src, temp, resultp2;
+	register boolean inexact = FALSE;
+
+	src = *srcptr;
+	src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+	/*
+	 * Overflow: magnitude too large for a 64-bit signed integer
+	 */
+	if (src_exponent > DBL_FX_MAX_EXP) {
+		/* every overflow except an exact MININT raises invalid */
+		if ((src_exponent > DBL_FX_MAX_EXP + 1) || 
+		Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
+                        if (Sgl_iszero_sign(src)) {
+                              resultp1 = 0x7fffffff;
+			      resultp2 = 0xffffffff;
+			}
+                        else {
+			    resultp1 = 0x80000000; 
+			    resultp2 = 0;
+			}
+	                if (Is_invalidtrap_enabled()) {
+                            return(INVALIDEXCEPTION);
+                        }
+                        Set_invalidflag();
+    		        Dint_copytoptr(resultp1,resultp2,dstptr);
+			return(NOEXCEPTION);
+		}
+		Dint_set_minint(resultp1,resultp2);
+		Dint_copytoptr(resultp1,resultp2,dstptr);
+		return(NOEXCEPTION);
+	}
+	/*
+	 * Generate result, rounding per the status-word rounding mode
+	 */
+	if (src_exponent >= 0) {
+		temp = src;
+		Sgl_clear_signexponent_set_hidden(temp);
+		Dint_from_sgl_mantissa(temp,src_exponent,resultp1,resultp2);
+		if (Sgl_isone_sign(src)) {
+			Dint_setone_sign(resultp1,resultp2);
+		}
+
+		/* inexact if any fraction bits are lost by the conversion */
+		if (Sgl_isinexact_to_fix(src,src_exponent)) {
+			inexact = TRUE;
+                        /*  round the truncated result  */
+                        switch (Rounding_mode()) {
+                        case ROUNDPLUS:
+                             if (Sgl_iszero_sign(src)) {
+				Dint_increment(resultp1,resultp2);
+			     }
+                             break;
+                        case ROUNDMINUS:
+                             if (Sgl_isone_sign(src)) {
+				Dint_decrement(resultp1,resultp2);
+			     }
+                             break;
+                        case ROUNDNEAREST:
+                             if (Sgl_isone_roundbit(src,src_exponent))
+                                if (Sgl_isone_stickybit(src,src_exponent) || 
+				(Dint_isone_lowp2(resultp2)))
+				   if (Sgl_iszero_sign(src)) {
+				      Dint_increment(resultp1,resultp2);
+				   }
+                                   else {
+				      Dint_decrement(resultp1,resultp2);
+				   }
+                        }
+                }
+        }
+	else {
+		Dint_setzero(resultp1,resultp2);
+
+		/* a nonzero source smaller than one is always inexact */
+		if (Sgl_isnotzero_exponentmantissa(src)) {
+			inexact = TRUE;
+                        /*  round the zero result  */
+                        switch (Rounding_mode()) {
+                        case ROUNDPLUS:
+                             if (Sgl_iszero_sign(src)) {
+				Dint_increment(resultp1,resultp2);
+			     }
+                             break;
+                        case ROUNDMINUS:
+                             if (Sgl_isone_sign(src)) {
+				Dint_decrement(resultp1,resultp2);
+			     }
+                             break;
+                        case ROUNDNEAREST:
+                             if (src_exponent == -1)
+                                if (Sgl_isnotzero_mantissa(src))
+                                   if (Sgl_iszero_sign(src)) {
+				      Dint_increment(resultp1,resultp2);
+				   }
+                                   else {
+				      Dint_decrement(resultp1,resultp2);
+				   }
+			}
+		}
+	}
+	Dint_copytoptr(resultp1,resultp2,dstptr);
+	if (inexact) {
+		if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+		else Set_inexactflag();
+	}
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Double Floating-point to Single (32-bit) Fixed-point 
+ */
+/*ARGSUSED*/
+int
+dbl_to_sgl_fcnvfx(
+		    dbl_floating_point *srcptr,
+		    unsigned int *nullptr,
+		    int *dstptr,
+		    unsigned int *status)
+{
+	register unsigned int srcp1,srcp2, tempp1,tempp2;
+	register int src_exponent, result;
+	register boolean inexact = FALSE;
+
+	Dbl_copyfromptr(srcptr,srcp1,srcp2);
+	src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+	/*
+	 * Overflow: magnitude too large for a 32-bit signed integer
+	 */
+	if (src_exponent > SGL_FX_MAX_EXP) {
+		/* invalid unless the value is exactly MININT */
+		if (Dbl_isoverflow_to_int(src_exponent,srcp1,srcp2)) {
+                        if (Dbl_iszero_sign(srcp1)) result = 0x7fffffff;
+                        else result = 0x80000000; 
+
+	                if (Is_invalidtrap_enabled()) {
+                            return(INVALIDEXCEPTION);
+                        }
+                        Set_invalidflag();
+			*dstptr = result;
+			return(NOEXCEPTION);
+		}
+	}
+	/*
+	 * Generate result, rounding per the status-word rounding mode
+	 */
+	if (src_exponent >= 0) {
+		tempp1 = srcp1;
+		tempp2 = srcp2;
+		Dbl_clear_signexponent_set_hidden(tempp1);
+		Int_from_dbl_mantissa(tempp1,tempp2,src_exponent);
+		if (Dbl_isone_sign(srcp1) && (src_exponent <= SGL_FX_MAX_EXP))
+			result = -Dbl_allp1(tempp1);
+		else result = Dbl_allp1(tempp1);
+
+		/* inexact if any fraction bits are lost by the conversion */
+		if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
+                        inexact = TRUE;
+                        /*  round the truncated result  */
+                        switch (Rounding_mode()) {
+                        case ROUNDPLUS:
+                             if (Dbl_iszero_sign(srcp1)) result++;
+                             break;
+                        case ROUNDMINUS:
+                             if (Dbl_isone_sign(srcp1)) result--;
+                             break;
+                        case ROUNDNEAREST:
+                             if (Dbl_isone_roundbit(srcp1,srcp2,src_exponent))
+                                if (Dbl_isone_stickybit(srcp1,srcp2,src_exponent) || 
+				(Dbl_isone_lowmantissap1(tempp1)))
+                                   if (Dbl_iszero_sign(srcp1)) result++;
+                                   else result--;
+                        } 
+			/* rounding stepped past MAXINT/MININT: sign flipped */
+			if ((Dbl_iszero_sign(srcp1) && result < 0) ||
+			    (Dbl_isone_sign(srcp1) && result > 0)) {
+			        
+                          if (Dbl_iszero_sign(srcp1)) result = 0x7fffffff;
+                          else result = 0x80000000; 
+
+	                  if (Is_invalidtrap_enabled()) {
+                            return(INVALIDEXCEPTION);
+                          }
+                          Set_invalidflag();
+			  *dstptr = result;
+			  return(NOEXCEPTION);
+			}
+                }
+	}
+	else {
+		result = 0;
+
+		/* a nonzero source smaller than one is always inexact */
+		if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+                        inexact = TRUE;
+                        /*  round the zero result  */
+                        switch (Rounding_mode()) {
+                        case ROUNDPLUS:
+                             if (Dbl_iszero_sign(srcp1)) result++;
+                             break;
+                        case ROUNDMINUS:
+                             if (Dbl_isone_sign(srcp1)) result--;
+                             break;
+                        case ROUNDNEAREST:
+                             if (src_exponent == -1)
+                                if (Dbl_isnotzero_mantissa(srcp1,srcp2))
+                                   if (Dbl_iszero_sign(srcp1)) result++;
+                                   else result--;
+			}
+                }
+	}
+	*dstptr = result;
+        if (inexact) {
+                if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+		else Set_inexactflag();
+        }
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Double Floating-point to Double (64-bit) Fixed-point 
+ */
+/*ARGSUSED*/
+int
+dbl_to_dbl_fcnvfx(
+		    dbl_floating_point *srcptr,
+		    unsigned int *nullptr,
+		    dbl_integer *dstptr,
+		    unsigned int *status)
+{
+	register int src_exponent, resultp1;
+	register unsigned int srcp1, srcp2, tempp1, tempp2, resultp2;
+	register boolean inexact = FALSE;
+
+	Dbl_copyfromptr(srcptr,srcp1,srcp2);
+	src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+	/*
+	 * Overflow: magnitude too large for a 64-bit signed integer
+	 */
+	if (src_exponent > DBL_FX_MAX_EXP) {
+		/* every overflow except an exact MININT raises invalid */
+		if ((src_exponent > DBL_FX_MAX_EXP + 1) || 
+		Dbl_isnotzero_mantissa(srcp1,srcp2) || Dbl_iszero_sign(srcp1)) {
+                        if (Dbl_iszero_sign(srcp1)) {
+                              resultp1 = 0x7fffffff;
+			      resultp2 = 0xffffffff;
+			}
+                        else {
+			    resultp1 = 0x80000000; 
+			    resultp2 = 0;
+			}
+	                if (Is_invalidtrap_enabled()) {
+                            return(INVALIDEXCEPTION);
+                        }
+                        Set_invalidflag();
+    		        Dint_copytoptr(resultp1,resultp2,dstptr);
+			return(NOEXCEPTION);
+		}
+	}
+ 
+	/*
+	 * Generate result, rounding per the status-word rounding mode
+	 */
+	if (src_exponent >= 0) {
+		tempp1 = srcp1;
+		tempp2 = srcp2;
+		Dbl_clear_signexponent_set_hidden(tempp1);
+		Dint_from_dbl_mantissa(tempp1,tempp2,src_exponent,resultp1,
+		resultp2);
+		if (Dbl_isone_sign(srcp1)) {
+			Dint_setone_sign(resultp1,resultp2);
+		}
+
+		/* inexact if any fraction bits are lost by the conversion */
+		if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
+                        inexact = TRUE;
+                        /*  round the truncated result  */
+                        switch (Rounding_mode()) {
+                        case ROUNDPLUS:
+                             if (Dbl_iszero_sign(srcp1)) {
+				Dint_increment(resultp1,resultp2);
+			     }
+                             break;
+                        case ROUNDMINUS:
+                             if (Dbl_isone_sign(srcp1)) {
+				Dint_decrement(resultp1,resultp2);
+			     }
+                             break;
+                        case ROUNDNEAREST:
+                             if (Dbl_isone_roundbit(srcp1,srcp2,src_exponent))
+                                if (Dbl_isone_stickybit(srcp1,srcp2,src_exponent) || 
+				(Dint_isone_lowp2(resultp2)))
+                                   if (Dbl_iszero_sign(srcp1)) {
+				      Dint_increment(resultp1,resultp2);
+				   }
+                                   else {
+				      Dint_decrement(resultp1,resultp2);
+				   }
+                        } 
+                }
+	}
+	else {
+		Dint_setzero(resultp1,resultp2);
+
+		/* a nonzero source smaller than one is always inexact */
+		if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+                        inexact = TRUE;
+                        /*  round the zero result  */
+                        switch (Rounding_mode()) {
+                        case ROUNDPLUS:
+                             if (Dbl_iszero_sign(srcp1)) {
+				Dint_increment(resultp1,resultp2);
+			     }
+                             break;
+                        case ROUNDMINUS:
+                             if (Dbl_isone_sign(srcp1)) {
+				Dint_decrement(resultp1,resultp2);
+			     }
+                             break;
+                        case ROUNDNEAREST:
+                             if (src_exponent == -1)
+                                if (Dbl_isnotzero_mantissa(srcp1,srcp2))
+                                   if (Dbl_iszero_sign(srcp1)) {
+				      Dint_increment(resultp1,resultp2);
+				   }
+                                   else {
+				      Dint_decrement(resultp1,resultp2);
+				   }
+			}
+                }
+	}
+	Dint_copytoptr(resultp1,resultp2,dstptr);
+        if (inexact) {
+                if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+        	else Set_inexactflag();
+        }
+	return(NOEXCEPTION);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvfxt.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvfxt.c
new file mode 100644
index 0000000..8b9010c
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvfxt.c
@@ -0,0 +1,328 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/fcnvfxt.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Single Floating-point to Single Fixed-point /w truncated result
+ *	Single Floating-point to Double Fixed-point /w truncated result
+ *	Double Floating-point to Single Fixed-point /w truncated result
+ *	Double Floating-point to Double Fixed-point /w truncated result
+ *
+ *  External Interfaces:
+ *	dbl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status)
+ *	dbl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status)
+ *	sgl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status)
+ *	sgl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/*
+ *  Convert single floating-point to single fixed-point format
+ *  with truncated (round-toward-zero) result
+ */
+/*ARGSUSED*/
+int
+sgl_to_sgl_fcnvfxt(
+		    sgl_floating_point *srcptr,
+		    unsigned int *nullptr,
+		    int *dstptr,
+		    unsigned int *status)
+{
+	register unsigned int src, temp;
+	register int src_exponent, result;
+
+	src = *srcptr;
+	src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+	/*
+	 * Overflow: magnitude too large for a 32-bit signed integer
+	 */
+	if (src_exponent > SGL_FX_MAX_EXP) {
+		/* every overflow except an exact MININT raises invalid */
+		if ((src_exponent > SGL_FX_MAX_EXP + 1) || 
+		Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
+                        if (Sgl_iszero_sign(src)) result = 0x7fffffff;
+                        else result = 0x80000000; 
+
+	                if (Is_invalidtrap_enabled()) {
+                            return(INVALIDEXCEPTION);
+                        }
+                        Set_invalidflag();
+			*dstptr = result;
+			return(NOEXCEPTION);
+		}
+	}
+	/*
+	 * Generate result by truncation (no rounding-mode dispatch)
+	 */
+	if (src_exponent >= 0) {
+		temp = src;
+		Sgl_clear_signexponent_set_hidden(temp);
+		Int_from_sgl_mantissa(temp,src_exponent);
+		if (Sgl_isone_sign(src))  result = -Sgl_all(temp);
+		else result = Sgl_all(temp);
+		*dstptr = result;
+
+		/* inexact if any fraction bits were truncated away */
+		if (Sgl_isinexact_to_fix(src,src_exponent)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	else {
+		*dstptr = 0;
+
+		/* a nonzero source truncated to zero is inexact */
+		if (Sgl_isnotzero_exponentmantissa(src)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Single Floating-point to Double Fixed-point /w truncated result
+ */
+/*ARGSUSED*/
+int
+sgl_to_dbl_fcnvfxt(
+		    sgl_floating_point *srcptr,
+		    unsigned int *nullptr,
+		    dbl_integer *dstptr,
+		    unsigned int *status)
+{
+	register int src_exponent, resultp1;
+	register unsigned int src, temp, resultp2;
+
+	src = *srcptr;
+	src_exponent = Sgl_exponent(src) - SGL_BIAS;
+
+	/*
+	 * Overflow: magnitude too large for a 64-bit signed integer
+	 */
+	if (src_exponent > DBL_FX_MAX_EXP) {
+		/* every overflow except an exact MININT raises invalid */
+		if ((src_exponent > DBL_FX_MAX_EXP + 1) || 
+		Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
+                        if (Sgl_iszero_sign(src)) {
+                              resultp1 = 0x7fffffff;
+			      resultp2 = 0xffffffff;
+			}
+                        else {
+			    resultp1 = 0x80000000; 
+			    resultp2 = 0;
+			}
+	                if (Is_invalidtrap_enabled()) {
+                            return(INVALIDEXCEPTION);
+                        }
+                        Set_invalidflag();
+    		        Dint_copytoptr(resultp1,resultp2,dstptr);
+			return(NOEXCEPTION);
+		}
+		Dint_set_minint(resultp1,resultp2);
+		Dint_copytoptr(resultp1,resultp2,dstptr);
+		return(NOEXCEPTION);
+	}
+	/*
+	 * Generate result by truncation (no rounding-mode dispatch)
+	 */
+	if (src_exponent >= 0) {
+		temp = src;
+		Sgl_clear_signexponent_set_hidden(temp);
+		Dint_from_sgl_mantissa(temp,src_exponent,resultp1,resultp2);
+		if (Sgl_isone_sign(src)) {
+			Dint_setone_sign(resultp1,resultp2);
+		}
+		Dint_copytoptr(resultp1,resultp2,dstptr);
+
+		/* inexact if any fraction bits were truncated away */
+		if (Sgl_isinexact_to_fix(src,src_exponent)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	else {
+		Dint_setzero(resultp1,resultp2);
+		Dint_copytoptr(resultp1,resultp2,dstptr);
+
+		/* a nonzero source truncated to zero is inexact */
+		if (Sgl_isnotzero_exponentmantissa(src)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Double Floating-point to Single Fixed-point /w truncated result
+ */
+/*ARGSUSED*/
+int
+dbl_to_sgl_fcnvfxt(
+			dbl_floating_point *srcptr,
+			unsigned int *nullptr,
+			int *dstptr,
+			unsigned int *status)
+{
+	register unsigned int srcp1, srcp2, tempp1, tempp2;
+	register int src_exponent, result;
+
+	Dbl_copyfromptr(srcptr,srcp1,srcp2);
+	src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+	/*
+	 * Overflow: magnitude too large for a 32-bit signed integer
+	 */
+	if (src_exponent > SGL_FX_MAX_EXP) {
+		/* invalid unless the value is exactly MININT */
+		if (Dbl_isoverflow_to_int(src_exponent,srcp1,srcp2)) {
+                        if (Dbl_iszero_sign(srcp1)) result = 0x7fffffff;
+                        else result = 0x80000000; 
+
+	                if (Is_invalidtrap_enabled()) {
+                            return(INVALIDEXCEPTION);
+                        }
+                        Set_invalidflag();
+			*dstptr = result;
+			return(NOEXCEPTION);
+		}
+	}
+	/*
+	 * Generate result by truncation (no rounding-mode dispatch)
+	 */
+	if (src_exponent >= 0) {
+		tempp1 = srcp1;
+		tempp2 = srcp2;
+		Dbl_clear_signexponent_set_hidden(tempp1);
+		Int_from_dbl_mantissa(tempp1,tempp2,src_exponent);
+		if (Dbl_isone_sign(srcp1) && (src_exponent <= SGL_FX_MAX_EXP))
+			result = -Dbl_allp1(tempp1);
+		else result = Dbl_allp1(tempp1);
+		*dstptr = result;
+
+		/* inexact if any fraction bits were truncated away */
+		if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	else {
+		*dstptr = 0;
+
+		/* a nonzero source truncated to zero is inexact */
+		if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Double Floating-point to Double Fixed-point /w truncated result
+ */
+/*ARGSUSED*/
+int
+dbl_to_dbl_fcnvfxt(
+			dbl_floating_point *srcptr,
+			unsigned int *nullptr,
+			dbl_integer *dstptr,
+			unsigned int *status)
+{
+	register int src_exponent, resultp1;
+	register unsigned int srcp1, srcp2, tempp1, tempp2, resultp2;
+
+	Dbl_copyfromptr(srcptr,srcp1,srcp2);
+	src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
+
+	/*
+	 * Overflow: magnitude too large for a 64-bit signed integer
+	 */
+	if (src_exponent > DBL_FX_MAX_EXP) {
+		/* every overflow except an exact MININT raises invalid */
+		if ((src_exponent > DBL_FX_MAX_EXP + 1) || 
+		Dbl_isnotzero_mantissa(srcp1,srcp2) || Dbl_iszero_sign(srcp1)) {
+                        if (Dbl_iszero_sign(srcp1)) {
+                              resultp1 = 0x7fffffff;
+			      resultp2 = 0xffffffff;
+			}
+                        else {
+			    resultp1 = 0x80000000; 
+			    resultp2 = 0;
+			}
+	                if (Is_invalidtrap_enabled()) {
+                            return(INVALIDEXCEPTION);
+                        }
+                        Set_invalidflag();
+    		        Dint_copytoptr(resultp1,resultp2,dstptr);
+			return(NOEXCEPTION);
+		}
+	}
+	/*
+	 * Generate result by truncation (no rounding-mode dispatch)
+	 */
+	if (src_exponent >= 0) {
+		tempp1 = srcp1;
+		tempp2 = srcp2;
+		Dbl_clear_signexponent_set_hidden(tempp1);
+		Dint_from_dbl_mantissa(tempp1,tempp2,src_exponent,
+		resultp1,resultp2);
+		if (Dbl_isone_sign(srcp1)) {
+			Dint_setone_sign(resultp1,resultp2);
+		}
+		Dint_copytoptr(resultp1,resultp2,dstptr);
+
+		/* inexact if any fraction bits were truncated away */
+		if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	else {
+		Dint_setzero(resultp1,resultp2);
+		Dint_copytoptr(resultp1,resultp2,dstptr);
+
+		/* a nonzero source truncated to zero is inexact */
+		if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+			if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+			else Set_inexactflag();
+		}
+	}
+	return(NOEXCEPTION);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvuf.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvuf.c
new file mode 100644
index 0000000..5e68189
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvuf.c
@@ -0,0 +1,318 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/fcnvuf.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Fixed point to Floating-point Converts
+ *
+ *  External Interfaces:
+ *	dbl_to_dbl_fcnvuf(srcptr,nullptr,dstptr,status)
+ *	dbl_to_sgl_fcnvuf(srcptr,nullptr,dstptr,status)
+ *	sgl_to_dbl_fcnvuf(srcptr,nullptr,dstptr,status)
+ *	sgl_to_sgl_fcnvuf(srcptr,nullptr,dstptr,status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/************************************************************************
+ *  Fixed point to Floating-point Converts				*
+ ************************************************************************/
+
+/*
+ *  Convert Single Unsigned Fixed to Single Floating-point format
+ */
+
+int
+sgl_to_sgl_fcnvuf(
+			unsigned int *srcptr,
+			unsigned int *nullptr,
+			sgl_floating_point *dstptr,
+			unsigned int *status)
+{
+	register unsigned int src, result = 0;
+	register int dst_exponent;
+
+	src = *srcptr;
+
+	/* Check for zero: converts exactly to +0.0 */ 
+	if (src == 0) { 
+	       	Sgl_setzero(result); 
+		*dstptr = result;
+	       	return(NOEXCEPTION); 
+	} 
+	/*
+	 * Generate exponent and normalized mantissa
+	 */
+	dst_exponent = 16;    /* initialize for normalization */
+	/*
+	 * Check word for most significant bit set.  Returns
+	 * a value in dst_exponent indicating the bit position,
+	 * between -1 and 30.
+	 */
+	Find_ms_one_bit(src,dst_exponent);
+	/*  left justify source, with msb at bit position 0  */
+	src <<= dst_exponent+1;
+	Sgl_set_mantissa(result, src >> SGL_EXP_LENGTH);
+	Sgl_set_exponent(result, 30+SGL_BIAS - dst_exponent);
+
+	/* inexact when the source has more bits than the mantissa holds */
+	if (Suint_isinexact_to_sgl(src)) {
+		switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				Sgl_increment(result);
+				break;
+			case ROUNDMINUS: /* never negative: nothing to do */
+				break;
+			case ROUNDNEAREST:
+				Sgl_roundnearest_from_suint(src,result);
+				break;
+		}
+		if (Is_inexacttrap_enabled()) {
+			*dstptr = result;
+			return(INEXACTEXCEPTION);
+		}
+		else Set_inexactflag();
+	}
+	*dstptr = result;
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Single Unsigned Fixed to Double Floating-point (no inexact path needed)
+ */
+
+int
+sgl_to_dbl_fcnvuf(
+			unsigned int *srcptr,
+			unsigned int *nullptr,
+			dbl_floating_point *dstptr,
+			unsigned int *status)
+{
+	register int dst_exponent;
+	register unsigned int src, resultp1 = 0, resultp2 = 0;
+
+	src = *srcptr;
+
+	/* Check for zero: converts exactly to +0.0 */
+	if (src == 0) {
+	       	Dbl_setzero(resultp1,resultp2);
+	       	Dbl_copytoptr(resultp1,resultp2,dstptr);
+	       	return(NOEXCEPTION);
+	}
+	/*
+	 * Generate exponent and normalized mantissa
+	 */
+	dst_exponent = 16;    /* initialize for normalization */
+	/*
+	 * Check word for most significant bit set.  Returns
+	 * a value in dst_exponent indicating the bit position,
+	 * between -1 and 30.
+	 */
+	Find_ms_one_bit(src,dst_exponent);
+	/*  left justify source, with msb at bit position 0  */
+	src <<= dst_exponent+1;
+	Dbl_set_mantissap1(resultp1, src >> DBL_EXP_LENGTH);
+	Dbl_set_mantissap2(resultp2, src << (32-DBL_EXP_LENGTH));
+	Dbl_set_exponent(resultp1, (30+DBL_BIAS) - dst_exponent);
+	Dbl_copytoptr(resultp1,resultp2,dstptr);
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Double Unsigned Fixed to Single Floating-point 
+ */
+
+int
+dbl_to_sgl_fcnvuf(
+			dbl_unsigned *srcptr,
+			unsigned int *nullptr,
+			sgl_floating_point *dstptr,
+			unsigned int *status)
+{
+	int dst_exponent;
+	unsigned int srcp1, srcp2, result = 0;
+
+	Duint_copyfromptr(srcptr,srcp1,srcp2);
+
+	/* Check for zero: converts exactly to +0.0 */
+	if (srcp1 == 0 && srcp2 == 0) {
+	       	Sgl_setzero(result);
+	       	*dstptr = result;
+	       	return(NOEXCEPTION);
+	}
+	/*
+	 * Generate exponent and normalized mantissa
+	 */
+	dst_exponent = 16;    /* initialize for normalization */
+	if (srcp1 == 0) {
+		/*
+		 * Check word for most significant bit set.  Returns
+		 * a value in dst_exponent indicating the bit position,
+		 * between -1 and 30.
+		 */
+		Find_ms_one_bit(srcp2,dst_exponent);
+		/*  left justify source, with msb at bit position 0  */
+		srcp1 = srcp2 << dst_exponent+1;    
+		srcp2 = 0;
+		/*
+		 *  since msb set is in second word, need to 
+		 *  adjust bit position count
+		 */
+		dst_exponent += 32;
+	}
+	else {
+		/*
+		 * Check word for most significant bit set.  Returns
+		 * a value in dst_exponent indicating the bit position,
+		 * between -1 and 30.
+		 *
+		 */
+		Find_ms_one_bit(srcp1,dst_exponent);
+		/*  left justify source, with msb at bit position 0  */
+		if (dst_exponent >= 0) {
+			Variable_shift_double(srcp1,srcp2,(31-dst_exponent),
+			 srcp1); 
+			srcp2 <<= dst_exponent+1;
+		}
+	}
+	Sgl_set_mantissa(result, srcp1 >> SGL_EXP_LENGTH);
+	Sgl_set_exponent(result, (62+SGL_BIAS) - dst_exponent);
+
+	/* inexact when low-order source bits fall below the mantissa */
+	if (Duint_isinexact_to_sgl(srcp1,srcp2)) {
+		switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				Sgl_increment(result);
+				break;
+			case ROUNDMINUS: /* never negative: nothing to do */
+				break;
+			case ROUNDNEAREST:
+				Sgl_roundnearest_from_duint(srcp1,srcp2,result);
+				break;
+		}
+		if (Is_inexacttrap_enabled()) {
+			*dstptr = result;
+			return(INEXACTEXCEPTION);
+		}
+		else Set_inexactflag();
+	}
+	*dstptr = result;
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Double Unsigned Fixed to Double Floating-point 
+ */
+
+int
+dbl_to_dbl_fcnvuf(
+		    dbl_unsigned *srcptr,
+		    unsigned int *nullptr,
+		    dbl_floating_point *dstptr,
+		    unsigned int *status)
+{
+	register int dst_exponent;
+	register unsigned int srcp1, srcp2, resultp1 = 0, resultp2 = 0;
+
+	Duint_copyfromptr(srcptr,srcp1,srcp2);
+
+	/* Check for zero: converts exactly to +0.0 */
+	if (srcp1 == 0 && srcp2 ==0) {
+	       	Dbl_setzero(resultp1,resultp2);
+	       	Dbl_copytoptr(resultp1,resultp2,dstptr);
+	       	return(NOEXCEPTION);
+	}
+	/*
+	 * Generate exponent and normalized mantissa
+	 */
+	dst_exponent = 16;    /* initialize for normalization */
+	if (srcp1 == 0) {
+		/*
+		 * Check word for most significant bit set.  Returns
+		 * a value in dst_exponent indicating the bit position,
+		 * between -1 and 30.
+		 */
+		Find_ms_one_bit(srcp2,dst_exponent);
+		/*  left justify source, with msb at bit position 0  */
+		srcp1 = srcp2 << dst_exponent+1;
+		srcp2 = 0;
+		/*
+		 *  since msb set is in second word, need to 
+		 *  adjust bit position count
+		 */
+		dst_exponent += 32;
+	}
+	else {
+		/*
+		 * Check word for most significant bit set.  Returns
+		 * a value in dst_exponent indicating the bit position,
+		 * between -1 and 30.
+		 */
+		Find_ms_one_bit(srcp1,dst_exponent);
+		/*  left justify source, with msb at bit position 0  */
+		if (dst_exponent >= 0) {
+			Variable_shift_double(srcp1,srcp2,(31-dst_exponent),
+			 srcp1); 
+			srcp2 <<= dst_exponent+1;
+		}
+	}
+	Dbl_set_mantissap1(resultp1, srcp1 >> DBL_EXP_LENGTH);
+	Shiftdouble(srcp1,srcp2,DBL_EXP_LENGTH,resultp2);
+	Dbl_set_exponent(resultp1, (62+DBL_BIAS) - dst_exponent);
+
+	/* inexact when low-order source bits fall below the mantissa */
+	if (Duint_isinexact_to_dbl(srcp2)) {
+		switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				Dbl_increment(resultp1,resultp2);
+				break;
+			case ROUNDMINUS: /* never negative: nothing to do */
+				break;
+			case ROUNDNEAREST:
+				Dbl_roundnearest_from_duint(srcp2,resultp1,
+				resultp2);
+				break;
+		}
+		if (Is_inexacttrap_enabled()) {
+			Dbl_copytoptr(resultp1,resultp2,dstptr);
+			return(INEXACTEXCEPTION);
+		}
+		else Set_inexactflag();
+	}
+	Dbl_copytoptr(resultp1,resultp2,dstptr);
+	return(NOEXCEPTION);
+}
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvxf.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvxf.c
new file mode 100644
index 0000000..05c7fad
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/fcnvxf.c
@@ -0,0 +1,386 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/fcnvxf.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Single Fixed-point to Single Floating-point
+ *	Single Fixed-point to Double Floating-point 
+ *	Double Fixed-point to Single Floating-point 
+ *	Double Fixed-point to Double Floating-point 
+ *
+ *  External Interfaces:
+ *	dbl_to_dbl_fcnvxf(srcptr,nullptr,dstptr,status)
+ *	dbl_to_sgl_fcnvxf(srcptr,nullptr,dstptr,status)
+ *	sgl_to_dbl_fcnvxf(srcptr,nullptr,dstptr,status)
+ *	sgl_to_sgl_fcnvxf(srcptr,nullptr,dstptr,status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/*
+ *  Convert single fixed-point to single floating-point format
+ */
+
+/*
+ * sgl_to_sgl_fcnvxf(): convert the 32-bit signed integer at *srcptr to
+ * an IEEE single-precision value at *dstptr.
+ *
+ *   srcptr  - source fixed-point operand
+ *   nullptr - unused second-operand slot (common emulation interface)
+ *   dstptr  - destination single-precision result
+ *   status  - FPU status word, accessed via Fpustatus_register for the
+ *             rounding mode, trap enables and exception flags
+ *
+ * Returns NOEXCEPTION, or INEXACTEXCEPTION when low-order bits were
+ * discarded and the inexact trap is enabled.
+ */
+int
+sgl_to_sgl_fcnvxf(
+		    int *srcptr,
+		    unsigned int *nullptr,
+		    sgl_floating_point *dstptr,
+		    unsigned int *status)
+{
+	register int src, dst_exponent;
+	register unsigned int result = 0;
+
+	src = *srcptr;
+	/* 
+	 * set sign bit of result and get magnitude of source 
+	 */
+	if (src < 0) {
+		Sgl_setone_sign(result);  
+		Int_negate(src);
+	}
+	else {
+		Sgl_setzero_sign(result);
+        	/* Check for zero */ 
+        	if (src == 0) { 
+                	Sgl_setzero(result); 
+			*dstptr = result;
+                	return(NOEXCEPTION); 
+        	} 
+	}
+	/*
+	 * Generate exponent and normalized mantissa
+	 */
+	dst_exponent = 16;    /* initialize for normalization */
+	/*
+	 * Check word for most significant bit set.  Returns
+	 * a value in dst_exponent indicating the bit position,
+	 * between -1 and 30.
+	 */
+	Find_ms_one_bit(src,dst_exponent);
+	/*  left justify source, with msb at bit position 1  */
+	if (dst_exponent >= 0) src <<= dst_exponent;
+	/* dst_exponent == -1 only when the magnitude is 2**31 (src was
+	 * INT_MIN, which Int_negate leaves unchanged); the mantissa is
+	 * then exactly 1.0, represented by a single bit at position 1 */
+	else src = 1 << 30;
+	Sgl_set_mantissa(result, src >> (SGL_EXP_LENGTH-1));
+	Sgl_set_exponent(result, 30+SGL_BIAS - dst_exponent);
+
+	/* check for inexact: bits of the left-justified value below the
+	 * single-precision significand were discarded above */
+	if (Int_isinexact_to_sgl(src)) {
+		switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				/* toward +inf: bump only positive results */
+				if (Sgl_iszero_sign(result)) 
+					Sgl_increment(result);
+				break;
+			case ROUNDMINUS: 
+				/* toward -inf: bump only negative results */
+				if (Sgl_isone_sign(result)) 
+					Sgl_increment(result);
+				break;
+			case ROUNDNEAREST:
+				Sgl_roundnearest_from_int(src,result);
+		}
+		if (Is_inexacttrap_enabled()) {
+			*dstptr = result;
+			return(INEXACTEXCEPTION);
+		}
+		else Set_inexactflag();
+	}
+	*dstptr = result;
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Single Fixed-point to Double Floating-point 
+ */
+
+/*
+ * sgl_to_dbl_fcnvxf(): convert the 32-bit signed integer at *srcptr to
+ * an IEEE double-precision value at *dstptr (resultp1 = sign/exponent/
+ * high mantissa word, resultp2 = low mantissa word).
+ *
+ * A 32-bit integer always fits in the 53-bit double significand
+ * (DBL_P), so the conversion is exact — no rounding-mode or inexact
+ * handling is required and the function always returns NOEXCEPTION.
+ */
+int
+sgl_to_dbl_fcnvxf(
+		    int *srcptr,
+		    unsigned int *nullptr,
+		    dbl_floating_point *dstptr,
+		    unsigned int *status)
+{
+	register int src, dst_exponent;
+	register unsigned int resultp1 = 0, resultp2 = 0;
+
+	src = *srcptr;
+	/* 
+	 * set sign bit of result and get magnitude of source 
+	 */
+	if (src < 0) {
+		Dbl_setone_sign(resultp1);  
+		Int_negate(src);
+	}
+	else {
+		Dbl_setzero_sign(resultp1);
+        	/* Check for zero */
+        	if (src == 0) {
+                	Dbl_setzero(resultp1,resultp2);
+                	Dbl_copytoptr(resultp1,resultp2,dstptr);
+                	return(NOEXCEPTION);
+        	}
+	}
+	/*
+	 * Generate exponent and normalized mantissa
+	 */
+	dst_exponent = 16;    /* initialize for normalization */
+	/*
+	 * Check word for most significant bit set.  Returns
+	 * a value in dst_exponent indicating the bit position,
+	 * between -1 and 30.
+	 */
+	Find_ms_one_bit(src,dst_exponent);
+	/*  left justify source, with msb at bit position 1  */
+	if (dst_exponent >= 0) src <<= dst_exponent;
+	/* dst_exponent == -1: magnitude is 2**31 (src was INT_MIN);
+	 * mantissa is exactly 1.0 */
+	else src = 1 << 30;
+	/* NOTE: '>>' binds looser than '-', so this shifts by
+	 * (DBL_EXP_LENGTH - 1), matching the parenthesized form below */
+	Dbl_set_mantissap1(resultp1, src >> DBL_EXP_LENGTH - 1);
+	Dbl_set_mantissap2(resultp2, src << (33-DBL_EXP_LENGTH));
+	Dbl_set_exponent(resultp1, (30+DBL_BIAS) - dst_exponent);
+	Dbl_copytoptr(resultp1,resultp2,dstptr);
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Double Fixed-point to Single Floating-point 
+ */
+
+/*
+ * dbl_to_sgl_fcnvxf(): convert the 64-bit signed integer at *srcptr
+ * (srcp1 = high/signed word, srcp2 = low word) to an IEEE
+ * single-precision value at *dstptr, rounding per the mode in *status.
+ *
+ * Returns NOEXCEPTION, or INEXACTEXCEPTION when low-order bits were
+ * discarded and the inexact trap is enabled.
+ */
+int
+dbl_to_sgl_fcnvxf(
+			dbl_integer *srcptr,
+			unsigned int *nullptr,
+			sgl_floating_point *dstptr,
+			unsigned int *status)
+{
+	int dst_exponent, srcp1;
+	unsigned int result = 0, srcp2;
+
+	Dint_copyfromptr(srcptr,srcp1,srcp2);
+	/* 
+	 * set sign bit of result and get magnitude of source 
+	 */
+	if (srcp1 < 0) {
+		Sgl_setone_sign(result);  
+		Dint_negate(srcp1,srcp2);
+	}
+	else {
+		Sgl_setzero_sign(result);
+        	/* Check for zero */
+        	if (srcp1 == 0 && srcp2 == 0) {
+                	Sgl_setzero(result);
+                	*dstptr = result;
+                	return(NOEXCEPTION);
+		}
+        }
+	/*
+	 * Generate exponent and normalized mantissa
+	 */
+	dst_exponent = 16;    /* initialize for normalization */
+	if (srcp1 == 0) {
+		/* magnitude fits entirely in the low word */
+		/*
+		 * Check word for most significant bit set.  Returns
+		 * a value in dst_exponent indicating the bit position,
+		 * between -1 and 30.
+		 */
+		Find_ms_one_bit(srcp2,dst_exponent);
+		/*  left justify source, with msb at bit position 1  */
+		if (dst_exponent >= 0) {
+			srcp1 = srcp2 << dst_exponent;    
+			srcp2 = 0;
+		}
+		else {
+			/* dst_exponent == -1: bit 0 of srcp2 is set, so
+			 * shift right one to put the msb at position 1 */
+			srcp1 = srcp2 >> 1;
+			srcp2 <<= 31; 
+		}
+		/*
+		 *  since msb set is in second word, need to 
+		 *  adjust bit position count
+		 */
+		dst_exponent += 32;
+	}
+	else {
+		/*
+		 * Check word for most significant bit set.  Returns
+		 * a value in dst_exponent indicating the bit position,
+		 * between -1 and 30.
+		 *
+		 */
+		Find_ms_one_bit(srcp1,dst_exponent);
+		/*  left justify source, with msb at bit position 1  */
+		if (dst_exponent > 0) {
+			Variable_shift_double(srcp1,srcp2,(32-dst_exponent),
+			 srcp1); 
+			srcp2 <<= dst_exponent;
+		}
+		/*
+		 * If dst_exponent = 0, we don't need to shift anything.
+		 * If dst_exponent = -1, src = - 2**63 so we won't need to 
+		 * shift srcp2.
+		 */
+		else srcp1 >>= -(dst_exponent);
+	}
+	/* NOTE: '>>' binds looser than '-', so this shifts by
+	 * (SGL_EXP_LENGTH - 1) */
+	Sgl_set_mantissa(result, srcp1 >> SGL_EXP_LENGTH - 1);
+	Sgl_set_exponent(result, (62+SGL_BIAS) - dst_exponent);
+
+	/* check for inexact: any bits of the left-justified value below
+	 * the single-precision significand were discarded above */
+	if (Dint_isinexact_to_sgl(srcp1,srcp2)) {
+		switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				/* toward +inf: bump only positive results */
+				if (Sgl_iszero_sign(result)) 
+					Sgl_increment(result);
+				break;
+			case ROUNDMINUS: 
+				/* toward -inf: bump only negative results */
+				if (Sgl_isone_sign(result)) 
+					Sgl_increment(result);
+				break;
+			case ROUNDNEAREST:
+				Sgl_roundnearest_from_dint(srcp1,srcp2,result);
+		}
+		if (Is_inexacttrap_enabled()) {
+			*dstptr = result;
+			return(INEXACTEXCEPTION);
+		}
+		else Set_inexactflag();
+	}
+	*dstptr = result;
+	return(NOEXCEPTION);
+}
+
+/*
+ *  Double Fixed-point to Double Floating-point 
+ */
+
+/*
+ * dbl_to_dbl_fcnvxf(): convert the 64-bit signed integer at *srcptr
+ * (srcp1 = high/signed word, srcp2 = low word) to an IEEE
+ * double-precision value at *dstptr, rounding per the mode in *status.
+ *
+ * Returns NOEXCEPTION, or INEXACTEXCEPTION when low-order bits were
+ * discarded and the inexact trap is enabled.
+ */
+int
+dbl_to_dbl_fcnvxf(
+		    dbl_integer *srcptr,
+		    unsigned int *nullptr,
+		    dbl_floating_point *dstptr,
+		    unsigned int *status)
+{
+	register int srcp1, dst_exponent;
+	register unsigned int srcp2, resultp1 = 0, resultp2 = 0;
+
+	Dint_copyfromptr(srcptr,srcp1,srcp2);
+	/* 
+	 * set sign bit of result and get magnitude of source 
+	 */
+	if (srcp1 < 0) {
+		Dbl_setone_sign(resultp1);
+		Dint_negate(srcp1,srcp2);
+	}
+	else {
+		Dbl_setzero_sign(resultp1);
+        	/* Check for zero */
+        	if (srcp1 == 0 && srcp2 ==0) {
+                	Dbl_setzero(resultp1,resultp2);
+                	Dbl_copytoptr(resultp1,resultp2,dstptr);
+                	return(NOEXCEPTION);
+		}
+        }
+	/*
+	 * Generate exponent and normalized mantissa
+	 */
+	dst_exponent = 16;    /* initialize for normalization */
+	if (srcp1 == 0) {
+		/* magnitude fits entirely in the low word */
+		/*
+		 * Check word for most significant bit set.  Returns
+		 * a value in dst_exponent indicating the bit position,
+		 * between -1 and 30.
+		 */
+		Find_ms_one_bit(srcp2,dst_exponent);
+		/*  left justify source, with msb at bit position 1  */
+		if (dst_exponent >= 0) {
+			srcp1 = srcp2 << dst_exponent;    
+			srcp2 = 0;
+		}
+		else {
+			/* dst_exponent == -1: bit 0 of srcp2 is set, so
+			 * shift right one to put the msb at position 1 */
+			srcp1 = srcp2 >> 1;
+			srcp2 <<= 31;
+		}
+		/*
+		 *  since msb set is in second word, need to 
+		 *  adjust bit position count
+		 */
+		dst_exponent += 32;
+	}
+	else {
+		/*
+		 * Check word for most significant bit set.  Returns
+		 * a value in dst_exponent indicating the bit position,
+		 * between -1 and 30.
+		 */
+		Find_ms_one_bit(srcp1,dst_exponent);
+		/*  left justify source, with msb at bit position 1  */
+		if (dst_exponent > 0) {
+			Variable_shift_double(srcp1,srcp2,(32-dst_exponent),
+			 srcp1); 
+			srcp2 <<= dst_exponent;
+		}
+		/*
+		 * If dst_exponent = 0, we don't need to shift anything.
+		 * If dst_exponent = -1, src = - 2**63 so we won't need to 
+		 * shift srcp2.
+		 */
+		else srcp1 >>= -(dst_exponent);
+	}
+	Dbl_set_mantissap1(resultp1, srcp1 >> (DBL_EXP_LENGTH-1));
+	Shiftdouble(srcp1,srcp2,DBL_EXP_LENGTH-1,resultp2);
+	Dbl_set_exponent(resultp1, (62+DBL_BIAS) - dst_exponent);
+
+	/* check for inexact */
+	if (Dint_isinexact_to_dbl(srcp2)) {
+		switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				/* toward +inf: bump only positive results */
+				if (Dbl_iszero_sign(resultp1)) {
+					Dbl_increment(resultp1,resultp2);
+				}
+				break;
+			case ROUNDMINUS: 
+				/* toward -inf: bump only negative results */
+				if (Dbl_isone_sign(resultp1)) {
+					Dbl_increment(resultp1,resultp2);
+				}
+				break;
+			case ROUNDNEAREST:
+				Dbl_roundnearest_from_dint(srcp2,resultp1,
+				resultp2);
+		}
+		if (Is_inexacttrap_enabled()) {
+			Dbl_copytoptr(resultp1,resultp2,dstptr);
+			return(INEXACTEXCEPTION);
+		}
+		else Set_inexactflag();
+	}
+	Dbl_copytoptr(resultp1,resultp2,dstptr);
+	return(NOEXCEPTION);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/float.h b/src/kernel/linux/v4.14/arch/parisc/math-emu/float.h
new file mode 100644
index 0000000..7a51f97
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/float.h
@@ -0,0 +1,581 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ * 
+ *  File: 
+ *      @(#)	pa/spmath/float.h		$Revision: 1.1 $
+ * 
+ *  Purpose:
+ *      <<please update with a synopsis of the functionality provided by this file>>
+ * 
+ *  BE header:  no
+ *
+ *  Shipped:  yes
+ *	/usr/conf/pa/spmath/float.h
+ *
+ * END_DESC  
+*/
+
+#ifdef __NO_PA_HDRS
+    PA header file -- do not include this header file for non-PA builds.
+#endif
+
+#include "fpbits.h"
+#include "hppa.h"
+/*
+ * Want to pick up the FPU capability flags, not the PDC structures.
+ * 'LOCORE' isn't really true in this case, but we don't want the C structures
+ * so it suits our purposes
+ */
+#define LOCORE
+#include "fpu.h"
+
+/*
+ * Declare the basic structures for the 3 different
+ * floating-point precisions.
+ *        
+ * Single number  
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |s|       exp     |               mantissa                      |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+#define	Sall(object) (object)
+#define	Ssign(object) Bitfield_extract( 0,  1,object)
+#define	Ssignedsign(object) Bitfield_signed_extract( 0,  1,object)
+#define	Sexponent(object) Bitfield_extract( 1,  8,object)
+#define	Smantissa(object) Bitfield_mask( 9, 23,object)
+#define	Ssignaling(object) Bitfield_extract( 9,  1,object)
+#define	Ssignalingnan(object) Bitfield_extract( 1,  9,object)
+#define	Shigh2mantissa(object) Bitfield_extract( 9,  2,object)
+#define	Sexponentmantissa(object) Bitfield_mask( 1, 31,object)
+#define	Ssignexponent(object) Bitfield_extract( 0,  9,object)
+#define	Shidden(object) Bitfield_extract( 8,  1,object)
+#define	Shiddenoverflow(object) Bitfield_extract( 7,  1,object)
+#define	Shiddenhigh7mantissa(object) Bitfield_extract( 8,  8,object)
+#define	Shiddenhigh3mantissa(object) Bitfield_extract( 8,  4,object)
+#define	Slow(object) Bitfield_mask( 31,  1,object)
+#define	Slow4(object) Bitfield_mask( 28,  4,object)
+#define	Slow31(object) Bitfield_mask( 1, 31,object)
+#define	Shigh31(object) Bitfield_extract( 0, 31,object)
+#define	Ssignedhigh31(object) Bitfield_signed_extract( 0, 31,object)
+#define	Shigh4(object) Bitfield_extract( 0,  4,object)
+#define	Sbit24(object) Bitfield_extract( 24,  1,object)
+#define	Sbit28(object) Bitfield_extract( 28,  1,object)
+#define	Sbit29(object) Bitfield_extract( 29,  1,object)
+#define	Sbit30(object) Bitfield_extract( 30,  1,object)
+#define	Sbit31(object) Bitfield_mask( 31,  1,object)
+
+#define Deposit_ssign(object,value) Bitfield_deposit(value,0,1,object)
+#define Deposit_sexponent(object,value) Bitfield_deposit(value,1,8,object)
+#define Deposit_smantissa(object,value) Bitfield_deposit(value,9,23,object)
+#define Deposit_shigh2mantissa(object,value) Bitfield_deposit(value,9,2,object)
+#define Deposit_sexponentmantissa(object,value) \
+    Bitfield_deposit(value,1,31,object)
+#define Deposit_ssignexponent(object,value) Bitfield_deposit(value,0,9,object)
+#define Deposit_slow(object,value) Bitfield_deposit(value,31,1,object)
+#define Deposit_shigh4(object,value) Bitfield_deposit(value,0,4,object)
+
+#define	Is_ssign(object) Bitfield_mask( 0,  1,object)
+#define	Is_ssignaling(object) Bitfield_mask( 9,  1,object)
+#define	Is_shidden(object) Bitfield_mask( 8,  1,object)
+#define	Is_shiddenoverflow(object) Bitfield_mask( 7,  1,object)
+#define	Is_slow(object) Bitfield_mask( 31,  1,object)
+#define	Is_sbit24(object) Bitfield_mask( 24,  1,object)
+#define	Is_sbit28(object) Bitfield_mask( 28,  1,object)
+#define	Is_sbit29(object) Bitfield_mask( 29,  1,object)
+#define	Is_sbit30(object) Bitfield_mask( 30,  1,object)
+#define	Is_sbit31(object) Bitfield_mask( 31,  1,object)
+
+/* 
+ * Double number.
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |s|       exponent      |          mantissa part 1              |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |                    mantissa part 2                            |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+#define Dallp1(object) (object)
+#define Dsign(object) Bitfield_extract( 0,  1,object)
+#define Dsignedsign(object) Bitfield_signed_extract( 0,  1,object)
+#define Dexponent(object) Bitfield_extract( 1,  11,object)
+#define Dmantissap1(object) Bitfield_mask( 12, 20,object)
+#define Dsignaling(object) Bitfield_extract( 12,  1,object)
+#define Dsignalingnan(object) Bitfield_extract( 1,  12,object)
+#define Dhigh2mantissa(object) Bitfield_extract( 12,  2,object)
+#define Dexponentmantissap1(object) Bitfield_mask( 1, 31,object)
+#define Dsignexponent(object) Bitfield_extract( 0, 12,object)
+#define Dhidden(object) Bitfield_extract( 11,  1,object)
+#define Dhiddenoverflow(object) Bitfield_extract( 10,  1,object)
+#define Dhiddenhigh7mantissa(object) Bitfield_extract( 11,  8,object)
+#define Dhiddenhigh3mantissa(object) Bitfield_extract( 11,  4,object)
+#define Dlowp1(object) Bitfield_mask( 31,  1,object)
+#define Dlow31p1(object) Bitfield_mask( 1, 31,object)
+#define Dhighp1(object) Bitfield_extract( 0,  1,object)
+#define Dhigh4p1(object) Bitfield_extract( 0,  4,object)
+#define Dhigh31p1(object) Bitfield_extract( 0, 31,object)
+#define Dsignedhigh31p1(object) Bitfield_signed_extract( 0, 31,object)
+#define Dbit3p1(object) Bitfield_extract( 3,  1,object)
+
+#define Deposit_dsign(object,value) Bitfield_deposit(value,0,1,object)
+#define Deposit_dexponent(object,value) Bitfield_deposit(value,1,11,object)
+#define Deposit_dmantissap1(object,value) Bitfield_deposit(value,12,20,object)
+#define Deposit_dhigh2mantissa(object,value) Bitfield_deposit(value,12,2,object)
+#define Deposit_dexponentmantissap1(object,value) \
+    Bitfield_deposit(value,1,31,object)
+#define Deposit_dsignexponent(object,value) Bitfield_deposit(value,0,12,object)
+#define Deposit_dlowp1(object,value) Bitfield_deposit(value,31,1,object)
+#define Deposit_dhigh4p1(object,value) Bitfield_deposit(value,0,4,object)
+
+#define Is_dsign(object) Bitfield_mask( 0,  1,object)
+#define Is_dsignaling(object) Bitfield_mask( 12,  1,object)
+#define Is_dhidden(object) Bitfield_mask( 11,  1,object)
+#define Is_dhiddenoverflow(object) Bitfield_mask( 10,  1,object)
+#define Is_dlowp1(object) Bitfield_mask( 31,  1,object)
+#define Is_dhighp1(object) Bitfield_mask( 0,  1,object)
+#define Is_dbit3p1(object) Bitfield_mask( 3,  1,object)
+
+#define Dallp2(object) (object)
+#define Dmantissap2(object) (object)
+#define Dlowp2(object) Bitfield_mask( 31,  1,object)
+#define Dlow4p2(object) Bitfield_mask( 28,  4,object)
+#define Dlow31p2(object) Bitfield_mask( 1, 31,object)
+#define Dhighp2(object) Bitfield_extract( 0,  1,object)
+#define Dhigh31p2(object) Bitfield_extract( 0, 31,object)
+#define Dbit2p2(object) Bitfield_extract( 2,  1,object)
+#define Dbit3p2(object) Bitfield_extract( 3,  1,object)
+#define Dbit21p2(object) Bitfield_extract( 21,  1,object)
+#define Dbit28p2(object) Bitfield_extract( 28,  1,object)
+#define Dbit29p2(object) Bitfield_extract( 29,  1,object)
+#define Dbit30p2(object) Bitfield_extract( 30,  1,object)
+#define Dbit31p2(object) Bitfield_mask( 31,  1,object)
+
+#define Deposit_dlowp2(object,value) Bitfield_deposit(value,31,1,object)
+
+#define Is_dlowp2(object) Bitfield_mask( 31,  1,object)
+#define Is_dhighp2(object) Bitfield_mask( 0,  1,object)
+#define Is_dbit2p2(object) Bitfield_mask( 2,  1,object)
+#define Is_dbit3p2(object) Bitfield_mask( 3,  1,object)
+#define Is_dbit21p2(object) Bitfield_mask( 21,  1,object)
+#define Is_dbit28p2(object) Bitfield_mask( 28,  1,object)
+#define Is_dbit29p2(object) Bitfield_mask( 29,  1,object)
+#define Is_dbit30p2(object) Bitfield_mask( 30,  1,object)
+#define Is_dbit31p2(object) Bitfield_mask( 31,  1,object)
+
+/* 
+ * Quad number.
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |s|          exponent           |      mantissa part 1          |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |                    mantissa part 2                            |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |                    mantissa part 3                            |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |                    mantissa part 4                            |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+typedef struct
+    {
+    union
+	{
+	struct { unsigned qallp1; } u_qallp1;
+/* Not needed for now...
+	Bitfield_extract( 0,  1,u_qsign,qsign)
+	Bitfield_signed_extract( 0,  1,u_qsignedsign,qsignedsign)
+	Bitfield_extract( 1, 15,u_qexponent,qexponent)
+	Bitfield_extract(16, 16,u_qmantissap1,qmantissap1)
+	Bitfield_extract(16,  1,u_qsignaling,qsignaling)
+	Bitfield_extract(1,  16,u_qsignalingnan,qsignalingnan)
+	Bitfield_extract(16,  2,u_qhigh2mantissa,qhigh2mantissa)
+	Bitfield_extract( 1, 31,u_qexponentmantissap1,qexponentmantissap1)
+	Bitfield_extract( 0, 16,u_qsignexponent,qsignexponent)
+	Bitfield_extract(15,  1,u_qhidden,qhidden)
+	Bitfield_extract(14,  1,u_qhiddenoverflow,qhiddenoverflow)
+	Bitfield_extract(15,  8,u_qhiddenhigh7mantissa,qhiddenhigh7mantissa)
+	Bitfield_extract(15,  4,u_qhiddenhigh3mantissa,qhiddenhigh3mantissa)
+	Bitfield_extract(31,  1,u_qlowp1,qlowp1)
+	Bitfield_extract( 1, 31,u_qlow31p1,qlow31p1)
+	Bitfield_extract( 0,  1,u_qhighp1,qhighp1)
+	Bitfield_extract( 0,  4,u_qhigh4p1,qhigh4p1)
+	Bitfield_extract( 0, 31,u_qhigh31p1,qhigh31p1)
+  */
+	} quad_u1;
+    union
+	{
+	struct { unsigned qallp2; } u_qallp2;
+  /* Not needed for now...
+	Bitfield_extract(31,  1,u_qlowp2,qlowp2)
+	Bitfield_extract( 1, 31,u_qlow31p2,qlow31p2)
+	Bitfield_extract( 0,  1,u_qhighp2,qhighp2)
+	Bitfield_extract( 0, 31,u_qhigh31p2,qhigh31p2)
+   */
+	} quad_u2;
+    union
+	{
+	struct { unsigned qallp3; } u_qallp3;
+  /* Not needed for now...
+	Bitfield_extract(31,  1,u_qlowp3,qlowp3)
+	Bitfield_extract( 1, 31,u_qlow31p3,qlow31p3)
+	Bitfield_extract( 0,  1,u_qhighp3,qhighp3)
+	Bitfield_extract( 0, 31,u_qhigh31p3,qhigh31p3)
+   */ 
+	} quad_u3;
+    union
+	{
+	struct { unsigned qallp4; } u_qallp4;
+    /* Not need for now...
+	Bitfield_extract(31,  1,u_qlowp4,qlowp4)
+	Bitfield_extract( 1, 31,u_qlow31p4,qlow31p4)
+	Bitfield_extract( 0,  1,u_qhighp4,qhighp4)
+	Bitfield_extract( 0, 31,u_qhigh31p4,qhigh31p4)
+     */
+	} quad_u4;
+    } quad_floating_point;
+
+/* Extension - An additional structure to hold the guard, round and
+ *             sticky bits during computations.
+ */
+#define Extall(object) (object)
+#define Extsign(object) Bitfield_extract( 0,  1,object)
+#define Exthigh31(object) Bitfield_extract( 0, 31,object)
+#define Extlow31(object) Bitfield_extract( 1, 31,object)
+#define Extlow(object) Bitfield_extract( 31,  1,object)
+
+/*
+ * Single extended - The upper word is just like single precision,
+ *                 but one additional word of mantissa is needed.
+ */
+#define Sextallp1(object) (object)
+#define Sextallp2(object) (object)
+#define Sextlowp1(object) Bitfield_extract( 31,  1,object)
+#define Sexthighp2(object) Bitfield_extract( 0,  1,object)
+#define Sextlow31p2(object) Bitfield_extract( 1, 31,object)
+#define Sexthiddenoverflow(object) Bitfield_extract( 4,  1,object)
+#define Is_sexthiddenoverflow(object) Bitfield_mask( 4,  1,object)
+
+/*
+ * Double extended - The upper two words are just like double precision,
+ *		     but two additional words of mantissa are needed.
+ */
+#define Dextallp1(object) (object)
+#define Dextallp2(object) (object)
+#define Dextallp3(object) (object)
+#define Dextallp4(object) (object)
+#define Dextlowp2(object) Bitfield_extract( 31,  1,object)
+#define Dexthighp3(object) Bitfield_extract( 0,  1,object)
+#define Dextlow31p3(object) Bitfield_extract( 1, 31,object)
+#define Dexthiddenoverflow(object) Bitfield_extract( 10,  1,object)
+#define Is_dexthiddenoverflow(object) Bitfield_mask( 10,  1,object)
+#define Deposit_dextlowp4(object,value) Bitfield_deposit(value,31,1,object)
+
+/*
+ * Declare the basic structures for the 3 different
+ * fixed-point precisions.
+ *        
+ * Single number  
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |s|                    integer                                  |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+typedef int sgl_integer;
+
+/* 
+ * Double number.
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |s|                     high integer                            |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |                       low integer                             |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+/* 64-bit signed fixed-point operand: wd0 is the high word and carries
+ * the sign (tested as 'srcp1 < 0' by the conversion routines), wd1 is
+ * the low word. */
+struct dint {
+        int  wd0;
+        unsigned int wd1;
+};
+
+/* Generic unsigned two-word (64-bit) container; typedef'd below as
+ * dbl_floating_point and dbl_unsigned. */
+struct dblwd {
+        unsigned int wd0;
+        unsigned int wd1;
+};
+
+/* 
+ * Quad number.
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |s|                  integer part1                              |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |                    integer part 2                             |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |                    integer part 3                             |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |                    integer part 4                             |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+
+/* 128-bit signed fixed-point operand: wd0 is the most significant
+ * (signed) word, wd3 the least significant. */
+struct quadwd {
+        int  wd0;
+        unsigned int wd1;
+        unsigned int wd2;
+        unsigned int wd3;
+};
+
+typedef struct quadwd quad_integer;
+
+
+/* useful typedefs */
+typedef unsigned int sgl_floating_point;	/* single fits in one word */
+typedef struct dblwd dbl_floating_point;	/* double as two words */
+typedef struct dint dbl_integer;	/* 64-bit signed fixed point */
+typedef struct dblwd dbl_unsigned;	/* 64-bit unsigned value */
+
+/* 
+ * Define the different precisions' parameters.
+ */
+#define SGL_BITLENGTH 32
+#define SGL_EMAX 127
+#define SGL_EMIN (-126)
+#define SGL_BIAS 127
+#define SGL_WRAP 192
+#define SGL_INFINITY_EXPONENT (SGL_EMAX+SGL_BIAS+1)
+#define SGL_THRESHOLD 32
+#define SGL_EXP_LENGTH 8
+#define SGL_P 24
+
+#define DBL_BITLENGTH 64
+#define DBL_EMAX 1023
+#define DBL_EMIN (-1022)
+#define DBL_BIAS 1023
+#define DBL_WRAP 1536
+#define DBL_INFINITY_EXPONENT (DBL_EMAX+DBL_BIAS+1)
+#define DBL_THRESHOLD 64
+#define DBL_EXP_LENGTH 11
+#define DBL_P 53
+
+#define QUAD_BITLENGTH 128
+#define QUAD_EMAX 16383
+#define QUAD_EMIN (-16382)
+#define QUAD_BIAS 16383
+#define QUAD_WRAP 24576
+#define QUAD_INFINITY_EXPONENT (QUAD_EMAX+QUAD_BIAS+1)
+#define QUAD_P 113
+
+/* Boolean Values etc. */
+#define FALSE 0
+#define TRUE (!FALSE)
+#define NOT !
+#define XOR ^
+
+/* other constants */
+#undef NULL
+#define NULL 0
+#define NIL 0
+#define SGL 0
+#define DBL 1
+#define BADFMT 2
+#define QUAD 3
+
+
+/* Types */
+typedef int boolean;
+typedef int FORMAT;
+typedef int VOID;
+
+
+/* Declare status register equivalent to FPUs architecture.
+ *
+ *  0 1 2 3 4 5 6 7 8 910 1 2 3 4 5 6 7 8 920 1 2 3 4 5 6 7 8 930 1
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |V|Z|O|U|I|C|  rsv  |  model    | version |RM |rsv|T|r|V|Z|O|U|I|
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+#define Cbit(object) Bitfield_extract( 5, 1,object)
+#define Tbit(object) Bitfield_extract( 25, 1,object)
+#define Roundingmode(object) Bitfield_extract( 21, 2,object)
+#define Invalidtrap(object) Bitfield_extract( 27, 1,object)
+#define Divisionbyzerotrap(object) Bitfield_extract( 28, 1,object)
+#define Overflowtrap(object) Bitfield_extract( 29, 1,object)
+#define Underflowtrap(object) Bitfield_extract( 30, 1,object)
+#define Inexacttrap(object) Bitfield_extract( 31, 1,object)
+#define Invalidflag(object) Bitfield_extract( 0, 1,object)
+#define Divisionbyzeroflag(object) Bitfield_extract( 1, 1,object)
+#define Overflowflag(object) Bitfield_extract( 2, 1,object)
+#define Underflowflag(object) Bitfield_extract( 3, 1,object)
+#define Inexactflag(object) Bitfield_extract( 4, 1,object)
+#define Allflags(object) Bitfield_extract( 0, 5,object)
+
+/* Definitions relevant to the status register */
+
+/* Rounding Modes */
+#define ROUNDNEAREST 0
+#define ROUNDZERO    1
+#define ROUNDPLUS    2
+#define ROUNDMINUS   3
+
+/* Exceptions */
+#define NOEXCEPTION		0x0
+#define INVALIDEXCEPTION	0x20
+#define DIVISIONBYZEROEXCEPTION	0x10
+#define OVERFLOWEXCEPTION	0x08
+#define UNDERFLOWEXCEPTION	0x04
+#define INEXACTEXCEPTION	0x02
+#define UNIMPLEMENTEDEXCEPTION	0x01
+
+/* New exceptions for the 2E Opcode */
+#define OPC_2E_INVALIDEXCEPTION     0x30
+#define OPC_2E_OVERFLOWEXCEPTION    0x18
+#define OPC_2E_UNDERFLOWEXCEPTION   0x0c
+#define OPC_2E_INEXACTEXCEPTION     0x12
+
+/* Declare exception registers equivalent to FPUs architecture 
+ *
+ *  0 1 2 3 4 5 6 7 8 910 1 2 3 4 5 6 7 8 920 1 2 3 4 5 6 7 8 930 1
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |excepttype |  r1     | r2/ext  |  operation  |parm |n| t/cond  |
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+#define Allexception(object) (object)
+#define Exceptiontype(object) Bitfield_extract( 0, 6,object)
+#define Instructionfield(object) Bitfield_mask( 6,26,object)
+#define Parmfield(object) Bitfield_extract( 23, 3,object)
+#define Rabit(object) Bitfield_extract( 24, 1,object)
+#define Ibit(object) Bitfield_extract( 25, 1,object)
+
+#define Set_exceptiontype(object,value) Bitfield_deposit(value, 0, 6,object)
+#define Set_parmfield(object,value) Bitfield_deposit(value, 23, 3,object)
+#define Set_exceptiontype_and_instr_field(exception,instruction,object) \
+    object = exception << 26 | instruction
+
+/* Declare the condition field
+ *
+ *  0 1 2 3 4 5 6 7 8 910 1 2 3 4 5 6 7 8 920 1 2 3 4 5 6 7 8 930 1
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ * |                                                     |G|L|E|U|X|
+ * +-------+-------+-------+-------+-------+-------+-------+-------+
+ */
+#define Greaterthanbit(object) Bitfield_extract( 27, 1,object)
+#define Lessthanbit(object) Bitfield_extract( 28, 1,object)
+#define Equalbit(object) Bitfield_extract( 29, 1,object)
+#define Unorderedbit(object) Bitfield_extract( 30, 1,object)
+#define Exceptionbit(object) Bitfield_extract( 31, 1,object)
+
+/* An alias name for the status register */
+#define Fpustatus_register (*status)
+
+/**************************************************
+ * Status register referencing and manipulation.  *
+ **************************************************/
+
+/* Rounding mode */
+#define Rounding_mode()  Roundingmode(Fpustatus_register)
+#define Is_rounding_mode(rmode) \
+    (Roundingmode(Fpustatus_register) == rmode)
+#define Set_rounding_mode(value) \
+    Bitfield_deposit(value,21,2,Fpustatus_register)
+
+/* Boolean testing of the trap enable bits */
+#define Is_invalidtrap_enabled() Invalidtrap(Fpustatus_register)
+#define Is_divisionbyzerotrap_enabled() Divisionbyzerotrap(Fpustatus_register)
+#define Is_overflowtrap_enabled() Overflowtrap(Fpustatus_register)
+#define Is_underflowtrap_enabled() Underflowtrap(Fpustatus_register)
+#define Is_inexacttrap_enabled() Inexacttrap(Fpustatus_register)
+
+/* Set the indicated flags in the status register */
+#define Set_invalidflag() Bitfield_deposit(1,0,1,Fpustatus_register)
+#define Set_divisionbyzeroflag() Bitfield_deposit(1,1,1,Fpustatus_register)
+#define Set_overflowflag() Bitfield_deposit(1,2,1,Fpustatus_register)
+#define Set_underflowflag() Bitfield_deposit(1,3,1,Fpustatus_register)
+#define Set_inexactflag() Bitfield_deposit(1,4,1,Fpustatus_register)
+
+#define Clear_all_flags() Bitfield_deposit(0,0,5,Fpustatus_register)
+
+/* Manipulate the trap and condition code bits (tbit and cbit) */
+#define Set_tbit() Bitfield_deposit(1,25,1,Fpustatus_register)
+#define Clear_tbit() Bitfield_deposit(0,25,1,Fpustatus_register)
+#define Is_tbit_set() Tbit(Fpustatus_register)
+#define Is_cbit_set() Cbit(Fpustatus_register)
+
+#define Set_status_cbit(value)  \
+        Bitfield_deposit(value,5,1,Fpustatus_register)
+
+/*******************************
+ * Condition field referencing *
+ *******************************/
+#define Unordered(cond) Unorderedbit(cond)
+#define Equal(cond) Equalbit(cond)
+#define Lessthan(cond) Lessthanbit(cond)
+#define Greaterthan(cond) Greaterthanbit(cond)
+#define Exception(cond) Exceptionbit(cond)
+
+
+/* Defines for the extension */
+#define Ext_isone_sign(extent) (Extsign(extent))
+#define Ext_isnotzero(extent) \
+    (Extall(extent))
+#define Ext_isnotzero_lower(extent) \
+    (Extlow31(extent))
+#define Ext_leftshiftby1(extent) \
+    Extall(extent) <<= 1
+#define Ext_negate(extent) \
+    (int )Extall(extent) = 0 - (int )Extall(extent)
+#define Ext_setone_low(extent) Bitfield_deposit(1,31,1,extent)
+#define Ext_setzero(extent) Extall(extent) = 0
+
+typedef int operation;
+
+/* error messages */
+
+#define		NONE		0
+#define		UNDEFFPINST	1
+
+/* Function definitions: opcode, opclass */
+#define FTEST	(1<<2) | 0
+#define FCPY	(2<<2) | 0
+#define FABS	(3<<2) | 0
+#define FSQRT   (4<<2) | 0
+#define FRND    (5<<2) | 0
+
+#define FCNVFF	(0<<2) | 1
+#define FCNVXF	(1<<2) | 1
+#define FCNVFX	(2<<2) | 1
+#define FCNVFXT	(3<<2) | 1
+
+#define FCMP    (0<<2) | 2
+
+#define FADD	(0<<2) | 3
+#define FSUB	(1<<2) | 3
+#define FMPY	(2<<2) | 3
+#define FDIV	(3<<2) | 3
+#define FREM	(4<<2) | 3
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/fmpyfadd.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/fmpyfadd.c
new file mode 100644
index 0000000..b067c45
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/fmpyfadd.c
@@ -0,0 +1,2655 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/fmpyfadd.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Double Floating-point Multiply Fused Add
+ *	Double Floating-point Multiply Negate Fused Add
+ *	Single Floating-point Multiply Fused Add
+ *	Single Floating-point Multiply Negate Fused Add
+ *
+ *  External Interfaces:
+ *	dbl_fmpyfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
+ *	dbl_fmpynfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
+ *	sgl_fmpyfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
+ *	sgl_fmpynfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+
+
+/*
+ *  Double Floating-point Multiply Fused Add
+ */
+
/*
 * dbl_fmpyfadd -- emulate a double-precision fused multiply-add:
 *	*dstptr = (*src1ptr * *src2ptr) + *src3ptr
 * with the product kept exact in a four-word "double extended" format and
 * a single rounding applied only to the final sum.
 *
 * src1ptr/src2ptr: multiply operands; src3ptr: addend; dstptr: result.
 * status: pointer to the FPU status-register word.
 *   NOTE(review): the flag/trap macros (Set_invalidflag, Rounding_mode,
 *   Is_*trap_enabled, ...) appear to operate on this word through the
 *   Fpustatus_register binding defined elsewhere -- confirm in float.h.
 *
 * Returns NOEXCEPTION on success, or an OPC_2E_*EXCEPTION code when the
 * corresponding trap is enabled and the condition occurs.
 */
int
dbl_fmpyfadd(
	    dbl_floating_point *src1ptr,
	    dbl_floating_point *src2ptr,
	    dbl_floating_point *src3ptr,
	    unsigned int *status,
	    dbl_floating_point *dstptr)
{
	unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2, opnd3p1, opnd3p2;
	register unsigned int tmpresp1, tmpresp2, tmpresp3, tmpresp4;
	unsigned int rightp1, rightp2, rightp3, rightp4;
	unsigned int resultp1, resultp2 = 0, resultp3 = 0, resultp4 = 0;
	register int mpy_exponent, add_exponent, count;
	boolean inexact = FALSE, is_tiny = FALSE;

	unsigned int signlessleft1, signlessright1, save;
	register int result_exponent, diff_exponent;
	int sign_save, jumpsize;
	
	Dbl_copyfromptr(src1ptr,opnd1p1,opnd1p2);
	Dbl_copyfromptr(src2ptr,opnd2p1,opnd2p2);
	Dbl_copyfromptr(src3ptr,opnd3p1,opnd3p2);

	/* 
	 * set sign bit of result of multiply
	 */
	if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1)) 
		Dbl_setnegativezerop1(resultp1); 
	else Dbl_setzerop1(resultp1);

	/*
	 * Generate multiply exponent 
	 * (kept as a plain int for now; it may lie outside the storable
	 * exponent range until the add pulls it back in)
	 */
	mpy_exponent = Dbl_exponent(opnd1p1) + Dbl_exponent(opnd2p1) - DBL_BIAS;

	/*
	 * check first operand for NaN's or infinity
	 */
	if (Dbl_isinfinity_exponent(opnd1p1)) {
		if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
			if (Dbl_isnotnan(opnd2p1,opnd2p2) &&
			    Dbl_isnotnan(opnd3p1,opnd3p2)) {
				if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
					/* 
					 * invalid since operands are infinity 
					 * and zero 
					 */
					if (Is_invalidtrap_enabled())
						return(OPC_2E_INVALIDEXCEPTION);
					Set_invalidflag();
					Dbl_makequietnan(resultp1,resultp2);
					Dbl_copytoptr(resultp1,resultp2,dstptr);
					return(NOEXCEPTION);
				}
				/*
				 * Check third operand for infinity with a
				 *  sign opposite of the multiply result
				 */
				if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
				    (Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
					/* 
					 * invalid since attempting a magnitude
					 * subtraction of infinities
					 */
					if (Is_invalidtrap_enabled())
						return(OPC_2E_INVALIDEXCEPTION);
					Set_invalidflag();
					Dbl_makequietnan(resultp1,resultp2);
					Dbl_copytoptr(resultp1,resultp2,dstptr);
					return(NOEXCEPTION);
				}

				/*
			 	 * return infinity
			 	 */
				Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
				Dbl_copytoptr(resultp1,resultp2,dstptr);
				return(NOEXCEPTION);
			}
		}
		else {
			/*
		 	 * is NaN; signaling or quiet?
		 	 */
			if (Dbl_isone_signaling(opnd1p1)) {
				/* trap if INVALIDTRAP enabled */
				if (Is_invalidtrap_enabled()) 
			    		return(OPC_2E_INVALIDEXCEPTION);
				/* make NaN quiet */
				Set_invalidflag();
				Dbl_set_quiet(opnd1p1);
			}
			/* 
			 * is second operand a signaling NaN? 
			 */
			else if (Dbl_is_signalingnan(opnd2p1)) {
				/* trap if INVALIDTRAP enabled */
				if (Is_invalidtrap_enabled())
			    		return(OPC_2E_INVALIDEXCEPTION);
				/* make NaN quiet */
				Set_invalidflag();
				Dbl_set_quiet(opnd2p1);
				Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
				return(NOEXCEPTION);
			}
			/* 
			 * is third operand a signaling NaN? 
			 */
			else if (Dbl_is_signalingnan(opnd3p1)) {
				/* trap if INVALIDTRAP enabled */
				if (Is_invalidtrap_enabled())
			    		return(OPC_2E_INVALIDEXCEPTION);
				/* make NaN quiet */
				Set_invalidflag();
				Dbl_set_quiet(opnd3p1);
				Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
				return(NOEXCEPTION);
			}
			/*
		 	 * return quiet NaN
		 	 */
			Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
			return(NOEXCEPTION);
		}
	}

	/*
	 * check second operand for NaN's or infinity
	 */
	if (Dbl_isinfinity_exponent(opnd2p1)) {
		if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
			if (Dbl_isnotnan(opnd3p1,opnd3p2)) {
				if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
					/* 
					 * invalid since multiply operands are
					 * zero & infinity
					 */
					if (Is_invalidtrap_enabled())
						return(OPC_2E_INVALIDEXCEPTION);
					Set_invalidflag();
					Dbl_makequietnan(opnd2p1,opnd2p2);
					Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
					return(NOEXCEPTION);
				}

				/*
				 * Check third operand for infinity with a
				 *  sign opposite of the multiply result
				 */
				if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
				    (Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
					/* 
					 * invalid since attempting a magnitude
					 * subtraction of infinities
					 */
					if (Is_invalidtrap_enabled())
				       		return(OPC_2E_INVALIDEXCEPTION);
				       	Set_invalidflag();
				       	Dbl_makequietnan(resultp1,resultp2);
					Dbl_copytoptr(resultp1,resultp2,dstptr);
					return(NOEXCEPTION);
				}

				/*
				 * return infinity
				 */
				Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
				Dbl_copytoptr(resultp1,resultp2,dstptr);
				return(NOEXCEPTION);
			}
		}
		else {
			/*
			 * is NaN; signaling or quiet?
			 */
			if (Dbl_isone_signaling(opnd2p1)) {
				/* trap if INVALIDTRAP enabled */
				if (Is_invalidtrap_enabled())
					return(OPC_2E_INVALIDEXCEPTION);
				/* make NaN quiet */
				Set_invalidflag();
				Dbl_set_quiet(opnd2p1);
			}
			/* 
			 * is third operand a signaling NaN? 
			 */
			else if (Dbl_is_signalingnan(opnd3p1)) {
			       	/* trap if INVALIDTRAP enabled */
			       	if (Is_invalidtrap_enabled())
				   		return(OPC_2E_INVALIDEXCEPTION);
			       	/* make NaN quiet */
			       	Set_invalidflag();
			       	Dbl_set_quiet(opnd3p1);
				Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
		       		return(NOEXCEPTION);
			}
			/*
			 * return quiet NaN
			 */
			Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
			return(NOEXCEPTION);
		}
	}

	/*
	 * check third operand for NaN's or infinity
	 */
	if (Dbl_isinfinity_exponent(opnd3p1)) {
		if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
			/* return infinity */
			Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
			return(NOEXCEPTION);
		} else {
			/*
			 * is NaN; signaling or quiet?
			 */
			if (Dbl_isone_signaling(opnd3p1)) {
				/* trap if INVALIDTRAP enabled */
				if (Is_invalidtrap_enabled())
					return(OPC_2E_INVALIDEXCEPTION);
				/* make NaN quiet */
				Set_invalidflag();
				Dbl_set_quiet(opnd3p1);
			}
			/*
			 * return quiet NaN
 			 */
			Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
			return(NOEXCEPTION);
		}
    	}

	/*
	 * Generate multiply mantissa
	 */
	if (Dbl_isnotzero_exponent(opnd1p1)) {
		/* set hidden bit */
		Dbl_clear_signexponent_set_hidden(opnd1p1);
	}
	else {
		/* check for zero */
		if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
			/*
			 * Perform the add opnd3 with zero here.
			 */
			if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
				if (Is_rounding_mode(ROUNDMINUS)) {
					Dbl_or_signs(opnd3p1,resultp1);
				} else {
					Dbl_and_signs(opnd3p1,resultp1);
				}
			}
			/*
			 * Now let's check for trapped underflow case.
			 */
			else if (Dbl_iszero_exponent(opnd3p1) &&
			         Is_underflowtrap_enabled()) {
                    		/* need to normalize results mantissa */
                    		sign_save = Dbl_signextendedsign(opnd3p1);
				result_exponent = 0;
                    		Dbl_leftshiftby1(opnd3p1,opnd3p2);
                    		Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
                    		Dbl_set_sign(opnd3p1,/*using*/sign_save);
                    		Dbl_setwrapped_exponent(opnd3p1,result_exponent,
							unfl);
                    		Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
                    		/* inexact = FALSE */
                    		return(OPC_2E_UNDERFLOWEXCEPTION);
			}
			Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
			return(NOEXCEPTION);
		}
		/* is denormalized, adjust exponent */
		Dbl_clear_signexponent(opnd1p1);
		Dbl_leftshiftby1(opnd1p1,opnd1p2);
		Dbl_normalize(opnd1p1,opnd1p2,mpy_exponent);
	}
	/* opnd2 needs to have hidden bit set with msb in hidden bit */
	if (Dbl_isnotzero_exponent(opnd2p1)) {
		Dbl_clear_signexponent_set_hidden(opnd2p1);
	}
	else {
		/* check for zero */
		if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
			/*
			 * Perform the add opnd3 with zero here.
			 */
			if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
				if (Is_rounding_mode(ROUNDMINUS)) {
					Dbl_or_signs(opnd3p1,resultp1);
				} else {
					Dbl_and_signs(opnd3p1,resultp1);
				}
			}
			/*
			 * Now let's check for trapped underflow case.
			 */
			else if (Dbl_iszero_exponent(opnd3p1) &&
			    Is_underflowtrap_enabled()) {
                    		/* need to normalize results mantissa */
                    		sign_save = Dbl_signextendedsign(opnd3p1);
				result_exponent = 0;
                    		Dbl_leftshiftby1(opnd3p1,opnd3p2);
                    		Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
                    		Dbl_set_sign(opnd3p1,/*using*/sign_save);
                    		Dbl_setwrapped_exponent(opnd3p1,result_exponent,
							unfl);
                    		Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
                    		/* inexact = FALSE */
				return(OPC_2E_UNDERFLOWEXCEPTION);
			}
			Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
			return(NOEXCEPTION);
		}
		/* is denormalized; want to normalize */
		Dbl_clear_signexponent(opnd2p1);
		Dbl_leftshiftby1(opnd2p1,opnd2p2);
		Dbl_normalize(opnd2p1,opnd2p2,mpy_exponent);
	}

	/* Multiply the first two source mantissas together */

	/* 
	 * The intermediate result will be kept in tmpres,
	 * which needs enough room for 106 bits of mantissa,
	 * so lets call it a Double extended.
	 */
	Dblext_setzero(tmpresp1,tmpresp2,tmpresp3,tmpresp4);

	/* 
	 * Four bits at a time are inspected in each loop, and a 
	 * simple shift and add multiply algorithm is used. 
	 */ 
	for (count = DBL_P-1; count >= 0; count -= 4) {
		Dblext_rightshiftby4(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
		if (Dbit28p2(opnd1p2)) {
	 		/* Fourword_add should be an ADD followed by 3 ADDC's */
			Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4, 
			 opnd2p1<<3 | opnd2p2>>29, opnd2p2<<3, 0, 0);
		}
		if (Dbit29p2(opnd1p2)) {
			Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
			 opnd2p1<<2 | opnd2p2>>30, opnd2p2<<2, 0, 0);
		}
		if (Dbit30p2(opnd1p2)) {
			Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
			 opnd2p1<<1 | opnd2p2>>31, opnd2p2<<1, 0, 0);
		}
		if (Dbit31p2(opnd1p2)) {
			Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
			 opnd2p1, opnd2p2, 0, 0);
		}
		Dbl_rightshiftby4(opnd1p1,opnd1p2);
	}
	if (Is_dexthiddenoverflow(tmpresp1)) {
		/* result mantissa >= 2 (mantissa overflow) */
		mpy_exponent++;
		Dblext_rightshiftby1(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
	}

	/*
	 * Restore the sign of the mpy result which was saved in resultp1.
	 * The exponent will continue to be kept in mpy_exponent.
	 */
	Dblext_set_sign(tmpresp1,Dbl_sign(resultp1));

	/* 
	 * No rounding is required, since the result of the multiply
	 * is exact in the extended format.
	 */

	/*
	 * Now we are ready to perform the add portion of the operation.
	 *
	 * The exponents need to be kept as integers for now, since the
	 * multiply result might not fit into the exponent field.  We
	 * can't overflow or underflow because of this yet, since the
	 * add could bring the final result back into range.
	 */
	add_exponent = Dbl_exponent(opnd3p1);

	/*
	 * Check for denormalized or zero add operand.
	 */
	if (add_exponent == 0) {
		/* check for zero */
		if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
			/* right is zero */
			/* Left can't be zero and must be result.
			 *
			 * The final result is now in tmpres and mpy_exponent,
			 * and needs to be rounded and squeezed back into
			 * double precision format from double extended.
			 */
			result_exponent = mpy_exponent;
			Dblext_copy(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
				resultp1,resultp2,resultp3,resultp4);
			sign_save = Dbl_signextendedsign(resultp1);/*save sign*/
			goto round;
		}

		/* 
		 * Neither are zeroes.  
		 * Adjust exponent and normalize add operand.
		 */
		sign_save = Dbl_signextendedsign(opnd3p1);	/* save sign */
		Dbl_clear_signexponent(opnd3p1);
		Dbl_leftshiftby1(opnd3p1,opnd3p2);
		Dbl_normalize(opnd3p1,opnd3p2,add_exponent);
		Dbl_set_sign(opnd3p1,sign_save);	/* restore sign */
	} else {
		Dbl_clear_exponent_set_hidden(opnd3p1);
	}
	/*
	 * Copy opnd3 to the double extended variable called right.
	 */
	Dbl_copyto_dblext(opnd3p1,opnd3p2,rightp1,rightp2,rightp3,rightp4);

	/*
	 * A zero "save" helps discover equal operands (for later),
	 * and is used in swapping operands (if needed).
	 * (save < 0 later means the operand signs differ, i.e. an
	 * effective subtraction.)
	 */
	Dblext_xortointp1(tmpresp1,rightp1,/*to*/save);

	/*
	 * Compare magnitude of operands.
	 * Note: && binds tighter than ||, so this reads
	 * (mpy < add) || (mpy == add && |left| < |right|).
	 */
	Dblext_copytoint_exponentmantissap1(tmpresp1,signlessleft1);
	Dblext_copytoint_exponentmantissap1(rightp1,signlessright1);
	if (mpy_exponent < add_exponent || mpy_exponent == add_exponent &&
	    Dblext_ismagnitudeless(tmpresp2,rightp2,signlessleft1,signlessright1)){
		/*
		 * Set the left operand to the larger one by XOR swap.
		 * First finish the first word "save".
		 */
		Dblext_xorfromintp1(save,rightp1,/*to*/rightp1);
		Dblext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
		Dblext_swap_lower(tmpresp2,tmpresp3,tmpresp4,
			rightp2,rightp3,rightp4);
		/* also setup exponents used in rest of routine */
		diff_exponent = add_exponent - mpy_exponent;
		result_exponent = add_exponent;
	} else {
		/* also setup exponents used in rest of routine */
		diff_exponent = mpy_exponent - add_exponent;
		result_exponent = mpy_exponent;
	}
	/* Invariant: left is not smaller than right. */

	/*
	 * Special case alignment of operands that would force alignment
	 * beyond the extent of the extension.  A further optimization
	 * could special case this but only reduces the path length for
	 * this infrequent case.
	 */
	if (diff_exponent > DBLEXT_THRESHOLD) {
		diff_exponent = DBLEXT_THRESHOLD;
	}

	/* Align right operand by shifting it to the right */
	Dblext_clear_sign(rightp1);
	Dblext_right_align(rightp1,rightp2,rightp3,rightp4,
		/*shifted by*/diff_exponent);
	
	/* Treat sum and difference of the operands separately. */
	if ((int)save < 0) {
		/*
		 * Difference of the two operands.  Overflow can occur if the
		 * multiply overflowed.  A borrow can occur out of the hidden
		 * bit and force a post normalization phase.
		 */
		Dblext_subtract(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
			rightp1,rightp2,rightp3,rightp4,
			resultp1,resultp2,resultp3,resultp4);
		sign_save = Dbl_signextendedsign(resultp1);
		if (Dbl_iszero_hidden(resultp1)) {
			/* Handle normalization */
		/* A straightforward algorithm would now shift the
		 * result and extension left until the hidden bit
		 * becomes one.  Not all of the extension bits need
		 * participate in the shift.  Only the two most 
		 * significant bits (round and guard) are needed.
		 * If only a single shift is needed then the guard
		 * bit becomes a significant low order bit and the
		 * extension must participate in the rounding.
		 * If more than a single shift is needed, then all
		 * bits to the right of the guard bit are zeros, 
		 * and the guard bit may or may not be zero. */
			Dblext_leftshiftby1(resultp1,resultp2,resultp3,
				resultp4);

			/* Need to check for a zero result.  The sign and
			 * exponent fields have already been zeroed.  The more
			 * efficient test of the full object can be used.
			 */
			 if(Dblext_iszero(resultp1,resultp2,resultp3,resultp4)){
				/* Must have been "x-x" or "x+(-x)". */
				if (Is_rounding_mode(ROUNDMINUS))
					Dbl_setone_sign(resultp1);
				Dbl_copytoptr(resultp1,resultp2,dstptr);
				return(NOEXCEPTION);
			}
			result_exponent--;

			/* Look to see if normalization is finished. */
			if (Dbl_isone_hidden(resultp1)) {
				/* No further normalization is needed */
				goto round;
			}

			/* Discover first one bit to determine shift amount.
			 * Use a modified binary search.  We have already
			 * shifted the result one position right and still
			 * not found a one so the remainder of the extension
			 * must be zero and simplifies rounding. */
			/* Scan bytes */
			while (Dbl_iszero_hiddenhigh7mantissa(resultp1)) {
				Dblext_leftshiftby8(resultp1,resultp2,resultp3,resultp4);
				result_exponent -= 8;
			}
			/* Now narrow it down to the nibble */
			if (Dbl_iszero_hiddenhigh3mantissa(resultp1)) {
				/* The lower nibble contains the
				 * normalizing one */
				Dblext_leftshiftby4(resultp1,resultp2,resultp3,resultp4);
				result_exponent -= 4;
			}
			/* Select case where first bit is set (already
			 * normalized) otherwise select the proper shift. */
			jumpsize = Dbl_hiddenhigh3mantissa(resultp1);
			if (jumpsize <= 7) switch(jumpsize) {
			case 1:
				Dblext_leftshiftby3(resultp1,resultp2,resultp3,
					resultp4);
				result_exponent -= 3;
				break;
			case 2:
			case 3:
				Dblext_leftshiftby2(resultp1,resultp2,resultp3,
					resultp4);
				result_exponent -= 2;
				break;
			case 4:
			case 5:
			case 6:
			case 7:
				Dblext_leftshiftby1(resultp1,resultp2,resultp3,
					resultp4);
				result_exponent -= 1;
				break;
			}
		} /* end if (hidden...)... */
	/* Fall through and round */
	} /* end if (save < 0)... */
	else {
		/* Add magnitudes */
		Dblext_addition(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
			rightp1,rightp2,rightp3,rightp4,
			/*to*/resultp1,resultp2,resultp3,resultp4);
		sign_save = Dbl_signextendedsign(resultp1);
		if (Dbl_isone_hiddenoverflow(resultp1)) {
	    		/* Prenormalization required. */
	    		Dblext_arithrightshiftby1(resultp1,resultp2,resultp3,
				resultp4);
	    		result_exponent++;
		} /* end if hiddenoverflow... */
	} /* end else ...add magnitudes... */

	/* Round the result.  If the extension and lower two words are
	 * all zeros, then the result is exact.  Otherwise round in the
	 * correct direction.  Underflow is possible. If a postnormalization
	 * is necessary, then the mantissa is all zeros so no shift is needed.
	 */
  round:
	if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
		Dblext_denormalize(resultp1,resultp2,resultp3,resultp4,
			result_exponent,is_tiny);
	}
	Dbl_set_sign(resultp1,/*using*/sign_save);
	if (Dblext_isnotzero_mantissap3(resultp3) || 
	    Dblext_isnotzero_mantissap4(resultp4)) {
		inexact = TRUE;
		switch(Rounding_mode()) {
		case ROUNDNEAREST: /* The default. */
			if (Dblext_isone_highp3(resultp3)) {
				/* at least 1/2 ulp */
				if (Dblext_isnotzero_low31p3(resultp3) ||
				    Dblext_isnotzero_mantissap4(resultp4) ||
				    Dblext_isone_lowp2(resultp2)) {
					/* either exactly half way and odd or
					 * more than 1/2ulp */
					Dbl_increment(resultp1,resultp2);
				}
			}
	    		break;

		case ROUNDPLUS:
	    		if (Dbl_iszero_sign(resultp1)) {
				/* Round up positive results */
				Dbl_increment(resultp1,resultp2);
			}
			break;
	    
		case ROUNDMINUS:
	    		if (Dbl_isone_sign(resultp1)) {
				/* Round down negative results */
				Dbl_increment(resultp1,resultp2);
			}
	    		/* fall through to ROUNDZERO (truncate) -- no break
	    		 * needed since ROUNDZERO does nothing */
	    
		case ROUNDZERO:;
			/* truncate is simple */
		} /* end switch... */
		if (Dbl_isone_hiddenoverflow(resultp1)) result_exponent++;
	}
	if (result_exponent >= DBL_INFINITY_EXPONENT) {
                /* trap if OVERFLOWTRAP enabled */
                if (Is_overflowtrap_enabled()) {
                        /*
                         * Adjust bias of result
                         */
                        Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl);
                        Dbl_copytoptr(resultp1,resultp2,dstptr);
                        if (inexact)
                            if (Is_inexacttrap_enabled())
                                return (OPC_2E_OVERFLOWEXCEPTION |
					OPC_2E_INEXACTEXCEPTION);
                            else Set_inexactflag();
                        return (OPC_2E_OVERFLOWEXCEPTION);
                }
                inexact = TRUE;
                Set_overflowflag();
                /* set result to infinity or largest number */
                Dbl_setoverflow(resultp1,resultp2);

	} else if (result_exponent <= 0) {	/* underflow case */
		if (Is_underflowtrap_enabled()) {
                        /*
                         * Adjust bias of result
                         */
                	Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
			Dbl_copytoptr(resultp1,resultp2,dstptr);
                        if (inexact)
                            if (Is_inexacttrap_enabled())
                                return (OPC_2E_UNDERFLOWEXCEPTION |
					OPC_2E_INEXACTEXCEPTION);
                            else Set_inexactflag();
	    		return(OPC_2E_UNDERFLOWEXCEPTION);
		}
		else if (inexact && is_tiny) Set_underflowflag();
	}
	else Dbl_set_exponent(resultp1,result_exponent);
	Dbl_copytoptr(resultp1,resultp2,dstptr);
	if (inexact) 
		if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
		else Set_inexactflag();
    	return(NOEXCEPTION);
}
+
+/*
+ *  Double Floating-point Multiply Negate Fused Add
+ */
+
+dbl_fmpynfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
+
+dbl_floating_point *src1ptr, *src2ptr, *src3ptr, *dstptr;
+unsigned int *status;
+{
+	unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2, opnd3p1, opnd3p2;
+	register unsigned int tmpresp1, tmpresp2, tmpresp3, tmpresp4;
+	unsigned int rightp1, rightp2, rightp3, rightp4;
+	unsigned int resultp1, resultp2 = 0, resultp3 = 0, resultp4 = 0;
+	register int mpy_exponent, add_exponent, count;
+	boolean inexact = FALSE, is_tiny = FALSE;
+
+	unsigned int signlessleft1, signlessright1, save;
+	register int result_exponent, diff_exponent;
+	int sign_save, jumpsize;
+	
+	Dbl_copyfromptr(src1ptr,opnd1p1,opnd1p2);
+	Dbl_copyfromptr(src2ptr,opnd2p1,opnd2p2);
+	Dbl_copyfromptr(src3ptr,opnd3p1,opnd3p2);
+
+	/* 
+	 * set sign bit of result of multiply
+	 */
+	if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1)) 
+		Dbl_setzerop1(resultp1);
+	else
+		Dbl_setnegativezerop1(resultp1); 
+
+	/*
+	 * Generate multiply exponent 
+	 */
+	mpy_exponent = Dbl_exponent(opnd1p1) + Dbl_exponent(opnd2p1) - DBL_BIAS;
+
+	/*
+	 * check first operand for NaN's or infinity
+	 */
+	if (Dbl_isinfinity_exponent(opnd1p1)) {
+		if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+			if (Dbl_isnotnan(opnd2p1,opnd2p2) &&
+			    Dbl_isnotnan(opnd3p1,opnd3p2)) {
+				if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
+					/* 
+					 * invalid since operands are infinity 
+					 * and zero 
+					 */
+					if (Is_invalidtrap_enabled())
+						return(OPC_2E_INVALIDEXCEPTION);
+					Set_invalidflag();
+					Dbl_makequietnan(resultp1,resultp2);
+					Dbl_copytoptr(resultp1,resultp2,dstptr);
+					return(NOEXCEPTION);
+				}
+				/*
+				 * Check third operand for infinity with a
+				 *  sign opposite of the multiply result
+				 */
+				if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
+				    (Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
+					/* 
+					 * invalid since attempting a magnitude
+					 * subtraction of infinities
+					 */
+					if (Is_invalidtrap_enabled())
+						return(OPC_2E_INVALIDEXCEPTION);
+					Set_invalidflag();
+					Dbl_makequietnan(resultp1,resultp2);
+					Dbl_copytoptr(resultp1,resultp2,dstptr);
+					return(NOEXCEPTION);
+				}
+
+				/*
+			 	 * return infinity
+			 	 */
+				Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+				Dbl_copytoptr(resultp1,resultp2,dstptr);
+				return(NOEXCEPTION);
+			}
+		}
+		else {
+			/*
+		 	 * is NaN; signaling or quiet?
+		 	 */
+			if (Dbl_isone_signaling(opnd1p1)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled()) 
+			    		return(OPC_2E_INVALIDEXCEPTION);
+				/* make NaN quiet */
+				Set_invalidflag();
+				Dbl_set_quiet(opnd1p1);
+			}
+			/* 
+			 * is second operand a signaling NaN? 
+			 */
+			else if (Dbl_is_signalingnan(opnd2p1)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled())
+			    		return(OPC_2E_INVALIDEXCEPTION);
+				/* make NaN quiet */
+				Set_invalidflag();
+				Dbl_set_quiet(opnd2p1);
+				Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+				return(NOEXCEPTION);
+			}
+			/* 
+			 * is third operand a signaling NaN? 
+			 */
+			else if (Dbl_is_signalingnan(opnd3p1)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled())
+			    		return(OPC_2E_INVALIDEXCEPTION);
+				/* make NaN quiet */
+				Set_invalidflag();
+				Dbl_set_quiet(opnd3p1);
+				Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+				return(NOEXCEPTION);
+			}
+			/*
+		 	 * return quiet NaN
+		 	 */
+			Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
+			return(NOEXCEPTION);
+		}
+	}
+
+	/*
+	 * check second operand for NaN's or infinity
+	 */
+	if (Dbl_isinfinity_exponent(opnd2p1)) {
+		if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
+			if (Dbl_isnotnan(opnd3p1,opnd3p2)) {
+				if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
+					/* 
+					 * invalid since multiply operands are
+					 * zero & infinity
+					 */
+					if (Is_invalidtrap_enabled())
+						return(OPC_2E_INVALIDEXCEPTION);
+					Set_invalidflag();
+					Dbl_makequietnan(opnd2p1,opnd2p2);
+					Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+					return(NOEXCEPTION);
+				}
+
+				/*
+				 * Check third operand for infinity with a
+				 *  sign opposite of the multiply result
+				 */
+				if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
+				    (Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
+					/* 
+					 * invalid since attempting a magnitude
+					 * subtraction of infinities
+					 */
+					if (Is_invalidtrap_enabled())
+				       		return(OPC_2E_INVALIDEXCEPTION);
+				       	Set_invalidflag();
+				       	Dbl_makequietnan(resultp1,resultp2);
+					Dbl_copytoptr(resultp1,resultp2,dstptr);
+					return(NOEXCEPTION);
+				}
+
+				/*
+				 * return infinity
+				 */
+				Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
+				Dbl_copytoptr(resultp1,resultp2,dstptr);
+				return(NOEXCEPTION);
+			}
+		}
+		else {
+			/*
+			 * is NaN; signaling or quiet?
+			 */
+			if (Dbl_isone_signaling(opnd2p1)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled())
+					return(OPC_2E_INVALIDEXCEPTION);
+				/* make NaN quiet */
+				Set_invalidflag();
+				Dbl_set_quiet(opnd2p1);
+			}
+			/* 
+			 * is third operand a signaling NaN? 
+			 */
+			else if (Dbl_is_signalingnan(opnd3p1)) {
+			       	/* trap if INVALIDTRAP enabled */
+			       	if (Is_invalidtrap_enabled())
+				   		return(OPC_2E_INVALIDEXCEPTION);
+			       	/* make NaN quiet */
+			       	Set_invalidflag();
+			       	Dbl_set_quiet(opnd3p1);
+				Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+		       		return(NOEXCEPTION);
+			}
+			/*
+			 * return quiet NaN
+			 */
+			Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
+			return(NOEXCEPTION);
+		}
+	}
+
+	/*
+	 * check third operand for NaN's or infinity
+	 */
+	if (Dbl_isinfinity_exponent(opnd3p1)) {
+		if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
+			/* return infinity */
+			Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+			return(NOEXCEPTION);
+		} else {
+			/*
+			 * is NaN; signaling or quiet?
+			 */
+			if (Dbl_isone_signaling(opnd3p1)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled())
+					return(OPC_2E_INVALIDEXCEPTION);
+				/* make NaN quiet */
+				Set_invalidflag();
+				Dbl_set_quiet(opnd3p1);
+			}
+			/*
+			 * return quiet NaN
+ 			 */
+			Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+			return(NOEXCEPTION);
+		}
+    	}
+
+	/*
+	 * Generate multiply mantissa
+	 */
+	if (Dbl_isnotzero_exponent(opnd1p1)) {
+		/* set hidden bit */
+		Dbl_clear_signexponent_set_hidden(opnd1p1);
+	}
+	else {
+		/* check for zero */
+		if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
+			/*
+			 * Perform the add opnd3 with zero here.
+			 */
+			if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
+				if (Is_rounding_mode(ROUNDMINUS)) {
+					Dbl_or_signs(opnd3p1,resultp1);
+				} else {
+					Dbl_and_signs(opnd3p1,resultp1);
+				}
+			}
+			/*
+			 * Now let's check for trapped underflow case.
+			 */
+			else if (Dbl_iszero_exponent(opnd3p1) &&
+			         Is_underflowtrap_enabled()) {
+                    		/* need to normalize results mantissa */
+                    		sign_save = Dbl_signextendedsign(opnd3p1);
+				result_exponent = 0;
+                    		Dbl_leftshiftby1(opnd3p1,opnd3p2);
+                    		Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
+                    		Dbl_set_sign(opnd3p1,/*using*/sign_save);
+                    		Dbl_setwrapped_exponent(opnd3p1,result_exponent,
+							unfl);
+                    		Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+                    		/* inexact = FALSE */
+                    		return(OPC_2E_UNDERFLOWEXCEPTION);
+			}
+			Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+			return(NOEXCEPTION);
+		}
+		/* is denormalized, adjust exponent */
+		Dbl_clear_signexponent(opnd1p1);
+		Dbl_leftshiftby1(opnd1p1,opnd1p2);
+		Dbl_normalize(opnd1p1,opnd1p2,mpy_exponent);
+	}
+	/* opnd2 needs to have hidden bit set with msb in hidden bit */
+	if (Dbl_isnotzero_exponent(opnd2p1)) {
+		Dbl_clear_signexponent_set_hidden(opnd2p1);
+	}
+	else {
+		/* check for zero */
+		if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
+			/*
+			 * Perform the add opnd3 with zero here.
+			 */
+			if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
+				if (Is_rounding_mode(ROUNDMINUS)) {
+					Dbl_or_signs(opnd3p1,resultp1);
+				} else {
+					Dbl_and_signs(opnd3p1,resultp1);
+				}
+			}
+			/*
+			 * Now let's check for trapped underflow case.
+			 */
+			else if (Dbl_iszero_exponent(opnd3p1) &&
+			    Is_underflowtrap_enabled()) {
+                    		/* need to normalize results mantissa */
+                    		sign_save = Dbl_signextendedsign(opnd3p1);
+				result_exponent = 0;
+                    		Dbl_leftshiftby1(opnd3p1,opnd3p2);
+                    		Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
+                    		Dbl_set_sign(opnd3p1,/*using*/sign_save);
+                    		Dbl_setwrapped_exponent(opnd3p1,result_exponent,
+							unfl);
+                    		Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+                    		/* inexact = FALSE */
+                    		return(OPC_2E_UNDERFLOWEXCEPTION);
+			}
+			Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
+			return(NOEXCEPTION);
+		}
+		/* is denormalized; want to normalize */
+		Dbl_clear_signexponent(opnd2p1);
+		Dbl_leftshiftby1(opnd2p1,opnd2p2);
+		Dbl_normalize(opnd2p1,opnd2p2,mpy_exponent);
+	}
+
+	/* Multiply the first two source mantissas together */
+
+	/* 
+	 * The intermediate result will be kept in tmpres,
+	 * which needs enough room for 106 bits of mantissa,
+	 * so lets call it a Double extended.
+	 */
+	Dblext_setzero(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
+
+	/* 
+	 * Four bits at a time are inspected in each loop, and a 
+	 * simple shift and add multiply algorithm is used. 
+	 */ 
+	for (count = DBL_P-1; count >= 0; count -= 4) {
+		Dblext_rightshiftby4(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
+		if (Dbit28p2(opnd1p2)) {
+	 		/* Fourword_add should be an ADD followed by 3 ADDC's */
+			Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4, 
+			 opnd2p1<<3 | opnd2p2>>29, opnd2p2<<3, 0, 0);
+		}
+		if (Dbit29p2(opnd1p2)) {
+			Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
+			 opnd2p1<<2 | opnd2p2>>30, opnd2p2<<2, 0, 0);
+		}
+		if (Dbit30p2(opnd1p2)) {
+			Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
+			 opnd2p1<<1 | opnd2p2>>31, opnd2p2<<1, 0, 0);
+		}
+		if (Dbit31p2(opnd1p2)) {
+			Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
+			 opnd2p1, opnd2p2, 0, 0);
+		}
+		Dbl_rightshiftby4(opnd1p1,opnd1p2);
+	}
+	if (Is_dexthiddenoverflow(tmpresp1)) {
+		/* result mantissa >= 2 (mantissa overflow) */
+		mpy_exponent++;
+		Dblext_rightshiftby1(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
+	}
+
+	/*
+	 * Restore the sign of the mpy result which was saved in resultp1.
+	 * The exponent will continue to be kept in mpy_exponent.
+	 */
+	Dblext_set_sign(tmpresp1,Dbl_sign(resultp1));
+
+	/* 
+	 * No rounding is required, since the result of the multiply
+	 * is exact in the extended format.
+	 */
+
+	/*
+	 * Now we are ready to perform the add portion of the operation.
+	 *
+	 * The exponents need to be kept as integers for now, since the
+	 * multiply result might not fit into the exponent field.  We
+	 * can't overflow or underflow because of this yet, since the
+	 * add could bring the final result back into range.
+	 */
+	add_exponent = Dbl_exponent(opnd3p1);
+
+	/*
+	 * Check for denormalized or zero add operand.
+	 */
+	if (add_exponent == 0) {
+		/* check for zero */
+		if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
+			/* right is zero */
+			/* Left can't be zero and must be result.
+			 *
+			 * The final result is now in tmpres and mpy_exponent,
+			 * and needs to be rounded and squeezed back into
+			 * double precision format from double extended.
+			 */
+			result_exponent = mpy_exponent;
+			Dblext_copy(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
+				resultp1,resultp2,resultp3,resultp4);
+			sign_save = Dbl_signextendedsign(resultp1);/*save sign*/
+			goto round;
+		}
+
+		/* 
+		 * Neither are zeroes.  
+		 * Adjust exponent and normalize add operand.
+		 */
+		sign_save = Dbl_signextendedsign(opnd3p1);	/* save sign */
+		Dbl_clear_signexponent(opnd3p1);
+		Dbl_leftshiftby1(opnd3p1,opnd3p2);
+		Dbl_normalize(opnd3p1,opnd3p2,add_exponent);
+		Dbl_set_sign(opnd3p1,sign_save);	/* restore sign */
+	} else {
+		Dbl_clear_exponent_set_hidden(opnd3p1);
+	}
+	/*
+	 * Copy opnd3 to the double extended variable called right.
+	 */
+	Dbl_copyto_dblext(opnd3p1,opnd3p2,rightp1,rightp2,rightp3,rightp4);
+
+	/*
+	 * A zero "save" helps discover equal operands (for later),
+	 * and is used in swapping operands (if needed).
+	 */
+	Dblext_xortointp1(tmpresp1,rightp1,/*to*/save);
+
+	/*
+	 * Compare magnitude of operands.
+	 */
+	Dblext_copytoint_exponentmantissap1(tmpresp1,signlessleft1);
+	Dblext_copytoint_exponentmantissap1(rightp1,signlessright1);
+	if (mpy_exponent < add_exponent || mpy_exponent == add_exponent &&
+	    Dblext_ismagnitudeless(tmpresp2,rightp2,signlessleft1,signlessright1)){
+		/*
+		 * Set the left operand to the larger one by XOR swap.
+		 * First finish the first word "save".
+		 */
+		Dblext_xorfromintp1(save,rightp1,/*to*/rightp1);
+		Dblext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
+		Dblext_swap_lower(tmpresp2,tmpresp3,tmpresp4,
+			rightp2,rightp3,rightp4);
+		/* also setup exponents used in rest of routine */
+		diff_exponent = add_exponent - mpy_exponent;
+		result_exponent = add_exponent;
+	} else {
+		/* also setup exponents used in rest of routine */
+		diff_exponent = mpy_exponent - add_exponent;
+		result_exponent = mpy_exponent;
+	}
+	/* Invariant: left is not smaller than right. */
+
+	/*
+	 * Special case alignment of operands that would force alignment
+	 * beyond the extent of the extension.  A further optimization
+	 * could special case this but only reduces the path length for
+	 * this infrequent case.
+	 */
+	if (diff_exponent > DBLEXT_THRESHOLD) {
+		diff_exponent = DBLEXT_THRESHOLD;
+	}
+
+	/* Align right operand by shifting it to the right */
+	Dblext_clear_sign(rightp1);
+	Dblext_right_align(rightp1,rightp2,rightp3,rightp4,
+		/*shifted by*/diff_exponent);
+	
+	/* Treat sum and difference of the operands separately. */
+	if ((int)save < 0) {
+		/*
+		 * Difference of the two operands.  Overflow can occur if the
+		 * multiply overflowed.  A borrow can occur out of the hidden
+		 * bit and force a post normalization phase.
+		 */
+		Dblext_subtract(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
+			rightp1,rightp2,rightp3,rightp4,
+			resultp1,resultp2,resultp3,resultp4);
+		sign_save = Dbl_signextendedsign(resultp1);
+		if (Dbl_iszero_hidden(resultp1)) {
+			/* Handle normalization */
+		/* A straightforward algorithm would now shift the
+		 * result and extension left until the hidden bit
+		 * becomes one.  Not all of the extension bits need
+		 * participate in the shift.  Only the two most 
+		 * significant bits (round and guard) are needed.
+		 * If only a single shift is needed then the guard
+		 * bit becomes a significant low order bit and the
+		 * extension must participate in the rounding.
+		 * If more than a single shift is needed, then all
+		 * bits to the right of the guard bit are zeros, 
+		 * and the guard bit may or may not be zero. */
+			Dblext_leftshiftby1(resultp1,resultp2,resultp3,
+				resultp4);
+
+			/* Need to check for a zero result.  The sign and
+			 * exponent fields have already been zeroed.  The more
+			 * efficient test of the full object can be used.
+			 */
+			 if (Dblext_iszero(resultp1,resultp2,resultp3,resultp4)) {
+				/* Must have been "x-x" or "x+(-x)". */
+				if (Is_rounding_mode(ROUNDMINUS))
+					Dbl_setone_sign(resultp1);
+				Dbl_copytoptr(resultp1,resultp2,dstptr);
+				return(NOEXCEPTION);
+			}
+			result_exponent--;
+
+			/* Look to see if normalization is finished. */
+			if (Dbl_isone_hidden(resultp1)) {
+				/* No further normalization is needed */
+				goto round;
+			}
+
+			/* Discover first one bit to determine shift amount.
+			 * Use a modified binary search.  We have already
+			 * shifted the result one position right and still
+			 * not found a one so the remainder of the extension
+			 * must be zero and simplifies rounding. */
+			/* Scan bytes */
+			while (Dbl_iszero_hiddenhigh7mantissa(resultp1)) {
+				Dblext_leftshiftby8(resultp1,resultp2,resultp3,resultp4);
+				result_exponent -= 8;
+			}
+			/* Now narrow it down to the nibble */
+			if (Dbl_iszero_hiddenhigh3mantissa(resultp1)) {
+				/* The lower nibble contains the
+				 * normalizing one */
+				Dblext_leftshiftby4(resultp1,resultp2,resultp3,resultp4);
+				result_exponent -= 4;
+			}
+			/* Select case where first bit is set (already
+			 * normalized) otherwise select the proper shift. */
+			jumpsize = Dbl_hiddenhigh3mantissa(resultp1);
+			if (jumpsize <= 7) switch(jumpsize) {
+			case 1:
+				Dblext_leftshiftby3(resultp1,resultp2,resultp3,
+					resultp4);
+				result_exponent -= 3;
+				break;
+			case 2:
+			case 3:
+				Dblext_leftshiftby2(resultp1,resultp2,resultp3,
+					resultp4);
+				result_exponent -= 2;
+				break;
+			case 4:
+			case 5:
+			case 6:
+			case 7:
+				Dblext_leftshiftby1(resultp1,resultp2,resultp3,
+					resultp4);
+				result_exponent -= 1;
+				break;
+			}
+		} /* end if (hidden...)... */
+	/* Fall through and round */
+	} /* end if (save < 0)... */
+	else {
+		/* Add magnitudes */
+		Dblext_addition(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
+			rightp1,rightp2,rightp3,rightp4,
+			/*to*/resultp1,resultp2,resultp3,resultp4);
+		sign_save = Dbl_signextendedsign(resultp1);
+		if (Dbl_isone_hiddenoverflow(resultp1)) {
+	    		/* Prenormalization required. */
+	    		Dblext_arithrightshiftby1(resultp1,resultp2,resultp3,
+				resultp4);
+	    		result_exponent++;
+		} /* end if hiddenoverflow... */
+	} /* end else ...add magnitudes... */
+
+	/* Round the result.  If the extension and lower two words are
+	 * all zeros, then the result is exact.  Otherwise round in the
+	 * correct direction.  Underflow is possible. If a postnormalization
+	 * is necessary, then the mantissa is all zeros so no shift is needed.
+	 */
+  round:
+	if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
+		Dblext_denormalize(resultp1,resultp2,resultp3,resultp4,
+			result_exponent,is_tiny);
+	}
+	Dbl_set_sign(resultp1,/*using*/sign_save);
+	if (Dblext_isnotzero_mantissap3(resultp3) || 
+	    Dblext_isnotzero_mantissap4(resultp4)) {
+		inexact = TRUE;
+		switch(Rounding_mode()) {
+		case ROUNDNEAREST: /* The default. */
+			if (Dblext_isone_highp3(resultp3)) {
+				/* at least 1/2 ulp */
+				if (Dblext_isnotzero_low31p3(resultp3) ||
+				    Dblext_isnotzero_mantissap4(resultp4) ||
+				    Dblext_isone_lowp2(resultp2)) {
+					/* either exactly half way and odd or
+					 * more than 1/2ulp */
+					Dbl_increment(resultp1,resultp2);
+				}
+			}
+	    		break;
+
+		case ROUNDPLUS:
+	    		if (Dbl_iszero_sign(resultp1)) {
+				/* Round up positive results */
+				Dbl_increment(resultp1,resultp2);
+			}
+			break;
+	    
+		case ROUNDMINUS:
+	    		if (Dbl_isone_sign(resultp1)) {
+				/* Round down negative results */
+				Dbl_increment(resultp1,resultp2);
+			}
+	    
+		case ROUNDZERO:;
+			/* truncate is simple */
+		} /* end switch... */
+		if (Dbl_isone_hiddenoverflow(resultp1)) result_exponent++;
+	}
+	if (result_exponent >= DBL_INFINITY_EXPONENT) {
+		/* Overflow */
+		if (Is_overflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+                        Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl);
+                        Dbl_copytoptr(resultp1,resultp2,dstptr);
+                        if (inexact)
+                            if (Is_inexacttrap_enabled())
+                                return (OPC_2E_OVERFLOWEXCEPTION |
+					OPC_2E_INEXACTEXCEPTION);
+                            else Set_inexactflag();
+                        return (OPC_2E_OVERFLOWEXCEPTION);
+		}
+		inexact = TRUE;
+		Set_overflowflag();
+		Dbl_setoverflow(resultp1,resultp2);
+	} else if (result_exponent <= 0) {	/* underflow case */
+		if (Is_underflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+                	Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
+			Dbl_copytoptr(resultp1,resultp2,dstptr);
+                        if (inexact)
+                            if (Is_inexacttrap_enabled())
+                                return (OPC_2E_UNDERFLOWEXCEPTION |
+					OPC_2E_INEXACTEXCEPTION);
+                            else Set_inexactflag();
+	    		return(OPC_2E_UNDERFLOWEXCEPTION);
+		}
+		else if (inexact && is_tiny) Set_underflowflag();
+	}
+	else Dbl_set_exponent(resultp1,result_exponent);
+	Dbl_copytoptr(resultp1,resultp2,dstptr);
+	if (inexact) 
+		if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
+		else Set_inexactflag();
+    	return(NOEXCEPTION);
+}
+
+/*
+ *  Single Floating-point Multiply Fused Add
+ */
+
+sgl_fmpyfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
+
+sgl_floating_point *src1ptr, *src2ptr, *src3ptr, *dstptr;
+unsigned int *status;
+{
+	/*
+	 * Fused multiply-add: *dstptr = (*src1ptr * *src2ptr) + *src3ptr
+	 * with a single rounding at the end.  The product is formed
+	 * exactly in a two-word "single extended" format, the addend is
+	 * aligned against it, and only the final sum/difference is
+	 * rounded.  Returns NOEXCEPTION or an OPC_2E_* exception code.
+	 * NOTE(review): sticky flags are raised via the Set_*flag()
+	 * macros, which presumably act on the caller's *status word --
+	 * confirm against the emulation headers.
+	 */
+	unsigned int opnd1, opnd2, opnd3;	/* local copies of the sources */
+	register unsigned int tmpresp1, tmpresp2;	/* exact product (sgl extended) */
+	unsigned int rightp1, rightp2;		/* aligned addend (sgl extended) */
+	unsigned int resultp1, resultp2 = 0;	/* final result (sgl extended) */
+	register int mpy_exponent, add_exponent, count;
+	boolean inexact = FALSE, is_tiny = FALSE;
+
+	/* save: XOR of the operands' first words; sign bit set iff the
+	 * product and the addend have opposite signs (see "(int)save < 0"). */
+	unsigned int signlessleft1, signlessright1, save;
+	register int result_exponent, diff_exponent;
+	int sign_save, jumpsize;
+	
+	Sgl_copyfromptr(src1ptr,opnd1);
+	Sgl_copyfromptr(src2ptr,opnd2);
+	Sgl_copyfromptr(src3ptr,opnd3);
+
+	/* 
+	 * set sign bit of result of multiply
+	 */
+	if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2)) 
+		Sgl_setnegativezero(resultp1); 
+	else Sgl_setzero(resultp1);
+
+	/*
+	 * Generate multiply exponent 
+	 */
+	mpy_exponent = Sgl_exponent(opnd1) + Sgl_exponent(opnd2) - SGL_BIAS;
+
+	/*
+	 * check first operand for NaN's or infinity
+	 */
+	if (Sgl_isinfinity_exponent(opnd1)) {
+		if (Sgl_iszero_mantissa(opnd1)) {
+			if (Sgl_isnotnan(opnd2) && Sgl_isnotnan(opnd3)) {
+				if (Sgl_iszero_exponentmantissa(opnd2)) {
+					/* 
+					 * invalid since operands are infinity 
+					 * and zero 
+					 */
+					if (Is_invalidtrap_enabled())
+						return(OPC_2E_INVALIDEXCEPTION);
+					Set_invalidflag();
+					Sgl_makequietnan(resultp1);
+					Sgl_copytoptr(resultp1,dstptr);
+					return(NOEXCEPTION);
+				}
+				/*
+				 * Check third operand for infinity with a
+				 *  sign opposite of the multiply result
+				 */
+				if (Sgl_isinfinity(opnd3) &&
+				    (Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
+					/* 
+					 * invalid since attempting a magnitude
+					 * subtraction of infinities
+					 */
+					if (Is_invalidtrap_enabled())
+						return(OPC_2E_INVALIDEXCEPTION);
+					Set_invalidflag();
+					Sgl_makequietnan(resultp1);
+					Sgl_copytoptr(resultp1,dstptr);
+					return(NOEXCEPTION);
+				}
+
+				/*
+			 	 * return infinity
+			 	 */
+				Sgl_setinfinity_exponentmantissa(resultp1);
+				Sgl_copytoptr(resultp1,dstptr);
+				return(NOEXCEPTION);
+			}
+		}
+		else {
+			/*
+		 	 * is NaN; signaling or quiet?
+		 	 */
+			if (Sgl_isone_signaling(opnd1)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled()) 
+			    		return(OPC_2E_INVALIDEXCEPTION);
+				/* make NaN quiet */
+				Set_invalidflag();
+				Sgl_set_quiet(opnd1);
+			}
+			/* 
+			 * is second operand a signaling NaN? 
+			 */
+			else if (Sgl_is_signalingnan(opnd2)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled())
+			    		return(OPC_2E_INVALIDEXCEPTION);
+				/* make NaN quiet */
+				Set_invalidflag();
+				Sgl_set_quiet(opnd2);
+				Sgl_copytoptr(opnd2,dstptr);
+				return(NOEXCEPTION);
+			}
+			/* 
+			 * is third operand a signaling NaN? 
+			 */
+			else if (Sgl_is_signalingnan(opnd3)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled())
+			    		return(OPC_2E_INVALIDEXCEPTION);
+				/* make NaN quiet */
+				Set_invalidflag();
+				Sgl_set_quiet(opnd3);
+				Sgl_copytoptr(opnd3,dstptr);
+				return(NOEXCEPTION);
+			}
+			/*
+		 	 * return quiet NaN
+		 	 */
+			Sgl_copytoptr(opnd1,dstptr);
+			return(NOEXCEPTION);
+		}
+	}
+
+	/*
+	 * check second operand for NaN's or infinity
+	 */
+	if (Sgl_isinfinity_exponent(opnd2)) {
+		if (Sgl_iszero_mantissa(opnd2)) {
+			if (Sgl_isnotnan(opnd3)) {
+				if (Sgl_iszero_exponentmantissa(opnd1)) {
+					/* 
+					 * invalid since multiply operands are
+					 * zero & infinity
+					 */
+					if (Is_invalidtrap_enabled())
+						return(OPC_2E_INVALIDEXCEPTION);
+					Set_invalidflag();
+					Sgl_makequietnan(opnd2);
+					Sgl_copytoptr(opnd2,dstptr);
+					return(NOEXCEPTION);
+				}
+
+				/*
+				 * Check third operand for infinity with a
+				 *  sign opposite of the multiply result
+				 */
+				if (Sgl_isinfinity(opnd3) &&
+				    (Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
+					/* 
+					 * invalid since attempting a magnitude
+					 * subtraction of infinities
+					 */
+					if (Is_invalidtrap_enabled())
+				       		return(OPC_2E_INVALIDEXCEPTION);
+				       	Set_invalidflag();
+				       	Sgl_makequietnan(resultp1);
+					Sgl_copytoptr(resultp1,dstptr);
+					return(NOEXCEPTION);
+				}
+
+				/*
+				 * return infinity
+				 */
+				Sgl_setinfinity_exponentmantissa(resultp1);
+				Sgl_copytoptr(resultp1,dstptr);
+				return(NOEXCEPTION);
+			}
+		}
+		else {
+			/*
+			 * is NaN; signaling or quiet?
+			 */
+			if (Sgl_isone_signaling(opnd2)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled())
+					return(OPC_2E_INVALIDEXCEPTION);
+				/* make NaN quiet */
+				Set_invalidflag();
+				Sgl_set_quiet(opnd2);
+			}
+			/* 
+			 * is third operand a signaling NaN? 
+			 */
+			else if (Sgl_is_signalingnan(opnd3)) {
+			       	/* trap if INVALIDTRAP enabled */
+			       	if (Is_invalidtrap_enabled())
+				   		return(OPC_2E_INVALIDEXCEPTION);
+			       	/* make NaN quiet */
+			       	Set_invalidflag();
+			       	Sgl_set_quiet(opnd3);
+				Sgl_copytoptr(opnd3,dstptr);
+		       		return(NOEXCEPTION);
+			}
+			/*
+			 * return quiet NaN
+			 */
+			Sgl_copytoptr(opnd2,dstptr);
+			return(NOEXCEPTION);
+		}
+	}
+
+	/*
+	 * check third operand for NaN's or infinity
+	 */
+	if (Sgl_isinfinity_exponent(opnd3)) {
+		if (Sgl_iszero_mantissa(opnd3)) {
+			/* return infinity */
+			Sgl_copytoptr(opnd3,dstptr);
+			return(NOEXCEPTION);
+		} else {
+			/*
+			 * is NaN; signaling or quiet?
+			 */
+			if (Sgl_isone_signaling(opnd3)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled())
+					return(OPC_2E_INVALIDEXCEPTION);
+				/* make NaN quiet */
+				Set_invalidflag();
+				Sgl_set_quiet(opnd3);
+			}
+			/*
+			 * return quiet NaN
+ 			 */
+			Sgl_copytoptr(opnd3,dstptr);
+			return(NOEXCEPTION);
+		}
+    	}
+
+	/*
+	 * Generate multiply mantissa
+	 */
+	if (Sgl_isnotzero_exponent(opnd1)) {
+		/* set hidden bit */
+		Sgl_clear_signexponent_set_hidden(opnd1);
+	}
+	else {
+		/* check for zero */
+		if (Sgl_iszero_mantissa(opnd1)) {
+			/*
+			 * Perform the add opnd3 with zero here.
+			 */
+			if (Sgl_iszero_exponentmantissa(opnd3)) {
+				if (Is_rounding_mode(ROUNDMINUS)) {
+					Sgl_or_signs(opnd3,resultp1);
+				} else {
+					Sgl_and_signs(opnd3,resultp1);
+				}
+			}
+			/*
+			 * Now let's check for trapped underflow case.
+			 */
+			else if (Sgl_iszero_exponent(opnd3) &&
+			         Is_underflowtrap_enabled()) {
+                    		/* need to normalize results mantissa */
+                    		sign_save = Sgl_signextendedsign(opnd3);
+				result_exponent = 0;
+                    		Sgl_leftshiftby1(opnd3);
+                    		Sgl_normalize(opnd3,result_exponent);
+                    		Sgl_set_sign(opnd3,/*using*/sign_save);
+                    		Sgl_setwrapped_exponent(opnd3,result_exponent,
+							unfl);
+                    		Sgl_copytoptr(opnd3,dstptr);
+                    		/* inexact = FALSE */
+                    		return(OPC_2E_UNDERFLOWEXCEPTION);
+			}
+			Sgl_copytoptr(opnd3,dstptr);
+			return(NOEXCEPTION);
+		}
+		/* is denormalized, adjust exponent */
+		Sgl_clear_signexponent(opnd1);
+		Sgl_leftshiftby1(opnd1);
+		Sgl_normalize(opnd1,mpy_exponent);
+	}
+	/* opnd2 needs to have hidden bit set with msb in hidden bit */
+	if (Sgl_isnotzero_exponent(opnd2)) {
+		Sgl_clear_signexponent_set_hidden(opnd2);
+	}
+	else {
+		/* check for zero */
+		if (Sgl_iszero_mantissa(opnd2)) {
+			/*
+			 * Perform the add opnd3 with zero here.
+			 */
+			if (Sgl_iszero_exponentmantissa(opnd3)) {
+				if (Is_rounding_mode(ROUNDMINUS)) {
+					Sgl_or_signs(opnd3,resultp1);
+				} else {
+					Sgl_and_signs(opnd3,resultp1);
+				}
+			}
+			/*
+			 * Now let's check for trapped underflow case.
+			 */
+			else if (Sgl_iszero_exponent(opnd3) &&
+			    Is_underflowtrap_enabled()) {
+                    		/* need to normalize results mantissa */
+                    		sign_save = Sgl_signextendedsign(opnd3);
+				result_exponent = 0;
+                    		Sgl_leftshiftby1(opnd3);
+                    		Sgl_normalize(opnd3,result_exponent);
+                    		Sgl_set_sign(opnd3,/*using*/sign_save);
+                    		Sgl_setwrapped_exponent(opnd3,result_exponent,
+							unfl);
+                    		Sgl_copytoptr(opnd3,dstptr);
+                    		/* inexact = FALSE */
+                    		return(OPC_2E_UNDERFLOWEXCEPTION);
+			}
+			Sgl_copytoptr(opnd3,dstptr);
+			return(NOEXCEPTION);
+		}
+		/* is denormalized; want to normalize */
+		Sgl_clear_signexponent(opnd2);
+		Sgl_leftshiftby1(opnd2);
+		Sgl_normalize(opnd2,mpy_exponent);
+	}
+
+	/* Multiply the first two source mantissas together */
+
+	/* 
+	 * The intermediate result will be kept in tmpres, which needs
+	 * enough room for the exact product of two 24-bit mantissas,
+	 * so two words (a Single extended) are used.
+	 * (Earlier comment said "106 bits ... Double extended" -- that
+	 * text was copied from the double-precision routine.)
+	 */
+	Sglext_setzero(tmpresp1,tmpresp2);
+
+	/* 
+	 * Four bits at a time are inspected in each loop, and a 
+	 * simple shift and add multiply algorithm is used. 
+	 */ 
+	for (count = SGL_P-1; count >= 0; count -= 4) {
+		Sglext_rightshiftby4(tmpresp1,tmpresp2);
+		if (Sbit28(opnd1)) {
+	 		/* Twoword_add should be an ADD followed by 2 ADDC's */
+			Twoword_add(tmpresp1, tmpresp2, opnd2<<3, 0);
+		}
+		if (Sbit29(opnd1)) {
+			Twoword_add(tmpresp1, tmpresp2, opnd2<<2, 0);
+		}
+		if (Sbit30(opnd1)) {
+			Twoword_add(tmpresp1, tmpresp2, opnd2<<1, 0);
+		}
+		if (Sbit31(opnd1)) {
+			Twoword_add(tmpresp1, tmpresp2, opnd2, 0);
+		}
+		Sgl_rightshiftby4(opnd1);
+	}
+	/* NOTE(review): the extra 3-bit shift below appears to place the
+	 * product at the single-extended fixed point; an overflowed
+	 * mantissa (>= 2) takes one additional shift plus an exponent
+	 * bump -- confirm against the Sglext_* macro definitions. */
+	if (Is_sexthiddenoverflow(tmpresp1)) {
+		/* result mantissa >= 2 (mantissa overflow) */
+		mpy_exponent++;
+		Sglext_rightshiftby4(tmpresp1,tmpresp2);
+	} else {
+		Sglext_rightshiftby3(tmpresp1,tmpresp2);
+	}
+
+	/*
+	 * Restore the sign of the mpy result which was saved in resultp1.
+	 * The exponent will continue to be kept in mpy_exponent.
+	 */
+	Sglext_set_sign(tmpresp1,Sgl_sign(resultp1));
+
+	/* 
+	 * No rounding is required, since the result of the multiply
+	 * is exact in the extended format.
+	 */
+
+	/*
+	 * Now we are ready to perform the add portion of the operation.
+	 *
+	 * The exponents need to be kept as integers for now, since the
+	 * multiply result might not fit into the exponent field.  We
+	 * can't overflow or underflow because of this yet, since the
+	 * add could bring the final result back into range.
+	 */
+	add_exponent = Sgl_exponent(opnd3);
+
+	/*
+	 * Check for denormalized or zero add operand.
+	 */
+	if (add_exponent == 0) {
+		/* check for zero */
+		if (Sgl_iszero_mantissa(opnd3)) {
+			/* right is zero */
+			/* Left can't be zero and must be result.
+			 *
+			 * The final result is now in tmpres and mpy_exponent,
+			 * and needs to be rounded and squeezed back into
+			 * single precision format from single extended.
+			 */
+			result_exponent = mpy_exponent;
+			Sglext_copy(tmpresp1,tmpresp2,resultp1,resultp2);
+			sign_save = Sgl_signextendedsign(resultp1);/*save sign*/
+			goto round;
+		}
+
+		/* 
+		 * Neither are zeroes.  
+		 * Adjust exponent and normalize add operand.
+		 */
+		sign_save = Sgl_signextendedsign(opnd3);	/* save sign */
+		Sgl_clear_signexponent(opnd3);
+		Sgl_leftshiftby1(opnd3);
+		Sgl_normalize(opnd3,add_exponent);
+		Sgl_set_sign(opnd3,sign_save);		/* restore sign */
+	} else {
+		Sgl_clear_exponent_set_hidden(opnd3);
+	}
+	/*
+	 * Copy opnd3 to the single extended variable called right.
+	 */
+	Sgl_copyto_sglext(opnd3,rightp1,rightp2);
+
+	/*
+	 * A zero "save" helps discover equal operands (for later),
+	 * and is used in swapping operands (if needed).
+	 */
+	Sglext_xortointp1(tmpresp1,rightp1,/*to*/save);
+
+	/*
+	 * Compare magnitude of operands.
+	 */
+	Sglext_copytoint_exponentmantissa(tmpresp1,signlessleft1);
+	Sglext_copytoint_exponentmantissa(rightp1,signlessright1);
+	/* && binds tighter than ||: swap when the product's exponent is
+	 * smaller, or the exponents are equal and its magnitude is less. */
+	if (mpy_exponent < add_exponent || mpy_exponent == add_exponent &&
+	    Sglext_ismagnitudeless(signlessleft1,signlessright1)) {
+		/*
+		 * Set the left operand to the larger one by XOR swap.
+		 * First finish the first word "save".
+		 */
+		Sglext_xorfromintp1(save,rightp1,/*to*/rightp1);
+		Sglext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
+		Sglext_swap_lower(tmpresp2,rightp2);
+		/* also setup exponents used in rest of routine */
+		diff_exponent = add_exponent - mpy_exponent;
+		result_exponent = add_exponent;
+	} else {
+		/* also setup exponents used in rest of routine */
+		diff_exponent = mpy_exponent - add_exponent;
+		result_exponent = mpy_exponent;
+	}
+	/* Invariant: left is not smaller than right. */
+
+	/*
+	 * Special case alignment of operands that would force alignment
+	 * beyond the extent of the extension.  A further optimization
+	 * could special case this but only reduces the path length for
+	 * this infrequent case.
+	 */
+	if (diff_exponent > SGLEXT_THRESHOLD) {
+		diff_exponent = SGLEXT_THRESHOLD;
+	}
+
+	/* Align right operand by shifting it to the right */
+	Sglext_clear_sign(rightp1);
+	Sglext_right_align(rightp1,rightp2,/*shifted by*/diff_exponent);
+	
+	/* Treat sum and difference of the operands separately. */
+	/* save's sign bit is set iff the operands' signs differ. */
+	if ((int)save < 0) {
+		/*
+		 * Difference of the two operands.  Overflow can occur if the
+		 * multiply overflowed.  A borrow can occur out of the hidden
+		 * bit and force a post normalization phase.
+		 */
+		Sglext_subtract(tmpresp1,tmpresp2, rightp1,rightp2,
+			resultp1,resultp2);
+		sign_save = Sgl_signextendedsign(resultp1);
+		if (Sgl_iszero_hidden(resultp1)) {
+			/* Handle normalization */
+		/* A straightforward algorithm would now shift the
+		 * result and extension left until the hidden bit
+		 * becomes one.  Not all of the extension bits need
+		 * participate in the shift.  Only the two most 
+		 * significant bits (round and guard) are needed.
+		 * If only a single shift is needed then the guard
+		 * bit becomes a significant low order bit and the
+		 * extension must participate in the rounding.
+		 * If more than a single shift is needed, then all
+		 * bits to the right of the guard bit are zeros, 
+		 * and the guard bit may or may not be zero. */
+			Sglext_leftshiftby1(resultp1,resultp2);
+
+			/* Need to check for a zero result.  The sign and
+			 * exponent fields have already been zeroed.  The more
+			 * efficient test of the full object can be used.
+			 */
+			 if (Sglext_iszero(resultp1,resultp2)) {
+				/* Must have been "x-x" or "x+(-x)". */
+				if (Is_rounding_mode(ROUNDMINUS))
+					Sgl_setone_sign(resultp1);
+				Sgl_copytoptr(resultp1,dstptr);
+				return(NOEXCEPTION);
+			}
+			result_exponent--;
+
+			/* Look to see if normalization is finished. */
+			if (Sgl_isone_hidden(resultp1)) {
+				/* No further normalization is needed */
+				goto round;
+			}
+
+			/* Discover first one bit to determine shift amount.
+			 * Use a modified binary search.  We have already
+			 * shifted the result one position right and still
+			 * not found a one so the remainder of the extension
+			 * must be zero and simplifies rounding. */
+			/* Scan bytes */
+			while (Sgl_iszero_hiddenhigh7mantissa(resultp1)) {
+				Sglext_leftshiftby8(resultp1,resultp2);
+				result_exponent -= 8;
+			}
+			/* Now narrow it down to the nibble */
+			if (Sgl_iszero_hiddenhigh3mantissa(resultp1)) {
+				/* The lower nibble contains the
+				 * normalizing one */
+				Sglext_leftshiftby4(resultp1,resultp2);
+				result_exponent -= 4;
+			}
+			/* Select case where first bit is set (already
+			 * normalized) otherwise select the proper shift. */
+			/* jumpsize holds the hidden bit plus the top two
+			 * mantissa bits (3 bits, so 0..7); after the scans
+			 * above it is nonzero. */
+			jumpsize = Sgl_hiddenhigh3mantissa(resultp1);
+			if (jumpsize <= 7) switch(jumpsize) {
+			case 1:
+				Sglext_leftshiftby3(resultp1,resultp2);
+				result_exponent -= 3;
+				break;
+			case 2:
+			case 3:
+				Sglext_leftshiftby2(resultp1,resultp2);
+				result_exponent -= 2;
+				break;
+			case 4:
+			case 5:
+			case 6:
+			case 7:
+				Sglext_leftshiftby1(resultp1,resultp2);
+				result_exponent -= 1;
+				break;
+			}
+		} /* end if (hidden...)... */
+	/* Fall through and round */
+	} /* end if (save < 0)... */
+	else {
+		/* Add magnitudes */
+		Sglext_addition(tmpresp1,tmpresp2,
+			rightp1,rightp2, /*to*/resultp1,resultp2);
+		sign_save = Sgl_signextendedsign(resultp1);
+		if (Sgl_isone_hiddenoverflow(resultp1)) {
+	    		/* Prenormalization required. */
+	    		Sglext_arithrightshiftby1(resultp1,resultp2);
+	    		result_exponent++;
+		} /* end if hiddenoverflow... */
+	} /* end else ...add magnitudes... */
+
+	/* Round the result.  If the extension and lower two words are
+	 * all zeros, then the result is exact.  Otherwise round in the
+	 * correct direction.  Underflow is possible. If a postnormalization
+	 * is necessary, then the mantissa is all zeros so no shift is needed.
+	 */
+  round:
+	if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
+		Sglext_denormalize(resultp1,resultp2,result_exponent,is_tiny);
+	}
+	Sgl_set_sign(resultp1,/*using*/sign_save);
+	if (Sglext_isnotzero_mantissap2(resultp2)) {
+		inexact = TRUE;
+		switch(Rounding_mode()) {
+		case ROUNDNEAREST: /* The default. */
+			if (Sglext_isone_highp2(resultp2)) {
+				/* at least 1/2 ulp */
+				if (Sglext_isnotzero_low31p2(resultp2) ||
+				    Sglext_isone_lowp1(resultp1)) {
+					/* either exactly half way and odd or
+					 * more than 1/2ulp */
+					Sgl_increment(resultp1);
+				}
+			}
+	    		break;
+
+		case ROUNDPLUS:
+	    		if (Sgl_iszero_sign(resultp1)) {
+				/* Round up positive results */
+				Sgl_increment(resultp1);
+			}
+			break;
+	    
+		case ROUNDMINUS:
+	    		if (Sgl_isone_sign(resultp1)) {
+				/* Round down negative results */
+				Sgl_increment(resultp1);
+			}
+	    		/* no break: falls into ROUNDZERO, which does
+	    		 * nothing further (truncate) */
+	    
+		case ROUNDZERO:;
+			/* truncate is simple */
+		} /* end switch... */
+		if (Sgl_isone_hiddenoverflow(resultp1)) result_exponent++;
+	}
+	if (result_exponent >= SGL_INFINITY_EXPONENT) {
+		/* Overflow */
+		if (Is_overflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+                        Sgl_setwrapped_exponent(resultp1,result_exponent,ovfl);
+                        Sgl_copytoptr(resultp1,dstptr);
+                        if (inexact)
+                            if (Is_inexacttrap_enabled())
+                                return (OPC_2E_OVERFLOWEXCEPTION |
+					OPC_2E_INEXACTEXCEPTION);
+                            else Set_inexactflag();
+                        return (OPC_2E_OVERFLOWEXCEPTION);
+		}
+		inexact = TRUE;
+		Set_overflowflag();
+		Sgl_setoverflow(resultp1);
+	} else if (result_exponent <= 0) {	/* underflow case */
+		if (Is_underflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+                	Sgl_setwrapped_exponent(resultp1,result_exponent,unfl);
+			Sgl_copytoptr(resultp1,dstptr);
+                        if (inexact)
+                            if (Is_inexacttrap_enabled())
+                                return (OPC_2E_UNDERFLOWEXCEPTION |
+					OPC_2E_INEXACTEXCEPTION);
+                            else Set_inexactflag();
+	    		return(OPC_2E_UNDERFLOWEXCEPTION);
+		}
+		else if (inexact && is_tiny) Set_underflowflag();
+	}
+	else Sgl_set_exponent(resultp1,result_exponent);
+	Sgl_copytoptr(resultp1,dstptr);
+	if (inexact) 
+		if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
+		else Set_inexactflag();
+    	return(NOEXCEPTION);
+}
+
+/*
+ *  Single Floating-point Multiply Negate Fused Add
+ */
+
+/*
+ * Emulates the single-precision fused multiply-NEGATE-add:
+ *     *dstptr = -(*src1ptr * *src2ptr) + *src3ptr
+ * with a single rounding performed at the end.
+ *
+ * status : pointer to the emulated FP status register (flags updated via
+ *          the Set_*flag macros)
+ * return : NOEXCEPTION, or an OPC_2E_* exception code (possibly OR'ed with
+ *          OPC_2E_INEXACTEXCEPTION) when the corresponding trap is enabled.
+ *          K&R-style definition; the return type is implicit int.
+ */
+sgl_fmpynfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
+
+sgl_floating_point *src1ptr, *src2ptr, *src3ptr, *dstptr;
+unsigned int *status;
+{
+	unsigned int opnd1, opnd2, opnd3;
+	register unsigned int tmpresp1, tmpresp2;
+	unsigned int rightp1, rightp2;
+	unsigned int resultp1, resultp2 = 0;
+	register int mpy_exponent, add_exponent, count;
+	boolean inexact = FALSE, is_tiny = FALSE;
+
+	unsigned int signlessleft1, signlessright1, save;
+	register int result_exponent, diff_exponent;
+	int sign_save, jumpsize;
+	
+	Sgl_copyfromptr(src1ptr,opnd1);
+	Sgl_copyfromptr(src2ptr,opnd2);
+	Sgl_copyfromptr(src3ptr,opnd3);
+
+	/* 
+	 * set sign bit of result of multiply
+	 */
+	/* note the inversion relative to a plain fused multiply-add: equal
+	 * operand signs make the NEGATED product negative */
+	if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2)) 
+		Sgl_setzero(resultp1);
+	else 
+		Sgl_setnegativezero(resultp1); 
+
+	/*
+	 * Generate multiply exponent 
+	 */
+	mpy_exponent = Sgl_exponent(opnd1) + Sgl_exponent(opnd2) - SGL_BIAS;
+
+	/*
+	 * check first operand for NaN's or infinity
+	 */
+	if (Sgl_isinfinity_exponent(opnd1)) {
+		if (Sgl_iszero_mantissa(opnd1)) {
+			if (Sgl_isnotnan(opnd2) && Sgl_isnotnan(opnd3)) {
+				if (Sgl_iszero_exponentmantissa(opnd2)) {
+					/* 
+					 * invalid since operands are infinity 
+					 * and zero 
+					 */
+					if (Is_invalidtrap_enabled())
+						return(OPC_2E_INVALIDEXCEPTION);
+					Set_invalidflag();
+					Sgl_makequietnan(resultp1);
+					Sgl_copytoptr(resultp1,dstptr);
+					return(NOEXCEPTION);
+				}
+				/*
+				 * Check third operand for infinity with a
+				 *  sign opposite of the multiply result
+				 */
+				if (Sgl_isinfinity(opnd3) &&
+				    (Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
+					/* 
+					 * invalid since attempting a magnitude
+					 * subtraction of infinities
+					 */
+					if (Is_invalidtrap_enabled())
+						return(OPC_2E_INVALIDEXCEPTION);
+					Set_invalidflag();
+					Sgl_makequietnan(resultp1);
+					Sgl_copytoptr(resultp1,dstptr);
+					return(NOEXCEPTION);
+				}
+
+				/*
+			 	 * return infinity
+			 	 */
+				Sgl_setinfinity_exponentmantissa(resultp1);
+				Sgl_copytoptr(resultp1,dstptr);
+				return(NOEXCEPTION);
+			}
+		}
+		else {
+			/*
+		 	 * is NaN; signaling or quiet?
+		 	 */
+			if (Sgl_isone_signaling(opnd1)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled()) 
+			    		return(OPC_2E_INVALIDEXCEPTION);
+				/* make NaN quiet */
+				Set_invalidflag();
+				Sgl_set_quiet(opnd1);
+			}
+			/* 
+			 * is second operand a signaling NaN? 
+			 */
+			else if (Sgl_is_signalingnan(opnd2)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled())
+			    		return(OPC_2E_INVALIDEXCEPTION);
+				/* make NaN quiet */
+				Set_invalidflag();
+				Sgl_set_quiet(opnd2);
+				Sgl_copytoptr(opnd2,dstptr);
+				return(NOEXCEPTION);
+			}
+			/* 
+			 * is third operand a signaling NaN? 
+			 */
+			else if (Sgl_is_signalingnan(opnd3)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled())
+			    		return(OPC_2E_INVALIDEXCEPTION);
+				/* make NaN quiet */
+				Set_invalidflag();
+				Sgl_set_quiet(opnd3);
+				Sgl_copytoptr(opnd3,dstptr);
+				return(NOEXCEPTION);
+			}
+			/*
+		 	 * return quiet NaN
+		 	 */
+			Sgl_copytoptr(opnd1,dstptr);
+			return(NOEXCEPTION);
+		}
+	}
+
+	/*
+	 * check second operand for NaN's or infinity
+	 */
+	if (Sgl_isinfinity_exponent(opnd2)) {
+		if (Sgl_iszero_mantissa(opnd2)) {
+			if (Sgl_isnotnan(opnd3)) {
+				if (Sgl_iszero_exponentmantissa(opnd1)) {
+					/* 
+					 * invalid since multiply operands are
+					 * zero & infinity
+					 */
+					if (Is_invalidtrap_enabled())
+						return(OPC_2E_INVALIDEXCEPTION);
+					Set_invalidflag();
+					Sgl_makequietnan(opnd2);
+					Sgl_copytoptr(opnd2,dstptr);
+					return(NOEXCEPTION);
+				}
+
+				/*
+				 * Check third operand for infinity with a
+				 *  sign opposite of the multiply result
+				 */
+				if (Sgl_isinfinity(opnd3) &&
+				    (Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
+					/* 
+					 * invalid since attempting a magnitude
+					 * subtraction of infinities
+					 */
+					if (Is_invalidtrap_enabled())
+				       		return(OPC_2E_INVALIDEXCEPTION);
+				       	Set_invalidflag();
+				       	Sgl_makequietnan(resultp1);
+					Sgl_copytoptr(resultp1,dstptr);
+					return(NOEXCEPTION);
+				}
+
+				/*
+				 * return infinity
+				 */
+				Sgl_setinfinity_exponentmantissa(resultp1);
+				Sgl_copytoptr(resultp1,dstptr);
+				return(NOEXCEPTION);
+			}
+		}
+		else {
+			/*
+			 * is NaN; signaling or quiet?
+			 */
+			if (Sgl_isone_signaling(opnd2)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled())
+					return(OPC_2E_INVALIDEXCEPTION);
+				/* make NaN quiet */
+				Set_invalidflag();
+				Sgl_set_quiet(opnd2);
+			}
+			/* 
+			 * is third operand a signaling NaN? 
+			 */
+			else if (Sgl_is_signalingnan(opnd3)) {
+			       	/* trap if INVALIDTRAP enabled */
+			       	if (Is_invalidtrap_enabled())
+				   		return(OPC_2E_INVALIDEXCEPTION);
+			       	/* make NaN quiet */
+			       	Set_invalidflag();
+			       	Sgl_set_quiet(opnd3);
+				Sgl_copytoptr(opnd3,dstptr);
+		       		return(NOEXCEPTION);
+			}
+			/*
+			 * return quiet NaN
+			 */
+			Sgl_copytoptr(opnd2,dstptr);
+			return(NOEXCEPTION);
+		}
+	}
+
+	/*
+	 * check third operand for NaN's or infinity
+	 */
+	if (Sgl_isinfinity_exponent(opnd3)) {
+		if (Sgl_iszero_mantissa(opnd3)) {
+			/* return infinity */
+			Sgl_copytoptr(opnd3,dstptr);
+			return(NOEXCEPTION);
+		} else {
+			/*
+			 * is NaN; signaling or quiet?
+			 */
+			if (Sgl_isone_signaling(opnd3)) {
+				/* trap if INVALIDTRAP enabled */
+				if (Is_invalidtrap_enabled())
+					return(OPC_2E_INVALIDEXCEPTION);
+				/* make NaN quiet */
+				Set_invalidflag();
+				Sgl_set_quiet(opnd3);
+			}
+			/*
+			 * return quiet NaN
+ 			 */
+			Sgl_copytoptr(opnd3,dstptr);
+			return(NOEXCEPTION);
+		}
+    	}
+
+	/*
+	 * Generate multiply mantissa
+	 */
+	if (Sgl_isnotzero_exponent(opnd1)) {
+		/* set hidden bit */
+		Sgl_clear_signexponent_set_hidden(opnd1);
+	}
+	else {
+		/* check for zero */
+		if (Sgl_iszero_mantissa(opnd1)) {
+			/*
+			 * Perform the add opnd3 with zero here.
+			 */
+			if (Sgl_iszero_exponentmantissa(opnd3)) {
+				if (Is_rounding_mode(ROUNDMINUS)) {
+					Sgl_or_signs(opnd3,resultp1);
+				} else {
+					Sgl_and_signs(opnd3,resultp1);
+				}
+			}
+			/*
+			 * Now let's check for trapped underflow case.
+			 */
+			else if (Sgl_iszero_exponent(opnd3) &&
+			         Is_underflowtrap_enabled()) {
+                    		/* need to normalize results mantissa */
+                    		sign_save = Sgl_signextendedsign(opnd3);
+				result_exponent = 0;
+                    		Sgl_leftshiftby1(opnd3);
+                    		Sgl_normalize(opnd3,result_exponent);
+                    		Sgl_set_sign(opnd3,/*using*/sign_save);
+                    		Sgl_setwrapped_exponent(opnd3,result_exponent,
+							unfl);
+                    		Sgl_copytoptr(opnd3,dstptr);
+                    		/* inexact = FALSE */
+                    		return(OPC_2E_UNDERFLOWEXCEPTION);
+			}
+			Sgl_copytoptr(opnd3,dstptr);
+			return(NOEXCEPTION);
+		}
+		/* is denormalized, adjust exponent */
+		Sgl_clear_signexponent(opnd1);
+		Sgl_leftshiftby1(opnd1);
+		Sgl_normalize(opnd1,mpy_exponent);
+	}
+	/* opnd2 needs to have hidden bit set with msb in hidden bit */
+	if (Sgl_isnotzero_exponent(opnd2)) {
+		Sgl_clear_signexponent_set_hidden(opnd2);
+	}
+	else {
+		/* check for zero */
+		if (Sgl_iszero_mantissa(opnd2)) {
+			/*
+			 * Perform the add opnd3 with zero here.
+			 */
+			if (Sgl_iszero_exponentmantissa(opnd3)) {
+				if (Is_rounding_mode(ROUNDMINUS)) {
+					Sgl_or_signs(opnd3,resultp1);
+				} else {
+					Sgl_and_signs(opnd3,resultp1);
+				}
+			}
+			/*
+			 * Now let's check for trapped underflow case.
+			 */
+			else if (Sgl_iszero_exponent(opnd3) &&
+			    Is_underflowtrap_enabled()) {
+                    		/* need to normalize results mantissa */
+                    		sign_save = Sgl_signextendedsign(opnd3);
+				result_exponent = 0;
+                    		Sgl_leftshiftby1(opnd3);
+                    		Sgl_normalize(opnd3,result_exponent);
+                    		Sgl_set_sign(opnd3,/*using*/sign_save);
+                    		Sgl_setwrapped_exponent(opnd3,result_exponent,
+							unfl);
+                    		Sgl_copytoptr(opnd3,dstptr);
+                    		/* inexact = FALSE */
+                    		return(OPC_2E_UNDERFLOWEXCEPTION);
+			}
+			Sgl_copytoptr(opnd3,dstptr);
+			return(NOEXCEPTION);
+		}
+		/* is denormalized; want to normalize */
+		Sgl_clear_signexponent(opnd2);
+		Sgl_leftshiftby1(opnd2);
+		Sgl_normalize(opnd2,mpy_exponent);
+	}
+
+	/* Multiply the first two source mantissas together */
+
+	/* 
+	 * The intermediate result will be kept in tmpres,
+	 * which needs enough room for 106 bits of mantissa,
+	 * so lets call it a Double extended.
+	 */
+	/* NOTE(review): the "106 bits" wording appears carried over from the
+	 * double-precision version; a 24x24-bit single multiply needs at most
+	 * 48 mantissa bits. */
+	Sglext_setzero(tmpresp1,tmpresp2);
+
+	/* 
+	 * Four bits at a time are inspected in each loop, and a 
+	 * simple shift and add multiply algorithm is used. 
+	 */ 
+	for (count = SGL_P-1; count >= 0; count -= 4) {
+		Sglext_rightshiftby4(tmpresp1,tmpresp2);
+		if (Sbit28(opnd1)) {
+	 		/* Twoword_add should be an ADD followed by 2 ADDC's */
+			Twoword_add(tmpresp1, tmpresp2, opnd2<<3, 0);
+		}
+		if (Sbit29(opnd1)) {
+			Twoword_add(tmpresp1, tmpresp2, opnd2<<2, 0);
+		}
+		if (Sbit30(opnd1)) {
+			Twoword_add(tmpresp1, tmpresp2, opnd2<<1, 0);
+		}
+		if (Sbit31(opnd1)) {
+			Twoword_add(tmpresp1, tmpresp2, opnd2, 0);
+		}
+		Sgl_rightshiftby4(opnd1);
+	}
+	if (Is_sexthiddenoverflow(tmpresp1)) {
+		/* result mantissa >= 2 (mantissa overflow) */
+		mpy_exponent++;
+		Sglext_rightshiftby4(tmpresp1,tmpresp2);
+	} else {
+		Sglext_rightshiftby3(tmpresp1,tmpresp2);
+	}
+
+	/*
+	 * Restore the sign of the mpy result which was saved in resultp1.
+	 * The exponent will continue to be kept in mpy_exponent.
+	 */
+	Sglext_set_sign(tmpresp1,Sgl_sign(resultp1));
+
+	/* 
+	 * No rounding is required, since the result of the multiply
+	 * is exact in the extended format.
+	 */
+
+	/*
+	 * Now we are ready to perform the add portion of the operation.
+	 *
+	 * The exponents need to be kept as integers for now, since the
+	 * multiply result might not fit into the exponent field.  We
+	 * can't overflow or underflow because of this yet, since the
+	 * add could bring the final result back into range.
+	 */
+	add_exponent = Sgl_exponent(opnd3);
+
+	/*
+	 * Check for denormalized or zero add operand.
+	 */
+	if (add_exponent == 0) {
+		/* check for zero */
+		if (Sgl_iszero_mantissa(opnd3)) {
+			/* right is zero */
+			/* Left can't be zero and must be result.
+			 *
+			 * The final result is now in tmpres and mpy_exponent,
+			 * and needs to be rounded and squeezed back into
+			 * double precision format from double extended.
+			 */
+			result_exponent = mpy_exponent;
+			Sglext_copy(tmpresp1,tmpresp2,resultp1,resultp2);
+			sign_save = Sgl_signextendedsign(resultp1);/*save sign*/
+			goto round;
+		}
+
+		/* 
+		 * Neither are zeroes.  
+		 * Adjust exponent and normalize add operand.
+		 */
+		sign_save = Sgl_signextendedsign(opnd3);	/* save sign */
+		Sgl_clear_signexponent(opnd3);
+		Sgl_leftshiftby1(opnd3);
+		Sgl_normalize(opnd3,add_exponent);
+		Sgl_set_sign(opnd3,sign_save);		/* restore sign */
+	} else {
+		Sgl_clear_exponent_set_hidden(opnd3);
+	}
+	/*
+	 * Copy opnd3 to the double extended variable called right.
+	 */
+	Sgl_copyto_sglext(opnd3,rightp1,rightp2);
+
+	/*
+	 * A zero "save" helps discover equal operands (for later),
+	 * and is used in swapping operands (if needed).
+	 */
+	Sglext_xortointp1(tmpresp1,rightp1,/*to*/save);
+
+	/*
+	 * Compare magnitude of operands.
+	 */
+	Sglext_copytoint_exponentmantissa(tmpresp1,signlessleft1);
+	Sglext_copytoint_exponentmantissa(rightp1,signlessright1);
+	/* (&& binds tighter than ||: swap when mpy < add, or when the
+	 * exponents are equal and the mpy magnitude is the smaller one) */
+	if (mpy_exponent < add_exponent || mpy_exponent == add_exponent &&
+	    Sglext_ismagnitudeless(signlessleft1,signlessright1)) {
+		/*
+		 * Set the left operand to the larger one by XOR swap.
+		 * First finish the first word "save".
+		 */
+		Sglext_xorfromintp1(save,rightp1,/*to*/rightp1);
+		Sglext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
+		Sglext_swap_lower(tmpresp2,rightp2);
+		/* also setup exponents used in rest of routine */
+		diff_exponent = add_exponent - mpy_exponent;
+		result_exponent = add_exponent;
+	} else {
+		/* also setup exponents used in rest of routine */
+		diff_exponent = mpy_exponent - add_exponent;
+		result_exponent = mpy_exponent;
+	}
+	/* Invariant: left is not smaller than right. */
+
+	/*
+	 * Special case alignment of operands that would force alignment
+	 * beyond the extent of the extension.  A further optimization
+	 * could special case this but only reduces the path length for
+	 * this infrequent case.
+	 */
+	if (diff_exponent > SGLEXT_THRESHOLD) {
+		diff_exponent = SGLEXT_THRESHOLD;
+	}
+
+	/* Align right operand by shifting it to the right */
+	Sglext_clear_sign(rightp1);
+	Sglext_right_align(rightp1,rightp2,/*shifted by*/diff_exponent);
+	
+	/* Treat sum and difference of the operands separately. */
+	if ((int)save < 0) {
+		/*
+		 * Difference of the two operands.  Overflow can occur if the
+		 * multiply overflowed.  A borrow can occur out of the hidden
+		 * bit and force a post normalization phase.
+		 */
+		Sglext_subtract(tmpresp1,tmpresp2, rightp1,rightp2,
+			resultp1,resultp2);
+		sign_save = Sgl_signextendedsign(resultp1);
+		if (Sgl_iszero_hidden(resultp1)) {
+			/* Handle normalization */
+		/* A straightforward algorithm would now shift the
+		 * result and extension left until the hidden bit
+		 * becomes one.  Not all of the extension bits need
+		 * participate in the shift.  Only the two most 
+		 * significant bits (round and guard) are needed.
+		 * If only a single shift is needed then the guard
+		 * bit becomes a significant low order bit and the
+		 * extension must participate in the rounding.
+		 * If more than a single shift is needed, then all
+		 * bits to the right of the guard bit are zeros, 
+		 * and the guard bit may or may not be zero. */
+			Sglext_leftshiftby1(resultp1,resultp2);
+
+			/* Need to check for a zero result.  The sign and
+			 * exponent fields have already been zeroed.  The more
+			 * efficient test of the full object can be used.
+			 */
+			 if (Sglext_iszero(resultp1,resultp2)) {
+				/* Must have been "x-x" or "x+(-x)". */
+				if (Is_rounding_mode(ROUNDMINUS))
+					Sgl_setone_sign(resultp1);
+				Sgl_copytoptr(resultp1,dstptr);
+				return(NOEXCEPTION);
+			}
+			result_exponent--;
+
+			/* Look to see if normalization is finished. */
+			if (Sgl_isone_hidden(resultp1)) {
+				/* No further normalization is needed */
+				goto round;
+			}
+
+			/* Discover first one bit to determine shift amount.
+			 * Use a modified binary search.  We have already
+			 * shifted the result one position right and still
+			 * not found a one so the remainder of the extension
+			 * must be zero and simplifies rounding. */
+			/* Scan bytes */
+			while (Sgl_iszero_hiddenhigh7mantissa(resultp1)) {
+				Sglext_leftshiftby8(resultp1,resultp2);
+				result_exponent -= 8;
+			}
+			/* Now narrow it down to the nibble */
+			if (Sgl_iszero_hiddenhigh3mantissa(resultp1)) {
+				/* The lower nibble contains the
+				 * normalizing one */
+				Sglext_leftshiftby4(resultp1,resultp2);
+				result_exponent -= 4;
+			}
+			/* Select case where first bit is set (already
+			 * normalized) otherwise select the proper shift. */
+			jumpsize = Sgl_hiddenhigh3mantissa(resultp1);
+			if (jumpsize <= 7) switch(jumpsize) {
+			case 1:
+				Sglext_leftshiftby3(resultp1,resultp2);
+				result_exponent -= 3;
+				break;
+			case 2:
+			case 3:
+				Sglext_leftshiftby2(resultp1,resultp2);
+				result_exponent -= 2;
+				break;
+			case 4:
+			case 5:
+			case 6:
+			case 7:
+				Sglext_leftshiftby1(resultp1,resultp2);
+				result_exponent -= 1;
+				break;
+			}
+		} /* end if (hidden...)... */
+	/* Fall through and round */
+	} /* end if (save < 0)... */
+	else {
+		/* Add magnitudes */
+		Sglext_addition(tmpresp1,tmpresp2,
+			rightp1,rightp2, /*to*/resultp1,resultp2);
+		sign_save = Sgl_signextendedsign(resultp1);
+		if (Sgl_isone_hiddenoverflow(resultp1)) {
+	    		/* Prenormalization required. */
+	    		Sglext_arithrightshiftby1(resultp1,resultp2);
+	    		result_exponent++;
+		} /* end if hiddenoverflow... */
+	} /* end else ...add magnitudes... */
+
+	/* Round the result.  If the extension and lower two words are
+	 * all zeros, then the result is exact.  Otherwise round in the
+	 * correct direction.  Underflow is possible. If a postnormalization
+	 * is necessary, then the mantissa is all zeros so no shift is needed.
+	 */
+  round:
+	if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
+		Sglext_denormalize(resultp1,resultp2,result_exponent,is_tiny);
+	}
+	Sgl_set_sign(resultp1,/*using*/sign_save);
+	if (Sglext_isnotzero_mantissap2(resultp2)) {
+		inexact = TRUE;
+		switch(Rounding_mode()) {
+		case ROUNDNEAREST: /* The default. */
+			if (Sglext_isone_highp2(resultp2)) {
+				/* at least 1/2 ulp */
+				if (Sglext_isnotzero_low31p2(resultp2) ||
+				    Sglext_isone_lowp1(resultp1)) {
+					/* either exactly half way and odd or
+					 * more than 1/2ulp */
+					Sgl_increment(resultp1);
+				}
+			}
+	    		break;
+
+		case ROUNDPLUS:
+	    		if (Sgl_iszero_sign(resultp1)) {
+				/* Round up positive results */
+				Sgl_increment(resultp1);
+			}
+			break;
+	    
+		case ROUNDMINUS:
+	    		if (Sgl_isone_sign(resultp1)) {
+				/* Round down negative results */
+				Sgl_increment(resultp1);
+			}
+	    
+			/* no break: falls through to ROUNDZERO, a no-op */
+		case ROUNDZERO:;
+			/* truncate is simple */
+		} /* end switch... */
+		if (Sgl_isone_hiddenoverflow(resultp1)) result_exponent++;
+	}
+	if (result_exponent >= SGL_INFINITY_EXPONENT) {
+		/* Overflow */
+		if (Is_overflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+                        Sgl_setwrapped_exponent(resultp1,result_exponent,ovfl);
+                        Sgl_copytoptr(resultp1,dstptr);
+                        if (inexact)
+                            if (Is_inexacttrap_enabled())
+                                return (OPC_2E_OVERFLOWEXCEPTION |
+					OPC_2E_INEXACTEXCEPTION);
+                            else Set_inexactflag();
+                        return (OPC_2E_OVERFLOWEXCEPTION);
+		}
+		inexact = TRUE;
+		Set_overflowflag();
+		Sgl_setoverflow(resultp1);
+	} else if (result_exponent <= 0) {	/* underflow case */
+		if (Is_underflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+                	Sgl_setwrapped_exponent(resultp1,result_exponent,unfl);
+			Sgl_copytoptr(resultp1,dstptr);
+                        if (inexact)
+                            if (Is_inexacttrap_enabled())
+                                return (OPC_2E_UNDERFLOWEXCEPTION |
+					OPC_2E_INEXACTEXCEPTION);
+                            else Set_inexactflag();
+	    		return(OPC_2E_UNDERFLOWEXCEPTION);
+		}
+		else if (inexact && is_tiny) Set_underflowflag();
+	}
+	else Sgl_set_exponent(resultp1,result_exponent);
+	Sgl_copytoptr(resultp1,dstptr);
+	if (inexact) 
+		if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
+		else Set_inexactflag();
+    	return(NOEXCEPTION);
+}
+
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/fpbits.h b/src/kernel/linux/v4.14/arch/parisc/math-emu/fpbits.h
new file mode 100644
index 0000000..cefad06
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/fpbits.h
@@ -0,0 +1,65 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifdef __NO_PA_HDRS
+    PA header file -- do not include this header file for non-PA builds.
+#endif
+
+
+/*
+ *  These macros are designed to be portable to all machines that have
+ *  a wordsize greater than or equal to 32 bits that support the portable
+ *  C compiler and the standard C preprocessor.  Wordsize (default 32)
+ *  and bitfield assignment (default left-to-right,  unlike VAX, PDP-11)
+ *  should be predefined using the constants HOSTWDSZ and BITFRL and
+ *  the C compiler "-D" flag (e.g., -DHOSTWDSZ=36 -DBITFLR for the DEC-20).
+ *  Note that the macro arguments assume that the integer being referenced
+ *  is a 32-bit integer (right-justified on the 20) and that bit 0 is the
+ *  most significant bit.
+ */
+
+#ifndef HOSTWDSZ
+#define	HOSTWDSZ	32
+#endif
+
+
+/*###########################  Macros  ######################################*/
+
+/*-------------------------------------------------------------------------
+ * Bit-field access macros.  In all of these, "start" is the bit position
+ * of the field's most significant bit (bit 0 is the MSB of a HOSTWDSZ-bit
+ * word, per the big-endian numbering described above) and "length"/"len"
+ * is the field width in bits.
+ *----------------------------------------------------------------------- */
+/* Unsigned extract: returns the field right-justified in the word. */
+#define Bitfield_extract(start, length, object) 	\
+    ((object) >> (HOSTWDSZ - (start) - (length)) & 	\
+    ((unsigned)-1 >> (HOSTWDSZ - (length))))
+
+/* Sign-extended extract: left-aligns the field, then arithmetic-shifts it
+ * back down.
+ * NOTE(review): "start" is expanded unparenthesized ("<< start"); safe for
+ * the simple constant arguments used in this emulator, but an expression
+ * argument would mis-expand -- confirm before reusing elsewhere. */
+#define Bitfield_signed_extract(start, length, object) \
+    ((int)((object) << start) >> (HOSTWDSZ - (length)))
+
+/* Mask: keeps the field in place and clears every other bit.
+ * NOTE(review): "start"/"len" are expanded unparenthesized here and in
+ * Bitfield_deposit -- same caveat as above. */
+#define Bitfield_mask(start, len, object)		\
+    ((object) & (((unsigned)-1 >> (HOSTWDSZ-len)) << (HOSTWDSZ-start-len)))
+
+/* Deposit: assignment statement; writes the low "len" bits of "value" into
+ * the field of "object", leaving all other bits unchanged. */
+#define Bitfield_deposit(value,start,len,object)  object = \
+    ((object) & ~(((unsigned)-1 >> (HOSTWDSZ-len)) << (HOSTWDSZ-start-len))) | \
+    (((value) & ((unsigned)-1 >> (HOSTWDSZ-len))) << (HOSTWDSZ-start-len))
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/fpu.h b/src/kernel/linux/v4.14/arch/parisc/math-emu/fpu.h
new file mode 100644
index 0000000..0af5c3c
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/fpu.h
@@ -0,0 +1,76 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ * 
+ *  File: 
+ *      @(#)	pa/fp/fpu.h		$Revision: 1.1 $
+ * 
+ *  Purpose:
+ *      <<please update with a synopsis of the functionality provided by this file>>
+ * 
+ * 
+ * END_DESC  
+*/
+
+#ifdef __NO_PA_HDRS
+    PA header file -- do not include this header file for non-PA builds.
+#endif
+
+
+#ifndef _MACHINE_FPU_INCLUDED /* allows multiple inclusion */
+#define _MACHINE_FPU_INCLUDED
+
+#if 0
+#ifndef _SYS_STDSYMS_INCLUDED
+#    include <sys/stdsyms.h>
+#endif   /* _SYS_STDSYMS_INCLUDED  */
+#include  <machine/pdc/pdc_rqsts.h>
+#endif
+
+#define PA83_FPU_FLAG    0x00000001
+#define PA89_FPU_FLAG    0x00000002
+#define PA2_0_FPU_FLAG   0x00000010
+
+#define TIMEX_EXTEN_FLAG 0x00000004
+
+#define ROLEX_EXTEN_FLAG 0x00000008
+#define COPR_FP 	0x00000080	/* Floating point -- Coprocessor 0 */
+#define SFU_MPY_DIVIDE	0x00008000	/* Multiply/Divide __ SFU 0 */
+
+
+#define EM_FPU_TYPE_OFFSET 272
+
+/* version of EMULATION software for COPR,0,0 instruction */
+#define EMULATION_VERSION 4
+
+/*
+ * The only was to differeniate between TIMEX and ROLEX (or PCX-S and PCX-T)
+ * is thorough the potential type field from the PDC_MODEL call.  The 
+ * following flags are used at assist this differeniation.
+ */
+
+#define ROLEX_POTENTIAL_KEY_FLAGS	PDC_MODEL_CPU_KEY_WORD_TO_IO
+#define TIMEX_POTENTIAL_KEY_FLAGS	(PDC_MODEL_CPU_KEY_QUAD_STORE | \
+					 PDC_MODEL_CPU_KEY_RECIP_SQRT)
+
+
+#endif /* ! _MACHINE_FPU_INCLUDED */
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/fpudispatch.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/fpudispatch.c
new file mode 100644
index 0000000..18df123
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/fpudispatch.c
@@ -0,0 +1,1443 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/fp/fpudispatch.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	<<please update with a synopsis of the functionality provided by this file>>
+ *
+ *  External Interfaces:
+ *	<<the following list was autogenerated, please review>>
+ *	emfpudispatch(ir, dummy1, dummy2, fpregs)
+ *	fpudispatch(ir, excp_code, holder, fpregs)
+ *
+ *  Internal Interfaces:
+ *	<<the following list was autogenerated, please review>>
+ *	static u_int decode_06(u_int, u_int *)
+ *	static u_int decode_0c(u_int, u_int, u_int, u_int *)
+ *	static u_int decode_0e(u_int, u_int, u_int, u_int *)
+ *	static u_int decode_26(u_int, u_int *)
+ *	static u_int decode_2e(u_int, u_int *)
+ *	static void update_status_cbit(u_int *, u_int, u_int, u_int)
+ *
+ *  Theory:
+ *	<<please update with a overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+#define FPUDEBUG 0
+
+#include "float.h"
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <asm/processor.h>
+/* #include <sys/debug.h> */
+/* #include <machine/sys/mdep_private.h> */
+
+#define COPR_INST 0x30000000
+
+/*
+ * definition of extru macro.  If pos and len are constants, the compiler
+ * will generate an extru instruction when optimized
+ */
+#define extru(r,pos,len)	(((r) >> (31-(pos))) & (( 1 << (len)) - 1))
+/* definitions of bit field locations in the instruction */
+#define fpmajorpos 5
+#define fpr1pos	10
+#define fpr2pos 15
+#define fptpos	31
+#define fpsubpos 18
+#define fpclass1subpos 16
+#define fpclasspos 22
+#define fpfmtpos 20
+#define fpdfpos 18
+#define fpnulpos 26
+/*
+ * the following are the extra bits for the 0E major op
+ */
+#define fpxr1pos 24
+#define fpxr2pos 19
+#define fpxtpos 25
+#define fpxpos 23
+#define fp0efmtpos 20
+/*
+ * the following are for the multi-ops
+ */
+#define fprm1pos 10
+#define fprm2pos 15
+#define fptmpos 31
+#define fprapos 25
+#define fptapos 20
+#define fpmultifmt 26
+/*
+ * the following are for the fused FP instructions
+ */
+     /* fprm1pos 10 */
+     /* fprm2pos 15 */
+#define fpraupos 18
+#define fpxrm2pos 19
+     /* fpfmtpos 20 */
+#define fpralpos 23
+#define fpxrm1pos 24
+     /* fpxtpos 25 */
+#define fpfusedsubop 26
+     /* fptpos	31 */
+
+/*
+ * offset to constant zero in the FP emulation registers
+ */
+#define fpzeroreg (32*sizeof(double)/sizeof(u_int))
+
+/*
+ * extract the major opcode from the instruction
+ */
+#define get_major(op) extru(op,fpmajorpos,6)
+/*
+ * extract the two bit class field from the FP instruction. The class is at bit
+ * positions 21-22
+ */
+#define get_class(op) extru(op,fpclasspos,2)
+/*
+ * extract the 3 bit subop field.  For all but class 1 instructions, it is
+ * located at bit positions 16-18
+ */
+#define get_subop(op) extru(op,fpsubpos,3)
+/*
+ * extract the 2 or 3 bit subop field from class 1 instructions.  It is located
+ * at bit positions 15-16 (PA1.1) or 14-16 (PA2.0)
+ */
+#define get_subop1_PA1_1(op) extru(op,fpclass1subpos,2)	/* PA89 (1.1) fmt */
+#define get_subop1_PA2_0(op) extru(op,fpclass1subpos,3)	/* PA 2.0 fmt */
+
+/* definitions of unimplemented exceptions */
+#define MAJOR_0C_EXCP	0x09
+#define MAJOR_0E_EXCP	0x0b
+#define MAJOR_06_EXCP	0x03
+#define MAJOR_26_EXCP	0x23
+#define MAJOR_2E_EXCP	0x2b
+#define PA83_UNIMP_EXCP	0x01
+
+/*
+ * Special Defines for TIMEX specific code
+ */
+
+#define FPU_TYPE_FLAG_POS (EM_FPU_TYPE_OFFSET>>2)
+#define TIMEX_ROLEX_FPU_MASK (TIMEX_EXTEN_FLAG|ROLEX_EXTEN_FLAG)
+
+/*
+ * Static function definitions
+ */
+#define _PROTOTYPES
+#if defined(_PROTOTYPES) || defined(_lint)
+static u_int decode_0c(u_int, u_int, u_int, u_int *);
+static u_int decode_0e(u_int, u_int, u_int, u_int *);
+static u_int decode_06(u_int, u_int *);
+static u_int decode_26(u_int, u_int *);
+static u_int decode_2e(u_int, u_int *);
+static void update_status_cbit(u_int *, u_int, u_int, u_int);
+#else /* !_PROTOTYPES&&!_lint */
+static u_int decode_0c();
+static u_int decode_0e();
+static u_int decode_06();
+static u_int decode_26();
+static u_int decode_2e();
+static void update_status_cbit();
+#endif /* _PROTOTYPES&&!_lint */
+
+#define VASSERT(x)
+
+/*
+ * Derive the FPU type flag word from the boot CPU type and store it at
+ * FPU_TYPE_FLAG_POS in the emulated FP register file.
+ * NOTE(review): cpu_type values matching none of these tests (anything
+ * between pcxt_ and pcxu in the enum) leave the flag word unmodified --
+ * confirm callers tolerate a stale/zero value there.
+ */
+static void parisc_linux_get_fpu_type(u_int fpregs[])
+{
+	/* on pa-linux the fpu type is not filled in by the
+	 * caller; it is constructed here  
+	 */ 
+	if (boot_cpu_data.cpu_type == pcxs)
+		/* PCX-S (TIMEX) */
+		fpregs[FPU_TYPE_FLAG_POS] = TIMEX_EXTEN_FLAG;
+	else if (boot_cpu_data.cpu_type == pcxt ||
+	         boot_cpu_data.cpu_type == pcxt_)
+		/* PCX-T / PCX-T' (ROLEX) */
+		fpregs[FPU_TYPE_FLAG_POS] = ROLEX_EXTEN_FLAG;
+	else if (boot_cpu_data.cpu_type >= pcxu)
+		/* pcxu and newer: PA2.0 FPU */
+		fpregs[FPU_TYPE_FLAG_POS] = PA2_0_FPU_FLAG;
+}
+
+/*
+ * this routine will decode the excepting floating point instruction and
+ * call the appropriate emulation routine.
+ * It is called by decode_fpu with the following parameters:
+ * fpudispatch(current_ir, unimplemented_code, 0, &Fpu_register)
+ * where current_ir is the instruction to be emulated,
+ * unimplemented_code is the exception_code that the hardware generated
+ * and &Fpu_register is the address of emulated FP reg 0.
+ */
+u_int
+fpudispatch(u_int ir, u_int excp_code, u_int holder, u_int fpregs[])
+{
+	u_int class, subop;
+	u_int fpu_type_flags;
+
+	/* All FP emulation code assumes that ints are 4-bytes in length */
+	VASSERT(sizeof(int) == 4);
+
+	/* construct the fpu type word before reading it below */
+	parisc_linux_get_fpu_type(fpregs);
+
+	fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS];  /* get fpu type flags */
+
+	class = get_class(ir);
+	if (class == 1) {
+		/* class-1 (conversion) subop field is one bit wider on PA2.0 */
+		if  (fpu_type_flags & PA2_0_FPU_FLAG)
+			subop = get_subop1_PA2_0(ir);
+		else
+			subop = get_subop1_PA1_1(ir);
+	}
+	else
+		subop = get_subop(ir);
+
+	if (FPUDEBUG) printk("class %d subop %d\n", class, subop);
+
+	/* dispatch on the hardware-supplied exception code */
+	switch (excp_code) {
+		case MAJOR_0C_EXCP:
+		case PA83_UNIMP_EXCP:
+			/* PA83 unimplemented excp is decoded like major 0C */
+			return(decode_0c(ir,class,subop,fpregs));
+		case MAJOR_0E_EXCP:
+			return(decode_0e(ir,class,subop,fpregs));
+		case MAJOR_06_EXCP:
+			return(decode_06(ir,fpregs));
+		case MAJOR_26_EXCP:
+			return(decode_26(ir,fpregs));
+		case MAJOR_2E_EXCP:
+			return(decode_2e(ir,fpregs));
+		default:
+			/* "crashme Night Gallery painting nr 2. (asm_crash.s).
+			 * This was fixed for multi-user kernels, but
+			 * workstation kernels had a panic here.  This allowed
+			 * any arbitrary user to panic the kernel by executing
+			 * setting the FP exception registers to strange values
+			 * and generating an emulation trap.  The emulation and
+			 * exception code must never be able to panic the
+			 * kernel.
+			 */
+			return(UNIMPLEMENTEDEXCEPTION);
+	}
+}
+
+/*
+ * this routine is called by $emulation_trap to emulate a coprocessor
+ * instruction if one doesn't exist
+ */
+u_int
+emfpudispatch(u_int ir, u_int dummy1, u_int dummy2, u_int fpregs[])
+{
+	u_int class, subop, major;
+	u_int fpu_type_flags;
+
+	/* All FP emulation code assumes that ints are 4-bytes in length */
+	VASSERT(sizeof(int) == 4);
+
+	fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS];  /* get fpu type flags */
+
+	major = get_major(ir);
+	class = get_class(ir);
+	if (class == 1) {
+		if  (fpu_type_flags & PA2_0_FPU_FLAG)
+			subop = get_subop1_PA2_0(ir);
+		else
+			subop = get_subop1_PA1_1(ir);
+	}
+	else
+		subop = get_subop(ir);
+	switch (major) {
+		case 0x0C:
+			return(decode_0c(ir,class,subop,fpregs));
+		case 0x0E:
+			return(decode_0e(ir,class,subop,fpregs));
+		case 0x06:
+			return(decode_06(ir,fpregs));
+		case 0x26:
+			return(decode_26(ir,fpregs));
+		case 0x2E:
+			return(decode_2e(ir,fpregs));
+		default:
+			return(PA83_UNIMP_EXCP);
+	}
+}
+	
+
/*
 * Decode and emulate a major-opcode-0C (register-form) FP instruction.
 * Returns NOEXCEPTION on success, MAJOR_0C_EXCP for illegal or
 * unimplemented encodings, or the exception code produced by the
 * emulation routine that was invoked.
 *
 * Layout note: the inner fmt switches for FCPY/FABS/FNEG/FNEGABS use
 * deliberate fall-through (quad -> double -> single) to copy the
 * register words.  The outer switch(class) cases carry no break;
 * every reachable inner path appears to return before falling into
 * the next class — NOTE(review): confirm before relying on that.
 */
static u_int
decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
{
	u_int r1,r2,t;		/* operand register offsets */
	u_int fmt;		/* also sf for class 1 conversions */
	u_int  df;		/* for class 1 conversions */
	u_int *status;
	u_int retval, local_status;
	u_int fpu_type_flags;

	/* COPR 0,0: report the emulation version in fpreg 0 */
	if (ir == COPR_INST) {
		fpregs[0] = EMULATION_VERSION << 11;
		return(NOEXCEPTION);
	}
	status = &fpregs[0];	/* fp status register */
	local_status = fpregs[0]; /* and local copy */
	r1 = extru(ir,fpr1pos,5) * sizeof(double)/sizeof(u_int);
	if (r1 == 0)		/* map fr0 source to constant zero */
		r1 = fpzeroreg;
	t = extru(ir,fptpos,5) * sizeof(double)/sizeof(u_int);
	if (t == 0 && class != 2)	/* don't allow fr0 as a dest */
		return(MAJOR_0C_EXCP);
	fmt = extru(ir,fpfmtpos,2);	/* get fmt completer */

	switch (class) {
	    case 0:
		switch (subop) {
			case 0:	/* COPR 0,0 emulated above*/
			case 1:
				return(MAJOR_0C_EXCP);
			case 2:	/* FCPY */
				switch (fmt) {
				    case 2: /* illegal */
					return(MAJOR_0C_EXCP);
				    case 3: /* quad */
					t &= ~3;  /* force to even reg #s */
					r1 &= ~3;
					fpregs[t+3] = fpregs[r1+3];
					fpregs[t+2] = fpregs[r1+2];
					/* fall through */
				    case 1: /* double */
					fpregs[t+1] = fpregs[r1+1];
					/* fall through */
				    case 0: /* single */
					fpregs[t] = fpregs[r1];
					return(NOEXCEPTION);
				}
			case 3: /* FABS */
				switch (fmt) {
				    case 2: /* illegal */
					return(MAJOR_0C_EXCP);
				    case 3: /* quad */
					t &= ~3;  /* force to even reg #s */
					r1 &= ~3;
					fpregs[t+3] = fpregs[r1+3];
					fpregs[t+2] = fpregs[r1+2];
					/* fall through */
				    case 1: /* double */
					fpregs[t+1] = fpregs[r1+1];
					/* fall through */
				    case 0: /* single */
					/* copy and clear sign bit */
					fpregs[t] = fpregs[r1] & 0x7fffffff;
					return(NOEXCEPTION);
				}
			case 6: /* FNEG */
				switch (fmt) {
				    case 2: /* illegal */
					return(MAJOR_0C_EXCP);
				    case 3: /* quad */
					t &= ~3;  /* force to even reg #s */
					r1 &= ~3;
					fpregs[t+3] = fpregs[r1+3];
					fpregs[t+2] = fpregs[r1+2];
					/* fall through */
				    case 1: /* double */
					fpregs[t+1] = fpregs[r1+1];
					/* fall through */
				    case 0: /* single */
					/* copy and invert sign bit */
					fpregs[t] = fpregs[r1] ^ 0x80000000;
					return(NOEXCEPTION);
				}
			case 7: /* FNEGABS */
				switch (fmt) {
				    case 2: /* illegal */
					return(MAJOR_0C_EXCP);
				    case 3: /* quad */
					t &= ~3;  /* force to even reg #s */
					r1 &= ~3;
					fpregs[t+3] = fpregs[r1+3];
					fpregs[t+2] = fpregs[r1+2];
					/* fall through */
				    case 1: /* double */
					fpregs[t+1] = fpregs[r1+1];
					/* fall through */
				    case 0: /* single */
					/* copy and set sign bit */
					fpregs[t] = fpregs[r1] | 0x80000000;
					return(NOEXCEPTION);
				}
			case 4: /* FSQRT */
				switch (fmt) {
				    case 0:
					return(sgl_fsqrt(&fpregs[r1],0,
						&fpregs[t],status));
				    case 1:
					return(dbl_fsqrt(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2:
				    case 3: /* quad not implemented */
					return(MAJOR_0C_EXCP);
				}
			case 5: /* FRND */
				switch (fmt) {
				    case 0:
					return(sgl_frnd(&fpregs[r1],0,
						&fpregs[t],status));
				    case 1:
					return(dbl_frnd(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2:
				    case 3: /* quad not implemented */
					return(MAJOR_0C_EXCP);
				}
		} /* end of switch (subop) */

	case 1: /* class 1: conversions */
		df = extru(ir,fpdfpos,2); /* get dest format */
		if ((df & 2) || (fmt & 2)) {
			/*
			 * fmt's 2 and 3 are illegal of not implemented
			 * quad conversions
			 */
			return(MAJOR_0C_EXCP);
		}
		/*
		 * encode source and dest formats into 2 bits.
		 * high bit is source, low bit is dest.
		 * bit = 1 --> double precision
		 */
		fmt = (fmt << 1) | df;
		switch (subop) {
			case 0: /* FCNVFF */
				switch(fmt) {
				    case 0: /* sgl/sgl */
					return(MAJOR_0C_EXCP);
				    case 1: /* sgl/dbl */
					return(sgl_to_dbl_fcnvff(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2: /* dbl/sgl */
					return(dbl_to_sgl_fcnvff(&fpregs[r1],0,
						&fpregs[t],status));
				    case 3: /* dbl/dbl */
					return(MAJOR_0C_EXCP);
				}
			case 1: /* FCNVXF */
				switch(fmt) {
				    case 0: /* sgl/sgl */
					return(sgl_to_sgl_fcnvxf(&fpregs[r1],0,
						&fpregs[t],status));
				    case 1: /* sgl/dbl */
					return(sgl_to_dbl_fcnvxf(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2: /* dbl/sgl */
					return(dbl_to_sgl_fcnvxf(&fpregs[r1],0,
						&fpregs[t],status));
				    case 3: /* dbl/dbl */
					return(dbl_to_dbl_fcnvxf(&fpregs[r1],0,
						&fpregs[t],status));
				}
			case 2: /* FCNVFX */
				switch(fmt) {
				    case 0: /* sgl/sgl */
					return(sgl_to_sgl_fcnvfx(&fpregs[r1],0,
						&fpregs[t],status));
				    case 1: /* sgl/dbl */
					return(sgl_to_dbl_fcnvfx(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2: /* dbl/sgl */
					return(dbl_to_sgl_fcnvfx(&fpregs[r1],0,
						&fpregs[t],status));
				    case 3: /* dbl/dbl */
					return(dbl_to_dbl_fcnvfx(&fpregs[r1],0,
						&fpregs[t],status));
				}
			case 3: /* FCNVFXT (truncating convert) */
				switch(fmt) {
				    case 0: /* sgl/sgl */
					return(sgl_to_sgl_fcnvfxt(&fpregs[r1],0,
						&fpregs[t],status));
				    case 1: /* sgl/dbl */
					return(sgl_to_dbl_fcnvfxt(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2: /* dbl/sgl */
					return(dbl_to_sgl_fcnvfxt(&fpregs[r1],0,
						&fpregs[t],status));
				    case 3: /* dbl/dbl */
					return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0,
						&fpregs[t],status));
				}
			case 5: /* FCNVUF (PA2.0 only) */
				switch(fmt) {
				    case 0: /* sgl/sgl */
					return(sgl_to_sgl_fcnvuf(&fpregs[r1],0,
						&fpregs[t],status));
				    case 1: /* sgl/dbl */
					return(sgl_to_dbl_fcnvuf(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2: /* dbl/sgl */
					return(dbl_to_sgl_fcnvuf(&fpregs[r1],0,
						&fpregs[t],status));
				    case 3: /* dbl/dbl */
					return(dbl_to_dbl_fcnvuf(&fpregs[r1],0,
						&fpregs[t],status));
				}
			case 6: /* FCNVFU (PA2.0 only) */
				switch(fmt) {
				    case 0: /* sgl/sgl */
					return(sgl_to_sgl_fcnvfu(&fpregs[r1],0,
						&fpregs[t],status));
				    case 1: /* sgl/dbl */
					return(sgl_to_dbl_fcnvfu(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2: /* dbl/sgl */
					return(dbl_to_sgl_fcnvfu(&fpregs[r1],0,
						&fpregs[t],status));
				    case 3: /* dbl/dbl */
					return(dbl_to_dbl_fcnvfu(&fpregs[r1],0,
						&fpregs[t],status));
				}
			case 7: /* FCNVFUT (PA2.0 only) */
				switch(fmt) {
				    case 0: /* sgl/sgl */
					return(sgl_to_sgl_fcnvfut(&fpregs[r1],0,
						&fpregs[t],status));
				    case 1: /* sgl/dbl */
					return(sgl_to_dbl_fcnvfut(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2: /* dbl/sgl */
					return(dbl_to_sgl_fcnvfut(&fpregs[r1],0,
						&fpregs[t],status));
				    case 3: /* dbl/dbl */
					return(dbl_to_dbl_fcnvfut(&fpregs[r1],0,
						&fpregs[t],status));
				}
			case 4: /* undefined */
				return(MAJOR_0C_EXCP);
		} /* end of switch subop */

	case 2: /* class 2: FCMP / FTEST */
		fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS];
		r2 = extru(ir, fpr2pos, 5) * sizeof(double)/sizeof(u_int);
		if (r2 == 0)
			r2 = fpzeroreg;
		if  (fpu_type_flags & PA2_0_FPU_FLAG) {
			/* FTEST if nullify bit set, otherwise FCMP */
			if (extru(ir, fpnulpos, 1)) {  /* FTEST */
				switch (fmt) {
				    case 0:
					/*
					 * arg0 is not used
					 * second param is the t field used for
					 * ftest,acc and ftest,rej
					 * third param is the subop (y-field)
					 */
					BUG();
					/* Unsupported
					 * return(ftest(0L,extru(ir,fptpos,5),
					 *	 &fpregs[0],subop));
					 */
				    case 1:
				    case 2:
				    case 3:
					return(MAJOR_0C_EXCP);
				}
			} else {  /* FCMP */
				switch (fmt) {
				    case 0:
					retval = sgl_fcmp(&fpregs[r1],
						&fpregs[r2],extru(ir,fptpos,5),
						&local_status);
					update_status_cbit(status,local_status,
						fpu_type_flags, subop);
					return(retval);
				    case 1:
					retval = dbl_fcmp(&fpregs[r1],
						&fpregs[r2],extru(ir,fptpos,5),
						&local_status);
					update_status_cbit(status,local_status,
						fpu_type_flags, subop);
					return(retval);
				    case 2: /* illegal */
				    case 3: /* quad not implemented */
					return(MAJOR_0C_EXCP);
				}
			}
		}  /* end of if for PA2.0 */
		else {	/* PA1.0 & PA1.1 */
		    switch (subop) {
			case 2:
			case 3:
			case 4:
			case 5:
			case 6:
			case 7:
				return(MAJOR_0C_EXCP);
			case 0: /* FCMP */
				switch (fmt) {
				    case 0:
					retval = sgl_fcmp(&fpregs[r1],
						&fpregs[r2],extru(ir,fptpos,5),
						&local_status);
					update_status_cbit(status,local_status,
						fpu_type_flags, subop);
					return(retval);
				    case 1:
					retval = dbl_fcmp(&fpregs[r1],
						&fpregs[r2],extru(ir,fptpos,5),
						&local_status);
					update_status_cbit(status,local_status,
						fpu_type_flags, subop);
					return(retval);
				    case 2: /* illegal */
				    case 3: /* quad not implemented */
					return(MAJOR_0C_EXCP);
				}
			case 1: /* FTEST */
				switch (fmt) {
				    case 0:
					/*
					 * arg0 is not used
					 * second param is the t field used for
					 * ftest,acc and ftest,rej
					 * third param is the subop (y-field)
					 */
					BUG();
					/* unsupported
					 * return(ftest(0L,extru(ir,fptpos,5),
					 *     &fpregs[0],subop));
					 */
				    case 1:
				    case 2:
				    case 3:
					return(MAJOR_0C_EXCP);
				}
		    } /* end of switch subop */
		} /* end of else for PA1.0 & PA1.1 */
	case 3: /* class 3: two-operand arithmetic */
		r2 = extru(ir,fpr2pos,5) * sizeof(double)/sizeof(u_int);
		if (r2 == 0)
			r2 = fpzeroreg;
		switch (subop) {
			case 5:
			case 6:
			case 7:
				return(MAJOR_0C_EXCP);

			case 0: /* FADD */
				switch (fmt) {
				    case 0:
					return(sgl_fadd(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				    case 1:
					return(dbl_fadd(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				    case 2: /* illegal */
				    case 3: /* quad not implemented */
					return(MAJOR_0C_EXCP);
				}
			case 1: /* FSUB */
				switch (fmt) {
				    case 0:
					return(sgl_fsub(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				    case 1:
					return(dbl_fsub(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				    case 2: /* illegal */
				    case 3: /* quad not implemented */
					return(MAJOR_0C_EXCP);
				}
			case 2: /* FMPY */
				switch (fmt) {
				    case 0:
					return(sgl_fmpy(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				    case 1:
					return(dbl_fmpy(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				    case 2: /* illegal */
				    case 3: /* quad not implemented */
					return(MAJOR_0C_EXCP);
				}
			case 3: /* FDIV */
				switch (fmt) {
				    case 0:
					return(sgl_fdiv(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				    case 1:
					return(dbl_fdiv(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				    case 2: /* illegal */
				    case 3: /* quad not implemented */
					return(MAJOR_0C_EXCP);
				}
			case 4: /* FREM */
				switch (fmt) {
				    case 0:
					return(sgl_frem(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				    case 1:
					return(dbl_frem(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				    case 2: /* illegal */
				    case 3: /* quad not implemented */
					return(MAJOR_0C_EXCP);
				}
		} /* end of class 3 switch */
	} /* end of switch(class) */

	/* If we get here, something is really wrong! */
	return(MAJOR_0C_EXCP);
}
+
/*
 * Decode and emulate a major-opcode-0E (double-word register form) FP
 * instruction.  Register fields here select single-word halves of the
 * FP register file ((reg << 1) | x-bit), unlike decode_0c.
 * Returns NOEXCEPTION on success or an exception code otherwise.
 *
 * Layout note: like decode_0c, the inner fmt switches for
 * FCPY/FABS/FNEG/FNEGABS use deliberate fall-through (double ->
 * single), and the outer switch(class) cases have no break; every
 * reachable inner path appears to return first — NOTE(review):
 * confirm before relying on that.
 */
static u_int
decode_0e(ir,class,subop,fpregs)
u_int ir,class,subop;
u_int fpregs[];
{
	u_int r1,r2,t;		/* operand register offsets */
	u_int fmt;		/* also sf for class 1 conversions */
	u_int df;		/* dest format for class 1 conversions */
	u_int *status;
	u_int retval, local_status;
	u_int fpu_type_flags;

	status = &fpregs[0];	/* fp status register */
	local_status = fpregs[0]; /* and local copy */
	/* source/target select a 32-bit word: (reg << 1) | x-bit */
	r1 = ((extru(ir,fpr1pos,5)<<1)|(extru(ir,fpxr1pos,1)));
	if (r1 == 0)		/* map fr0 source to constant zero */
		r1 = fpzeroreg;
	t = ((extru(ir,fptpos,5)<<1)|(extru(ir,fpxtpos,1)));
	if (t == 0 && class != 2)	/* don't allow fr0 as a dest */
		return(MAJOR_0E_EXCP);
	if (class < 2)		/* class 0 or 1 has 2 bit fmt */
		fmt = extru(ir,fpfmtpos,2);
	else 			/* class 2 and 3 have 1 bit fmt */
		fmt = extru(ir,fp0efmtpos,1);
	/*
	 * An undefined combination, double precision accessing the
	 * right half of a FPR, can get us into trouble.
	 * Let's just force proper alignment on it.
	 */
	if (fmt == DBL) {
		r1 &= ~1;
		if (class != 1)
			t &= ~1;
	}

	switch (class) {
	    case 0:
		switch (subop) {
			case 0: /* unimplemented */
			case 1:
				return(MAJOR_0E_EXCP);
			case 2: /* FCPY */
				switch (fmt) {
				    case 2:
				    case 3:
					return(MAJOR_0E_EXCP);
				    case 1: /* double */
					fpregs[t+1] = fpregs[r1+1];
					/* fall through */
				    case 0: /* single */
					fpregs[t] = fpregs[r1];
					return(NOEXCEPTION);
				}
			case 3: /* FABS */
				switch (fmt) {
				    case 2:
				    case 3:
					return(MAJOR_0E_EXCP);
				    case 1: /* double */
					fpregs[t+1] = fpregs[r1+1];
					/* fall through */
				    case 0: /* single */
					/* copy and clear sign bit */
					fpregs[t] = fpregs[r1] & 0x7fffffff;
					return(NOEXCEPTION);
				}
			case 6: /* FNEG */
				switch (fmt) {
				    case 2:
				    case 3:
					return(MAJOR_0E_EXCP);
				    case 1: /* double */
					fpregs[t+1] = fpregs[r1+1];
					/* fall through */
				    case 0: /* single */
					/* copy and invert sign bit */
					fpregs[t] = fpregs[r1] ^ 0x80000000;
					return(NOEXCEPTION);
				}
			case 7: /* FNEGABS */
				switch (fmt) {
				    case 2:
				    case 3:
					return(MAJOR_0E_EXCP);
				    case 1: /* double */
					fpregs[t+1] = fpregs[r1+1];
					/* fall through */
				    case 0: /* single */
					/* copy and set sign bit */
					fpregs[t] = fpregs[r1] | 0x80000000;
					return(NOEXCEPTION);
				}
			case 4: /* FSQRT */
				switch (fmt) {
				    case 0:
					return(sgl_fsqrt(&fpregs[r1],0,
						&fpregs[t], status));
				    case 1:
					return(dbl_fsqrt(&fpregs[r1],0,
						&fpregs[t], status));
				    case 2:
				    case 3:
					return(MAJOR_0E_EXCP);
				}
			case 5: /* FRMD */
				switch (fmt) {
				    case 0:
					return(sgl_frnd(&fpregs[r1],0,
						&fpregs[t], status));
				    case 1:
					return(dbl_frnd(&fpregs[r1],0,
						&fpregs[t], status));
				    case 2:
				    case 3:
					return(MAJOR_0E_EXCP);
				}
		} /* end of switch (subop */

	case 1: /* class 1: conversions */
		df = extru(ir,fpdfpos,2); /* get dest format */
		/*
		 * Fix Crashme problem (writing to 31R in double precision)
		 * here too.
		 */
		if (df == DBL) {
			t &= ~1;
		}
		if ((df & 2) || (fmt & 2))
			return(MAJOR_0E_EXCP);

		/*
		 * encode source and dest formats into 2 bits:
		 * high bit = source, low bit = dest, 1 --> double
		 */
		fmt = (fmt << 1) | df;
		switch (subop) {
			case 0: /* FCNVFF */
				switch(fmt) {
				    case 0: /* sgl/sgl */
					return(MAJOR_0E_EXCP);
				    case 1: /* sgl/dbl */
					return(sgl_to_dbl_fcnvff(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2: /* dbl/sgl */
					return(dbl_to_sgl_fcnvff(&fpregs[r1],0,
						&fpregs[t],status));
				    case 3: /* dbl/dbl */
					return(MAJOR_0E_EXCP);
				}
			case 1: /* FCNVXF */
				switch(fmt) {
				    case 0: /* sgl/sgl */
					return(sgl_to_sgl_fcnvxf(&fpregs[r1],0,
						&fpregs[t],status));
				    case 1: /* sgl/dbl */
					return(sgl_to_dbl_fcnvxf(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2: /* dbl/sgl */
					return(dbl_to_sgl_fcnvxf(&fpregs[r1],0,
						&fpregs[t],status));
				    case 3: /* dbl/dbl */
					return(dbl_to_dbl_fcnvxf(&fpregs[r1],0,
						&fpregs[t],status));
				}
			case 2: /* FCNVFX */
				switch(fmt) {
				    case 0: /* sgl/sgl */
					return(sgl_to_sgl_fcnvfx(&fpregs[r1],0,
						&fpregs[t],status));
				    case 1: /* sgl/dbl */
					return(sgl_to_dbl_fcnvfx(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2: /* dbl/sgl */
					return(dbl_to_sgl_fcnvfx(&fpregs[r1],0,
						&fpregs[t],status));
				    case 3: /* dbl/dbl */
					return(dbl_to_dbl_fcnvfx(&fpregs[r1],0,
						&fpregs[t],status));
				}
			case 3: /* FCNVFXT (truncating convert) */
				switch(fmt) {
				    case 0: /* sgl/sgl */
					return(sgl_to_sgl_fcnvfxt(&fpregs[r1],0,
						&fpregs[t],status));
				    case 1: /* sgl/dbl */
					return(sgl_to_dbl_fcnvfxt(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2: /* dbl/sgl */
					return(dbl_to_sgl_fcnvfxt(&fpregs[r1],0,
						&fpregs[t],status));
				    case 3: /* dbl/dbl */
					return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0,
						&fpregs[t],status));
				}
			case 5: /* FCNVUF (PA2.0 only) */
				switch(fmt) {
				    case 0: /* sgl/sgl */
					return(sgl_to_sgl_fcnvuf(&fpregs[r1],0,
						&fpregs[t],status));
				    case 1: /* sgl/dbl */
					return(sgl_to_dbl_fcnvuf(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2: /* dbl/sgl */
					return(dbl_to_sgl_fcnvuf(&fpregs[r1],0,
						&fpregs[t],status));
				    case 3: /* dbl/dbl */
					return(dbl_to_dbl_fcnvuf(&fpregs[r1],0,
						&fpregs[t],status));
				}
			case 6: /* FCNVFU (PA2.0 only) */
				switch(fmt) {
				    case 0: /* sgl/sgl */
					return(sgl_to_sgl_fcnvfu(&fpregs[r1],0,
						&fpregs[t],status));
				    case 1: /* sgl/dbl */
					return(sgl_to_dbl_fcnvfu(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2: /* dbl/sgl */
					return(dbl_to_sgl_fcnvfu(&fpregs[r1],0,
						&fpregs[t],status));
				    case 3: /* dbl/dbl */
					return(dbl_to_dbl_fcnvfu(&fpregs[r1],0,
						&fpregs[t],status));
				}
			case 7: /* FCNVFUT (PA2.0 only) */
				switch(fmt) {
				    case 0: /* sgl/sgl */
					return(sgl_to_sgl_fcnvfut(&fpregs[r1],0,
						&fpregs[t],status));
				    case 1: /* sgl/dbl */
					return(sgl_to_dbl_fcnvfut(&fpregs[r1],0,
						&fpregs[t],status));
				    case 2: /* dbl/sgl */
					return(dbl_to_sgl_fcnvfut(&fpregs[r1],0,
						&fpregs[t],status));
				    case 3: /* dbl/dbl */
					return(dbl_to_dbl_fcnvfut(&fpregs[r1],0,
						&fpregs[t],status));
				}
			case 4: /* undefined */
				/* NOTE(review): returns the 0C exception code
				 * inside the 0E decoder — looks inherited from
				 * decode_0c; verify intended. */
				return(MAJOR_0C_EXCP);
		} /* end of switch subop */
	case 2: /* class 2: FCMP / FTEST */
		/*
		 * Be careful out there.
		 * Crashme can generate cases where FR31R is specified
		 * as the source or target of a double precision operation.
		 * Since we just pass the address of the floating-point
		 * register to the emulation routines, this can cause
		 * corruption of fpzeroreg.
		 */
		if (fmt == DBL)
			r2 = (extru(ir,fpr2pos,5)<<1);
		else
			r2 = ((extru(ir,fpr2pos,5)<<1)|(extru(ir,fpxr2pos,1)));
		fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS];
		if (r2 == 0)
			r2 = fpzeroreg;
		if  (fpu_type_flags & PA2_0_FPU_FLAG) {
			/* FTEST if nullify bit set, otherwise FCMP */
			if (extru(ir, fpnulpos, 1)) {  /* FTEST */
				/* not legal */
				return(MAJOR_0E_EXCP);
			} else {  /* FCMP */
			switch (fmt) {
				    /*
				     * fmt is only 1 bit long
				     */
				    case 0:
					retval = sgl_fcmp(&fpregs[r1],
						&fpregs[r2],extru(ir,fptpos,5),
						&local_status);
					update_status_cbit(status,local_status,
						fpu_type_flags, subop);
					return(retval);
				    case 1:
					retval = dbl_fcmp(&fpregs[r1],
						&fpregs[r2],extru(ir,fptpos,5),
						&local_status);
					update_status_cbit(status,local_status,
						fpu_type_flags, subop);
					return(retval);
				}
			}
		}  /* end of if for PA2.0 */
		else {  /* PA1.0 & PA1.1 */
		    switch (subop) {
			case 1:
			case 2:
			case 3:
			case 4:
			case 5:
			case 6:
			case 7:
				return(MAJOR_0E_EXCP);
			case 0: /* FCMP */
				switch (fmt) {
				    /*
				     * fmt is only 1 bit long
				     */
				    case 0:
					retval = sgl_fcmp(&fpregs[r1],
						&fpregs[r2],extru(ir,fptpos,5),
						&local_status);
					update_status_cbit(status,local_status,
						fpu_type_flags, subop);
					return(retval);
				    case 1:
					retval = dbl_fcmp(&fpregs[r1],
						&fpregs[r2],extru(ir,fptpos,5),
						&local_status);
					update_status_cbit(status,local_status,
						fpu_type_flags, subop);
					return(retval);
				}
		    } /* end of switch subop */
		} /* end of else for PA1.0 & PA1.1 */
	case 3: /* class 3: two-operand arithmetic */
		/*
		 * Be careful out there.
		 * Crashme can generate cases where FR31R is specified
		 * as the source or target of a double precision operation.
		 * Since we just pass the address of the floating-point
		 * register to the emulation routines, this can cause
		 * corruption of fpzeroreg.
		 */
		if (fmt == DBL)
			r2 = (extru(ir,fpr2pos,5)<<1);
		else
			r2 = ((extru(ir,fpr2pos,5)<<1)|(extru(ir,fpxr2pos,1)));
		if (r2 == 0)
			r2 = fpzeroreg;
		switch (subop) {
			case 5:
			case 6:
			case 7:
				return(MAJOR_0E_EXCP);

			/*
			 * Note that fmt is only 1 bit for class 3 */
			case 0: /* FADD */
				switch (fmt) {
				    case 0:
					return(sgl_fadd(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				    case 1:
					return(dbl_fadd(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				}
			case 1: /* FSUB */
				switch (fmt) {
				    case 0:
					return(sgl_fsub(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				    case 1:
					return(dbl_fsub(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				}
			case 2: /* FMPY or XMPYU */
				/*
				 * check for integer multiply (x bit set)
				 */
				if (extru(ir,fpxpos,1)) {
				    /*
				     * emulate XMPYU
				     */
				    switch (fmt) {
					case 0:
					    /*
					     * bad instruction if t specifies
					     * the right half of a register
					     */
					    if (t & 1)
						return(MAJOR_0E_EXCP);
					    BUG();
					    /* unsupported
					     * impyu(&fpregs[r1],&fpregs[r2],
						 * &fpregs[t]);
					     */
					    return(NOEXCEPTION);
					case 1:
						return(MAJOR_0E_EXCP);
				    }
				}
				else { /* FMPY */
				    switch (fmt) {
				        case 0:
					    return(sgl_fmpy(&fpregs[r1],
					       &fpregs[r2],&fpregs[t],status));
				        case 1:
					    return(dbl_fmpy(&fpregs[r1],
					       &fpregs[r2],&fpregs[t],status));
				    }
				}
			case 3: /* FDIV */
				switch (fmt) {
				    case 0:
					return(sgl_fdiv(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				    case 1:
					return(dbl_fdiv(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				}
			case 4: /* FREM */
				switch (fmt) {
				    case 0:
					return(sgl_frem(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				    case 1:
					return(dbl_frem(&fpregs[r1],&fpregs[r2],
						&fpregs[t],status));
				}
		} /* end of class 3 switch */
	} /* end of switch(class) */

	/* If we get here, something is really wrong! */
	return(MAJOR_0E_EXCP);
}
+
+
+/*
+ * routine to decode the 06 (FMPYADD and FMPYCFXT) instruction
+ */
+static u_int
+decode_06(ir,fpregs)
+u_int ir;
+u_int fpregs[];
+{
+	u_int rm1, rm2, tm, ra, ta; /* operands */
+	u_int fmt;
+	u_int error = 0;
+	u_int status;
+	u_int fpu_type_flags;
+	union {
+		double dbl;
+		float flt;
+		struct { u_int i1; u_int i2; } ints;
+	} mtmp, atmp;
+
+
+	status = fpregs[0];		/* use a local copy of status reg */
+	fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS];  /* get fpu type flags */
+	fmt = extru(ir, fpmultifmt, 1);	/* get sgl/dbl flag */
+	if (fmt == 0) { /* DBL */
+		rm1 = extru(ir, fprm1pos, 5) * sizeof(double)/sizeof(u_int);
+		if (rm1 == 0)
+			rm1 = fpzeroreg;
+		rm2 = extru(ir, fprm2pos, 5) * sizeof(double)/sizeof(u_int);
+		if (rm2 == 0)
+			rm2 = fpzeroreg;
+		tm = extru(ir, fptmpos, 5) * sizeof(double)/sizeof(u_int);
+		if (tm == 0)
+			return(MAJOR_06_EXCP);
+		ra = extru(ir, fprapos, 5) * sizeof(double)/sizeof(u_int);
+		ta = extru(ir, fptapos, 5) * sizeof(double)/sizeof(u_int);
+		if (ta == 0)
+			return(MAJOR_06_EXCP);
+
+		if  (fpu_type_flags & TIMEX_ROLEX_FPU_MASK)  {
+
+			if (ra == 0) {
+			 	/* special case FMPYCFXT, see sgl case below */
+				if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],
+					&mtmp.ints.i1,&status))
+					error = 1;
+				if (dbl_to_sgl_fcnvfxt(&fpregs[ta],
+					&atmp.ints.i1,&atmp.ints.i1,&status))
+					error = 1;
+				}
+			else {
+
+			if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,
+					&status))
+				error = 1;
+			if (dbl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,
+					&status))
+				error = 1;
+				}
+			}
+
+		else
+
+			{
+			if (ra == 0)
+				ra = fpzeroreg;
+
+			if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,
+					&status))
+				error = 1;
+			if (dbl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,
+					&status))
+				error = 1;
+
+			}
+
+		if (error)
+			return(MAJOR_06_EXCP);
+		else {
+			/* copy results */
+			fpregs[tm] = mtmp.ints.i1;
+			fpregs[tm+1] = mtmp.ints.i2;
+			fpregs[ta] = atmp.ints.i1;
+			fpregs[ta+1] = atmp.ints.i2;
+			fpregs[0] = status;
+			return(NOEXCEPTION);
+		}
+	}
+	else { /* SGL */
+		/*
+		 * calculate offsets for single precision numbers
+		 * See table 6-14 in PA-89 architecture for mapping
+		 */
+		rm1 = (extru(ir,fprm1pos,4) | 0x10 ) << 1;	/* get offset */
+		rm1 |= extru(ir,fprm1pos-4,1);	/* add right word offset */
+
+		rm2 = (extru(ir,fprm2pos,4) | 0x10 ) << 1;	/* get offset */
+		rm2 |= extru(ir,fprm2pos-4,1);	/* add right word offset */
+
+		tm = (extru(ir,fptmpos,4) | 0x10 ) << 1;	/* get offset */
+		tm |= extru(ir,fptmpos-4,1);	/* add right word offset */
+
+		ra = (extru(ir,fprapos,4) | 0x10 ) << 1;	/* get offset */
+		ra |= extru(ir,fprapos-4,1);	/* add right word offset */
+
+		ta = (extru(ir,fptapos,4) | 0x10 ) << 1;	/* get offset */
+		ta |= extru(ir,fptapos-4,1);	/* add right word offset */
+		
+		if (ra == 0x20 &&(fpu_type_flags & TIMEX_ROLEX_FPU_MASK)) {
+			/* special case FMPYCFXT (really 0)
+			  * This instruction is only present on the Timex and
+			  * Rolex fpu's in so if it is the special case and
+			  * one of these fpu's we run the FMPYCFXT instruction
+			  */
+			if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,
+					&status))
+				error = 1;
+			if (sgl_to_sgl_fcnvfxt(&fpregs[ta],&atmp.ints.i1,
+				&atmp.ints.i1,&status))
+				error = 1;
+		}
+		else {
+			if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,
+					&status))
+				error = 1;
+			if (sgl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,
+					&status))
+				error = 1;
+		}
+		if (error)
+			return(MAJOR_06_EXCP);
+		else {
+			/* copy results */
+			fpregs[tm] = mtmp.ints.i1;
+			fpregs[ta] = atmp.ints.i1;
+			fpregs[0] = status;
+			return(NOEXCEPTION);
+		}
+	}
+}
+
+/*
+ * routine to decode the 26 (FMPYSUB) instruction
+ */
+static u_int
+decode_26(ir,fpregs)
+u_int ir;
+u_int fpregs[];
+{
+	u_int rm1, rm2, tm, ra, ta; /* operands */
+	u_int fmt;
+	u_int error = 0;
+	u_int status;
+	union {
+		double dbl;
+		float flt;
+		struct { u_int i1; u_int i2; } ints;
+	} mtmp, atmp;
+
+
+	status = fpregs[0];
+	fmt = extru(ir, fpmultifmt, 1);	/* get sgl/dbl flag */
+	if (fmt == 0) { /* DBL */
+		rm1 = extru(ir, fprm1pos, 5) * sizeof(double)/sizeof(u_int);
+		if (rm1 == 0)
+			rm1 = fpzeroreg;
+		rm2 = extru(ir, fprm2pos, 5) * sizeof(double)/sizeof(u_int);
+		if (rm2 == 0)
+			rm2 = fpzeroreg;
+		tm = extru(ir, fptmpos, 5) * sizeof(double)/sizeof(u_int);
+		if (tm == 0)
+			return(MAJOR_26_EXCP);
+		ra = extru(ir, fprapos, 5) * sizeof(double)/sizeof(u_int);
+		if (ra == 0)
+			return(MAJOR_26_EXCP);
+		ta = extru(ir, fptapos, 5) * sizeof(double)/sizeof(u_int);
+		if (ta == 0)
+			return(MAJOR_26_EXCP);
+		
+		if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,&status))
+			error = 1;
+		if (dbl_fsub(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,&status))
+			error = 1;
+		if (error)
+			return(MAJOR_26_EXCP);
+		else {
+			/* copy results */
+			fpregs[tm] = mtmp.ints.i1;
+			fpregs[tm+1] = mtmp.ints.i2;
+			fpregs[ta] = atmp.ints.i1;
+			fpregs[ta+1] = atmp.ints.i2;
+			fpregs[0] = status;
+			return(NOEXCEPTION);
+		}
+	}
+	else { /* SGL */
+		/*
+		 * calculate offsets for single precision numbers
+		 * See table 6-14 in PA-89 architecture for mapping
+		 */
+		rm1 = (extru(ir,fprm1pos,4) | 0x10 ) << 1;	/* get offset */
+		rm1 |= extru(ir,fprm1pos-4,1);	/* add right word offset */
+
+		rm2 = (extru(ir,fprm2pos,4) | 0x10 ) << 1;	/* get offset */
+		rm2 |= extru(ir,fprm2pos-4,1);	/* add right word offset */
+
+		tm = (extru(ir,fptmpos,4) | 0x10 ) << 1;	/* get offset */
+		tm |= extru(ir,fptmpos-4,1);	/* add right word offset */
+
+		ra = (extru(ir,fprapos,4) | 0x10 ) << 1;	/* get offset */
+		ra |= extru(ir,fprapos-4,1);	/* add right word offset */
+
+		ta = (extru(ir,fptapos,4) | 0x10 ) << 1;	/* get offset */
+		ta |= extru(ir,fptapos-4,1);	/* add right word offset */
+		
+		if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,&status))
+			error = 1;
+		if (sgl_fsub(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,&status))
+			error = 1;
+		if (error)
+			return(MAJOR_26_EXCP);
+		else {
+			/* copy results */
+			fpregs[tm] = mtmp.ints.i1;
+			fpregs[ta] = atmp.ints.i1;
+			fpregs[0] = status;
+			return(NOEXCEPTION);
+		}
+	}
+
+}
+
+/*
+ * routine to decode the 2E (FMPYFADD,FMPYNFADD) instructions
+ */
+static u_int
+decode_2e(ir,fpregs)
+u_int ir;
+u_int fpregs[];
+{
+	u_int rm1, rm2, ra, t; /* operands */
+	u_int fmt;
+
+	fmt = extru(ir,fpfmtpos,1);	/* get fmt completer */
+	if (fmt == DBL) { /* DBL */
+		rm1 = extru(ir,fprm1pos,5) * sizeof(double)/sizeof(u_int);
+		if (rm1 == 0)
+			rm1 = fpzeroreg;
+		rm2 = extru(ir,fprm2pos,5) * sizeof(double)/sizeof(u_int);
+		if (rm2 == 0)
+			rm2 = fpzeroreg;
+		ra = ((extru(ir,fpraupos,3)<<2)|(extru(ir,fpralpos,3)>>1)) *
+		     sizeof(double)/sizeof(u_int);
+		if (ra == 0)
+			ra = fpzeroreg;
+		t = extru(ir,fptpos,5) * sizeof(double)/sizeof(u_int);
+		if (t == 0)
+			return(MAJOR_2E_EXCP);
+
+		if (extru(ir,fpfusedsubop,1)) { /* fmpyfadd or fmpynfadd? */
+			return(dbl_fmpynfadd(&fpregs[rm1], &fpregs[rm2],
+					&fpregs[ra], &fpregs[0], &fpregs[t]));
+		} else {
+			return(dbl_fmpyfadd(&fpregs[rm1], &fpregs[rm2],
+					&fpregs[ra], &fpregs[0], &fpregs[t]));
+		}
+	} /* end DBL */
+	else { /* SGL */
+		rm1 = (extru(ir,fprm1pos,5)<<1)|(extru(ir,fpxrm1pos,1));
+		if (rm1 == 0)
+			rm1 = fpzeroreg;
+		rm2 = (extru(ir,fprm2pos,5)<<1)|(extru(ir,fpxrm2pos,1));
+		if (rm2 == 0)
+			rm2 = fpzeroreg;
+		ra = (extru(ir,fpraupos,3)<<3)|extru(ir,fpralpos,3);
+		if (ra == 0)
+			ra = fpzeroreg;
+		t = ((extru(ir,fptpos,5)<<1)|(extru(ir,fpxtpos,1)));
+		if (t == 0)
+			return(MAJOR_2E_EXCP);
+
+		if (extru(ir,fpfusedsubop,1)) { /* fmpyfadd or fmpynfadd? */
+			return(sgl_fmpynfadd(&fpregs[rm1], &fpregs[rm2],
+					&fpregs[ra], &fpregs[0], &fpregs[t]));
+		} else {
+			return(sgl_fmpyfadd(&fpregs[rm1], &fpregs[rm2],
+					&fpregs[ra], &fpregs[0], &fpregs[t]));
+		}
+	} /* end SGL */
+}
+
+/*
+ * update_status_cbit
+ *
+ *	This routine returns the correct FP status register value in
+ *	*status, based on the C-bit & V-bit returned by the FCMP
+ *	emulation routine in new_status.  The architecture type
+ *	(PA83, PA89 or PA2.0) is available in fpu_type.  The y_field
+ *	and the architecture type are used to determine what flavor
+ *	of FCMP is being emulated.
+ */
+static void
+update_status_cbit(status, new_status, fpu_type, y_field)
+u_int *status, new_status;
+u_int fpu_type;
+u_int y_field;
+{
+	/*
+	 * For PA89 FPU's which implement the Compare Queue and
+	 * for PA2.0 FPU's, update the Compare Queue if the y-field = 0,
+	 * otherwise update the specified bit in the Compare Array.
+	 * Note that the y-field will always be 0 for non-PA2.0 FPU's.
+	 */
+	if ((fpu_type & TIMEX_EXTEN_FLAG) || 
+	    (fpu_type & ROLEX_EXTEN_FLAG) ||
+	    (fpu_type & PA2_0_FPU_FLAG)) {
+		if (y_field == 0) {
+			*status = ((*status & 0x04000000) >> 5) | /* old Cbit */
+				  ((*status & 0x003ff000) >> 1) | /* old CQ   */
+				  (new_status & 0xffc007ff); /* all other bits*/
+		} else {
+			*status = (*status & 0x04000000) |     /* old Cbit */
+				  ((new_status & 0x04000000) >> (y_field+4)) |
+				  (new_status & ~0x04000000 &  /* other bits */
+				   ~(0x04000000 >> (y_field+4)));
+		}
+	}
+	/* if PA83, just update the C-bit */
+	else {
+		*status = new_status;
+	}
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/frnd.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/frnd.c
new file mode 100644
index 0000000..904b384
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/frnd.c
@@ -0,0 +1,252 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  Purpose:
+ *	Single Floating-point Round to Integer
+ *	Double Floating-point Round to Integer
+ *	Quad Floating-point Round to Integer (returns unimplemented)
+ *
+ *  External Interfaces:
+ *	dbl_frnd(srcptr,nullptr,dstptr,status)
+ *	sgl_frnd(srcptr,nullptr,dstptr,status)
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+#include "dbl_float.h"
+#include "cnv_float.h"
+
+/*
+ *  Single Floating-point Round to Integer
+ */
+
+/*ARGSUSED*/
+int
+sgl_frnd(sgl_floating_point *srcptr,
+	unsigned int *nullptr,
+	sgl_floating_point *dstptr,
+	unsigned int *status)
+{
+	register unsigned int src, result;
+	register int src_exponent;
+	register boolean inexact = FALSE;
+
+	src = *srcptr;
+        /*
+         * check source operand for NaN or infinity
+         */
+        if ((src_exponent = Sgl_exponent(src)) == SGL_INFINITY_EXPONENT) {
+                /*
+                 * is signaling NaN?
+                 */
+                if (Sgl_isone_signaling(src)) {
+                        /* trap if INVALIDTRAP enabled */
+                        if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                        /* make NaN quiet */
+                        Set_invalidflag();
+                        Sgl_set_quiet(src);
+                }
+                /*
+                 * return quiet NaN or infinity
+                 */
+                *dstptr = src;
+                return(NOEXCEPTION);
+        }
+	/* 
+	 * Need to round?  With an unbiased exponent of at least SGL_P - 1
+	 * every mantissa bit is an integer bit, so the value is already
+	 * integral and can be returned exactly.
+	 */
+	if ((src_exponent -= SGL_BIAS) >= SGL_P - 1) {
+		*dstptr = src;
+		return(NOEXCEPTION);
+	}
+	/*
+	 * Generate result
+	 */
+	if (src_exponent >= 0) {
+		/* magnitude >= 1: shift the fraction bits off the right,
+		 * round, then shift back and restore the exponent */
+		Sgl_clear_exponent_set_hidden(src);
+		result = src;
+		Sgl_rightshift(result,(SGL_P-1) - (src_exponent));
+		/* check for inexact */
+		if (Sgl_isinexact_to_fix(src,src_exponent)) {
+			inexact = TRUE;
+			/*  round result  */
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+			     if (Sgl_iszero_sign(src)) Sgl_increment(result);
+			     break;
+			case ROUNDMINUS:
+			     if (Sgl_isone_sign(src)) Sgl_increment(result);
+			     break;
+			case ROUNDNEAREST:
+			     /* round-to-nearest-even: bump when past halfway,
+			      * or exactly halfway with an odd integer part */
+			     if (Sgl_isone_roundbit(src,src_exponent))
+			        if (Sgl_isone_stickybit(src,src_exponent) 
+				|| (Sgl_isone_lowmantissa(result))) 
+					Sgl_increment(result);
+			} 
+		}
+		Sgl_leftshift(result,(SGL_P-1) - (src_exponent));
+		/* rounding may have carried out of the hidden bit */
+		if (Sgl_isone_hiddenoverflow(result)) 
+			Sgl_set_exponent(result,src_exponent + (SGL_BIAS+1));
+		else Sgl_set_exponent(result,src_exponent + SGL_BIAS);
+	}
+	else {
+		/* magnitude < 1: result is either +/-0 or +/-1 */
+		result = src;  		/* set sign */
+		Sgl_setzero_exponentmantissa(result);
+		/* check for inexact */
+		if (Sgl_isnotzero_exponentmantissa(src)) {
+			inexact = TRUE;
+			/*  round result  */
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+			     if (Sgl_iszero_sign(src)) 
+				Sgl_set_exponent(result,SGL_BIAS);
+			     break;
+			case ROUNDMINUS:
+			     if (Sgl_isone_sign(src)) 
+				Sgl_set_exponent(result,SGL_BIAS);
+			     break;
+			case ROUNDNEAREST:
+			     /* >= 0.5 in magnitude rounds to +/-1 */
+			     if (src_exponent == -1)
+			        if (Sgl_isnotzero_mantissa(src))
+				   Sgl_set_exponent(result,SGL_BIAS);
+			} 
+		}
+	}
+	*dstptr = result;
+	if (inexact) {
+		if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+		else Set_inexactflag();
+	}
+	return(NOEXCEPTION);
+} 
+
+/*
+ *  Double Floating-point Round to Integer
+ */
+
+/*ARGSUSED*/
+int
+dbl_frnd(
+	dbl_floating_point *srcptr,
+	unsigned int *nullptr,
+	dbl_floating_point *dstptr,
+	unsigned int *status)
+{
+	register unsigned int srcp1, srcp2, resultp1, resultp2;
+	register int src_exponent;
+	register boolean inexact = FALSE;
+
+	/* a double is handled as two 32-bit words: p1 holds sign,
+	 * exponent and high mantissa, p2 the low mantissa */
+	Dbl_copyfromptr(srcptr,srcp1,srcp2);
+        /*
+         * check source operand for NaN or infinity
+         */
+        if ((src_exponent = Dbl_exponent(srcp1)) == DBL_INFINITY_EXPONENT) {
+                /*
+                 * is signaling NaN?
+                 */
+                if (Dbl_isone_signaling(srcp1)) {
+                        /* trap if INVALIDTRAP enabled */
+                        if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                        /* make NaN quiet */
+                        Set_invalidflag();
+                        Dbl_set_quiet(srcp1);
+                }
+                /*
+                 * return quiet NaN or infinity
+                 */
+                Dbl_copytoptr(srcp1,srcp2,dstptr);
+                return(NOEXCEPTION);
+        }
+	/* 
+	 * Need to round?  With an unbiased exponent of at least DBL_P - 1
+	 * the value is already integral and can be returned exactly.
+	 */
+	if ((src_exponent -= DBL_BIAS) >= DBL_P - 1) {
+		Dbl_copytoptr(srcp1,srcp2,dstptr);
+		return(NOEXCEPTION);
+	}
+	/*
+	 * Generate result
+	 */
+	if (src_exponent >= 0) {
+		/* magnitude >= 1: shift the fraction bits off the right,
+		 * round, then shift back and restore the exponent */
+		Dbl_clear_exponent_set_hidden(srcp1);
+		resultp1 = srcp1;
+		resultp2 = srcp2;
+		Dbl_rightshift(resultp1,resultp2,(DBL_P-1) - (src_exponent));
+		/* check for inexact */
+		if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
+			inexact = TRUE;
+			/*  round result  */
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+			     if (Dbl_iszero_sign(srcp1)) 
+				Dbl_increment(resultp1,resultp2);
+			     break;
+			case ROUNDMINUS:
+			     if (Dbl_isone_sign(srcp1)) 
+				Dbl_increment(resultp1,resultp2);
+			     break;
+			case ROUNDNEAREST:
+			     /* round-to-nearest-even: bump when past halfway,
+			      * or exactly halfway with an odd integer part */
+			     if (Dbl_isone_roundbit(srcp1,srcp2,src_exponent))
+			      if (Dbl_isone_stickybit(srcp1,srcp2,src_exponent) 
+				  || (Dbl_isone_lowmantissap2(resultp2))) 
+					Dbl_increment(resultp1,resultp2);
+			} 
+		}
+		Dbl_leftshift(resultp1,resultp2,(DBL_P-1) - (src_exponent));
+		/* rounding may have carried out of the hidden bit */
+		if (Dbl_isone_hiddenoverflow(resultp1))
+			Dbl_set_exponent(resultp1,src_exponent + (DBL_BIAS+1));
+		else Dbl_set_exponent(resultp1,src_exponent + DBL_BIAS);
+	}
+	else {
+		/* magnitude < 1: result is either +/-0 or +/-1 */
+		resultp1 = srcp1;  /* set sign */
+		Dbl_setzero_exponentmantissa(resultp1,resultp2);
+		/* check for inexact */
+		if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
+			inexact = TRUE;
+			/*  round result  */
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+			     if (Dbl_iszero_sign(srcp1)) 
+				Dbl_set_exponent(resultp1,DBL_BIAS);
+			     break;
+			case ROUNDMINUS:
+			     if (Dbl_isone_sign(srcp1)) 
+				Dbl_set_exponent(resultp1,DBL_BIAS);
+			     break;
+			case ROUNDNEAREST:
+			     /* >= 0.5 in magnitude rounds to +/-1 */
+			     if (src_exponent == -1)
+			        if (Dbl_isnotzero_mantissa(srcp1,srcp2))
+				   Dbl_set_exponent(resultp1,DBL_BIAS);
+			} 
+		}
+	}
+	Dbl_copytoptr(resultp1,resultp2,dstptr);
+	if (inexact) {
+		if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+		else Set_inexactflag();
+	}
+	return(NOEXCEPTION);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/hppa.h b/src/kernel/linux/v4.14/arch/parisc/math-emu/hppa.h
new file mode 100644
index 0000000..5d3d52f
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/hppa.h
@@ -0,0 +1,42 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifdef __NO_PA_HDRS
+    PA header file -- do not include this header file for non-PA builds.
+#endif
+
+
+/* amount is assumed to be a constant between 0 and 32 (non-inclusive);
+ * a shift count of 0 or 32 would make one of the two shifts shift by the
+ * full word width, which is undefined behavior in C. */
+#define Shiftdouble(left,right,amount,dest)			\
+    /* int left, right, amount, dest; */			\
+    dest = ((left) << (32-(amount))) | ((unsigned int)(right) >> (amount))
+
+/* amount must be less than 32; amount == 0 is handled explicitly to
+ * avoid the undefined 32-bit shift.  NOTE(review): expands to a bare
+ * if/else statement (no do { } while (0) wrapper), so only use it where
+ * a full statement is syntactically valid.  The sign bit of 'left' is
+ * deliberately masked off before the shift. */
+#define Variableshiftdouble(left,right,amount,dest)		\
+    /* unsigned int left, right;  int amount, dest; */		\
+    if (amount == 0) dest = right;				\
+    else dest = ((((unsigned) left)&0x7fffffff) << (32-(amount))) |	\
+          ((unsigned) right >> (amount))
+
+/* amount must be between 0 and 32 (non-inclusive); unlike
+ * Variableshiftdouble, amount == 0 is not special-cased and the top bit
+ * of 'left' participates in the result. */
+#define Variable_shift_double(left,right,amount,dest)		\
+    /* unsigned int left, right;  int amount, dest; */		\
+    dest = (left << (32-(amount))) | ((unsigned) right >> (amount))
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/math-emu.h b/src/kernel/linux/v4.14/arch/parisc/math-emu/math-emu.h
new file mode 100644
index 0000000..3a99f59
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/math-emu.h
@@ -0,0 +1,27 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef _PARISC_MATH_EMU_H
+#define _PARISC_MATH_EMU_H
+
+#include <asm/ptrace.h>
+/* Entry point into the floating-point emulator for an FP exception
+ * taken by the task whose register state is *regs.  NOTE(review): the
+ * definition is not visible here; presumably returns 0 on successful
+ * emulation and nonzero when a signal must be raised -- confirm against
+ * the driver source. */
+extern int handle_fpe(struct pt_regs *regs);
+
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/sfadd.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/sfadd.c
new file mode 100644
index 0000000..f802cd6
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/sfadd.c
@@ -0,0 +1,518 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/sfadd.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Single_add: add two single precision values.
+ *
+ *  External Interfaces:
+ *	sgl_fadd(leftptr, rightptr, dstptr, status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+
+/*
+ * Single_add: add two single precision values.
+ */
+int
+sgl_fadd(
+    sgl_floating_point *leftptr,
+    sgl_floating_point *rightptr,
+    sgl_floating_point *dstptr,
+    unsigned int *status)
+    {
+    register unsigned int left, right, result, extent;
+    register unsigned int signless_upper_left, signless_upper_right, save;
+    
+    
+    register int result_exponent, right_exponent, diff_exponent;
+    register int sign_save, jumpsize;
+    register boolean inexact = FALSE;
+    register boolean underflowtrap;
+        
+    /* Create local copies of the numbers */
+    left = *leftptr;
+    right = *rightptr;
+
+    /* A zero "save" helps discover equal operands (for later),  *
+     * and is used in swapping operands (if needed).             */
+    Sgl_xortointp1(left,right,/*to*/save);
+
+    /*
+     * check first operand for NaN's or infinity
+     */
+    if ((result_exponent = Sgl_exponent(left)) == SGL_INFINITY_EXPONENT)
+	{
+	if (Sgl_iszero_mantissa(left)) 
+	    {
+	    if (Sgl_isnotnan(right)) 
+		{
+		if (Sgl_isinfinity(right) && save!=0) 
+		    {
+		    /* 
+		     * invalid since operands are opposite signed infinity's
+		     */
+		    if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                    Set_invalidflag();
+                    Sgl_makequietnan(result);
+		    *dstptr = result;
+		    return(NOEXCEPTION);
+		    }
+		/*
+	 	 * return infinity
+	 	 */
+		*dstptr = left;
+		return(NOEXCEPTION);
+		}
+	    }
+	else 
+	    {
+            /*
+             * is NaN; signaling or quiet?
+             */
+            if (Sgl_isone_signaling(left)) 
+		{
+               	/* trap if INVALIDTRAP enabled */
+		if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+        	/* make NaN quiet */
+        	Set_invalidflag();
+        	Sgl_set_quiet(left);
+        	}
+	    /* 
+	     * is second operand a signaling NaN? 
+	     */
+	    else if (Sgl_is_signalingnan(right)) 
+		{
+        	/* trap if INVALIDTRAP enabled */
+               	if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+		/* make NaN quiet */
+		Set_invalidflag();
+		Sgl_set_quiet(right);
+		*dstptr = right;
+		return(NOEXCEPTION);
+		}
+	    /*
+ 	     * return quiet NaN
+ 	     */
+ 	    *dstptr = left;
+ 	    return(NOEXCEPTION);
+	    }
+	} /* End left NaN or Infinity processing */
+    /*
+     * check second operand for NaN's or infinity
+     */
+    if (Sgl_isinfinity_exponent(right)) 
+	{
+	if (Sgl_iszero_mantissa(right)) 
+	    {
+	    /* return infinity */
+	    *dstptr = right;
+	    return(NOEXCEPTION);
+	    }
+        /*
+         * is NaN; signaling or quiet?
+         */
+        if (Sgl_isone_signaling(right)) 
+	    {
+            /* trap if INVALIDTRAP enabled */
+	    if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+	    /* make NaN quiet */
+	    Set_invalidflag();
+	    Sgl_set_quiet(right);
+	    }
+	/*
+	 * return quiet NaN
+ 	 */
+	*dstptr = right;
+	return(NOEXCEPTION);
+    	} /* End right NaN or Infinity processing */
+
+    /* Invariant: Must be dealing with finite numbers */
+
+    /* Compare operands by removing the sign */
+    Sgl_copytoint_exponentmantissa(left,signless_upper_left);
+    Sgl_copytoint_exponentmantissa(right,signless_upper_right);
+
+    /* sign difference selects add or sub operation. */
+    if(Sgl_ismagnitudeless(signless_upper_left,signless_upper_right))
+	{
+	/* Set the left operand to the larger one by XOR swap *
+	 *  First finish the first word using "save"          */
+	Sgl_xorfromintp1(save,right,/*to*/right);
+	Sgl_xorfromintp1(save,left,/*to*/left);
+	result_exponent = Sgl_exponent(left);
+	}
+    /* Invariant:  left is not smaller than right. */ 
+
+    if((right_exponent = Sgl_exponent(right)) == 0)
+        {
+	/* Denormalized operands.  First look for zeroes */
+	if(Sgl_iszero_mantissa(right)) 
+	    {
+	    /* right is zero */
+	    if(Sgl_iszero_exponentmantissa(left))
+		{
+		/* Both operands are zeros */
+		if(Is_rounding_mode(ROUNDMINUS))
+		    {
+		    /* (-0) + (+0) = -0 in round-toward-minus-infinity */
+		    Sgl_or_signs(left,/*with*/right);
+		    }
+		else
+		    {
+		    Sgl_and_signs(left,/*with*/right);
+		    }
+		}
+	    else 
+		{
+		/* Left is not a zero and must be the result.  Trapped
+		 * underflows are signaled if left is denormalized.  Result
+		 * is always exact. */
+		if( (result_exponent == 0) && Is_underflowtrap_enabled() )
+		    {
+		    /* need to normalize results mantissa */
+	    	    sign_save = Sgl_signextendedsign(left);
+		    Sgl_leftshiftby1(left);
+		    Sgl_normalize(left,result_exponent);
+		    Sgl_set_sign(left,/*using*/sign_save);
+		    Sgl_setwrapped_exponent(left,result_exponent,unfl);
+		    *dstptr = left;
+		    return(UNDERFLOWEXCEPTION);
+		    }
+		}
+	    *dstptr = left;
+	    return(NOEXCEPTION);
+	    }
+
+	/* Neither are zeroes */
+	Sgl_clear_sign(right);	/* Exponent is already cleared */
+	if(result_exponent == 0 )
+	    {
+	    /* Both operands are denormalized.  The result must be exact
+	     * and is simply calculated.  A sum could become normalized and a
+	     * difference could cancel to a true zero. */
+	    if( (/*signed*/int) save < 0 )
+		{
+		/* signs differ (save's sign bit is set): subtract */
+		Sgl_subtract(left,/*minus*/right,/*into*/result);
+		if(Sgl_iszero_mantissa(result))
+		    {
+		    if(Is_rounding_mode(ROUNDMINUS))
+			{
+			Sgl_setone_sign(result);
+			}
+		    else
+			{
+			Sgl_setzero_sign(result);
+			}
+		    *dstptr = result;
+		    return(NOEXCEPTION);
+		    }
+		}
+	    else
+		{
+		Sgl_addition(left,right,/*into*/result);
+		if(Sgl_isone_hidden(result))
+		    {
+		    /* sum became normalized */
+		    *dstptr = result;
+		    return(NOEXCEPTION);
+		    }
+		}
+	    if(Is_underflowtrap_enabled())
+		{
+		/* need to normalize result */
+	    	sign_save = Sgl_signextendedsign(result);
+		Sgl_leftshiftby1(result);
+		Sgl_normalize(result,result_exponent);
+		Sgl_set_sign(result,/*using*/sign_save);
+                Sgl_setwrapped_exponent(result,result_exponent,unfl);
+		*dstptr = result;
+		return(UNDERFLOWEXCEPTION);
+		}
+	    *dstptr = result;
+	    return(NOEXCEPTION);
+	    }
+	right_exponent = 1;	/* Set exponent to reflect different bias
+				 * with denormalized numbers. */
+	}
+    else
+	{
+	Sgl_clear_signexponent_set_hidden(right);
+	}
+    Sgl_clear_exponent_set_hidden(left);
+    diff_exponent = result_exponent - right_exponent;
+
+    /* 
+     * Special case alignment of operands that would force alignment 
+     * beyond the extent of the extension.  A further optimization
+     * could special case this but only reduces the path length for this
+     * infrequent case.
+     */
+    if(diff_exponent > SGL_THRESHOLD)
+	{
+	diff_exponent = SGL_THRESHOLD;
+	}
+    
+    /* Align right operand by shifting to right */
+    Sgl_right_align(/*operand*/right,/*shifted by*/diff_exponent,
+     /*and lower to*/extent);
+
+    /* Treat sum and difference of the operands separately. */
+    if( (/*signed*/int) save < 0 )
+	{
+	/*
+	 * Difference of the two operands.  There can be no overflow.  A
+	 * borrow can occur out of the hidden bit and force a post
+	 * normalization phase.
+	 */
+	Sgl_subtract_withextension(left,/*minus*/right,/*with*/extent,/*into*/result);
+	if(Sgl_iszero_hidden(result))
+	    {
+	    /* Handle normalization */
+	    /* A straightforward algorithm would now shift the result
+	     * and extension left until the hidden bit becomes one.  Not
+	     * all of the extension bits need participate in the shift.
+	     * Only the two most significant bits (round and guard) are
+	     * needed.  If only a single shift is needed then the guard
+	     * bit becomes a significant low order bit and the extension
+	     * must participate in the rounding.  If more than a single 
+	     * shift is needed, then all bits to the right of the guard 
+	     * bit are zeros, and the guard bit may or may not be zero. */
+	    sign_save = Sgl_signextendedsign(result);
+            Sgl_leftshiftby1_withextent(result,extent,result);
+
+            /* Need to check for a zero result.  The sign and exponent
+	     * fields have already been zeroed.  The more efficient test
+	     * of the full object can be used.
+	     */
+    	    if(Sgl_iszero(result))
+		/* Must have been "x-x" or "x+(-x)". */
+		{
+		if(Is_rounding_mode(ROUNDMINUS)) Sgl_setone_sign(result);
+		*dstptr = result;
+		return(NOEXCEPTION);
+		}
+	    result_exponent--;
+	    /* Look to see if normalization is finished. */
+	    if(Sgl_isone_hidden(result))
+		{
+		if(result_exponent==0)
+		    {
+		    /* Denormalized, exponent should be zero.  Left operand *
+ 		     * was normalized, so extent (guard, round) was zero    */
+		    goto underflow;
+		    }
+		else
+		    {
+		    /* No further normalization is needed. */
+		    Sgl_set_sign(result,/*using*/sign_save);
+	    	    Ext_leftshiftby1(extent);
+		    goto round;
+		    }
+		}
+
+	    /* Check for denormalized, exponent should be zero.  Left    * 
+	     * operand was normalized, so extent (guard, round) was zero */
+	    if(!(underflowtrap = Is_underflowtrap_enabled()) &&
+	       result_exponent==0) goto underflow;
+
+	    /* Shift extension to complete one bit of normalization and
+	     * update exponent. */
+	    Ext_leftshiftby1(extent);
+
+	    /* Discover first one bit to determine shift amount.  Use a
+	     * modified binary search.  We have already shifted the result
+	     * one position right and still not found a one so the remainder
+	     * of the extension must be zero and simplifies rounding. */
+	    /* Scan bytes */
+	    while(Sgl_iszero_hiddenhigh7mantissa(result))
+		{
+		Sgl_leftshiftby8(result);
+		if((result_exponent -= 8) <= 0  && !underflowtrap)
+		    goto underflow;
+		}
+	    /* Now narrow it down to the nibble */
+	    if(Sgl_iszero_hiddenhigh3mantissa(result))
+		{
+		/* The lower nibble contains the normalizing one */
+		Sgl_leftshiftby4(result);
+		if((result_exponent -= 4) <= 0 && !underflowtrap)
+		    goto underflow;
+		}
+	    /* Select case were first bit is set (already normalized)
+	     * otherwise select the proper shift. */
+	    if((jumpsize = Sgl_hiddenhigh3mantissa(result)) > 7)
+		{
+		/* Already normalized */
+		if(result_exponent <= 0) goto underflow;
+		Sgl_set_sign(result,/*using*/sign_save);
+		Sgl_set_exponent(result,/*using*/result_exponent);
+		*dstptr = result;
+		return(NOEXCEPTION);
+		}
+	    Sgl_sethigh4bits(result,/*using*/sign_save);
+	    /* jumpsize (1..7) encodes the position of the leading one
+	     * bit within the top three mantissa bits */
+	    switch(jumpsize) 
+		{
+		case 1:
+		    {
+		    Sgl_leftshiftby3(result);
+		    result_exponent -= 3;
+		    break;
+		    }
+		case 2:
+		case 3:
+		    {
+		    Sgl_leftshiftby2(result);
+		    result_exponent -= 2;
+		    break;
+		    }
+		case 4:
+		case 5:
+		case 6:
+		case 7:
+		    {
+		    Sgl_leftshiftby1(result);
+		    result_exponent -= 1;
+		    break;
+		    }
+		}
+	    if(result_exponent > 0) 
+		{
+		Sgl_set_exponent(result,/*using*/result_exponent);
+		*dstptr = result;
+		return(NOEXCEPTION); /* Sign bit is already set */
+		}
+	    /* Fixup potential underflows */
+	  underflow:
+	    if(Is_underflowtrap_enabled())
+		{
+		Sgl_set_sign(result,sign_save);
+                Sgl_setwrapped_exponent(result,result_exponent,unfl);
+		*dstptr = result;
+		/* inexact = FALSE; */
+		return(UNDERFLOWEXCEPTION);
+		}
+	    /* 
+	     * Since we cannot get an inexact denormalized result,
+	     * we can now return.
+	     */
+	    Sgl_right_align(result,/*by*/(1-result_exponent),extent);
+	    Sgl_clear_signexponent(result);
+	    Sgl_set_sign(result,sign_save);
+	    *dstptr = result;
+	    return(NOEXCEPTION);
+	    } /* end if(hidden...)... */
+	/* Fall through and round */
+	} /* end if(save < 0)... */
+    else 
+	{
+	/* Add magnitudes */
+	Sgl_addition(left,right,/*to*/result);
+	if(Sgl_isone_hiddenoverflow(result))
+	    {
+	    /* Prenormalization required. */
+	    Sgl_rightshiftby1_withextent(result,extent,extent);
+	    Sgl_arithrightshiftby1(result);
+	    result_exponent++;
+	    } /* end if hiddenoverflow... */
+	} /* end else ...add magnitudes... */
+    
+    /* Round the result.  If the extension is all zeros,then the result is
+     * exact.  Otherwise round in the correct direction.  No underflow is
+     * possible. If a postnormalization is necessary, then the mantissa is
+     * all zeros so no shift is needed. */
+  round:
+    if(Ext_isnotzero(extent))
+	{
+	inexact = TRUE;
+	switch(Rounding_mode())
+	    {
+	    case ROUNDNEAREST: /* The default. */
+	    if(Ext_isone_sign(extent))
+		{
+		/* at least 1/2 ulp */
+		if(Ext_isnotzero_lower(extent)  ||
+		  Sgl_isone_lowmantissa(result))
+		    {
+		    /* either exactly half way and odd or more than 1/2ulp */
+		    Sgl_increment(result);
+		    }
+		}
+	    break;
+
+	    case ROUNDPLUS:
+	    if(Sgl_iszero_sign(result))
+		{
+		/* Round up positive results */
+		Sgl_increment(result);
+		}
+	    break;
+	    
+	    case ROUNDMINUS:
+	    if(Sgl_isone_sign(result))
+		{
+		/* Round down negative results */
+		Sgl_increment(result);
+		}
+	    /* fallthrough - ROUNDZERO requires no action */
+	    
+	    case ROUNDZERO:;
+	    /* truncate is simple */
+	    } /* end switch... */
+	if(Sgl_isone_hiddenoverflow(result)) result_exponent++;
+	}
+    if(result_exponent == SGL_INFINITY_EXPONENT)
+        {
+        /* Overflow */
+        if(Is_overflowtrap_enabled())
+	    {
+	    Sgl_setwrapped_exponent(result,result_exponent,ovfl);
+	    *dstptr = result;
+	    if (inexact)
+		if (Is_inexacttrap_enabled())
+		    return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+		else Set_inexactflag();
+	    return(OVERFLOWEXCEPTION);
+	    }
+        else
+	    {
+	    Set_overflowflag();
+	    inexact = TRUE;
+	    Sgl_setoverflow(result);
+	    }
+	}
+    else Sgl_set_exponent(result,result_exponent);
+    *dstptr = result;
+    if(inexact) 
+	if(Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+	else Set_inexactflag();
+    return(NOEXCEPTION);
+    }
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/sfcmp.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/sfcmp.c
new file mode 100644
index 0000000..1466fb4
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/sfcmp.c
@@ -0,0 +1,155 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/sfcmp.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	sgl_cmp: compare two values
+ *
+ *  External Interfaces:
+ *	sgl_fcmp(leftptr, rightptr, cond, status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+    
+/*
+ * sgl_cmp: compare two values
+ */
+int
+sgl_fcmp (sgl_floating_point * leftptr, sgl_floating_point * rightptr,
+	  unsigned int cond, unsigned int *status)
+                                           
+                       /* The predicate to be tested */
+                         
+    {
+    register unsigned int left, right;
+    register int xorresult;
+        
+    /* Create local copies of the numbers */
+    left = *leftptr;
+    right = *rightptr;
+
+    /*
+     * Test for NaN
+     */
+    if(    (Sgl_exponent(left) == SGL_INFINITY_EXPONENT)
+        || (Sgl_exponent(right) == SGL_INFINITY_EXPONENT) )
+	{
+	/* Check if a NaN is involved.  Signal an invalid exception when 
+	 * comparing a signaling NaN or when comparing quiet NaNs and the
+	 * low bit of the condition is set */
+        if( (  (Sgl_exponent(left) == SGL_INFINITY_EXPONENT)
+	    && Sgl_isnotzero_mantissa(left) 
+	    && (Exception(cond) || Sgl_isone_signaling(left)))
+	   ||
+	    (  (Sgl_exponent(right) == SGL_INFINITY_EXPONENT)
+	    && Sgl_isnotzero_mantissa(right) 
+	    && (Exception(cond) || Sgl_isone_signaling(right)) ) )
+	    {
+	    if( Is_invalidtrap_enabled() ) {
+	    	Set_status_cbit(Unordered(cond));
+		return(INVALIDEXCEPTION);
+	    }
+	    else Set_invalidflag();
+	    Set_status_cbit(Unordered(cond));
+	    return(NOEXCEPTION);
+	    }
+	/* All the exceptional conditions are handled, now special case
+	   NaN compares */
+        else if( ((Sgl_exponent(left) == SGL_INFINITY_EXPONENT)
+	    && Sgl_isnotzero_mantissa(left))
+	   ||
+	    ((Sgl_exponent(right) == SGL_INFINITY_EXPONENT)
+	    && Sgl_isnotzero_mantissa(right)) )
+	    {
+	    /* NaNs always compare unordered. */
+	    Set_status_cbit(Unordered(cond));
+	    return(NOEXCEPTION);
+	    }
+	/* infinities will drop down to the normal compare mechanisms */
+	}
+    /* First compare for unequal signs => less or greater or
+     * special equal case */
+    Sgl_xortointp1(left,right,xorresult);
+    /* xorresult < 0 iff the sign bits of the operands differ */
+    if( xorresult < 0 )
+        {
+        /* left negative => less, left positive => greater.
+         * equal is possible if both operands are zeros. */
+        if( Sgl_iszero_exponentmantissa(left) 
+	  && Sgl_iszero_exponentmantissa(right) )
+            {
+	    /* +0 compares equal to -0 */
+	    Set_status_cbit(Equal(cond));
+	    }
+	else if( Sgl_isone_sign(left) )
+	    {
+	    Set_status_cbit(Lessthan(cond));
+	    }
+	else
+	    {
+	    Set_status_cbit(Greaterthan(cond));
+	    }
+        }
+    /* Signs are the same.  Treat negative numbers separately
+     * from the positives because of the reversed sense.  */
+    else if( Sgl_all(left) == Sgl_all(right) )
+        {
+        Set_status_cbit(Equal(cond));
+        }
+    else if( Sgl_iszero_sign(left) )
+        {
+        /* Positive compare */
+        if( Sgl_all(left) < Sgl_all(right) )
+	    {
+	    Set_status_cbit(Lessthan(cond));
+	    }
+	else
+	    {
+	    Set_status_cbit(Greaterthan(cond));
+	    }
+	}
+    else
+        {
+        /* Negative compare.  Signed or unsigned compares
+         * both work the same.  That distinction is only
+         * important when the sign bits differ. */
+        if( Sgl_all(left) > Sgl_all(right) )
+	    {
+	    Set_status_cbit(Lessthan(cond));
+	    }
+        else
+	    {
+	    Set_status_cbit(Greaterthan(cond));
+	    }
+        }
+	return(NOEXCEPTION);
+    }
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/sfdiv.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/sfdiv.c
new file mode 100644
index 0000000..3e2a4d6
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/sfdiv.c
@@ -0,0 +1,392 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/sfdiv.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Single Precision Floating-point Divide
+ *
+ *  External Interfaces:
+ *	sgl_fdiv(srcptr1,srcptr2,dstptr,status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+
+/*
+ *  Single Precision Floating-point Divide
+ */
+
+/*
+ * sgl_fdiv - IEEE single-precision divide: *dstptr = *srcptr1 / *srcptr2.
+ *
+ * Operands are handled as raw 32-bit words through the Sgl_* accessor
+ * macros (sgl_float.h).  Returns an exception mask (NOEXCEPTION,
+ * INVALIDEXCEPTION, DIVISIONBYZEROEXCEPTION, OVERFLOWEXCEPTION,
+ * UNDERFLOWEXCEPTION, INEXACTEXCEPTION) for the trap dispatcher; when the
+ * corresponding trap is disabled the status flag is set instead and the
+ * rounded quotient is stored through dstptr.
+ */
+int
+sgl_fdiv (sgl_floating_point * srcptr1, sgl_floating_point * srcptr2,
+	  sgl_floating_point * dstptr, unsigned int *status)
+{
+	register unsigned int opnd1, opnd2, opnd3, result;
+	register int dest_exponent, count;
+	register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
+	boolean is_tiny;
+
+	opnd1 = *srcptr1;
+	opnd2 = *srcptr2;
+	/* 
+	 * set sign bit of result 
+	 */
+	if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2)) Sgl_setnegativezero(result);  
+	else Sgl_setzero(result);
+	/*
+	 * check first operand for NaN's or infinity
+	 */
+	if (Sgl_isinfinity_exponent(opnd1)) {
+		if (Sgl_iszero_mantissa(opnd1)) {
+			if (Sgl_isnotnan(opnd2)) {
+				if (Sgl_isinfinity(opnd2)) {
+					/* 
+					 * invalid since both operands 
+					 * are infinity 
+					 */
+					if (Is_invalidtrap_enabled()) 
+                                		return(INVALIDEXCEPTION);
+                                	Set_invalidflag();
+                                	Sgl_makequietnan(result);
+					*dstptr = result;
+					return(NOEXCEPTION);
+				}
+				/*
+			 	 * return infinity
+			 	 */
+				Sgl_setinfinity_exponentmantissa(result);
+				*dstptr = result;
+				return(NOEXCEPTION);
+			}
+		}
+		else {
+                	/*
+                 	 * is NaN; signaling or quiet?
+                 	 */
+                	if (Sgl_isone_signaling(opnd1)) {
+                        	/* trap if INVALIDTRAP enabled */
+                        	if (Is_invalidtrap_enabled()) 
+                            		return(INVALIDEXCEPTION);
+                        	/* make NaN quiet */
+                        	Set_invalidflag();
+                        	Sgl_set_quiet(opnd1);
+                	}
+			/* 
+			 * is second operand a signaling NaN? 
+			 */
+			else if (Sgl_is_signalingnan(opnd2)) {
+                        	/* trap if INVALIDTRAP enabled */
+                        	if (Is_invalidtrap_enabled())
+                            		return(INVALIDEXCEPTION);
+                        	/* make NaN quiet */
+                        	Set_invalidflag();
+                        	Sgl_set_quiet(opnd2);
+                		*dstptr = opnd2;
+                		return(NOEXCEPTION);
+			}
+                	/*
+                 	 * return quiet NaN
+                 	 */
+                	*dstptr = opnd1;
+                	return(NOEXCEPTION);
+		}
+	}
+	/*
+	 * check second operand for NaN's or infinity
+	 */
+	if (Sgl_isinfinity_exponent(opnd2)) {
+		if (Sgl_iszero_mantissa(opnd2)) {
+			/*
+			 * return zero
+			 */
+			Sgl_setzero_exponentmantissa(result);
+			*dstptr = result;
+			return(NOEXCEPTION);
+		}
+                /*
+                 * is NaN; signaling or quiet?
+                 */
+                if (Sgl_isone_signaling(opnd2)) {
+                        /* trap if INVALIDTRAP enabled */
+                        if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                        /* make NaN quiet */
+                        Set_invalidflag();
+                        Sgl_set_quiet(opnd2);
+                }
+                /*
+                 * return quiet NaN
+                 */
+                *dstptr = opnd2;
+                return(NOEXCEPTION);
+	}
+	/*
+	 * check for division by zero
+	 */
+	if (Sgl_iszero_exponentmantissa(opnd2)) {
+		if (Sgl_iszero_exponentmantissa(opnd1)) {
+			/* invalid since both operands are zero */
+			if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                        Set_invalidflag();
+                        Sgl_makequietnan(result);
+			*dstptr = result;
+			return(NOEXCEPTION);
+		}
+		if (Is_divisionbyzerotrap_enabled())
+                        return(DIVISIONBYZEROEXCEPTION);
+                Set_divisionbyzeroflag();
+                Sgl_setinfinity_exponentmantissa(result);
+		*dstptr = result;
+		return(NOEXCEPTION);
+	}
+	/*
+	 * Generate exponent 
+	 */
+	dest_exponent = Sgl_exponent(opnd1) - Sgl_exponent(opnd2) + SGL_BIAS;
+
+	/*
+	 * Generate mantissa
+	 */
+	if (Sgl_isnotzero_exponent(opnd1)) {
+		/* set hidden bit */
+		Sgl_clear_signexponent_set_hidden(opnd1);
+	}
+	else {
+		/* check for zero */
+		if (Sgl_iszero_mantissa(opnd1)) {
+			Sgl_setzero_exponentmantissa(result);
+			*dstptr = result;
+			return(NOEXCEPTION);
+		}
+                /* is denormalized; want to normalize */
+                Sgl_clear_signexponent(opnd1);
+                Sgl_leftshiftby1(opnd1);
+		Sgl_normalize(opnd1,dest_exponent);
+	}
+	/* opnd2 needs to have hidden bit set with msb in hidden bit */
+	if (Sgl_isnotzero_exponent(opnd2)) {
+		Sgl_clear_signexponent_set_hidden(opnd2);
+	}
+	else {
+                /* is denormalized; want to normalize */
+                Sgl_clear_signexponent(opnd2);
+                Sgl_leftshiftby1(opnd2);
+		/* normalize in chunks of 8, then 4, then 1 bit,
+		 * compensating dest_exponent for each shift */
+		while(Sgl_iszero_hiddenhigh7mantissa(opnd2)) {
+			Sgl_leftshiftby8(opnd2);
+			dest_exponent += 8;
+		}
+		if(Sgl_iszero_hiddenhigh3mantissa(opnd2)) {
+			Sgl_leftshiftby4(opnd2);
+			dest_exponent += 4;
+		}
+		while(Sgl_iszero_hidden(opnd2)) {
+			Sgl_leftshiftby1(opnd2);
+			dest_exponent += 1;
+		}
+	}
+
+	/* Divide the source mantissas */
+
+	/*
+	 * A non_restoring divide algorithm is used.
+	 * opnd1 holds the running partial remainder (sign selects
+	 * add-back vs subtract); opnd3 accumulates the quotient bits.
+	 */
+	Sgl_subtract(opnd1,opnd2,opnd1);
+	Sgl_setzero(opnd3);
+	for (count=1;count<=SGL_P && Sgl_all(opnd1);count++) {
+		Sgl_leftshiftby1(opnd1);
+		Sgl_leftshiftby1(opnd3);
+		if (Sgl_iszero_sign(opnd1)) {
+			Sgl_setone_lowmantissa(opnd3);
+			Sgl_subtract(opnd1,opnd2,opnd1);
+		}
+		else Sgl_addition(opnd1,opnd2,opnd1);
+	}
+	if (count <= SGL_P) {
+		/* loop ended early: the partial remainder became zero, so
+		 * the quotient is exact; append the final 1 bit and
+		 * left-justify into mantissa position */
+		Sgl_leftshiftby1(opnd3);
+		Sgl_setone_lowmantissa(opnd3);
+		Sgl_leftshift(opnd3,SGL_P-count);
+		if (Sgl_iszero_hidden(opnd3)) {
+			Sgl_leftshiftby1(opnd3);
+			dest_exponent--;
+		}
+	}
+	else {
+		if (Sgl_iszero_hidden(opnd3)) {
+			/* need to get one more bit of result */
+			Sgl_leftshiftby1(opnd1);
+			Sgl_leftshiftby1(opnd3);
+			if (Sgl_iszero_sign(opnd1)) {
+				Sgl_setone_lowmantissa(opnd3);
+				Sgl_subtract(opnd1,opnd2,opnd1);
+			}
+			else Sgl_addition(opnd1,opnd2,opnd1);
+			dest_exponent--;
+		}
+		if (Sgl_iszero_sign(opnd1)) guardbit = TRUE;
+		stickybit = Sgl_all(opnd1);
+	}
+	/* any guard bit or leftover remainder means the quotient is inexact */
+	inexact = guardbit | stickybit;
+
+	/* 
+	 * round result 
+	 */
+	if (inexact && (dest_exponent > 0 || Is_underflowtrap_enabled())) {
+		Sgl_clear_signexponent(opnd3);
+		switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				if (Sgl_iszero_sign(result)) 
+					Sgl_increment_mantissa(opnd3);
+				break;
+			case ROUNDMINUS: 
+				if (Sgl_isone_sign(result)) 
+					Sgl_increment_mantissa(opnd3);
+				break;
+			case ROUNDNEAREST:
+				/* round-to-nearest-even: round up on a tie
+				 * only if the low mantissa bit is set */
+				if (guardbit) {
+			   	if (stickybit || Sgl_isone_lowmantissa(opnd3))
+			      	    Sgl_increment_mantissa(opnd3);
+				}
+		}
+		if (Sgl_isone_hidden(opnd3)) dest_exponent++;
+	}
+	Sgl_set_mantissa(result,opnd3);
+
+        /* 
+         * Test for overflow
+         */
+	if (dest_exponent >= SGL_INFINITY_EXPONENT) {
+                /* trap if OVERFLOWTRAP enabled */
+                if (Is_overflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+                        Sgl_setwrapped_exponent(result,dest_exponent,ovfl);
+                        *dstptr = result;
+                        if (inexact) 
+                            if (Is_inexacttrap_enabled())
+                                return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+                            else Set_inexactflag();
+                        return(OVERFLOWEXCEPTION);
+                }
+		Set_overflowflag();
+                /* set result to infinity or largest number */
+		Sgl_setoverflow(result);
+		inexact = TRUE;
+	}
+        /* 
+         * Test for underflow
+         */
+	else if (dest_exponent <= 0) {
+                /* trap if UNDERFLOWTRAP enabled */
+                if (Is_underflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+                        Sgl_setwrapped_exponent(result,dest_exponent,unfl);
+                        *dstptr = result;
+                        if (inexact) 
+                            if (Is_inexacttrap_enabled())
+                                return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
+                            else Set_inexactflag();
+                        return(UNDERFLOWEXCEPTION);
+                }
+
+		/* Determine if should set underflow flag.
+		 * Tininess is probed by test-rounding a copy of the
+		 * mantissa: if rounding would carry into the hidden bit,
+		 * the result is not tiny. */
+		is_tiny = TRUE;
+		if (dest_exponent == 0 && inexact) {
+			switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				if (Sgl_iszero_sign(result)) {
+					Sgl_increment(opnd3);
+					if (Sgl_isone_hiddenoverflow(opnd3))
+                			    is_tiny = FALSE;
+					Sgl_decrement(opnd3);
+				}
+				break;
+			case ROUNDMINUS: 
+				if (Sgl_isone_sign(result)) {
+					Sgl_increment(opnd3);
+					if (Sgl_isone_hiddenoverflow(opnd3))
+                			    is_tiny = FALSE;
+					Sgl_decrement(opnd3);
+				}
+				break;
+			case ROUNDNEAREST:
+				if (guardbit && (stickybit || 
+				    Sgl_isone_lowmantissa(opnd3))) {
+				      	Sgl_increment(opnd3);
+					if (Sgl_isone_hiddenoverflow(opnd3))
+                			    is_tiny = FALSE;
+					Sgl_decrement(opnd3);
+				}
+				break;
+			}
+		}
+
+                /*
+                 * denormalize result or set to signed zero
+                 */
+		stickybit = inexact;
+		Sgl_denormalize(opnd3,dest_exponent,guardbit,stickybit,inexact);
+
+		/* return rounded number */ 
+		if (inexact) {
+			switch (Rounding_mode()) {
+			case ROUNDPLUS:
+				if (Sgl_iszero_sign(result)) {
+					Sgl_increment(opnd3);
+				}
+				break;
+			case ROUNDMINUS: 
+				if (Sgl_isone_sign(result))  {
+					Sgl_increment(opnd3);
+				}
+				break;
+			case ROUNDNEAREST:
+				if (guardbit && (stickybit || 
+				    Sgl_isone_lowmantissa(opnd3))) {
+			      		Sgl_increment(opnd3);
+				}
+				break;
+			}
+                	if (is_tiny) Set_underflowflag();
+                }
+		Sgl_set_exponentmantissa(result,opnd3);
+	}
+	else Sgl_set_exponent(result,dest_exponent);
+	*dstptr = result;
+	/* check for inexact */
+	if (inexact) {
+		if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+		else  Set_inexactflag();
+	}
+	return(NOEXCEPTION);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/sfmpy.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/sfmpy.c
new file mode 100644
index 0000000..afa4069
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/sfmpy.c
@@ -0,0 +1,380 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/sfmpy.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Single Precision Floating-point Multiply
+ *
+ *  External Interfaces:
+ *	sgl_fmpy(srcptr1,srcptr2,dstptr,status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+
+/*
+ *  Single Precision Floating-point Multiply
+ */
+
+/*
+ * sgl_fmpy - IEEE single-precision multiply: *dstptr = *srcptr1 * *srcptr2.
+ *
+ * Operands are raw 32-bit words manipulated through the Sgl_* macros
+ * (sgl_float.h).  The mantissa product is formed by a 4-bits-per-iteration
+ * shift-and-add loop.  Returns an exception mask for the trap dispatcher;
+ * when the relevant trap is disabled the status flag is set instead and
+ * the rounded product is stored through dstptr.
+ */
+int
+sgl_fmpy(
+    sgl_floating_point *srcptr1,
+    sgl_floating_point *srcptr2,
+    sgl_floating_point *dstptr,
+    unsigned int *status)
+{
+	register unsigned int opnd1, opnd2, opnd3, result;
+	register int dest_exponent, count;
+	register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
+	boolean is_tiny;
+
+	opnd1 = *srcptr1;
+	opnd2 = *srcptr2;
+	/* 
+	 * set sign bit of result 
+	 */
+	if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2)) Sgl_setnegativezero(result);  
+	else Sgl_setzero(result);
+	/*
+	 * check first operand for NaN's or infinity
+	 */
+	if (Sgl_isinfinity_exponent(opnd1)) {
+		if (Sgl_iszero_mantissa(opnd1)) {
+			if (Sgl_isnotnan(opnd2)) {
+				if (Sgl_iszero_exponentmantissa(opnd2)) {
+					/* 
+					 * invalid since operands are infinity 
+					 * and zero 
+					 */
+					if (Is_invalidtrap_enabled()) 
+                                		return(INVALIDEXCEPTION);
+                                	Set_invalidflag();
+                                	Sgl_makequietnan(result);
+					*dstptr = result;
+					return(NOEXCEPTION);
+				}
+				/*
+			 	 * return infinity
+			 	 */
+				Sgl_setinfinity_exponentmantissa(result);
+				*dstptr = result;
+				return(NOEXCEPTION);
+			}
+		}
+		else {
+                	/*
+                 	 * is NaN; signaling or quiet?
+                 	 */
+                	if (Sgl_isone_signaling(opnd1)) {
+                        	/* trap if INVALIDTRAP enabled */
+                        	if (Is_invalidtrap_enabled()) 
+                            		return(INVALIDEXCEPTION);
+                        	/* make NaN quiet */
+                        	Set_invalidflag();
+                        	Sgl_set_quiet(opnd1);
+                	}
+			/* 
+			 * is second operand a signaling NaN? 
+			 */
+			else if (Sgl_is_signalingnan(opnd2)) {
+                        	/* trap if INVALIDTRAP enabled */
+                        	if (Is_invalidtrap_enabled()) 
+                            		return(INVALIDEXCEPTION);
+                        	/* make NaN quiet */
+                        	Set_invalidflag();
+                        	Sgl_set_quiet(opnd2);
+                		*dstptr = opnd2;
+                		return(NOEXCEPTION);
+			}
+                	/*
+                 	 * return quiet NaN
+                 	 */
+                	*dstptr = opnd1;
+                	return(NOEXCEPTION);
+		}
+	}
+	/*
+	 * check second operand for NaN's or infinity
+	 */
+	if (Sgl_isinfinity_exponent(opnd2)) {
+		if (Sgl_iszero_mantissa(opnd2)) {
+			if (Sgl_iszero_exponentmantissa(opnd1)) {
+				/* invalid since operands are zero & infinity */
+				if (Is_invalidtrap_enabled()) 
+                                	return(INVALIDEXCEPTION);
+                                Set_invalidflag();
+                                Sgl_makequietnan(opnd2);
+				*dstptr = opnd2;
+				return(NOEXCEPTION);
+			}
+			/*
+			 * return infinity
+			 */
+			Sgl_setinfinity_exponentmantissa(result);
+			*dstptr = result;
+			return(NOEXCEPTION);
+		}
+                /*
+                 * is NaN; signaling or quiet?
+                 */
+                if (Sgl_isone_signaling(opnd2)) {
+                        /* trap if INVALIDTRAP enabled */
+                        if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+
+                        /* make NaN quiet */
+                        Set_invalidflag();
+                        Sgl_set_quiet(opnd2);
+                }
+                /*
+                 * return quiet NaN
+                 */
+                *dstptr = opnd2;
+                return(NOEXCEPTION);
+	}
+	/*
+	 * Generate exponent 
+	 */
+	dest_exponent = Sgl_exponent(opnd1) + Sgl_exponent(opnd2) - SGL_BIAS;
+
+	/*
+	 * Generate mantissa
+	 */
+	if (Sgl_isnotzero_exponent(opnd1)) {
+		/* set hidden bit */
+		Sgl_clear_signexponent_set_hidden(opnd1);
+	}
+	else {
+		/* check for zero */
+		if (Sgl_iszero_mantissa(opnd1)) {
+			Sgl_setzero_exponentmantissa(result);
+			*dstptr = result;
+			return(NOEXCEPTION);
+		}
+                /* is denormalized, adjust exponent */
+                Sgl_clear_signexponent(opnd1);
+		Sgl_leftshiftby1(opnd1);
+		Sgl_normalize(opnd1,dest_exponent);
+	}
+	/* opnd2 needs to have hidden bit set with msb in hidden bit */
+	if (Sgl_isnotzero_exponent(opnd2)) {
+		Sgl_clear_signexponent_set_hidden(opnd2);
+	}
+	else {
+		/* check for zero */
+		if (Sgl_iszero_mantissa(opnd2)) {
+			Sgl_setzero_exponentmantissa(result);
+			*dstptr = result;
+			return(NOEXCEPTION);
+		}
+                /* is denormalized; want to normalize */
+                Sgl_clear_signexponent(opnd2);
+                Sgl_leftshiftby1(opnd2);
+		Sgl_normalize(opnd2,dest_exponent);
+	}
+
+	/* Multiply two source mantissas together */
+
+	Sgl_leftshiftby4(opnd2);     /* make room for guard bits */
+	Sgl_setzero(opnd3);
+	/*
+	 * Four bits at a time are inspected in each loop, and a
+	 * simple shift and add multiply algorithm is used.
+	 * Bits shifted out of opnd3 feed the sticky bit.
+	 */
+	for (count=1;count<SGL_P;count+=4) {
+		stickybit |= Slow4(opnd3);
+		Sgl_rightshiftby4(opnd3);
+		if (Sbit28(opnd1)) Sall(opnd3) += (Sall(opnd2) << 3);
+		if (Sbit29(opnd1)) Sall(opnd3) += (Sall(opnd2) << 2);
+		if (Sbit30(opnd1)) Sall(opnd3) += (Sall(opnd2) << 1);
+		if (Sbit31(opnd1)) Sall(opnd3) += Sall(opnd2);
+		Sgl_rightshiftby4(opnd1);
+	}
+	/* make sure result is left-justified */
+	if (Sgl_iszero_sign(opnd3)) {
+		Sgl_leftshiftby1(opnd3);
+	}
+	else {
+		/* result mantissa >= 2. */
+		dest_exponent++;
+	}
+	/* check for denormalized result */
+	while (Sgl_iszero_sign(opnd3)) {
+		Sgl_leftshiftby1(opnd3);
+		dest_exponent--;
+	}
+	/*
+	 * check for guard, sticky and inexact bits
+	 */
+	stickybit |= Sgl_all(opnd3) << (SGL_BITLENGTH - SGL_EXP_LENGTH + 1);
+	guardbit = Sbit24(opnd3);
+	inexact = guardbit | stickybit;
+
+	/* re-align mantissa */
+	Sgl_rightshiftby8(opnd3);
+
+	/* 
+	 * round result 
+	 */
+	if (inexact && (dest_exponent>0 || Is_underflowtrap_enabled())) {
+		Sgl_clear_signexponent(opnd3);
+		switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				if (Sgl_iszero_sign(result)) 
+					Sgl_increment(opnd3);
+				break;
+			case ROUNDMINUS: 
+				if (Sgl_isone_sign(result)) 
+					Sgl_increment(opnd3);
+				break;
+			case ROUNDNEAREST:
+				/* round-to-nearest-even: round up on a tie
+				 * only if the low mantissa bit is set */
+				if (guardbit) {
+			   	if (stickybit || Sgl_isone_lowmantissa(opnd3))
+			      	Sgl_increment(opnd3);
+				}
+		}
+		if (Sgl_isone_hidden(opnd3)) dest_exponent++;
+	}
+	Sgl_set_mantissa(result,opnd3);
+
+        /* 
+         * Test for overflow
+         */
+	if (dest_exponent >= SGL_INFINITY_EXPONENT) {
+                /* trap if OVERFLOWTRAP enabled */
+                if (Is_overflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+			Sgl_setwrapped_exponent(result,dest_exponent,ovfl);
+			*dstptr = result;
+			if (inexact) 
+			    if (Is_inexacttrap_enabled())
+				return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+			    else Set_inexactflag();
+			return(OVERFLOWEXCEPTION);
+                }
+		inexact = TRUE;
+		Set_overflowflag();
+                /* set result to infinity or largest number */
+		Sgl_setoverflow(result);
+	}
+        /* 
+         * Test for underflow
+         */
+	else if (dest_exponent <= 0) {
+                /* trap if UNDERFLOWTRAP enabled */
+                if (Is_underflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+			Sgl_setwrapped_exponent(result,dest_exponent,unfl);
+			*dstptr = result;
+			if (inexact) 
+			    if (Is_inexacttrap_enabled())
+				return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
+			    else Set_inexactflag();
+			return(UNDERFLOWEXCEPTION);
+                }
+
+		/* Determine if should set underflow flag.
+		 * Tininess is probed by test-rounding a copy of the
+		 * mantissa: if rounding would carry into the hidden bit,
+		 * the result is not tiny. */
+		is_tiny = TRUE;
+		if (dest_exponent == 0 && inexact) {
+			switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				if (Sgl_iszero_sign(result)) {
+					Sgl_increment(opnd3);
+					if (Sgl_isone_hiddenoverflow(opnd3))
+                			    is_tiny = FALSE;
+					Sgl_decrement(opnd3);
+				}
+				break;
+			case ROUNDMINUS: 
+				if (Sgl_isone_sign(result)) {
+					Sgl_increment(opnd3);
+					if (Sgl_isone_hiddenoverflow(opnd3))
+                			    is_tiny = FALSE;
+					Sgl_decrement(opnd3);
+				}
+				break;
+			case ROUNDNEAREST:
+				if (guardbit && (stickybit || 
+				    Sgl_isone_lowmantissa(opnd3))) {
+				      	Sgl_increment(opnd3);
+					if (Sgl_isone_hiddenoverflow(opnd3))
+                			    is_tiny = FALSE;
+					Sgl_decrement(opnd3);
+				}
+				break;
+			}
+		}
+
+                /*
+                 * denormalize result or set to signed zero
+                 */
+		stickybit = inexact;
+		Sgl_denormalize(opnd3,dest_exponent,guardbit,stickybit,inexact);
+
+		/* return zero or smallest number */
+		if (inexact) {
+			switch (Rounding_mode()) {
+			case ROUNDPLUS: 
+				if (Sgl_iszero_sign(result)) {
+					Sgl_increment(opnd3);
+				}
+				break;
+			case ROUNDMINUS: 
+				if (Sgl_isone_sign(result)) {
+					Sgl_increment(opnd3);
+				}
+				break;
+			case ROUNDNEAREST:
+				if (guardbit && (stickybit || 
+				    Sgl_isone_lowmantissa(opnd3))) {
+			      		Sgl_increment(opnd3);
+				}
+				break;
+			}
+                if (is_tiny) Set_underflowflag();
+		}
+		Sgl_set_exponentmantissa(result,opnd3);
+	}
+	else Sgl_set_exponent(result,dest_exponent);
+	*dstptr = result;
+
+	/* check for inexact */
+	if (inexact) {
+		if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+		else Set_inexactflag();
+	}
+	return(NOEXCEPTION);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/sfrem.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/sfrem.c
new file mode 100644
index 0000000..3a1b7a3
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/sfrem.c
@@ -0,0 +1,290 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/sfrem.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Single Precision Floating-point Remainder
+ *
+ *  External Interfaces:
+ *	sgl_frem(srcptr1,srcptr2,dstptr,status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	<<please update with an overview of the operation of this file>>
+ *
+ * END_DESC
+*/
+
+
+
+#include "float.h"
+#include "sgl_float.h"
+
+/*
+ *  Single Precision Floating-point Remainder
+ */
+
+/*
+ * sgl_frem - IEEE single-precision remainder: *dstptr = opnd1 REM opnd2,
+ * i.e. r = opnd1 - n*opnd2 where n is opnd1/opnd2 rounded to the nearest
+ * integer (ties to even).
+ *
+ * Computed by iterative conditional subtraction of the aligned mantissas.
+ * The remainder operation is always exact, so no inexact handling is
+ * needed; returns an exception mask (NOEXCEPTION, INVALIDEXCEPTION,
+ * UNDERFLOWEXCEPTION) for the trap dispatcher.
+ */
+int
+sgl_frem (sgl_floating_point * srcptr1, sgl_floating_point * srcptr2,
+	  sgl_floating_point * dstptr, unsigned int *status)
+{
+	register unsigned int opnd1, opnd2, result;
+	register int opnd1_exponent, opnd2_exponent, dest_exponent, stepcount;
+	register boolean roundup = FALSE;
+
+	opnd1 = *srcptr1;
+	opnd2 = *srcptr2;
+	/*
+	 * check first operand for NaN's or infinity
+	 */
+	if ((opnd1_exponent = Sgl_exponent(opnd1)) == SGL_INFINITY_EXPONENT) {
+		if (Sgl_iszero_mantissa(opnd1)) {
+			if (Sgl_isnotnan(opnd2)) {
+				/* invalid since first operand is infinity */
+				if (Is_invalidtrap_enabled()) 
+                                	return(INVALIDEXCEPTION);
+                                Set_invalidflag();
+                                Sgl_makequietnan(result);
+				*dstptr = result;
+				return(NOEXCEPTION);
+			}
+		}
+		else {
+                	/*
+                 	 * is NaN; signaling or quiet?
+                 	 */
+                	if (Sgl_isone_signaling(opnd1)) {
+                        	/* trap if INVALIDTRAP enabled */
+                        	if (Is_invalidtrap_enabled()) 
+                            		return(INVALIDEXCEPTION);
+                        	/* make NaN quiet */
+                        	Set_invalidflag();
+                        	Sgl_set_quiet(opnd1);
+                	}
+			/* 
+			 * is second operand a signaling NaN? 
+			 */
+			else if (Sgl_is_signalingnan(opnd2)) {
+                        	/* trap if INVALIDTRAP enabled */
+                        	if (Is_invalidtrap_enabled()) 
+                            		return(INVALIDEXCEPTION);
+                        	/* make NaN quiet */
+                        	Set_invalidflag();
+                        	Sgl_set_quiet(opnd2);
+                		*dstptr = opnd2;
+                		return(NOEXCEPTION);
+			}
+                	/*
+                 	 * return quiet NaN
+                 	 */
+                	*dstptr = opnd1;
+                	return(NOEXCEPTION);
+		}
+	} 
+	/*
+	 * check second operand for NaN's or infinity
+	 */
+	if ((opnd2_exponent = Sgl_exponent(opnd2)) == SGL_INFINITY_EXPONENT) {
+		if (Sgl_iszero_mantissa(opnd2)) {
+			/*
+			 * return first operand
+			 */
+                	*dstptr = opnd1;
+			return(NOEXCEPTION);
+		}
+                /*
+                 * is NaN; signaling or quiet?
+                 */
+                if (Sgl_isone_signaling(opnd2)) {
+                        /* trap if INVALIDTRAP enabled */
+                        if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                        /* make NaN quiet */
+                        Set_invalidflag();
+                        Sgl_set_quiet(opnd2);
+                }
+                /*
+                 * return quiet NaN
+                 */
+                *dstptr = opnd2;
+                return(NOEXCEPTION);
+	}
+	/*
+	 * check second operand for zero
+	 */
+	if (Sgl_iszero_exponentmantissa(opnd2)) {
+		/* invalid since second operand is zero */
+		if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                Set_invalidflag();
+                Sgl_makequietnan(result);
+		*dstptr = result;
+		return(NOEXCEPTION);
+	}
+
+	/* 
+	 * get sign of result
+	 * (the remainder carries the sign of opnd1; result keeps the
+	 * full word for now, exponent/mantissa are overwritten later)
+	 */
+	result = opnd1;  
+
+	/* 
+	 * check for denormalized operands
+	 */
+	if (opnd1_exponent == 0) {
+		/* check for zero */
+		if (Sgl_iszero_mantissa(opnd1)) {
+			*dstptr = opnd1;
+			return(NOEXCEPTION);
+		}
+		/* normalize, then continue */
+		opnd1_exponent = 1;
+		Sgl_normalize(opnd1,opnd1_exponent);
+	}
+	else {
+		Sgl_clear_signexponent_set_hidden(opnd1);
+	}
+	if (opnd2_exponent == 0) {
+		/* normalize, then continue */
+		opnd2_exponent = 1;
+		Sgl_normalize(opnd2,opnd2_exponent);
+	}
+	else {
+		Sgl_clear_signexponent_set_hidden(opnd2);
+	}
+
+	/* find result exponent and divide step loop count */
+	dest_exponent = opnd2_exponent - 1;
+	stepcount = opnd1_exponent - opnd2_exponent;
+
+	/*
+	 * check for opnd1/opnd2 < 1
+	 */
+	if (stepcount < 0) {
+		/*
+		 * check for opnd1/opnd2 > 1/2
+		 *
+		 * In this case n will round to 1, so 
+		 *    r = opnd1 - opnd2 
+		 */
+		if (stepcount == -1 && Sgl_isgreaterthan(opnd1,opnd2)) {
+			Sgl_all(result) = ~Sgl_all(result);   /* set sign */
+			/* align opnd2 with opnd1 */
+			Sgl_leftshiftby1(opnd2); 
+			Sgl_subtract(opnd2,opnd1,opnd2);
+			/* now normalize */
+                	while (Sgl_iszero_hidden(opnd2)) {
+                        	Sgl_leftshiftby1(opnd2);
+                        	dest_exponent--;
+			}
+			Sgl_set_exponentmantissa(result,opnd2);
+			goto testforunderflow;
+		}
+		/*
+		 * opnd1/opnd2 <= 1/2
+		 *
+		 * In this case n will round to zero, so 
+		 *    r = opnd1
+		 */
+		Sgl_set_exponentmantissa(result,opnd1);
+		dest_exponent = opnd1_exponent;
+		goto testforunderflow;
+	}
+
+	/*
+	 * Generate result
+	 *
+	 * Do iterative subtract until remainder is less than operand 2.
+	 */
+	while (stepcount-- > 0 && Sgl_all(opnd1)) {
+		if (Sgl_isnotlessthan(opnd1,opnd2))
+			Sgl_subtract(opnd1,opnd2,opnd1);
+		Sgl_leftshiftby1(opnd1);
+	}
+	/*
+	 * Do last subtract, then determine which way to round if remainder 
+	 * is exactly 1/2 of opnd2 
+	 */
+	if (Sgl_isnotlessthan(opnd1,opnd2)) {
+		Sgl_subtract(opnd1,opnd2,opnd1);
+		roundup = TRUE;
+	}
+	if (stepcount > 0 || Sgl_iszero(opnd1)) {
+		/* division is exact, remainder is zero */
+		Sgl_setzero_exponentmantissa(result);
+		*dstptr = result;
+		return(NOEXCEPTION);
+	}
+
+	/* 
+	 * Check for cases where opnd1/opnd2 < n 
+	 *
+	 * In this case the result's sign will be opposite that of
+	 * opnd1.  The mantissa also needs some correction.
+	 */
+	Sgl_leftshiftby1(opnd1);
+	if (Sgl_isgreaterthan(opnd1,opnd2)) {
+		Sgl_invert_sign(result);
+		Sgl_subtract((opnd2<<1),opnd1,opnd1);
+	}
+	/* check for remainder being exactly 1/2 of opnd2 */
+	else if (Sgl_isequal(opnd1,opnd2) && roundup) { 
+		/* tie case: round n to even, flipping the remainder sign */
+		Sgl_invert_sign(result);
+	}
+
+	/* normalize result's mantissa */
+        while (Sgl_iszero_hidden(opnd1)) {
+                dest_exponent--;
+                Sgl_leftshiftby1(opnd1);
+        }
+	Sgl_set_exponentmantissa(result,opnd1);
+
+        /* 
+         * Test for underflow
+         */
+    testforunderflow:
+	if (dest_exponent <= 0) {
+                /* trap if UNDERFLOWTRAP enabled */
+                if (Is_underflowtrap_enabled()) {
+                        /*
+                         * Adjust bias of result
+                         */
+                        Sgl_setwrapped_exponent(result,dest_exponent,unfl);
+			*dstptr = result;
+			/* frem is always exact */
+			return(UNDERFLOWEXCEPTION);
+                }
+                /*
+                 * denormalize result or set to signed zero
+                 */
+                if (dest_exponent >= (1 - SGL_P)) {
+			Sgl_rightshift_exponentmantissa(result,1-dest_exponent);
+                }
+                else {
+			Sgl_setzero_exponentmantissa(result);
+		}
+	}
+	else Sgl_set_exponent(result,dest_exponent);
+	*dstptr = result;
+	return(NOEXCEPTION);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/sfsqrt.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/sfsqrt.c
new file mode 100644
index 0000000..4657a12
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/sfsqrt.c
@@ -0,0 +1,187 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/sfsqrt.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Single Floating-point Square Root
+ *
+ *  External Interfaces:
+ *	sgl_fsqrt(srcptr,nullptr,dstptr,status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	Implements IEEE single-precision square root via a bit-serial algorithm.
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+
+/*
+ *  Single Floating-point Square Root
+ */
+
+/*ARGSUSED*/
+unsigned int
+sgl_fsqrt(
+    sgl_floating_point *srcptr,
+    unsigned int *nullptr,
+    sgl_floating_point *dstptr,
+    unsigned int *status)
+{
+	register unsigned int src, result;
+	register int src_exponent;
+	register unsigned int newbit, sum;
+	register boolean guardbit = FALSE, even_exponent;
+
+	src = *srcptr;
+        /*
+         * check source operand for NaN or infinity
+         */
+        if ((src_exponent = Sgl_exponent(src)) == SGL_INFINITY_EXPONENT) {
+                /*
+                 * is signaling NaN?
+                 */
+                if (Sgl_isone_signaling(src)) {
+                        /* trap if INVALIDTRAP enabled */
+                        if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                        /* make NaN quiet */
+                        Set_invalidflag();
+                        Sgl_set_quiet(src);
+                }
+                /*
+                 * Return quiet NaN or positive infinity.
+		 *  Fall through to negative test if negative infinity.
+                 */
+		if (Sgl_iszero_sign(src) || Sgl_isnotzero_mantissa(src)) {
+                	*dstptr = src;
+                	return(NOEXCEPTION);
+		}
+        }
+
+        /*
+         * check for zero source operand
+         */
+	if (Sgl_iszero_exponentmantissa(src)) {
+		*dstptr = src;
+		return(NOEXCEPTION);
+	}
+
+        /*
+         * check for negative source operand 
+         */
+	if (Sgl_isone_sign(src)) {
+		/* trap if INVALIDTRAP enabled */
+		if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+		/* make NaN quiet */
+		Set_invalidflag();
+		Sgl_makequietnan(src);
+		*dstptr = src;
+		return(NOEXCEPTION);
+	}
+
+	/*
+	 * Generate result
+	 */
+	if (src_exponent > 0) {
+		even_exponent = Sgl_hidden(src);
+		Sgl_clear_signexponent_set_hidden(src);
+	}
+	else {
+		/* normalize operand */
+		Sgl_clear_signexponent(src);
+		src_exponent++;
+		Sgl_normalize(src,src_exponent);
+		even_exponent = src_exponent & 1;
+	}
+	if (even_exponent) {
+		/* exponent is even */
+		/* Pre-shift folds a factor of two into the mantissa so the final exponent halving below is exact; the matching post-shift is applied after the root loop. NOTE(review): flag name suggests the odd-exponent case -- confirm against even_exponent derivation above. */
+		Sgl_leftshiftby1(src);
+	}
+	/*
+	 * Bit-by-bit square root: for each candidate bit "newbit" (high to
+	 * low), form the trial subtrahend sum = result + newbit and accept
+	 * the bit whenever sum still fits in the remaining radicand "src".
+	 * The radicand is shifted left one place per iteration to keep the
+	 * trial value and remainder aligned.
+	 */
+	Sgl_setzero(result);
+	newbit = 1 << SGL_P;
+	while (newbit && Sgl_isnotzero(src)) {
+		Sgl_addition(result,newbit,sum);
+		if(sum <= Sgl_all(src)) {
+			/* update result */
+			Sgl_addition(result,(newbit<<1),result);
+			Sgl_subtract(src,sum,src);
+		}
+		Sgl_rightshiftby1(newbit);
+		Sgl_leftshiftby1(src);
+	}
+	/* correct exponent for pre-shift */
+	if (even_exponent) {
+		Sgl_rightshiftby1(result);
+	}
+
+	/* check for inexact */
+	if (Sgl_isnotzero(src)) {
+		if (!even_exponent && Sgl_islessthan(result,src)) 
+			Sgl_increment(result);
+		guardbit = Sgl_lowmantissa(result);
+		Sgl_rightshiftby1(result);
+
+		/*  now round result  */
+		switch (Rounding_mode()) {
+		case ROUNDPLUS:
+		     Sgl_increment(result);
+		     break;
+		case ROUNDNEAREST:
+		     /* stickybit is always true, so guardbit 
+		      * is enough to determine rounding */
+		     if (guardbit) {
+			Sgl_increment(result);
+		     }
+		     break;
+		}
+		/* increment result exponent by 1 if mantissa overflowed */
+		if (Sgl_isone_hiddenoverflow(result)) src_exponent+=2;
+
+		if (Is_inexacttrap_enabled()) {
+			Sgl_set_exponent(result,
+			 ((src_exponent-SGL_BIAS)>>1)+SGL_BIAS);
+			*dstptr = result;
+			return(INEXACTEXCEPTION);
+		}
+		else Set_inexactflag();
+	}
+	else {
+		Sgl_rightshiftby1(result);
+	}
+	Sgl_set_exponent(result,((src_exponent-SGL_BIAS)>>1)+SGL_BIAS);
+	*dstptr = result;
+	return(NOEXCEPTION);
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/sfsub.c b/src/kernel/linux/v4.14/arch/parisc/math-emu/sfsub.c
new file mode 100644
index 0000000..5f90d0f
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/sfsub.c
@@ -0,0 +1,521 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+/*
+ * BEGIN_DESC
+ *
+ *  File:
+ *	@(#)	pa/spmath/sfsub.c		$Revision: 1.1 $
+ *
+ *  Purpose:
+ *	Single_subtract: subtract two single precision values.
+ *
+ *  External Interfaces:
+ *	sgl_fsub(leftptr, rightptr, dstptr, status)
+ *
+ *  Internal Interfaces:
+ *
+ *  Theory:
+ *	Implements IEEE single-precision subtraction with rounding, denormal
+ *
+ * END_DESC
+*/
+
+
+#include "float.h"
+#include "sgl_float.h"
+
+/*
+ * Single_subtract: subtract two single precision values.
+ */
+int
+sgl_fsub(
+	    sgl_floating_point *leftptr,
+	    sgl_floating_point *rightptr,
+	    sgl_floating_point *dstptr,
+	    unsigned int *status)
+    {
+    register unsigned int left, right, result, extent;
+    register unsigned int signless_upper_left, signless_upper_right, save;
+    
+    register int result_exponent, right_exponent, diff_exponent;
+    register int sign_save, jumpsize;
+    register boolean inexact = FALSE, underflowtrap;
+        
+    /* Create local copies of the numbers */
+    left = *leftptr;
+    right = *rightptr;
+
+    /* A zero "save" helps discover equal operands (for later),  *
+     * and is used in swapping operands (if needed).             */
+    Sgl_xortointp1(left,right,/*to*/save);
+
+    /*
+     * check first operand for NaN's or infinity
+     */
+    if ((result_exponent = Sgl_exponent(left)) == SGL_INFINITY_EXPONENT)
+	{
+	if (Sgl_iszero_mantissa(left)) 
+	    {
+	    if (Sgl_isnotnan(right)) 
+		{
+		if (Sgl_isinfinity(right) && save==0) 
+		    {
+		    /* 
+		     * invalid since operands are same signed infinity's
+		     */
+		    if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+                    Set_invalidflag();
+                    Sgl_makequietnan(result);
+		    *dstptr = result;
+		    return(NOEXCEPTION);
+		    }
+		/*
+	 	 * return infinity
+	 	 */
+		*dstptr = left;
+		return(NOEXCEPTION);
+		}
+	    }
+	else 
+	    {
+            /*
+             * is NaN; signaling or quiet?
+             */
+            if (Sgl_isone_signaling(left)) 
+		{
+               	/* trap if INVALIDTRAP enabled */
+		if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+        	/* make NaN quiet */
+        	Set_invalidflag();
+        	Sgl_set_quiet(left);
+        	}
+	    /* 
+	     * is second operand a signaling NaN? 
+	     */
+	    else if (Sgl_is_signalingnan(right)) 
+		{
+        	/* trap if INVALIDTRAP enabled */
+               	if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+		/* make NaN quiet */
+		Set_invalidflag();
+		Sgl_set_quiet(right);
+		*dstptr = right;
+		return(NOEXCEPTION);
+		}
+	    /*
+ 	     * return quiet NaN
+ 	     */
+ 	    *dstptr = left;
+ 	    return(NOEXCEPTION);
+	    }
+	} /* End left NaN or Infinity processing */
+    /*
+     * check second operand for NaN's or infinity
+     */
+    if (Sgl_isinfinity_exponent(right)) 
+	{
+	if (Sgl_iszero_mantissa(right)) 
+	    {
+	    /* return infinity */
+	    Sgl_invert_sign(right);
+	    *dstptr = right;
+	    return(NOEXCEPTION);
+	    }
+        /*
+         * is NaN; signaling or quiet?
+         */
+        if (Sgl_isone_signaling(right)) 
+	    {
+            /* trap if INVALIDTRAP enabled */
+	    if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
+	    /* make NaN quiet */
+	    Set_invalidflag();
+	    Sgl_set_quiet(right);
+	    }
+	/*
+	 * return quiet NaN
+ 	 */
+	*dstptr = right;
+	return(NOEXCEPTION);
+    	} /* End right NaN or Infinity processing */
+
+    /* Invariant: Must be dealing with finite numbers */
+
+    /* Compare operands by removing the sign */
+    Sgl_copytoint_exponentmantissa(left,signless_upper_left);
+    Sgl_copytoint_exponentmantissa(right,signless_upper_right);
+
+    /* sign difference selects sub or add operation. */
+    if(Sgl_ismagnitudeless(signless_upper_left,signless_upper_right))
+	{
+	/* Set the left operand to the larger one by XOR swap *
+	 *  First finish the first word using "save"          */
+	Sgl_xorfromintp1(save,right,/*to*/right);
+	Sgl_xorfromintp1(save,left,/*to*/left);
+	result_exponent = Sgl_exponent(left);
+	Sgl_invert_sign(left);
+	}
+    /* Invariant:  left is not smaller than right. */ 
+
+    if((right_exponent = Sgl_exponent(right)) == 0)
+        {
+	/* Denormalized operands.  First look for zeroes */
+	if(Sgl_iszero_mantissa(right)) 
+	    {
+	    /* right is zero */
+	    if(Sgl_iszero_exponentmantissa(left))
+		{
+		/* Both operands are zeros */
+		Sgl_invert_sign(right);
+		if(Is_rounding_mode(ROUNDMINUS))
+		    {
+		    Sgl_or_signs(left,/*with*/right);
+		    }
+		else
+		    {
+		    Sgl_and_signs(left,/*with*/right);
+		    }
+		}
+	    else 
+		{
+		/* Left is not a zero and must be the result.  Trapped
+		 * underflows are signaled if left is denormalized.  Result
+		 * is always exact. */
+		if( (result_exponent == 0) && Is_underflowtrap_enabled() )
+		    {
+		    /* need to normalize results mantissa */
+	    	    sign_save = Sgl_signextendedsign(left);
+		    Sgl_leftshiftby1(left);
+		    Sgl_normalize(left,result_exponent);
+		    Sgl_set_sign(left,/*using*/sign_save);
+                    Sgl_setwrapped_exponent(left,result_exponent,unfl);
+		    *dstptr = left;
+		    /* inexact = FALSE */
+		    return(UNDERFLOWEXCEPTION);
+		    }
+		}
+	    *dstptr = left;
+	    return(NOEXCEPTION);
+	    }
+
+	/* Neither are zeroes */
+	Sgl_clear_sign(right);	/* Exponent is already cleared */
+	if(result_exponent == 0 )
+	    {
+	    /* Both operands are denormalized.  The result must be exact
+	     * and is simply calculated.  A sum could become normalized and a
+	     * difference could cancel to a true zero. */
+	    if( (/*signed*/int) save >= 0 )
+		{
+		Sgl_subtract(left,/*minus*/right,/*into*/result);
+		if(Sgl_iszero_mantissa(result))
+		    {
+		    if(Is_rounding_mode(ROUNDMINUS))
+			{
+			Sgl_setone_sign(result);
+			}
+		    else
+			{
+			Sgl_setzero_sign(result);
+			}
+		    *dstptr = result;
+		    return(NOEXCEPTION);
+		    }
+		}
+	    else
+		{
+		Sgl_addition(left,right,/*into*/result);
+		if(Sgl_isone_hidden(result))
+		    {
+		    *dstptr = result;
+		    return(NOEXCEPTION);
+		    }
+		}
+	    if(Is_underflowtrap_enabled())
+		{
+		/* need to normalize result */
+	    	sign_save = Sgl_signextendedsign(result);
+		Sgl_leftshiftby1(result);
+		Sgl_normalize(result,result_exponent);
+		Sgl_set_sign(result,/*using*/sign_save);
+                Sgl_setwrapped_exponent(result,result_exponent,unfl);
+		*dstptr = result;
+		/* inexact = FALSE */
+		return(UNDERFLOWEXCEPTION);
+		}
+	    *dstptr = result;
+	    return(NOEXCEPTION);
+	    }
+	right_exponent = 1;	/* Set exponent to reflect different bias
+				 * with denormalized numbers. */
+	}
+    else
+	{
+	Sgl_clear_signexponent_set_hidden(right);
+	}
+    Sgl_clear_exponent_set_hidden(left);
+    diff_exponent = result_exponent - right_exponent;
+
+    /* 
+     * Special case alignment of operands that would force alignment 
+     * beyond the extent of the extension.  A further optimization
+     * could special case this but only reduces the path length for this
+     * infrequent case.
+     */
+    if(diff_exponent > SGL_THRESHOLD)
+	{
+	diff_exponent = SGL_THRESHOLD;
+	}
+    
+    /* Align right operand by shifting to right */
+    Sgl_right_align(/*operand*/right,/*shifted by*/diff_exponent,
+      /*and lower to*/extent);
+
+    /* Treat sum and difference of the operands separately. */
+    if( (/*signed*/int) save >= 0 )
+	{
+	/*
+	 * Difference of the two operands.  There can be no overflow.  A
+	 * borrow can occur out of the hidden bit and force a post
+	 * normalization phase.
+	 */
+	Sgl_subtract_withextension(left,/*minus*/right,/*with*/extent,/*into*/result);
+	if(Sgl_iszero_hidden(result))
+	    {
+	    /* Handle normalization */
+	    /* A straightforward algorithm would now shift the result
+	     * and extension left until the hidden bit becomes one.  Not
+	     * all of the extension bits need participate in the shift.
+	     * Only the two most significant bits (round and guard) are
+	     * needed.  If only a single shift is needed then the guard
+	     * bit becomes a significant low order bit and the extension
+	     * must participate in the rounding.  If more than a single 
+	     * shift is needed, then all bits to the right of the guard 
+	     * bit are zeros, and the guard bit may or may not be zero. */
+	    sign_save = Sgl_signextendedsign(result);
+            Sgl_leftshiftby1_withextent(result,extent,result);
+
+            /* Need to check for a zero result.  The sign and exponent
+	     * fields have already been zeroed.  The more efficient test
+	     * of the full object can be used.
+	     */
+    	    if(Sgl_iszero(result))
+		/* Must have been "x-x" or "x+(-x)". */
+		{
+		if(Is_rounding_mode(ROUNDMINUS)) Sgl_setone_sign(result);
+		*dstptr = result;
+		return(NOEXCEPTION);
+		}
+	    result_exponent--;
+	    /* Look to see if normalization is finished. */
+	    if(Sgl_isone_hidden(result))
+		{
+		if(result_exponent==0)
+		    {
+		    /* Denormalized, exponent should be zero.  Left operand *
+ 		     * was normalized, so extent (guard, round) was zero    */
+		    goto underflow;
+		    }
+		else
+		    {
+		    /* No further normalization is needed. */
+		    Sgl_set_sign(result,/*using*/sign_save);
+	    	    Ext_leftshiftby1(extent);
+		    goto round;
+		    }
+		}
+
+	    /* Check for denormalized, exponent should be zero.  Left    *
+	     * operand was normalized, so extent (guard, round) was zero */
+	    if(!(underflowtrap = Is_underflowtrap_enabled()) &&
+	       result_exponent==0) goto underflow;
+
+	    /* Shift extension to complete one bit of normalization and
+	     * update exponent. */
+	    Ext_leftshiftby1(extent);
+
+	    /* Discover first one bit to determine shift amount.  Use a
+	     * modified binary search.  We have already shifted the result
+	     * one position right and still not found a one so the remainder
+	     * of the extension must be zero and simplifies rounding. */
+	    /* Scan bytes */
+	    while(Sgl_iszero_hiddenhigh7mantissa(result))
+		{
+		Sgl_leftshiftby8(result);
+		if((result_exponent -= 8) <= 0  && !underflowtrap)
+		    goto underflow;
+		}
+	    /* Now narrow it down to the nibble */
+	    if(Sgl_iszero_hiddenhigh3mantissa(result))
+		{
+		/* The lower nibble contains the normalizing one */
+		Sgl_leftshiftby4(result);
+		if((result_exponent -= 4) <= 0 && !underflowtrap)
+		    goto underflow;
+		}
+	    /* Select case where first bit is set (already normalized)
+	     * otherwise select the proper shift. */
+	    if((jumpsize = Sgl_hiddenhigh3mantissa(result)) > 7)
+		{
+		/* Already normalized */
+		if(result_exponent <= 0) goto underflow;
+		Sgl_set_sign(result,/*using*/sign_save);
+		Sgl_set_exponent(result,/*using*/result_exponent);
+		*dstptr = result;
+		return(NOEXCEPTION);
+		}
+	    Sgl_sethigh4bits(result,/*using*/sign_save);
+	    switch(jumpsize) 
+		{
+		case 1:
+		    {
+		    Sgl_leftshiftby3(result);
+		    result_exponent -= 3;
+		    break;
+		    }
+		case 2:
+		case 3:
+		    {
+		    Sgl_leftshiftby2(result);
+		    result_exponent -= 2;
+		    break;
+		    }
+		case 4:
+		case 5:
+		case 6:
+		case 7:
+		    {
+		    Sgl_leftshiftby1(result);
+		    result_exponent -= 1;
+		    break;
+		    }
+		}
+	    if(result_exponent > 0) 
+		{
+		Sgl_set_exponent(result,/*using*/result_exponent);
+		*dstptr = result;	/* Sign bit is already set */
+		return(NOEXCEPTION);
+		}
+	    /* Fixup potential underflows */
+	  underflow:
+	    if(Is_underflowtrap_enabled())
+		{
+		Sgl_set_sign(result,sign_save);
+                Sgl_setwrapped_exponent(result,result_exponent,unfl);
+		*dstptr = result;
+		/* inexact = FALSE */
+		return(UNDERFLOWEXCEPTION);
+		}
+	    /*
+	     * Since we cannot get an inexact denormalized result,
+	     * we can now return.
+	     */
+	    Sgl_right_align(result,/*by*/(1-result_exponent),extent);
+	    Sgl_clear_signexponent(result);
+	    Sgl_set_sign(result,sign_save);
+	    *dstptr = result;
+	    return(NOEXCEPTION);
+	    } /* end if(hidden...)... */
+	/* Fall through and round */
+	} /* end if(save >= 0)... */
+    else 
+	{
+	/* Add magnitudes */
+	Sgl_addition(left,right,/*to*/result);
+	if(Sgl_isone_hiddenoverflow(result))
+	    {
+	    /* Prenormalization required. */
+	    Sgl_rightshiftby1_withextent(result,extent,extent);
+	    Sgl_arithrightshiftby1(result);
+	    result_exponent++;
+	    } /* end if hiddenoverflow... */
+	} /* end else ...sub magnitudes... */
+    
+    /* Round the result.  If the extension is all zeros,then the result is
+     * exact.  Otherwise round in the correct direction.  No underflow is
+     * possible. If a postnormalization is necessary, then the mantissa is
+     * all zeros so no shift is needed. */
+  round:
+    if(Ext_isnotzero(extent))
+	{
+	inexact = TRUE;
+	switch(Rounding_mode())
+	    {
+	    case ROUNDNEAREST: /* The default. */
+	    if(Ext_isone_sign(extent))
+		{
+		/* at least 1/2 ulp */
+		if(Ext_isnotzero_lower(extent)  ||
+		  Sgl_isone_lowmantissa(result))
+		    {
+		    /* either exactly half way and odd or more than 1/2ulp */
+		    Sgl_increment(result);
+		    }
+		}
+	    break;
+
+	    case ROUNDPLUS:
+	    if(Sgl_iszero_sign(result))
+		{
+		/* Round up positive results */
+		Sgl_increment(result);
+		}
+	    break;
+	    
+	    case ROUNDMINUS:
+	    if(Sgl_isone_sign(result))
+		{
+		/* Round down negative results */
+		Sgl_increment(result);
+		}
+	    /* FALLTHROUGH into ROUNDZERO -- truncation needs no action */
+	    case ROUNDZERO:;
+	    /* truncate is simple */
+	    } /* end switch... */
+	if(Sgl_isone_hiddenoverflow(result)) result_exponent++;
+	}
+    if(result_exponent == SGL_INFINITY_EXPONENT)
+        {
+        /* Overflow */
+        if(Is_overflowtrap_enabled())
+	    {
+	    Sgl_setwrapped_exponent(result,result_exponent,ovfl);
+	    *dstptr = result;
+	    if (inexact)
+		if (Is_inexacttrap_enabled())
+		    return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
+		else Set_inexactflag();
+	    return(OVERFLOWEXCEPTION);
+	    }
+        else
+	    {
+	    Set_overflowflag();
+	    inexact = TRUE;
+	    Sgl_setoverflow(result);
+	    }
+	}
+    else Sgl_set_exponent(result,result_exponent);
+    *dstptr = result;
+    if(inexact) 
+	if(Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
+	else Set_inexactflag();
+    return(NOEXCEPTION);
+    }
diff --git a/src/kernel/linux/v4.14/arch/parisc/math-emu/sgl_float.h b/src/kernel/linux/v4.14/arch/parisc/math-emu/sgl_float.h
new file mode 100644
index 0000000..4ee4cc9
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/math-emu/sgl_float.h
@@ -0,0 +1,486 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * Floating-point emulation code
+ *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2, or (at your option)
+ *    any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifdef __NO_PA_HDRS
+    PA header file -- do not include this header file for non-PA builds.
+#endif
+
+/* 32-bit word grabbing functions */
+#define Sgl_firstword(value) Sall(value)
+#define Sgl_secondword(value) dummy_location
+#define Sgl_thirdword(value) dummy_location
+#define Sgl_fourthword(value) dummy_location
+
+#define Sgl_sign(object) Ssign(object)
+#define Sgl_exponent(object) Sexponent(object)
+#define Sgl_signexponent(object) Ssignexponent(object)
+#define Sgl_mantissa(object) Smantissa(object)
+#define Sgl_exponentmantissa(object) Sexponentmantissa(object)
+#define Sgl_all(object) Sall(object)
+
+/* sgl_and_signs ANDs the sign bits of each argument and puts the result
+ * into the first argument. sgl_or_signs ors those same sign bits */
+#define Sgl_and_signs( src1dst, src2)		\
+    Sall(src1dst) = (Sall(src2)|~((unsigned int)1<<31)) & Sall(src1dst)
+#define Sgl_or_signs( src1dst, src2)		\
+    Sall(src1dst) = (Sall(src2)&((unsigned int)1<<31)) | Sall(src1dst)
+
+/* The hidden bit is always the low bit of the exponent */
+#define Sgl_clear_exponent_set_hidden(srcdst) Deposit_sexponent(srcdst,1)
+#define Sgl_clear_signexponent_set_hidden(srcdst) \
+    Deposit_ssignexponent(srcdst,1)
+#define Sgl_clear_sign(srcdst) Sall(srcdst) &= ~((unsigned int)1<<31)
+#define Sgl_clear_signexponent(srcdst) Sall(srcdst) &= 0x007fffff
+
+/* varamount must be less than 32 for the next three functions */
+#define Sgl_rightshift(srcdst, varamount)	\
+    Sall(srcdst) >>= varamount
+#define Sgl_leftshift(srcdst, varamount)	\
+    Sall(srcdst) <<= varamount
+#define Sgl_rightshift_exponentmantissa(srcdst, varamount) \
+    Sall(srcdst) = \
+	(Sexponentmantissa(srcdst) >> varamount) | \
+	(Sall(srcdst) & ((unsigned int)1<<31))
+
+#define Sgl_leftshiftby1_withextent(left,right,result) \
+    Shiftdouble(Sall(left),Extall(right),31,Sall(result))
+    
+#define Sgl_rightshiftby1_withextent(left,right,dst)		\
+    Shiftdouble(Sall(left),Extall(right),1,Extall(right))
+#define Sgl_arithrightshiftby1(srcdst)	\
+    Sall(srcdst) = (int)Sall(srcdst) >> 1
+    
+/* Sign extend the sign bit with an integer destination */
+#define Sgl_signextendedsign(value) Ssignedsign(value)
+
+#define Sgl_isone_hidden(sgl_value) (Shidden(sgl_value))
+#define Sgl_increment(sgl_value) Sall(sgl_value) += 1
+#define Sgl_increment_mantissa(sgl_value) \
+    Deposit_smantissa(sgl_value,sgl_value+1)
+#define Sgl_decrement(sgl_value) Sall(sgl_value) -= 1
+
+#define Sgl_isone_sign(sgl_value) (Is_ssign(sgl_value)!=0)
+#define Sgl_isone_hiddenoverflow(sgl_value) \
+    (Is_shiddenoverflow(sgl_value)!=0)
+#define Sgl_isone_lowmantissa(sgl_value) (Is_slow(sgl_value)!=0)
+#define Sgl_isone_signaling(sgl_value) (Is_ssignaling(sgl_value)!=0)
+#define Sgl_is_signalingnan(sgl_value) (Ssignalingnan(sgl_value)==0x1ff)
+#define Sgl_isnotzero(sgl_value) (Sall(sgl_value)!=0)
+#define Sgl_isnotzero_hiddenhigh7mantissa(sgl_value) \
+    (Shiddenhigh7mantissa(sgl_value)!=0)
+#define Sgl_isnotzero_low4(sgl_value) (Slow4(sgl_value)!=0)
+#define Sgl_isnotzero_exponent(sgl_value) (Sexponent(sgl_value)!=0)
+#define Sgl_isnotzero_mantissa(sgl_value) (Smantissa(sgl_value)!=0)
+#define Sgl_isnotzero_exponentmantissa(sgl_value) \
+    (Sexponentmantissa(sgl_value)!=0)
+#define Sgl_iszero(sgl_value) (Sall(sgl_value)==0)
+#define Sgl_iszero_signaling(sgl_value) (Is_ssignaling(sgl_value)==0)
+#define Sgl_iszero_hidden(sgl_value) (Is_shidden(sgl_value)==0)
+#define Sgl_iszero_hiddenoverflow(sgl_value) \
+    (Is_shiddenoverflow(sgl_value)==0)
+#define Sgl_iszero_hiddenhigh3mantissa(sgl_value) \
+    (Shiddenhigh3mantissa(sgl_value)==0)
+#define Sgl_iszero_hiddenhigh7mantissa(sgl_value) \
+    (Shiddenhigh7mantissa(sgl_value)==0)
+#define Sgl_iszero_sign(sgl_value) (Is_ssign(sgl_value)==0)
+#define Sgl_iszero_exponent(sgl_value) (Sexponent(sgl_value)==0)
+#define Sgl_iszero_mantissa(sgl_value) (Smantissa(sgl_value)==0)
+#define Sgl_iszero_exponentmantissa(sgl_value) \
+    (Sexponentmantissa(sgl_value)==0)
+#define Sgl_isinfinity_exponent(sgl_value) 		\
+    (Sgl_exponent(sgl_value)==SGL_INFINITY_EXPONENT)
+#define Sgl_isnotinfinity_exponent(sgl_value) 		\
+    (Sgl_exponent(sgl_value)!=SGL_INFINITY_EXPONENT)
+#define Sgl_isinfinity(sgl_value)			\
+    (Sgl_exponent(sgl_value)==SGL_INFINITY_EXPONENT &&	\
+    Sgl_mantissa(sgl_value)==0)
+#define Sgl_isnan(sgl_value)				\
+    (Sgl_exponent(sgl_value)==SGL_INFINITY_EXPONENT &&	\
+    Sgl_mantissa(sgl_value)!=0)
+#define Sgl_isnotnan(sgl_value)				\
+    (Sgl_exponent(sgl_value)!=SGL_INFINITY_EXPONENT ||	\
+    Sgl_mantissa(sgl_value)==0)
+#define Sgl_islessthan(sgl_op1,sgl_op2)			\
+    (Sall(sgl_op1) < Sall(sgl_op2))
+#define Sgl_isgreaterthan(sgl_op1,sgl_op2)		\
+    (Sall(sgl_op1) > Sall(sgl_op2))
+#define Sgl_isnotlessthan(sgl_op1,sgl_op2)		\
+    (Sall(sgl_op1) >= Sall(sgl_op2))
+#define Sgl_isequal(sgl_op1,sgl_op2)			\
+    (Sall(sgl_op1) == Sall(sgl_op2))
+
+#define Sgl_leftshiftby8(sgl_value) \
+    Sall(sgl_value) <<= 8
+#define Sgl_leftshiftby4(sgl_value) \
+    Sall(sgl_value) <<= 4
+#define Sgl_leftshiftby3(sgl_value) \
+    Sall(sgl_value) <<= 3
+#define Sgl_leftshiftby2(sgl_value) \
+    Sall(sgl_value) <<= 2
+#define Sgl_leftshiftby1(sgl_value) \
+    Sall(sgl_value) <<= 1
+#define Sgl_rightshiftby1(sgl_value) \
+    Sall(sgl_value) >>= 1
+#define Sgl_rightshiftby4(sgl_value) \
+    Sall(sgl_value) >>= 4
+#define Sgl_rightshiftby8(sgl_value) \
+    Sall(sgl_value) >>= 8
+    
+#define Sgl_ismagnitudeless(signlessleft,signlessright)			\
+/*  unsigned int signlessleft, signlessright; */			\
+      (signlessleft < signlessright)  
+    
+
+#define Sgl_copytoint_exponentmantissa(source,dest)     \
+    dest = Sexponentmantissa(source)
+
+/* A quiet NaN has the high mantissa bit clear and at least one other (in this
+ * case the adjacent bit) bit set. */
+#define Sgl_set_quiet(sgl_value) Deposit_shigh2mantissa(sgl_value,1)
+#define Sgl_set_exponent(sgl_value,exp) Deposit_sexponent(sgl_value,exp)
+
+#define Sgl_set_mantissa(dest,value) Deposit_smantissa(dest,value)
+#define Sgl_set_exponentmantissa(dest,value) \
+    Deposit_sexponentmantissa(dest,value)
+
+/*  An infinity is represented with the max exponent and a zero mantissa */
+#define Sgl_setinfinity_exponent(sgl_value) \
+    Deposit_sexponent(sgl_value,SGL_INFINITY_EXPONENT)
+#define Sgl_setinfinity_exponentmantissa(sgl_value)	\
+    Deposit_sexponentmantissa(sgl_value, \
+	(SGL_INFINITY_EXPONENT << (32-(1+SGL_EXP_LENGTH))))
+#define Sgl_setinfinitypositive(sgl_value)		\
+    Sall(sgl_value) = (SGL_INFINITY_EXPONENT << (32-(1+SGL_EXP_LENGTH)))
+#define Sgl_setinfinitynegative(sgl_value)		\
+    Sall(sgl_value) = (SGL_INFINITY_EXPONENT << (32-(1+SGL_EXP_LENGTH))) \
+    | ((unsigned int)1<<31)
+#define Sgl_setinfinity(sgl_value,sign)					\
+    Sall(sgl_value) = (SGL_INFINITY_EXPONENT << (32-(1+SGL_EXP_LENGTH))) | \
+     ((unsigned int)sign << 31)
+#define Sgl_sethigh4bits(sgl_value, extsign)  \
+    Deposit_shigh4(sgl_value,extsign)
+#define Sgl_set_sign(sgl_value,sign) Deposit_ssign(sgl_value,sign)
+#define Sgl_invert_sign(sgl_value)  \
+    Deposit_ssign(sgl_value,~Ssign(sgl_value))
+#define Sgl_setone_sign(sgl_value) Deposit_ssign(sgl_value,1)
+#define Sgl_setone_lowmantissa(sgl_value) Deposit_slow(sgl_value,1)
+#define Sgl_setzero_sign(sgl_value)  Sall(sgl_value) &= 0x7fffffff
+#define Sgl_setzero_exponent(sgl_value) Sall(sgl_value) &= 0x807fffff
+#define Sgl_setzero_mantissa(sgl_value) Sall(sgl_value) &= 0xff800000
+#define Sgl_setzero_exponentmantissa(sgl_value)  Sall(sgl_value) &= 0x80000000
+#define Sgl_setzero(sgl_value) Sall(sgl_value) = 0
+#define Sgl_setnegativezero(sgl_value) Sall(sgl_value) = (unsigned int)1 << 31
+
+/* Exponent bias-wrap for trapped results: pass the token "ovfl" (subtracts SGL_WRAP) or "unfl" (adds SGL_WRAP) as "op" */
+#define ovfl -
+#define unfl +
+#define Sgl_setwrapped_exponent(sgl_value,exponent,op) \
+    Deposit_sexponent(sgl_value,(exponent op SGL_WRAP))
+
+#define Sgl_setlargestpositive(sgl_value) 				\
+    Sall(sgl_value) = ((SGL_EMAX+SGL_BIAS) << (32-(1+SGL_EXP_LENGTH)))	\
+                      | ((1<<(32-(1+SGL_EXP_LENGTH))) - 1 )
+#define Sgl_setlargestnegative(sgl_value)				\
+    Sall(sgl_value) = ((SGL_EMAX+SGL_BIAS) << (32-(1+SGL_EXP_LENGTH)))	\
+                      | ((1<<(32-(1+SGL_EXP_LENGTH))) - 1 )		\
+		      | ((unsigned int)1<<31)
+
+#define Sgl_setnegativeinfinity(sgl_value)	\
+    Sall(sgl_value) = 				\
+    ((1<<SGL_EXP_LENGTH) | SGL_INFINITY_EXPONENT) << (32-(1+SGL_EXP_LENGTH))
+#define Sgl_setlargest(sgl_value,sign) 					\
+    Sall(sgl_value) = (unsigned int)sign << 31 |			\
+        (((SGL_EMAX+SGL_BIAS) << (32-(1+SGL_EXP_LENGTH)))		\
+	  | ((1 << (32-(1+SGL_EXP_LENGTH))) - 1 ))
+#define Sgl_setlargest_exponentmantissa(sgl_value)			\
+    Sall(sgl_value) = Sall(sgl_value) & ((unsigned int)1<<31) |		\
+        (((SGL_EMAX+SGL_BIAS) << (32-(1+SGL_EXP_LENGTH)))		\
+	  | ((1 << (32-(1+SGL_EXP_LENGTH))) - 1 ))
+
+/* The high bit is always zero so arithmetic or logical shifts will work. */
+#define Sgl_right_align(srcdst,shift,extent)				\
+    /* sgl_floating_point srcdst; int shift; extension extent */	\
+    if (shift < 32) {							\
+	Extall(extent) = Sall(srcdst) << (32-(shift));			\
+    	Sall(srcdst) >>= shift;						\
+    }									\
+    else {								\
+	Extall(extent) = Sall(srcdst);					\
+	Sall(srcdst) = 0;						\
+    }
+#define Sgl_hiddenhigh3mantissa(sgl_value) Shiddenhigh3mantissa(sgl_value)
+#define Sgl_hidden(sgl_value) Shidden(sgl_value)
+#define Sgl_lowmantissa(sgl_value) Slow(sgl_value)
+
+/* The left argument is never smaller than the right argument */
+#define Sgl_subtract(sgl_left,sgl_right,sgl_result) \
+    Sall(sgl_result) = Sall(sgl_left) - Sall(sgl_right)
+
+/* Subtract right augmented with extension from left augmented with zeros and
+ * store into result and extension. */
+#define Sgl_subtract_withextension(left,right,extent,result)		\
+    /* sgl_floating_point left,right,result; extension extent */	\
+  Sgl_subtract(left,right,result);					\
+  if((Extall(extent) = 0-Extall(extent)))				\
+      Sall(result) = Sall(result)-1
+
+#define Sgl_addition(sgl_left,sgl_right,sgl_result) \
+    Sall(sgl_result) = Sall(sgl_left) + Sall(sgl_right)
+
+#define Sgl_xortointp1(left,right,result)			\
+    result = Sall(left) XOR Sall(right);
+
+#define Sgl_xorfromintp1(left,right,result)			\
+    Sall(result) = left XOR Sall(right)
+
+/* Need to Initialize */
+#define Sgl_makequietnan(dest)						\
+    Sall(dest) = ((SGL_EMAX+SGL_BIAS)+1)<< (32-(1+SGL_EXP_LENGTH))	\
+                 | (1<<(32-(1+SGL_EXP_LENGTH+2)))
+#define Sgl_makesignalingnan(dest)					\
+    Sall(dest) = ((SGL_EMAX+SGL_BIAS)+1)<< (32-(1+SGL_EXP_LENGTH))	\
+                 | (1<<(32-(1+SGL_EXP_LENGTH+1)))
+
+/*
+ * Sgl_normalize: left-shift the hidden-bit-extended mantissa of 'sgl_opnd'
+ * until the hidden bit is 1, decrementing 'exponent' by the total shift
+ * count.  Shifts by 8, then 4, then 1 for speed.  The operand's mantissa
+ * must be non-zero, otherwise the final loop never terminates.
+ */
+#define Sgl_normalize(sgl_opnd,exponent)			\
+	while(Sgl_iszero_hiddenhigh7mantissa(sgl_opnd)) {	\
+		Sgl_leftshiftby8(sgl_opnd);			\
+		exponent -= 8;					\
+	}							\
+	if(Sgl_iszero_hiddenhigh3mantissa(sgl_opnd)) {		\
+		Sgl_leftshiftby4(sgl_opnd);			\
+		exponent -= 4;					\
+	}							\
+	while(Sgl_iszero_hidden(sgl_opnd)) {			\
+		Sgl_leftshiftby1(sgl_opnd);			\
+		exponent -= 1;					\
+	}
+
+/*
+ * Sgl_setoverflow: produce the IEEE-754 overflow result for the current
+ * rounding mode.  Round-to-plus saturates negative operands at the largest
+ * finite negative value and positive operands at +infinity; round-to-minus
+ * is the mirror image; round-to-nearest always yields infinity and
+ * round-to-zero always yields the largest finite magnitude.  The sign of
+ * 'sgl_opnd' is preserved by the *_exponentmantissa variants.
+ */
+#define Sgl_setoverflow(sgl_opnd)				\
+	/* set result to infinity or largest number */		\
+	switch (Rounding_mode()) {				\
+		case ROUNDPLUS:					\
+			if (Sgl_isone_sign(sgl_opnd)) {		\
+				Sgl_setlargestnegative(sgl_opnd); \
+			}					\
+			else {					\
+				Sgl_setinfinitypositive(sgl_opnd); \
+			}					\
+			break;					\
+		case ROUNDMINUS:				\
+			if (Sgl_iszero_sign(sgl_opnd)) {	\
+				Sgl_setlargestpositive(sgl_opnd); \
+			}					\
+			else {					\
+				Sgl_setinfinitynegative(sgl_opnd); \
+			}					\
+			break;					\
+		case ROUNDNEAREST:				\
+			Sgl_setinfinity_exponentmantissa(sgl_opnd); \
+			break;					\
+		case ROUNDZERO:					\
+			Sgl_setlargest_exponentmantissa(sgl_opnd); \
+	}
+
+/*
+ * Sgl_denormalize: convert 'opnd' (with hidden bit restored) into a
+ * denormalized result for the given (non-positive) 'exponent', collecting
+ * the guard bit and sticky bits shifted out so the caller can round.
+ * When the exponent is below the representable denormal range the whole
+ * mantissa goes into 'sticky' and the result is zero.
+ * 'inexact' is set whenever any non-zero bits were discarded.
+ */
+#define Sgl_denormalize(opnd,exponent,guard,sticky,inexact)		\
+	Sgl_clear_signexponent_set_hidden(opnd);			\
+	if (exponent >= (1 - SGL_P)) {					\
+		guard = (Sall(opnd) >> -exponent) & 1;			\
+		if (exponent < 0) sticky |= Sall(opnd) << (32+exponent); \
+		inexact = guard | sticky;				\
+		Sall(opnd) >>= (1-exponent);				\
+	}								\
+	else {								\
+		guard = 0;						\
+		sticky |= Sall(opnd);					\
+		inexact = sticky;					\
+		Sgl_setzero(opnd);					\
+	}
+
+/* 
+ * The fused multiply add instructions requires a single extended format,
+ * with 48 bits of mantissa.
+ */
+#define SGLEXT_THRESHOLD 48
+
+#define Sglext_setzero(valA,valB)	\
+    Sextallp1(valA) = 0; Sextallp2(valB) = 0
+
+#define Sglext_isnotzero_mantissap2(valB) (Sextallp2(valB)!=0)
+#define Sglext_isone_lowp1(val) (Sextlowp1(val)!=0)
+#define Sglext_isone_highp2(val) (Sexthighp2(val)!=0)
+#define Sglext_isnotzero_low31p2(val) (Sextlow31p2(val)!=0)
+#define Sglext_iszero(valA,valB) (Sextallp1(valA)==0 && Sextallp2(valB)==0)
+
+#define Sgl_copytoptr(src,destptr) *destptr = src
+#define Sgl_copyfromptr(srcptr,dest) dest = *srcptr
+#define Sglext_copy(srca,srcb,desta,destb) \
+    Sextallp1(desta) = Sextallp1(srca);	\
+    Sextallp2(destb) = Sextallp2(srcb)
+#define Sgl_copyto_sglext(src1,dest1,dest2) \
+	Sextallp1(dest1) = Sall(src1); Sextallp2(dest2) = 0
+
+#define Sglext_swap_lower(leftp2,rightp2)  \
+    Sextallp2(leftp2)  = Sextallp2(leftp2) XOR Sextallp2(rightp2);  \
+    Sextallp2(rightp2) = Sextallp2(leftp2) XOR Sextallp2(rightp2);  \
+    Sextallp2(leftp2)  = Sextallp2(leftp2) XOR Sextallp2(rightp2)
+
+#define Sglext_setone_lowmantissap2(value) Deposit_dlowp2(value,1)
+
+/* The high bit is always zero so arithmetic or logical shifts will work. */
+/*
+ * Sglext_right_align: right-shift the 64-bit extended operand held in the
+ * pair (srcdstA:high word, srcdstB:low word) by 'shift' bits.  Any non-zero
+ * bits shifted out are ORed into the low mantissa bit (sticky bit) so that
+ * rounding still sees them.  Shifts of 64 or more are not handled here;
+ * presumably callers guarantee shift < 64 -- TODO confirm at call sites.
+ */
+#define Sglext_right_align(srcdstA,srcdstB,shift) \
+  {int shiftamt, sticky;						\
+    shiftamt = shift % 32;						\
+    sticky = 0;								\
+    switch (shift/32) {							\
+     case 0: if (shiftamt > 0) {					\
+	        sticky = Sextallp2(srcdstB) << 32 - (shiftamt);		\
+                Variable_shift_double(Sextallp1(srcdstA),		\
+		 Sextallp2(srcdstB),shiftamt,Sextallp2(srcdstB));	\
+	        Sextallp1(srcdstA) >>= shiftamt;			\
+	     }								\
+	     break;							\
+     case 1: if (shiftamt > 0) {					\
+	        sticky = (Sextallp1(srcdstA) << 32 - (shiftamt)) |	\
+			 Sextallp2(srcdstB);				\
+	     }								\
+	     else {							\
+		sticky = Sextallp2(srcdstB);				\
+	     }								\
+	     Sextallp2(srcdstB) = Sextallp1(srcdstA) >> shiftamt;	\
+	     Sextallp1(srcdstA) = 0;					\
+	     break;							\
+    }									\
+    if (sticky) Sglext_setone_lowmantissap2(srcdstB);			\
+  }
+
+/* The left argument is never smaller than the right argument */
+#define Sglext_subtract(lefta,leftb,righta,rightb,resulta,resultb) \
+    if( Sextallp2(rightb) > Sextallp2(leftb) ) Sextallp1(lefta)--; \
+    Sextallp2(resultb) = Sextallp2(leftb) - Sextallp2(rightb);	\
+    Sextallp1(resulta) = Sextallp1(lefta) - Sextallp1(righta)
+
+#define Sglext_addition(lefta,leftb,righta,rightb,resulta,resultb) \
+    /* If the sum of the low words is less than either source, then \
+     * an overflow into the next word occurred. */ \
+    if ((Sextallp2(resultb) = Sextallp2(leftb)+Sextallp2(rightb)) < \
+        Sextallp2(rightb)) \
+	    Sextallp1(resulta) = Sextallp1(lefta)+Sextallp1(righta)+1; \
+    else Sextallp1(resulta) = Sextallp1(lefta)+Sextallp1(righta)
+
+
+#define Sglext_arithrightshiftby1(srcdstA,srcdstB)	\
+    Shiftdouble(Sextallp1(srcdstA),Sextallp2(srcdstB),1,Sextallp2(srcdstB)); \
+    Sextallp1(srcdstA) = (int)Sextallp1(srcdstA) >> 1
+   
+#define Sglext_leftshiftby8(valA,valB) \
+    Shiftdouble(Sextallp1(valA),Sextallp2(valB),24,Sextallp1(valA)); \
+    Sextallp2(valB) <<= 8
+#define Sglext_leftshiftby4(valA,valB) \
+    Shiftdouble(Sextallp1(valA),Sextallp2(valB),28,Sextallp1(valA)); \
+    Sextallp2(valB) <<= 4
+#define Sglext_leftshiftby3(valA,valB) \
+    Shiftdouble(Sextallp1(valA),Sextallp2(valB),29,Sextallp1(valA)); \
+    Sextallp2(valB) <<= 3
+#define Sglext_leftshiftby2(valA,valB) \
+    Shiftdouble(Sextallp1(valA),Sextallp2(valB),30,Sextallp1(valA)); \
+    Sextallp2(valB) <<= 2
+#define Sglext_leftshiftby1(valA,valB) \
+    Shiftdouble(Sextallp1(valA),Sextallp2(valB),31,Sextallp1(valA)); \
+    Sextallp2(valB) <<= 1
+
+#define Sglext_rightshiftby4(valueA,valueB) \
+    Shiftdouble(Sextallp1(valueA),Sextallp2(valueB),4,Sextallp2(valueB)); \
+    Sextallp1(valueA) >>= 4
+#define Sglext_rightshiftby3(valueA,valueB) \
+    Shiftdouble(Sextallp1(valueA),Sextallp2(valueB),3,Sextallp2(valueB)); \
+    Sextallp1(valueA) >>= 3
+#define Sglext_rightshiftby1(valueA,valueB) \
+    Shiftdouble(Sextallp1(valueA),Sextallp2(valueB),1,Sextallp2(valueB)); \
+    Sextallp1(valueA) >>= 1
+
+#define Sglext_xortointp1(left,right,result) Sgl_xortointp1(left,right,result)
+#define Sglext_xorfromintp1(left,right,result) \
+	Sgl_xorfromintp1(left,right,result)
+#define Sglext_copytoint_exponentmantissa(src,dest) \
+	Sgl_copytoint_exponentmantissa(src,dest)
+#define Sglext_ismagnitudeless(signlessleft,signlessright) \
+	Sgl_ismagnitudeless(signlessleft,signlessright)
+
+#define Sglext_set_sign(dbl_value,sign)  Sgl_set_sign(dbl_value,sign)  
+#define Sglext_clear_signexponent_set_hidden(srcdst) \
+	Sgl_clear_signexponent_set_hidden(srcdst) 
+#define Sglext_clear_signexponent(srcdst) Sgl_clear_signexponent(srcdst) 
+#define Sglext_clear_sign(srcdst) Sgl_clear_sign(srcdst) 
+#define Sglext_isone_hidden(dbl_value) Sgl_isone_hidden(dbl_value) 
+
+/*
+ * Sglext_denormalize: denormalize the 64-bit extended operand pair
+ * (opndp1:high, opndp2:low) for the given non-positive 'exponent'.
+ * First decides whether the result is "tiny" for the current rounding
+ * mode (rounding may carry into the hidden bit and un-tiny the result),
+ * then shifts the mantissa right, folding discarded bits into the sticky
+ * bit, and finally forces the exponent to zero.
+ * NOTE(review): the range test uses DBL_P (the 48-bit extended format is
+ * wider than a single) -- confirm against the SGLEXT_THRESHOLD definition.
+ */
+#define Sglext_denormalize(opndp1,opndp2,exponent,is_tiny)		\
+  {int sticky;								\
+    is_tiny = TRUE;							\
+    if (exponent == 0 && Sextallp2(opndp2)) {				\
+	switch (Rounding_mode()) {					\
+	case ROUNDPLUS:							\
+		if (Sgl_iszero_sign(opndp1))				\
+			if (Sgl_isone_hiddenoverflow(opndp1 + 1))	\
+				is_tiny = FALSE;			\
+		break;							\
+	case ROUNDMINUS:						\
+		if (Sgl_isone_sign(opndp1)) {				\
+			if (Sgl_isone_hiddenoverflow(opndp1 + 1))	\
+				is_tiny = FALSE;			\
+		}							\
+		break;							\
+	case ROUNDNEAREST:						\
+		if (Sglext_isone_highp2(opndp2) &&			\
+		    (Sglext_isone_lowp1(opndp1) || 			\
+		     Sglext_isnotzero_low31p2(opndp2)))			\
+			if (Sgl_isone_hiddenoverflow(opndp1 + 1))	\
+				is_tiny = FALSE;			\
+		break;							\
+	}								\
+    }									\
+    Sglext_clear_signexponent_set_hidden(opndp1);			\
+    if (exponent >= (1-DBL_P)) {					\
+	if (exponent >= -31) {						\
+	    if (exponent > -31) {					\
+		sticky = Sextallp2(opndp2) << 31+exponent;		\
+		Variable_shift_double(opndp1,opndp2,1-exponent,opndp2);	\
+		Sextallp1(opndp1) >>= 1-exponent;			\
+	    }								\
+	    else {							\
+		sticky = Sextallp2(opndp2);				\
+		Sextallp2(opndp2) = Sextallp1(opndp1);			\
+		Sextallp1(opndp1) = 0;					\
+	    }								\
+	}								\
+	else {								\
+	    sticky = (Sextallp1(opndp1) << 31+exponent) | 		\
+		     Sextallp2(opndp2);					\
+	    Sextallp2(opndp2) = Sextallp1(opndp1) >> -31-exponent;	\
+	    Sextallp1(opndp1) = 0;					\
+	}								\
+    }									\
+    else {								\
+	sticky = Sextallp1(opndp1) | Sextallp2(opndp2);			\
+	Sglext_setzero(opndp1,opndp2);					\
+    }									\
+    if (sticky) Sglext_setone_lowmantissap2(opndp2);			\
+    exponent = 0;							\
+  }
diff --git a/src/kernel/linux/v4.14/arch/parisc/mm/Makefile b/src/kernel/linux/v4.14/arch/parisc/mm/Makefile
new file mode 100644
index 0000000..134393d
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/mm/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for arch/parisc/mm
+#
+
+obj-y	 := init.o fault.o ioremap.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/src/kernel/linux/v4.14/arch/parisc/mm/fault.c b/src/kernel/linux/v4.14/arch/parisc/mm/fault.c
new file mode 100644
index 0000000..e247edb
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/mm/fault.c
@@ -0,0 +1,433 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
+ * Copyright 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
+ * Copyright 1999 Hewlett Packard Co.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/interrupt.h>
+#include <linux/extable.h>
+#include <linux/uaccess.h>
+#include <linux/hugetlb.h>
+
+#include <asm/traps.h>
+
+/* Various important other fields */
+#define bit22set(x)		(x & 0x00000200)
+#define bits23_25set(x)		(x & 0x000001c0)
+#define isGraphicsFlushRead(x)	((x & 0xfc003fdf) == 0x04001a80)
+				/* extended opcode is 0x6a */
+
+#define BITSSET		0x1c0	/* for identifying LDCW */
+
+
+int show_unhandled_signals = 1;
+
+/*
+ * parisc_acctyp(unsigned int inst) --
+ *    Given a PA-RISC memory access instruction, determine if the
+ *    the instruction would perform a memory read or memory write
+ *    operation.
+ *
+ *    This function assumes that the given instruction is a memory access
+ *    instruction (i.e. you should really only call it if you know that
+ *    the instruction has generated some sort of a memory access fault).
+ *
+ * Returns:
+ *   VM_READ  if read operation
+ *   VM_WRITE if write operation
+ *   VM_EXEC  if execute operation
+ */
+static unsigned long
+parisc_acctyp(unsigned long code, unsigned int inst)
+{
+	/* Trap 6 (ITLB miss) and 16 (non-access ITLB miss) are instruction
+	 * fetches, regardless of the instruction word. */
+	if (code == 6 || code == 16)
+	    return VM_EXEC;
+
+	/* Decode by the 4-bit major opcode field. */
+	switch (inst & 0xf0000000) {
+	case 0x40000000: /* load */
+	case 0x50000000: /* new load */
+		return VM_READ;
+
+	case 0x60000000: /* store */
+	case 0x70000000: /* new store */
+		return VM_WRITE;
+
+	case 0x20000000: /* coproc */
+	case 0x30000000: /* coproc2 */
+		if (bit22set(inst))
+			return VM_WRITE;
+		/* fall through: coprocessor accesses with bit 22 clear are
+		 * decoded the same way as the indexed/memory-management
+		 * opcodes below */
+
+	case 0x0: /* indexed/memory management */
+		if (bit22set(inst)) {
+			/*
+			 * Check for the 'Graphics Flush Read' instruction.
+			 * It resembles an FDC instruction, except for bits
+			 * 20 and 21. Any combination other than zero will
+			 * utilize the block mover functionality on some
+			 * older PA-RISC platforms.  The case where a block
+			 * move is performed from VM to graphics IO space
+			 * should be treated as a READ.
+			 *
+			 * The significance of bits 20,21 in the FDC
+			 * instruction is:
+			 *
+			 *   00  Flush data cache (normal instruction behavior)
+			 *   01  Graphics flush write  (IO space -> VM)
+			 *   10  Graphics flush read   (VM -> IO space)
+			 *   11  Graphics flush read/write (VM <-> IO space)
+			 */
+			if (isGraphicsFlushRead(inst))
+				return VM_READ;
+			return VM_WRITE;
+		} else {
+			/*
+			 * Check for LDCWX and LDCWS (semaphore instructions).
+			 * If bits 23 through 25 are all 1's it is one of
+			 * the above two instructions and is a write.
+			 *
+			 * Note: With the limited bits we are looking at,
+			 * this will also catch PROBEW and PROBEWI. However,
+			 * these should never get in here because they don't
+			 * generate exceptions of the type:
+			 *   Data TLB miss fault/data page fault
+			 *   Data memory protection trap
+			 */
+			if (bits23_25set(inst) == BITSSET)
+				return VM_WRITE;
+		}
+		return VM_READ; /* Default */
+	}
+	return VM_READ; /* Default */
+}
+
+#undef bit22set
+#undef bits23_25set
+#undef isGraphicsFlushRead
+#undef BITSSET
+
+
+#if 0
+/* This is the treewalk to find a vma which is the highest that has
+ * a start < addr.  We're using find_vma_prev instead right now, but
+ * we might want to use this at some point in the future.  Probably
+ * not, but I want it committed to CVS so I don't lose it :-)
+ */
+			while (tree != vm_avl_empty) {
+				if (tree->vm_start > addr) {
+					tree = tree->vm_avl_left;
+				} else {
+					prev = tree;
+					if (prev->vm_next == NULL)
+						break;
+					if (prev->vm_next->vm_start > addr)
+						break;
+					tree = tree->vm_avl_right;
+				}
+			}
+#endif
+
+/*
+ * fixup_exception() -- recover from a kernel-mode fault that occurred at an
+ * address covered by the exception table (e.g. inside get_user()/put_user()).
+ * Redirects execution to the table's fixup routine.
+ * Returns 1 if a fixup was applied, 0 if the fault is not recoverable here.
+ */
+int fixup_exception(struct pt_regs *regs)
+{
+	const struct exception_table_entry *fix;
+
+	/* Look up the faulting instruction address (front of the IA queue). */
+	fix = search_exception_tables(regs->iaoq[0]);
+	if (fix) {
+		/*
+		 * Fix up get_user() and put_user().
+		 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
+		 * bit in the relative address of the fixup routine to indicate
+		 * that %r8 should be loaded with -EFAULT to report a userspace
+		 * access error.
+		 */
+		if (fix->fixup & 1) {
+			regs->gr[8] = -EFAULT;
+
+			/* zero target register for get_user() */
+			if (parisc_acctyp(0, regs->iir) == VM_READ) {
+				int treg = regs->iir & 0x1f;
+				BUG_ON(treg == 0);
+				regs->gr[treg] = 0;
+			}
+		}
+
+		/* fixup is relative to its own table slot; mask off the
+		 * EFAULT flag bit (and alignment bits) from the target */
+		regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
+		regs->iaoq[0] &= ~3;
+		/*
+		 * NOTE: In some cases the faulting instruction
+		 * may be in the delay slot of a branch. We
+		 * don't want to take the branch, so we don't
+		 * increment iaoq[1], instead we set it to be
+		 * iaoq[0]+4, and clear the B bit in the PSW
+		 */
+		regs->iaoq[1] = regs->iaoq[0] + 4;
+		regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
+
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * parisc hardware trap list
+ *
+ * Documented in section 3 "Addressing and Access Control" of the
+ * "PA-RISC 1.1 Architecture and Instruction Set Reference Manual"
+ * https://parisc.wiki.kernel.org/index.php/File:Pa11_acd.pdf
+ *
+ * For implementation see handle_interruption() in traps.c
+ */
+/* Trap names indexed by hardware trap number; gaps are implicitly NULL and
+ * reported as "Unknown trap" by trap_name().  Uses the standard C99
+ * '[index] = value' designated-initializer syntax instead of the obsolete
+ * GNU '[index] value' extension. */
+static const char * const trap_description[] = {
+	[1] = "High-priority machine check (HPMC)",
+	[2] = "Power failure interrupt",
+	[3] = "Recovery counter trap",
+	[5] = "Low-priority machine check",
+	[6] = "Instruction TLB miss fault",
+	[7] = "Instruction access rights / protection trap",
+	[8] = "Illegal instruction trap",
+	[9] = "Break instruction trap",
+	[10] = "Privileged operation trap",
+	[11] = "Privileged register trap",
+	[12] = "Overflow trap",
+	[13] = "Conditional trap",
+	[14] = "FP Assist Exception trap",
+	[15] = "Data TLB miss fault",
+	[16] = "Non-access ITLB miss fault",
+	[17] = "Non-access DTLB miss fault",
+	[18] = "Data memory protection/unaligned access trap",
+	[19] = "Data memory break trap",
+	[20] = "TLB dirty bit trap",
+	[21] = "Page reference trap",
+	[22] = "Assist emulation trap",
+	[25] = "Taken branch trap",
+	[26] = "Data memory access rights trap",
+	[27] = "Data memory protection ID trap",
+	[28] = "Unaligned data reference trap",
+};
+
+/* Map a hardware trap number to its human-readable name; unnamed or
+ * out-of-range codes yield "Unknown trap". */
+const char *trap_name(unsigned long code)
+{
+	if (code >= ARRAY_SIZE(trap_description) || !trap_description[code])
+		return "Unknown trap";
+
+	return trap_description[code];
+}
+
+/*
+ * Print out info about fatal segfaults, if the show_unhandled_signals
+ * sysctl is set:
+ */
+static inline void
+show_signal_msg(struct pt_regs *regs, unsigned long code,
+		unsigned long address, struct task_struct *tsk,
+		struct vm_area_struct *vma)
+{
+	/* Only log segfaults the task has no handler for ... */
+	if (!unhandled_signal(tsk, SIGSEGV))
+		return;
+
+	/* ... and rate-limit so a crashing loop cannot flood the log. */
+	if (!printk_ratelimit())
+		return;
+
+	pr_warn("\n");
+	pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx",
+	    tsk->comm, code, address);
+	print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
+
+	pr_cont("\ntrap #%lu: %s%c", code, trap_name(code),
+		vma ? ',':'\n');
+
+	/* vma may be NULL when the address is outside every mapping. */
+	if (vma)
+		pr_cont(" vm_start = 0x%08lx, vm_end = 0x%08lx\n",
+			vma->vm_start, vma->vm_end);
+
+	show_regs(regs);
+}
+
+/*
+ * do_page_fault() -- main page-fault handler, entered from the trap code.
+ * 'code' is the hardware trap number and 'address' the faulting address.
+ * Resolves the fault via handle_mm_fault(), tries stack expansion when the
+ * access lands just below the previous vma, delivers SIGSEGV/SIGBUS for
+ * unresolvable user-mode faults, and falls back to exception-table fixup
+ * or parisc_terminate() for kernel-mode faults.
+ */
+void do_page_fault(struct pt_regs *regs, unsigned long code,
+			      unsigned long address)
+{
+	struct vm_area_struct *vma, *prev_vma;
+	struct task_struct *tsk;
+	struct mm_struct *mm;
+	unsigned long acc_type;
+	/* zero-initialized so the VM_FAULT_* tests in bad_area are safe when
+	 * we arrive there without ever calling handle_mm_fault() */
+	int fault = 0;
+	unsigned int flags;
+
+	/* In atomic/pagefault-disabled context only a fixup is possible. */
+	if (faulthandler_disabled())
+		goto no_context;
+
+	tsk = current;
+	mm = tsk->mm;
+	if (!mm)
+		goto no_context;
+
+	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+
+	/* Classify the faulting instruction (in regs->iir) as read/write. */
+	acc_type = parisc_acctyp(code, regs->iir);
+	if (acc_type & VM_WRITE)
+		flags |= FAULT_FLAG_WRITE;
+retry:
+	down_read(&mm->mmap_sem);
+	vma = find_vma_prev(mm, address, &prev_vma);
+	if (!vma || address < vma->vm_start)
+		goto check_expansion;
+/*
+ * Ok, we have a good vm_area for this memory access. We still need to
+ * check the access permissions.
+ */
+
+good_area:
+
+	if ((vma->vm_flags & acc_type) != acc_type)
+		goto bad_area;
+
+	/*
+	 * If for any reason at all we couldn't handle the fault, make
+	 * sure we exit gracefully rather than endlessly redo the
+	 * fault.
+	 */
+
+	fault = handle_mm_fault(vma, address, flags);
+
+	/* On retry with a fatal signal pending, mmap_sem was already
+	 * dropped inside handle_mm_fault()/__lock_page_or_retry().
+	 * NOTE(review): a kernel-mode fault takes this early return too,
+	 * skipping the exception-table fixup -- confirm against upstream. */
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		/*
+		 * We hit a shared mapping outside of the file, or some
+		 * other thing happened to us that made us unable to
+		 * handle the page fault gracefully.
+		 */
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
+		else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+				  VM_FAULT_HWPOISON_LARGE))
+			goto bad_area;
+		BUG();
+	}
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		/* Account the fault, then retry once without ALLOW_RETRY
+		 * if the first attempt dropped the lock. */
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+	up_read(&mm->mmap_sem);
+	return;
+
+check_expansion:
+	/* No vma covers the address; if it falls under the previous vma's
+	 * growable region, try to expand the stack into it. */
+	vma = prev_vma;
+	if (vma && (expand_stack(vma, address) == 0))
+		goto good_area;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ */
+bad_area:
+	up_read(&mm->mmap_sem);
+
+	if (user_mode(regs)) {
+		struct siginfo si;
+		unsigned int lsb = 0;
+
+		/* Choose the signal and si_code from the trap number. */
+		switch (code) {
+		case 15:	/* Data TLB miss fault/Data page fault */
+			/* send SIGSEGV when outside of vma */
+			if (!vma ||
+			    address < vma->vm_start || address >= vma->vm_end) {
+				si.si_signo = SIGSEGV;
+				si.si_code = SEGV_MAPERR;
+				break;
+			}
+
+			/* send SIGSEGV for wrong permissions */
+			if ((vma->vm_flags & acc_type) != acc_type) {
+				si.si_signo = SIGSEGV;
+				si.si_code = SEGV_ACCERR;
+				break;
+			}
+
+			/* probably address is outside of mapped file */
+			/* fall through */
+		case 17:	/* NA data TLB miss / page fault */
+		case 18:	/* Unaligned access - PCXS only */
+			si.si_signo = SIGBUS;
+			si.si_code = (code == 18) ? BUS_ADRALN : BUS_ADRERR;
+			break;
+		case 16:	/* Non-access instruction TLB miss fault */
+		case 26:	/* PCXL: Data memory access rights trap */
+		default:
+			si.si_signo = SIGSEGV;
+			si.si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR;
+			break;
+		}
+
+#ifdef CONFIG_MEMORY_FAILURE
+		if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
+			printk(KERN_ERR
+	"MCE: Killing %s:%d due to hardware memory corruption fault at %08lx\n",
+			tsk->comm, tsk->pid, address);
+			si.si_signo = SIGBUS;
+			si.si_code = BUS_MCEERR_AR;
+		}
+#endif
+
+		/*
+		 * Either small page or large page may be poisoned.
+		 * In other words, VM_FAULT_HWPOISON_LARGE and
+		 * VM_FAULT_HWPOISON are mutually exclusive.
+		 */
+		if (fault & VM_FAULT_HWPOISON_LARGE)
+			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
+		else if (fault & VM_FAULT_HWPOISON)
+			lsb = PAGE_SHIFT;
+		else
+			show_signal_msg(regs, code, address, tsk, vma);
+		si.si_addr_lsb = lsb;
+
+		si.si_errno = 0;
+		si.si_addr = (void __user *) address;
+		force_sig_info(si.si_signo, &si, current);
+		return;
+	}
+
+no_context:
+
+	/* Kernel-mode fault: try exception-table fixup before dying. */
+	if (!user_mode(regs) && fixup_exception(regs)) {
+		return;
+	}
+
+	parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
+
+  out_of_memory:
+	up_read(&mm->mmap_sem);
+	if (!user_mode(regs))
+		goto no_context;
+	pagefault_out_of_memory();
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/mm/hugetlbpage.c b/src/kernel/linux/v4.14/arch/parisc/mm/hugetlbpage.c
new file mode 100644
index 0000000..d77479a
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/mm/hugetlbpage.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PARISC64 Huge TLB page support.
+ *
+ * This parisc implementation is heavily based on the SPARC and x86 code.
+ *
+ * Copyright (C) 2015 Helge Deller <deller@gmx.de>
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sched/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/sysctl.h>
+
+#include <asm/mman.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+
+/*
+ * hugetlb_get_unmapped_area() -- find a suitable unmapped region for a huge
+ * page mapping.  Validates length/alignment against the file's huge page
+ * size, then delegates to the generic arch_get_unmapped_area() so cache
+ * colouring rules are honoured.
+ */
+unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct hstate *h = hstate_file(file);
+
+	/* Length must be a whole number of huge pages. */
+	if (len & ~huge_page_mask(h))
+		return -EINVAL;
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	/* MAP_FIXED: the caller's exact address must itself be valid. */
+	if (flags & MAP_FIXED)
+		if (prepare_hugepage_range(file, addr, len))
+			return -EINVAL;
+
+	/* Round a hint address up to the next huge-page boundary. */
+	if (addr)
+		addr = ALIGN(addr, huge_page_size(h));
+
+	/* we need to make sure the colouring is OK */
+	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
+}
+
+
+/*
+ * huge_pte_alloc() -- walk (allocating as needed) the page-table levels for
+ * 'addr' and return the first pte of the huge page range, or NULL if a
+ * table allocation failed.
+ */
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+			unsigned long addr, unsigned long sz)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte = NULL;
+
+	/* We must align the address, because our caller will run
+	 * set_huge_pte_at() on whatever we return, which writes out
+	 * all of the sub-ptes for the hugepage range.  So we have
+	 * to give it the first such sub-pte.
+	 */
+	addr &= HPAGE_MASK;
+
+	pgd = pgd_offset(mm, addr);
+	pud = pud_alloc(mm, pgd, addr);
+	if (pud) {
+		pmd = pmd_alloc(mm, pud, addr);
+		if (pmd)
+			pte = pte_alloc_map(mm, pmd, addr);
+	}
+	return pte;
+}
+
+/*
+ * huge_pte_offset() -- non-allocating lookup counterpart of
+ * huge_pte_alloc(): walk the existing page tables for 'addr' and return
+ * the first pte of the huge page range, or NULL if any level is absent.
+ */
+pte_t *huge_pte_offset(struct mm_struct *mm,
+		       unsigned long addr, unsigned long sz)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte = NULL;
+
+	/* Align to the start of the huge page, matching huge_pte_alloc(). */
+	addr &= HPAGE_MASK;
+
+	pgd = pgd_offset(mm, addr);
+	if (!pgd_none(*pgd)) {
+		pud = pud_offset(pgd, addr);
+		if (!pud_none(*pud)) {
+			pmd = pmd_offset(pud, addr);
+			if (!pmd_none(*pmd))
+				pte = pte_offset_map(pmd, addr);
+		}
+	}
+	return pte;
+}
+
+/* Purge data and instruction TLB entries.  Must be called holding
+ * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
+ * machines since the purge must be broadcast to all CPUs.
+ */
+static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
+{
+	int i;
+
+	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
+	 * Linux standard huge pages (e.g. 2 MB) */
+	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);
+
+	/* Encode the (real) huge page size into the purge address. */
+	addr &= HPAGE_MASK;
+	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;
+
+	/* One purge per underlying hardware huge page. */
+	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
+		purge_tlb_entries(mm, addr);
+		addr += (1UL << REAL_HPAGE_SHIFT);
+	}
+}
+
+/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
+/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
+static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t entry)
+{
+	unsigned long addr_start;
+	int i;
+
+	addr &= HPAGE_MASK;
+	addr_start = addr;
+
+	/* A huge page is backed by consecutive normal ptes: write one pte
+	 * per base page, bumping the physical address in 'entry' each time. */
+	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+		set_pte(ptep, entry);
+		ptep++;
+
+		addr += PAGE_SIZE;
+		pte_val(entry) += PAGE_SIZE;
+	}
+
+	/* Flush stale translations for the whole range just rewritten. */
+	purge_tlb_entries_huge(mm, addr_start);
+}
+
+/* Public wrapper: install a huge pte while holding the TLB purge lock. */
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t entry)
+{
+	unsigned long flags;
+
+	purge_tlb_start(flags);
+	__set_huge_pte_at(mm, addr, ptep, entry);
+	purge_tlb_end(flags);
+}
+
+
+/* Atomically (under the TLB purge lock) read the current huge pte and
+ * replace the whole range with empty ptes; returns the old first pte. */
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep)
+{
+	unsigned long flags;
+	pte_t entry;
+
+	purge_tlb_start(flags);
+	entry = *ptep;
+	__set_huge_pte_at(mm, addr, ptep, __pte(0));
+	purge_tlb_end(flags);
+
+	return entry;
+}
+
+
+/* Clear the write bit on every sub-pte of the huge page (for COW),
+ * under the TLB purge lock. */
+void huge_ptep_set_wrprotect(struct mm_struct *mm,
+				unsigned long addr, pte_t *ptep)
+{
+	unsigned long flags;
+	pte_t old_pte;
+
+	purge_tlb_start(flags);
+	old_pte = *ptep;
+	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+	purge_tlb_end(flags);
+}
+
+/* Update access/dirty bits of a huge pte if it actually changed; returns
+ * non-zero when a rewrite (and TLB purge) was performed. */
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+				unsigned long addr, pte_t *ptep,
+				pte_t pte, int dirty)
+{
+	unsigned long flags;
+	int changed;
+
+	purge_tlb_start(flags);
+	changed = !pte_same(*ptep, pte);
+	if (changed) {
+		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+	}
+	purge_tlb_end(flags);
+	return changed;
+}
+
+
+/* Huge pages here are built from runs of normal ptes (see
+ * __set_huge_pte_at), so a pmd entry is never itself a huge mapping. */
+int pmd_huge(pmd_t pmd)
+{
+	return 0;
+}
+
+/* As with pmd_huge(): no pud-level huge mappings on this architecture. */
+int pud_huge(pud_t pud)
+{
+	return 0;
+}
diff --git a/src/kernel/linux/v4.14/arch/parisc/mm/init.c b/src/kernel/linux/v4.14/arch/parisc/mm/init.c
new file mode 100644
index 0000000..8be075f
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/mm/init.c
@@ -0,0 +1,932 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  linux/arch/parisc/mm/init.c
+ *
+ *  Copyright (C) 1995	Linus Torvalds
+ *  Copyright 1999 SuSE GmbH
+ *    changed by Philipp Rumpf
+ *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
+ *  Copyright 2004 Randolph Chung (tausq@debian.org)
+ *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/gfp.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
+#include <linux/initrd.h>
+#include <linux/swap.h>
+#include <linux/unistd.h>
+#include <linux/nodemask.h>	/* for node_online_map */
+#include <linux/pagemap.h>	/* for release_pages */
+#include <linux/compat.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+#include <asm/pdc_chassis.h>
+#include <asm/mmzone.h>
+#include <asm/sections.h>
+#include <asm/msgbuf.h>
+
+extern int  data_start;
+extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */
+
+#if CONFIG_PGTABLE_LEVELS == 3
+/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
+ * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
+ * guarantee that global objects will be laid out in memory in the same order
+ * as the order of declaration, so put these in different sections and use
+ * the linker script to order them. */
+pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
+#endif
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
+pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
+
+#ifdef CONFIG_DISCONTIGMEM
+struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
+signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
+#endif
+
+static struct resource data_resource = {
+	.name	= "Kernel data",
+	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
+};
+
+static struct resource code_resource = {
+	.name	= "Kernel code",
+	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
+};
+
+static struct resource pdcdata_resource = {
+	.name	= "PDC data (Page Zero)",
+	.start	= 0,
+	.end	= 0x9ff,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
+
+/* The following array is initialized from the firmware specific
+ * information retrieved in kernel/inventory.c.
+ */
+
+physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
+int npmem_ranges __read_mostly;
+
+/*
+ * get_memblock() allocates pages via memblock.
+ * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it
+ * doesn't allocate from bottom to top which is needed because we only created
+ * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code.
+ */
+/*
+ * Bottom-up early allocator: scan upwards from the end of the kernel
+ * image (_end) for @size bytes of usable, unreserved memory, reserve
+ * it with memblock, zero it and return its kernel-virtual address.
+ * @size is also used as the alignment of the allocation.  search_addr
+ * persists across calls so subsequent allocations continue above the
+ * previous one.  Panics if no memory could be found (phys == 0).
+ */
+static void * __init get_memblock(unsigned long size)
+{
+	static phys_addr_t search_addr __initdata;
+	phys_addr_t phys;
+
+	if (!search_addr)
+		search_addr = PAGE_ALIGN(__pa((unsigned long) &_end));
+	search_addr = ALIGN(search_addr, size);
+	/* Skip forward until the whole [search_addr, +size) range is
+	 * real memory and not already reserved. */
+	while (!memblock_is_region_memory(search_addr, size) ||
+		memblock_is_region_reserved(search_addr, size)) {
+		search_addr += size;
+	}
+	phys = search_addr;
+
+	if (phys)
+		memblock_reserve(phys, size);
+	else
+		panic("get_memblock() failed.\n");
+
+	/* Callers expect zeroed memory (page tables, zero page). */
+	memset(__va(phys), 0, size);
+
+	return __va(phys);
+}
+
+#ifdef CONFIG_64BIT
+#define MAX_MEM         (~0UL)
+#else /* !CONFIG_64BIT */
+#define MAX_MEM         (3584U*1024U*1024U)
+#endif /* !CONFIG_64BIT */
+
+static unsigned long mem_limit __read_mostly = MAX_MEM;
+
+/*
+ * Scan boot_command_line by hand for a "mem=" argument and lower
+ * mem_limit accordingly.  Parsed manually because this runs before the
+ * normal __setup() machinery is available.
+ */
+static void __init mem_limit_func(void)
+{
+	char *cp, *end;
+	unsigned long limit;
+
+	/* We need this before __setup() functions are called */
+
+	limit = MAX_MEM;
+	for (cp = boot_command_line; *cp; ) {
+		if (memcmp(cp, "mem=", 4) == 0) {
+			cp += 4;
+			limit = memparse(cp, &end);
+			/* memparse consumed something: take this value. */
+			if (end != cp)
+				break;
+			cp = end;
+		} else {
+			/* Skip to the next whitespace-separated option. */
+			while (*cp != ' ' && *cp)
+				++cp;
+			while (*cp == ' ')
+				++cp;
+		}
+	}
+
+	/* Only ever tighten the limit, never raise it. */
+	if (limit < mem_limit)
+		mem_limit = limit;
+}
+
+#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
+
+/*
+ * Early memory setup: sort and sanity-check the firmware-reported
+ * physical memory ranges (pmem_ranges), apply any "mem=" limit,
+ * register the ranges with memblock and the resource tree, reserve
+ * firmware/kernel/initrd regions and memory holes, and initialize the
+ * Page Deallocation Table.  Runs once from paging_init().
+ */
+static void __init setup_bootmem(void)
+{
+	unsigned long mem_max;
+#ifndef CONFIG_DISCONTIGMEM
+	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
+	int npmem_holes;
+#endif
+	int i, sysram_resource_count;
+
+	disable_sr_hashing(); /* Turn off space register hashing */
+
+	/*
+	 * Sort the ranges. Since the number of ranges is typically
+	 * small, and performance is not an issue here, just do
+	 * a simple insertion sort.
+	 */
+
+	for (i = 1; i < npmem_ranges; i++) {
+		int j;
+
+		for (j = i; j > 0; j--) {
+			unsigned long tmp;
+
+			if (pmem_ranges[j-1].start_pfn <
+			    pmem_ranges[j].start_pfn) {
+
+				break;
+			}
+			/* Swap both fields of ranges j-1 and j. */
+			tmp = pmem_ranges[j-1].start_pfn;
+			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
+			pmem_ranges[j].start_pfn = tmp;
+			tmp = pmem_ranges[j-1].pages;
+			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
+			pmem_ranges[j].pages = tmp;
+		}
+	}
+
+#ifndef CONFIG_DISCONTIGMEM
+	/*
+	 * Throw out ranges that are too far apart (controlled by
+	 * MAX_GAP).
+	 */
+
+	for (i = 1; i < npmem_ranges; i++) {
+		if (pmem_ranges[i].start_pfn -
+			(pmem_ranges[i-1].start_pfn +
+			 pmem_ranges[i-1].pages) > MAX_GAP) {
+			/* Truncate the range list at the first big gap;
+			 * everything above it is ignored. */
+			npmem_ranges = i;
+			printk("Large gap in memory detected (%ld pages). "
+			       "Consider turning on CONFIG_DISCONTIGMEM\n",
+			       pmem_ranges[i].start_pfn -
+			       (pmem_ranges[i-1].start_pfn +
+			        pmem_ranges[i-1].pages));
+			break;
+		}
+	}
+#endif
+
+	/* Print the memory ranges */
+	pr_info("Memory Ranges:\n");
+
+	for (i = 0; i < npmem_ranges; i++) {
+		struct resource *res = &sysram_resources[i];
+		unsigned long start;
+		unsigned long size;
+
+		size = (pmem_ranges[i].pages << PAGE_SHIFT);
+		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
+		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
+			i, start, start + (size - 1), size >> 20);
+
+		/* request memory resource */
+		res->name = "System RAM";
+		res->start = start;
+		res->end = start + size - 1;
+		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+		request_resource(&iomem_resource, res);
+	}
+
+	sysram_resource_count = npmem_ranges;
+
+	/*
+	 * For 32 bit kernels we limit the amount of memory we can
+	 * support, in order to preserve enough kernel address space
+	 * for other purposes. For 64 bit kernels we don't normally
+	 * limit the memory, but this mechanism can be used to
+	 * artificially limit the amount of memory (and it is written
+	 * to work with multiple memory ranges).
+	 */
+
+	mem_limit_func();       /* check for "mem=" argument */
+
+	/* Walk the ranges, accumulating mem_max and clipping the range
+	 * that crosses mem_limit (dropping any ranges beyond it). */
+	mem_max = 0;
+	for (i = 0; i < npmem_ranges; i++) {
+		unsigned long rsize;
+
+		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
+		if ((mem_max + rsize) > mem_limit) {
+			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
+			if (mem_max == mem_limit)
+				npmem_ranges = i;
+			else {
+				pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
+						       - (mem_max >> PAGE_SHIFT);
+				npmem_ranges = i + 1;
+				mem_max = mem_limit;
+			}
+			break;
+		}
+		mem_max += rsize;
+	}
+
+	printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);
+
+#ifndef CONFIG_DISCONTIGMEM
+	/* Merge the ranges, keeping track of the holes */
+
+	{
+		unsigned long end_pfn;
+		unsigned long hole_pages;
+
+		npmem_holes = 0;
+		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
+		for (i = 1; i < npmem_ranges; i++) {
+
+			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
+			if (hole_pages) {
+				pmem_holes[npmem_holes].start_pfn = end_pfn;
+				pmem_holes[npmem_holes++].pages = hole_pages;
+				end_pfn += hole_pages;
+			}
+			end_pfn += pmem_ranges[i].pages;
+		}
+
+		/* Collapse everything into a single range; the holes
+		 * recorded above get reserved further down. */
+		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
+		npmem_ranges = 1;
+	}
+#endif
+
+#ifdef CONFIG_DISCONTIGMEM
+	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
+		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
+	}
+	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
+
+	/* One online node per physical memory range. */
+	for (i = 0; i < npmem_ranges; i++) {
+		node_set_state(i, N_NORMAL_MEMORY);
+		node_set_online(i);
+	}
+#endif
+
+	/*
+	 * Initialize and free the full range of memory in each range.
+	 */
+
+	max_pfn = 0;
+	for (i = 0; i < npmem_ranges; i++) {
+		unsigned long start_pfn;
+		unsigned long npages;
+		unsigned long start;
+		unsigned long size;
+
+		start_pfn = pmem_ranges[i].start_pfn;
+		npages = pmem_ranges[i].pages;
+
+		start = start_pfn << PAGE_SHIFT;
+		size = npages << PAGE_SHIFT;
+
+		/* add system RAM memblock */
+		memblock_add(start, size);
+
+		if ((start_pfn + npages) > max_pfn)
+			max_pfn = start_pfn + npages;
+	}
+
+	/* IOMMU is always used to access "high mem" on those boxes
+	 * that can support enough mem that a PCI device couldn't
+	 * directly DMA to any physical addresses.
+	 * ISA DMA support will need to revisit this.
+	 */
+	max_low_pfn = max_pfn;
+
+	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */
+
+#define PDC_CONSOLE_IO_IODC_SIZE 32768
+
+	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
+				PDC_CONSOLE_IO_IODC_SIZE));
+	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
+			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));
+
+#ifndef CONFIG_DISCONTIGMEM
+
+	/* reserve the holes */
+
+	for (i = 0; i < npmem_holes; i++) {
+		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
+				(pmem_holes[i].pages << PAGE_SHIFT));
+	}
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (initrd_start) {
+		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
+		if (__pa(initrd_start) < mem_max) {
+			unsigned long initrd_reserve;
+
+			/* Clip the reservation if the initrd extends
+			 * past the end of usable memory. */
+			if (__pa(initrd_end) > mem_max) {
+				initrd_reserve = mem_max - __pa(initrd_start);
+			} else {
+				initrd_reserve = initrd_end - initrd_start;
+			}
+			initrd_below_start_ok = 1;
+			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
+
+			memblock_reserve(__pa(initrd_start), initrd_reserve);
+		}
+	}
+#endif
+
+	data_resource.start =  virt_to_phys(&data_start);
+	data_resource.end = virt_to_phys(_end) - 1;
+	code_resource.start = virt_to_phys(_text);
+	code_resource.end = virt_to_phys(&data_start)-1;
+
+	/* We don't know which region the kernel will be in, so try
+	 * all of them.
+	 */
+	for (i = 0; i < sysram_resource_count; i++) {
+		struct resource *res = &sysram_resources[i];
+		request_resource(res, &code_resource);
+		request_resource(res, &data_resource);
+	}
+	request_resource(&sysram_resources[0], &pdcdata_resource);
+
+	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
+	pdc_pdt_init();
+}
+
+/*
+ * Return true if @vaddr lies in kernel text, including the head.S
+ * entry page (parisc_kernel_start), which core_kernel_text() alone
+ * does not cover.  The head page address is computed once and cached
+ * in a static, since this is called for every page in map_pages().
+ */
+static int __init parisc_text_address(unsigned long vaddr)
+{
+	static unsigned long head_ptr __initdata;
+
+	if (!head_ptr)
+		head_ptr = PAGE_MASK & (unsigned long)
+			dereference_function_descriptor(&parisc_kernel_start);
+
+	return core_kernel_text(vaddr) || vaddr == head_ptr;
+}
+
+/*
+ * Build kernel page-table entries mapping the physical range
+ * [start_paddr, start_paddr + size) at virtual address start_vaddr
+ * with protection @pgprot.  Walks/creates pgd -> pmd -> pte levels,
+ * allocating missing tables from get_memblock().  When @force is 0,
+ * the requested pgprot is overridden for kernel text (PAGE_KERNEL_EXEC)
+ * and, with 4kB base pages, for rodata; pages inside the kernel image
+ * are additionally marked huge.  With @force set, @pgprot is applied
+ * unconditionally (used by free_initmem() and gateway_init()).
+ */
+static void __init map_pages(unsigned long start_vaddr,
+			     unsigned long start_paddr, unsigned long size,
+			     pgprot_t pgprot, int force)
+{
+	pgd_t *pg_dir;
+	pmd_t *pmd;
+	pte_t *pg_table;
+	unsigned long end_paddr;
+	unsigned long start_pmd;
+	unsigned long start_pte;
+	unsigned long tmp1;
+	unsigned long tmp2;
+	unsigned long address;
+	unsigned long vaddr;
+	unsigned long ro_start;
+	unsigned long ro_end;
+	unsigned long kernel_end;
+
+	/* Physical bounds of kernel text (ro) and whole image. */
+	ro_start = __pa((unsigned long)_text);
+	ro_end   = __pa((unsigned long)&data_start);
+	kernel_end  = __pa((unsigned long)&_end);
+
+	end_paddr = start_paddr + size;
+
+	pg_dir = pgd_offset_k(start_vaddr);
+
+#if PTRS_PER_PMD == 1
+	start_pmd = 0;
+#else
+	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
+#endif
+	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+
+	address = start_paddr;
+	vaddr = start_vaddr;
+	while (address < end_paddr) {
+#if PTRS_PER_PMD == 1
+		pmd = (pmd_t *)__pa(pg_dir);
+#else
+		pmd = (pmd_t *)pgd_address(*pg_dir);
+
+		/*
+		 * pmd is physical at this point
+		 */
+
+		if (!pmd) {
+			pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER);
+			pmd = (pmd_t *) __pa(pmd);
+		}
+
+		pgd_populate(NULL, pg_dir, __va(pmd));
+#endif
+		pg_dir++;
+
+		/* now change pmd to kernel virtual addresses */
+
+		pmd = (pmd_t *)__va(pmd) + start_pmd;
+		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
+
+			/*
+			 * pg_table is physical at this point
+			 */
+
+			pg_table = (pte_t *)pmd_address(*pmd);
+			if (!pg_table) {
+				pg_table = (pte_t *) get_memblock(PAGE_SIZE);
+				pg_table = (pte_t *) __pa(pg_table);
+			}
+
+			pmd_populate_kernel(NULL, pmd, __va(pg_table));
+
+			/* now change pg_table to kernel virtual addresses */
+
+			pg_table = (pte_t *) __va(pg_table) + start_pte;
+			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
+				pte_t pte;
+
+				if (force)
+					pte =  __mk_pte(address, pgprot);
+				else if (parisc_text_address(vaddr)) {
+					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+					if (address >= ro_start && address < kernel_end)
+						pte = pte_mkhuge(pte);
+				}
+				else
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+				if (address >= ro_start && address < ro_end) {
+					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+					pte = pte_mkhuge(pte);
+				} else
+#endif
+				{
+					pte = __mk_pte(address, pgprot);
+					if (address >= ro_start && address < kernel_end)
+						pte = pte_mkhuge(pte);
+				}
+
+				if (address >= end_paddr)
+					break;
+
+				set_pte(pg_table, pte);
+
+				address += PAGE_SIZE;
+				vaddr += PAGE_SIZE;
+			}
+			/* Only the first PTE table starts mid-table. */
+			start_pte = 0;
+
+			if (address >= end_paddr)
+			    break;
+		}
+		/* Likewise only the first PMD starts mid-table. */
+		start_pmd = 0;
+	}
+}
+
+/*
+ * Release the __init text/data pages back to the page allocator.
+ * Before freeing, the init region is remapped RWX and then RW (to
+ * pre-prime the TLB for map_pages itself, which lives in init text),
+ * the stale TLB entries are flushed, and the icache is flushed since
+ * the pages cease to be executable.
+ */
+void free_initmem(void)
+{
+	unsigned long init_begin = (unsigned long)__init_begin;
+	unsigned long init_end = (unsigned long)__init_end;
+
+	/* The init text pages are marked R-X.  We have to
+	 * flush the icache and mark them RW-
+	 *
+	 * This is tricky, because map_pages is in the init section.
+	 * Do a dummy remap of the data section first (the data
+	 * section is already PAGE_KERNEL) to pull in the TLB entries
+	 * for map_kernel */
+	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
+		  PAGE_KERNEL_RWX, 1);
+	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
+	 * map_pages */
+	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
+		  PAGE_KERNEL, 1);
+
+	/* force the kernel to see the new TLB entries */
+	__flush_tlb_range(0, init_begin, init_end);
+
+	/* finally dump all the instructions which were cached, since the
+	 * pages are no-longer executable */
+	flush_icache_range(init_begin, init_end);
+	
+	free_initmem_default(POISON_FREE_INITMEM);
+
+	/* set up a new led state on systems shipped LED State panel */
+	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
+}
+
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+/*
+ * Generic STRICT_KERNEL_RWX hook: on parisc the rodata section was
+ * already mapped read-only during early page-table setup, so this
+ * just logs the protected size.
+ */
+void mark_rodata_ro(void)
+{
+	/* rodata memory was already mapped with KERNEL_RO access rights by
+           pagetable_init() and map_pages(). No need to do additional stuff here */
+	printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+		(unsigned long)(__end_rodata - __start_rodata) >> 10);
+}
+#endif
+
+
+/*
+ * Just an arbitrary offset to serve as a "hole" between mapping areas
+ * (between top of physical memory and a potential pcxl dma mapping
+ * area, and below the vmalloc mapping area).
+ *
+ * The current 32K value just means that there will be a 32K "hole"
+ * between mapping areas. That means that  any out-of-bounds memory
+ * accesses will hopefully be caught. The vmalloc() routines leaves
+ * a hole of 4kB between each vmalloced area for the same reason.
+ */
+
+ /* Leave room for gateway page expansion */
+#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
+#error KERNEL_MAP_START is in gateway reserved region
+#endif
+#define MAP_START (KERNEL_MAP_START)
+
+#define VM_MAP_OFFSET  (32*1024)
+#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
+				     & ~(VM_MAP_OFFSET-1)))
+
+void *parisc_vmalloc_start __read_mostly;
+EXPORT_SYMBOL(parisc_vmalloc_start);
+
+#ifdef CONFIG_PA11
+unsigned long pcxl_dma_start __read_mostly;
+#endif
+
+/*
+ * Late memory initialization: compile-time sanity checks on IPC and
+ * page-table structure sizes (the ABI depends on them), hand all free
+ * pages to the buddy allocator, lay out the pcxl DMA window and the
+ * vmalloc start address, and print the memory map summary.
+ */
+void __init mem_init(void)
+{
+	/* Do sanity checks on IPC (compat) structures */
+	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
+#ifndef CONFIG_64BIT
+	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
+	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
+	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
+#endif
+#ifdef CONFIG_COMPAT
+	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
+	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
+	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
+	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
+#endif
+
+	/* Do sanity checks on page table constants */
+	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
+	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
+	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
+	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
+			> BITS_PER_LONG);
+
+	high_memory = __va((max_pfn << PAGE_SHIFT));
+	set_max_mapnr(max_low_pfn);
+	free_all_bootmem();
+
+#ifdef CONFIG_PA11
+	/* On PA1.1 with the pcxl DMA ops, carve the DMA map area out
+	 * between MAP_START and the vmalloc region. */
+	if (hppa_dma_ops == &pcxl_dma_ops) {
+		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
+		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
+						+ PCXL_DMA_MAP_SIZE);
+	} else {
+		pcxl_dma_start = 0;
+		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
+	}
+#else
+	parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
+#endif
+
+	mem_init_print_info(NULL);
+#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
+	printk("virtual kernel memory layout:\n"
+	       "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
+	       "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
+	       "      .init : 0x%p - 0x%p   (%4ld kB)\n"
+	       "      .data : 0x%p - 0x%p   (%4ld kB)\n"
+	       "      .text : 0x%p - 0x%p   (%4ld kB)\n",
+
+	       (void*)VMALLOC_START, (void*)VMALLOC_END,
+	       (VMALLOC_END - VMALLOC_START) >> 20,
+
+	       __va(0), high_memory,
+	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
+
+	       __init_begin, __init_end,
+	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,
+
+	       _etext, _edata,
+	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,
+
+	       _text, _etext,
+	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
+#endif
+}
+
+unsigned long *empty_zero_page __read_mostly;
+EXPORT_SYMBOL(empty_zero_page);
+
+/*
+ * pagetable_init() sets up the page tables
+ *
+ * Note that gateway_init() places the Linux gateway page at page 0.
+ * Since gateway pages cannot be dereferenced this has the desirable
+ * side effect of trapping those pesky NULL-reference errors in the
+ * kernel.
+ */
+/*
+ * Create the kernel's linear mapping: one map_pages() call per
+ * physical memory range (mapped at its __va address with PAGE_KERNEL),
+ * plus a mapping for an initrd that lies above the memory limit, and
+ * finally allocate the shared empty_zero_page.
+ */
+static void __init pagetable_init(void)
+{
+	int range;
+
+	/* Map each physical memory range to its kernel vaddr */
+
+	for (range = 0; range < npmem_ranges; range++) {
+		unsigned long start_paddr;
+		unsigned long end_paddr;
+		unsigned long size;
+
+		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
+		size = pmem_ranges[range].pages << PAGE_SHIFT;
+		end_paddr = start_paddr + size;
+
+		map_pages((unsigned long)__va(start_paddr), start_paddr,
+			  size, PAGE_KERNEL, 0);
+	}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	/* NOTE(review): compares the virtual initrd_end against the
+	 * physical mem_limit; matches upstream, but worth confirming. */
+	if (initrd_end && initrd_end > mem_limit) {
+		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
+		map_pages(initrd_start, __pa(initrd_start),
+			  initrd_end - initrd_start, PAGE_KERNEL, 0);
+	}
+#endif
+
+	empty_zero_page = get_memblock(PAGE_SIZE);
+}
+
+/*
+ * Map the Linux gateway page (system-call entry) at LINUX_GATEWAY_ADDR
+ * with PAGE_GATEWAY protection.  force=1 is passed to map_pages() so
+ * the gateway protection is applied verbatim.
+ */
+static void __init gateway_init(void)
+{
+	unsigned long linux_gateway_page_addr;
+	/* FIXME: This is 'const' in order to trick the compiler
+	   into not treating it as DP-relative data. */
+	extern void * const linux_gateway_page;
+
+	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;
+
+	/*
+	 * Setup Linux Gateway page.
+	 *
+	 * The Linux gateway page will reside in kernel space (on virtual
+	 * page 0), so it doesn't need to be aliased into user space.
+	 */
+
+	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
+		  PAGE_SIZE, PAGE_GATEWAY, 1);
+}
+
+/*
+ * Top-level paging bring-up, called from setup_arch(): discover and
+ * reserve memory (setup_bootmem), build the kernel page tables
+ * (pagetable_init), map the gateway page, flush caches/TLB to a known
+ * state, then initialize one zone per memory range via
+ * free_area_init_node().
+ */
+void __init paging_init(void)
+{
+	int i;
+
+	setup_bootmem();
+	pagetable_init();
+	gateway_init();
+	flush_cache_all_local(); /* start with known state */
+	flush_tlb_all_local(NULL);
+
+	for (i = 0; i < npmem_ranges; i++) {
+		unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+
+		/* All memory goes into ZONE_NORMAL; parisc has no
+		 * highmem or DMA zone split here. */
+		zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;
+
+#ifdef CONFIG_DISCONTIGMEM
+		/* Need to initialize the pfnnid_map before we can initialize
+		   the zone */
+		{
+		    int j;
+		    for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
+			 j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
+			 j++) {
+			pfnnid_map[j] = i;
+		    }
+		}
+#endif
+
+		free_area_init_node(i, zones_size,
+				pmem_ranges[i].start_pfn, NULL);
+	}
+}
+
+#ifdef CONFIG_PA20
+
+/*
+ * Currently, all PA20 chips have 18 bit protection IDs, which is the
+ * limiting factor (space ids are 32 bits).
+ */
+
+#define NR_SPACE_IDS 262144
+
+#else
+
+/*
+ * Currently we have a one-to-one relationship between space IDs and
+ * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
+ * support 15 bit protection IDs, so that is the limiting factor.
+ * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
+ * probably not worth the effort for a special case here.
+ */
+
+#define NR_SPACE_IDS 32768
+
+#endif  /* !CONFIG_PA20 */
+
+#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
+#define SID_ARRAY_SIZE  (NR_SPACE_IDS / (8 * sizeof(long)))
+
+static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
+static unsigned long dirty_space_id[SID_ARRAY_SIZE];
+static unsigned long space_id_index;
+static unsigned long free_space_ids = NR_SPACE_IDS - 1;
+static unsigned long dirty_space_ids = 0;
+
+static DEFINE_SPINLOCK(sid_lock);
+
+/*
+ * Allocate a free space ID.  If the free pool is exhausted but dirty
+ * (released-but-not-yet-TLB-flushed) IDs exist, trigger a full TLB
+ * flush to recycle them, then retry.  Returns the ID shifted into
+ * position (index << SPACEID_SHIFT).  BUGs if no ID can be found.
+ */
+unsigned long alloc_sid(void)
+{
+	unsigned long index;
+
+	spin_lock(&sid_lock);
+
+	if (free_space_ids == 0) {
+		if (dirty_space_ids != 0) {
+			/* Must drop the lock: flush_tlb_all() takes
+			 * sid_lock itself to run recycle_sids(). */
+			spin_unlock(&sid_lock);
+			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
+			spin_lock(&sid_lock);
+		}
+		BUG_ON(free_space_ids == 0);
+	}
+
+	free_space_ids--;
+
+	/* Search circularly starting from the last allocation point. */
+	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
+	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
+	space_id_index = index;
+
+	spin_unlock(&sid_lock);
+
+	return index << SPACEID_SHIFT;
+}
+
+/*
+ * Release a space ID.  The ID is not returned to the free pool
+ * immediately; it is marked in dirty_space_id and only becomes
+ * reusable after the next full TLB flush (see recycle_sids()), since
+ * stale TLB entries tagged with it may still exist.
+ */
+void free_sid(unsigned long spaceid)
+{
+	unsigned long index = spaceid >> SPACEID_SHIFT;
+	unsigned long *dirty_space_offset;
+
+	/* Locate the bitmap word and bit for this ID. */
+	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
+	index &= (BITS_PER_LONG - 1);
+
+	spin_lock(&sid_lock);
+
+	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */
+
+	*dirty_space_offset |= (1L << index);
+	dirty_space_ids++;
+
+	spin_unlock(&sid_lock);
+}
+
+
+#ifdef CONFIG_SMP
+/*
+ * Move the dirty space-ID bitmap into @dirty_array and report the
+ * count via @ndirtyptr, clearing the live dirty state.  Used by the
+ * SMP flush_tlb_all() to snapshot IDs for recycling outside the lock.
+ */
+static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array)
+{
+	int i;
+
+	/* NOTE: sid_lock must be held upon entry */
+
+	*ndirtyptr = dirty_space_ids;
+	if (dirty_space_ids != 0) {
+	    for (i = 0; i < SID_ARRAY_SIZE; i++) {
+		dirty_array[i] = dirty_space_id[i];
+		dirty_space_id[i] = 0;
+	    }
+	    dirty_space_ids = 0;
+	}
+
+	return;
+}
+
+/*
+ * SMP variant: return the previously snapshotted dirty IDs to the
+ * free pool.  XOR clears each allocated bit (every dirty bit is set
+ * in space_id), and space_id_index is reset so the next allocation
+ * rescans from the start.
+ */
+static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array)
+{
+	int i;
+
+	/* NOTE: sid_lock must be held upon entry */
+
+	if (ndirty != 0) {
+		for (i = 0; i < SID_ARRAY_SIZE; i++) {
+			space_id[i] ^= dirty_array[i];
+		}
+
+		free_space_ids += ndirty;
+		space_id_index = 0;
+	}
+}
+
+#else /* CONFIG_SMP */
+
+/*
+ * UP variant: recycle the dirty space IDs directly from
+ * dirty_space_id, with no snapshot needed since there is no other
+ * CPU to flush.  XOR clears the allocated bits in space_id.
+ */
+static void recycle_sids(void)
+{
+	int i;
+
+	/* NOTE: sid_lock must be held upon entry */
+
+	if (dirty_space_ids != 0) {
+		for (i = 0; i < SID_ARRAY_SIZE; i++) {
+			space_id[i] ^= dirty_space_id[i];
+			dirty_space_id[i] = 0;
+		}
+
+		free_space_ids += dirty_space_ids;
+		dirty_space_ids = 0;
+		space_id_index = 0;
+	}
+}
+#endif
+
+/*
+ * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
+ * purged, we can safely reuse the space ids that were released but
+ * not flushed from the tlb.
+ */
+
+#ifdef CONFIG_SMP
+
+static unsigned long recycle_ndirty;
+static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
+static unsigned int recycle_inuse;
+
+/*
+ * SMP full TLB flush.  If enough space IDs have gone dirty
+ * (> RECYCLE_THRESHOLD), snapshot them under sid_lock, flush the TLB
+ * on every CPU, then return the snapshotted IDs to the free pool.
+ * recycle_inuse guards the single shared snapshot buffer.
+ */
+void flush_tlb_all(void)
+{
+	int do_recycle;
+
+	__inc_irq_stat(irq_tlb_count);
+	do_recycle = 0;
+	spin_lock(&sid_lock);
+	if (dirty_space_ids > RECYCLE_THRESHOLD) {
+	    BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
+	    get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
+	    recycle_inuse++;
+	    do_recycle++;
+	}
+	spin_unlock(&sid_lock);
+	/* Must run unlocked: each CPU flushes its own TLB. */
+	on_each_cpu(flush_tlb_all_local, NULL, 1);
+	if (do_recycle) {
+	    spin_lock(&sid_lock);
+	    recycle_sids(recycle_ndirty,recycle_dirty_array);
+	    recycle_inuse = 0;
+	    spin_unlock(&sid_lock);
+	}
+}
+#else
+/*
+ * UP full TLB flush: flush the local TLB and recycle all dirty space
+ * IDs in one critical section — no cross-CPU coordination needed.
+ */
+void flush_tlb_all(void)
+{
+	__inc_irq_stat(irq_tlb_count);
+	spin_lock(&sid_lock);
+	flush_tlb_all_local(NULL);
+	recycle_sids();
+	spin_unlock(&sid_lock);
+}
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+/* Return the initrd pages [start, end) to the buddy allocator. */
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	free_reserved_area((void *)start, (void *)end, -1, "initrd");
+}
+#endif
diff --git a/src/kernel/linux/v4.14/arch/parisc/mm/ioremap.c b/src/kernel/linux/v4.14/arch/parisc/mm/ioremap.c
new file mode 100644
index 0000000..f29f682
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/mm/ioremap.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch/parisc/mm/ioremap.c
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ * (C) Copyright 2001-2019 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <asm/pgalloc.h>
+
+/*
+ * Generic mapping function (not visible outside):
+ */
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+/*
+ * Map the physical range [phys_addr, phys_addr + size) into kernel
+ * virtual space with the page-protection bits in @flags OR'd in.
+ * Handles non-page-aligned requests by mapping whole pages and
+ * returning the address offset back into the mapping.  Refuses to
+ * remap normal RAM pages that are not marked reserved.  Returns NULL
+ * on invalid range, RAM conflict, or allocation failure.
+ */
+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+{
+	void __iomem *addr;
+	struct vm_struct *area;
+	unsigned long offset, last_addr;
+	pgprot_t pgprot;
+
+#ifdef CONFIG_EISA
+	unsigned long end = phys_addr + size - 1;
+	/* Support EISA addresses */
+	if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
+	    (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
+		/* Rewrite into the extended I/O space and force
+		 * uncached access. */
+		phys_addr |= F_EXTEND(0xfc000000);
+		flags |= _PAGE_NO_CACHE;
+	}
+#endif
+
+	/* Don't allow wraparound or zero size */
+	last_addr = phys_addr + size - 1;
+	if (!size || last_addr < phys_addr)
+		return NULL;
+
+	/*
+	 * Don't allow anybody to remap normal RAM that we're using..
+	 */
+	if (phys_addr < virt_to_phys(high_memory)) {
+		char *t_addr, *t_end;
+		struct page *page;
+
+		t_addr = __va(phys_addr);
+		t_end = t_addr + (size - 1);
+	   
+		/* Every page in the range must be reserved (i.e. not
+		 * owned by the page allocator) to be remappable. */
+		for (page = virt_to_page(t_addr); 
+		     page <= virt_to_page(t_end); page++) {
+			if(!PageReserved(page))
+				return NULL;
+		}
+	}
+
+	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
+			  _PAGE_ACCESSED | flags);
+
+	/*
+	 * Mappings have to be page-aligned
+	 */
+	offset = phys_addr & ~PAGE_MASK;
+	phys_addr &= PAGE_MASK;
+	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+	/*
+	 * Ok, go for it..
+	 */
+	area = get_vm_area(size, VM_IOREMAP);
+	if (!area)
+		return NULL;
+
+	addr = (void __iomem *) area->addr;
+	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+			       phys_addr, pgprot)) {
+		/* Failed to install PTEs: release the vm area. */
+		vunmap(addr);
+		return NULL;
+	}
+
+	/* Re-apply the sub-page offset stripped above. */
+	return (void __iomem *) (offset + (char __iomem *)addr);
+}
+EXPORT_SYMBOL(__ioremap);
+
+/*
+ * Undo an __ioremap(): mask off the sub-page offset and release the
+ * vmalloc-space mapping.  Addresses outside vmalloc space (e.g.
+ * direct-mapped ones) are silently ignored.
+ */
+void iounmap(const volatile void __iomem *io_addr)
+{
+	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
+
+	if (is_vmalloc_addr((void *)addr))
+		vunmap((void *)addr);
+}
+EXPORT_SYMBOL(iounmap);
diff --git a/src/kernel/linux/v4.14/arch/parisc/nm b/src/kernel/linux/v4.14/arch/parisc/nm
new file mode 100644
index 0000000..c788308
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/nm
@@ -0,0 +1,6 @@
+#!/bin/sh
+##
+# Hack to have an nm which removes the local symbols.  We also rely
+# on this nm being hidden out of the ordinarily executable path
+##
+${CROSS_COMPILE}nm $* | grep -v '.LC*[0-9]*$'
diff --git a/src/kernel/linux/v4.14/arch/parisc/oprofile/Makefile b/src/kernel/linux/v4.14/arch/parisc/oprofile/Makefile
new file mode 100644
index 0000000..86a1ccc
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/oprofile/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+		oprof.o cpu_buffer.o buffer_sync.o \
+		event_buffer.o oprofile_files.o \
+		oprofilefs.o oprofile_stats.o \
+		timer_int.o )
+
+oprofile-y				:= $(DRIVER_OBJS) init.o
diff --git a/src/kernel/linux/v4.14/arch/parisc/oprofile/init.c b/src/kernel/linux/v4.14/arch/parisc/oprofile/init.c
new file mode 100644
index 0000000..026cba2
--- /dev/null
+++ b/src/kernel/linux/v4.14/arch/parisc/oprofile/init.c
@@ -0,0 +1,23 @@
+/**
+ * @file init.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/oprofile.h>
+
+/* parisc has no hardware oprofile support; report -ENODEV so the
+ * core falls back to the timer-interrupt sampler. */
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+	return -ENODEV;
+}
+
+
+/* Nothing to tear down: oprofile_arch_init() registered nothing. */
+void oprofile_arch_exit(void)
+{
+}