[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/arm_common_tables.h b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/arm_common_tables.h
new file mode 100644
index 0000000..911ecd4
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/arm_common_tables.h
@@ -0,0 +1,136 @@
+/* ----------------------------------------------------------------------

+* Copyright (C) 2010-2014 ARM Limited. All rights reserved.

+*

+* $Date:        19. March 2015

+* $Revision: 	V.1.4.5

+*

+* Project: 	    CMSIS DSP Library

+* Title:	    arm_common_tables.h

+*

+* Description:	This file has extern declaration for common tables like Bitreverse, reciprocal etc which are used across different functions

+*

+* Target Processor: Cortex-M4/Cortex-M3

+*

+* Redistribution and use in source and binary forms, with or without

+* modification, are permitted provided that the following conditions

+* are met:

+*   - Redistributions of source code must retain the above copyright

+*     notice, this list of conditions and the following disclaimer.

+*   - Redistributions in binary form must reproduce the above copyright

+*     notice, this list of conditions and the following disclaimer in

+*     the documentation and/or other materials provided with the

+*     distribution.

+*   - Neither the name of ARM LIMITED nor the names of its contributors

+*     may be used to endorse or promote products derived from this

+*     software without specific prior written permission.

+*

+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS

+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT

+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS

+* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE

+* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,

+* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,

+* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;

+* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER

+* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT

+* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN

+* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE

+* POSSIBILITY OF SUCH DAMAGE.

+* -------------------------------------------------------------------- */

+

+#ifndef _ARM_COMMON_TABLES_H

+#define _ARM_COMMON_TABLES_H

+

+#include "arm_math.h"

+

+extern const uint16_t armBitRevTable[1024];

+extern const q15_t armRecipTableQ15[64];

+extern const q31_t armRecipTableQ31[64];

+//extern const q31_t realCoefAQ31[1024];

+//extern const q31_t realCoefBQ31[1024];

+extern const float32_t twiddleCoef_16[32];

+extern const float32_t twiddleCoef_32[64];

+extern const float32_t twiddleCoef_64[128];

+extern const float32_t twiddleCoef_128[256];

+extern const float32_t twiddleCoef_256[512];

+extern const float32_t twiddleCoef_512[1024];

+extern const float32_t twiddleCoef_1024[2048];

+extern const float32_t twiddleCoef_2048[4096];

+extern const float32_t twiddleCoef_4096[8192];

+#define twiddleCoef twiddleCoef_4096

+extern const q31_t twiddleCoef_16_q31[24];

+extern const q31_t twiddleCoef_32_q31[48];

+extern const q31_t twiddleCoef_64_q31[96];

+extern const q31_t twiddleCoef_128_q31[192];

+extern const q31_t twiddleCoef_256_q31[384];

+extern const q31_t twiddleCoef_512_q31[768];

+extern const q31_t twiddleCoef_1024_q31[1536];

+extern const q31_t twiddleCoef_2048_q31[3072];

+extern const q31_t twiddleCoef_4096_q31[6144];

+extern const q15_t twiddleCoef_16_q15[24];

+extern const q15_t twiddleCoef_32_q15[48];

+extern const q15_t twiddleCoef_64_q15[96];

+extern const q15_t twiddleCoef_128_q15[192];

+extern const q15_t twiddleCoef_256_q15[384];

+extern const q15_t twiddleCoef_512_q15[768];

+extern const q15_t twiddleCoef_1024_q15[1536];

+extern const q15_t twiddleCoef_2048_q15[3072];

+extern const q15_t twiddleCoef_4096_q15[6144];

+extern const float32_t twiddleCoef_rfft_32[32];

+extern const float32_t twiddleCoef_rfft_64[64];

+extern const float32_t twiddleCoef_rfft_128[128];

+extern const float32_t twiddleCoef_rfft_256[256];

+extern const float32_t twiddleCoef_rfft_512[512];

+extern const float32_t twiddleCoef_rfft_1024[1024];

+extern const float32_t twiddleCoef_rfft_2048[2048];

+extern const float32_t twiddleCoef_rfft_4096[4096];

+

+

+/* floating-point bit reversal tables */

+#define ARMBITREVINDEXTABLE__16_TABLE_LENGTH ((uint16_t)20  )

+#define ARMBITREVINDEXTABLE__32_TABLE_LENGTH ((uint16_t)48  )

+#define ARMBITREVINDEXTABLE__64_TABLE_LENGTH ((uint16_t)56  )

+#define ARMBITREVINDEXTABLE_128_TABLE_LENGTH ((uint16_t)208 )

+#define ARMBITREVINDEXTABLE_256_TABLE_LENGTH ((uint16_t)440 )

+#define ARMBITREVINDEXTABLE_512_TABLE_LENGTH ((uint16_t)448 )

+#define ARMBITREVINDEXTABLE1024_TABLE_LENGTH ((uint16_t)1800)

+#define ARMBITREVINDEXTABLE2048_TABLE_LENGTH ((uint16_t)3808)

+#define ARMBITREVINDEXTABLE4096_TABLE_LENGTH ((uint16_t)4032)

+

+extern const uint16_t armBitRevIndexTable16[ARMBITREVINDEXTABLE__16_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable32[ARMBITREVINDEXTABLE__32_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable64[ARMBITREVINDEXTABLE__64_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable128[ARMBITREVINDEXTABLE_128_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable256[ARMBITREVINDEXTABLE_256_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable512[ARMBITREVINDEXTABLE_512_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable1024[ARMBITREVINDEXTABLE1024_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable2048[ARMBITREVINDEXTABLE2048_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable4096[ARMBITREVINDEXTABLE4096_TABLE_LENGTH];

+

+/* fixed-point bit reversal tables */

+#define ARMBITREVINDEXTABLE_FIXED___16_TABLE_LENGTH ((uint16_t)12  )

+#define ARMBITREVINDEXTABLE_FIXED___32_TABLE_LENGTH ((uint16_t)24  )

+#define ARMBITREVINDEXTABLE_FIXED___64_TABLE_LENGTH ((uint16_t)56  )

+#define ARMBITREVINDEXTABLE_FIXED__128_TABLE_LENGTH ((uint16_t)112 )

+#define ARMBITREVINDEXTABLE_FIXED__256_TABLE_LENGTH ((uint16_t)240 )

+#define ARMBITREVINDEXTABLE_FIXED__512_TABLE_LENGTH ((uint16_t)480 )

+#define ARMBITREVINDEXTABLE_FIXED_1024_TABLE_LENGTH ((uint16_t)992 )

+#define ARMBITREVINDEXTABLE_FIXED_2048_TABLE_LENGTH ((uint16_t)1984)

+#define ARMBITREVINDEXTABLE_FIXED_4096_TABLE_LENGTH ((uint16_t)4032)

+

+extern const uint16_t armBitRevIndexTable_fixed_16[ARMBITREVINDEXTABLE_FIXED___16_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable_fixed_32[ARMBITREVINDEXTABLE_FIXED___32_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable_fixed_64[ARMBITREVINDEXTABLE_FIXED___64_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable_fixed_128[ARMBITREVINDEXTABLE_FIXED__128_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable_fixed_256[ARMBITREVINDEXTABLE_FIXED__256_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable_fixed_512[ARMBITREVINDEXTABLE_FIXED__512_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable_fixed_1024[ARMBITREVINDEXTABLE_FIXED_1024_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable_fixed_2048[ARMBITREVINDEXTABLE_FIXED_2048_TABLE_LENGTH];

+extern const uint16_t armBitRevIndexTable_fixed_4096[ARMBITREVINDEXTABLE_FIXED_4096_TABLE_LENGTH];

+

+/* Tables for Fast Math Sine and Cosine */

+extern const float32_t sinTable_f32[FAST_MATH_TABLE_SIZE + 1];

+extern const q31_t sinTable_q31[FAST_MATH_TABLE_SIZE + 1];

+extern const q15_t sinTable_q15[FAST_MATH_TABLE_SIZE + 1];

+

+#endif /*  ARM_COMMON_TABLES_H */

diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/arm_const_structs.h b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/arm_const_structs.h
new file mode 100644
index 0000000..54595f5
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/arm_const_structs.h
@@ -0,0 +1,79 @@
+/* ----------------------------------------------------------------------

+* Copyright (C) 2010-2014 ARM Limited. All rights reserved.

+*

+* $Date:        19. March 2015

+* $Revision: 	V.1.4.5

+*

+* Project: 	    CMSIS DSP Library

+* Title:	    arm_const_structs.h

+*

+* Description:	This file has constant structs that are initialized for

+*              user convenience.  For example, some can be given as

+*              arguments to the arm_cfft_f32() function.

+*

+* Target Processor: Cortex-M4/Cortex-M3

+*

+* Redistribution and use in source and binary forms, with or without

+* modification, are permitted provided that the following conditions

+* are met:

+*   - Redistributions of source code must retain the above copyright

+*     notice, this list of conditions and the following disclaimer.

+*   - Redistributions in binary form must reproduce the above copyright

+*     notice, this list of conditions and the following disclaimer in

+*     the documentation and/or other materials provided with the

+*     distribution.

+*   - Neither the name of ARM LIMITED nor the names of its contributors

+*     may be used to endorse or promote products derived from this

+*     software without specific prior written permission.

+*

+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS

+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT

+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS

+* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE

+* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,

+* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,

+* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;

+* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER

+* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT

+* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN

+* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE

+* POSSIBILITY OF SUCH DAMAGE.

+* -------------------------------------------------------------------- */

+

+#ifndef _ARM_CONST_STRUCTS_H

+#define _ARM_CONST_STRUCTS_H

+

+#include "arm_math.h"

+#include "arm_common_tables.h"

+

+   extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len16;

+   extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len32;

+   extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len64;

+   extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len128;

+   extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len256;

+   extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len512;

+   extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len1024;

+   extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len2048;

+   extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len4096;

+

+   extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len16;

+   extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len32;

+   extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len64;

+   extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len128;

+   extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len256;

+   extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len512;

+   extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len1024;

+   extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len2048;

+   extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len4096;

+

+   extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len16;

+   extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len32;

+   extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len64;

+   extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len128;

+   extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len256;

+   extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len512;

+   extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len1024;

+   extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len2048;

+   extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len4096;

+

+#endif

diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/arm_math.h b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/arm_math.h
new file mode 100644
index 0000000..6dd430d
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/arm_math.h
@@ -0,0 +1,7556 @@
+/* ----------------------------------------------------------------------

+* Copyright (C) 2010-2015 ARM Limited. All rights reserved.

+*

+* $Date:        19. March 2015

+* $Revision: 	V.1.4.5

+*

+* Project: 	    CMSIS DSP Library

+* Title:	    arm_math.h

+*

+* Description:	Public header file for CMSIS DSP Library

+*

+* Target Processor: Cortex-M7/Cortex-M4/Cortex-M3/Cortex-M0

+*

+* Redistribution and use in source and binary forms, with or without

+* modification, are permitted provided that the following conditions

+* are met:

+*   - Redistributions of source code must retain the above copyright

+*     notice, this list of conditions and the following disclaimer.

+*   - Redistributions in binary form must reproduce the above copyright

+*     notice, this list of conditions and the following disclaimer in

+*     the documentation and/or other materials provided with the

+*     distribution.

+*   - Neither the name of ARM LIMITED nor the names of its contributors

+*     may be used to endorse or promote products derived from this

+*     software without specific prior written permission.

+*

+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS

+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT

+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS

+* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE

+* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,

+* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,

+* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;

+* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER

+* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT

+* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN

+* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE

+* POSSIBILITY OF SUCH DAMAGE.

+ * -------------------------------------------------------------------- */

+

+/**

+   \mainpage CMSIS DSP Software Library

+   *

+   * Introduction

+   * ------------

+   *

+   * This user manual describes the CMSIS DSP software library,

+   * a suite of common signal processing functions for use on Cortex-M processor based devices.

+   *

+   * The library is divided into a number of functions each covering a specific category:

+   * - Basic math functions

+   * - Fast math functions

+   * - Complex math functions

+   * - Filters

+   * - Matrix functions

+   * - Transforms

+   * - Motor control functions

+   * - Statistical functions

+   * - Support functions

+   * - Interpolation functions

+   *

+   * The library has separate functions for operating on 8-bit integers, 16-bit integers,

+   * 32-bit integer and 32-bit floating-point values.

+   *

+   * Using the Library

+   * ------------

+   *

+   * The library installer contains prebuilt versions of the libraries in the <code>Lib</code> folder.

+   * - arm_cortexM7lfdp_math.lib (Little endian and Double Precision Floating Point Unit on Cortex-M7)

+   * - arm_cortexM7bfdp_math.lib (Big endian and Double Precision Floating Point Unit on Cortex-M7)

+   * - arm_cortexM7lfsp_math.lib (Little endian and Single Precision Floating Point Unit on Cortex-M7)

+   * - arm_cortexM7bfsp_math.lib (Big endian and Single Precision Floating Point Unit on Cortex-M7)

+   * - arm_cortexM7l_math.lib (Little endian on Cortex-M7)

+   * - arm_cortexM7b_math.lib (Big endian on Cortex-M7)

+   * - arm_cortexM4lf_math.lib (Little endian and Floating Point Unit on Cortex-M4)

+   * - arm_cortexM4bf_math.lib (Big endian and Floating Point Unit on Cortex-M4)

+   * - arm_cortexM4l_math.lib (Little endian on Cortex-M4)

+   * - arm_cortexM4b_math.lib (Big endian on Cortex-M4)

+   * - arm_cortexM3l_math.lib (Little endian on Cortex-M3)

+   * - arm_cortexM3b_math.lib (Big endian on Cortex-M3)

+   * - arm_cortexM0l_math.lib (Little endian on Cortex-M0 / CortexM0+)

+   * - arm_cortexM0b_math.lib (Big endian on Cortex-M0 / CortexM0+)

+   *

+   * The library functions are declared in the public file <code>arm_math.h</code> which is placed in the <code>Include</code> folder.

+   * Simply include this file and link the appropriate library in the application and begin calling the library functions. The Library supports single

+   * public header file <code> arm_math.h</code> for Cortex-M7/M4/M3/M0/M0+ with little endian and big endian. Same header file will be used for floating point unit(FPU) variants.

+   * Define the appropriate pre processor MACRO ARM_MATH_CM7 or ARM_MATH_CM4 or  ARM_MATH_CM3 or

+   * ARM_MATH_CM0 or ARM_MATH_CM0PLUS depending on the target processor in the application.

+   *

+   * Examples

+   * --------

+   *

+   * The library ships with a number of examples which demonstrate how to use the library functions.

+   *

+   * Toolchain Support

+   * ------------

+   *

+   * The library has been developed and tested with MDK-ARM version 5.14.0.0

+   * The library is being tested in GCC and IAR toolchains and updates on this activity will be made available shortly.

+   *

+   * Building the Library

+   * ------------

+   *

+   * The library installer contains a project file to re build libraries on MDK-ARM Tool chain in the <code>CMSIS\\DSP_Lib\\Source\\ARM</code> folder.

+   * - arm_cortexM_math.uvprojx

+   *

+   *

+   * The libraries can be built by opening the arm_cortexM_math.uvprojx project in MDK-ARM, selecting a specific target, and defining the optional pre processor MACROs detailed above.

+   *

+   * Pre-processor Macros

+   * ------------

+   *

+   * Each library project have differant pre-processor macros.

+   *

+   * - UNALIGNED_SUPPORT_DISABLE:

+   *

+   * Define macro UNALIGNED_SUPPORT_DISABLE, If the silicon does not support unaligned memory access

+   *

+   * - ARM_MATH_BIG_ENDIAN:

+   *

+   * Define macro ARM_MATH_BIG_ENDIAN to build the library for big endian targets. By default library builds for little endian targets.

+   *

+   * - ARM_MATH_MATRIX_CHECK:

+   *

+   * Define macro ARM_MATH_MATRIX_CHECK for checking on the input and output sizes of matrices

+   *

+   * - ARM_MATH_ROUNDING:

+   *

+   * Define macro ARM_MATH_ROUNDING for rounding on support functions

+   *

+   * - ARM_MATH_CMx:

+   *

+   * Define macro ARM_MATH_CM4 for building the library on Cortex-M4 target, ARM_MATH_CM3 for building library on Cortex-M3 target

+   * and ARM_MATH_CM0 for building library on Cortex-M0 target, ARM_MATH_CM0PLUS for building library on Cortex-M0+ target, and

+   * ARM_MATH_CM7 for building the library on cortex-M7.

+   *

+   * - __FPU_PRESENT:

+   *

+   * Initialize macro __FPU_PRESENT = 1 when building on FPU supported Targets. Enable this macro for M4bf and M4lf libraries

+   *

+   * <hr>

+   * CMSIS-DSP in ARM::CMSIS Pack

+   * -----------------------------

+   * 

+   * The following files relevant to CMSIS-DSP are present in the <b>ARM::CMSIS</b> Pack directories:

+   * |File/Folder                   |Content                                                                 |

+   * |------------------------------|------------------------------------------------------------------------|

+   * |\b CMSIS\\Documentation\\DSP  | This documentation                                                     |

+   * |\b CMSIS\\DSP_Lib             | Software license agreement (license.txt)                               |

+   * |\b CMSIS\\DSP_Lib\\Examples   | Example projects demonstrating the usage of the library functions      |

+   * |\b CMSIS\\DSP_Lib\\Source     | Source files for rebuilding the library                                |

+   * 

+   * <hr>

+   * Revision History of CMSIS-DSP

+   * ------------

+   * Please refer to \ref ChangeLog_pg.

+   *

+   * Copyright Notice

+   * ------------

+   *

+   * Copyright (C) 2010-2015 ARM Limited. All rights reserved.

+   */

+

+

+/**

+ * @defgroup groupMath Basic Math Functions

+ */

+

+/**

+ * @defgroup groupFastMath Fast Math Functions

+ * This set of functions provides a fast approximation to sine, cosine, and square root.

+ * As compared to most of the other functions in the CMSIS math library, the fast math functions

+ * operate on individual values and not arrays.

+ * There are separate functions for Q15, Q31, and floating-point data.

+ *

+ */

+

+/**

+ * @defgroup groupCmplxMath Complex Math Functions

+ * This set of functions operates on complex data vectors.

+ * The data in the complex arrays is stored in an interleaved fashion

+ * (real, imag, real, imag, ...).

+ * In the API functions, the number of samples in a complex array refers

+ * to the number of complex values; the array contains twice this number of

+ * real values.

+ */

+

+/**

+ * @defgroup groupFilters Filtering Functions

+ */

+

+/**

+ * @defgroup groupMatrix Matrix Functions

+ *

+ * This set of functions provides basic matrix math operations.

+ * The functions operate on matrix data structures.  For example,

+ * the type

+ * definition for the floating-point matrix structure is shown

+ * below:

+ * <pre>

+ *     typedef struct

+ *     {

+ *       uint16_t numRows;     // number of rows of the matrix.

+ *       uint16_t numCols;     // number of columns of the matrix.

+ *       float32_t *pData;     // points to the data of the matrix.

+ *     } arm_matrix_instance_f32;

+ * </pre>

+ * There are similar definitions for Q15 and Q31 data types.

+ *

+ * The structure specifies the size of the matrix and then points to

+ * an array of data.  The array is of size <code>numRows X numCols</code>

+ * and the values are arranged in row order.  That is, the

+ * matrix element (i, j) is stored at:

+ * <pre>

+ *     pData[i*numCols + j]

+ * </pre>

+ *

+ * \par Init Functions

+ * There is an associated initialization function for each type of matrix

+ * data structure.

+ * The initialization function sets the values of the internal structure fields.

+ * Refer to the function <code>arm_mat_init_f32()</code>, <code>arm_mat_init_q31()</code>

+ * and <code>arm_mat_init_q15()</code> for floating-point, Q31 and Q15 types,  respectively.

+ *

+ * \par

+ * Use of the initialization function is optional. However, if initialization function is used

+ * then the instance structure cannot be placed into a const data section.

+ * To place the instance structure in a const data

+ * section, manually initialize the data structure.  For example:

+ * <pre>

+ * <code>arm_matrix_instance_f32 S = {nRows, nColumns, pData};</code>

+ * <code>arm_matrix_instance_q31 S = {nRows, nColumns, pData};</code>

+ * <code>arm_matrix_instance_q15 S = {nRows, nColumns, pData};</code>

+ * </pre>

+ * where <code>nRows</code> specifies the number of rows, <code>nColumns</code>

+ * specifies the number of columns, and <code>pData</code> points to the

+ * data array.

+ *

+ * \par Size Checking

+ * By default all of the matrix functions perform size checking on the input and

+ * output matrices.  For example, the matrix addition function verifies that the

+ * two input matrices and the output matrix all have the same number of rows and

+ * columns.  If the size check fails the functions return:

+ * <pre>

+ *     ARM_MATH_SIZE_MISMATCH

+ * </pre>

+ * Otherwise the functions return

+ * <pre>

+ *     ARM_MATH_SUCCESS

+ * </pre>

+ * There is some overhead associated with this matrix size checking.

+ * The matrix size checking is enabled via the \#define

+ * <pre>

+ *     ARM_MATH_MATRIX_CHECK

+ * </pre>

+ * within the library project settings.  By default this macro is defined

+ * and size checking is enabled.  By changing the project settings and

+ * undefining this macro size checking is eliminated and the functions

+ * run a bit faster.  With size checking disabled the functions always

+ * return <code>ARM_MATH_SUCCESS</code>.

+ */

+

+/**

+ * @defgroup groupTransforms Transform Functions

+ */

+

+/**

+ * @defgroup groupController Controller Functions

+ */

+

+/**

+ * @defgroup groupStats Statistics Functions

+ */

+/**

+ * @defgroup groupSupport Support Functions

+ */

+

+/**

+ * @defgroup groupInterpolation Interpolation Functions

+ * These functions perform 1- and 2-dimensional interpolation of data.

+ * Linear interpolation is used for 1-dimensional data and

+ * bilinear interpolation is used for 2-dimensional data.

+ */

+

+/**

+ * @defgroup groupExamples Examples

+ */

+#ifndef _ARM_MATH_H

+#define _ARM_MATH_H

+

+#define __CMSIS_GENERIC         /* disable NVIC and Systick functions */

+

+#if defined(ARM_MATH_CM7)

+  #include "core_cm7.h"

+#elif defined (ARM_MATH_CM4)

+  #include "core_cm4.h"

+#elif defined (ARM_MATH_CM3)

+  #include "core_cm3.h"

+#elif defined (ARM_MATH_CM0)

+  #include "core_cm0.h"

+#define ARM_MATH_CM0_FAMILY

+  #elif defined (ARM_MATH_CM0PLUS)

+#include "core_cm0plus.h"

+  #define ARM_MATH_CM0_FAMILY

+#else

+  #error "Define according the used Cortex core ARM_MATH_CM7, ARM_MATH_CM4, ARM_MATH_CM3, ARM_MATH_CM0PLUS or ARM_MATH_CM0"

+#endif

+

+#undef  __CMSIS_GENERIC         /* enable NVIC and Systick functions */

+#include "string.h"

+#include "math.h"

+#ifdef	__cplusplus

+extern "C"

+{

+#endif

+

+

+  /**

+   * @brief Macros required for reciprocal calculation in Normalized LMS

+   */

+

+#define DELTA_Q31 			(0x100)

+#define DELTA_Q15 			0x5

+#define INDEX_MASK 			0x0000003F

+#ifndef PI

+#define PI					3.14159265358979f

+#endif

+

+  /**

+   * @brief Macros required for SINE and COSINE Fast math approximations

+   */

+

+#define FAST_MATH_TABLE_SIZE  512

+#define FAST_MATH_Q31_SHIFT   (32 - 10)

+#define FAST_MATH_Q15_SHIFT   (16 - 10)

+#define CONTROLLER_Q31_SHIFT  (32 - 9)

+#define TABLE_SIZE  256

+#define TABLE_SPACING_Q31	   0x400000

+#define TABLE_SPACING_Q15	   0x80

+

+  /**

+   * @brief Macros required for SINE and COSINE Controller functions

+   */

+  /* 1.31(q31) Fixed value of 2/360 */

+  /* -1 to +1 is divided into 360 values so total spacing is (2/360) */

+#define INPUT_SPACING			0xB60B61

+

+  /**

+   * @brief Macro for Unaligned Support

+   */

+#ifndef UNALIGNED_SUPPORT_DISABLE

+    #define ALIGN4

+#else

+  #if defined  (__GNUC__)

+    #define ALIGN4 __attribute__((aligned(4)))

+  #else

+    #define ALIGN4 __align(4)

+  #endif

+#endif	/*	#ifndef UNALIGNED_SUPPORT_DISABLE	*/

+

+  /**

+   * @brief Error status returned by some functions in the library.

+   */

+

+  typedef enum

+  {

+    ARM_MATH_SUCCESS = 0,                /**< No error */

+    ARM_MATH_ARGUMENT_ERROR = -1,        /**< One or more arguments are incorrect */

+    ARM_MATH_LENGTH_ERROR = -2,          /**< Length of data buffer is incorrect */

+    ARM_MATH_SIZE_MISMATCH = -3,         /**< Size of matrices is not compatible with the operation. */

+    ARM_MATH_NANINF = -4,                /**< Not-a-number (NaN) or infinity is generated */

+    ARM_MATH_SINGULAR = -5,              /**< Generated by matrix inversion if the input matrix is singular and cannot be inverted. */

+    ARM_MATH_TEST_FAILURE = -6           /**< Test Failed  */

+  } arm_status;

+

+  /**

+   * @brief 8-bit fractional data type in 1.7 format.

+   */

+  typedef int8_t q7_t;

+

+  /**

+   * @brief 16-bit fractional data type in 1.15 format.

+   */

+  typedef int16_t q15_t;

+

+  /**

+   * @brief 32-bit fractional data type in 1.31 format.

+   */

+  typedef int32_t q31_t;

+

+  /**

+   * @brief 64-bit fractional data type in 1.63 format.

+   */

+  typedef int64_t q63_t;

+

+  /**

+   * @brief 32-bit floating-point type definition.

+   */

+  typedef float float32_t;

+

+  /**

+   * @brief 64-bit floating-point type definition.

+   */

+  typedef double float64_t;

+

+  /**

+   * @brief definition to read/write two 16 bit values.

+   */

+#if defined __CC_ARM

+  #define __SIMD32_TYPE int32_t __packed

+  #define CMSIS_UNUSED __attribute__((unused))

+#elif defined __ICCARM__

+  #define __SIMD32_TYPE int32_t __packed

+  #define CMSIS_UNUSED

+#elif defined __GNUC__

+  #define __SIMD32_TYPE int32_t

+  #define CMSIS_UNUSED __attribute__((unused))

+#elif defined __CSMC__			/* Cosmic */

+  #define __SIMD32_TYPE int32_t

+  #define CMSIS_UNUSED

+#elif defined __TASKING__

+  #define __SIMD32_TYPE __unaligned int32_t

+  #define CMSIS_UNUSED

+#else

+  #error Unknown compiler

+#endif

+

+#define __SIMD32(addr)  (*(__SIMD32_TYPE **) & (addr))

+#define __SIMD32_CONST(addr)  ((__SIMD32_TYPE *)(addr))

+

+#define _SIMD32_OFFSET(addr)  (*(__SIMD32_TYPE *)  (addr))

+

+#define __SIMD64(addr)  (*(int64_t **) & (addr))

+

+#if defined (ARM_MATH_CM3) || defined (ARM_MATH_CM0_FAMILY)

+  /**

+   * @brief definition to pack two 16 bit values.

+   */

+#define __PKHBT(ARG1, ARG2, ARG3)      ( (((int32_t)(ARG1) <<  0) & (int32_t)0x0000FFFF) | \

+                                         (((int32_t)(ARG2) << ARG3) & (int32_t)0xFFFF0000)  )

+#define __PKHTB(ARG1, ARG2, ARG3)      ( (((int32_t)(ARG1) <<  0) & (int32_t)0xFFFF0000) | \

+                                         (((int32_t)(ARG2) >> ARG3) & (int32_t)0x0000FFFF)  )

+

+#endif

+

+

+   /**

+   * @brief definition to pack four 8 bit values.

+   */

+#ifndef ARM_MATH_BIG_ENDIAN

+

+#define __PACKq7(v0,v1,v2,v3) ( (((int32_t)(v0) <<  0) & (int32_t)0x000000FF) |	\

+                                (((int32_t)(v1) <<  8) & (int32_t)0x0000FF00) |	\

+							    (((int32_t)(v2) << 16) & (int32_t)0x00FF0000) |	\

+							    (((int32_t)(v3) << 24) & (int32_t)0xFF000000)  )

+#else

+

+#define __PACKq7(v0,v1,v2,v3) ( (((int32_t)(v3) <<  0) & (int32_t)0x000000FF) |	\

+                                (((int32_t)(v2) <<  8) & (int32_t)0x0000FF00) |	\

+							    (((int32_t)(v1) << 16) & (int32_t)0x00FF0000) |	\

+							    (((int32_t)(v0) << 24) & (int32_t)0xFF000000)  )

+

+#endif

+

+

+  /**

+   * @brief Clips Q63 to Q31 values.

+   */

+  static __INLINE q31_t clip_q63_to_q31(

+  q63_t x)

+  {

+    return ((q31_t) (x >> 32) != ((q31_t) x >> 31)) ?

+      ((0x7FFFFFFF ^ ((q31_t) (x >> 63)))) : (q31_t) x;

+  }

+

+  /**

+   * @brief Clips Q63 to Q15 values.

+   */

+  static __INLINE q15_t clip_q63_to_q15(

+  q63_t x)

+  {

+    return ((q31_t) (x >> 32) != ((q31_t) x >> 31)) ?

+      ((0x7FFF ^ ((q15_t) (x >> 63)))) : (q15_t) (x >> 15);

+  }

+

+  /**

+   * @brief Clips Q31 to Q7 values.

+   */

+  static __INLINE q7_t clip_q31_to_q7(

+  q31_t x)

+  {

+    return ((q31_t) (x >> 24) != ((q31_t) x >> 23)) ?

+      ((0x7F ^ ((q7_t) (x >> 31)))) : (q7_t) x;

+  }

+

+  /**

+   * @brief Clips Q31 to Q15 values.

+   */

+  static __INLINE q15_t clip_q31_to_q15(

+  q31_t x)

+  {

+    return ((q31_t) (x >> 16) != ((q31_t) x >> 15)) ?

+      ((0x7FFF ^ ((q15_t) (x >> 31)))) : (q15_t) x;

+  }

+

+  /**

+   * @brief Multiplies 32 X 64 and returns 32 bit result in 2.30 format.

+   */

+

+  static __INLINE q63_t mult32x64(

+  q63_t x,

+  q31_t y)

+  {

+    return ((((q63_t) (x & 0x00000000FFFFFFFF) * y) >> 32) +

+            (((q63_t) (x >> 32) * y)));

+  }

+

+

//#if defined (ARM_MATH_CM0_FAMILY) && defined ( __CC_ARM   )
//#define __CLZ __clz
//#endif

//note: function can be removed when all toolchain support __CLZ for Cortex-M0
#if defined (ARM_MATH_CM0_FAMILY) && ((defined (__ICCARM__))  )

  /**
   * @brief  Software count-leading-zeros for toolchains lacking a __CLZ
   *         intrinsic on Cortex-M0.
   * @param[in] data  input value.
   * @return number of leading zero bits (32 when data == 0, matching the
   *         hardware CLZ instruction).
   */
  static __INLINE uint32_t __CLZ(
  q31_t data);


  static __INLINE uint32_t __CLZ(
  q31_t data)
  {
    uint32_t count = 0;
    uint32_t mask = 0x80000000;

    /* FIX: the scan loop below never terminates for data == 0 (once mask
       shifts down to 0 the condition is always true).  Hardware CLZ
       defines this case as 32. */
    if (data == 0)
    {
      return 32u;
    }

    while((data & mask) == 0)
    {
      count += 1u;
      mask = mask >> 1u;
    }

    return (count);
  }

#endif

+

  /**
   * @brief  Calculates 1/in (reciprocal) of a Q31 value via table lookup
   *         followed by two Newton-Raphson refinement iterations.
   * @param[in]  in           input sample.  NOTE(review): in == 0 would pass
   *                          0 to __CLZ — confirm callers never do this.
   * @param[out] dst          points to the normalized reciprocal value.
   * @param[in]  pRecipTable  points to the table of initial estimates.
   * @return number of sign bits of the result (i.e. the output exponent).
   */

  static __INLINE uint32_t arm_recip_q31(
  q31_t in,
  q31_t * dst,
  q31_t * pRecipTable)
  {

    uint32_t out, tempVal;
    uint32_t index, i;
    uint32_t signBits;

    /* Count the redundant sign bits so the input can be normalized. */
    if(in > 0)
    {
      signBits = __CLZ(in) - 1;
    }
    else
    {
      signBits = __CLZ(-in) - 1;
    }

    /* Convert input sample to 1.31 format */
    in = in << signBits;

    /* calculation of index for initial approximated Val */
    index = (uint32_t) (in >> 24u);
    index = (index & INDEX_MASK);

    /* 1.31 with exp 1 */
    out = pRecipTable[index];

    /* calculation of reciprocal value */
    /* running approximation for two iterations
       (Newton-Raphson step: out' = out * (2 - in * out)) */
    for (i = 0u; i < 2u; i++)
    {
      tempVal = (q31_t) (((q63_t) in * out) >> 31u);
      tempVal = 0x7FFFFFFF - tempVal;
      /*      1.31 with exp 1 */
      //out = (q31_t) (((q63_t) out * tempVal) >> 30u);
      out = (q31_t) clip_q63_to_q31(((q63_t) out * tempVal) >> 30u);
    }

    /* write output */
    *dst = out;

    /* return num of signbits of out = 1/in value */
    return (signBits + 1u);

  }

+

  /**
   * @brief  Calculates 1/in (reciprocal) of a Q15 value via table lookup
   *         followed by two Newton-Raphson refinement iterations.
   * @param[in]  in           input sample.  NOTE(review): in == 0 would pass
   *                          0 to __CLZ — confirm callers never do this.
   * @param[out] dst          points to the normalized reciprocal value.
   * @param[in]  pRecipTable  points to the table of initial estimates.
   * @return number of sign bits of the result (i.e. the output exponent).
   */
  static __INLINE uint32_t arm_recip_q15(
  q15_t in,
  q15_t * dst,
  q15_t * pRecipTable)
  {

    uint32_t out = 0, tempVal = 0;
    uint32_t index = 0, i = 0;
    uint32_t signBits = 0;

    /* Count the redundant sign bits (input occupies the low 16 of the
       32 bits seen by __CLZ, hence the -17). */
    if(in > 0)
    {
      signBits = __CLZ(in) - 17;
    }
    else
    {
      signBits = __CLZ(-in) - 17;
    }

    /* Convert input sample to 1.15 format */
    in = in << signBits;

    /* calculation of index for initial approximated Val */
    index = in >> 8;
    index = (index & INDEX_MASK);

    /*      1.15 with exp 1  */
    out = pRecipTable[index];

    /* calculation of reciprocal value */
    /* running approximation for two iterations
       (Newton-Raphson step: out' = out * (2 - in * out)) */
    for (i = 0; i < 2; i++)
    {
      tempVal = (q15_t) (((q31_t) in * out) >> 15);
      tempVal = 0x7FFF - tempVal;
      /*      1.15 with exp 1 */
      out = (q15_t) (((q31_t) out * tempVal) >> 14);
    }

    /* write output */
    *dst = out;

    /* return num of signbits of out = 1/in value */
    return (signBits + 1);

  }

+

+

  /*
   * @brief C custom defined intrinsic function for only M0 processors
   */
#if defined(ARM_MATH_CM0_FAMILY)

  /**
   * @brief  Software model of the ARM SSAT instruction: saturate x to a
   *         signed y-bit value.  Callers in this file use y = 8 or y = 16;
   *         behavior for y outside [1, 31] is not defined here.
   * @param[in] x  value to saturate.
   * @param[in] y  bit position to saturate to.
   * @return x clamped to [-(2^(y-1)), 2^(y-1) - 1].
   */
  static __INLINE q31_t __SSAT(
  q31_t x,
  uint32_t y)
  {
    int32_t posMax, negMin;
    uint32_t i;

    /* posMax = 2^(y-1) computed by repeated doubling */
    posMax = 1;
    for (i = 0; i < (y - 1); i++)
    {
      posMax = posMax * 2;
    }

    if(x > 0)
    {
      /* upper bound is 2^(y-1) - 1 */
      posMax = (posMax - 1);

      if(x > posMax)
      {
        x = posMax;
      }
    }
    else
    {
      /* lower bound is -(2^(y-1)) */
      negMin = -posMax;

      if(x < negMin)
      {
        x = negMin;
      }
    }
    return (x);


  }

#endif /* end of ARM_MATH_CM0_FAMILY */

+

+

+

+  /*

+   * @brief C custom defined intrinsic function for M3 and M0 processors

+   */

+#if defined (ARM_MATH_CM3) || defined (ARM_MATH_CM0_FAMILY)

+

+  /*

+   * @brief C custom defined QADD8 for M3 and M0 processors

+   */

+  static __INLINE q31_t __QADD8(

+  q31_t x,

+  q31_t y)

+  {

+

+    q31_t sum;

+    q7_t r, s, t, u;

+

+    r = (q7_t) x;

+    s = (q7_t) y;

+

+    r = __SSAT((q31_t) (r + s), 8);

+    s = __SSAT(((q31_t) (((x << 16) >> 24) + ((y << 16) >> 24))), 8);

+    t = __SSAT(((q31_t) (((x << 8) >> 24) + ((y << 8) >> 24))), 8);

+    u = __SSAT(((q31_t) ((x >> 24) + (y >> 24))), 8);

+

+    sum =

+      (((q31_t) u << 24) & 0xFF000000) | (((q31_t) t << 16) & 0x00FF0000) |

+      (((q31_t) s << 8) & 0x0000FF00) | (r & 0x000000FF);

+

+    return sum;

+

+  }

+

+  /*

+   * @brief C custom defined QSUB8 for M3 and M0 processors

+   */

+  static __INLINE q31_t __QSUB8(

+  q31_t x,

+  q31_t y)

+  {

+

+    q31_t sum;

+    q31_t r, s, t, u;

+

+    r = (q7_t) x;

+    s = (q7_t) y;

+

+    r = __SSAT((r - s), 8);

+    s = __SSAT(((q31_t) (((x << 16) >> 24) - ((y << 16) >> 24))), 8) << 8;

+    t = __SSAT(((q31_t) (((x << 8) >> 24) - ((y << 8) >> 24))), 8) << 16;

+    u = __SSAT(((q31_t) ((x >> 24) - (y >> 24))), 8) << 24;

+

+    sum =

+      (u & 0xFF000000) | (t & 0x00FF0000) | (s & 0x0000FF00) | (r &

+                                                                0x000000FF);

+

+    return sum;

+  }

+

+  /*

+   * @brief C custom defined QADD16 for M3 and M0 processors

+   */

+

+  /*

+   * @brief C custom defined QADD16 for M3 and M0 processors

+   */

+  static __INLINE q31_t __QADD16(

+  q31_t x,

+  q31_t y)

+  {

+

+    q31_t sum;

+    q31_t r, s;

+

+    r = (q15_t) x;

+    s = (q15_t) y;

+

+    r = __SSAT(r + s, 16);

+    s = __SSAT(((q31_t) ((x >> 16) + (y >> 16))), 16) << 16;

+

+    sum = (s & 0xFFFF0000) | (r & 0x0000FFFF);

+

+    return sum;

+

+  }

+

+  /*

+   * @brief C custom defined SHADD16 for M3 and M0 processors

+   */

+  static __INLINE q31_t __SHADD16(

+  q31_t x,

+  q31_t y)

+  {

+

+    q31_t sum;

+    q31_t r, s;

+

+    r = (q15_t) x;

+    s = (q15_t) y;

+

+    r = ((r >> 1) + (s >> 1));

+    s = ((q31_t) ((x >> 17) + (y >> 17))) << 16;

+

+    sum = (s & 0xFFFF0000) | (r & 0x0000FFFF);

+

+    return sum;

+

+  }

+

+  /*

+   * @brief C custom defined QSUB16 for M3 and M0 processors

+   */

+  static __INLINE q31_t __QSUB16(

+  q31_t x,

+  q31_t y)

+  {

+

+    q31_t sum;

+    q31_t r, s;

+

+    r = (q15_t) x;

+    s = (q15_t) y;

+

+    r = __SSAT(r - s, 16);

+    s = __SSAT(((q31_t) ((x >> 16) - (y >> 16))), 16) << 16;

+

+    sum = (s & 0xFFFF0000) | (r & 0x0000FFFF);

+

+    return sum;

+  }

+

+  /*

+   * @brief C custom defined SHSUB16 for M3 and M0 processors

+   */

+  static __INLINE q31_t __SHSUB16(

+  q31_t x,

+  q31_t y)

+  {

+

+    q31_t diff;

+    q31_t r, s;

+

+    r = (q15_t) x;

+    s = (q15_t) y;

+

+    r = ((r >> 1) - (s >> 1));

+    s = (((x >> 17) - (y >> 17)) << 16);

+

+    diff = (s & 0xFFFF0000) | (r & 0x0000FFFF);

+

+    return diff;

+  }

+

+  /*

+   * @brief C custom defined QASX for M3 and M0 processors

+   */

+  static __INLINE q31_t __QASX(

+  q31_t x,

+  q31_t y)

+  {

+

+    q31_t sum = 0;

+

+    sum =

+      ((sum +

+        clip_q31_to_q15((q31_t) ((q15_t) (x >> 16) + (q15_t) y))) << 16) +

+      clip_q31_to_q15((q31_t) ((q15_t) x - (q15_t) (y >> 16)));

+

+    return sum;

+  }

+

+  /*

+   * @brief C custom defined SHASX for M3 and M0 processors

+   */

+  static __INLINE q31_t __SHASX(

+  q31_t x,

+  q31_t y)

+  {

+

+    q31_t sum;

+    q31_t r, s;

+

+    r = (q15_t) x;

+    s = (q15_t) y;

+

+    r = ((r >> 1) - (y >> 17));

+    s = (((x >> 17) + (s >> 1)) << 16);

+

+    sum = (s & 0xFFFF0000) | (r & 0x0000FFFF);

+

+    return sum;

+  }

+

+

+  /*

+   * @brief C custom defined QSAX for M3 and M0 processors

+   */

+  static __INLINE q31_t __QSAX(

+  q31_t x,

+  q31_t y)

+  {

+

+    q31_t sum = 0;

+

+    sum =

+      ((sum +

+        clip_q31_to_q15((q31_t) ((q15_t) (x >> 16) - (q15_t) y))) << 16) +

+      clip_q31_to_q15((q31_t) ((q15_t) x + (q15_t) (y >> 16)));

+

+    return sum;

+  }

+

+  /*

+   * @brief C custom defined SHSAX for M3 and M0 processors

+   */

+  static __INLINE q31_t __SHSAX(

+  q31_t x,

+  q31_t y)

+  {

+

+    q31_t sum;

+    q31_t r, s;

+

+    r = (q15_t) x;

+    s = (q15_t) y;

+

+    r = ((r >> 1) + (y >> 17));

+    s = (((x >> 17) - (s >> 1)) << 16);

+

+    sum = (s & 0xFFFF0000) | (r & 0x0000FFFF);

+

+    return sum;

+  }

+

+  /*

+   * @brief C custom defined SMUSDX for M3 and M0 processors

+   */

+  static __INLINE q31_t __SMUSDX(

+  q31_t x,

+  q31_t y)

+  {

+

+    return ((q31_t) (((q15_t) x * (q15_t) (y >> 16)) -

+                     ((q15_t) (x >> 16) * (q15_t) y)));

+  }

+

+  /*

+   * @brief C custom defined SMUADX for M3 and M0 processors

+   */

+  static __INLINE q31_t __SMUADX(

+  q31_t x,

+  q31_t y)

+  {

+

+    return ((q31_t) (((q15_t) x * (q15_t) (y >> 16)) +

+                     ((q15_t) (x >> 16) * (q15_t) y)));

+  }

+

+  /*

+   * @brief C custom defined QADD for M3 and M0 processors

+   */

+  static __INLINE q31_t __QADD(

+  q31_t x,

+  q31_t y)

+  {

+    return clip_q63_to_q31((q63_t) x + y);

+  }

+

+  /*

+   * @brief C custom defined QSUB for M3 and M0 processors

+   */

+  static __INLINE q31_t __QSUB(

+  q31_t x,

+  q31_t y)

+  {

+    return clip_q63_to_q31((q63_t) x - y);

+  }

+

+  /*

+   * @brief C custom defined SMLAD for M3 and M0 processors

+   */

+  static __INLINE q31_t __SMLAD(

+  q31_t x,

+  q31_t y,

+  q31_t sum)

+  {

+

+    return (sum + ((q15_t) (x >> 16) * (q15_t) (y >> 16)) +

+            ((q15_t) x * (q15_t) y));

+  }

+

+  /*

+   * @brief C custom defined SMLADX for M3 and M0 processors

+   */

+  static __INLINE q31_t __SMLADX(

+  q31_t x,

+  q31_t y,

+  q31_t sum)

+  {

+

+    return (sum + ((q15_t) (x >> 16) * (q15_t) (y)) +

+            ((q15_t) x * (q15_t) (y >> 16)));

+  }

+

+  /*

+   * @brief C custom defined SMLSDX for M3 and M0 processors

+   */

+  static __INLINE q31_t __SMLSDX(

+  q31_t x,

+  q31_t y,

+  q31_t sum)

+  {

+

+    return (sum - ((q15_t) (x >> 16) * (q15_t) (y)) +

+            ((q15_t) x * (q15_t) (y >> 16)));

+  }

+

+  /*

+   * @brief C custom defined SMLALD for M3 and M0 processors

+   */

+  static __INLINE q63_t __SMLALD(

+  q31_t x,

+  q31_t y,

+  q63_t sum)

+  {

+

+    return (sum + ((q15_t) (x >> 16) * (q15_t) (y >> 16)) +

+            ((q15_t) x * (q15_t) y));

+  }

+

+  /*

+   * @brief C custom defined SMLALDX for M3 and M0 processors

+   */

+  static __INLINE q63_t __SMLALDX(

+  q31_t x,

+  q31_t y,

+  q63_t sum)

+  {

+

+    return (sum + ((q15_t) (x >> 16) * (q15_t) y)) +

+      ((q15_t) x * (q15_t) (y >> 16));

+  }

+

+  /*

+   * @brief C custom defined SMUAD for M3 and M0 processors

+   */

+  static __INLINE q31_t __SMUAD(

+  q31_t x,

+  q31_t y)

+  {

+

+    return (((x >> 16) * (y >> 16)) +

+            (((x << 16) >> 16) * ((y << 16) >> 16)));

+  }

+

+  /*

+   * @brief C custom defined SMUSD for M3 and M0 processors

+   */

+  static __INLINE q31_t __SMUSD(

+  q31_t x,

+  q31_t y)

+  {

+

+    return (-((x >> 16) * (y >> 16)) +

+            (((x << 16) >> 16) * ((y << 16) >> 16)));

+  }

+

+

+  /*

+   * @brief C custom defined SXTB16 for M3 and M0 processors

+   */

+  static __INLINE q31_t __SXTB16(

+  q31_t x)

+  {

+

+    return ((((x << 24) >> 24) & 0x0000FFFF) |

+            (((x << 8) >> 8) & 0xFFFF0000));

+  }

+

+

+#endif /* defined (ARM_MATH_CM3) || defined (ARM_MATH_CM0_FAMILY) */

+

+

  /* ------------------------------------------------------------------
   * FIR filter instance structures, one per supported data type.
   * ------------------------------------------------------------------ */

  /**
   * @brief Instance structure for the Q7 FIR filter.
   */
  typedef struct
  {
    uint16_t numTaps;        /**< number of filter coefficients in the filter. */
    q7_t *pState;            /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
    q7_t *pCoeffs;           /**< points to the coefficient array. The array is of length numTaps. */
  } arm_fir_instance_q7;

  /**
   * @brief Instance structure for the Q15 FIR filter.
   */
  typedef struct
  {
    uint16_t numTaps;         /**< number of filter coefficients in the filter. */
    q15_t *pState;            /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
    q15_t *pCoeffs;           /**< points to the coefficient array. The array is of length numTaps. */
  } arm_fir_instance_q15;

  /**
   * @brief Instance structure for the Q31 FIR filter.
   */
  typedef struct
  {
    uint16_t numTaps;         /**< number of filter coefficients in the filter. */
    q31_t *pState;            /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
    q31_t *pCoeffs;           /**< points to the coefficient array. The array is of length numTaps. */
  } arm_fir_instance_q31;

  /**
   * @brief Instance structure for the floating-point FIR filter.
   */
  typedef struct
  {
    uint16_t numTaps;     /**< number of filter coefficients in the filter. */
    float32_t *pState;    /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
    float32_t *pCoeffs;   /**< points to the coefficient array. The array is of length numTaps. */
  } arm_fir_instance_f32;

+

+

  /* ------------------------------------------------------------------
   * FIR filter processing and initialization functions.
   * ------------------------------------------------------------------ */

  /**
   * @brief Processing function for the Q7 FIR filter.
   * @param[in] *S points to an instance of the Q7 FIR filter structure.
   * @param[in] *pSrc points to the block of input data.
   * @param[out] *pDst points to the block of output data.
   * @param[in] blockSize number of samples to process.
   * @return none.
   */
  void arm_fir_q7(
  const arm_fir_instance_q7 * S,
  q7_t * pSrc,
  q7_t * pDst,
  uint32_t blockSize);


  /**
   * @brief  Initialization function for the Q7 FIR filter.
   * @param[in,out] *S points to an instance of the Q7 FIR structure.
   * @param[in] numTaps  Number of filter coefficients in the filter.
   * @param[in] *pCoeffs points to the filter coefficients.
   * @param[in] *pState points to the state buffer.
   * @param[in] blockSize number of samples that are processed.
   * @return none
   */
  void arm_fir_init_q7(
  arm_fir_instance_q7 * S,
  uint16_t numTaps,
  q7_t * pCoeffs,
  q7_t * pState,
  uint32_t blockSize);


  /**
   * @brief Processing function for the Q15 FIR filter.
   * @param[in] *S points to an instance of the Q15 FIR structure.
   * @param[in] *pSrc points to the block of input data.
   * @param[out] *pDst points to the block of output data.
   * @param[in] blockSize number of samples to process.
   * @return none.
   */
  void arm_fir_q15(
  const arm_fir_instance_q15 * S,
  q15_t * pSrc,
  q15_t * pDst,
  uint32_t blockSize);

  /**
   * @brief Processing function for the fast Q15 FIR filter for Cortex-M3 and Cortex-M4.
   * @param[in] *S points to an instance of the Q15 FIR filter structure.
   * @param[in] *pSrc points to the block of input data.
   * @param[out] *pDst points to the block of output data.
   * @param[in] blockSize number of samples to process.
   * @return none.
   */
  void arm_fir_fast_q15(
  const arm_fir_instance_q15 * S,
  q15_t * pSrc,
  q15_t * pDst,
  uint32_t blockSize);

  /**
   * @brief  Initialization function for the Q15 FIR filter.
   * @param[in,out] *S points to an instance of the Q15 FIR filter structure.
   * @param[in] numTaps  Number of filter coefficients in the filter. Must be even and greater than or equal to 4.
   * @param[in] *pCoeffs points to the filter coefficients.
   * @param[in] *pState points to the state buffer.
   * @param[in] blockSize number of samples that are processed at a time.
   * @return The function returns ARM_MATH_SUCCESS if initialization was successful or ARM_MATH_ARGUMENT_ERROR if
   * <code>numTaps</code> is not a supported value.
   */

  arm_status arm_fir_init_q15(
  arm_fir_instance_q15 * S,
  uint16_t numTaps,
  q15_t * pCoeffs,
  q15_t * pState,
  uint32_t blockSize);

  /**
   * @brief Processing function for the Q31 FIR filter.
   * @param[in] *S points to an instance of the Q31 FIR filter structure.
   * @param[in] *pSrc points to the block of input data.
   * @param[out] *pDst points to the block of output data.
   * @param[in] blockSize number of samples to process.
   * @return none.
   */
  void arm_fir_q31(
  const arm_fir_instance_q31 * S,
  q31_t * pSrc,
  q31_t * pDst,
  uint32_t blockSize);

  /**
   * @brief Processing function for the fast Q31 FIR filter for Cortex-M3 and Cortex-M4.
   * @param[in] *S points to an instance of the Q31 FIR structure.
   * @param[in] *pSrc points to the block of input data.
   * @param[out] *pDst points to the block of output data.
   * @param[in] blockSize number of samples to process.
   * @return none.
   */
  void arm_fir_fast_q31(
  const arm_fir_instance_q31 * S,
  q31_t * pSrc,
  q31_t * pDst,
  uint32_t blockSize);

  /**
   * @brief  Initialization function for the Q31 FIR filter.
   * @param[in,out] *S points to an instance of the Q31 FIR structure.
   * @param[in] numTaps  Number of filter coefficients in the filter.
   * @param[in] *pCoeffs points to the filter coefficients.
   * @param[in] *pState points to the state buffer.
   * @param[in] blockSize number of samples that are processed at a time.
   * @return none.
   */
  void arm_fir_init_q31(
  arm_fir_instance_q31 * S,
  uint16_t numTaps,
  q31_t * pCoeffs,
  q31_t * pState,
  uint32_t blockSize);

  /**
   * @brief Processing function for the floating-point FIR filter.
   * @param[in] *S points to an instance of the floating-point FIR structure.
   * @param[in] *pSrc points to the block of input data.
   * @param[out] *pDst points to the block of output data.
   * @param[in] blockSize number of samples to process.
   * @return none.
   */
  void arm_fir_f32(
  const arm_fir_instance_f32 * S,
  float32_t * pSrc,
  float32_t * pDst,
  uint32_t blockSize);

  /**
   * @brief  Initialization function for the floating-point FIR filter.
   * @param[in,out] *S points to an instance of the floating-point FIR filter structure.
   * @param[in] numTaps  Number of filter coefficients in the filter.
   * @param[in] *pCoeffs points to the filter coefficients.
   * @param[in] *pState points to the state buffer.
   * @param[in] blockSize number of samples that are processed at a time.
   * @return none.
   */
  void arm_fir_init_f32(
  arm_fir_instance_f32 * S,
  uint16_t numTaps,
  float32_t * pCoeffs,
  float32_t * pState,
  uint32_t blockSize);

+

+

  /* ------------------------------------------------------------------
   * Biquad cascade (direct form I) instance structures.
   * NOTE(review): numStages is int8_t for Q15 but uint32_t for Q31/f32 —
   * inherited inconsistency; confirm before unifying.
   * ------------------------------------------------------------------ */

  /**
   * @brief Instance structure for the Q15 Biquad cascade filter.
   */
  typedef struct
  {
    int8_t numStages;         /**< number of 2nd order stages in the filter.  Overall order is 2*numStages. */
    q15_t *pState;            /**< Points to the array of state coefficients.  The array is of length 4*numStages. */
    q15_t *pCoeffs;           /**< Points to the array of coefficients.  The array is of length 5*numStages. */
    int8_t postShift;         /**< Additional shift, in bits, applied to each output sample. */

  } arm_biquad_casd_df1_inst_q15;


  /**
   * @brief Instance structure for the Q31 Biquad cascade filter.
   */
  typedef struct
  {
    uint32_t numStages;      /**< number of 2nd order stages in the filter.  Overall order is 2*numStages. */
    q31_t *pState;           /**< Points to the array of state coefficients.  The array is of length 4*numStages. */
    q31_t *pCoeffs;          /**< Points to the array of coefficients.  The array is of length 5*numStages. */
    uint8_t postShift;       /**< Additional shift, in bits, applied to each output sample. */

  } arm_biquad_casd_df1_inst_q31;

  /**
   * @brief Instance structure for the floating-point Biquad cascade filter.
   */
  typedef struct
  {
    uint32_t numStages;         /**< number of 2nd order stages in the filter.  Overall order is 2*numStages. */
    float32_t *pState;          /**< Points to the array of state coefficients.  The array is of length 4*numStages. */
    float32_t *pCoeffs;         /**< Points to the array of coefficients.  The array is of length 5*numStages. */


  } arm_biquad_casd_df1_inst_f32;

+

+

+

  /* ------------------------------------------------------------------
   * Biquad cascade (direct form I) processing and initialization.
   * ------------------------------------------------------------------ */

  /**
   * @brief Processing function for the Q15 Biquad cascade filter.
   * @param[in]  *S points to an instance of the Q15 Biquad cascade structure.
   * @param[in]  *pSrc points to the block of input data.
   * @param[out] *pDst points to the block of output data.
   * @param[in]  blockSize number of samples to process.
   * @return     none.
   */

  void arm_biquad_cascade_df1_q15(
  const arm_biquad_casd_df1_inst_q15 * S,
  q15_t * pSrc,
  q15_t * pDst,
  uint32_t blockSize);

  /**
   * @brief  Initialization function for the Q15 Biquad cascade filter.
   * @param[in,out] *S           points to an instance of the Q15 Biquad cascade structure.
   * @param[in]     numStages    number of 2nd order stages in the filter.
   * @param[in]     *pCoeffs     points to the filter coefficients.
   * @param[in]     *pState      points to the state buffer.
   * @param[in]     postShift    Shift to be applied to the output. Varies according to the coefficients format.
   * @return        none
   */

  void arm_biquad_cascade_df1_init_q15(
  arm_biquad_casd_df1_inst_q15 * S,
  uint8_t numStages,
  q15_t * pCoeffs,
  q15_t * pState,
  int8_t postShift);


  /**
   * @brief Fast but less precise processing function for the Q15 Biquad cascade filter for Cortex-M3 and Cortex-M4.
   * @param[in]  *S points to an instance of the Q15 Biquad cascade structure.
   * @param[in]  *pSrc points to the block of input data.
   * @param[out] *pDst points to the block of output data.
   * @param[in]  blockSize number of samples to process.
   * @return     none.
   */

  void arm_biquad_cascade_df1_fast_q15(
  const arm_biquad_casd_df1_inst_q15 * S,
  q15_t * pSrc,
  q15_t * pDst,
  uint32_t blockSize);


  /**
   * @brief Processing function for the Q31 Biquad cascade filter.
   * @param[in]  *S         points to an instance of the Q31 Biquad cascade structure.
   * @param[in]  *pSrc      points to the block of input data.
   * @param[out] *pDst      points to the block of output data.
   * @param[in]  blockSize  number of samples to process.
   * @return     none.
   */

  void arm_biquad_cascade_df1_q31(
  const arm_biquad_casd_df1_inst_q31 * S,
  q31_t * pSrc,
  q31_t * pDst,
  uint32_t blockSize);

  /**
   * @brief Fast but less precise processing function for the Q31 Biquad cascade filter for Cortex-M3 and Cortex-M4.
   * @param[in]  *S         points to an instance of the Q31 Biquad cascade structure.
   * @param[in]  *pSrc      points to the block of input data.
   * @param[out] *pDst      points to the block of output data.
   * @param[in]  blockSize  number of samples to process.
   * @return     none.
   */

  void arm_biquad_cascade_df1_fast_q31(
  const arm_biquad_casd_df1_inst_q31 * S,
  q31_t * pSrc,
  q31_t * pDst,
  uint32_t blockSize);

  /**
   * @brief  Initialization function for the Q31 Biquad cascade filter.
   * @param[in,out] *S           points to an instance of the Q31 Biquad cascade structure.
   * @param[in]     numStages    number of 2nd order stages in the filter.
   * @param[in]     *pCoeffs     points to the filter coefficients.
   * @param[in]     *pState      points to the state buffer.
   * @param[in]     postShift    Shift to be applied to the output. Varies according to the coefficients format.
   * @return        none
   */

  void arm_biquad_cascade_df1_init_q31(
  arm_biquad_casd_df1_inst_q31 * S,
  uint8_t numStages,
  q31_t * pCoeffs,
  q31_t * pState,
  int8_t postShift);

  /**
   * @brief Processing function for the floating-point Biquad cascade filter.
   * @param[in]  *S         points to an instance of the floating-point Biquad cascade structure.
   * @param[in]  *pSrc      points to the block of input data.
   * @param[out] *pDst      points to the block of output data.
   * @param[in]  blockSize  number of samples to process.
   * @return     none.
   */

  void arm_biquad_cascade_df1_f32(
  const arm_biquad_casd_df1_inst_f32 * S,
  float32_t * pSrc,
  float32_t * pDst,
  uint32_t blockSize);

  /**
   * @brief  Initialization function for the floating-point Biquad cascade filter.
   * @param[in,out] *S           points to an instance of the floating-point Biquad cascade structure.
   * @param[in]     numStages    number of 2nd order stages in the filter.
   * @param[in]     *pCoeffs     points to the filter coefficients.
   * @param[in]     *pState      points to the state buffer.
   * @return        none
   */

  void arm_biquad_cascade_df1_init_f32(
  arm_biquad_casd_df1_inst_f32 * S,
  uint8_t numStages,
  float32_t * pCoeffs,
  float32_t * pState);

+

+

  /* ------------------------------------------------------------------
   * Matrix instance structures: dimensions plus a pointer to row-major
   * element storage (numRows * numCols entries).
   * ------------------------------------------------------------------ */

  /**
   * @brief Instance structure for the floating-point matrix structure.
   */

  typedef struct
  {
    uint16_t numRows;     /**< number of rows of the matrix.     */
    uint16_t numCols;     /**< number of columns of the matrix.  */
    float32_t *pData;     /**< points to the data of the matrix. */
  } arm_matrix_instance_f32;


  /**
   * @brief Instance structure for the double-precision floating-point matrix structure.
   */

  typedef struct
  {
    uint16_t numRows;     /**< number of rows of the matrix.     */
    uint16_t numCols;     /**< number of columns of the matrix.  */
    float64_t *pData;     /**< points to the data of the matrix. */
  } arm_matrix_instance_f64;

  /**
   * @brief Instance structure for the Q15 matrix structure.
   */

  typedef struct
  {
    uint16_t numRows;     /**< number of rows of the matrix.     */
    uint16_t numCols;     /**< number of columns of the matrix.  */
    q15_t *pData;         /**< points to the data of the matrix. */

  } arm_matrix_instance_q15;

  /**
   * @brief Instance structure for the Q31 matrix structure.
   */

  typedef struct
  {
    uint16_t numRows;     /**< number of rows of the matrix.     */
    uint16_t numCols;     /**< number of columns of the matrix.  */
    q31_t *pData;         /**< points to the data of the matrix. */

  } arm_matrix_instance_q31;

+

+

+

  /* ------------------------------------------------------------------
   * Matrix addition, complex multiplication, and transpose.
   * ------------------------------------------------------------------ */

  /**
   * @brief Floating-point matrix addition.
   * @param[in]       *pSrcA points to the first input matrix structure
   * @param[in]       *pSrcB points to the second input matrix structure
   * @param[out]      *pDst points to output matrix structure
   * @return     The function returns either
   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
   */

  arm_status arm_mat_add_f32(
  const arm_matrix_instance_f32 * pSrcA,
  const arm_matrix_instance_f32 * pSrcB,
  arm_matrix_instance_f32 * pDst);

  /**
   * @brief Q15 matrix addition.
   * @param[in]       *pSrcA points to the first input matrix structure
   * @param[in]       *pSrcB points to the second input matrix structure
   * @param[out]      *pDst points to output matrix structure
   * @return     The function returns either
   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
   */

  arm_status arm_mat_add_q15(
  const arm_matrix_instance_q15 * pSrcA,
  const arm_matrix_instance_q15 * pSrcB,
  arm_matrix_instance_q15 * pDst);

  /**
   * @brief Q31 matrix addition.
   * @param[in]       *pSrcA points to the first input matrix structure
   * @param[in]       *pSrcB points to the second input matrix structure
   * @param[out]      *pDst points to output matrix structure
   * @return     The function returns either
   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
   */

  arm_status arm_mat_add_q31(
  const arm_matrix_instance_q31 * pSrcA,
  const arm_matrix_instance_q31 * pSrcB,
  arm_matrix_instance_q31 * pDst);

  /**
   * @brief Floating-point, complex, matrix multiplication.
   * @param[in]       *pSrcA points to the first input matrix structure
   * @param[in]       *pSrcB points to the second input matrix structure
   * @param[out]      *pDst points to output matrix structure
   * @return     The function returns either
   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
   */

  arm_status arm_mat_cmplx_mult_f32(
  const arm_matrix_instance_f32 * pSrcA,
  const arm_matrix_instance_f32 * pSrcB,
  arm_matrix_instance_f32 * pDst);

  /**
   * @brief Q15, complex, matrix multiplication.
   * @param[in]       *pSrcA points to the first input matrix structure
   * @param[in]       *pSrcB points to the second input matrix structure
   * @param[out]      *pDst points to output matrix structure
   * @param[in]       *pScratch points to scratch memory for intermediate results
   * @return     The function returns either
   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
   */

  arm_status arm_mat_cmplx_mult_q15(
  const arm_matrix_instance_q15 * pSrcA,
  const arm_matrix_instance_q15 * pSrcB,
  arm_matrix_instance_q15 * pDst,
  q15_t * pScratch);

  /**
   * @brief Q31, complex, matrix multiplication.
   * @param[in]       *pSrcA points to the first input matrix structure
   * @param[in]       *pSrcB points to the second input matrix structure
   * @param[out]      *pDst points to output matrix structure
   * @return     The function returns either
   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
   */

  arm_status arm_mat_cmplx_mult_q31(
  const arm_matrix_instance_q31 * pSrcA,
  const arm_matrix_instance_q31 * pSrcB,
  arm_matrix_instance_q31 * pDst);


  /**
   * @brief Floating-point matrix transpose.
   * @param[in]  *pSrc points to the input matrix
   * @param[out] *pDst points to the output matrix
   * @return The function returns either <code>ARM_MATH_SIZE_MISMATCH</code>
   * or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
   */

  arm_status arm_mat_trans_f32(
  const arm_matrix_instance_f32 * pSrc,
  arm_matrix_instance_f32 * pDst);


  /**
   * @brief Q15 matrix transpose.
   * @param[in]  *pSrc points to the input matrix
   * @param[out] *pDst points to the output matrix
   * @return The function returns either <code>ARM_MATH_SIZE_MISMATCH</code>
   * or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
   */

  arm_status arm_mat_trans_q15(
  const arm_matrix_instance_q15 * pSrc,
  arm_matrix_instance_q15 * pDst);

  /**
   * @brief Q31 matrix transpose.
   * @param[in]  *pSrc points to the input matrix
   * @param[out] *pDst points to the output matrix
   * @return The function returns either <code>ARM_MATH_SIZE_MISMATCH</code>
   * or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
   */

  arm_status arm_mat_trans_q31(
  const arm_matrix_instance_q31 * pSrc,
  arm_matrix_instance_q31 * pDst);

+

+

  /* ------------------------------------------------------------------
   * Matrix multiplication.
   * ------------------------------------------------------------------ */

  /**
   * @brief Floating-point matrix multiplication.
   * @param[in]       *pSrcA points to the first input matrix structure
   * @param[in]       *pSrcB points to the second input matrix structure
   * @param[out]      *pDst points to output matrix structure
   * @return     The function returns either
   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
   */

  arm_status arm_mat_mult_f32(
  const arm_matrix_instance_f32 * pSrcA,
  const arm_matrix_instance_f32 * pSrcB,
  arm_matrix_instance_f32 * pDst);

  /**
   * @brief Q15 matrix multiplication.
   * @param[in]       *pSrcA points to the first input matrix structure
   * @param[in]       *pSrcB points to the second input matrix structure
   * @param[out]      *pDst points to output matrix structure
   * @param[in]       *pState points to the array for storing intermediate results
   * @return     The function returns either
   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
   */

  arm_status arm_mat_mult_q15(
  const arm_matrix_instance_q15 * pSrcA,
  const arm_matrix_instance_q15 * pSrcB,
  arm_matrix_instance_q15 * pDst,
  q15_t * pState);

  /**
   * @brief Q15 matrix multiplication (fast variant) for Cortex-M3 and Cortex-M4.
   * @param[in]       *pSrcA  points to the first input matrix structure
   * @param[in]       *pSrcB  points to the second input matrix structure
   * @param[out]      *pDst   points to output matrix structure
   * @param[in]       *pState points to the array for storing intermediate results
   * @return     The function returns either
   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
   */

  arm_status arm_mat_mult_fast_q15(
  const arm_matrix_instance_q15 * pSrcA,
  const arm_matrix_instance_q15 * pSrcB,
  arm_matrix_instance_q15 * pDst,
  q15_t * pState);

  /**
   * @brief Q31 matrix multiplication.
   * @param[in]       *pSrcA points to the first input matrix structure
   * @param[in]       *pSrcB points to the second input matrix structure
   * @param[out]      *pDst points to output matrix structure
   * @return     The function returns either
   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
   */

  arm_status arm_mat_mult_q31(
  const arm_matrix_instance_q31 * pSrcA,
  const arm_matrix_instance_q31 * pSrcB,
  arm_matrix_instance_q31 * pDst);

+

+  /**

+   * @brief Q31 matrix multiplication (fast variant) for Cortex-M3 and Cortex-M4

+   * @param[in]       *pSrcA points to the first input matrix structure

+   * @param[in]       *pSrcB points to the second input matrix structure

+   * @param[out]      *pDst points to output matrix structure

+   * @return     The function returns either

+   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.

+   */

+

+  arm_status arm_mat_mult_fast_q31(

+  const arm_matrix_instance_q31 * pSrcA,

+  const arm_matrix_instance_q31 * pSrcB,

+  arm_matrix_instance_q31 * pDst);

+

+

+  /**

+   * @brief Floating-point matrix subtraction

+   * @param[in]       *pSrcA points to the first input matrix structure

+   * @param[in]       *pSrcB points to the second input matrix structure

+   * @param[out]      *pDst points to output matrix structure

+   * @return     The function returns either

+   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.

+   */

+

+  arm_status arm_mat_sub_f32(

+  const arm_matrix_instance_f32 * pSrcA,

+  const arm_matrix_instance_f32 * pSrcB,

+  arm_matrix_instance_f32 * pDst);

+

+  /**

+   * @brief Q15 matrix subtraction

+   * @param[in]       *pSrcA points to the first input matrix structure

+   * @param[in]       *pSrcB points to the second input matrix structure

+   * @param[out]      *pDst points to output matrix structure

+   * @return     The function returns either

+   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.

+   */

+

+  arm_status arm_mat_sub_q15(

+  const arm_matrix_instance_q15 * pSrcA,

+  const arm_matrix_instance_q15 * pSrcB,

+  arm_matrix_instance_q15 * pDst);

+

+  /**

+   * @brief Q31 matrix subtraction

+   * @param[in]       *pSrcA points to the first input matrix structure

+   * @param[in]       *pSrcB points to the second input matrix structure

+   * @param[out]      *pDst points to output matrix structure

+   * @return     The function returns either

+   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.

+   */

+

+  arm_status arm_mat_sub_q31(

+  const arm_matrix_instance_q31 * pSrcA,

+  const arm_matrix_instance_q31 * pSrcB,

+  arm_matrix_instance_q31 * pDst);

+

+  /**

+   * @brief Floating-point matrix scaling.

+   * @param[in]  *pSrc points to the input matrix

+   * @param[in]  scale scale factor

+   * @param[out] *pDst points to the output matrix

+   * @return     The function returns either

+   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.

+   */

+

+  arm_status arm_mat_scale_f32(

+  const arm_matrix_instance_f32 * pSrc,

+  float32_t scale,

+  arm_matrix_instance_f32 * pDst);

+

+  /**

+   * @brief Q15 matrix scaling.

+   * @param[in]       *pSrc points to input matrix

+   * @param[in]       scaleFract fractional portion of the scale factor

+   * @param[in]       shift number of bits to shift the result by

+   * @param[out]      *pDst points to output matrix

+   * @return     The function returns either

+   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.

+   */

+

+  arm_status arm_mat_scale_q15(

+  const arm_matrix_instance_q15 * pSrc,

+  q15_t scaleFract,

+  int32_t shift,

+  arm_matrix_instance_q15 * pDst);

+

+  /**

+   * @brief Q31 matrix scaling.

+   * @param[in]       *pSrc points to input matrix

+   * @param[in]       scaleFract fractional portion of the scale factor

+   * @param[in]       shift number of bits to shift the result by

+   * @param[out]      *pDst points to output matrix structure

+   * @return     The function returns either

+   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.

+   */

+

+  arm_status arm_mat_scale_q31(

+  const arm_matrix_instance_q31 * pSrc,

+  q31_t scaleFract,

+  int32_t shift,

+  arm_matrix_instance_q31 * pDst);

+

+

+  /**

+   * @brief  Q31 matrix initialization.

+   * @param[in,out] *S             points to an instance of the Q31 matrix structure.

+   * @param[in]     nRows          number of rows in the matrix.

+   * @param[in]     nColumns       number of columns in the matrix.

+   * @param[in]     *pData	       points to the matrix data array.

+   * @return        none

+   */

+

+  void arm_mat_init_q31(

+  arm_matrix_instance_q31 * S,

+  uint16_t nRows,

+  uint16_t nColumns,

+  q31_t * pData);

+

+  /**

+   * @brief  Q15 matrix initialization.

+   * @param[in,out] *S             points to an instance of the Q15 matrix structure.

+   * @param[in]     nRows          number of rows in the matrix.

+   * @param[in]     nColumns       number of columns in the matrix.

+   * @param[in]     *pData	       points to the matrix data array.

+   * @return        none

+   */

+

+  void arm_mat_init_q15(

+  arm_matrix_instance_q15 * S,

+  uint16_t nRows,

+  uint16_t nColumns,

+  q15_t * pData);

+

+  /**

+   * @brief  Floating-point matrix initialization.

+   * @param[in,out] *S             points to an instance of the floating-point matrix structure.

+   * @param[in]     nRows          number of rows in the matrix.

+   * @param[in]     nColumns       number of columns in the matrix.

+   * @param[in]     *pData	       points to the matrix data array.

+   * @return        none

+   */

+

+  void arm_mat_init_f32(

+  arm_matrix_instance_f32 * S,

+  uint16_t nRows,

+  uint16_t nColumns,

+  float32_t * pData);

+

+

+

+  /**

+   * @brief Instance structure for the Q15 PID Control.

+   */

+  typedef struct

+  {

+    q15_t A0;    /**< The derived gain, A0 = Kp + Ki + Kd . */

+#ifdef ARM_MATH_CM0_FAMILY

+    q15_t A1;           /**< The derived gain A1 = -Kp - 2Kd. */

+    q15_t A2;           /**< The derived gain A2 = Kd. */

+#else

+    q31_t A1;           /**< The derived gains A1 = -Kp - 2Kd and A2 = Kd packed into one word. NOTE(review): packing order (which gain is in which halfword) is not visible here — confirm against arm_pid_init_q15. */

+#endif

+    q15_t state[3];       /**< The state array of length 3. */
+
+    q15_t Kp;           /**< The proportional gain. */
+
+    q15_t Ki;           /**< The integral gain. */
+
+    q15_t Kd;           /**< The derivative gain. */
+
+  } arm_pid_instance_q15;

+

+  /**

+   * @brief Instance structure for the Q31 PID Control.

+   */

+  typedef struct

+  {

+    q31_t A0;            /**< The derived gain, A0 = Kp + Ki + Kd . */

+    q31_t A1;            /**< The derived gain, A1 = -Kp - 2Kd. */

+    q31_t A2;            /**< The derived gain, A2 = Kd . */

+    q31_t state[3];      /**< The state array of length 3. */

+    q31_t Kp;            /**< The proportional gain. */

+    q31_t Ki;            /**< The integral gain. */

+    q31_t Kd;            /**< The derivative gain. */

+

+  } arm_pid_instance_q31;

+

+  /**

+   * @brief Instance structure for the floating-point PID Control.

+   */

+  typedef struct

+  {

+    float32_t A0;          /**< The derived gain, A0 = Kp + Ki + Kd . */

+    float32_t A1;          /**< The derived gain, A1 = -Kp - 2Kd. */

+    float32_t A2;          /**< The derived gain, A2 = Kd . */

+    float32_t state[3];    /**< The state array of length 3. */

+    float32_t Kp;               /**< The proportional gain. */

+    float32_t Ki;               /**< The integral gain. */

+    float32_t Kd;               /**< The derivative gain. */

+  } arm_pid_instance_f32;

+

+

+

+  /**

+   * @brief  Initialization function for the floating-point PID Control.

+   * @param[in,out] *S      points to an instance of the PID structure.

+   * @param[in]     resetStateFlag  flag to reset the state. 0 = no change in state 1 = reset the state.

+   * @return none.

+   */

+  void arm_pid_init_f32(

+  arm_pid_instance_f32 * S,

+  int32_t resetStateFlag);

+

+  /**

+   * @brief  Reset function for the floating-point PID Control.

+   * @param[in,out] *S is an instance of the floating-point PID Control structure

+   * @return none

+   */

+  void arm_pid_reset_f32(

+  arm_pid_instance_f32 * S);

+

+

+  /**

+   * @brief  Initialization function for the Q31 PID Control.

+   * @param[in,out] *S points to an instance of the Q31 PID structure.

+   * @param[in]     resetStateFlag  flag to reset the state. 0 = no change in state 1 = reset the state.

+   * @return none.

+   */

+  void arm_pid_init_q31(

+  arm_pid_instance_q31 * S,

+  int32_t resetStateFlag);

+

+

+  /**

+   * @brief  Reset function for the Q31 PID Control.

+   * @param[in,out] *S points to an instance of the Q31 PID Control structure

+   * @return none

+   */

+

+  void arm_pid_reset_q31(

+  arm_pid_instance_q31 * S);

+

+  /**

+   * @brief  Initialization function for the Q15 PID Control.

+   * @param[in,out] *S points to an instance of the Q15 PID structure.

+   * @param[in] resetStateFlag  flag to reset the state. 0 = no change in state 1 = reset the state.

+   * @return none.

+   */

+  void arm_pid_init_q15(

+  arm_pid_instance_q15 * S,

+  int32_t resetStateFlag);

+

+  /**

+   * @brief  Reset function for the Q15 PID Control.

+   * @param[in,out] *S points to an instance of the q15 PID Control structure

+   * @return none

+   */

+  void arm_pid_reset_q15(

+  arm_pid_instance_q15 * S);

+

+

+  /**

+   * @brief Instance structure for the floating-point Linear Interpolate function.

+   */

+  typedef struct

+  {

+    uint32_t nValues;           /**< nValues */

+    float32_t x1;               /**< x1 */

+    float32_t xSpacing;         /**< xSpacing */

+    float32_t *pYData;          /**< pointer to the table of Y values */

+  } arm_linear_interp_instance_f32;

+

+  /**

+   * @brief Instance structure for the floating-point bilinear interpolation function.

+   */

+

+  typedef struct

+  {

+    uint16_t numRows;   /**< number of rows in the data table. */

+    uint16_t numCols;   /**< number of columns in the data table. */

+    float32_t *pData;   /**< points to the data table. */

+  } arm_bilinear_interp_instance_f32;

+

+   /**

+   * @brief Instance structure for the Q31 bilinear interpolation function.

+   */

+

+  typedef struct

+  {

+    uint16_t numRows;   /**< number of rows in the data table. */

+    uint16_t numCols;   /**< number of columns in the data table. */

+    q31_t *pData;       /**< points to the data table. */

+  } arm_bilinear_interp_instance_q31;

+

+   /**

+   * @brief Instance structure for the Q15 bilinear interpolation function.

+   */

+

+  typedef struct

+  {

+    uint16_t numRows;   /**< number of rows in the data table. */

+    uint16_t numCols;   /**< number of columns in the data table. */

+    q15_t *pData;       /**< points to the data table. */

+  } arm_bilinear_interp_instance_q15;

+

+   /**

+   * @brief Instance structure for the Q7 bilinear interpolation function.

+   */

+

+  typedef struct

+  {

+    uint16_t numRows;   /**< number of rows in the data table. */

+    uint16_t numCols;   /**< number of columns in the data table. */

+    q7_t *pData;                /**< points to the data table. */

+  } arm_bilinear_interp_instance_q7;

+

+

+  /**

+   * @brief Q7 vector multiplication.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[out]      *pDst  points to the output vector

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_mult_q7(

+  q7_t * pSrcA,

+  q7_t * pSrcB,

+  q7_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Q15 vector multiplication.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[out]      *pDst  points to the output vector

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_mult_q15(

+  q15_t * pSrcA,

+  q15_t * pSrcB,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Q31 vector multiplication.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[out]      *pDst points to the output vector

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_mult_q31(

+  q31_t * pSrcA,

+  q31_t * pSrcB,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Floating-point vector multiplication.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[out]      *pDst points to the output vector

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_mult_f32(

+  float32_t * pSrcA,

+  float32_t * pSrcB,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+

+

+

+

+

+  /**

+   * @brief Instance structure for the Radix-2 Q15 CFFT/CIFFT function.

+   */

+

+  typedef struct

+  {

+    uint16_t fftLen;                 /**< length of the FFT. */

+    uint8_t ifftFlag;                /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */

+    uint8_t bitReverseFlag;          /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */

+    q15_t *pTwiddle;                     /**< points to the Sin twiddle factor table. */

+    uint16_t *pBitRevTable;          /**< points to the bit reversal table. */

+    uint16_t twidCoefModifier;       /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */

+    uint16_t bitRevFactor;           /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */

+  } arm_cfft_radix2_instance_q15;

+

+/* Deprecated */

+  arm_status arm_cfft_radix2_init_q15(

+  arm_cfft_radix2_instance_q15 * S,

+  uint16_t fftLen,

+  uint8_t ifftFlag,

+  uint8_t bitReverseFlag);

+

+/* Deprecated */

+  void arm_cfft_radix2_q15(

+  const arm_cfft_radix2_instance_q15 * S,

+  q15_t * pSrc);

+

+

+

+  /**

+   * @brief Instance structure for the Radix-4 Q15 CFFT/CIFFT function.

+   */

+

+  typedef struct

+  {

+    uint16_t fftLen;                 /**< length of the FFT. */

+    uint8_t ifftFlag;                /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */

+    uint8_t bitReverseFlag;          /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */

+    q15_t *pTwiddle;                 /**< points to the twiddle factor table. */

+    uint16_t *pBitRevTable;          /**< points to the bit reversal table. */

+    uint16_t twidCoefModifier;       /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */

+    uint16_t bitRevFactor;           /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */

+  } arm_cfft_radix4_instance_q15;

+

+/* Deprecated */

+  arm_status arm_cfft_radix4_init_q15(

+  arm_cfft_radix4_instance_q15 * S,

+  uint16_t fftLen,

+  uint8_t ifftFlag,

+  uint8_t bitReverseFlag);

+

+/* Deprecated */

+  void arm_cfft_radix4_q15(

+  const arm_cfft_radix4_instance_q15 * S,

+  q15_t * pSrc);

+

+  /**

+   * @brief Instance structure for the Radix-2 Q31 CFFT/CIFFT function.

+   */

+

+  typedef struct

+  {

+    uint16_t fftLen;                 /**< length of the FFT. */

+    uint8_t ifftFlag;                /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */

+    uint8_t bitReverseFlag;          /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */

+    q31_t *pTwiddle;                     /**< points to the Twiddle factor table. */

+    uint16_t *pBitRevTable;          /**< points to the bit reversal table. */

+    uint16_t twidCoefModifier;       /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */

+    uint16_t bitRevFactor;           /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */

+  } arm_cfft_radix2_instance_q31;

+

+/* Deprecated */

+  arm_status arm_cfft_radix2_init_q31(

+  arm_cfft_radix2_instance_q31 * S,

+  uint16_t fftLen,

+  uint8_t ifftFlag,

+  uint8_t bitReverseFlag);

+

+/* Deprecated */

+  void arm_cfft_radix2_q31(

+  const arm_cfft_radix2_instance_q31 * S,

+  q31_t * pSrc);

+

+  /**

+   * @brief Instance structure for the Radix-4 Q31 CFFT/CIFFT function.

+   */

+

+  typedef struct

+  {

+    uint16_t fftLen;                 /**< length of the FFT. */

+    uint8_t ifftFlag;                /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */

+    uint8_t bitReverseFlag;          /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */

+    q31_t *pTwiddle;                 /**< points to the twiddle factor table. */

+    uint16_t *pBitRevTable;          /**< points to the bit reversal table. */

+    uint16_t twidCoefModifier;       /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */

+    uint16_t bitRevFactor;           /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */

+  } arm_cfft_radix4_instance_q31;

+

+/* Deprecated */

+  void arm_cfft_radix4_q31(

+  const arm_cfft_radix4_instance_q31 * S,

+  q31_t * pSrc);

+

+/* Deprecated */

+  arm_status arm_cfft_radix4_init_q31(

+  arm_cfft_radix4_instance_q31 * S,

+  uint16_t fftLen,

+  uint8_t ifftFlag,

+  uint8_t bitReverseFlag);

+

+  /**

+   * @brief Instance structure for the Radix-2 floating-point CFFT/CIFFT function.

+   */

+

+  typedef struct

+  {

+    uint16_t fftLen;                   /**< length of the FFT. */

+    uint8_t ifftFlag;                  /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */

+    uint8_t bitReverseFlag;            /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */

+    float32_t *pTwiddle;               /**< points to the Twiddle factor table. */

+    uint16_t *pBitRevTable;            /**< points to the bit reversal table. */

+    uint16_t twidCoefModifier;         /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */

+    uint16_t bitRevFactor;             /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */

+    float32_t onebyfftLen;                 /**< value of 1/fftLen. */

+  } arm_cfft_radix2_instance_f32;

+

+/* Deprecated */

+  arm_status arm_cfft_radix2_init_f32(

+  arm_cfft_radix2_instance_f32 * S,

+  uint16_t fftLen,

+  uint8_t ifftFlag,

+  uint8_t bitReverseFlag);

+

+/* Deprecated */

+  void arm_cfft_radix2_f32(

+  const arm_cfft_radix2_instance_f32 * S,

+  float32_t * pSrc);

+

+  /**

+   * @brief Instance structure for the Radix-4 floating-point CFFT/CIFFT function.

+   */

+

+  typedef struct

+  {

+    uint16_t fftLen;                   /**< length of the FFT. */

+    uint8_t ifftFlag;                  /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */

+    uint8_t bitReverseFlag;            /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */

+    float32_t *pTwiddle;               /**< points to the Twiddle factor table. */

+    uint16_t *pBitRevTable;            /**< points to the bit reversal table. */

+    uint16_t twidCoefModifier;         /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */

+    uint16_t bitRevFactor;             /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */

+    float32_t onebyfftLen;                 /**< value of 1/fftLen. */

+  } arm_cfft_radix4_instance_f32;

+

+/* Deprecated */

+  arm_status arm_cfft_radix4_init_f32(

+  arm_cfft_radix4_instance_f32 * S,

+  uint16_t fftLen,

+  uint8_t ifftFlag,

+  uint8_t bitReverseFlag);

+

+/* Deprecated */

+  void arm_cfft_radix4_f32(

+  const arm_cfft_radix4_instance_f32 * S,

+  float32_t * pSrc);

+

+  /**

+   * @brief Instance structure for the Q15 fixed-point CFFT/CIFFT function.

+   */

+

+  typedef struct

+  {

+    uint16_t fftLen;                   /**< length of the FFT. */

+    const q15_t *pTwiddle;             /**< points to the Twiddle factor table. */

+    const uint16_t *pBitRevTable;      /**< points to the bit reversal table. */

+    uint16_t bitRevLength;             /**< bit reversal table length. */

+  } arm_cfft_instance_q15;

+

+void arm_cfft_q15( 

+    const arm_cfft_instance_q15 * S, 

+    q15_t * p1,

+    uint8_t ifftFlag,

+    uint8_t bitReverseFlag);  

+

+  /**

+   * @brief Instance structure for the Q31 fixed-point CFFT/CIFFT function.

+   */

+

+  typedef struct

+  {

+    uint16_t fftLen;                   /**< length of the FFT. */

+    const q31_t *pTwiddle;             /**< points to the Twiddle factor table. */

+    const uint16_t *pBitRevTable;      /**< points to the bit reversal table. */

+    uint16_t bitRevLength;             /**< bit reversal table length. */

+  } arm_cfft_instance_q31;

+

+void arm_cfft_q31( 

+    const arm_cfft_instance_q31 * S, 

+    q31_t * p1,

+    uint8_t ifftFlag,

+    uint8_t bitReverseFlag);  

+  

+  /**

+   * @brief Instance structure for the floating-point CFFT/CIFFT function.

+   */

+

+  typedef struct

+  {

+    uint16_t fftLen;                   /**< length of the FFT. */

+    const float32_t *pTwiddle;         /**< points to the Twiddle factor table. */

+    const uint16_t *pBitRevTable;      /**< points to the bit reversal table. */

+    uint16_t bitRevLength;             /**< bit reversal table length. */

+  } arm_cfft_instance_f32;

+

+  void arm_cfft_f32(

+  const arm_cfft_instance_f32 * S,

+  float32_t * p1,

+  uint8_t ifftFlag,

+  uint8_t bitReverseFlag);

+

+  /**

+   * @brief Instance structure for the Q15 RFFT/RIFFT function.

+   */

+

+  typedef struct

+  {

+    uint32_t fftLenReal;                      /**< length of the real FFT. */

+    uint8_t ifftFlagR;                        /**< flag that selects forward (ifftFlagR=0) or inverse (ifftFlagR=1) transform. */

+    uint8_t bitReverseFlagR;                  /**< flag that enables (bitReverseFlagR=1) or disables (bitReverseFlagR=0) bit reversal of output. */

+    uint32_t twidCoefRModifier;               /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */

+    q15_t *pTwiddleAReal;                     /**< points to the real twiddle factor table. */

+    q15_t *pTwiddleBReal;                     /**< points to the imag twiddle factor table. */

+    const arm_cfft_instance_q15 *pCfft;       /**< points to the complex FFT instance. */

+  } arm_rfft_instance_q15;

+

+  arm_status arm_rfft_init_q15(

+  arm_rfft_instance_q15 * S,

+  uint32_t fftLenReal,

+  uint32_t ifftFlagR,

+  uint32_t bitReverseFlag);

+

+  void arm_rfft_q15(

+  const arm_rfft_instance_q15 * S,

+  q15_t * pSrc,

+  q15_t * pDst);

+

+  /**

+   * @brief Instance structure for the Q31 RFFT/RIFFT function.

+   */

+

+  typedef struct

+  {

+    uint32_t fftLenReal;                        /**< length of the real FFT. */

+    uint8_t ifftFlagR;                          /**< flag that selects forward (ifftFlagR=0) or inverse (ifftFlagR=1) transform. */

+    uint8_t bitReverseFlagR;                    /**< flag that enables (bitReverseFlagR=1) or disables (bitReverseFlagR=0) bit reversal of output. */

+    uint32_t twidCoefRModifier;                 /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */

+    q31_t *pTwiddleAReal;                       /**< points to the real twiddle factor table. */

+    q31_t *pTwiddleBReal;                       /**< points to the imag twiddle factor table. */

+    const arm_cfft_instance_q31 *pCfft;         /**< points to the complex FFT instance. */

+  } arm_rfft_instance_q31;

+

+  arm_status arm_rfft_init_q31(

+  arm_rfft_instance_q31 * S,

+  uint32_t fftLenReal,

+  uint32_t ifftFlagR,

+  uint32_t bitReverseFlag);

+

+  void arm_rfft_q31(

+  const arm_rfft_instance_q31 * S,

+  q31_t * pSrc,

+  q31_t * pDst);

+

+  /**

+   * @brief Instance structure for the floating-point RFFT/RIFFT function.

+   */

+

+  typedef struct

+  {

+    uint32_t fftLenReal;                        /**< length of the real FFT. */

+    uint16_t fftLenBy2;                         /**< length of the complex FFT. */

+    uint8_t ifftFlagR;                          /**< flag that selects forward (ifftFlagR=0) or inverse (ifftFlagR=1) transform. */

+    uint8_t bitReverseFlagR;                    /**< flag that enables (bitReverseFlagR=1) or disables (bitReverseFlagR=0) bit reversal of output. */

+    uint32_t twidCoefRModifier;                     /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */

+    float32_t *pTwiddleAReal;                   /**< points to the real twiddle factor table. */

+    float32_t *pTwiddleBReal;                   /**< points to the imag twiddle factor table. */

+    arm_cfft_radix4_instance_f32 *pCfft;        /**< points to the complex FFT instance. */

+  } arm_rfft_instance_f32;

+

+  arm_status arm_rfft_init_f32(

+  arm_rfft_instance_f32 * S,

+  arm_cfft_radix4_instance_f32 * S_CFFT,

+  uint32_t fftLenReal,

+  uint32_t ifftFlagR,

+  uint32_t bitReverseFlag);

+

+  void arm_rfft_f32(

+  const arm_rfft_instance_f32 * S,

+  float32_t * pSrc,

+  float32_t * pDst);

+

+  /**

+   * @brief Instance structure for the fast floating-point RFFT/RIFFT function.

+   */

+

+typedef struct

+  {

+    arm_cfft_instance_f32 Sint;      /**< Internal CFFT structure. */

+    uint16_t fftLenRFFT;                        /**< length of the real sequence */

+	float32_t * pTwiddleRFFT;					/**< Twiddle factors real stage  */

+  } arm_rfft_fast_instance_f32 ;

+

+arm_status arm_rfft_fast_init_f32 (

+	arm_rfft_fast_instance_f32 * S,

+	uint16_t fftLen);

+

+void arm_rfft_fast_f32(

+  arm_rfft_fast_instance_f32 * S,

+  float32_t * p, float32_t * pOut,

+  uint8_t ifftFlag);

+

+  /**

+   * @brief Instance structure for the floating-point DCT4/IDCT4 function.

+   */

+

+  typedef struct

+  {

+    uint16_t N;                         /**< length of the DCT4. */

+    uint16_t Nby2;                      /**< half of the length of the DCT4. */

+    float32_t normalize;                /**< normalizing factor. */

+    float32_t *pTwiddle;                /**< points to the twiddle factor table. */

+    float32_t *pCosFactor;              /**< points to the cosFactor table. */

+    arm_rfft_instance_f32 *pRfft;        /**< points to the real FFT instance. */

+    arm_cfft_radix4_instance_f32 *pCfft; /**< points to the complex FFT instance. */

+  } arm_dct4_instance_f32;

+

+  /**

+   * @brief  Initialization function for the floating-point DCT4/IDCT4.

+   * @param[in,out] *S         points to an instance of floating-point DCT4/IDCT4 structure.

+   * @param[in]     *S_RFFT    points to an instance of floating-point RFFT/RIFFT structure.

+   * @param[in]     *S_CFFT    points to an instance of floating-point CFFT/CIFFT structure.

+   * @param[in]     N          length of the DCT4.

+   * @param[in]     Nby2       half of the length of the DCT4.

+   * @param[in]     normalize  normalizing factor.

+   * @return		arm_status function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_ARGUMENT_ERROR if <code>fftLenReal</code> is not a supported transform length.

+   */

+

+  arm_status arm_dct4_init_f32(

+  arm_dct4_instance_f32 * S,

+  arm_rfft_instance_f32 * S_RFFT,

+  arm_cfft_radix4_instance_f32 * S_CFFT,

+  uint16_t N,

+  uint16_t Nby2,

+  float32_t normalize);

+

+  /**

+   * @brief Processing function for the floating-point DCT4/IDCT4.

+   * @param[in]       *S             points to an instance of the floating-point DCT4/IDCT4 structure.

+   * @param[in]       *pState        points to state buffer.

+   * @param[in,out]   *pInlineBuffer points to the in-place input and output buffer.

+   * @return none.

+   */

+

+  void arm_dct4_f32(

+  const arm_dct4_instance_f32 * S,

+  float32_t * pState,

+  float32_t * pInlineBuffer);

+

+  /**

+   * @brief Instance structure for the Q31 DCT4/IDCT4 function.

+   */

+

+  typedef struct

+  {

+    uint16_t N;                         /**< length of the DCT4. */

+    uint16_t Nby2;                      /**< half of the length of the DCT4. */

+    q31_t normalize;                    /**< normalizing factor. */

+    q31_t *pTwiddle;                    /**< points to the twiddle factor table. */

+    q31_t *pCosFactor;                  /**< points to the cosFactor table. */

+    arm_rfft_instance_q31 *pRfft;        /**< points to the real FFT instance. */

+    arm_cfft_radix4_instance_q31 *pCfft; /**< points to the complex FFT instance. */

+  } arm_dct4_instance_q31;

+

+  /**

+   * @brief  Initialization function for the Q31 DCT4/IDCT4.

+   * @param[in,out] *S         points to an instance of Q31 DCT4/IDCT4 structure.

+   * @param[in]     *S_RFFT    points to an instance of Q31 RFFT/RIFFT structure

+   * @param[in]     *S_CFFT    points to an instance of Q31 CFFT/CIFFT structure

+   * @param[in]     N          length of the DCT4.

+   * @param[in]     Nby2       half of the length of the DCT4.

+   * @param[in]     normalize  normalizing factor.

+   * @return		arm_status function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_ARGUMENT_ERROR if <code>N</code> is not a supported transform length.

+   */

+

+  arm_status arm_dct4_init_q31(

+  arm_dct4_instance_q31 * S,

+  arm_rfft_instance_q31 * S_RFFT,

+  arm_cfft_radix4_instance_q31 * S_CFFT,

+  uint16_t N,

+  uint16_t Nby2,

+  q31_t normalize);

+

+  /**

+   * @brief Processing function for the Q31 DCT4/IDCT4.

+   * @param[in]       *S             points to an instance of the Q31 DCT4 structure.

+   * @param[in]       *pState        points to state buffer.

+   * @param[in,out]   *pInlineBuffer points to the in-place input and output buffer.

+   * @return none.

+   */

+

+  void arm_dct4_q31(

+  const arm_dct4_instance_q31 * S,

+  q31_t * pState,

+  q31_t * pInlineBuffer);

+

+  /**

+   * @brief Instance structure for the Q15 DCT4/IDCT4 function.

+   */

+

+  typedef struct
 
+  {
 
+    uint16_t N;                         /**< length of the DCT4; set by arm_dct4_init_q15(). */
 
+    uint16_t Nby2;                      /**< half of the length of the DCT4 (N/2). */
 
+    q15_t normalize;                    /**< normalizing factor, in Q15 format. */
 
+    q15_t *pTwiddle;                    /**< points to the twiddle factor table. */
 
+    q15_t *pCosFactor;                  /**< points to the cosFactor (cosine) table. */
 
+    arm_rfft_instance_q15 *pRfft;        /**< points to the real FFT instance (S_RFFT passed to arm_dct4_init_q15). */
 
+    arm_cfft_radix4_instance_q15 *pCfft; /**< points to the complex FFT instance (S_CFFT passed to arm_dct4_init_q15). */
 
+  } arm_dct4_instance_q15;

+

+  /**

+   * @brief  Initialization function for the Q15 DCT4/IDCT4.

+   * @param[in,out] *S         points to an instance of Q15 DCT4/IDCT4 structure.

+   * @param[in]     *S_RFFT    points to an instance of Q15 RFFT/RIFFT structure.

+   * @param[in]     *S_CFFT    points to an instance of Q15 CFFT/CIFFT structure.

+   * @param[in]     N          length of the DCT4.

+   * @param[in]     Nby2       half of the length of the DCT4.

+   * @param[in]     normalize  normalizing factor.

+   * @return		arm_status function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_ARGUMENT_ERROR if <code>N</code> is not a supported transform length.

+   */

+

+  arm_status arm_dct4_init_q15(

+  arm_dct4_instance_q15 * S,

+  arm_rfft_instance_q15 * S_RFFT,

+  arm_cfft_radix4_instance_q15 * S_CFFT,

+  uint16_t N,

+  uint16_t Nby2,

+  q15_t normalize);

+

+  /**

+   * @brief Processing function for the Q15 DCT4/IDCT4.

+   * @param[in]       *S             points to an instance of the Q15 DCT4 structure.

+   * @param[in]       *pState        points to state buffer.

+   * @param[in,out]   *pInlineBuffer points to the in-place input and output buffer.

+   * @return none.

+   */

+

+  void arm_dct4_q15(

+  const arm_dct4_instance_q15 * S,

+  q15_t * pState,

+  q15_t * pInlineBuffer);

+

+  /**

+   * @brief Floating-point vector addition.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[out]      *pDst points to the output vector

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_add_f32(

+  float32_t * pSrcA,

+  float32_t * pSrcB,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Q7 vector addition.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[out]      *pDst points to the output vector

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_add_q7(

+  q7_t * pSrcA,

+  q7_t * pSrcB,

+  q7_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Q15 vector addition.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[out]      *pDst points to the output vector

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_add_q15(

+  q15_t * pSrcA,

+  q15_t * pSrcB,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Q31 vector addition.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[out]      *pDst points to the output vector

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_add_q31(

+  q31_t * pSrcA,

+  q31_t * pSrcB,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Floating-point vector subtraction.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[out]      *pDst points to the output vector

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_sub_f32(

+  float32_t * pSrcA,

+  float32_t * pSrcB,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Q7 vector subtraction.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[out]      *pDst points to the output vector

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_sub_q7(

+  q7_t * pSrcA,

+  q7_t * pSrcB,

+  q7_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Q15 vector subtraction.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[out]      *pDst points to the output vector

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_sub_q15(

+  q15_t * pSrcA,

+  q15_t * pSrcB,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Q31 vector subtraction.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[out]      *pDst points to the output vector

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_sub_q31(

+  q31_t * pSrcA,

+  q31_t * pSrcB,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Multiplies a floating-point vector by a scalar.

+   * @param[in]       *pSrc points to the input vector

+   * @param[in]       scale scale factor to be applied

+   * @param[out]      *pDst points to the output vector

+   * @param[in]       blockSize number of samples in the vector

+   * @return none.

+   */

+

+  void arm_scale_f32(

+  float32_t * pSrc,

+  float32_t scale,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Multiplies a Q7 vector by a scalar.

+   * @param[in]       *pSrc points to the input vector

+   * @param[in]       scaleFract fractional portion of the scale value

+   * @param[in]       shift number of bits to shift the result by

+   * @param[out]      *pDst points to the output vector

+   * @param[in]       blockSize number of samples in the vector

+   * @return none.

+   */

+

+  void arm_scale_q7(

+  q7_t * pSrc,

+  q7_t scaleFract,

+  int8_t shift,

+  q7_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Multiplies a Q15 vector by a scalar.

+   * @param[in]       *pSrc points to the input vector

+   * @param[in]       scaleFract fractional portion of the scale value

+   * @param[in]       shift number of bits to shift the result by

+   * @param[out]      *pDst points to the output vector

+   * @param[in]       blockSize number of samples in the vector

+   * @return none.

+   */

+

+  void arm_scale_q15(

+  q15_t * pSrc,

+  q15_t scaleFract,

+  int8_t shift,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Multiplies a Q31 vector by a scalar.

+   * @param[in]       *pSrc points to the input vector

+   * @param[in]       scaleFract fractional portion of the scale value

+   * @param[in]       shift number of bits to shift the result by

+   * @param[out]      *pDst points to the output vector

+   * @param[in]       blockSize number of samples in the vector

+   * @return none.

+   */

+

+  void arm_scale_q31(

+  q31_t * pSrc,

+  q31_t scaleFract,

+  int8_t shift,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Q7 vector absolute value.

+   * @param[in]       *pSrc points to the input buffer

+   * @param[out]      *pDst points to the output buffer

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_abs_q7(

+  q7_t * pSrc,

+  q7_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Floating-point vector absolute value.

+   * @param[in]       *pSrc points to the input buffer

+   * @param[out]      *pDst points to the output buffer

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_abs_f32(

+  float32_t * pSrc,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Q15 vector absolute value.

+   * @param[in]       *pSrc points to the input buffer

+   * @param[out]      *pDst points to the output buffer

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_abs_q15(

+  q15_t * pSrc,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Q31 vector absolute value.

+   * @param[in]       *pSrc points to the input buffer

+   * @param[out]      *pDst points to the output buffer

+   * @param[in]       blockSize number of samples in each vector

+   * @return none.

+   */

+

+  void arm_abs_q31(

+  q31_t * pSrc,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Dot product of floating-point vectors.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[in]       blockSize number of samples in each vector

+   * @param[out]      *result output result returned here

+   * @return none.

+   */

+

+  void arm_dot_prod_f32(

+  float32_t * pSrcA,

+  float32_t * pSrcB,

+  uint32_t blockSize,

+  float32_t * result);

+

+  /**

+   * @brief Dot product of Q7 vectors.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[in]       blockSize number of samples in each vector

+   * @param[out]      *result output result returned here

+   * @return none.

+   */

+

+  void arm_dot_prod_q7(

+  q7_t * pSrcA,

+  q7_t * pSrcB,

+  uint32_t blockSize,

+  q31_t * result);

+

+  /**

+   * @brief Dot product of Q15 vectors.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[in]       blockSize number of samples in each vector

+   * @param[out]      *result output result returned here

+   * @return none.

+   */

+

+  void arm_dot_prod_q15(

+  q15_t * pSrcA,

+  q15_t * pSrcB,

+  uint32_t blockSize,

+  q63_t * result);

+

+  /**

+   * @brief Dot product of Q31 vectors.

+   * @param[in]       *pSrcA points to the first input vector

+   * @param[in]       *pSrcB points to the second input vector

+   * @param[in]       blockSize number of samples in each vector

+   * @param[out]      *result output result returned here

+   * @return none.

+   */

+

+  void arm_dot_prod_q31(

+  q31_t * pSrcA,

+  q31_t * pSrcB,

+  uint32_t blockSize,

+  q63_t * result);

+

+  /**

+   * @brief  Shifts the elements of a Q7 vector a specified number of bits.

+   * @param[in]  *pSrc points to the input vector

+   * @param[in]  shiftBits number of bits to shift.  A positive value shifts left; a negative value shifts right.

+   * @param[out]  *pDst points to the output vector

+   * @param[in]  blockSize number of samples in the vector

+   * @return none.

+   */

+

+  void arm_shift_q7(

+  q7_t * pSrc,

+  int8_t shiftBits,

+  q7_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Shifts the elements of a Q15 vector a specified number of bits.

+   * @param[in]  *pSrc points to the input vector

+   * @param[in]  shiftBits number of bits to shift.  A positive value shifts left; a negative value shifts right.

+   * @param[out]  *pDst points to the output vector

+   * @param[in]  blockSize number of samples in the vector

+   * @return none.

+   */

+

+  void arm_shift_q15(

+  q15_t * pSrc,

+  int8_t shiftBits,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Shifts the elements of a Q31 vector a specified number of bits.

+   * @param[in]  *pSrc points to the input vector

+   * @param[in]  shiftBits number of bits to shift.  A positive value shifts left; a negative value shifts right.

+   * @param[out]  *pDst points to the output vector

+   * @param[in]  blockSize number of samples in the vector

+   * @return none.

+   */

+

+  void arm_shift_q31(

+  q31_t * pSrc,

+  int8_t shiftBits,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Adds a constant offset to a floating-point vector.

+   * @param[in]  *pSrc points to the input vector

+   * @param[in]  offset is the offset to be added

+   * @param[out]  *pDst points to the output vector

+   * @param[in]  blockSize number of samples in the vector

+   * @return none.

+   */

+

+  void arm_offset_f32(

+  float32_t * pSrc,

+  float32_t offset,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Adds a constant offset to a Q7 vector.

+   * @param[in]  *pSrc points to the input vector

+   * @param[in]  offset is the offset to be added

+   * @param[out]  *pDst points to the output vector

+   * @param[in]  blockSize number of samples in the vector

+   * @return none.

+   */

+

+  void arm_offset_q7(

+  q7_t * pSrc,

+  q7_t offset,

+  q7_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Adds a constant offset to a Q15 vector.

+   * @param[in]  *pSrc points to the input vector

+   * @param[in]  offset is the offset to be added

+   * @param[out]  *pDst points to the output vector

+   * @param[in]  blockSize number of samples in the vector

+   * @return none.

+   */

+

+  void arm_offset_q15(

+  q15_t * pSrc,

+  q15_t offset,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Adds a constant offset to a Q31 vector.

+   * @param[in]  *pSrc points to the input vector

+   * @param[in]  offset is the offset to be added

+   * @param[out]  *pDst points to the output vector

+   * @param[in]  blockSize number of samples in the vector

+   * @return none.

+   */

+

+  void arm_offset_q31(

+  q31_t * pSrc,

+  q31_t offset,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Negates the elements of a floating-point vector.

+   * @param[in]  *pSrc points to the input vector

+   * @param[out]  *pDst points to the output vector

+   * @param[in]  blockSize number of samples in the vector

+   * @return none.

+   */

+

+  void arm_negate_f32(

+  float32_t * pSrc,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Negates the elements of a Q7 vector.

+   * @param[in]  *pSrc points to the input vector

+   * @param[out]  *pDst points to the output vector

+   * @param[in]  blockSize number of samples in the vector

+   * @return none.

+   */

+

+  void arm_negate_q7(

+  q7_t * pSrc,

+  q7_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Negates the elements of a Q15 vector.

+   * @param[in]  *pSrc points to the input vector

+   * @param[out]  *pDst points to the output vector

+   * @param[in]  blockSize number of samples in the vector

+   * @return none.

+   */

+

+  void arm_negate_q15(

+  q15_t * pSrc,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Negates the elements of a Q31 vector.

+   * @param[in]  *pSrc points to the input vector

+   * @param[out]  *pDst points to the output vector

+   * @param[in]  blockSize number of samples in the vector

+   * @return none.

+   */

+

+  void arm_negate_q31(

+  q31_t * pSrc,

+  q31_t * pDst,

+  uint32_t blockSize);

+  /**

+   * @brief  Copies the elements of a floating-point vector.

+   * @param[in]  *pSrc input pointer

+   * @param[out]  *pDst output pointer

+   * @param[in]  blockSize number of samples to process

+   * @return none.

+   */

+  void arm_copy_f32(

+  float32_t * pSrc,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Copies the elements of a Q7 vector.

+   * @param[in]  *pSrc input pointer

+   * @param[out]  *pDst output pointer

+   * @param[in]  blockSize number of samples to process

+   * @return none.

+   */

+  void arm_copy_q7(

+  q7_t * pSrc,

+  q7_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Copies the elements of a Q15 vector.

+   * @param[in]  *pSrc input pointer

+   * @param[out]  *pDst output pointer

+   * @param[in]  blockSize number of samples to process

+   * @return none.

+   */

+  void arm_copy_q15(

+  q15_t * pSrc,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Copies the elements of a Q31 vector.

+   * @param[in]  *pSrc input pointer

+   * @param[out]  *pDst output pointer

+   * @param[in]  blockSize number of samples to process

+   * @return none.

+   */

+  void arm_copy_q31(

+  q31_t * pSrc,

+  q31_t * pDst,

+  uint32_t blockSize);

+  /**

+   * @brief  Fills a constant value into a floating-point vector.

+   * @param[in]  value input value to be filled

+   * @param[out]  *pDst output pointer

+   * @param[in]  blockSize number of samples to process

+   * @return none.

+   */

+  void arm_fill_f32(

+  float32_t value,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Fills a constant value into a Q7 vector.

+   * @param[in]  value input value to be filled

+   * @param[out]  *pDst output pointer

+   * @param[in]  blockSize number of samples to process

+   * @return none.

+   */

+  void arm_fill_q7(

+  q7_t value,

+  q7_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Fills a constant value into a Q15 vector.

+   * @param[in]  value input value to be filled

+   * @param[out]  *pDst output pointer

+   * @param[in]  blockSize number of samples to process

+   * @return none.

+   */

+  void arm_fill_q15(

+  q15_t value,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Fills a constant value into a Q31 vector.

+   * @param[in]  value input value to be filled

+   * @param[out]  *pDst output pointer

+   * @param[in]  blockSize number of samples to process

+   * @return none.

+   */

+  void arm_fill_q31(

+  q31_t value,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+/**

+ * @brief Convolution of floating-point sequences.

+ * @param[in] *pSrcA points to the first input sequence.

+ * @param[in] srcALen length of the first input sequence.

+ * @param[in] *pSrcB points to the second input sequence.

+ * @param[in] srcBLen length of the second input sequence.

+ * @param[out] *pDst points to the location where the output result is written.  Length srcALen+srcBLen-1.

+ * @return none.

+ */

+

+  void arm_conv_f32(

+  float32_t * pSrcA,

+  uint32_t srcALen,

+  float32_t * pSrcB,

+  uint32_t srcBLen,

+  float32_t * pDst);

+

+

+  /**

+   * @brief Convolution of Q15 sequences.

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data.  Length srcALen+srcBLen-1.

+   * @param[in]  *pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.

+   * @param[in]  *pScratch2 points to scratch buffer of size min(srcALen, srcBLen).

+   * @return none.

+   */

+

+

+  void arm_conv_opt_q15(

+  q15_t * pSrcA,

+  uint32_t srcALen,

+  q15_t * pSrcB,

+  uint32_t srcBLen,

+  q15_t * pDst,

+  q15_t * pScratch1,

+  q15_t * pScratch2);

+

+

+/**

+ * @brief Convolution of Q15 sequences.

+ * @param[in] *pSrcA points to the first input sequence.

+ * @param[in] srcALen length of the first input sequence.

+ * @param[in] *pSrcB points to the second input sequence.

+ * @param[in] srcBLen length of the second input sequence.

+ * @param[out] *pDst points to the location where the output result is written.  Length srcALen+srcBLen-1.

+ * @return none.

+ */

+

+  void arm_conv_q15(

+  q15_t * pSrcA,

+  uint32_t srcALen,

+  q15_t * pSrcB,

+  uint32_t srcBLen,

+  q15_t * pDst);

+

+  /**

+   * @brief Convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data.  Length srcALen+srcBLen-1.

+   * @return none.

+   */

+

+  void arm_conv_fast_q15(

+			  q15_t * pSrcA,

+			 uint32_t srcALen,

+			  q15_t * pSrcB,

+			 uint32_t srcBLen,

+			 q15_t * pDst);

+

+  /**

+   * @brief Convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data.  Length srcALen+srcBLen-1.

+   * @param[in]  *pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.

+   * @param[in]  *pScratch2 points to scratch buffer of size min(srcALen, srcBLen).

+   * @return none.

+   */

+

+  void arm_conv_fast_opt_q15(

+  q15_t * pSrcA,

+  uint32_t srcALen,

+  q15_t * pSrcB,

+  uint32_t srcBLen,

+  q15_t * pDst,

+  q15_t * pScratch1,

+  q15_t * pScratch2);

+

+

+

+  /**

+   * @brief Convolution of Q31 sequences.

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data.  Length srcALen+srcBLen-1.

+   * @return none.

+   */

+

+  void arm_conv_q31(

+  q31_t * pSrcA,

+  uint32_t srcALen,

+  q31_t * pSrcB,

+  uint32_t srcBLen,

+  q31_t * pDst);

+

+  /**

+   * @brief Convolution of Q31 sequences (fast version) for Cortex-M3 and Cortex-M4

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data.  Length srcALen+srcBLen-1.

+   * @return none.

+   */

+

+  void arm_conv_fast_q31(

+  q31_t * pSrcA,

+  uint32_t srcALen,

+  q31_t * pSrcB,

+  uint32_t srcBLen,

+  q31_t * pDst);

+

+

+    /**

+   * @brief Convolution of Q7 sequences.

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data.  Length srcALen+srcBLen-1.

+   * @param[in]  *pScratch1 points to scratch buffer(of type q15_t) of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.

+   * @param[in]  *pScratch2 points to scratch buffer (of type q15_t) of size min(srcALen, srcBLen).

+   * @return none.

+   */

+

+  void arm_conv_opt_q7(

+  q7_t * pSrcA,

+  uint32_t srcALen,

+  q7_t * pSrcB,

+  uint32_t srcBLen,

+  q7_t * pDst,

+  q15_t * pScratch1,

+  q15_t * pScratch2);

+

+

+

+  /**

+   * @brief Convolution of Q7 sequences.

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data.  Length srcALen+srcBLen-1.

+   * @return none.

+   */

+

+  void arm_conv_q7(

+  q7_t * pSrcA,

+  uint32_t srcALen,

+  q7_t * pSrcB,

+  uint32_t srcBLen,

+  q7_t * pDst);

+

+

+  /**

+   * @brief Partial convolution of floating-point sequences.

+   * @param[in]       *pSrcA points to the first input sequence.

+   * @param[in]       srcALen length of the first input sequence.

+   * @param[in]       *pSrcB points to the second input sequence.

+   * @param[in]       srcBLen length of the second input sequence.

+   * @param[out]      *pDst points to the block of output data

+   * @param[in]       firstIndex is the first output sample to start with.

+   * @param[in]       numPoints is the number of output points to be computed.

+   * @return  Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].

+   */

+

+  arm_status arm_conv_partial_f32(

+  float32_t * pSrcA,

+  uint32_t srcALen,

+  float32_t * pSrcB,

+  uint32_t srcBLen,

+  float32_t * pDst,

+  uint32_t firstIndex,

+  uint32_t numPoints);

+

+    /**

+   * @brief Partial convolution of Q15 sequences.

+   * @param[in]       *pSrcA points to the first input sequence.

+   * @param[in]       srcALen length of the first input sequence.

+   * @param[in]       *pSrcB points to the second input sequence.

+   * @param[in]       srcBLen length of the second input sequence.

+   * @param[out]      *pDst points to the block of output data

+   * @param[in]       firstIndex is the first output sample to start with.

+   * @param[in]       numPoints is the number of output points to be computed.

+   * @param[in]       * pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.

+   * @param[in]       * pScratch2 points to scratch buffer of size min(srcALen, srcBLen).

+   * @return  Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].

+   */

+

+  arm_status arm_conv_partial_opt_q15(

+  q15_t * pSrcA,

+  uint32_t srcALen,

+  q15_t * pSrcB,

+  uint32_t srcBLen,

+  q15_t * pDst,

+  uint32_t firstIndex,

+  uint32_t numPoints,

+  q15_t * pScratch1,

+  q15_t * pScratch2);

+

+

+/**

+   * @brief Partial convolution of Q15 sequences.

+   * @param[in]       *pSrcA points to the first input sequence.

+   * @param[in]       srcALen length of the first input sequence.

+   * @param[in]       *pSrcB points to the second input sequence.

+   * @param[in]       srcBLen length of the second input sequence.

+   * @param[out]      *pDst points to the block of output data

+   * @param[in]       firstIndex is the first output sample to start with.

+   * @param[in]       numPoints is the number of output points to be computed.

+   * @return  Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].

+   */

+

+  arm_status arm_conv_partial_q15(

+  q15_t * pSrcA,

+  uint32_t srcALen,

+  q15_t * pSrcB,

+  uint32_t srcBLen,

+  q15_t * pDst,

+  uint32_t firstIndex,

+  uint32_t numPoints);

+

+  /**

+   * @brief Partial convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4

+   * @param[in]       *pSrcA points to the first input sequence.

+   * @param[in]       srcALen length of the first input sequence.

+   * @param[in]       *pSrcB points to the second input sequence.

+   * @param[in]       srcBLen length of the second input sequence.

+   * @param[out]      *pDst points to the block of output data

+   * @param[in]       firstIndex is the first output sample to start with.

+   * @param[in]       numPoints is the number of output points to be computed.

+   * @return  Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].

+   */

+

+  arm_status arm_conv_partial_fast_q15(

+				        q15_t * pSrcA,

+				       uint32_t srcALen,

+				        q15_t * pSrcB,

+				       uint32_t srcBLen,

+				       q15_t * pDst,

+				       uint32_t firstIndex,

+				       uint32_t numPoints);

+

+

+  /**

+   * @brief Partial convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4

+   * @param[in]       *pSrcA points to the first input sequence.

+   * @param[in]       srcALen length of the first input sequence.

+   * @param[in]       *pSrcB points to the second input sequence.

+   * @param[in]       srcBLen length of the second input sequence.

+   * @param[out]      *pDst points to the block of output data

+   * @param[in]       firstIndex is the first output sample to start with.

+   * @param[in]       numPoints is the number of output points to be computed.

+   * @param[in]       * pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.

+   * @param[in]       * pScratch2 points to scratch buffer of size min(srcALen, srcBLen).

+   * @return  Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].

+   */

+

+  arm_status arm_conv_partial_fast_opt_q15(

+  q15_t * pSrcA,

+  uint32_t srcALen,

+  q15_t * pSrcB,

+  uint32_t srcBLen,

+  q15_t * pDst,

+  uint32_t firstIndex,

+  uint32_t numPoints,

+  q15_t * pScratch1,

+  q15_t * pScratch2);

+

+

+  /**

+   * @brief Partial convolution of Q31 sequences.

+   * @param[in]       *pSrcA points to the first input sequence.

+   * @param[in]       srcALen length of the first input sequence.

+   * @param[in]       *pSrcB points to the second input sequence.

+   * @param[in]       srcBLen length of the second input sequence.

+   * @param[out]      *pDst points to the block of output data

+   * @param[in]       firstIndex is the first output sample to start with.

+   * @param[in]       numPoints is the number of output points to be computed.

+   * @return  Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].

+   */

+

+  arm_status arm_conv_partial_q31(

+  q31_t * pSrcA,

+  uint32_t srcALen,

+  q31_t * pSrcB,

+  uint32_t srcBLen,

+  q31_t * pDst,

+  uint32_t firstIndex,

+  uint32_t numPoints);

+

+

+  /**

+   * @brief Partial convolution of Q31 sequences (fast version) for Cortex-M3 and Cortex-M4

+   * @param[in]       *pSrcA points to the first input sequence.

+   * @param[in]       srcALen length of the first input sequence.

+   * @param[in]       *pSrcB points to the second input sequence.

+   * @param[in]       srcBLen length of the second input sequence.

+   * @param[out]      *pDst points to the block of output data

+   * @param[in]       firstIndex is the first output sample to start with.

+   * @param[in]       numPoints is the number of output points to be computed.

+   * @return  Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].

+   */

+

+  arm_status arm_conv_partial_fast_q31(

+  q31_t * pSrcA,

+  uint32_t srcALen,

+  q31_t * pSrcB,

+  uint32_t srcBLen,

+  q31_t * pDst,

+  uint32_t firstIndex,

+  uint32_t numPoints);

+

+

+  /**

+   * @brief Partial convolution of Q7 sequences

+   * @param[in]       *pSrcA points to the first input sequence.

+   * @param[in]       srcALen length of the first input sequence.

+   * @param[in]       *pSrcB points to the second input sequence.

+   * @param[in]       srcBLen length of the second input sequence.

+   * @param[out]      *pDst points to the block of output data

+   * @param[in]       firstIndex is the first output sample to start with.

+   * @param[in]       numPoints is the number of output points to be computed.

+   * @param[in]  *pScratch1 points to scratch buffer(of type q15_t) of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.

+   * @param[in]  *pScratch2 points to scratch buffer (of type q15_t) of size min(srcALen, srcBLen).

+   * @return  Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].

+   */

+

+  arm_status arm_conv_partial_opt_q7(

+  q7_t * pSrcA,

+  uint32_t srcALen,

+  q7_t * pSrcB,

+  uint32_t srcBLen,

+  q7_t * pDst,

+  uint32_t firstIndex,

+  uint32_t numPoints,

+  q15_t * pScratch1,

+  q15_t * pScratch2);

+

+

+/**

+   * @brief Partial convolution of Q7 sequences.

+   * @param[in]       *pSrcA points to the first input sequence.

+   * @param[in]       srcALen length of the first input sequence.

+   * @param[in]       *pSrcB points to the second input sequence.

+   * @param[in]       srcBLen length of the second input sequence.

+   * @param[out]      *pDst points to the block of output data

+   * @param[in]       firstIndex is the first output sample to start with.

+   * @param[in]       numPoints is the number of output points to be computed.

+   * @return  Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].

+   */

+

+  arm_status arm_conv_partial_q7(

+  q7_t * pSrcA,

+  uint32_t srcALen,

+  q7_t * pSrcB,

+  uint32_t srcBLen,

+  q7_t * pDst,

+  uint32_t firstIndex,

+  uint32_t numPoints);

+

+

+

+  /**

+   * @brief Instance structure for the Q15 FIR decimator.

+   */

+

+  typedef struct

+  {

+    uint8_t M;                      /**< decimation factor. */

+    uint16_t numTaps;               /**< number of coefficients in the filter. */

+    q15_t *pCoeffs;                  /**< points to the coefficient array. The array is of length numTaps.*/

+    q15_t *pState;                   /**< points to the state variable array. The array is of length numTaps+blockSize-1. */

+  } arm_fir_decimate_instance_q15;

+

+  /**

+   * @brief Instance structure for the Q31 FIR decimator.

+   */

+

+  typedef struct

+  {

+    uint8_t M;                  /**< decimation factor. */

+    uint16_t numTaps;           /**< number of coefficients in the filter. */

+    q31_t *pCoeffs;              /**< points to the coefficient array. The array is of length numTaps.*/

+    q31_t *pState;               /**< points to the state variable array. The array is of length numTaps+blockSize-1. */

+

+  } arm_fir_decimate_instance_q31;

+

+  /**

+   * @brief Instance structure for the floating-point FIR decimator.

+   */

+

+  typedef struct

+  {

+    uint8_t M;                          /**< decimation factor. */

+    uint16_t numTaps;                   /**< number of coefficients in the filter. */

+    float32_t *pCoeffs;                  /**< points to the coefficient array. The array is of length numTaps.*/

+    float32_t *pState;                   /**< points to the state variable array. The array is of length numTaps+blockSize-1. */

+

+  } arm_fir_decimate_instance_f32;

+

+

+

+  /**

+   * @brief Processing function for the floating-point FIR decimator.

+   * @param[in] *S points to an instance of the floating-point FIR decimator structure.

+   * @param[in] *pSrc points to the block of input data.

+   * @param[out] *pDst points to the block of output data

+   * @param[in] blockSize number of input samples to process per call.

+   * @return none

+   */

+

+  void arm_fir_decimate_f32(

+  const arm_fir_decimate_instance_f32 * S,

+  float32_t * pSrc,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief  Initialization function for the floating-point FIR decimator.

+   * @param[in,out] *S points to an instance of the floating-point FIR decimator structure.

+   * @param[in] numTaps  number of coefficients in the filter.

+   * @param[in] M  decimation factor.

+   * @param[in] *pCoeffs points to the filter coefficients.

+   * @param[in] *pState points to the state buffer.

+   * @param[in] blockSize number of input samples to process per call.

+   * @return    The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if

+   * <code>blockSize</code> is not a multiple of <code>M</code>.

+   */

+

+  arm_status arm_fir_decimate_init_f32(

+  arm_fir_decimate_instance_f32 * S,

+  uint16_t numTaps,

+  uint8_t M,

+  float32_t * pCoeffs,

+  float32_t * pState,

+  uint32_t blockSize);

+

+  /**

+   * @brief Processing function for the Q15 FIR decimator.

+   * @param[in] *S points to an instance of the Q15 FIR decimator structure.

+   * @param[in] *pSrc points to the block of input data.

+   * @param[out] *pDst points to the block of output data

+   * @param[in] blockSize number of input samples to process per call.

+   * @return none

+   */

+

+  void arm_fir_decimate_q15(

+  const arm_fir_decimate_instance_q15 * S,

+  q15_t * pSrc,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Processing function for the Q15 FIR decimator (fast variant) for Cortex-M3 and Cortex-M4.

+   * @param[in] *S points to an instance of the Q15 FIR decimator structure.

+   * @param[in] *pSrc points to the block of input data.

+   * @param[out] *pDst points to the block of output data

+   * @param[in] blockSize number of input samples to process per call.

+   * @return none

+   */

+

+  void arm_fir_decimate_fast_q15(

+  const arm_fir_decimate_instance_q15 * S,

+  q15_t * pSrc,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+

+

+  /**

+   * @brief  Initialization function for the Q15 FIR decimator.

+   * @param[in,out] *S points to an instance of the Q15 FIR decimator structure.

+   * @param[in] numTaps  number of coefficients in the filter.

+   * @param[in] M  decimation factor.

+   * @param[in] *pCoeffs points to the filter coefficients.

+   * @param[in] *pState points to the state buffer.

+   * @param[in] blockSize number of input samples to process per call.

+   * @return    The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if

+   * <code>blockSize</code> is not a multiple of <code>M</code>.

+   */

+

+  arm_status arm_fir_decimate_init_q15(

+  arm_fir_decimate_instance_q15 * S,

+  uint16_t numTaps,

+  uint8_t M,

+  q15_t * pCoeffs,

+  q15_t * pState,

+  uint32_t blockSize);

+

+  /**

+   * @brief Processing function for the Q31 FIR decimator.

+   * @param[in] *S points to an instance of the Q31 FIR decimator structure.

+   * @param[in] *pSrc points to the block of input data.

+   * @param[out] *pDst points to the block of output data

+   * @param[in] blockSize number of input samples to process per call.

+   * @return none

+   */

+

+  void arm_fir_decimate_q31(

+  const arm_fir_decimate_instance_q31 * S,

+  q31_t * pSrc,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Processing function for the Q31 FIR decimator (fast variant) for Cortex-M3 and Cortex-M4.

+   * @param[in] *S points to an instance of the Q31 FIR decimator structure.

+   * @param[in] *pSrc points to the block of input data.

+   * @param[out] *pDst points to the block of output data

+   * @param[in] blockSize number of input samples to process per call.

+   * @return none

+   */

+

+  void arm_fir_decimate_fast_q31(

+  arm_fir_decimate_instance_q31 * S,

+  q31_t * pSrc,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief  Initialization function for the Q31 FIR decimator.

+   * @param[in,out] *S points to an instance of the Q31 FIR decimator structure.

+   * @param[in] numTaps  number of coefficients in the filter.

+   * @param[in] M  decimation factor.

+   * @param[in] *pCoeffs points to the filter coefficients.

+   * @param[in] *pState points to the state buffer.

+   * @param[in] blockSize number of input samples to process per call.

+   * @return    The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if

+   * <code>blockSize</code> is not a multiple of <code>M</code>.

+   */

+

+  arm_status arm_fir_decimate_init_q31(

+  arm_fir_decimate_instance_q31 * S,

+  uint16_t numTaps,

+  uint8_t M,

+  q31_t * pCoeffs,

+  q31_t * pState,

+  uint32_t blockSize);

+

+

+

+  /**

+   * @brief Instance structure for the Q15 FIR interpolator.

+   */

+

+  typedef struct

+  {

+    uint8_t L;                      /**< upsample factor. */

+    uint16_t phaseLength;           /**< length of each polyphase filter component. */

+    q15_t *pCoeffs;                 /**< points to the coefficient array. The array is of length L*phaseLength. */

+    q15_t *pState;                  /**< points to the state variable array. The array is of length blockSize+phaseLength-1. */

+  } arm_fir_interpolate_instance_q15;

+

+  /**

+   * @brief Instance structure for the Q31 FIR interpolator.

+   */

+

+  typedef struct

+  {

+    uint8_t L;                      /**< upsample factor. */

+    uint16_t phaseLength;           /**< length of each polyphase filter component. */

+    q31_t *pCoeffs;                  /**< points to the coefficient array. The array is of length L*phaseLength. */

+    q31_t *pState;                   /**< points to the state variable array. The array is of length blockSize+phaseLength-1. */

+  } arm_fir_interpolate_instance_q31;

+

+  /**

+   * @brief Instance structure for the floating-point FIR interpolator.

+   */

+

+  typedef struct

+  {

+    uint8_t L;                     /**< upsample factor. */

+    uint16_t phaseLength;          /**< length of each polyphase filter component. */

+    float32_t *pCoeffs;             /**< points to the coefficient array. The array is of length L*phaseLength. */

+    float32_t *pState;              /**< points to the state variable array. The array is of length blockSize+phaseLength-1. */

+  } arm_fir_interpolate_instance_f32;

+

+

+  /**

+   * @brief Processing function for the Q15 FIR interpolator.

+   * @param[in] *S        points to an instance of the Q15 FIR interpolator structure.

+   * @param[in] *pSrc     points to the block of input data.

+   * @param[out] *pDst    points to the block of output data.

+   * @param[in] blockSize number of input samples to process per call.

+   * @return none.

+   */

+

+  void arm_fir_interpolate_q15(

+  const arm_fir_interpolate_instance_q15 * S,

+  q15_t * pSrc,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief  Initialization function for the Q15 FIR interpolator.

+   * @param[in,out] *S        points to an instance of the Q15 FIR interpolator structure.

+   * @param[in]     L         upsample factor.

+   * @param[in]     numTaps   number of filter coefficients in the filter.

+   * @param[in]     *pCoeffs  points to the filter coefficient buffer.

+   * @param[in]     *pState   points to the state buffer.

+   * @param[in]     blockSize number of input samples to process per call.

+   * @return        The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if

+   * the filter length <code>numTaps</code> is not a multiple of the interpolation factor <code>L</code>.

+   */

+

+  arm_status arm_fir_interpolate_init_q15(

+  arm_fir_interpolate_instance_q15 * S,

+  uint8_t L,

+  uint16_t numTaps,

+  q15_t * pCoeffs,

+  q15_t * pState,

+  uint32_t blockSize);

+

+  /**

+   * @brief Processing function for the Q31 FIR interpolator.

+   * @param[in] *S        points to an instance of the Q31 FIR interpolator structure.

+   * @param[in] *pSrc     points to the block of input data.

+   * @param[out] *pDst    points to the block of output data.

+   * @param[in] blockSize number of input samples to process per call.

+   * @return none.

+   */

+

+  void arm_fir_interpolate_q31(

+  const arm_fir_interpolate_instance_q31 * S,

+  q31_t * pSrc,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Initialization function for the Q31 FIR interpolator.

+   * @param[in,out] *S        points to an instance of the Q31 FIR interpolator structure.

+   * @param[in]     L         upsample factor.

+   * @param[in]     numTaps   number of filter coefficients in the filter.

+   * @param[in]     *pCoeffs  points to the filter coefficient buffer.

+   * @param[in]     *pState   points to the state buffer.

+   * @param[in]     blockSize number of input samples to process per call.

+   * @return        The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if

+   * the filter length <code>numTaps</code> is not a multiple of the interpolation factor <code>L</code>.

+   */

+

+  arm_status arm_fir_interpolate_init_q31(

+  arm_fir_interpolate_instance_q31 * S,

+  uint8_t L,

+  uint16_t numTaps,

+  q31_t * pCoeffs,

+  q31_t * pState,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief Processing function for the floating-point FIR interpolator.

+   * @param[in] *S        points to an instance of the floating-point FIR interpolator structure.

+   * @param[in] *pSrc     points to the block of input data.

+   * @param[out] *pDst    points to the block of output data.

+   * @param[in] blockSize number of input samples to process per call.

+   * @return none.

+   */

+

+  void arm_fir_interpolate_f32(

+  const arm_fir_interpolate_instance_f32 * S,

+  float32_t * pSrc,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Initialization function for the floating-point FIR interpolator.

+   * @param[in,out] *S        points to an instance of the floating-point FIR interpolator structure.

+   * @param[in]     L         upsample factor.

+   * @param[in]     numTaps   number of filter coefficients in the filter.

+   * @param[in]     *pCoeffs  points to the filter coefficient buffer.

+   * @param[in]     *pState   points to the state buffer.

+   * @param[in]     blockSize number of input samples to process per call.

+   * @return        The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if

+   * the filter length <code>numTaps</code> is not a multiple of the interpolation factor <code>L</code>.

+   */

+

+  arm_status arm_fir_interpolate_init_f32(

+  arm_fir_interpolate_instance_f32 * S,

+  uint8_t L,

+  uint16_t numTaps,

+  float32_t * pCoeffs,

+  float32_t * pState,

+  uint32_t blockSize);

+

+  /**

+   * @brief Instance structure for the high precision Q31 Biquad cascade filter.

+   */

+

+  typedef struct

+  {

+    uint8_t numStages;       /**< number of 2nd order stages in the filter.  Overall order is 2*numStages. */

+    q63_t *pState;           /**< points to the array of state coefficients.  The array is of length 4*numStages. */

+    q31_t *pCoeffs;          /**< points to the array of coefficients.  The array is of length 5*numStages. */

+    uint8_t postShift;       /**< additional shift, in bits, applied to each output sample. */

+

+  } arm_biquad_cas_df1_32x64_ins_q31;

+

+

+  /**

+   * @brief Processing function for the high precision Q31 Biquad cascade filter (32-bit data, 64-bit state).

+   * @param[in]  *S        points to an instance of the high precision Q31 Biquad cascade filter structure.

+   * @param[in]  *pSrc     points to the block of input data.

+   * @param[out] *pDst     points to the block of output data

+   * @param[in]  blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_biquad_cas_df1_32x64_q31(

+  const arm_biquad_cas_df1_32x64_ins_q31 * S,

+  q31_t * pSrc,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief  Initialization function for the high precision Q31 Biquad cascade filter (32-bit data, 64-bit state).

+   * @param[in,out] *S           points to an instance of the high precision Q31 Biquad cascade filter structure.

+   * @param[in]     numStages    number of 2nd order stages in the filter.

+   * @param[in]     *pCoeffs     points to the filter coefficients.

+   * @param[in]     *pState      points to the state buffer.

+   * @param[in]     postShift    shift to be applied to the output. Varies according to the coefficients format

+   * @return        none

+   */

+

+  void arm_biquad_cas_df1_32x64_init_q31(

+  arm_biquad_cas_df1_32x64_ins_q31 * S,

+  uint8_t numStages,

+  q31_t * pCoeffs,

+  q63_t * pState,

+  uint8_t postShift);

+

+

+

+  /**

+   * @brief Instance structure for the floating-point transposed direct form II Biquad cascade filter.

+   */

+

+  typedef struct

+  {

+    uint8_t numStages;         /**< number of 2nd order stages in the filter.  Overall order is 2*numStages. */

+    float32_t *pState;         /**< points to the array of state coefficients.  The array is of length 2*numStages. */

+    float32_t *pCoeffs;        /**< points to the array of coefficients.  The array is of length 5*numStages. */

+  } arm_biquad_cascade_df2T_instance_f32;

+

+

+

+  /**

+   * @brief Instance structure for the floating-point transposed direct form II Biquad cascade filter (stereo, 2 channels).

+   */

+

+  typedef struct

+  {

+    uint8_t numStages;         /**< number of 2nd order stages in the filter.  Overall order is 2*numStages. */

+    float32_t *pState;         /**< points to the array of state coefficients.  The array is of length 4*numStages. */

+    float32_t *pCoeffs;        /**< points to the array of coefficients.  The array is of length 5*numStages. */

+  } arm_biquad_cascade_stereo_df2T_instance_f32;

+

+

+

+  /**

+   * @brief Instance structure for the floating-point transposed direct form II Biquad cascade filter.

+   */

+

+  typedef struct

+  {

+    uint8_t numStages;         /**< number of 2nd order stages in the filter.  Overall order is 2*numStages. */

+    float64_t *pState;         /**< points to the array of state coefficients.  The array is of length 2*numStages. */

+    float64_t *pCoeffs;        /**< points to the array of coefficients.  The array is of length 5*numStages. */

+  } arm_biquad_cascade_df2T_instance_f64;

+

+

+  /**

+   * @brief Processing function for the floating-point transposed direct form II Biquad cascade filter.

+   * @param[in]  *S        points to an instance of the filter data structure.

+   * @param[in]  *pSrc     points to the block of input data.

+   * @param[out] *pDst     points to the block of output data

+   * @param[in]  blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_biquad_cascade_df2T_f32(

+  const arm_biquad_cascade_df2T_instance_f32 * S,

+  float32_t * pSrc,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief Processing function for the floating-point transposed direct form II Biquad cascade filter. 2 channels

+   * @param[in]  *S        points to an instance of the filter data structure.

+   * @param[in]  *pSrc     points to the block of input data.

+   * @param[out] *pDst     points to the block of output data

+   * @param[in]  blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_biquad_cascade_stereo_df2T_f32(

+  const arm_biquad_cascade_stereo_df2T_instance_f32 * S,

+  float32_t * pSrc,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Processing function for the floating-point transposed direct form II Biquad cascade filter.

+   * @param[in]  *S        points to an instance of the filter data structure.

+   * @param[in]  *pSrc     points to the block of input data.

+   * @param[out] *pDst     points to the block of output data

+   * @param[in]  blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_biquad_cascade_df2T_f64(

+  const arm_biquad_cascade_df2T_instance_f64 * S,

+  float64_t * pSrc,

+  float64_t * pDst,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief  Initialization function for the floating-point transposed direct form II Biquad cascade filter.

+   * @param[in,out] *S           points to an instance of the filter data structure.

+   * @param[in]     numStages    number of 2nd order stages in the filter.

+   * @param[in]     *pCoeffs     points to the filter coefficients.

+   * @param[in]     *pState      points to the state buffer.

+   * @return        none

+   */

+

+  void arm_biquad_cascade_df2T_init_f32(

+  arm_biquad_cascade_df2T_instance_f32 * S,

+  uint8_t numStages,

+  float32_t * pCoeffs,

+  float32_t * pState);

+

+

+  /**

+   * @brief  Initialization function for the floating-point transposed direct form II Biquad cascade filter (stereo, 2 channels).

+   * @param[in,out] *S           points to an instance of the filter data structure.

+   * @param[in]     numStages    number of 2nd order stages in the filter.

+   * @param[in]     *pCoeffs     points to the filter coefficients.

+   * @param[in]     *pState      points to the state buffer.

+   * @return        none

+   */

+

+  void arm_biquad_cascade_stereo_df2T_init_f32(

+  arm_biquad_cascade_stereo_df2T_instance_f32 * S,

+  uint8_t numStages,

+  float32_t * pCoeffs,

+  float32_t * pState);

+

+

+  /**

+   * @brief  Initialization function for the floating-point transposed direct form II Biquad cascade filter.

+   * @param[in,out] *S           points to an instance of the filter data structure.

+   * @param[in]     numStages    number of 2nd order stages in the filter.

+   * @param[in]     *pCoeffs     points to the filter coefficients.

+   * @param[in]     *pState      points to the state buffer.

+   * @return        none

+   */

+

+  void arm_biquad_cascade_df2T_init_f64(

+  arm_biquad_cascade_df2T_instance_f64 * S,

+  uint8_t numStages,

+  float64_t * pCoeffs,

+  float64_t * pState);

+

+

+

+  /**

+   * @brief Instance structure for the Q15 FIR lattice filter.

+   */

+

+  typedef struct

+  {

+    uint16_t numStages;                          /**< number of filter stages. */

+    q15_t *pState;                               /**< points to the state variable array. The array is of length numStages. */

+    q15_t *pCoeffs;                              /**< points to the coefficient array. The array is of length numStages. */

+  } arm_fir_lattice_instance_q15;

+

+  /**

+   * @brief Instance structure for the Q31 FIR lattice filter.

+   */

+

+  typedef struct

+  {

+    uint16_t numStages;                          /**< number of filter stages. */

+    q31_t *pState;                               /**< points to the state variable array. The array is of length numStages. */

+    q31_t *pCoeffs;                              /**< points to the coefficient array. The array is of length numStages. */

+  } arm_fir_lattice_instance_q31;

+

+  /**

+   * @brief Instance structure for the floating-point FIR lattice filter.

+   */

+

+  typedef struct

+  {

+    uint16_t numStages;                  /**< number of filter stages. */

+    float32_t *pState;                   /**< points to the state variable array. The array is of length numStages. */

+    float32_t *pCoeffs;                  /**< points to the coefficient array. The array is of length numStages. */

+  } arm_fir_lattice_instance_f32;

+

+  /**

+   * @brief Initialization function for the Q15 FIR lattice filter.

+   * @param[in] *S points to an instance of the Q15 FIR lattice structure.

+   * @param[in] numStages  number of filter stages.

+   * @param[in] *pCoeffs points to the coefficient buffer.  The array is of length numStages.

+   * @param[in] *pState points to the state buffer.  The array is of length numStages.

+   * @return none.

+   */

+

+  void arm_fir_lattice_init_q15(

+  arm_fir_lattice_instance_q15 * S,

+  uint16_t numStages,

+  q15_t * pCoeffs,

+  q15_t * pState);

+

+

+  /**

+   * @brief Processing function for the Q15 FIR lattice filter.

+   * @param[in] *S points to an instance of the Q15 FIR lattice structure.

+   * @param[in] *pSrc points to the block of input data.

+   * @param[out] *pDst points to the block of output data.

+   * @param[in] blockSize number of samples to process.

+   * @return none.

+   */

+  void arm_fir_lattice_q15(

+  const arm_fir_lattice_instance_q15 * S,

+  q15_t * pSrc,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Initialization function for the Q31 FIR lattice filter.

+   * @param[in] *S points to an instance of the Q31 FIR lattice structure.

+   * @param[in] numStages  number of filter stages.

+   * @param[in] *pCoeffs points to the coefficient buffer.  The array is of length numStages.

+   * @param[in] *pState points to the state buffer.   The array is of length numStages.

+   * @return none.

+   */

+

+  void arm_fir_lattice_init_q31(

+  arm_fir_lattice_instance_q31 * S,

+  uint16_t numStages,

+  q31_t * pCoeffs,

+  q31_t * pState);

+

+

+  /**

+   * @brief Processing function for the Q31 FIR lattice filter.

+   * @param[in]  *S        points to an instance of the Q31 FIR lattice structure.

+   * @param[in]  *pSrc     points to the block of input data.

+   * @param[out] *pDst     points to the block of output data

+   * @param[in]  blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_fir_lattice_q31(

+  const arm_fir_lattice_instance_q31 * S,

+  q31_t * pSrc,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+/**

+ * @brief Initialization function for the floating-point FIR lattice filter.

+ * @param[in] *S points to an instance of the floating-point FIR lattice structure.

+ * @param[in] numStages  number of filter stages.

+ * @param[in] *pCoeffs points to the coefficient buffer.  The array is of length numStages.

+ * @param[in] *pState points to the state buffer.  The array is of length numStages.

+ * @return none.

+ */

+

+  void arm_fir_lattice_init_f32(

+  arm_fir_lattice_instance_f32 * S,

+  uint16_t numStages,

+  float32_t * pCoeffs,

+  float32_t * pState);

+

+  /**

+   * @brief Processing function for the floating-point FIR lattice filter.

+   * @param[in]  *S        points to an instance of the floating-point FIR lattice structure.

+   * @param[in]  *pSrc     points to the block of input data.

+   * @param[out] *pDst     points to the block of output data

+   * @param[in]  blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_fir_lattice_f32(

+  const arm_fir_lattice_instance_f32 * S,

+  float32_t * pSrc,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Instance structure for the Q15 IIR lattice filter.

+   */

+  typedef struct

+  {

+    uint16_t numStages;                         /**< number of stages in the filter. */

+    q15_t *pState;                              /**< points to the state variable array. The array is of length numStages+blockSize. */

+    q15_t *pkCoeffs;                            /**< points to the reflection coefficient array. The array is of length numStages. */

+    q15_t *pvCoeffs;                            /**< points to the ladder coefficient array. The array is of length numStages+1. */

+  } arm_iir_lattice_instance_q15;

+

+  /**

+   * @brief Instance structure for the Q31 IIR lattice filter.

+   */

+  typedef struct

+  {

+    uint16_t numStages;                         /**< number of stages in the filter. */

+    q31_t *pState;                              /**< points to the state variable array. The array is of length numStages+blockSize. */

+    q31_t *pkCoeffs;                            /**< points to the reflection coefficient array. The array is of length numStages. */

+    q31_t *pvCoeffs;                            /**< points to the ladder coefficient array. The array is of length numStages+1. */

+  } arm_iir_lattice_instance_q31;

+

+  /**

+   * @brief Instance structure for the floating-point IIR lattice filter.

+   */

+  typedef struct

+  {

+    uint16_t numStages;                         /**< number of stages in the filter. */

+    float32_t *pState;                          /**< points to the state variable array. The array is of length numStages+blockSize. */

+    float32_t *pkCoeffs;                        /**< points to the reflection coefficient array. The array is of length numStages. */

+    float32_t *pvCoeffs;                        /**< points to the ladder coefficient array. The array is of length numStages+1. */

+  } arm_iir_lattice_instance_f32;

+

+  /**

+   * @brief Processing function for the floating-point IIR lattice filter.

+   * @param[in] *S points to an instance of the floating-point IIR lattice structure.

+   * @param[in] *pSrc points to the block of input data.

+   * @param[out] *pDst points to the block of output data.

+   * @param[in] blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_iir_lattice_f32(

+  const arm_iir_lattice_instance_f32 * S,

+  float32_t * pSrc,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Initialization function for the floating-point IIR lattice filter.

+   * @param[in] *S points to an instance of the floating-point IIR lattice structure.

+   * @param[in] numStages number of stages in the filter.

+   * @param[in] *pkCoeffs points to the reflection coefficient buffer.  The array is of length numStages.

+   * @param[in] *pvCoeffs points to the ladder coefficient buffer.  The array is of length numStages+1.

+ * @param[in] *pState points to the state buffer.  The array is of length numStages+blockSize.

+   * @param[in] blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_iir_lattice_init_f32(

+  arm_iir_lattice_instance_f32 * S,

+  uint16_t numStages,

+  float32_t * pkCoeffs,

+  float32_t * pvCoeffs,

+  float32_t * pState,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief Processing function for the Q31 IIR lattice filter.

+   * @param[in] *S points to an instance of the Q31 IIR lattice structure.

+   * @param[in] *pSrc points to the block of input data.

+   * @param[out] *pDst points to the block of output data.

+   * @param[in] blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_iir_lattice_q31(

+  const arm_iir_lattice_instance_q31 * S,

+  q31_t * pSrc,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief Initialization function for the Q31 IIR lattice filter.

+   * @param[in] *S points to an instance of the Q31 IIR lattice structure.

+   * @param[in] numStages number of stages in the filter.

+   * @param[in] *pkCoeffs points to the reflection coefficient buffer.  The array is of length numStages.

+   * @param[in] *pvCoeffs points to the ladder coefficient buffer.  The array is of length numStages+1.

+   * @param[in] *pState points to the state buffer.  The array is of length numStages+blockSize.

+   * @param[in] blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_iir_lattice_init_q31(

+  arm_iir_lattice_instance_q31 * S,

+  uint16_t numStages,

+  q31_t * pkCoeffs,

+  q31_t * pvCoeffs,

+  q31_t * pState,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief Processing function for the Q15 IIR lattice filter.

+   * @param[in] *S points to an instance of the Q15 IIR lattice structure.

+   * @param[in] *pSrc points to the block of input data.

+   * @param[out] *pDst points to the block of output data.

+   * @param[in] blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_iir_lattice_q15(

+  const arm_iir_lattice_instance_q15 * S,

+  q15_t * pSrc,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+

+/**

+ * @brief Initialization function for the Q15 IIR lattice filter.

+ * @param[in] *S points to an instance of the fixed-point Q15 IIR lattice structure.

+ * @param[in] numStages  number of stages in the filter.

+ * @param[in] *pkCoeffs points to reflection coefficient buffer.  The array is of length numStages.

+ * @param[in] *pvCoeffs points to ladder coefficient buffer.  The array is of length numStages+1.

+ * @param[in] *pState points to state buffer.  The array is of length numStages+blockSize.

+ * @param[in] blockSize number of samples to process per call.

+ * @return none.

+ */

+

+  void arm_iir_lattice_init_q15(

+  arm_iir_lattice_instance_q15 * S,

+  uint16_t numStages,

+  q15_t * pkCoeffs,

+  q15_t * pvCoeffs,

+  q15_t * pState,

+  uint32_t blockSize);

+

+  /**

+   * @brief Instance structure for the floating-point LMS filter.

+   */

+

+  typedef struct

+  {

+    uint16_t numTaps;    /**< number of coefficients in the filter. */

+    float32_t *pState;   /**< points to the state variable array. The array is of length numTaps+blockSize-1. */

+    float32_t *pCoeffs;  /**< points to the coefficient array. The array is of length numTaps. */

+    float32_t mu;        /**< step size that controls filter coefficient updates. */

+  } arm_lms_instance_f32;

+

+  /**

+   * @brief Processing function for floating-point LMS filter.

+   * @param[in]  *S points to an instance of the floating-point LMS filter structure.

+   * @param[in]  *pSrc points to the block of input data.

+   * @param[in]  *pRef points to the block of reference data.

+   * @param[out] *pOut points to the block of output data.

+   * @param[out] *pErr points to the block of error data.

+   * @param[in]  blockSize number of samples to process.

+   * @return     none.

+   */

+

+  void arm_lms_f32(

+  const arm_lms_instance_f32 * S,

+  float32_t * pSrc,

+  float32_t * pRef,

+  float32_t * pOut,

+  float32_t * pErr,

+  uint32_t blockSize);

+

+  /**

+   * @brief Initialization function for floating-point LMS filter.

+   * @param[in] *S points to an instance of the floating-point LMS filter structure.

+   * @param[in] numTaps  number of filter coefficients.

+   * @param[in] *pCoeffs points to the coefficient buffer.

+   * @param[in] *pState points to state buffer.

+   * @param[in] mu step size that controls filter coefficient updates.

+   * @param[in] blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_lms_init_f32(

+  arm_lms_instance_f32 * S,

+  uint16_t numTaps,

+  float32_t * pCoeffs,

+  float32_t * pState,

+  float32_t mu,

+  uint32_t blockSize);

+

+  /**

+   * @brief Instance structure for the Q15 LMS filter.

+   */

+

+  typedef struct

+  {

+    uint16_t numTaps;    /**< number of coefficients in the filter. */

+    q15_t *pState;       /**< points to the state variable array. The array is of length numTaps+blockSize-1. */

+    q15_t *pCoeffs;      /**< points to the coefficient array. The array is of length numTaps. */

+    q15_t mu;            /**< step size that controls filter coefficient updates. */

+    uint32_t postShift;  /**< bit shift applied to coefficients. */

+  } arm_lms_instance_q15;

+

+

+  /**

+   * @brief Initialization function for the Q15 LMS filter.

+   * @param[in] *S points to an instance of the Q15 LMS filter structure.

+   * @param[in] numTaps  number of filter coefficients.

+   * @param[in] *pCoeffs points to the coefficient buffer.

+   * @param[in] *pState points to the state buffer.

+   * @param[in] mu step size that controls filter coefficient updates.

+   * @param[in] blockSize number of samples to process.

+   * @param[in] postShift bit shift applied to coefficients.

+   * @return    none.

+   */

+

+  void arm_lms_init_q15(

+  arm_lms_instance_q15 * S,

+  uint16_t numTaps,

+  q15_t * pCoeffs,

+  q15_t * pState,

+  q15_t mu,

+  uint32_t blockSize,

+  uint32_t postShift);

+

+  /**

+   * @brief Processing function for Q15 LMS filter.

+   * @param[in] *S points to an instance of the Q15 LMS filter structure.

+   * @param[in] *pSrc points to the block of input data.

+   * @param[in] *pRef points to the block of reference data.

+   * @param[out] *pOut points to the block of output data.

+   * @param[out] *pErr points to the block of error data.

+   * @param[in] blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_lms_q15(

+  const arm_lms_instance_q15 * S,

+  q15_t * pSrc,

+  q15_t * pRef,

+  q15_t * pOut,

+  q15_t * pErr,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief Instance structure for the Q31 LMS filter.

+   */

+

+  typedef struct

+  {

+    uint16_t numTaps;    /**< number of coefficients in the filter. */

+    q31_t *pState;       /**< points to the state variable array. The array is of length numTaps+blockSize-1. */

+    q31_t *pCoeffs;      /**< points to the coefficient array. The array is of length numTaps. */

+    q31_t mu;            /**< step size that controls filter coefficient updates. */

+    uint32_t postShift;  /**< bit shift applied to coefficients. */

+

+  } arm_lms_instance_q31;

+

+  /**

+   * @brief Processing function for Q31 LMS filter.

+   * @param[in]  *S points to an instance of the Q31 LMS filter structure.

+   * @param[in]  *pSrc points to the block of input data.

+   * @param[in]  *pRef points to the block of reference data.

+   * @param[out] *pOut points to the block of output data.

+   * @param[out] *pErr points to the block of error data.

+   * @param[in]  blockSize number of samples to process.

+   * @return     none.

+   */

+

+  void arm_lms_q31(

+  const arm_lms_instance_q31 * S,

+  q31_t * pSrc,

+  q31_t * pRef,

+  q31_t * pOut,

+  q31_t * pErr,

+  uint32_t blockSize);

+

+  /**

+   * @brief Initialization function for Q31 LMS filter.

+   * @param[in] *S points to an instance of the Q31 LMS filter structure.

+   * @param[in] numTaps  number of filter coefficients.

+   * @param[in] *pCoeffs points to coefficient buffer.

+   * @param[in] *pState points to state buffer.

+   * @param[in] mu step size that controls filter coefficient updates.

+   * @param[in] blockSize number of samples to process.

+   * @param[in] postShift bit shift applied to coefficients.

+   * @return none.

+   */

+

+  void arm_lms_init_q31(

+  arm_lms_instance_q31 * S,

+  uint16_t numTaps,

+  q31_t * pCoeffs,

+  q31_t * pState,

+  q31_t mu,

+  uint32_t blockSize,

+  uint32_t postShift);

+

+  /**

+   * @brief Instance structure for the floating-point normalized LMS filter.

+   */

+

+  typedef struct

+  {

+    uint16_t numTaps;     /**< number of coefficients in the filter. */

+    float32_t *pState;    /**< points to the state variable array. The array is of length numTaps+blockSize-1. */

+    float32_t *pCoeffs;   /**< points to the coefficient array. The array is of length numTaps. */

+    float32_t mu;        /**< step size that controls filter coefficient updates. */

+    float32_t energy;    /**< saves previous frame energy. */

+    float32_t x0;        /**< saves previous input sample. */

+  } arm_lms_norm_instance_f32;

+

+  /**

+   * @brief Processing function for floating-point normalized LMS filter.

+   * @param[in] *S points to an instance of the floating-point normalized LMS filter structure.

+   * @param[in] *pSrc points to the block of input data.

+   * @param[in] *pRef points to the block of reference data.

+   * @param[out] *pOut points to the block of output data.

+   * @param[out] *pErr points to the block of error data.

+   * @param[in] blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_lms_norm_f32(

+  arm_lms_norm_instance_f32 * S,

+  float32_t * pSrc,

+  float32_t * pRef,

+  float32_t * pOut,

+  float32_t * pErr,

+  uint32_t blockSize);

+

+  /**

+   * @brief Initialization function for floating-point normalized LMS filter.

+   * @param[in] *S points to an instance of the floating-point LMS filter structure.

+   * @param[in] numTaps  number of filter coefficients.

+   * @param[in] *pCoeffs points to coefficient buffer.

+   * @param[in] *pState points to state buffer.

+   * @param[in] mu step size that controls filter coefficient updates.

+   * @param[in] blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_lms_norm_init_f32(

+  arm_lms_norm_instance_f32 * S,

+  uint16_t numTaps,

+  float32_t * pCoeffs,

+  float32_t * pState,

+  float32_t mu,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief Instance structure for the Q31 normalized LMS filter.

+   */

+  typedef struct

+  {

+    uint16_t numTaps;     /**< number of coefficients in the filter. */

+    q31_t *pState;        /**< points to the state variable array. The array is of length numTaps+blockSize-1. */

+    q31_t *pCoeffs;       /**< points to the coefficient array. The array is of length numTaps. */

+    q31_t mu;             /**< step size that controls filter coefficient updates. */

+    uint8_t postShift;    /**< bit shift applied to coefficients. */

+    q31_t *recipTable;    /**< points to the reciprocal initial value table. */

+    q31_t energy;         /**< saves previous frame energy. */

+    q31_t x0;             /**< saves previous input sample. */

+  } arm_lms_norm_instance_q31;

+

+  /**

+   * @brief Processing function for Q31 normalized LMS filter.

+   * @param[in] *S points to an instance of the Q31 normalized LMS filter structure.

+   * @param[in] *pSrc points to the block of input data.

+   * @param[in] *pRef points to the block of reference data.

+   * @param[out] *pOut points to the block of output data.

+   * @param[out] *pErr points to the block of error data.

+   * @param[in] blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_lms_norm_q31(

+  arm_lms_norm_instance_q31 * S,

+  q31_t * pSrc,

+  q31_t * pRef,

+  q31_t * pOut,

+  q31_t * pErr,

+  uint32_t blockSize);

+

+  /**

+   * @brief Initialization function for Q31 normalized LMS filter.

+   * @param[in] *S points to an instance of the Q31 normalized LMS filter structure.

+   * @param[in] numTaps  number of filter coefficients.

+   * @param[in] *pCoeffs points to coefficient buffer.

+   * @param[in] *pState points to state buffer.

+   * @param[in] mu step size that controls filter coefficient updates.

+   * @param[in] blockSize number of samples to process.

+   * @param[in] postShift bit shift applied to coefficients.

+   * @return none.

+   */

+

+  void arm_lms_norm_init_q31(

+  arm_lms_norm_instance_q31 * S,

+  uint16_t numTaps,

+  q31_t * pCoeffs,

+  q31_t * pState,

+  q31_t mu,

+  uint32_t blockSize,

+  uint8_t postShift);

+

+  /**

+   * @brief Instance structure for the Q15 normalized LMS filter.

+   */

+

+  typedef struct

+  {

+    uint16_t numTaps;    /**< Number of coefficients in the filter. */

+    q15_t *pState;        /**< points to the state variable array. The array is of length numTaps+blockSize-1. */

+    q15_t *pCoeffs;       /**< points to the coefficient array. The array is of length numTaps. */

+    q15_t mu;            /**< step size that controls filter coefficient updates. */

+    uint8_t postShift;   /**< bit shift applied to coefficients. */

+    q15_t *recipTable;   /**< Points to the reciprocal initial value table. */

+    q15_t energy;        /**< saves previous frame energy. */

+    q15_t x0;            /**< saves previous input sample. */

+  } arm_lms_norm_instance_q15;

+

+  /**

+   * @brief Processing function for Q15 normalized LMS filter.

+   * @param[in] *S points to an instance of the Q15 normalized LMS filter structure.

+   * @param[in] *pSrc points to the block of input data.

+   * @param[in] *pRef points to the block of reference data.

+   * @param[out] *pOut points to the block of output data.

+   * @param[out] *pErr points to the block of error data.

+   * @param[in] blockSize number of samples to process.

+   * @return none.

+   */

+

+  void arm_lms_norm_q15(

+  arm_lms_norm_instance_q15 * S,

+  q15_t * pSrc,

+  q15_t * pRef,

+  q15_t * pOut,

+  q15_t * pErr,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief Initialization function for Q15 normalized LMS filter.

+   * @param[in] *S points to an instance of the Q15 normalized LMS filter structure.

+   * @param[in] numTaps  number of filter coefficients.

+   * @param[in] *pCoeffs points to coefficient buffer.

+   * @param[in] *pState points to state buffer.

+   * @param[in] mu step size that controls filter coefficient updates.

+   * @param[in] blockSize number of samples to process.

+   * @param[in] postShift bit shift applied to coefficients.

+   * @return none.

+   */

+

+  void arm_lms_norm_init_q15(

+  arm_lms_norm_instance_q15 * S,

+  uint16_t numTaps,

+  q15_t * pCoeffs,

+  q15_t * pState,

+  q15_t mu,

+  uint32_t blockSize,

+  uint8_t postShift);

+

+  /**

+   * @brief Correlation of floating-point sequences.

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data  Length 2 * max(srcALen, srcBLen) - 1.

+   * @return none.

+   */

+

+  void arm_correlate_f32(

+  float32_t * pSrcA,

+  uint32_t srcALen,

+  float32_t * pSrcB,

+  uint32_t srcBLen,

+  float32_t * pDst);

+

+

+   /**

+   * @brief Correlation of Q15 sequences.

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data  Length 2 * max(srcALen, srcBLen) - 1.

+   * @param[in]  *pScratch points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.

+   * @return none.

+   */

+  void arm_correlate_opt_q15(

+  q15_t * pSrcA,

+  uint32_t srcALen,

+  q15_t * pSrcB,

+  uint32_t srcBLen,

+  q15_t * pDst,

+  q15_t * pScratch);

+

+

+  /**

+   * @brief Correlation of Q15 sequences.

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data  Length 2 * max(srcALen, srcBLen) - 1.

+   * @return none.

+   */

+

+  void arm_correlate_q15(

+  q15_t * pSrcA,

+  uint32_t srcALen,

+  q15_t * pSrcB,

+  uint32_t srcBLen,

+  q15_t * pDst);

+

+  /**

+   * @brief Correlation of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4.

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data  Length 2 * max(srcALen, srcBLen) - 1.

+   * @return none.

+   */

+

+  void arm_correlate_fast_q15(

+  q15_t * pSrcA,

+  uint32_t srcALen,

+  q15_t * pSrcB,

+  uint32_t srcBLen,

+  q15_t * pDst);

+

+

+

+  /**

+   * @brief Correlation of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4.

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data  Length 2 * max(srcALen, srcBLen) - 1.

+   * @param[in]  *pScratch points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.

+   * @return none.

+   */

+

+  void arm_correlate_fast_opt_q15(

+  q15_t * pSrcA,

+  uint32_t srcALen,

+  q15_t * pSrcB,

+  uint32_t srcBLen,

+  q15_t * pDst,

+  q15_t * pScratch);

+

+  /**

+   * @brief Correlation of Q31 sequences.

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data  Length 2 * max(srcALen, srcBLen) - 1.

+   * @return none.

+   */

+

+  void arm_correlate_q31(

+  q31_t * pSrcA,

+  uint32_t srcALen,

+  q31_t * pSrcB,

+  uint32_t srcBLen,

+  q31_t * pDst);

+

+  /**

+   * @brief Correlation of Q31 sequences (fast version) for Cortex-M3 and Cortex-M4.

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data  Length 2 * max(srcALen, srcBLen) - 1.

+   * @return none.

+   */

+

+  void arm_correlate_fast_q31(

+  q31_t * pSrcA,

+  uint32_t srcALen,

+  q31_t * pSrcB,

+  uint32_t srcBLen,

+  q31_t * pDst);

+

+

+

+ /**

+   * @brief Correlation of Q7 sequences.

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data  Length 2 * max(srcALen, srcBLen) - 1.

+   * @param[in]  *pScratch1 points to scratch buffer(of type q15_t) of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.

+   * @param[in]  *pScratch2 points to scratch buffer (of type q15_t) of size min(srcALen, srcBLen).

+   * @return none.

+   */

+

+  void arm_correlate_opt_q7(

+  q7_t * pSrcA,

+  uint32_t srcALen,

+  q7_t * pSrcB,

+  uint32_t srcBLen,

+  q7_t * pDst,

+  q15_t * pScratch1,

+  q15_t * pScratch2);

+

+

+  /**

+   * @brief Correlation of Q7 sequences.

+   * @param[in] *pSrcA points to the first input sequence.

+   * @param[in] srcALen length of the first input sequence.

+   * @param[in] *pSrcB points to the second input sequence.

+   * @param[in] srcBLen length of the second input sequence.

+   * @param[out] *pDst points to the block of output data  Length 2 * max(srcALen, srcBLen) - 1.

+   * @return none.

+   */

+

+  void arm_correlate_q7(

+  q7_t * pSrcA,

+  uint32_t srcALen,

+  q7_t * pSrcB,

+  uint32_t srcBLen,

+  q7_t * pDst);

+

+

+  /**

+   * @brief Instance structure for the floating-point sparse FIR filter.

+   */

+  typedef struct

+  {

+    uint16_t numTaps;             /**< number of coefficients in the filter. */

+    uint16_t stateIndex;          /**< state buffer index.  Points to the oldest sample in the state buffer. */

+    float32_t *pState;            /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. */

+    float32_t *pCoeffs;           /**< points to the coefficient array. The array is of length numTaps.*/

+    uint16_t maxDelay;            /**< maximum offset specified by the pTapDelay array. */

+    int32_t *pTapDelay;           /**< points to the array of delay values.  The array is of length numTaps. */

+  } arm_fir_sparse_instance_f32;

+

+  /**

+   * @brief Instance structure for the Q31 sparse FIR filter.

+   */

+

+  typedef struct

+  {

+    uint16_t numTaps;             /**< number of coefficients in the filter. */

+    uint16_t stateIndex;          /**< state buffer index.  Points to the oldest sample in the state buffer. */

+    q31_t *pState;                /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. */

+    q31_t *pCoeffs;               /**< points to the coefficient array. The array is of length numTaps.*/

+    uint16_t maxDelay;            /**< maximum offset specified by the pTapDelay array. */

+    int32_t *pTapDelay;           /**< points to the array of delay values.  The array is of length numTaps. */

+  } arm_fir_sparse_instance_q31;

+

+  /**

+   * @brief Instance structure for the Q15 sparse FIR filter.

+   */

+

+  typedef struct

+  {

+    uint16_t numTaps;             /**< number of coefficients in the filter. */

+    uint16_t stateIndex;          /**< state buffer index.  Points to the oldest sample in the state buffer. */

+    q15_t *pState;                /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. */

+    q15_t *pCoeffs;               /**< points to the coefficient array. The array is of length numTaps.*/

+    uint16_t maxDelay;            /**< maximum offset specified by the pTapDelay array. */

+    int32_t *pTapDelay;           /**< points to the array of delay values.  The array is of length numTaps. */

+  } arm_fir_sparse_instance_q15;

+

+  /**

+   * @brief Instance structure for the Q7 sparse FIR filter.

+   */

+

+  typedef struct

+  {

+    uint16_t numTaps;             /**< number of coefficients in the filter. */

+    uint16_t stateIndex;          /**< state buffer index.  Points to the oldest sample in the state buffer. */

+    q7_t *pState;                 /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. */

+    q7_t *pCoeffs;                /**< points to the coefficient array. The array is of length numTaps.*/

+    uint16_t maxDelay;            /**< maximum offset specified by the pTapDelay array. */

+    int32_t *pTapDelay;           /**< points to the array of delay values.  The array is of length numTaps. */

+  } arm_fir_sparse_instance_q7;

+

+  /**

+   * @brief Processing function for the floating-point sparse FIR filter.

+   * @param[in]  *S          points to an instance of the floating-point sparse FIR structure.

+   * @param[in]  *pSrc       points to the block of input data.

+   * @param[out] *pDst       points to the block of output data

+   * @param[in]  *pScratchIn points to a temporary buffer of size blockSize.

+   * @param[in]  blockSize   number of input samples to process per call.

+   * @return none.

+   */

+

+  void arm_fir_sparse_f32(

+  arm_fir_sparse_instance_f32 * S,

+  float32_t * pSrc,

+  float32_t * pDst,

+  float32_t * pScratchIn,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Initialization function for the floating-point sparse FIR filter.

+   * @param[in,out] *S         points to an instance of the floating-point sparse FIR structure.

+   * @param[in]     numTaps    number of nonzero coefficients in the filter.

+   * @param[in]     *pCoeffs   points to the array of filter coefficients.

+   * @param[in]     *pState    points to the state buffer.

+   * @param[in]     *pTapDelay points to the array of offset times.

+   * @param[in]     maxDelay   maximum offset time supported.

+   * @param[in]     blockSize  number of samples that will be processed per block.

+   * @return none

+   */

+

+  void arm_fir_sparse_init_f32(

+  arm_fir_sparse_instance_f32 * S,

+  uint16_t numTaps,

+  float32_t * pCoeffs,

+  float32_t * pState,

+  int32_t * pTapDelay,

+  uint16_t maxDelay,

+  uint32_t blockSize);

+

+  /**

+   * @brief Processing function for the Q31 sparse FIR filter.

+   * @param[in]  *S          points to an instance of the Q31 sparse FIR structure.

+   * @param[in]  *pSrc       points to the block of input data.

+   * @param[out] *pDst       points to the block of output data

+   * @param[in]  *pScratchIn points to a temporary buffer of size blockSize.

+   * @param[in]  blockSize   number of input samples to process per call.

+   * @return none.

+   */

+

+  void arm_fir_sparse_q31(

+  arm_fir_sparse_instance_q31 * S,

+  q31_t * pSrc,

+  q31_t * pDst,

+  q31_t * pScratchIn,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Initialization function for the Q31 sparse FIR filter.

+   * @param[in,out] *S         points to an instance of the Q31 sparse FIR structure.

+   * @param[in]     numTaps    number of nonzero coefficients in the filter.

+   * @param[in]     *pCoeffs   points to the array of filter coefficients.

+   * @param[in]     *pState    points to the state buffer.

+   * @param[in]     *pTapDelay points to the array of offset times.

+   * @param[in]     maxDelay   maximum offset time supported.

+   * @param[in]     blockSize  number of samples that will be processed per block.

+   * @return none

+   */

+

+  void arm_fir_sparse_init_q31(

+  arm_fir_sparse_instance_q31 * S,

+  uint16_t numTaps,

+  q31_t * pCoeffs,

+  q31_t * pState,

+  int32_t * pTapDelay,

+  uint16_t maxDelay,

+  uint32_t blockSize);

+

+  /**

+   * @brief Processing function for the Q15 sparse FIR filter.

+   * @param[in]  *S           points to an instance of the Q15 sparse FIR structure.

+   * @param[in]  *pSrc        points to the block of input data.

+   * @param[out] *pDst        points to the block of output data

+   * @param[in]  *pScratchIn  points to a temporary buffer of size blockSize.

+   * @param[in]  *pScratchOut points to a temporary buffer of size blockSize.

+   * @param[in]  blockSize    number of input samples to process per call.

+   * @return none.

+   */

+

+  void arm_fir_sparse_q15(

+  arm_fir_sparse_instance_q15 * S,

+  q15_t * pSrc,

+  q15_t * pDst,

+  q15_t * pScratchIn,

+  q31_t * pScratchOut,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief  Initialization function for the Q15 sparse FIR filter.

+   * @param[in,out] *S         points to an instance of the Q15 sparse FIR structure.

+   * @param[in]     numTaps    number of nonzero coefficients in the filter.

+   * @param[in]     *pCoeffs   points to the array of filter coefficients.

+   * @param[in]     *pState    points to the state buffer.

+   * @param[in]     *pTapDelay points to the array of offset times.

+   * @param[in]     maxDelay   maximum offset time supported.

+   * @param[in]     blockSize  number of samples that will be processed per block.

+   * @return none

+   */

+

+  void arm_fir_sparse_init_q15(

+  arm_fir_sparse_instance_q15 * S,

+  uint16_t numTaps,

+  q15_t * pCoeffs,

+  q15_t * pState,

+  int32_t * pTapDelay,

+  uint16_t maxDelay,

+  uint32_t blockSize);

+

+  /**

+   * @brief Processing function for the Q7 sparse FIR filter.

+   * @param[in]  *S           points to an instance of the Q7 sparse FIR structure.

+   * @param[in]  *pSrc        points to the block of input data.

+   * @param[out] *pDst        points to the block of output data

+   * @param[in]  *pScratchIn  points to a temporary buffer of size blockSize.

+   * @param[in]  *pScratchOut points to a temporary buffer of size blockSize.

+   * @param[in]  blockSize    number of input samples to process per call.

+   * @return none.

+   */

+

+  void arm_fir_sparse_q7(

+  arm_fir_sparse_instance_q7 * S,

+  q7_t * pSrc,

+  q7_t * pDst,

+  q7_t * pScratchIn,

+  q31_t * pScratchOut,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Initialization function for the Q7 sparse FIR filter.

+   * @param[in,out] *S         points to an instance of the Q7 sparse FIR structure.

+   * @param[in]     numTaps    number of nonzero coefficients in the filter.

+   * @param[in]     *pCoeffs   points to the array of filter coefficients.

+   * @param[in]     *pState    points to the state buffer.

+   * @param[in]     *pTapDelay points to the array of offset times.

+   * @param[in]     maxDelay   maximum offset time supported.

+   * @param[in]     blockSize  number of samples that will be processed per block.

+   * @return none

+   */

+

+  void arm_fir_sparse_init_q7(

+  arm_fir_sparse_instance_q7 * S,

+  uint16_t numTaps,

+  q7_t * pCoeffs,

+  q7_t * pState,

+  int32_t * pTapDelay,

+  uint16_t maxDelay,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief  Floating-point sin_cos function.

+   * @param[in]  theta    input value in degrees

+   * @param[out] *pSinVal points to the processed sine output.

+   * @param[out] *pCosVal points to the processed cos output.

+   * @return none.

+   */

+

+  void arm_sin_cos_f32(

+  float32_t theta,

+  float32_t * pSinVal,

+  float32_t * pCosVal);

+

+  /**

+   * @brief  Q31 sin_cos function.

+   * @param[in]  theta    scaled input value in degrees

+   * @param[out] *pSinVal points to the processed sine output.

+   * @param[out] *pCosVal points to the processed cosine output.

+   * @return none.

+   */

+

+  void arm_sin_cos_q31(

+  q31_t theta,

+  q31_t * pSinVal,

+  q31_t * pCosVal);

+

+

+  /**

+   * @brief  Floating-point complex conjugate.

+   * @param[in]  *pSrc points to the input vector

+   * @param[out]  *pDst points to the output vector

+   * @param[in]  numSamples number of complex samples in each vector

+   * @return none.

+   */

+

+  void arm_cmplx_conj_f32(

+  float32_t * pSrc,

+  float32_t * pDst,

+  uint32_t numSamples);

+

+  /**

+   * @brief  Q31 complex conjugate.

+   * @param[in]  *pSrc points to the input vector

+   * @param[out]  *pDst points to the output vector

+   * @param[in]  numSamples number of complex samples in each vector

+   * @return none.

+   */

+

+  void arm_cmplx_conj_q31(

+  q31_t * pSrc,

+  q31_t * pDst,

+  uint32_t numSamples);

+

+  /**

+   * @brief  Q15 complex conjugate.

+   * @param[in]  *pSrc points to the input vector

+   * @param[out]  *pDst points to the output vector

+   * @param[in]  numSamples number of complex samples in each vector

+   * @return none.

+   */

+

+  void arm_cmplx_conj_q15(

+  q15_t * pSrc,

+  q15_t * pDst,

+  uint32_t numSamples);

+

+

+

+  /**

+   * @brief  Floating-point complex magnitude squared

+   * @param[in]  *pSrc points to the complex input vector

+   * @param[out]  *pDst points to the real output vector

+   * @param[in]  numSamples number of complex samples in the input vector

+   * @return none.

+   */

+

+  void arm_cmplx_mag_squared_f32(

+  float32_t * pSrc,

+  float32_t * pDst,

+  uint32_t numSamples);

+

+  /**

+   * @brief  Q31 complex magnitude squared

+   * @param[in]  *pSrc points to the complex input vector

+   * @param[out]  *pDst points to the real output vector

+   * @param[in]  numSamples number of complex samples in the input vector

+   * @return none.

+   */

+

+  void arm_cmplx_mag_squared_q31(

+  q31_t * pSrc,

+  q31_t * pDst,

+  uint32_t numSamples);

+

+  /**

+   * @brief  Q15 complex magnitude squared

+   * @param[in]  *pSrc points to the complex input vector

+   * @param[out]  *pDst points to the real output vector

+   * @param[in]  numSamples number of complex samples in the input vector

+   * @return none.

+   */

+

+  void arm_cmplx_mag_squared_q15(

+  q15_t * pSrc,

+  q15_t * pDst,

+  uint32_t numSamples);

+

+

+ /**

+   * @ingroup groupController

+   */

+

+  /**

+   * @defgroup PID PID Motor Control

+   *

+   * A Proportional Integral Derivative (PID) controller is a generic feedback control

+   * loop mechanism widely used in industrial control systems.

+   * A PID controller is the most commonly used type of feedback controller.

+   *

+   * This set of functions implements (PID) controllers

+   * for Q15, Q31, and floating-point data types.  The functions operate on a single sample

+   * of data and each call to the function returns a single processed value.

+   * <code>S</code> points to an instance of the PID control data structure.  <code>in</code>

+   * is the input sample value. The functions return the output value.

+   *

+   * \par Algorithm:

+   * <pre>

+   *    y[n] = y[n-1] + A0 * x[n] + A1 * x[n-1] + A2 * x[n-2]

+   *    A0 = Kp + Ki + Kd

+   *    A1 = (-Kp ) - (2 * Kd )

+   *    A2 = Kd  </pre>

+   *

+   * \par

+   * where \c Kp is proportional constant, \c Ki is Integral constant and \c Kd is Derivative constant

+   *

+   * \par

+   * \image html PID.gif "Proportional Integral Derivative Controller"

+   *

+   * \par

+   * The PID controller calculates an "error" value as the difference between

+   * the measured output and the reference input.

+   * The controller attempts to minimize the error by adjusting the process control inputs.

+   * The proportional value determines the reaction to the current error,

+   * the integral value determines the reaction based on the sum of recent errors,

+   * and the derivative value determines the reaction based on the rate at which the error has been changing.

+   *

+   * \par Instance Structure

+   * The Gains A0, A1, A2 and state variables for a PID controller are stored together in an instance data structure.

+   * A separate instance structure must be defined for each PID Controller.

+   * There are separate instance structure declarations for each of the 3 supported data types.

+   *

+   * \par Reset Functions

+   * There is also an associated reset function for each data type which clears the state array.

+   *

+   * \par Initialization Functions

+   * There is also an associated initialization function for each data type.

+   * The initialization function performs the following operations:

+   * - Initializes the Gains A0, A1, A2 from Kp,Ki, Kd gains.

+   * - Zeros out the values in the state buffer.

+   *

+   * \par

+   * Instance structure cannot be placed into a const data section and it is recommended to use the initialization function.

+   *

+   * \par Fixed-Point Behavior

+   * Care must be taken when using the fixed-point versions of the PID Controller functions.

+   * In particular, the overflow and saturation behavior of the accumulator used in each function must be considered.

+   * Refer to the function specific documentation below for usage guidelines.

+   */

+

+  /**

+   * @addtogroup PID

+   * @{

+   */

+

+  /**

+   * @brief  Process function for the floating-point PID Control.

+   * @param[in,out] *S is an instance of the floating-point PID Control structure

+   * @param[in] in input sample to process

+   * @return out processed output sample.

+   */

+

+

+  static __INLINE float32_t arm_pid_f32(

+  arm_pid_instance_f32 * S,

+  float32_t in)

+  {

+    float32_t out;

+

+    /* y[n] = y[n-1] + A0 * x[n] + A1 * x[n-1] + A2 * x[n-2]  */

+    /* State layout: state[0] = x[n-1], state[1] = x[n-2], state[2] = y[n-1] */

+    out = (S->A0 * in) +

+      (S->A1 * S->state[0]) + (S->A2 * S->state[1]) + (S->state[2]);

+

+    /* Update state */

+    S->state[1] = S->state[0];

+    S->state[0] = in;

+    S->state[2] = out;

+

+    /* return to application */

+    return (out);

+

+  }

+

+  /**

+   * @brief  Process function for the Q31 PID Control.

+   * @param[in,out] *S points to an instance of the Q31 PID Control structure

+   * @param[in] in input sample to process

+   * @return out processed output sample.

+   *

+   * <b>Scaling and Overflow Behavior:</b>

+   * \par

+   * The function is implemented using an internal 64-bit accumulator.

+   * The accumulator has a 2.62 format and maintains full precision of the intermediate multiplication results but provides only a single guard bit.

+   * Thus, if the accumulator result overflows it wraps around rather than clip.

+   * In order to avoid overflows completely the input signal must be scaled down by 2 bits as there are four additions.

+   * After all multiply-accumulates are performed, the 2.62 accumulator is truncated to 1.32 format and then saturated to 1.31 format.

+   */

+

+  static __INLINE q31_t arm_pid_q31(

+  arm_pid_instance_q31 * S,

+  q31_t in)

+  {

+    q63_t acc;

+    q31_t out;

+

+    /* State layout: state[0] = x[n-1], state[1] = x[n-2], state[2] = y[n-1].  */

+    /* acc accumulates 1.31 x 1.31 products in 2.62 format.                    */

+

+    /* acc = A0 * x[n]  */

+    acc = (q63_t) S->A0 * in;

+

+    /* acc += A1 * x[n-1] */

+    acc += (q63_t) S->A1 * S->state[0];

+

+    /* acc += A2 * x[n-2]  */

+    acc += (q63_t) S->A2 * S->state[1];

+

+    /* convert output to 1.31 format to add y[n-1] (truncates the 2.62 accumulator) */

+    out = (q31_t) (acc >> 31u);

+

+    /* out += y[n-1] */

+    out += S->state[2];

+

+    /* Update state */

+    S->state[1] = S->state[0];

+    S->state[0] = in;

+    S->state[2] = out;

+

+    /* return to application */

+    return (out);

+

+  }

+

+  /**

+   * @brief  Process function for the Q15 PID Control.

+   * @param[in,out] *S points to an instance of the Q15 PID Control structure

+   * @param[in] in input sample to process

+   * @return out processed output sample.

+   *

+   * <b>Scaling and Overflow Behavior:</b>

+   * \par

+   * The function is implemented using a 64-bit internal accumulator.

+   * Both Gains and state variables are represented in 1.15 format and multiplications yield a 2.30 result.

+   * The 2.30 intermediate results are accumulated in a 64-bit accumulator in 34.30 format.

+   * There is no risk of internal overflow with this approach and the full precision of intermediate multiplications is preserved.

+   * After all additions have been performed, the accumulator is truncated to 34.15 format by discarding low 15 bits.

+   * Lastly, the accumulator is saturated to yield a result in 1.15 format.

+   */

+

+  static __INLINE q15_t arm_pid_q15(

+  arm_pid_instance_q15 * S,

+  q15_t in)

+  {

+    q63_t acc;

+    q15_t out;

+

+#ifndef ARM_MATH_CM0_FAMILY

+    __SIMD32_TYPE *vstate;

+

+    /* Implementation of PID controller */

+

+    /* acc = A0 * x[n]  */

+    acc = (q31_t) __SMUAD(S->A0, in);

+

+    /* acc += A1 * x[n-1] + A2 * x[n-2]  */

+    /* NOTE(review): this SIMD path assumes S->A1 holds the packed {A1, A2} q15 pair  */

+    /* and *vstate reads the packed {state[0], state[1]} pair — confirm against the   */

+    /* arm_pid_instance_q15 structure layout.                                          */

+    vstate = __SIMD32_CONST(S->state);

+    acc = __SMLALD(S->A1, (q31_t) *vstate, acc);

+

+#else

+    /* acc = A0 * x[n]  */

+    acc = ((q31_t) S->A0) * in;

+

+    /* acc += A1 * x[n-1] + A2 * x[n-2]  */

+    acc += (q31_t) S->A1 * S->state[0];

+    acc += (q31_t) S->A2 * S->state[1];

+

+#endif

+

+    /* acc += y[n-1], scaled up to match the 2.30 product format */

+    acc += (q31_t) S->state[2] << 15;

+

+    /* saturate the output to 1.15 after discarding the low 15 fractional bits */

+    out = (q15_t) (__SSAT((acc >> 15), 16));

+

+    /* Update state: state[0] = x[n-1], state[1] = x[n-2], state[2] = y[n-1] */

+    S->state[1] = S->state[0];

+    S->state[0] = in;

+    S->state[2] = out;

+

+    /* return to application */

+    return (out);

+

+  }

+

+  /**

+   * @} end of PID group

+   */

+

+

+  /**

+   * @brief Floating-point matrix inverse.

+   * @param[in]  *src points to the instance of the input floating-point matrix structure.

+   * @param[out] *dst points to the instance of the output floating-point matrix structure.

+   * @return The function returns ARM_MATH_SIZE_MISMATCH, if the dimensions do not match.

+   * If the input matrix is singular (does not have an inverse), then the algorithm terminates and returns error status ARM_MATH_SINGULAR.

+   */

+

+  arm_status arm_mat_inverse_f32(

+  const arm_matrix_instance_f32 * src,

+  arm_matrix_instance_f32 * dst);

+

+

+  /**

+   * @brief Double-precision floating-point matrix inverse.

+   * @param[in]  *src points to the instance of the input floating-point matrix structure.

+   * @param[out] *dst points to the instance of the output floating-point matrix structure.

+   * @return The function returns ARM_MATH_SIZE_MISMATCH, if the dimensions do not match.

+   * If the input matrix is singular (does not have an inverse), then the algorithm terminates and returns error status ARM_MATH_SINGULAR.

+   */

+

+  arm_status arm_mat_inverse_f64(

+  const arm_matrix_instance_f64 * src,

+  arm_matrix_instance_f64 * dst);

+

+

+

+  /**

+   * @ingroup groupController

+   */

+

+

+  /**

+   * @defgroup clarke Vector Clarke Transform

+   * Forward Clarke transform converts the instantaneous stator phases into a two-coordinate time invariant vector.

+   * Generally the Clarke transform uses three-phase currents <code>Ia, Ib and Ic</code> to calculate currents

+   * in the two-phase orthogonal stator axis <code>Ialpha</code> and <code>Ibeta</code>.

+   * When <code>Ialpha</code> is superposed with <code>Ia</code> as shown in the figure below

+   * \image html clarke.gif Stator current space vector and its components in (a,b).

+   * and <code>Ia + Ib + Ic = 0</code>, in this condition <code>Ialpha</code> and <code>Ibeta</code>

+   * can be calculated using only <code>Ia</code> and <code>Ib</code>.

+   *

+   * The function operates on a single sample of data and each call to the function returns the processed output.

+   * The library provides separate functions for Q31 and floating-point data types.

+   * \par Algorithm

+   * \image html clarkeFormula.gif

+   * where <code>Ia</code> and <code>Ib</code> are the instantaneous stator phases and

+   * <code>pIalpha</code> and <code>pIbeta</code> are the two coordinates of time invariant vector.

+   * \par Fixed-Point Behavior

+   * Care must be taken when using the Q31 version of the Clarke transform.

+   * In particular, the overflow and saturation behavior of the accumulator used must be considered.

+   * Refer to the function specific documentation below for usage guidelines.

+   */

+

+  /**

+   * @addtogroup clarke

+   * @{

+   */

+

+  /**

+   *

+   * @brief  Floating-point Clarke transform

+   * @param[in]       Ia       input three-phase coordinate <code>a</code>

+   * @param[in]       Ib       input three-phase coordinate <code>b</code>

+   * @param[out]      *pIalpha points to output two-phase orthogonal vector axis alpha

+   * @param[out]      *pIbeta  points to output two-phase orthogonal vector axis beta

+   * @return none.

+   */

+

+  static __INLINE void arm_clarke_f32(

+  float32_t Ia,

+  float32_t Ib,

+  float32_t * pIalpha,

+  float32_t * pIbeta)

+  {

+    /* Calculate pIalpha using the equation, pIalpha = Ia */

+    /* (valid when the three phases are balanced: Ia + Ib + Ic = 0) */

+    *pIalpha = Ia;

+

+    /* Calculate pIbeta using the equation, pIbeta = (1/sqrt(3)) * Ia + (2/sqrt(3)) * Ib */

+    /* 0.57735026919 = 1/sqrt(3), 1.15470053838 = 2/sqrt(3) */

+    *pIbeta =

+      ((float32_t) 0.57735026919 * Ia + (float32_t) 1.15470053838 * Ib);

+

+  }

+

+  /**

+   * @brief  Clarke transform for Q31 version

+   * @param[in]       Ia       input three-phase coordinate <code>a</code>

+   * @param[in]       Ib       input three-phase coordinate <code>b</code>

+   * @param[out]      *pIalpha points to output two-phase orthogonal vector axis alpha

+   * @param[out]      *pIbeta  points to output two-phase orthogonal vector axis beta

+   * @return none.

+   *

+   * <b>Scaling and Overflow Behavior:</b>

+   * \par

+   * The function is implemented using an internal 32-bit accumulator.

+   * The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format.

+   * There is saturation on the addition, hence there is no risk of overflow.

+   */

+

+  static __INLINE void arm_clarke_q31(

+  q31_t Ia,

+  q31_t Ib,

+  q31_t * pIalpha,

+  q31_t * pIbeta)

+  {

+    q31_t product1, product2;                    /* Temporary variables used to store intermediate results */

+

+    /* Calculating pIalpha from Ia by equation pIalpha = Ia */

+    *pIalpha = Ia;

+

+    /* Intermediate product is calculated by (1/(sqrt(3)) * Ia); 0x24F34E8B is 1/sqrt(3) in Q30 */

+    product1 = (q31_t) (((q63_t) Ia * 0x24F34E8B) >> 30);

+

+    /* Intermediate product is calculated by (2/sqrt(3) * Ib); 0x49E69D16 is 2/sqrt(3) in Q30 */

+    product2 = (q31_t) (((q63_t) Ib * 0x49E69D16) >> 30);

+

+    /* pIbeta is calculated by adding the intermediate products with saturation */

+    *pIbeta = __QADD(product1, product2);

+  }

+

+  /**

+   * @} end of clarke group

+   */

+

+  /**

+   * @brief  Converts the elements of the Q7 vector to Q31 vector.

+   * @param[in]  *pSrc     input pointer

+   * @param[out]  *pDst    output pointer

+   * @param[in]  blockSize number of samples to process

+   * @return none.

+   */

+  void arm_q7_to_q31(

+  q7_t * pSrc,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+

+

+

+  /**

+   * @ingroup groupController

+   */

+

+  /**

+   * @defgroup inv_clarke Vector Inverse Clarke Transform

+   * Inverse Clarke transform converts the two-coordinate time invariant vector into instantaneous stator phases.

+   *

+   * The function operates on a single sample of data and each call to the function returns the processed output.

+   * The library provides separate functions for Q31 and floating-point data types.

+   * \par Algorithm

+   * \image html clarkeInvFormula.gif

+   * where <code>pIa</code> and <code>pIb</code> are the instantaneous stator phases and

+   * <code>Ialpha</code> and <code>Ibeta</code> are the two coordinates of time invariant vector.

+   * \par Fixed-Point Behavior

+   * Care must be taken when using the Q31 version of the Clarke transform.

+   * In particular, the overflow and saturation behavior of the accumulator used must be considered.

+   * Refer to the function specific documentation below for usage guidelines.

+   */

+

+  /**

+   * @addtogroup inv_clarke

+   * @{

+   */

+

+   /**

+   * @brief  Floating-point Inverse Clarke transform

+   * @param[in]       Ialpha  input two-phase orthogonal vector axis alpha

+   * @param[in]       Ibeta   input two-phase orthogonal vector axis beta

+   * @param[out]      *pIa    points to output three-phase coordinate <code>a</code>

+   * @param[out]      *pIb    points to output three-phase coordinate <code>b</code>

+   * @return none.

+   */

+

+

+  static __INLINE void arm_inv_clarke_f32(

+  float32_t Ialpha,

+  float32_t Ibeta,

+  float32_t * pIa,

+  float32_t * pIb)

+  {

+    /* Calculating pIa from Ialpha by equation pIa = Ialpha */

+    *pIa = Ialpha;

+

+    /* Calculating pIb from Ialpha and Ibeta by equation pIb = -(1/2) * Ialpha + (sqrt(3)/2) * Ibeta */

+    /* 0.8660254039 = sqrt(3)/2.  NOTE(review): -0.5 is a double constant, so this     */

+    /* expression is evaluated in double precision — consider -0.5f; confirm intent.   */

+    *pIb = -0.5 * Ialpha + (float32_t) 0.8660254039 *Ibeta;

+

+  }

+

+  /**

+   * @brief  Inverse Clarke transform for Q31 version

+   * @param[in]       Ialpha  input two-phase orthogonal vector axis alpha

+   * @param[in]       Ibeta   input two-phase orthogonal vector axis beta

+   * @param[out]      *pIa    points to output three-phase coordinate <code>a</code>

+   * @param[out]      *pIb    points to output three-phase coordinate <code>b</code>

+   * @return none.

+   *

+   * <b>Scaling and Overflow Behavior:</b>

+   * \par

+   * The function is implemented using an internal 32-bit accumulator.

+   * The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format.

+   * There is saturation on the subtraction, hence there is no risk of overflow.

+   */

+

+  static __INLINE void arm_inv_clarke_q31(

+  q31_t Ialpha,

+  q31_t Ibeta,

+  q31_t * pIa,

+  q31_t * pIb)

+  {

+    q31_t product1, product2;                    /* Temporary variables used to store intermediate results */

+

+    /* Calculating pIa from Ialpha by equation pIa = Ialpha */

+    *pIa = Ialpha;

+

+    /* Intermediate product is calculated by (1/2 * Ialpha); 0x40000000 is 0.5 in 1.31 format */

+    product1 = (q31_t) (((q63_t) (Ialpha) * (0x40000000)) >> 31);

+

+    /* Intermediate product is calculated by (sqrt(3)/2 * Ibeta); 0x6ED9EBA1 is ~0.8660 in 1.31 format */

+    product2 = (q31_t) (((q63_t) (Ibeta) * (0x6ED9EBA1)) >> 31);

+

+    /* pIb = -(1/2) * Ialpha + (sqrt(3)/2) * Ibeta, with saturating subtraction */

+    *pIb = __QSUB(product2, product1);

+

+  }

+

+  /**

+   * @} end of inv_clarke group

+   */

+

+  /**

+   * @brief  Converts the elements of the Q7 vector to Q15 vector.

+   * @param[in]  *pSrc     input pointer

+   * @param[out] *pDst     output pointer

+   * @param[in]  blockSize number of samples to process

+   * @return none.

+   */

+  void arm_q7_to_q15(

+  q7_t * pSrc,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+

+

+  /**

+   * @ingroup groupController

+   */

+

+  /**

+   * @defgroup park Vector Park Transform

+   *

+   * Forward Park transform converts the input two-coordinate vector to flux and torque components.

+   * The Park transform can be used to realize the transformation of the <code>Ialpha</code> and the <code>Ibeta</code> currents

+   * from the stationary to the moving reference frame and control the spatial relationship between

+   * the stator vector current and rotor flux vector.

+   * If we consider the d axis aligned with the rotor flux, the diagram below shows the

+   * current vector and the relationship from the two reference frames:

+   * \image html park.gif "Stator current space vector and its component in (a,b) and in the d,q rotating reference frame"

+   *

+   * The function operates on a single sample of data and each call to the function returns the processed output.

+   * The library provides separate functions for Q31 and floating-point data types.

+   * \par Algorithm

+   * \image html parkFormula.gif

+   * where <code>Ialpha</code> and <code>Ibeta</code> are the stator vector components,

+   * <code>pId</code> and <code>pIq</code> are rotor vector components and <code>cosVal</code> and <code>sinVal</code> are the

+   * cosine and sine values of theta (rotor flux position).

+   * \par Fixed-Point Behavior

+   * Care must be taken when using the Q31 version of the Park transform.

+   * In particular, the overflow and saturation behavior of the accumulator used must be considered.

+   * Refer to the function specific documentation below for usage guidelines.

+   */

+

+  /**

+   * @addtogroup park

+   * @{

+   */

+

+  /**

+   * @brief Floating-point Park transform

+   * @param[in]       Ialpha input two-phase vector coordinate alpha

+   * @param[in]       Ibeta  input two-phase vector coordinate beta

+   * @param[out]      *pId   points to output	rotor reference frame d

+   * @param[out]      *pIq   points to output	rotor reference frame q

+   * @param[in]       sinVal sine value of rotation angle theta

+   * @param[in]       cosVal cosine value of rotation angle theta

+   * @return none.

+   *

+   * The function implements the forward Park transform.

+   *

+   */

+

+  static __INLINE void arm_park_f32(

+  float32_t Ialpha,

+  float32_t Ibeta,

+  float32_t * pId,

+  float32_t * pIq,

+  float32_t sinVal,

+  float32_t cosVal)

+  {

+    /* Calculate pId using the equation, pId = Ialpha * cosVal + Ibeta * sinVal */

+    *pId = Ialpha * cosVal + Ibeta * sinVal;

+

+    /* Calculate pIq using the equation, pIq = - Ialpha * sinVal + Ibeta * cosVal */

+    *pIq = -Ialpha * sinVal + Ibeta * cosVal;

+

+  }

+

+  /**

+   * @brief  Park transform for Q31 version

+   * @param[in]       Ialpha input two-phase vector coordinate alpha

+   * @param[in]       Ibeta  input two-phase vector coordinate beta

+   * @param[out]      *pId   points to output rotor reference frame d

+   * @param[out]      *pIq   points to output rotor reference frame q

+   * @param[in]       sinVal sine value of rotation angle theta

+   * @param[in]       cosVal cosine value of rotation angle theta

+   * @return none.

+   *

+   * <b>Scaling and Overflow Behavior:</b>

+   * \par

+   * The function is implemented using an internal 32-bit accumulator.

+   * The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format.

+   * There is saturation on the addition and subtraction, hence there is no risk of overflow.

+   */

+

+

+  static __INLINE void arm_park_q31(

+  q31_t Ialpha,

+  q31_t Ibeta,

+  q31_t * pId,

+  q31_t * pIq,

+  q31_t sinVal,

+  q31_t cosVal)

+  {

+    q31_t product1, product2;                    /* Temporary variables used to store intermediate results */

+    q31_t product3, product4;                    /* Temporary variables used to store intermediate results */

+

+    /* Each product truncates the 2.62 intermediate multiplication back to 1.31 */

+

+    /* Intermediate product is calculated by (Ialpha * cosVal) */

+    product1 = (q31_t) (((q63_t) (Ialpha) * (cosVal)) >> 31);

+

+    /* Intermediate product is calculated by (Ibeta * sinVal) */

+    product2 = (q31_t) (((q63_t) (Ibeta) * (sinVal)) >> 31);

+

+

+    /* Intermediate product is calculated by (Ialpha * sinVal) */

+    product3 = (q31_t) (((q63_t) (Ialpha) * (sinVal)) >> 31);

+

+    /* Intermediate product is calculated by (Ibeta * cosVal) */

+    product4 = (q31_t) (((q63_t) (Ibeta) * (cosVal)) >> 31);

+

+    /* Calculate pId by adding the two intermediate products 1 and 2 (saturating) */

+    *pId = __QADD(product1, product2);

+

+    /* Calculate pIq by subtracting intermediate product 3 from product 4 (saturating) */

+    *pIq = __QSUB(product4, product3);

+  }

+

+  /**

+   * @} end of park group

+   */

+

+  /**

+   * @brief  Converts the elements of the Q7 vector to floating-point vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[out]  *pDst is output pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @return none.

+   */

+  void arm_q7_to_float(

+  q7_t * pSrc,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+

+  /**

+   * @ingroup groupController

+   */

+

+  /**

+   * @defgroup inv_park Vector Inverse Park transform

+   * Inverse Park transform converts the input flux and torque components to two-coordinate vector.

+   *

+   * The function operates on a single sample of data and each call to the function returns the processed output.

+   * The library provides separate functions for Q31 and floating-point data types.

+   * \par Algorithm

+   * \image html parkInvFormula.gif

+   * where <code>pIalpha</code> and <code>pIbeta</code> are the stator vector components,

+   * <code>Id</code> and <code>Iq</code> are rotor vector components and <code>cosVal</code> and <code>sinVal</code> are the

+   * cosine and sine values of theta (rotor flux position).

+   * \par Fixed-Point Behavior

+   * Care must be taken when using the Q31 version of the Park transform.

+   * In particular, the overflow and saturation behavior of the accumulator used must be considered.

+   * Refer to the function specific documentation below for usage guidelines.

+   */

+

+  /**

+   * @addtogroup inv_park

+   * @{

+   */

+

+   /**

+   * @brief  Floating-point Inverse Park transform

+   * @param[in]       Id        input coordinate of rotor reference frame d

+   * @param[in]       Iq        input coordinate of rotor reference frame q

+   * @param[out]      *pIalpha  points to output two-phase orthogonal vector axis alpha

+   * @param[out]      *pIbeta   points to output two-phase orthogonal vector axis beta

+   * @param[in]       sinVal    sine value of rotation angle theta

+   * @param[in]       cosVal    cosine value of rotation angle theta

+   * @return none.

+   */

+

+  static __INLINE void arm_inv_park_f32(

+  float32_t Id,

+  float32_t Iq,

+  float32_t * pIalpha,

+  float32_t * pIbeta,

+  float32_t sinVal,

+  float32_t cosVal)

+  {

+    /* Calculate pIalpha using the equation, pIalpha = Id * cosVal - Iq * sinVal */

+    *pIalpha = Id * cosVal - Iq * sinVal;

+

+    /* Calculate pIbeta using the equation, pIbeta = Id * sinVal + Iq * cosVal */

+    *pIbeta = Id * sinVal + Iq * cosVal;

+

+  }

+

+

+  /**

+   * @brief  Inverse Park transform for	Q31 version

+   * @param[in]       Id        input coordinate of rotor reference frame d

+   * @param[in]       Iq        input coordinate of rotor reference frame q

+   * @param[out]      *pIalpha  points to output two-phase orthogonal vector axis alpha

+   * @param[out]      *pIbeta   points to output two-phase orthogonal vector axis beta

+   * @param[in]       sinVal    sine value of rotation angle theta

+   * @param[in]       cosVal    cosine value of rotation angle theta

+   * @return none.

+   *

+   * <b>Scaling and Overflow Behavior:</b>

+   * \par

+   * The function is implemented using an internal 32-bit accumulator.

+   * The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format.

+   * There is saturation on the addition, hence there is no risk of overflow.

+   */

+

+

+  static __INLINE void arm_inv_park_q31(

+  q31_t Id,

+  q31_t Iq,

+  q31_t * pIalpha,

+  q31_t * pIbeta,

+  q31_t sinVal,

+  q31_t cosVal)

+  {

+    q31_t product1, product2;                    /* Temporary variables used to store intermediate results */

+    q31_t product3, product4;                    /* Temporary variables used to store intermediate results */

+

+    /* Each product truncates the 2.62 intermediate multiplication back to 1.31 */

+

+    /* Intermediate product is calculated by (Id * cosVal) */

+    product1 = (q31_t) (((q63_t) (Id) * (cosVal)) >> 31);

+

+    /* Intermediate product is calculated by (Iq * sinVal) */

+    product2 = (q31_t) (((q63_t) (Iq) * (sinVal)) >> 31);

+

+

+    /* Intermediate product is calculated by (Id * sinVal) */

+    product3 = (q31_t) (((q63_t) (Id) * (sinVal)) >> 31);

+

+    /* Intermediate product is calculated by (Iq * cosVal) */

+    product4 = (q31_t) (((q63_t) (Iq) * (cosVal)) >> 31);

+

+    /* pIalpha = Id * cosVal - Iq * sinVal, with saturating subtraction */

+    *pIalpha = __QSUB(product1, product2);

+

+    /* pIbeta = Id * sinVal + Iq * cosVal, with saturating addition */

+    *pIbeta = __QADD(product4, product3);

+

+  }

+

+  /**

+   * @} end of Inverse park group

+   */

+

+

+  /**

+   * @brief  Converts the elements of the Q31 vector to floating-point vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[out]  *pDst is output pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @return none.

+   */

+  void arm_q31_to_float(

+  q31_t * pSrc,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @ingroup groupInterpolation

+   */

+

+  /**

+   * @defgroup LinearInterpolate Linear Interpolation

+   *

+   * Linear interpolation is a method of curve fitting using linear polynomials.

+   * Linear interpolation works by effectively drawing a straight line between two neighboring samples and returning the appropriate point along that line

+   *

+   * \par

+   * \image html LinearInterp.gif "Linear interpolation"

+   *

+   * \par

+   * A  Linear Interpolate function calculates an output value(y), for the input(x)

+   * using linear interpolation of the input values x0, x1( nearest input values) and the output values y0 and y1(nearest output values)

+   *

+   * \par Algorithm:

+   * <pre>

+   *       y = y0 + (x - x0) * ((y1 - y0)/(x1-x0))

+   *       where x0, x1 are nearest values of input x

+   *             y0, y1 are nearest values to output y

+   * </pre>

+   *

+   * \par

+   * This set of functions implements Linear interpolation process

+   * for Q7, Q15, Q31, and floating-point data types.  The functions operate on a single

+   * sample of data and each call to the function returns a single processed value.

+   * <code>S</code> points to an instance of the Linear Interpolate function data structure.

+   * <code>x</code> is the input sample value. The functions returns the output value.

+   *

+   * \par

+   * If x is outside the table boundary, the linear interpolation functions return the first table value

+   * when x is below the input range, and the last table value when x is above the input range.

+   */

+

+  /**

+   * @addtogroup LinearInterpolate

+   * @{

+   */

+

+  /**

+   * @brief  Process function for the floating-point Linear Interpolation Function.

+   * @param[in,out] *S is an instance of the floating-point Linear Interpolation structure

+   * @param[in] x input sample to process

+   * @return y processed output sample.

+   *

+   */

+

+  static __INLINE float32_t arm_linear_interp_f32(

+  arm_linear_interp_instance_f32 * S,

+  float32_t x)

+  {

+

+    float32_t y;

+    float32_t x0, x1;                            /* Nearest input values */

+    float32_t y0, y1;                            /* Nearest output values */

+    float32_t xSpacing = S->xSpacing;            /* spacing between input values */

+    int32_t i;                                   /* Index variable */

+    float32_t *pYData = S->pYData;               /* pointer to output table */

+

+    /* Calculation of index (S->x1 is the first table abscissa) */

+    i = (int32_t) ((x - S->x1) / xSpacing);

+

+    if(i < 0)

+    {

+      /* Initialize output for below specified range as first output value of table */

+      y = pYData[0];

+    }

+    else if((uint32_t)i >= S->nValues)

+    {

+      /* Initialize output for above specified range as last output value of table */

+      y = pYData[S->nValues - 1];

+    }

+    else

+    {

+      /* Calculation of nearest input values */

+      x0 = S->x1 + i * xSpacing;

+      x1 = S->x1 + (i + 1) * xSpacing;

+

+      /* Read of nearest output values */

+      y0 = pYData[i];

+      y1 = pYData[i + 1];

+

+      /* Calculation of output: y = y0 + (x - x0) * (y1 - y0) / (x1 - x0) */

+      y = y0 + (x - x0) * ((y1 - y0) / (x1 - x0));

+

+    }

+

+    /* returns output value */

+    return (y);

+  }

+

+   /**

+   *

+   * @brief  Process function for the Q31 Linear Interpolation Function.

+   * @param[in] *pYData  pointer to Q31 Linear Interpolation table

+   * @param[in] x input sample to process

+   * @param[in] nValues number of table values

+   * @return y processed output sample.

+   *

+   * \par

+   * Input sample <code>x</code> is in 12.20 format which contains 12 bits for table index and 20 bits for fractional part.

+   * This function can support maximum of table size 2^12.

+   *

+   */

+

+

+  static __INLINE q31_t arm_linear_interp_q31(

+  q31_t * pYData,

+  q31_t x,

+  uint32_t nValues)

+  {

+    q31_t y;                                     /* output */

+    q31_t y0, y1;                                /* Nearest output values */

+    q31_t fract;                                 /* fractional part */

+    int32_t index;                               /* Index to read nearest output values */

+

+    /* Input is in 12.20 format */

+    /* 12 bits for the table index */

+    /* Index value calculation */

+    /* NOTE(review): 0xFFF00000 is an unsigned constant, so x is converted to unsigned  */

+    /* before the AND and index is always in [0, 4095]; the (index < 0) branch below    */

+    /* looks unreachable and negative x maps to a large index — confirm behavior for    */

+    /* x < 0 (compare with the q7 version, which guards x < 0 explicitly).              */

+    index = ((x & 0xFFF00000) >> 20);

+

+    if(index >= (int32_t)(nValues - 1))

+    {

+      return (pYData[nValues - 1]);

+    }

+    else if(index < 0)

+    {

+      return (pYData[0]);

+    }

+    else

+    {

+

+      /* 20 bits for the fractional part */

+      /* shift left by 11 to keep fract in 1.31 format */

+      fract = (x & 0x000FFFFF) << 11;

+

+      /* Read two nearest output values from the index in 1.31(q31) format */

+      y0 = pYData[index];

+      y1 = pYData[index + 1u];

+

+      /* Calculation of y0 * (1-fract) and y is in 2.30 format */

+      y = ((q31_t) ((q63_t) y0 * (0x7FFFFFFF - fract) >> 32));

+

+      /* Calculation of y0 * (1-fract) + y1 *fract and y is in 2.30 format */

+      y += ((q31_t) (((q63_t) y1 * fract) >> 32));

+

+      /* Convert y to 1.31 format */

+      return (y << 1u);

+

+    }

+

+  }

+

+  /**

+   *

+   * @brief  Process function for the Q15 Linear Interpolation Function.

+   * @param[in] *pYData  pointer to Q15 Linear Interpolation table

+   * @param[in] x input sample to process

+   * @param[in] nValues number of table values

+   * @return y processed output sample.

+   *

+   * \par

+   * Input sample <code>x</code> is in 12.20 format which contains 12 bits for table index and 20 bits for fractional part.

+   * This function can support maximum of table size 2^12.

+   *

+   */

+

+

+  static __INLINE q15_t arm_linear_interp_q15(

+  q15_t * pYData,

+  q31_t x,

+  uint32_t nValues)

+  {

+    q63_t y;                                     /* output */

+    q15_t y0, y1;                                /* Nearest output values */

+    q31_t fract;                                 /* fractional part */

+    int32_t index;                               /* Index to read nearest output values */

+

+    /* Input is in 12.20 format */

+    /* 12 bits for the table index */

+    /* Index value calculation */

+    /* NOTE(review): 0xFFF00000 is an unsigned constant, so x is converted to unsigned  */

+    /* before the AND and index is always in [0, 4095]; the (index < 0) branch below    */

+    /* looks unreachable for negative x — confirm intended behavior for x < 0.          */

+    index = ((x & 0xFFF00000) >> 20u);

+

+    if(index >= (int32_t)(nValues - 1))

+    {

+      return (pYData[nValues - 1]);

+    }

+    else if(index < 0)

+    {

+      return (pYData[0]);

+    }

+    else

+    {

+      /* 20 bits for the fractional part */

+      /* fract is in 12.20 format */

+      fract = (x & 0x000FFFFF);

+

+      /* Read two nearest output values from the index */

+      y0 = pYData[index];

+      y1 = pYData[index + 1u];

+

+      /* Calculation of y0 * (1-fract) and y is in 13.35 format */

+      y = ((q63_t) y0 * (0xFFFFF - fract));

+

+      /* Calculation of (y0 * (1-fract) + y1 * fract) and y is in 13.35 format */

+      y += ((q63_t) y1 * (fract));

+

+      /* convert y to 1.15 format by discarding the 20 fractional bits */

+      return (y >> 20);

+    }

+

+

+  }

+

+  /**

+   *

+   * @brief  Process function for the Q7 Linear Interpolation Function.

+   * @param[in] *pYData  pointer to Q7 Linear Interpolation table

+   * @param[in] x input sample to process

+   * @param[in] nValues number of table values

+   * @return y processed output sample.

+   *

+   * \par

+   * Input sample <code>x</code> is in 12.20 format which contains 12 bits for table index and 20 bits for fractional part.

+   * This function can support maximum of table size 2^12.

+   */

+

+

+  static __INLINE q7_t arm_linear_interp_q7(

+  q7_t * pYData,

+  q31_t x,

+  uint32_t nValues)

+  {

+    q31_t y;                                     /* output */

+    q7_t y0, y1;                                 /* Nearest output values */

+    q31_t fract;                                 /* fractional part */

+    uint32_t index;                              /* Index to read nearest output values */

+

+    /* Input is in 12.20 format */

+    /* 12 bits for the table index */

+    /* Index value calculation */

+    if (x < 0)

+    {

+      return (pYData[0]);

+    }

+    index = (x >> 20) & 0xfff;

+

+

+    if(index >= (nValues - 1))

+    {

+      return (pYData[nValues - 1]);

+    }

+    else

+    {

+

+      /* 20 bits for the fractional part */

+      /* fract is in 12.20 format */

+      fract = (x & 0x000FFFFF);

+

+      /* Read two nearest output values from the index and are in 1.7(q7) format */

+      y0 = pYData[index];

+      y1 = pYData[index + 1u];

+

+      /* Calculation of y0 * (1-fract ) and y is in 13.27(q27) format */

+      y = ((y0 * (0xFFFFF - fract)));

+

+      /* Calculation of y1 * fract + y0 * (1-fract) and y is in 13.27(q27) format */

+      y += (y1 * fract);

+

+      /* convert y to 1.7(q7) format */

+      return (y >> 20u);

+

+    }

+

+  }

+  /**

+   * @} end of LinearInterpolate group

+   */

+

+  /**

+   * @brief  Fast approximation to the trigonometric sine function for floating-point data.

+   * @param[in] x input value in radians.

+   * @return  sin(x).

+   */

+

+  float32_t arm_sin_f32(

+  float32_t x);

+

+  /**

+   * @brief  Fast approximation to the trigonometric sine function for Q31 data.

+   * @param[in] x Scaled input value in radians.

+   * @return  sin(x).

+   */

+

+  q31_t arm_sin_q31(

+  q31_t x);

+

+  /**

+   * @brief  Fast approximation to the trigonometric sine function for Q15 data.

+   * @param[in] x Scaled input value in radians.

+   * @return  sin(x).

+   */

+

+  q15_t arm_sin_q15(

+  q15_t x);

+

+  /**

+   * @brief  Fast approximation to the trigonometric cosine function for floating-point data.

+   * @param[in] x input value in radians.

+   * @return  cos(x).

+   */

+

+  float32_t arm_cos_f32(

+  float32_t x);

+

+  /**

+   * @brief Fast approximation to the trigonometric cosine function for Q31 data.

+   * @param[in] x Scaled input value in radians.

+   * @return  cos(x).

+   */

+

+  q31_t arm_cos_q31(

+  q31_t x);

+

+  /**

+   * @brief  Fast approximation to the trigonometric cosine function for Q15 data.

+   * @param[in] x Scaled input value in radians.

+   * @return  cos(x).

+   */

+

+  q15_t arm_cos_q15(

+  q15_t x);

+

+

+  /**

+   * @ingroup groupFastMath

+   */

+

+

+  /**

+   * @defgroup SQRT Square Root

+   *

+   * Computes the square root of a number.

+   * There are separate functions for Q15, Q31, and floating-point data types.

+   * The square root function is computed using the Newton-Raphson algorithm.

+   * This is an iterative algorithm of the form:

+   * <pre>

+   *      x1 = x0 - f(x0)/f'(x0)

+   * </pre>

+   * where <code>x1</code> is the current estimate,

+   * <code>x0</code> is the previous estimate, and

+   * <code>f'(x0)</code> is the derivative of <code>f()</code> evaluated at <code>x0</code>.

+   * For the square root function, the algorithm reduces to:

+   * <pre>

+   *     x0 = in/2                         [initial guess]

+   *     x1 = 1/2 * ( x0 + in / x0)        [each iteration]

+   * </pre>

+   */

+

+

+  /**

+   * @addtogroup SQRT

+   * @{

+   */

+

+  /**

+   * @brief  Floating-point square root function.

+   * @param[in]  in     input value.

+   * @param[out] *pOut  square root of input value.

+   * @return The function returns ARM_MATH_SUCCESS if input value is positive value or ARM_MATH_ARGUMENT_ERROR if

+   * <code>in</code> is negative value and returns zero output for negative values.

+   */

+

+  static __INLINE arm_status arm_sqrt_f32(

+  float32_t in,

+  float32_t * pOut)

+  {

+    if(in >= 0.0f)

+    {

+

+//      #if __FPU_USED

+#if (__FPU_USED == 1) && defined ( __CC_ARM   )

+      *pOut = __sqrtf(in);

+#else

+      *pOut = sqrtf(in);

+#endif

+

+      return (ARM_MATH_SUCCESS);

+    }

+    else

+    {

+      *pOut = 0.0f;

+      return (ARM_MATH_ARGUMENT_ERROR);

+    }

+

+  }

+

+

+  /**

+   * @brief Q31 square root function.

+   * @param[in]   in    input value.  The range of the input value is [0 +1) or 0x00000000 to 0x7FFFFFFF.

+   * @param[out]  *pOut square root of input value.

+   * @return The function returns ARM_MATH_SUCCESS if input value is positive value or ARM_MATH_ARGUMENT_ERROR if

+   * <code>in</code> is negative value and returns zero output for negative values.

+   */

+  arm_status arm_sqrt_q31(

+  q31_t in,

+  q31_t * pOut);

+

+  /**

+   * @brief  Q15 square root function.

+   * @param[in]   in     input value.  The range of the input value is [0 +1) or 0x0000 to 0x7FFF.

+   * @param[out]  *pOut  square root of input value.

+   * @return The function returns ARM_MATH_SUCCESS if input value is positive value or ARM_MATH_ARGUMENT_ERROR if

+   * <code>in</code> is negative value and returns zero output for negative values.

+   */

+  arm_status arm_sqrt_q15(

+  q15_t in,

+  q15_t * pOut);

+

+  /**

+   * @} end of SQRT group

+   */

+

+

+

+

+

+

+  /**

+   * @brief floating-point Circular write function.

+   */

+

+  static __INLINE void arm_circularWrite_f32(

+  int32_t * circBuffer,

+  int32_t L,

+  uint16_t * writeOffset,

+  int32_t bufferInc,

+  const int32_t * src,

+  int32_t srcInc,

+  uint32_t blockSize)

+  {

+    uint32_t i = 0u;

+    int32_t wOffset;

+

+    /* Copy the value of Index pointer that points

+     * to the current location where the input samples to be copied */

+    wOffset = *writeOffset;

+

+    /* Loop over the blockSize */

+    i = blockSize;

+

+    while(i > 0u)

+    {

+      /* copy the input sample to the circular buffer */

+      circBuffer[wOffset] = *src;

+

+      /* Update the input pointer */

+      src += srcInc;

+

+      /* Circularly update wOffset.  Watch out for positive and negative value */

+      wOffset += bufferInc;

+      if(wOffset >= L)

+        wOffset -= L;

+

+      /* Decrement the loop counter */

+      i--;

+    }

+

+    /* Update the index pointer */

+    *writeOffset = wOffset;

+  }

+

+

+

+  /**

+   * @brief floating-point Circular Read function.

+   */

+  static __INLINE void arm_circularRead_f32(

+  int32_t * circBuffer,

+  int32_t L,

+  int32_t * readOffset,

+  int32_t bufferInc,

+  int32_t * dst,

+  int32_t * dst_base,

+  int32_t dst_length,

+  int32_t dstInc,

+  uint32_t blockSize)

+  {

+    uint32_t i = 0u;

+    int32_t rOffset, dst_end;

+

+    /* Copy the value of Index pointer that points

+     * to the current location from where the input samples to be read */

+    rOffset = *readOffset;

+    dst_end = (int32_t) (dst_base + dst_length);

+

+    /* Loop over the blockSize */

+    i = blockSize;

+

+    while(i > 0u)

+    {

+      /* copy the sample from the circular buffer to the destination buffer */

+      *dst = circBuffer[rOffset];

+

+      /* Update the input pointer */

+      dst += dstInc;

+

+      if(dst == (int32_t *) dst_end)

+      {

+        dst = dst_base;

+      }

+

+      /* Circularly update rOffset.  Watch out for positive and negative value  */

+      rOffset += bufferInc;

+

+      if(rOffset >= L)

+      {

+        rOffset -= L;

+      }

+

+      /* Decrement the loop counter */

+      i--;

+    }

+

+    /* Update the index pointer */

+    *readOffset = rOffset;

+  }

+

+  /**

+   * @brief Q15 Circular write function.

+   */

+

+  static __INLINE void arm_circularWrite_q15(

+  q15_t * circBuffer,

+  int32_t L,

+  uint16_t * writeOffset,

+  int32_t bufferInc,

+  const q15_t * src,

+  int32_t srcInc,

+  uint32_t blockSize)

+  {

+    uint32_t i = 0u;

+    int32_t wOffset;

+

+    /* Copy the value of Index pointer that points

+     * to the current location where the input samples to be copied */

+    wOffset = *writeOffset;

+

+    /* Loop over the blockSize */

+    i = blockSize;

+

+    while(i > 0u)

+    {

+      /* copy the input sample to the circular buffer */

+      circBuffer[wOffset] = *src;

+

+      /* Update the input pointer */

+      src += srcInc;

+

+      /* Circularly update wOffset.  Watch out for positive and negative value */

+      wOffset += bufferInc;

+      if(wOffset >= L)

+        wOffset -= L;

+

+      /* Decrement the loop counter */

+      i--;

+    }

+

+    /* Update the index pointer */

+    *writeOffset = wOffset;

+  }

+

+

+

+  /**

+   * @brief Q15 Circular Read function.

+   */

+  static __INLINE void arm_circularRead_q15(

+  q15_t * circBuffer,

+  int32_t L,

+  int32_t * readOffset,

+  int32_t bufferInc,

+  q15_t * dst,

+  q15_t * dst_base,

+  int32_t dst_length,

+  int32_t dstInc,

+  uint32_t blockSize)

+  {

+    uint32_t i = 0;

+    int32_t rOffset, dst_end;

+

+    /* Copy the value of Index pointer that points

+     * to the current location from where the input samples to be read */

+    rOffset = *readOffset;

+

+    dst_end = (int32_t) (dst_base + dst_length);

+

+    /* Loop over the blockSize */

+    i = blockSize;

+

+    while(i > 0u)

+    {

+      /* copy the sample from the circular buffer to the destination buffer */

+      *dst = circBuffer[rOffset];

+

+      /* Update the input pointer */

+      dst += dstInc;

+

+      if(dst == (q15_t *) dst_end)

+      {

+        dst = dst_base;

+      }

+

+      /* Circularly update wOffset.  Watch out for positive and negative value */

+      rOffset += bufferInc;

+

+      if(rOffset >= L)

+      {

+        rOffset -= L;

+      }

+

+      /* Decrement the loop counter */

+      i--;

+    }

+

+    /* Update the index pointer */

+    *readOffset = rOffset;

+  }

+

+

+  /**

+   * @brief Q7 Circular write function.

+   */

+

+  static __INLINE void arm_circularWrite_q7(

+  q7_t * circBuffer,

+  int32_t L,

+  uint16_t * writeOffset,

+  int32_t bufferInc,

+  const q7_t * src,

+  int32_t srcInc,

+  uint32_t blockSize)

+  {

+    uint32_t i = 0u;

+    int32_t wOffset;

+

+    /* Copy the value of Index pointer that points

+     * to the current location where the input samples to be copied */

+    wOffset = *writeOffset;

+

+    /* Loop over the blockSize */

+    i = blockSize;

+

+    while(i > 0u)

+    {

+      /* copy the input sample to the circular buffer */

+      circBuffer[wOffset] = *src;

+

+      /* Update the input pointer */

+      src += srcInc;

+

+      /* Circularly update wOffset.  Watch out for positive and negative value */

+      wOffset += bufferInc;

+      if(wOffset >= L)

+        wOffset -= L;

+

+      /* Decrement the loop counter */

+      i--;

+    }

+

+    /* Update the index pointer */

+    *writeOffset = wOffset;

+  }

+

+

+

+  /**

+   * @brief Q7 Circular Read function.

+   */

+  static __INLINE void arm_circularRead_q7(

+  q7_t * circBuffer,

+  int32_t L,

+  int32_t * readOffset,

+  int32_t bufferInc,

+  q7_t * dst,

+  q7_t * dst_base,

+  int32_t dst_length,

+  int32_t dstInc,

+  uint32_t blockSize)

+  {

+    uint32_t i = 0;

+    int32_t rOffset, dst_end;

+

+    /* Copy the value of Index pointer that points

+     * to the current location from where the input samples to be read */

+    rOffset = *readOffset;

+

+    dst_end = (int32_t) (dst_base + dst_length);

+

+    /* Loop over the blockSize */

+    i = blockSize;

+

+    while(i > 0u)

+    {

+      /* copy the sample from the circular buffer to the destination buffer */

+      *dst = circBuffer[rOffset];

+

+      /* Update the input pointer */

+      dst += dstInc;

+

+      if(dst == (q7_t *) dst_end)

+      {

+        dst = dst_base;

+      }

+

+      /* Circularly update rOffset.  Watch out for positive and negative value */

+      rOffset += bufferInc;

+

+      if(rOffset >= L)

+      {

+        rOffset -= L;

+      }

+

+      /* Decrement the loop counter */

+      i--;

+    }

+

+    /* Update the index pointer */

+    *readOffset = rOffset;

+  }

+

+

+  /**

+   * @brief  Sum of the squares of the elements of a Q31 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+

+  void arm_power_q31(

+  q31_t * pSrc,

+  uint32_t blockSize,

+  q63_t * pResult);

+

+  /**

+   * @brief  Sum of the squares of the elements of a floating-point vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+

+  void arm_power_f32(

+  float32_t * pSrc,

+  uint32_t blockSize,

+  float32_t * pResult);

+

+  /**

+   * @brief  Sum of the squares of the elements of a Q15 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+

+  void arm_power_q15(

+  q15_t * pSrc,

+  uint32_t blockSize,

+  q63_t * pResult);

+

+  /**

+   * @brief  Sum of the squares of the elements of a Q7 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+

+  void arm_power_q7(

+  q7_t * pSrc,

+  uint32_t blockSize,

+  q31_t * pResult);

+

+  /**

+   * @brief  Mean value of a Q7 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+

+  void arm_mean_q7(

+  q7_t * pSrc,

+  uint32_t blockSize,

+  q7_t * pResult);

+

+  /**

+   * @brief  Mean value of a Q15 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+  void arm_mean_q15(

+  q15_t * pSrc,

+  uint32_t blockSize,

+  q15_t * pResult);

+

+  /**

+   * @brief  Mean value of a Q31 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+  void arm_mean_q31(

+  q31_t * pSrc,

+  uint32_t blockSize,

+  q31_t * pResult);

+

+  /**

+   * @brief  Mean value of a floating-point vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+  void arm_mean_f32(

+  float32_t * pSrc,

+  uint32_t blockSize,

+  float32_t * pResult);

+

+  /**

+   * @brief  Variance of the elements of a floating-point vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+

+  void arm_var_f32(

+  float32_t * pSrc,

+  uint32_t blockSize,

+  float32_t * pResult);

+

+  /**

+   * @brief  Variance of the elements of a Q31 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+

+  void arm_var_q31(

+  q31_t * pSrc,

+  uint32_t blockSize,

+  q31_t * pResult);

+

+  /**

+   * @brief  Variance of the elements of a Q15 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+

+  void arm_var_q15(

+  q15_t * pSrc,

+  uint32_t blockSize,

+  q15_t * pResult);

+

+  /**

+   * @brief  Root Mean Square of the elements of a floating-point vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+

+  void arm_rms_f32(

+  float32_t * pSrc,

+  uint32_t blockSize,

+  float32_t * pResult);

+

+  /**

+   * @brief  Root Mean Square of the elements of a Q31 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+

+  void arm_rms_q31(

+  q31_t * pSrc,

+  uint32_t blockSize,

+  q31_t * pResult);

+

+  /**

+   * @brief  Root Mean Square of the elements of a Q15 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+

+  void arm_rms_q15(

+  q15_t * pSrc,

+  uint32_t blockSize,

+  q15_t * pResult);

+

+  /**

+   * @brief  Standard deviation of the elements of a floating-point vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+

+  void arm_std_f32(

+  float32_t * pSrc,

+  uint32_t blockSize,

+  float32_t * pResult);

+

+  /**

+   * @brief  Standard deviation of the elements of a Q31 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+

+  void arm_std_q31(

+  q31_t * pSrc,

+  uint32_t blockSize,

+  q31_t * pResult);

+

+  /**

+   * @brief  Standard deviation of the elements of a Q15 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output value.

+   * @return none.

+   */

+

+  void arm_std_q15(

+  q15_t * pSrc,

+  uint32_t blockSize,

+  q15_t * pResult);

+

+  /**

+   * @brief  Floating-point complex magnitude

+   * @param[in]  *pSrc points to the complex input vector

+   * @param[out]  *pDst points to the real output vector

+   * @param[in]  numSamples number of complex samples in the input vector

+   * @return none.

+   */

+

+  void arm_cmplx_mag_f32(

+  float32_t * pSrc,

+  float32_t * pDst,

+  uint32_t numSamples);

+

+  /**

+   * @brief  Q31 complex magnitude

+   * @param[in]  *pSrc points to the complex input vector

+   * @param[out]  *pDst points to the real output vector

+   * @param[in]  numSamples number of complex samples in the input vector

+   * @return none.

+   */

+

+  void arm_cmplx_mag_q31(

+  q31_t * pSrc,

+  q31_t * pDst,

+  uint32_t numSamples);

+

+  /**

+   * @brief  Q15 complex magnitude

+   * @param[in]  *pSrc points to the complex input vector

+   * @param[out]  *pDst points to the real output vector

+   * @param[in]  numSamples number of complex samples in the input vector

+   * @return none.

+   */

+

+  void arm_cmplx_mag_q15(

+  q15_t * pSrc,

+  q15_t * pDst,

+  uint32_t numSamples);

+

+  /**

+   * @brief  Q15 complex dot product

+   * @param[in]  *pSrcA points to the first input vector

+   * @param[in]  *pSrcB points to the second input vector

+   * @param[in]  numSamples number of complex samples in each vector

+   * @param[out]  *realResult real part of the result returned here

+   * @param[out]  *imagResult imaginary part of the result returned here

+   * @return none.

+   */

+

+  void arm_cmplx_dot_prod_q15(

+  q15_t * pSrcA,

+  q15_t * pSrcB,

+  uint32_t numSamples,

+  q31_t * realResult,

+  q31_t * imagResult);

+

+  /**

+   * @brief  Q31 complex dot product

+   * @param[in]  *pSrcA points to the first input vector

+   * @param[in]  *pSrcB points to the second input vector

+   * @param[in]  numSamples number of complex samples in each vector

+   * @param[out]  *realResult real part of the result returned here

+   * @param[out]  *imagResult imaginary part of the result returned here

+   * @return none.

+   */

+

+  void arm_cmplx_dot_prod_q31(

+  q31_t * pSrcA,

+  q31_t * pSrcB,

+  uint32_t numSamples,

+  q63_t * realResult,

+  q63_t * imagResult);

+

+  /**

+   * @brief  Floating-point complex dot product

+   * @param[in]  *pSrcA points to the first input vector

+   * @param[in]  *pSrcB points to the second input vector

+   * @param[in]  numSamples number of complex samples in each vector

+   * @param[out]  *realResult real part of the result returned here

+   * @param[out]  *imagResult imaginary part of the result returned here

+   * @return none.

+   */

+

+  void arm_cmplx_dot_prod_f32(

+  float32_t * pSrcA,

+  float32_t * pSrcB,

+  uint32_t numSamples,

+  float32_t * realResult,

+  float32_t * imagResult);

+

+  /**

+   * @brief  Q15 complex-by-real multiplication

+   * @param[in]  *pSrcCmplx points to the complex input vector

+   * @param[in]  *pSrcReal points to the real input vector

+   * @param[out]  *pCmplxDst points to the complex output vector

+   * @param[in]  numSamples number of samples in each vector

+   * @return none.

+   */

+

+  void arm_cmplx_mult_real_q15(

+  q15_t * pSrcCmplx,

+  q15_t * pSrcReal,

+  q15_t * pCmplxDst,

+  uint32_t numSamples);

+

+  /**

+   * @brief  Q31 complex-by-real multiplication

+   * @param[in]  *pSrcCmplx points to the complex input vector

+   * @param[in]  *pSrcReal points to the real input vector

+   * @param[out]  *pCmplxDst points to the complex output vector

+   * @param[in]  numSamples number of samples in each vector

+   * @return none.

+   */

+

+  void arm_cmplx_mult_real_q31(

+  q31_t * pSrcCmplx,

+  q31_t * pSrcReal,

+  q31_t * pCmplxDst,

+  uint32_t numSamples);

+

+  /**

+   * @brief  Floating-point complex-by-real multiplication

+   * @param[in]  *pSrcCmplx points to the complex input vector

+   * @param[in]  *pSrcReal points to the real input vector

+   * @param[out]  *pCmplxDst points to the complex output vector

+   * @param[in]  numSamples number of samples in each vector

+   * @return none.

+   */

+

+  void arm_cmplx_mult_real_f32(

+  float32_t * pSrcCmplx,

+  float32_t * pSrcReal,

+  float32_t * pCmplxDst,

+  uint32_t numSamples);

+

+  /**

+   * @brief  Minimum value of a Q7 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *result is output pointer

+   * @param[out]  *index is the array index of the minimum value in the input buffer.

+   * @return none.

+   */

+

+  void arm_min_q7(

+  q7_t * pSrc,

+  uint32_t blockSize,

+  q7_t * result,

+  uint32_t * index);

+

+  /**

+   * @brief  Minimum value of a Q15 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output pointer

+   * @param[out]  *pIndex is the array index of the minimum value in the input buffer.

+   * @return none.

+   */

+

+  void arm_min_q15(

+  q15_t * pSrc,

+  uint32_t blockSize,

+  q15_t * pResult,

+  uint32_t * pIndex);

+

+  /**

+   * @brief  Minimum value of a Q31 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output pointer

+   * @param[out]  *pIndex is the array index of the minimum value in the input buffer.

+   * @return none.

+   */

+  void arm_min_q31(

+  q31_t * pSrc,

+  uint32_t blockSize,

+  q31_t * pResult,

+  uint32_t * pIndex);

+

+  /**

+   * @brief  Minimum value of a floating-point vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @param[out]  *pResult is output pointer

+   * @param[out]  *pIndex is the array index of the minimum value in the input buffer.

+   * @return none.

+   */

+

+  void arm_min_f32(

+  float32_t * pSrc,

+  uint32_t blockSize,

+  float32_t * pResult,

+  uint32_t * pIndex);

+

+/**

+ * @brief Maximum value of a Q7 vector.

+ * @param[in]       *pSrc points to the input buffer

+ * @param[in]       blockSize length of the input vector

+ * @param[out]      *pResult maximum value returned here

+ * @param[out]      *pIndex index of maximum value returned here

+ * @return none.

+ */

+

+  void arm_max_q7(

+  q7_t * pSrc,

+  uint32_t blockSize,

+  q7_t * pResult,

+  uint32_t * pIndex);

+

+/**

+ * @brief Maximum value of a Q15 vector.

+ * @param[in]       *pSrc points to the input buffer

+ * @param[in]       blockSize length of the input vector

+ * @param[out]      *pResult maximum value returned here

+ * @param[out]      *pIndex index of maximum value returned here

+ * @return none.

+ */

+

+  void arm_max_q15(

+  q15_t * pSrc,

+  uint32_t blockSize,

+  q15_t * pResult,

+  uint32_t * pIndex);

+

+/**

+ * @brief Maximum value of a Q31 vector.

+ * @param[in]       *pSrc points to the input buffer

+ * @param[in]       blockSize length of the input vector

+ * @param[out]      *pResult maximum value returned here

+ * @param[out]      *pIndex index of maximum value returned here

+ * @return none.

+ */

+

+  void arm_max_q31(

+  q31_t * pSrc,

+  uint32_t blockSize,

+  q31_t * pResult,

+  uint32_t * pIndex);

+

+/**

+ * @brief Maximum value of a floating-point vector.

+ * @param[in]       *pSrc points to the input buffer

+ * @param[in]       blockSize length of the input vector

+ * @param[out]      *pResult maximum value returned here

+ * @param[out]      *pIndex index of maximum value returned here

+ * @return none.

+ */

+

+  void arm_max_f32(

+  float32_t * pSrc,

+  uint32_t blockSize,

+  float32_t * pResult,

+  uint32_t * pIndex);

+

+  /**

+   * @brief  Q15 complex-by-complex multiplication

+   * @param[in]  *pSrcA points to the first input vector

+   * @param[in]  *pSrcB points to the second input vector

+   * @param[out]  *pDst  points to the output vector

+   * @param[in]  numSamples number of complex samples in each vector

+   * @return none.

+   */

+

+  void arm_cmplx_mult_cmplx_q15(

+  q15_t * pSrcA,

+  q15_t * pSrcB,

+  q15_t * pDst,

+  uint32_t numSamples);

+

+  /**

+   * @brief  Q31 complex-by-complex multiplication

+   * @param[in]  *pSrcA points to the first input vector

+   * @param[in]  *pSrcB points to the second input vector

+   * @param[out]  *pDst  points to the output vector

+   * @param[in]  numSamples number of complex samples in each vector

+   * @return none.

+   */

+

+  void arm_cmplx_mult_cmplx_q31(

+  q31_t * pSrcA,

+  q31_t * pSrcB,

+  q31_t * pDst,

+  uint32_t numSamples);

+

+  /**

+   * @brief  Floating-point complex-by-complex multiplication

+   * @param[in]  *pSrcA points to the first input vector

+   * @param[in]  *pSrcB points to the second input vector

+   * @param[out]  *pDst  points to the output vector

+   * @param[in]  numSamples number of complex samples in each vector

+   * @return none.

+   */

+

+  void arm_cmplx_mult_cmplx_f32(

+  float32_t * pSrcA,

+  float32_t * pSrcB,

+  float32_t * pDst,

+  uint32_t numSamples);

+

+  /**

+   * @brief Converts the elements of the floating-point vector to Q31 vector.

+   * @param[in]       *pSrc points to the floating-point input vector

+   * @param[out]      *pDst points to the Q31 output vector

+   * @param[in]       blockSize length of the input vector

+   * @return none.

+   */

+  void arm_float_to_q31(

+  float32_t * pSrc,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Converts the elements of the floating-point vector to Q15 vector.

+   * @param[in]       *pSrc points to the floating-point input vector

+   * @param[out]      *pDst points to the Q15 output vector

+   * @param[in]       blockSize length of the input vector

+   * @return          none

+   */

+  void arm_float_to_q15(

+  float32_t * pSrc,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief Converts the elements of the floating-point vector to Q7 vector.

+   * @param[in]       *pSrc points to the floating-point input vector

+   * @param[out]      *pDst points to the Q7 output vector

+   * @param[in]       blockSize length of the input vector

+   * @return          none

+   */

+  void arm_float_to_q7(

+  float32_t * pSrc,

+  q7_t * pDst,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief  Converts the elements of the Q31 vector to Q15 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[out]  *pDst is output pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @return none.

+   */

+  void arm_q31_to_q15(

+  q31_t * pSrc,

+  q15_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Converts the elements of the Q31 vector to Q7 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[out]  *pDst is output pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @return none.

+   */

+  void arm_q31_to_q7(

+  q31_t * pSrc,

+  q7_t * pDst,

+  uint32_t blockSize);

+

+  /**

+   * @brief  Converts the elements of the Q15 vector to floating-point vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[out]  *pDst is output pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @return none.

+   */

+  void arm_q15_to_float(

+  q15_t * pSrc,

+  float32_t * pDst,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief  Converts the elements of the Q15 vector to Q31 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[out]  *pDst is output pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @return none.

+   */

+  void arm_q15_to_q31(

+  q15_t * pSrc,

+  q31_t * pDst,

+  uint32_t blockSize);

+

+

+  /**

+   * @brief  Converts the elements of the Q15 vector to Q7 vector.

+   * @param[in]  *pSrc is input pointer

+   * @param[out]  *pDst is output pointer

+   * @param[in]  blockSize is the number of samples to process

+   * @return none.

+   */

+  void arm_q15_to_q7(

+  q15_t * pSrc,

+  q7_t * pDst,

+  uint32_t blockSize);

+

+

+  /**

+   * @ingroup groupInterpolation

+   */

+

+  /**

+   * @defgroup BilinearInterpolate Bilinear Interpolation

+   *

+   * Bilinear interpolation is an extension of linear interpolation applied to a two dimensional grid.

+   * The underlying function <code>f(x, y)</code> is sampled on a regular grid and the interpolation process

+   * determines values between the grid points.

+   * Bilinear interpolation is equivalent to two step linear interpolation, first in the x-dimension and then in the y-dimension.

+   * Bilinear interpolation is often used in image processing to rescale images.

+   * The CMSIS DSP library provides bilinear interpolation functions for Q7, Q15, Q31, and floating-point data types.

+   *

+   * <b>Algorithm</b>

+   * \par

+   * The instance structure used by the bilinear interpolation functions describes a two dimensional data table.

+   * For floating-point, the instance structure is defined as:

+   * <pre>

+   *   typedef struct

+   *   {

+   *     uint16_t numRows;

+   *     uint16_t numCols;

+   *     float32_t *pData;

+   * } arm_bilinear_interp_instance_f32;

+   * </pre>

+   *

+   * \par

+   * where <code>numRows</code> specifies the number of rows in the table;

+   * <code>numCols</code> specifies the number of columns in the table;

+   * and <code>pData</code> points to an array of size <code>numRows*numCols</code> values.

+   * The data table <code>pTable</code> is organized in row order and the supplied data values fall on integer indexes.

+   * That is, table element (x,y) is located at <code>pTable[x + y*numCols]</code> where x and y are integers.

+   *

+   * \par

+   * Let <code>(x, y)</code> specify the desired interpolation point.  Then define:

+   * <pre>

+   *     XF = floor(x)

+   *     YF = floor(y)

+   * </pre>

+   * \par

+   * The interpolated output point is computed as:

+   * <pre>

+   *  f(x, y) = f(XF, YF) * (1-(x-XF)) * (1-(y-YF))

+   *           + f(XF+1, YF) * (x-XF)*(1-(y-YF))

+   *           + f(XF, YF+1) * (1-(x-XF))*(y-YF)

+   *           + f(XF+1, YF+1) * (x-XF)*(y-YF)

+   * </pre>

+   * Note that the coordinates (x, y) contain integer and fractional components.

+   * The integer components specify which portion of the table to use while the

+   * fractional components control the interpolation process.

+   *

+   * \par

+   * if (x,y) are outside of the table boundary, Bilinear interpolation returns zero output.

+   */

+

+  /**

+   * @addtogroup BilinearInterpolate

+   * @{

+   */

+

+  /**

+  *

+  * @brief  Floating-point bilinear interpolation.

+  * @param[in,out] *S points to an instance of the interpolation structure.

+  * @param[in] X interpolation coordinate.

+  * @param[in] Y interpolation coordinate.

+  * @return out interpolated value.

+  */

+

+

+  static __INLINE float32_t arm_bilinear_interp_f32(

+  const arm_bilinear_interp_instance_f32 * S,

+  float32_t X,

+  float32_t Y)

+  {

+    float32_t out;

+    float32_t f00, f01, f10, f11;

+    float32_t *pData = S->pData;

+    int32_t xIndex, yIndex, index;

+    float32_t xdiff, ydiff;

+    float32_t b1, b2, b3, b4;

+

+    xIndex = (int32_t) X;

+    yIndex = (int32_t) Y;

+

+    /* Care taken for table outside boundary */

+    /* Returns zero output when values are outside table boundary */

+    if(xIndex < 0 || xIndex > (S->numRows - 1) || yIndex < 0

+       || yIndex > (S->numCols - 1))

+    {

+      return (0);

+    }

+

+    /* Index of the two nearest points in X; NOTE(review): the -1 offsets make coordinates effectively 1-based here, unlike the 0-based pTable[x + y*numCols] layout in the group doc — confirm intended */

+    index = (xIndex - 1) + (yIndex - 1) * S->numCols;

+

+

+    /* Read two nearest points in X-direction */

+    f00 = pData[index];

+    f01 = pData[index + 1];

+

+    /* Calculation of index for two nearest points in Y-direction */

+    index = (xIndex - 1) + (yIndex) * S->numCols;

+

+

+    /* Read two nearest points in Y-direction */

+    f10 = pData[index];

+    f11 = pData[index + 1];

+

+    /* Calculation of intermediate values */

+    b1 = f00;

+    b2 = f01 - f00;

+    b3 = f10 - f00;

+    b4 = f00 - f01 - f10 + f11;

+

+    /* Calculation of fractional part in X */

+    xdiff = X - xIndex;

+

+    /* Calculation of fractional part in Y */

+    ydiff = Y - yIndex;

+

+    /* Calculation of bi-linear interpolated output */

+    out = b1 + b2 * xdiff + b3 * ydiff + b4 * xdiff * ydiff;

+

+    /* return to application */

+    return (out);

+

+  }

+

+  /**

+  *

+  * @brief  Q31 bilinear interpolation.

+  * @param[in,out] *S points to an instance of the interpolation structure.

+  * @param[in] X interpolation coordinate in 12.20 format.

+  * @param[in] Y interpolation coordinate in 12.20 format.

+  * @return out interpolated value.

+  */

+

+  static __INLINE q31_t arm_bilinear_interp_q31(

+  arm_bilinear_interp_instance_q31 * S,

+  q31_t X,

+  q31_t Y)

+  {

+    q31_t out;                                   /* Temporary output */

+    q31_t acc = 0;                               /* output */

+    q31_t xfract, yfract;                        /* X, Y fractional parts */

+    q31_t x1, x2, y1, y2;                        /* Nearest output values */

+    int32_t rI, cI;                              /* Row and column indices */

+    q31_t *pYData = S->pData;                    /* pointer to output table values */

+    uint32_t nCols = S->numCols;                 /* number of columns */

+

+

+    /* Input is in 12.20 format */

+    /* 12 bits for the table index */

+    /* Index value calculation */

+    rI = ((X & 0xFFF00000) >> 20u);

+

+    /* Input is in 12.20 format */

+    /* 12 bits for the table index */

+    /* Index value calculation */

+    cI = ((Y & 0xFFF00000) >> 20u);

+

+    /* Care taken for table outside boundary */

+    /* Returns zero output when values are outside table boundary */

+    if(rI < 0 || rI > (S->numRows - 1) || cI < 0 || cI > (S->numCols - 1))

+    {

+      return (0);

+    }

+

+    /* 20 bits for the fractional part */

+    /* shift left xfract by 11 to keep 1.31 format */

+    xfract = (X & 0x000FFFFF) << 11u;

+

+    /* Read two nearest output values from the index */

+    x1 = pYData[(rI) + nCols * (cI)];

+    x2 = pYData[(rI) + nCols * (cI) + 1u];

+

+    /* 20 bits for the fractional part */

+    /* shift left yfract by 11 to keep 1.31 format */

+    yfract = (Y & 0x000FFFFF) << 11u;

+

+    /* Read two nearest output values from the index */

+    y1 = pYData[(rI) + nCols * (cI + 1)];

+    y2 = pYData[(rI) + nCols * (cI + 1) + 1u];

+

+    /* Calculation of x1 * (1-xfract ) * (1-yfract) and acc is in 3.29(q29) format */

+    out = ((q31_t) (((q63_t) x1 * (0x7FFFFFFF - xfract)) >> 32));

+    acc = ((q31_t) (((q63_t) out * (0x7FFFFFFF - yfract)) >> 32));

+

+    /* x2 * (xfract) * (1-yfract)  in 3.29(q29) and adding to acc */

+    out = ((q31_t) ((q63_t) x2 * (0x7FFFFFFF - yfract) >> 32));

+    acc += ((q31_t) ((q63_t) out * (xfract) >> 32));

+

+    /* y1 * (1 - xfract) * (yfract)  in 3.29(q29) and adding to acc */

+    out = ((q31_t) ((q63_t) y1 * (0x7FFFFFFF - xfract) >> 32));

+    acc += ((q31_t) ((q63_t) out * (yfract) >> 32));

+

+    /* y2 * (xfract) * (yfract)  in 3.29(q29) and adding to acc */

+    out = ((q31_t) ((q63_t) y2 * (xfract) >> 32));

+    acc += ((q31_t) ((q63_t) out * (yfract) >> 32));

+

+    /* Convert acc to 1.31(q31) format */

+    return (acc << 2u);

+

+  }

+

+  /**

+  * @brief  Q15 bilinear interpolation.

+  * @param[in,out] *S points to an instance of the interpolation structure.

+  * @param[in] X interpolation coordinate in 12.20 format.

+  * @param[in] Y interpolation coordinate in 12.20 format.

+  * @return out interpolated value.

+  */

+

+  static __INLINE q15_t arm_bilinear_interp_q15(

+  arm_bilinear_interp_instance_q15 * S,

+  q31_t X,

+  q31_t Y)

+  {

+    q63_t acc = 0;                               /* output */

+    q31_t out;                                   /* Temporary output */

+    q15_t x1, x2, y1, y2;                        /* Nearest output values */

+    q31_t xfract, yfract;                        /* X, Y fractional parts */

+    int32_t rI, cI;                              /* Row and column indices */

+    q15_t *pYData = S->pData;                    /* pointer to output table values */

+    uint32_t nCols = S->numCols;                 /* number of columns */

+

+    /* Input is in 12.20 format */

+    /* 12 bits for the table index */

+    /* Index value calculation */

+    rI = ((X & 0xFFF00000) >> 20);

+

+    /* Input is in 12.20 format */

+    /* 12 bits for the table index */

+    /* Index value calculation */

+    cI = ((Y & 0xFFF00000) >> 20);

+

+    /* Care taken for table outside boundary */

+    /* Returns zero output when values are outside table boundary */

+    if(rI < 0 || rI > (S->numRows - 1) || cI < 0 || cI > (S->numCols - 1))

+    {

+      return (0);

+    }

+

+    /* 20 bits for the fractional part */

+    /* xfract should be in 12.20 format */

+    xfract = (X & 0x000FFFFF);

+

+    /* Read two nearest output values from the index */

+    x1 = pYData[(rI) + nCols * (cI)];

+    x2 = pYData[(rI) + nCols * (cI) + 1u];

+

+

+    /* 20 bits for the fractional part */

+    /* yfract should be in 12.20 format */

+    yfract = (Y & 0x000FFFFF);

+

+    /* Read two nearest output values from the index */

+    y1 = pYData[(rI) + nCols * (cI + 1)];

+    y2 = pYData[(rI) + nCols * (cI + 1) + 1u];

+

+    /* Calculation of x1 * (1-xfract ) * (1-yfract) and acc is in 13.51 format */

+

+    /* x1 is in 1.15(q15), xfract in 12.20 format and out is in 13.35 format */

+    /* convert 13.35 to 13.31 by right shifting  and out is in 1.31 */

+    out = (q31_t) (((q63_t) x1 * (0xFFFFF - xfract)) >> 4u);

+    acc = ((q63_t) out * (0xFFFFF - yfract));

+

+    /* x2 * (xfract) * (1-yfract)  in 1.51 and adding to acc */

+    out = (q31_t) (((q63_t) x2 * (0xFFFFF - yfract)) >> 4u);

+    acc += ((q63_t) out * (xfract));

+

+    /* y1 * (1 - xfract) * (yfract)  in 1.51 and adding to acc */

+    out = (q31_t) (((q63_t) y1 * (0xFFFFF - xfract)) >> 4u);

+    acc += ((q63_t) out * (yfract));

+

+    /* y2 * (xfract) * (yfract)  in 1.51 and adding to acc */

+    out = (q31_t) (((q63_t) y2 * (xfract)) >> 4u);

+    acc += ((q63_t) out * (yfract));

+

+    /* acc is in 13.51 format and down shift acc by 36 times */

+    /* Convert out to 1.15 format */

+    return (acc >> 36);

+

+  }

+

+  /**

+  * @brief  Q7 bilinear interpolation.

+  * @param[in,out] *S points to an instance of the interpolation structure.

+  * @param[in] X interpolation coordinate in 12.20 format.

+  * @param[in] Y interpolation coordinate in 12.20 format.

+  * @return out interpolated value.

+  */

+

+  static __INLINE q7_t arm_bilinear_interp_q7(

+  arm_bilinear_interp_instance_q7 * S,

+  q31_t X,

+  q31_t Y)

+  {

+    q63_t acc = 0;                               /* output */

+    q31_t out;                                   /* Temporary output */

+    q31_t xfract, yfract;                        /* X, Y fractional parts */

+    q7_t x1, x2, y1, y2;                         /* Nearest output values */

+    int32_t rI, cI;                              /* Row and column indices */

+    q7_t *pYData = S->pData;                     /* pointer to output table values */

+    uint32_t nCols = S->numCols;                 /* number of columns */

+

+    /* Input is in 12.20 format */

+    /* 12 bits for the table index */

+    /* Index value calculation */

+    rI = ((X & 0xFFF00000) >> 20);

+

+    /* Input is in 12.20 format */

+    /* 12 bits for the table index */

+    /* Index value calculation */

+    cI = ((Y & 0xFFF00000) >> 20);

+

+    /* Care taken for table outside boundary */

+    /* Returns zero output when values are outside table boundary */

+    if(rI < 0 || rI > (S->numRows - 1) || cI < 0 || cI > (S->numCols - 1))

+    {

+      return (0);

+    }

+

+    /* 20 bits for the fractional part */

+    /* xfract should be in 12.20 format */

+    xfract = (X & 0x000FFFFF);

+

+    /* Read two nearest output values from the index */

+    x1 = pYData[(rI) + nCols * (cI)];

+    x2 = pYData[(rI) + nCols * (cI) + 1u];

+

+

+    /* 20 bits for the fractional part */

+    /* yfract should be in 12.20 format */

+    yfract = (Y & 0x000FFFFF);

+

+    /* Read two nearest output values from the index */

+    y1 = pYData[(rI) + nCols * (cI + 1)];

+    y2 = pYData[(rI) + nCols * (cI + 1) + 1u];

+

+    /* Calculation of x1 * (1-xfract ) * (1-yfract) and acc is in 16.47 format */

+    out = ((x1 * (0xFFFFF - xfract)));

+    acc = (((q63_t) out * (0xFFFFF - yfract)));

+

+    /* x2 * (xfract) * (1-yfract)  in 2.22 and adding to acc */

+    out = ((x2 * (0xFFFFF - yfract)));

+    acc += (((q63_t) out * (xfract)));

+

+    /* y2 * (yfract) * (xfract)  in 2.22 and adding to acc */

+    out = ((y1 * (0xFFFFF - xfract)));

+    acc += (((q63_t) out * (yfract)));

+

+    /* y2 * (xfract) * (yfract)  in 2.22 and adding to acc */

+    out = ((y2 * (yfract)));

+    acc += (((q63_t) out * (xfract)));

+

+    /* acc in 16.47 format and down shift by 40 to convert to 1.7 format */

+    return (acc >> 40);

+

+  }

+

+  /**

+   * @} end of BilinearInterpolate group

+   */

+   

+

+//SMMLAR: a = round(a + x*y), keeping the top 32 bits of the 64-bit product

+#define multAcc_32x32_keep32_R(a, x, y) \

+    a = (q31_t) (((((q63_t) a) << 32) + ((q63_t) x * y) + 0x80000000LL ) >> 32)

+

+//SMMLSR: a = round(a - x*y), keeping the top 32 bits of the 64-bit product

+#define multSub_32x32_keep32_R(a, x, y) \

+    a = (q31_t) (((((q63_t) a) << 32) - ((q63_t) x * y) + 0x80000000LL ) >> 32)

+

+//SMMULR: a = round(x*y), keeping the top 32 bits of the 64-bit product

+#define mult_32x32_keep32_R(a, x, y) \

+    a = (q31_t) (((q63_t) x * y + 0x80000000LL ) >> 32)

+

+//SMMLA: a += top 32 bits of the 64-bit product x*y (no rounding)

+#define multAcc_32x32_keep32(a, x, y) \

+    a += (q31_t) (((q63_t) x * y) >> 32)

+

+//SMMLS: a -= top 32 bits of the 64-bit product x*y (no rounding)

+#define multSub_32x32_keep32(a, x, y) \

+    a -= (q31_t) (((q63_t) x * y) >> 32)

+

+//SMMUL: a = top 32 bits of the 64-bit product x*y (no rounding)

+#define mult_32x32_keep32(a, x, y) \

+    a = (q31_t) (((q63_t) x * y ) >> 32)

+

+

+#if defined ( __CC_ARM ) //Keil

+

+//Enter low optimization region - place directly above function definition

+    #ifdef ARM_MATH_CM4

+      #define LOW_OPTIMIZATION_ENTER \

+         _Pragma ("push")         \

+         _Pragma ("O1")

+    #else

+      #define LOW_OPTIMIZATION_ENTER 

+    #endif

+

+//Exit low optimization region - place directly after end of function definition

+    #ifdef ARM_MATH_CM4

+      #define LOW_OPTIMIZATION_EXIT \

+         _Pragma ("pop")

+    #else

+      #define LOW_OPTIMIZATION_EXIT  

+    #endif

+

+//Enter low optimization region - place directly above function definition

+  #define IAR_ONLY_LOW_OPTIMIZATION_ENTER

+

+//Exit low optimization region - place directly after end of function definition

+  #define IAR_ONLY_LOW_OPTIMIZATION_EXIT

+

+#elif defined(__ICCARM__) //IAR

+

+//Enter low optimization region - place directly above function definition

+    #ifdef ARM_MATH_CM4

+      #define LOW_OPTIMIZATION_ENTER \

+         _Pragma ("optimize=low")

+    #else

+      #define LOW_OPTIMIZATION_ENTER   

+    #endif

+

+//Exit low optimization region - place directly after end of function definition

+  #define LOW_OPTIMIZATION_EXIT

+

+//Enter low optimization region - place directly above function definition

+    #ifdef ARM_MATH_CM4

+      #define IAR_ONLY_LOW_OPTIMIZATION_ENTER \

+         _Pragma ("optimize=low")

+    #else

+      #define IAR_ONLY_LOW_OPTIMIZATION_ENTER   

+    #endif

+

+//Exit low optimization region - place directly after end of function definition

+  #define IAR_ONLY_LOW_OPTIMIZATION_EXIT

+

+#elif defined(__GNUC__)

+

+  #define LOW_OPTIMIZATION_ENTER __attribute__(( optimize("-O1") ))

+

+  #define LOW_OPTIMIZATION_EXIT

+

+  #define IAR_ONLY_LOW_OPTIMIZATION_ENTER

+

+  #define IAR_ONLY_LOW_OPTIMIZATION_EXIT

+

+#elif defined(__CSMC__)		// Cosmic

+

+#define LOW_OPTIMIZATION_ENTER

+#define LOW_OPTIMIZATION_EXIT

+#define IAR_ONLY_LOW_OPTIMIZATION_ENTER

+#define IAR_ONLY_LOW_OPTIMIZATION_EXIT

+

+#elif defined(__TASKING__)		// TASKING

+

+#define LOW_OPTIMIZATION_ENTER

+#define LOW_OPTIMIZATION_EXIT

+#define IAR_ONLY_LOW_OPTIMIZATION_ENTER

+#define IAR_ONLY_LOW_OPTIMIZATION_EXIT

+

+#endif

+

+

+#ifdef	__cplusplus

+}

+#endif

+

+

+#endif /* _ARM_MATH_H */

+

+/**

+ *

+ * End of file.

+ */

diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm0.h b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm0.h
new file mode 100644
index 0000000..1110d17
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm0.h
@@ -0,0 +1,740 @@
+/**************************************************************************//**

+ * @file     core_cm0.h

+ * @brief    CMSIS Cortex-M0 Core Peripheral Access Layer Header File

+ * @version  V4.10

+ * @date     18. March 2015

+ *

+ * @note

+ *

+ ******************************************************************************/

+/* Copyright (c) 2009 - 2015 ARM LIMITED

+

+   All rights reserved.

+   Redistribution and use in source and binary forms, with or without

+   modification, are permitted provided that the following conditions are met:

+   - Redistributions of source code must retain the above copyright

+     notice, this list of conditions and the following disclaimer.

+   - Redistributions in binary form must reproduce the above copyright

+     notice, this list of conditions and the following disclaimer in the

+     documentation and/or other materials provided with the distribution.

+   - Neither the name of ARM nor the names of its contributors may be used

+     to endorse or promote products derived from this software without

+     specific prior written permission.

+   *

+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"

+   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE

+   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE

+   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE

+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR

+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF

+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS

+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN

+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)

+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE

+   POSSIBILITY OF SUCH DAMAGE.

+   ---------------------------------------------------------------------------*/

+

+

+#if defined ( __ICCARM__ )

+ #pragma system_include  /* treat file as system include file for MISRA check */

+#endif

+

+#ifndef __CORE_CM0_H_GENERIC

+#define __CORE_CM0_H_GENERIC

+

+#ifdef __cplusplus

+ extern "C" {

+#endif

+

+/** \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions

+  CMSIS violates the following MISRA-C:2004 rules:

+

+   \li Required Rule 8.5, object/function definition in header file.<br>

+     Function definitions in header files are used to allow 'inlining'.

+

+   \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>

+     Unions are used for effective representation of core registers.

+

+   \li Advisory Rule 19.7, Function-like macro defined.<br>

+     Function-like macros are used to allow more efficient code.

+ */

+

+

+/*******************************************************************************

+ *                 CMSIS definitions

+ ******************************************************************************/

+/** \ingroup Cortex_M0

+  @{

+ */

+

+/*  CMSIS CM0 definitions */

+#define __CM0_CMSIS_VERSION_MAIN  (0x04)                                   /*!< [31:16] CMSIS HAL main version   */

+#define __CM0_CMSIS_VERSION_SUB   (0x00)                                   /*!< [15:0]  CMSIS HAL sub version    */

+#define __CM0_CMSIS_VERSION       ((__CM0_CMSIS_VERSION_MAIN << 16) | \

+                                    __CM0_CMSIS_VERSION_SUB          )     /*!< CMSIS HAL version number         */

+

+#define __CORTEX_M                (0x00)                                   /*!< Cortex-M Core                    */

+

+

+#if   defined ( __CC_ARM )

+  #define __ASM            __asm                                      /*!< asm keyword for ARM Compiler          */

+  #define __INLINE         __inline                                   /*!< inline keyword for ARM Compiler       */

+  #define __STATIC_INLINE  static __inline

+

+#elif defined ( __GNUC__ )

+  #define __ASM            __asm                                      /*!< asm keyword for GNU Compiler          */

+  #define __INLINE         inline                                     /*!< inline keyword for GNU Compiler       */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __ICCARM__ )

+  #define __ASM            __asm                                      /*!< asm keyword for IAR Compiler          */

+  #define __INLINE         inline                                     /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __TMS470__ )

+  #define __ASM            __asm                                      /*!< asm keyword for TI CCS Compiler       */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __TASKING__ )

+  #define __ASM            __asm                                      /*!< asm keyword for TASKING Compiler      */

+  #define __INLINE         inline                                     /*!< inline keyword for TASKING Compiler   */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __CSMC__ )

+  #define __packed

+  #define __ASM            _asm                                      /*!< asm keyword for COSMIC Compiler      */

+  #define __INLINE         inline                                    /*use -pc99 on compile line !< inline keyword for COSMIC Compiler   */

+  #define __STATIC_INLINE  static inline

+

+#endif

+

+/** __FPU_USED indicates whether an FPU is used or not.

+    This core does not support an FPU at all

+*/

+#define __FPU_USED       0

+

+#if defined ( __CC_ARM )

+  #if defined __TARGET_FPU_VFP

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __GNUC__ )

+  #if defined (__VFP_FP__) && !defined(__SOFTFP__)

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __ICCARM__ )

+  #if defined __ARMVFP__

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __TMS470__ )

+  #if defined __TI__VFP_SUPPORT____

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __TASKING__ )

+  #if defined __FPU_VFP__

+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __CSMC__ )		/* Cosmic */

+  #if ( __CSMC__ & 0x400)		// FPU present for parser

+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+#endif

+

+#include <stdint.h>                      /* standard types definitions                      */

+#include <core_cmInstr.h>                /* Core Instruction Access                         */

+#include <core_cmFunc.h>                 /* Core Function Access                            */

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __CORE_CM0_H_GENERIC */

+

+#ifndef __CMSIS_GENERIC

+

+#ifndef __CORE_CM0_H_DEPENDANT

+#define __CORE_CM0_H_DEPENDANT

+

+#ifdef __cplusplus

+ extern "C" {

+#endif

+

+/* check device defines and use defaults */

+#if defined __CHECK_DEVICE_DEFINES

+  #ifndef __CM0_REV

+    #define __CM0_REV               0x0000

+    #warning "__CM0_REV not defined in device header file; using default!"

+  #endif

+

+  #ifndef __NVIC_PRIO_BITS

+    #define __NVIC_PRIO_BITS          2

+    #warning "__NVIC_PRIO_BITS not defined in device header file; using default!"

+  #endif

+

+  #ifndef __Vendor_SysTickConfig

+    #define __Vendor_SysTickConfig    0

+    #warning "__Vendor_SysTickConfig not defined in device header file; using default!"

+  #endif

+#endif

+

+/* IO definitions (access restrictions to peripheral registers) */

+/**

+    \defgroup CMSIS_glob_defs CMSIS Global Defines

+

+    <strong>IO Type Qualifiers</strong> are used

+    \li to specify the access to peripheral variables.

+    \li for automatic generation of peripheral register debug information.

+*/

+#ifdef __cplusplus

+  #define   __I     volatile             /*!< Defines 'read only' permissions                 */

+#else

+  #define   __I     volatile const       /*!< Defines 'read only' permissions                 */

+#endif

+#define     __O     volatile             /*!< Defines 'write only' permissions                */

+#define     __IO    volatile             /*!< Defines 'read / write' permissions              */

+

+/*@} end of group Cortex_M0 */

+

+

+

+/*******************************************************************************

+ *                 Register Abstraction

+  Core Register contain:

+  - Core Register

+  - Core NVIC Register

+  - Core SCB Register

+  - Core SysTick Register

+ ******************************************************************************/

+/** \defgroup CMSIS_core_register Defines and Type Definitions

+    \brief Type definitions and defines for Cortex-M processor based devices.

+*/

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_CORE  Status and Control Registers

+    \brief  Core Register type definitions.

+  @{

+ */

+

+/** \brief  Union type to access the Application Program Status Register (APSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t _reserved0:28;              /*!< bit:  0..27  Reserved                           */

+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag       */

+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag          */

+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag           */

+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag       */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} APSR_Type;

+

+/* APSR Register Definitions */

+#define APSR_N_Pos                         31                                             /*!< APSR: N Position */

+#define APSR_N_Msk                         (1UL << APSR_N_Pos)                            /*!< APSR: N Mask */

+

+#define APSR_Z_Pos                         30                                             /*!< APSR: Z Position */

+#define APSR_Z_Msk                         (1UL << APSR_Z_Pos)                            /*!< APSR: Z Mask */

+

+#define APSR_C_Pos                         29                                             /*!< APSR: C Position */

+#define APSR_C_Msk                         (1UL << APSR_C_Pos)                            /*!< APSR: C Mask */

+

+#define APSR_V_Pos                         28                                             /*!< APSR: V Position */

+#define APSR_V_Msk                         (1UL << APSR_V_Pos)                            /*!< APSR: V Mask */

+

+

+/** \brief  Union type to access the Interrupt Program Status Register (IPSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */

+    uint32_t _reserved0:23;              /*!< bit:  9..31  Reserved                           */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} IPSR_Type;

+

+/* IPSR Register Definitions */

+#define IPSR_ISR_Pos                        0                                             /*!< IPSR: ISR Position */

+#define IPSR_ISR_Msk                       (0x1FFUL /*<< IPSR_ISR_Pos*/)                  /*!< IPSR: ISR Mask */

+

+

+/** \brief  Union type to access the Special-Purpose Program Status Registers (xPSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */

+    uint32_t _reserved0:15;              /*!< bit:  9..23  Reserved                           */

+    uint32_t T:1;                        /*!< bit:     24  Thumb bit        (read 0)          */

+    uint32_t _reserved1:3;               /*!< bit: 25..27  Reserved                           */

+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag       */

+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag          */

+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag           */

+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag       */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} xPSR_Type;

+

+/* xPSR Register Definitions */

+#define xPSR_N_Pos                         31                                             /*!< xPSR: N Position */

+#define xPSR_N_Msk                         (1UL << xPSR_N_Pos)                            /*!< xPSR: N Mask */

+

+#define xPSR_Z_Pos                         30                                             /*!< xPSR: Z Position */

+#define xPSR_Z_Msk                         (1UL << xPSR_Z_Pos)                            /*!< xPSR: Z Mask */

+

+#define xPSR_C_Pos                         29                                             /*!< xPSR: C Position */

+#define xPSR_C_Msk                         (1UL << xPSR_C_Pos)                            /*!< xPSR: C Mask */

+

+#define xPSR_V_Pos                         28                                             /*!< xPSR: V Position */

+#define xPSR_V_Msk                         (1UL << xPSR_V_Pos)                            /*!< xPSR: V Mask */

+

+#define xPSR_T_Pos                         24                                             /*!< xPSR: T Position */

+#define xPSR_T_Msk                         (1UL << xPSR_T_Pos)                            /*!< xPSR: T Mask */

+

+#define xPSR_ISR_Pos                        0                                             /*!< xPSR: ISR Position */

+#define xPSR_ISR_Msk                       (0x1FFUL /*<< xPSR_ISR_Pos*/)                  /*!< xPSR: ISR Mask */

+

+

+/** \brief  Union type to access the Control Registers (CONTROL).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t _reserved0:1;               /*!< bit:      0  Reserved                           */

+    uint32_t SPSEL:1;                    /*!< bit:      1  Stack to be used                   */

+    uint32_t _reserved1:30;              /*!< bit:  2..31  Reserved                           */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} CONTROL_Type;

+

+/* CONTROL Register Definitions */

+#define CONTROL_SPSEL_Pos                   1                                             /*!< CONTROL: SPSEL Position */

+#define CONTROL_SPSEL_Msk                  (1UL << CONTROL_SPSEL_Pos)                     /*!< CONTROL: SPSEL Mask */

+

+/*@} end of group CMSIS_CORE */

+

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_NVIC  Nested Vectored Interrupt Controller (NVIC)

+    \brief      Type definitions for the NVIC Registers

+  @{

+ */

+

+/** \brief  Structure type to access the Nested Vectored Interrupt Controller (NVIC).

+ */

+typedef struct

+{

+  __IO uint32_t ISER[1];                 /*!< Offset: 0x000 (R/W)  Interrupt Set Enable Register           */

+       uint32_t RESERVED0[31];

+  __IO uint32_t ICER[1];                 /*!< Offset: 0x080 (R/W)  Interrupt Clear Enable Register          */

+       uint32_t RSERVED1[31];            /* NOTE(review): identifier misspelled upstream ("RSERVED1") — kept as-is for CMSIS source compatibility */

+  __IO uint32_t ISPR[1];                 /*!< Offset: 0x100 (R/W)  Interrupt Set Pending Register           */

+       uint32_t RESERVED2[31];

+  __IO uint32_t ICPR[1];                 /*!< Offset: 0x180 (R/W)  Interrupt Clear Pending Register         */

+       uint32_t RESERVED3[31];

+       uint32_t RESERVED4[64];

+  __IO uint32_t IP[8];                   /*!< Offset: 0x300 (R/W)  Interrupt Priority Register              */

+}  NVIC_Type;

+

+/*@} end of group CMSIS_NVIC */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SCB     System Control Block (SCB)

+    \brief      Type definitions for the System Control Block Registers

+  @{

+ */

+

+/** \brief  Structure type to access the System Control Block (SCB).

+ */

+typedef struct

+{

+  __I  uint32_t CPUID;                   /*!< Offset: 0x000 (R/ )  CPUID Base Register                                   */

+  __IO uint32_t ICSR;                    /*!< Offset: 0x004 (R/W)  Interrupt Control and State Register                  */

+       uint32_t RESERVED0;

+  __IO uint32_t AIRCR;                   /*!< Offset: 0x00C (R/W)  Application Interrupt and Reset Control Register      */

+  __IO uint32_t SCR;                     /*!< Offset: 0x010 (R/W)  System Control Register                               */

+  __IO uint32_t CCR;                     /*!< Offset: 0x014 (R/W)  Configuration Control Register                        */

+       uint32_t RESERVED1;

+  __IO uint32_t SHP[2];                  /*!< Offset: 0x01C (R/W)  System Handlers Priority Registers. [0] is RESERVED   */

+  __IO uint32_t SHCSR;                   /*!< Offset: 0x024 (R/W)  System Handler Control and State Register             */

+} SCB_Type;

+

+/* SCB CPUID Register Definitions */

+#define SCB_CPUID_IMPLEMENTER_Pos          24                                             /*!< SCB CPUID: IMPLEMENTER Position */

+#define SCB_CPUID_IMPLEMENTER_Msk          (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos)          /*!< SCB CPUID: IMPLEMENTER Mask */

+

+#define SCB_CPUID_VARIANT_Pos              20                                             /*!< SCB CPUID: VARIANT Position */

+#define SCB_CPUID_VARIANT_Msk              (0xFUL << SCB_CPUID_VARIANT_Pos)               /*!< SCB CPUID: VARIANT Mask */

+

+#define SCB_CPUID_ARCHITECTURE_Pos         16                                             /*!< SCB CPUID: ARCHITECTURE Position */

+#define SCB_CPUID_ARCHITECTURE_Msk         (0xFUL << SCB_CPUID_ARCHITECTURE_Pos)          /*!< SCB CPUID: ARCHITECTURE Mask */

+

+#define SCB_CPUID_PARTNO_Pos                4                                             /*!< SCB CPUID: PARTNO Position */

+#define SCB_CPUID_PARTNO_Msk               (0xFFFUL << SCB_CPUID_PARTNO_Pos)              /*!< SCB CPUID: PARTNO Mask */

+

+#define SCB_CPUID_REVISION_Pos              0                                             /*!< SCB CPUID: REVISION Position */

+#define SCB_CPUID_REVISION_Msk             (0xFUL /*<< SCB_CPUID_REVISION_Pos*/)          /*!< SCB CPUID: REVISION Mask */

+

+/* SCB Interrupt Control State Register Definitions */

+#define SCB_ICSR_NMIPENDSET_Pos            31                                             /*!< SCB ICSR: NMIPENDSET Position */

+#define SCB_ICSR_NMIPENDSET_Msk            (1UL << SCB_ICSR_NMIPENDSET_Pos)               /*!< SCB ICSR: NMIPENDSET Mask */

+

+#define SCB_ICSR_PENDSVSET_Pos             28                                             /*!< SCB ICSR: PENDSVSET Position */

+#define SCB_ICSR_PENDSVSET_Msk             (1UL << SCB_ICSR_PENDSVSET_Pos)                /*!< SCB ICSR: PENDSVSET Mask */

+

+#define SCB_ICSR_PENDSVCLR_Pos             27                                             /*!< SCB ICSR: PENDSVCLR Position */

+#define SCB_ICSR_PENDSVCLR_Msk             (1UL << SCB_ICSR_PENDSVCLR_Pos)                /*!< SCB ICSR: PENDSVCLR Mask */

+

+#define SCB_ICSR_PENDSTSET_Pos             26                                             /*!< SCB ICSR: PENDSTSET Position */

+#define SCB_ICSR_PENDSTSET_Msk             (1UL << SCB_ICSR_PENDSTSET_Pos)                /*!< SCB ICSR: PENDSTSET Mask */

+

+#define SCB_ICSR_PENDSTCLR_Pos             25                                             /*!< SCB ICSR: PENDSTCLR Position */

+#define SCB_ICSR_PENDSTCLR_Msk             (1UL << SCB_ICSR_PENDSTCLR_Pos)                /*!< SCB ICSR: PENDSTCLR Mask */

+

+#define SCB_ICSR_ISRPREEMPT_Pos            23                                             /*!< SCB ICSR: ISRPREEMPT Position */

+#define SCB_ICSR_ISRPREEMPT_Msk            (1UL << SCB_ICSR_ISRPREEMPT_Pos)               /*!< SCB ICSR: ISRPREEMPT Mask */

+

+#define SCB_ICSR_ISRPENDING_Pos            22                                             /*!< SCB ICSR: ISRPENDING Position */

+#define SCB_ICSR_ISRPENDING_Msk            (1UL << SCB_ICSR_ISRPENDING_Pos)               /*!< SCB ICSR: ISRPENDING Mask */

+

+#define SCB_ICSR_VECTPENDING_Pos           12                                             /*!< SCB ICSR: VECTPENDING Position */

+#define SCB_ICSR_VECTPENDING_Msk           (0x1FFUL << SCB_ICSR_VECTPENDING_Pos)          /*!< SCB ICSR: VECTPENDING Mask */

+

+#define SCB_ICSR_VECTACTIVE_Pos             0                                             /*!< SCB ICSR: VECTACTIVE Position */

+#define SCB_ICSR_VECTACTIVE_Msk            (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/)       /*!< SCB ICSR: VECTACTIVE Mask */

+

+/* SCB Application Interrupt and Reset Control Register Definitions */

+#define SCB_AIRCR_VECTKEY_Pos              16                                             /*!< SCB AIRCR: VECTKEY Position */

+#define SCB_AIRCR_VECTKEY_Msk              (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos)            /*!< SCB AIRCR: VECTKEY Mask */

+

+#define SCB_AIRCR_VECTKEYSTAT_Pos          16                                             /*!< SCB AIRCR: VECTKEYSTAT Position */

+#define SCB_AIRCR_VECTKEYSTAT_Msk          (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos)        /*!< SCB AIRCR: VECTKEYSTAT Mask */

+

+#define SCB_AIRCR_ENDIANESS_Pos            15                                             /*!< SCB AIRCR: ENDIANESS Position */

+#define SCB_AIRCR_ENDIANESS_Msk            (1UL << SCB_AIRCR_ENDIANESS_Pos)               /*!< SCB AIRCR: ENDIANESS Mask */

+

+#define SCB_AIRCR_SYSRESETREQ_Pos           2                                             /*!< SCB AIRCR: SYSRESETREQ Position */

+#define SCB_AIRCR_SYSRESETREQ_Msk          (1UL << SCB_AIRCR_SYSRESETREQ_Pos)             /*!< SCB AIRCR: SYSRESETREQ Mask */

+

+#define SCB_AIRCR_VECTCLRACTIVE_Pos         1                                             /*!< SCB AIRCR: VECTCLRACTIVE Position */

+#define SCB_AIRCR_VECTCLRACTIVE_Msk        (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos)           /*!< SCB AIRCR: VECTCLRACTIVE Mask */

+

+/* SCB System Control Register Definitions */

+#define SCB_SCR_SEVONPEND_Pos               4                                             /*!< SCB SCR: SEVONPEND Position */

+#define SCB_SCR_SEVONPEND_Msk              (1UL << SCB_SCR_SEVONPEND_Pos)                 /*!< SCB SCR: SEVONPEND Mask */

+

+#define SCB_SCR_SLEEPDEEP_Pos               2                                             /*!< SCB SCR: SLEEPDEEP Position */

+#define SCB_SCR_SLEEPDEEP_Msk              (1UL << SCB_SCR_SLEEPDEEP_Pos)                 /*!< SCB SCR: SLEEPDEEP Mask */

+

+#define SCB_SCR_SLEEPONEXIT_Pos             1                                             /*!< SCB SCR: SLEEPONEXIT Position */

+#define SCB_SCR_SLEEPONEXIT_Msk            (1UL << SCB_SCR_SLEEPONEXIT_Pos)               /*!< SCB SCR: SLEEPONEXIT Mask */

+

+/* SCB Configuration Control Register Definitions */

+#define SCB_CCR_STKALIGN_Pos                9                                             /*!< SCB CCR: STKALIGN Position */

+#define SCB_CCR_STKALIGN_Msk               (1UL << SCB_CCR_STKALIGN_Pos)                  /*!< SCB CCR: STKALIGN Mask */

+

+#define SCB_CCR_UNALIGN_TRP_Pos             3                                             /*!< SCB CCR: UNALIGN_TRP Position */

+#define SCB_CCR_UNALIGN_TRP_Msk            (1UL << SCB_CCR_UNALIGN_TRP_Pos)               /*!< SCB CCR: UNALIGN_TRP Mask */

+

+/* SCB System Handler Control and State Register Definitions */

+#define SCB_SHCSR_SVCALLPENDED_Pos         15                                             /*!< SCB SHCSR: SVCALLPENDED Position */

+#define SCB_SHCSR_SVCALLPENDED_Msk         (1UL << SCB_SHCSR_SVCALLPENDED_Pos)            /*!< SCB SHCSR: SVCALLPENDED Mask */

+

+/*@} end of group CMSIS_SCB */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SysTick     System Tick Timer (SysTick)

+    \brief      Type definitions for the System Timer Registers.

+  @{

+ */

+

+/** \brief  Structure type to access the System Timer (SysTick).

+ */

+typedef struct

+{

+  __IO uint32_t CTRL;                    /*!< Offset: 0x000 (R/W)  SysTick Control and Status Register */

+  __IO uint32_t LOAD;                    /*!< Offset: 0x004 (R/W)  SysTick Reload Value Register       */

+  __IO uint32_t VAL;                     /*!< Offset: 0x008 (R/W)  SysTick Current Value Register      */

+  __I  uint32_t CALIB;                   /*!< Offset: 0x00C (R/ )  SysTick Calibration Register        */

+} SysTick_Type;

+

+/* SysTick Control / Status Register Definitions */

+#define SysTick_CTRL_COUNTFLAG_Pos         16                                             /*!< SysTick CTRL: COUNTFLAG Position */

+#define SysTick_CTRL_COUNTFLAG_Msk         (1UL << SysTick_CTRL_COUNTFLAG_Pos)            /*!< SysTick CTRL: COUNTFLAG Mask */

+

+#define SysTick_CTRL_CLKSOURCE_Pos          2                                             /*!< SysTick CTRL: CLKSOURCE Position */

+#define SysTick_CTRL_CLKSOURCE_Msk         (1UL << SysTick_CTRL_CLKSOURCE_Pos)            /*!< SysTick CTRL: CLKSOURCE Mask */

+

+#define SysTick_CTRL_TICKINT_Pos            1                                             /*!< SysTick CTRL: TICKINT Position */

+#define SysTick_CTRL_TICKINT_Msk           (1UL << SysTick_CTRL_TICKINT_Pos)              /*!< SysTick CTRL: TICKINT Mask */

+

+#define SysTick_CTRL_ENABLE_Pos             0                                             /*!< SysTick CTRL: ENABLE Position */

+#define SysTick_CTRL_ENABLE_Msk            (1UL /*<< SysTick_CTRL_ENABLE_Pos*/)           /*!< SysTick CTRL: ENABLE Mask */

+

+/* SysTick Reload Register Definitions */

+#define SysTick_LOAD_RELOAD_Pos             0                                             /*!< SysTick LOAD: RELOAD Position */

+#define SysTick_LOAD_RELOAD_Msk            (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/)    /*!< SysTick LOAD: RELOAD Mask */

+

+/* SysTick Current Register Definitions */

+#define SysTick_VAL_CURRENT_Pos             0                                             /*!< SysTick VAL: CURRENT Position */

+#define SysTick_VAL_CURRENT_Msk            (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/)    /*!< SysTick VAL: CURRENT Mask */

+

+/* SysTick Calibration Register Definitions */

+#define SysTick_CALIB_NOREF_Pos            31                                             /*!< SysTick CALIB: NOREF Position */

+#define SysTick_CALIB_NOREF_Msk            (1UL << SysTick_CALIB_NOREF_Pos)               /*!< SysTick CALIB: NOREF Mask */

+

+#define SysTick_CALIB_SKEW_Pos             30                                             /*!< SysTick CALIB: SKEW Position */

+#define SysTick_CALIB_SKEW_Msk             (1UL << SysTick_CALIB_SKEW_Pos)                /*!< SysTick CALIB: SKEW Mask */

+

+#define SysTick_CALIB_TENMS_Pos             0                                             /*!< SysTick CALIB: TENMS Position */

+#define SysTick_CALIB_TENMS_Msk            (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/)    /*!< SysTick CALIB: TENMS Mask */

+

+/*@} end of group CMSIS_SysTick */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_CoreDebug       Core Debug Registers (CoreDebug)

+    \brief      Cortex-M0 Core Debug Registers (DCB registers, SHCSR, and DFSR)

+                are only accessible over DAP and not via processor. Therefore

+                they are not covered by the Cortex-M0 header file.

+  @{

+ */

+/*@} end of group CMSIS_CoreDebug */

+

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_core_base     Core Definitions

+    \brief      Definitions for base addresses, unions, and structures.

+  @{

+ */

+

+/* Memory mapping of Cortex-M0 Hardware */

+#define SCS_BASE            (0xE000E000UL)                            /*!< System Control Space Base Address */

+#define SysTick_BASE        (SCS_BASE +  0x0010UL)                    /*!< SysTick Base Address              */

+#define NVIC_BASE           (SCS_BASE +  0x0100UL)                    /*!< NVIC Base Address                 */

+#define SCB_BASE            (SCS_BASE +  0x0D00UL)                    /*!< System Control Block Base Address */

+

+#define SCB                 ((SCB_Type       *)     SCB_BASE      )   /*!< SCB configuration struct           */

+#define SysTick             ((SysTick_Type   *)     SysTick_BASE  )   /*!< SysTick configuration struct       */

+#define NVIC                ((NVIC_Type      *)     NVIC_BASE     )   /*!< NVIC configuration struct          */

+

+

+/*@} */

+

+

+

+/*******************************************************************************

+ *                Hardware Abstraction Layer

+  Core Function Interface contains:

+  - Core NVIC Functions

+  - Core SysTick Functions

+  - Core Register Access Functions

+ ******************************************************************************/

+/** \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference

+*/

+

+

+

+/* ##########################   NVIC functions  #################################### */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_NVICFunctions NVIC Functions

+    \brief      Functions that manage interrupts and exceptions via the NVIC.

+    @{

+ */

+

+/* Interrupt Priorities are WORD accessible only under ARMv6M                   */

+/* The following MACROS handle generation of the register offset and byte masks */

+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)

+#define _SHP_IDX(IRQn)           ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >>    2UL)      )

+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )

+

+

+/** \brief  Enable External Interrupt

+

+    The function enables a device-specific interrupt in the NVIC interrupt controller.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_EnableIRQ(IRQn_Type IRQn)

+{

+  NVIC->ISER[0] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Disable External Interrupt

+

+    The function disables a device-specific interrupt in the NVIC interrupt controller.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_DisableIRQ(IRQn_Type IRQn)

+{

+  NVIC->ICER[0] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Get Pending Interrupt

+

+    The function reads the pending register in the NVIC and returns the pending bit

+    for the specified interrupt.

+

+    \param [in]      IRQn  Interrupt number.

+

+    \return             0  Interrupt status is not pending.

+    \return             1  Interrupt status is pending.

+ */

+__STATIC_INLINE uint32_t NVIC_GetPendingIRQ(IRQn_Type IRQn)

+{

+  return((uint32_t)(((NVIC->ISPR[0] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));

+}

+

+

+/** \brief  Set Pending Interrupt

+

+    The function sets the pending bit of an external interrupt.

+

+    \param [in]      IRQn  Interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_SetPendingIRQ(IRQn_Type IRQn)

+{

+  NVIC->ISPR[0] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Clear Pending Interrupt

+

+    The function clears the pending bit of an external interrupt.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_ClearPendingIRQ(IRQn_Type IRQn)

+{

+  NVIC->ICPR[0] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Set Interrupt Priority

+

+    The function sets the priority of an interrupt.

+

+    \note The priority cannot be set for every core interrupt.

+

+    \param [in]      IRQn  Interrupt number.

+    \param [in]  priority  Priority to set.

+ */

+__STATIC_INLINE void NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)

+{

+  if((int32_t)(IRQn) < 0) {

+    SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |

+       (((priority << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));

+  }

+  else {

+    NVIC->IP[_IP_IDX(IRQn)]  = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)]  & ~(0xFFUL << _BIT_SHIFT(IRQn))) |

+       (((priority << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));

+  }

+}

+

+

+/** \brief  Get Interrupt Priority

+

+    The function reads the priority of an interrupt. The interrupt

+    number can be positive to specify an external (device specific)

+    interrupt, or negative to specify an internal (core) interrupt.

+

+

+    \param [in]   IRQn  Interrupt number.

+    \return             Interrupt Priority. Value is aligned automatically to the implemented

+                        priority bits of the microcontroller.

+ */

+__STATIC_INLINE uint32_t NVIC_GetPriority(IRQn_Type IRQn)

+{

+

+  if((int32_t)(IRQn) < 0) {

+    return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8 - __NVIC_PRIO_BITS)));

+  }

+  else {

+    return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8 - __NVIC_PRIO_BITS)));

+  }

+}

+

+

+/** \brief  System Reset

+

+    The function initiates a system reset request to reset the MCU.

+ */

+__STATIC_INLINE void NVIC_SystemReset(void)

+{

+  __DSB();                                                     /* Ensure all outstanding memory accesses included

+                                                                  buffered write are completed before reset */

+  SCB->AIRCR  = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |

+                 SCB_AIRCR_SYSRESETREQ_Msk);

+  __DSB();                                                     /* Ensure completion of memory access */

+  while(1) { __NOP(); }                                        /* wait until reset */

+}

+

+/*@} end of CMSIS_Core_NVICFunctions */

+

+

+

+/* ##################################    SysTick function  ############################################ */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_SysTickFunctions SysTick Functions

+    \brief      Functions that configure the System.

+  @{

+ */

+

+#if (__Vendor_SysTickConfig == 0)

+

+/** \brief  System Tick Configuration

+

+    The function initializes the System Timer and its interrupt, and starts the System Tick Timer.

+    Counter is in free running mode to generate periodic interrupts.

+

+    \param [in]  ticks  Number of ticks between two interrupts.

+

+    \return          0  Function succeeded.

+    \return          1  Function failed.

+

+    \note     When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the

+    function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>

+    must contain a vendor-specific implementation of this function.

+

+ */

+__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)

+{

+  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) { return (1UL); }    /* Reload value impossible */

+

+  SysTick->LOAD  = (uint32_t)(ticks - 1UL);                         /* set reload register */

+  NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */

+  SysTick->VAL   = 0UL;                                             /* Load the SysTick Counter Value */

+  SysTick->CTRL  = SysTick_CTRL_CLKSOURCE_Msk |

+                   SysTick_CTRL_TICKINT_Msk   |

+                   SysTick_CTRL_ENABLE_Msk;                         /* Enable SysTick IRQ and SysTick Timer */

+  return (0UL);                                                     /* Function successful */

+}

+

+#endif

+

+/*@} end of CMSIS_Core_SysTickFunctions */

+

+

+

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __CORE_CM0_H_DEPENDANT */

+

+#endif /* __CMSIS_GENERIC */

diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm0plus.h b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm0plus.h
new file mode 100644
index 0000000..62e914b
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm0plus.h
@@ -0,0 +1,854 @@
+/**************************************************************************//**

+ * @file     core_cm0plus.h

+ * @brief    CMSIS Cortex-M0+ Core Peripheral Access Layer Header File

+ * @version  V4.10

+ * @date     18. March 2015

+ *

+ * @note

+ *

+ ******************************************************************************/

+/* Copyright (c) 2009 - 2015 ARM LIMITED

+

+   All rights reserved.

+   Redistribution and use in source and binary forms, with or without

+   modification, are permitted provided that the following conditions are met:

+   - Redistributions of source code must retain the above copyright

+     notice, this list of conditions and the following disclaimer.

+   - Redistributions in binary form must reproduce the above copyright

+     notice, this list of conditions and the following disclaimer in the

+     documentation and/or other materials provided with the distribution.

+   - Neither the name of ARM nor the names of its contributors may be used

+     to endorse or promote products derived from this software without

+     specific prior written permission.

+   *

+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"

+   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE

+   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE

+   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE

+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR

+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF

+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS

+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN

+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)

+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE

+   POSSIBILITY OF SUCH DAMAGE.

+   ---------------------------------------------------------------------------*/

+

+

+#if defined ( __ICCARM__ )

+ #pragma system_include  /* treat file as system include file for MISRA check */

+#endif

+

+#ifndef __CORE_CM0PLUS_H_GENERIC

+#define __CORE_CM0PLUS_H_GENERIC

+

+#ifdef __cplusplus

+ extern "C" {

+#endif

+

+/** \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions

+  CMSIS violates the following MISRA-C:2004 rules:

+

+   \li Required Rule 8.5, object/function definition in header file.<br>

+     Function definitions in header files are used to allow 'inlining'.

+

+   \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>

+     Unions are used for effective representation of core registers.

+

+   \li Advisory Rule 19.7, Function-like macro defined.<br>

+     Function-like macros are used to allow more efficient code.

+ */

+

+

+/*******************************************************************************

+ *                 CMSIS definitions

+ ******************************************************************************/

+/** \ingroup Cortex-M0+

+  @{

+ */

+

+/*  CMSIS CM0P definitions */

+#define __CM0PLUS_CMSIS_VERSION_MAIN (0x04)                                /*!< [31:16] CMSIS HAL main version   */

+#define __CM0PLUS_CMSIS_VERSION_SUB  (0x00)                                /*!< [15:0]  CMSIS HAL sub version    */

+#define __CM0PLUS_CMSIS_VERSION      ((__CM0PLUS_CMSIS_VERSION_MAIN << 16) | \

+                                       __CM0PLUS_CMSIS_VERSION_SUB)        /*!< CMSIS HAL version number         */

+

+#define __CORTEX_M                (0x00)                                   /*!< Cortex-M Core                    */

+

+

+#if   defined ( __CC_ARM )

+  #define __ASM            __asm                                      /*!< asm keyword for ARM Compiler          */

+  #define __INLINE         __inline                                   /*!< inline keyword for ARM Compiler       */

+  #define __STATIC_INLINE  static __inline

+

+#elif defined ( __GNUC__ )

+  #define __ASM            __asm                                      /*!< asm keyword for GNU Compiler          */

+  #define __INLINE         inline                                     /*!< inline keyword for GNU Compiler       */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __ICCARM__ )

+  #define __ASM            __asm                                      /*!< asm keyword for IAR Compiler          */

+  #define __INLINE         inline                                     /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __TMS470__ )

+  #define __ASM            __asm                                      /*!< asm keyword for TI CCS Compiler       */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __TASKING__ )

+  #define __ASM            __asm                                      /*!< asm keyword for TASKING Compiler      */

+  #define __INLINE         inline                                     /*!< inline keyword for TASKING Compiler   */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __CSMC__ )

+  #define __packed

+  #define __ASM            _asm                                      /*!< asm keyword for COSMIC Compiler      */

+  #define __INLINE         inline                                    /*use -pc99 on compile line !< inline keyword for COSMIC Compiler   */

+  #define __STATIC_INLINE  static inline

+

+#endif

+

+/** __FPU_USED indicates whether an FPU is used or not.

+    This core does not support an FPU at all

+*/

+#define __FPU_USED       0

+

+#if defined ( __CC_ARM )

+  #if defined __TARGET_FPU_VFP

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __GNUC__ )

+  #if defined (__VFP_FP__) && !defined(__SOFTFP__)

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __ICCARM__ )

+  #if defined __ARMVFP__

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __TMS470__ )

+  #if defined __TI__VFP_SUPPORT____

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __TASKING__ )

+  #if defined __FPU_VFP__

+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __CSMC__ )		/* Cosmic */

+  #if ( __CSMC__ & 0x400)		// FPU present for parser

+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+#endif

+

+#include <stdint.h>                      /* standard types definitions                      */

+#include <core_cmInstr.h>                /* Core Instruction Access                         */

+#include <core_cmFunc.h>                 /* Core Function Access                            */

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __CORE_CM0PLUS_H_GENERIC */

+

+#ifndef __CMSIS_GENERIC

+

+#ifndef __CORE_CM0PLUS_H_DEPENDANT

+#define __CORE_CM0PLUS_H_DEPENDANT

+

+#ifdef __cplusplus

+ extern "C" {

+#endif

+

+/* check device defines and use defaults */

+#if defined __CHECK_DEVICE_DEFINES

+  #ifndef __CM0PLUS_REV

+    #define __CM0PLUS_REV             0x0000

+    #warning "__CM0PLUS_REV not defined in device header file; using default!"

+  #endif

+

+  #ifndef __MPU_PRESENT

+    #define __MPU_PRESENT             0

+    #warning "__MPU_PRESENT not defined in device header file; using default!"

+  #endif

+

+  #ifndef __VTOR_PRESENT

+    #define __VTOR_PRESENT            0

+    #warning "__VTOR_PRESENT not defined in device header file; using default!"

+  #endif

+

+  #ifndef __NVIC_PRIO_BITS

+    #define __NVIC_PRIO_BITS          2

+    #warning "__NVIC_PRIO_BITS not defined in device header file; using default!"

+  #endif

+

+  #ifndef __Vendor_SysTickConfig

+    #define __Vendor_SysTickConfig    0

+    #warning "__Vendor_SysTickConfig not defined in device header file; using default!"

+  #endif

+#endif

+

+/* IO definitions (access restrictions to peripheral registers) */

+/**

+    \defgroup CMSIS_glob_defs CMSIS Global Defines

+

+    <strong>IO Type Qualifiers</strong> are used

+    \li to specify the access to peripheral variables.

+    \li for automatic generation of peripheral register debug information.

+*/

+#ifdef __cplusplus

+  #define   __I     volatile             /*!< Defines 'read only' permissions                 */

+#else

+  #define   __I     volatile const       /*!< Defines 'read only' permissions                 */

+#endif

+#define     __O     volatile             /*!< Defines 'write only' permissions                */

+#define     __IO    volatile             /*!< Defines 'read / write' permissions              */

+

+/*@} end of group Cortex-M0+ */

+

+

+

+/*******************************************************************************

+ *                 Register Abstraction

+  Core Register contain:

+  - Core Register

+  - Core NVIC Register

+  - Core SCB Register

+  - Core SysTick Register

+  - Core MPU Register

+ ******************************************************************************/

+/** \defgroup CMSIS_core_register Defines and Type Definitions

+    \brief Type definitions and defines for Cortex-M processor based devices.

+*/

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_CORE  Status and Control Registers

+    \brief  Core Register type definitions.

+  @{

+ */

+

+/** \brief  Union type to access the Application Program Status Register (APSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t _reserved0:28;              /*!< bit:  0..27  Reserved                           */

+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag       */

+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag          */

+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag           */

+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag       */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} APSR_Type;

+

+/* APSR Register Definitions */

+#define APSR_N_Pos                         31                                             /*!< APSR: N Position */

+#define APSR_N_Msk                         (1UL << APSR_N_Pos)                            /*!< APSR: N Mask */

+

+#define APSR_Z_Pos                         30                                             /*!< APSR: Z Position */

+#define APSR_Z_Msk                         (1UL << APSR_Z_Pos)                            /*!< APSR: Z Mask */

+

+#define APSR_C_Pos                         29                                             /*!< APSR: C Position */

+#define APSR_C_Msk                         (1UL << APSR_C_Pos)                            /*!< APSR: C Mask */

+

+#define APSR_V_Pos                         28                                             /*!< APSR: V Position */

+#define APSR_V_Msk                         (1UL << APSR_V_Pos)                            /*!< APSR: V Mask */

+

+

+/** \brief  Union type to access the Interrupt Program Status Register (IPSR).

+ */

+typedef union

+{

+  /* .b.ISR is the exception number of the currently executing handler (9 bits). */

+  struct

+  {
+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */
+    uint32_t _reserved0:23;              /*!< bit:  9..31  Reserved                           */
+  } b;                                   /*!< Structure used for bit  access                  */
+  uint32_t w;                            /*!< Type      used for word access                  */
+} IPSR_Type;

+

+/* IPSR Register Definitions */

+#define IPSR_ISR_Pos                        0                                             /*!< IPSR: ISR Position */

+#define IPSR_ISR_Msk                       (0x1FFUL /*<< IPSR_ISR_Pos*/)                  /*!< IPSR: ISR Mask */

+

+

+/** \brief  Union type to access the Special-Purpose Program Status Registers (xPSR).

+ */

+typedef union

+{

+  /* Composite view combining the IPSR exception number, the Thumb bit and the
+     APSR condition flags in a single register image. */

+  struct

+  {
+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */
+    uint32_t _reserved0:15;              /*!< bit:  9..23  Reserved                           */
+    uint32_t T:1;                        /*!< bit:     24  Thumb bit        (read 0)          */
+    uint32_t _reserved1:3;               /*!< bit: 25..27  Reserved                           */
+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag       */
+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag          */
+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag           */
+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag       */
+  } b;                                   /*!< Structure used for bit  access                  */
+  uint32_t w;                            /*!< Type      used for word access                  */
+} xPSR_Type;

+

+/* xPSR Register Definitions */

+#define xPSR_N_Pos                         31                                             /*!< xPSR: N Position */

+#define xPSR_N_Msk                         (1UL << xPSR_N_Pos)                            /*!< xPSR: N Mask */

+

+#define xPSR_Z_Pos                         30                                             /*!< xPSR: Z Position */

+#define xPSR_Z_Msk                         (1UL << xPSR_Z_Pos)                            /*!< xPSR: Z Mask */

+

+#define xPSR_C_Pos                         29                                             /*!< xPSR: C Position */

+#define xPSR_C_Msk                         (1UL << xPSR_C_Pos)                            /*!< xPSR: C Mask */

+

+#define xPSR_V_Pos                         28                                             /*!< xPSR: V Position */

+#define xPSR_V_Msk                         (1UL << xPSR_V_Pos)                            /*!< xPSR: V Mask */

+

+#define xPSR_T_Pos                         24                                             /*!< xPSR: T Position */

+#define xPSR_T_Msk                         (1UL << xPSR_T_Pos)                            /*!< xPSR: T Mask */

+

+#define xPSR_ISR_Pos                        0                                             /*!< xPSR: ISR Position */

+#define xPSR_ISR_Msk                       (0x1FFUL /*<< xPSR_ISR_Pos*/)                  /*!< xPSR: ISR Mask */

+

+

+/** \brief  Union type to access the Control Registers (CONTROL).

+ */

+typedef union

+{

+  /* nPRIV: Thread-mode privilege level; SPSEL: which stack pointer is in use. */

+  struct

+  {
+    uint32_t nPRIV:1;                    /*!< bit:      0  Execution privilege in Thread mode */
+    uint32_t SPSEL:1;                    /*!< bit:      1  Stack to be used                   */
+    uint32_t _reserved1:30;              /*!< bit:  2..31  Reserved                           */
+  } b;                                   /*!< Structure used for bit  access                  */
+  uint32_t w;                            /*!< Type      used for word access                  */
+} CONTROL_Type;

+

+/* CONTROL Register Definitions */

+#define CONTROL_SPSEL_Pos                   1                                             /*!< CONTROL: SPSEL Position */

+#define CONTROL_SPSEL_Msk                  (1UL << CONTROL_SPSEL_Pos)                     /*!< CONTROL: SPSEL Mask */

+

+#define CONTROL_nPRIV_Pos                   0                                             /*!< CONTROL: nPRIV Position */

+#define CONTROL_nPRIV_Msk                  (1UL /*<< CONTROL_nPRIV_Pos*/)                 /*!< CONTROL: nPRIV Mask */

+

+/*@} end of group CMSIS_CORE */

+

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_NVIC  Nested Vectored Interrupt Controller (NVIC)

+    \brief      Type definitions for the NVIC Registers

+  @{

+ */

+

+/** \brief  Structure type to access the Nested Vectored Interrupt Controller (NVIC).

+ */

+typedef struct

+{
+  /* Only element [0] of each enable/pending bank exists on this core (at most
+     32 external interrupts); the RESERVED arrays pad the address gaps so the
+     register offsets noted below line up with the hardware map. */
+  __IO uint32_t ISER[1];                 /*!< Offset: 0x000 (R/W)  Interrupt Set Enable Register           */
+       uint32_t RESERVED0[31];
+  __IO uint32_t ICER[1];                 /*!< Offset: 0x080 (R/W)  Interrupt Clear Enable Register          */
+  /* NOTE(review): "RSERVED1" (missing E) appears to be an upstream CMSIS typo;
+     kept as-is so source compatibility with vendor code is preserved. */
+       uint32_t RSERVED1[31];
+  __IO uint32_t ISPR[1];                 /*!< Offset: 0x100 (R/W)  Interrupt Set Pending Register           */
+       uint32_t RESERVED2[31];
+  __IO uint32_t ICPR[1];                 /*!< Offset: 0x180 (R/W)  Interrupt Clear Pending Register         */
+       uint32_t RESERVED3[31];
+       uint32_t RESERVED4[64];
+  __IO uint32_t IP[8];                   /*!< Offset: 0x300 (R/W)  Interrupt Priority Register              */
+}  NVIC_Type;

+

+/*@} end of group CMSIS_NVIC */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SCB     System Control Block (SCB)

+    \brief      Type definitions for the System Control Block Registers

+  @{

+ */

+

+/** \brief  Structure type to access the System Control Block (SCB).

+ */

+typedef struct

+{
+  /* VTOR is implemented only when __VTOR_PRESENT is 1; otherwise that slot is
+     reserved padding so the offsets of the following registers are unchanged. */
+  __I  uint32_t CPUID;                   /*!< Offset: 0x000 (R/ )  CPUID Base Register                                   */
+  __IO uint32_t ICSR;                    /*!< Offset: 0x004 (R/W)  Interrupt Control and State Register                  */
+#if (__VTOR_PRESENT == 1)
+  __IO uint32_t VTOR;                    /*!< Offset: 0x008 (R/W)  Vector Table Offset Register                          */
+#else
+       uint32_t RESERVED0;
+#endif
+  __IO uint32_t AIRCR;                   /*!< Offset: 0x00C (R/W)  Application Interrupt and Reset Control Register      */
+  __IO uint32_t SCR;                     /*!< Offset: 0x010 (R/W)  System Control Register                               */
+  __IO uint32_t CCR;                     /*!< Offset: 0x014 (R/W)  Configuration Control Register                        */
+       uint32_t RESERVED1;
+  __IO uint32_t SHP[2];                  /*!< Offset: 0x01C (R/W)  System Handlers Priority Registers. [0] is RESERVED   */
+  __IO uint32_t SHCSR;                   /*!< Offset: 0x024 (R/W)  System Handler Control and State Register             */
+} SCB_Type;

+

+/* SCB CPUID Register Definitions */

+#define SCB_CPUID_IMPLEMENTER_Pos          24                                             /*!< SCB CPUID: IMPLEMENTER Position */

+#define SCB_CPUID_IMPLEMENTER_Msk          (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos)          /*!< SCB CPUID: IMPLEMENTER Mask */

+

+#define SCB_CPUID_VARIANT_Pos              20                                             /*!< SCB CPUID: VARIANT Position */

+#define SCB_CPUID_VARIANT_Msk              (0xFUL << SCB_CPUID_VARIANT_Pos)               /*!< SCB CPUID: VARIANT Mask */

+

+#define SCB_CPUID_ARCHITECTURE_Pos         16                                             /*!< SCB CPUID: ARCHITECTURE Position */

+#define SCB_CPUID_ARCHITECTURE_Msk         (0xFUL << SCB_CPUID_ARCHITECTURE_Pos)          /*!< SCB CPUID: ARCHITECTURE Mask */

+

+#define SCB_CPUID_PARTNO_Pos                4                                             /*!< SCB CPUID: PARTNO Position */

+#define SCB_CPUID_PARTNO_Msk               (0xFFFUL << SCB_CPUID_PARTNO_Pos)              /*!< SCB CPUID: PARTNO Mask */

+

+#define SCB_CPUID_REVISION_Pos              0                                             /*!< SCB CPUID: REVISION Position */

+#define SCB_CPUID_REVISION_Msk             (0xFUL /*<< SCB_CPUID_REVISION_Pos*/)          /*!< SCB CPUID: REVISION Mask */

+

+/* SCB Interrupt Control State Register Definitions */

+#define SCB_ICSR_NMIPENDSET_Pos            31                                             /*!< SCB ICSR: NMIPENDSET Position */

+#define SCB_ICSR_NMIPENDSET_Msk            (1UL << SCB_ICSR_NMIPENDSET_Pos)               /*!< SCB ICSR: NMIPENDSET Mask */

+

+#define SCB_ICSR_PENDSVSET_Pos             28                                             /*!< SCB ICSR: PENDSVSET Position */

+#define SCB_ICSR_PENDSVSET_Msk             (1UL << SCB_ICSR_PENDSVSET_Pos)                /*!< SCB ICSR: PENDSVSET Mask */

+

+#define SCB_ICSR_PENDSVCLR_Pos             27                                             /*!< SCB ICSR: PENDSVCLR Position */

+#define SCB_ICSR_PENDSVCLR_Msk             (1UL << SCB_ICSR_PENDSVCLR_Pos)                /*!< SCB ICSR: PENDSVCLR Mask */

+

+#define SCB_ICSR_PENDSTSET_Pos             26                                             /*!< SCB ICSR: PENDSTSET Position */

+#define SCB_ICSR_PENDSTSET_Msk             (1UL << SCB_ICSR_PENDSTSET_Pos)                /*!< SCB ICSR: PENDSTSET Mask */

+

+#define SCB_ICSR_PENDSTCLR_Pos             25                                             /*!< SCB ICSR: PENDSTCLR Position */

+#define SCB_ICSR_PENDSTCLR_Msk             (1UL << SCB_ICSR_PENDSTCLR_Pos)                /*!< SCB ICSR: PENDSTCLR Mask */

+

+#define SCB_ICSR_ISRPREEMPT_Pos            23                                             /*!< SCB ICSR: ISRPREEMPT Position */

+#define SCB_ICSR_ISRPREEMPT_Msk            (1UL << SCB_ICSR_ISRPREEMPT_Pos)               /*!< SCB ICSR: ISRPREEMPT Mask */

+

+#define SCB_ICSR_ISRPENDING_Pos            22                                             /*!< SCB ICSR: ISRPENDING Position */

+#define SCB_ICSR_ISRPENDING_Msk            (1UL << SCB_ICSR_ISRPENDING_Pos)               /*!< SCB ICSR: ISRPENDING Mask */

+

+#define SCB_ICSR_VECTPENDING_Pos           12                                             /*!< SCB ICSR: VECTPENDING Position */

+#define SCB_ICSR_VECTPENDING_Msk           (0x1FFUL << SCB_ICSR_VECTPENDING_Pos)          /*!< SCB ICSR: VECTPENDING Mask */

+

+#define SCB_ICSR_VECTACTIVE_Pos             0                                             /*!< SCB ICSR: VECTACTIVE Position */

+#define SCB_ICSR_VECTACTIVE_Msk            (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/)       /*!< SCB ICSR: VECTACTIVE Mask */

+

+#if (__VTOR_PRESENT == 1)

+/* SCB Interrupt Control State Register Definitions */

+#define SCB_VTOR_TBLOFF_Pos                 8                                             /*!< SCB VTOR: TBLOFF Position */

+#define SCB_VTOR_TBLOFF_Msk                (0xFFFFFFUL << SCB_VTOR_TBLOFF_Pos)            /*!< SCB VTOR: TBLOFF Mask */

+#endif

+

+/* SCB Application Interrupt and Reset Control Register Definitions */

+#define SCB_AIRCR_VECTKEY_Pos              16                                             /*!< SCB AIRCR: VECTKEY Position */

+#define SCB_AIRCR_VECTKEY_Msk              (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos)            /*!< SCB AIRCR: VECTKEY Mask */

+

+#define SCB_AIRCR_VECTKEYSTAT_Pos          16                                             /*!< SCB AIRCR: VECTKEYSTAT Position */

+#define SCB_AIRCR_VECTKEYSTAT_Msk          (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos)        /*!< SCB AIRCR: VECTKEYSTAT Mask */

+

+#define SCB_AIRCR_ENDIANESS_Pos            15                                             /*!< SCB AIRCR: ENDIANESS Position */

+#define SCB_AIRCR_ENDIANESS_Msk            (1UL << SCB_AIRCR_ENDIANESS_Pos)               /*!< SCB AIRCR: ENDIANESS Mask */

+

+#define SCB_AIRCR_SYSRESETREQ_Pos           2                                             /*!< SCB AIRCR: SYSRESETREQ Position */

+#define SCB_AIRCR_SYSRESETREQ_Msk          (1UL << SCB_AIRCR_SYSRESETREQ_Pos)             /*!< SCB AIRCR: SYSRESETREQ Mask */

+

+#define SCB_AIRCR_VECTCLRACTIVE_Pos         1                                             /*!< SCB AIRCR: VECTCLRACTIVE Position */

+#define SCB_AIRCR_VECTCLRACTIVE_Msk        (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos)           /*!< SCB AIRCR: VECTCLRACTIVE Mask */

+

+/* SCB System Control Register Definitions */

+#define SCB_SCR_SEVONPEND_Pos               4                                             /*!< SCB SCR: SEVONPEND Position */

+#define SCB_SCR_SEVONPEND_Msk              (1UL << SCB_SCR_SEVONPEND_Pos)                 /*!< SCB SCR: SEVONPEND Mask */

+

+#define SCB_SCR_SLEEPDEEP_Pos               2                                             /*!< SCB SCR: SLEEPDEEP Position */

+#define SCB_SCR_SLEEPDEEP_Msk              (1UL << SCB_SCR_SLEEPDEEP_Pos)                 /*!< SCB SCR: SLEEPDEEP Mask */

+

+#define SCB_SCR_SLEEPONEXIT_Pos             1                                             /*!< SCB SCR: SLEEPONEXIT Position */

+#define SCB_SCR_SLEEPONEXIT_Msk            (1UL << SCB_SCR_SLEEPONEXIT_Pos)               /*!< SCB SCR: SLEEPONEXIT Mask */

+

+/* SCB Configuration Control Register Definitions */

+#define SCB_CCR_STKALIGN_Pos                9                                             /*!< SCB CCR: STKALIGN Position */

+#define SCB_CCR_STKALIGN_Msk               (1UL << SCB_CCR_STKALIGN_Pos)                  /*!< SCB CCR: STKALIGN Mask */

+

+#define SCB_CCR_UNALIGN_TRP_Pos             3                                             /*!< SCB CCR: UNALIGN_TRP Position */

+#define SCB_CCR_UNALIGN_TRP_Msk            (1UL << SCB_CCR_UNALIGN_TRP_Pos)               /*!< SCB CCR: UNALIGN_TRP Mask */

+

+/* SCB System Handler Control and State Register Definitions */

+#define SCB_SHCSR_SVCALLPENDED_Pos         15                                             /*!< SCB SHCSR: SVCALLPENDED Position */

+#define SCB_SHCSR_SVCALLPENDED_Msk         (1UL << SCB_SHCSR_SVCALLPENDED_Pos)            /*!< SCB SHCSR: SVCALLPENDED Mask */

+

+/*@} end of group CMSIS_SCB */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SysTick     System Tick Timer (SysTick)

+    \brief      Type definitions for the System Timer Registers.

+  @{

+ */

+

+/** \brief  Structure type to access the System Timer (SysTick).

+ */

+typedef struct

+{
+  /* CALIB is read-only (__I); LOAD/VAL carry the 24-bit reload/current count
+     (see SysTick_LOAD_RELOAD_Msk / SysTick_VAL_CURRENT_Msk). */
+  __IO uint32_t CTRL;                    /*!< Offset: 0x000 (R/W)  SysTick Control and Status Register */
+  __IO uint32_t LOAD;                    /*!< Offset: 0x004 (R/W)  SysTick Reload Value Register       */
+  __IO uint32_t VAL;                     /*!< Offset: 0x008 (R/W)  SysTick Current Value Register      */
+  __I  uint32_t CALIB;                   /*!< Offset: 0x00C (R/ )  SysTick Calibration Register        */
+} SysTick_Type;

+

+/* SysTick Control / Status Register Definitions */

+#define SysTick_CTRL_COUNTFLAG_Pos         16                                             /*!< SysTick CTRL: COUNTFLAG Position */

+#define SysTick_CTRL_COUNTFLAG_Msk         (1UL << SysTick_CTRL_COUNTFLAG_Pos)            /*!< SysTick CTRL: COUNTFLAG Mask */

+

+#define SysTick_CTRL_CLKSOURCE_Pos          2                                             /*!< SysTick CTRL: CLKSOURCE Position */

+#define SysTick_CTRL_CLKSOURCE_Msk         (1UL << SysTick_CTRL_CLKSOURCE_Pos)            /*!< SysTick CTRL: CLKSOURCE Mask */

+

+#define SysTick_CTRL_TICKINT_Pos            1                                             /*!< SysTick CTRL: TICKINT Position */

+#define SysTick_CTRL_TICKINT_Msk           (1UL << SysTick_CTRL_TICKINT_Pos)              /*!< SysTick CTRL: TICKINT Mask */

+

+#define SysTick_CTRL_ENABLE_Pos             0                                             /*!< SysTick CTRL: ENABLE Position */

+#define SysTick_CTRL_ENABLE_Msk            (1UL /*<< SysTick_CTRL_ENABLE_Pos*/)           /*!< SysTick CTRL: ENABLE Mask */

+

+/* SysTick Reload Register Definitions */

+#define SysTick_LOAD_RELOAD_Pos             0                                             /*!< SysTick LOAD: RELOAD Position */

+#define SysTick_LOAD_RELOAD_Msk            (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/)    /*!< SysTick LOAD: RELOAD Mask */

+

+/* SysTick Current Register Definitions */

+#define SysTick_VAL_CURRENT_Pos             0                                             /*!< SysTick VAL: CURRENT Position */

+#define SysTick_VAL_CURRENT_Msk            (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/)    /*!< SysTick VAL: CURRENT Mask */

+

+/* SysTick Calibration Register Definitions */

+#define SysTick_CALIB_NOREF_Pos            31                                             /*!< SysTick CALIB: NOREF Position */

+#define SysTick_CALIB_NOREF_Msk            (1UL << SysTick_CALIB_NOREF_Pos)               /*!< SysTick CALIB: NOREF Mask */

+

+#define SysTick_CALIB_SKEW_Pos             30                                             /*!< SysTick CALIB: SKEW Position */

+#define SysTick_CALIB_SKEW_Msk             (1UL << SysTick_CALIB_SKEW_Pos)                /*!< SysTick CALIB: SKEW Mask */

+

+#define SysTick_CALIB_TENMS_Pos             0                                             /*!< SysTick CALIB: TENMS Position */

+#define SysTick_CALIB_TENMS_Msk            (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/)    /*!< SysTick CALIB: TENMS Mask */

+

+/*@} end of group CMSIS_SysTick */

+

+#if (__MPU_PRESENT == 1)

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_MPU     Memory Protection Unit (MPU)

+    \brief      Type definitions for the Memory Protection Unit (MPU)

+  @{

+ */

+

+/** \brief  Structure type to access the Memory Protection Unit (MPU).

+ */

+typedef struct

+{
+  /* RNR selects the region that RBAR/RASR address.
+     NOTE(review): "RNRber" below is upstream CMSIS's garbled "Number"; the
+     comment text is kept byte-identical to the vendor file. */
+  __I  uint32_t TYPE;                    /*!< Offset: 0x000 (R/ )  MPU Type Register                              */
+  __IO uint32_t CTRL;                    /*!< Offset: 0x004 (R/W)  MPU Control Register                           */
+  __IO uint32_t RNR;                     /*!< Offset: 0x008 (R/W)  MPU Region RNRber Register                     */
+  __IO uint32_t RBAR;                    /*!< Offset: 0x00C (R/W)  MPU Region Base Address Register               */
+  __IO uint32_t RASR;                    /*!< Offset: 0x010 (R/W)  MPU Region Attribute and Size Register         */
+} MPU_Type;

+

+/* MPU Type Register */

+#define MPU_TYPE_IREGION_Pos               16                                             /*!< MPU TYPE: IREGION Position */

+#define MPU_TYPE_IREGION_Msk               (0xFFUL << MPU_TYPE_IREGION_Pos)               /*!< MPU TYPE: IREGION Mask */

+

+#define MPU_TYPE_DREGION_Pos                8                                             /*!< MPU TYPE: DREGION Position */

+#define MPU_TYPE_DREGION_Msk               (0xFFUL << MPU_TYPE_DREGION_Pos)               /*!< MPU TYPE: DREGION Mask */

+

+#define MPU_TYPE_SEPARATE_Pos               0                                             /*!< MPU TYPE: SEPARATE Position */

+#define MPU_TYPE_SEPARATE_Msk              (1UL /*<< MPU_TYPE_SEPARATE_Pos*/)             /*!< MPU TYPE: SEPARATE Mask */

+

+/* MPU Control Register */

+#define MPU_CTRL_PRIVDEFENA_Pos             2                                             /*!< MPU CTRL: PRIVDEFENA Position */

+#define MPU_CTRL_PRIVDEFENA_Msk            (1UL << MPU_CTRL_PRIVDEFENA_Pos)               /*!< MPU CTRL: PRIVDEFENA Mask */

+

+#define MPU_CTRL_HFNMIENA_Pos               1                                             /*!< MPU CTRL: HFNMIENA Position */

+#define MPU_CTRL_HFNMIENA_Msk              (1UL << MPU_CTRL_HFNMIENA_Pos)                 /*!< MPU CTRL: HFNMIENA Mask */

+

+#define MPU_CTRL_ENABLE_Pos                 0                                             /*!< MPU CTRL: ENABLE Position */

+#define MPU_CTRL_ENABLE_Msk                (1UL /*<< MPU_CTRL_ENABLE_Pos*/)               /*!< MPU CTRL: ENABLE Mask */

+

+/* MPU Region Number Register */

+#define MPU_RNR_REGION_Pos                  0                                             /*!< MPU RNR: REGION Position */

+#define MPU_RNR_REGION_Msk                 (0xFFUL /*<< MPU_RNR_REGION_Pos*/)             /*!< MPU RNR: REGION Mask */

+

+/* MPU Region Base Address Register */

+#define MPU_RBAR_ADDR_Pos                   8                                             /*!< MPU RBAR: ADDR Position */

+#define MPU_RBAR_ADDR_Msk                  (0xFFFFFFUL << MPU_RBAR_ADDR_Pos)              /*!< MPU RBAR: ADDR Mask */

+

+#define MPU_RBAR_VALID_Pos                  4                                             /*!< MPU RBAR: VALID Position */

+#define MPU_RBAR_VALID_Msk                 (1UL << MPU_RBAR_VALID_Pos)                    /*!< MPU RBAR: VALID Mask */

+

+#define MPU_RBAR_REGION_Pos                 0                                             /*!< MPU RBAR: REGION Position */

+#define MPU_RBAR_REGION_Msk                (0xFUL /*<< MPU_RBAR_REGION_Pos*/)             /*!< MPU RBAR: REGION Mask */

+

+/* MPU Region Attribute and Size Register */

+#define MPU_RASR_ATTRS_Pos                 16                                             /*!< MPU RASR: MPU Region Attribute field Position */

+#define MPU_RASR_ATTRS_Msk                 (0xFFFFUL << MPU_RASR_ATTRS_Pos)               /*!< MPU RASR: MPU Region Attribute field Mask */

+

+#define MPU_RASR_XN_Pos                    28                                             /*!< MPU RASR: ATTRS.XN Position */

+#define MPU_RASR_XN_Msk                    (1UL << MPU_RASR_XN_Pos)                       /*!< MPU RASR: ATTRS.XN Mask */

+

+#define MPU_RASR_AP_Pos                    24                                             /*!< MPU RASR: ATTRS.AP Position */

+#define MPU_RASR_AP_Msk                    (0x7UL << MPU_RASR_AP_Pos)                     /*!< MPU RASR: ATTRS.AP Mask */

+

+#define MPU_RASR_TEX_Pos                   19                                             /*!< MPU RASR: ATTRS.TEX Position */

+#define MPU_RASR_TEX_Msk                   (0x7UL << MPU_RASR_TEX_Pos)                    /*!< MPU RASR: ATTRS.TEX Mask */

+

+#define MPU_RASR_S_Pos                     18                                             /*!< MPU RASR: ATTRS.S Position */

+#define MPU_RASR_S_Msk                     (1UL << MPU_RASR_S_Pos)                        /*!< MPU RASR: ATTRS.S Mask */

+

+#define MPU_RASR_C_Pos                     17                                             /*!< MPU RASR: ATTRS.C Position */

+#define MPU_RASR_C_Msk                     (1UL << MPU_RASR_C_Pos)                        /*!< MPU RASR: ATTRS.C Mask */

+

+#define MPU_RASR_B_Pos                     16                                             /*!< MPU RASR: ATTRS.B Position */

+#define MPU_RASR_B_Msk                     (1UL << MPU_RASR_B_Pos)                        /*!< MPU RASR: ATTRS.B Mask */

+

+#define MPU_RASR_SRD_Pos                    8                                             /*!< MPU RASR: Sub-Region Disable Position */

+#define MPU_RASR_SRD_Msk                   (0xFFUL << MPU_RASR_SRD_Pos)                   /*!< MPU RASR: Sub-Region Disable Mask */

+

+#define MPU_RASR_SIZE_Pos                   1                                             /*!< MPU RASR: Region Size Field Position */

+#define MPU_RASR_SIZE_Msk                  (0x1FUL << MPU_RASR_SIZE_Pos)                  /*!< MPU RASR: Region Size Field Mask */

+

+#define MPU_RASR_ENABLE_Pos                 0                                             /*!< MPU RASR: Region enable bit Position */

+#define MPU_RASR_ENABLE_Msk                (1UL /*<< MPU_RASR_ENABLE_Pos*/)               /*!< MPU RASR: Region enable bit Disable Mask */

+

+/*@} end of group CMSIS_MPU */

+#endif

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_CoreDebug       Core Debug Registers (CoreDebug)

+    \brief      Cortex-M0+ Core Debug Registers (DCB registers, SHCSR, and DFSR)

+                are only accessible over DAP and not via processor. Therefore

+                they are not covered by the Cortex-M0 header file.

+  @{

+ */

+/*@} end of group CMSIS_CoreDebug */

+

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_core_base     Core Definitions

+    \brief      Definitions for base addresses, unions, and structures.

+  @{

+ */

+

+/* Memory mapping of Cortex-M0+ Hardware */

+#define SCS_BASE            (0xE000E000UL)                            /*!< System Control Space Base Address */

+#define SysTick_BASE        (SCS_BASE +  0x0010UL)                    /*!< SysTick Base Address              */

+#define NVIC_BASE           (SCS_BASE +  0x0100UL)                    /*!< NVIC Base Address                 */

+#define SCB_BASE            (SCS_BASE +  0x0D00UL)                    /*!< System Control Block Base Address */

+

+#define SCB                 ((SCB_Type       *)     SCB_BASE      )   /*!< SCB configuration struct           */

+#define SysTick             ((SysTick_Type   *)     SysTick_BASE  )   /*!< SysTick configuration struct       */

+#define NVIC                ((NVIC_Type      *)     NVIC_BASE     )   /*!< NVIC configuration struct          */

+

+#if (__MPU_PRESENT == 1)

+  #define MPU_BASE          (SCS_BASE +  0x0D90UL)                    /*!< Memory Protection Unit             */

+  #define MPU               ((MPU_Type       *)     MPU_BASE      )   /*!< Memory Protection Unit             */

+#endif

+

+/*@} */

+

+

+

+/*******************************************************************************

+ *                Hardware Abstraction Layer

+  Core Function Interface contains:

+  - Core NVIC Functions

+  - Core SysTick Functions

+  - Core Register Access Functions

+ ******************************************************************************/

+/** \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference

+*/

+

+

+

+/* ##########################   NVIC functions  #################################### */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_NVICFunctions NVIC Functions

+    \brief      Functions that manage interrupts and exceptions via the NVIC.

+    @{

+ */

+

+/* Interrupt Priorities are WORD accessible only under ARMv6M                   */

+/* The following MACROS handle generation of the register offset and byte masks */

+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)

+#define _SHP_IDX(IRQn)           ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >>    2UL)      )

+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )

+

+

+/** \brief  Enable External Interrupt

+

+    The function enables a device-specific interrupt in the NVIC interrupt controller.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_EnableIRQ(IRQn_Type IRQn)

+{
+  /* Set Enable register: writing a 1 enables just this IRQ without a
+     read-modify-write.  "& 0x1FUL" selects the bit position within the
+     single 32-bit ISER word. */
+  NVIC->ISER[0] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}

+

+

+/** \brief  Disable External Interrupt

+

+    The function disables a device-specific interrupt in the NVIC interrupt controller.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_DisableIRQ(IRQn_Type IRQn)

+{
+  /* Clear Enable register: writing a 1 disables just this IRQ; other
+     interrupts' enable bits are unaffected. */
+  NVIC->ICER[0] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}

+

+

+/** \brief  Get Pending Interrupt

+

+    The function reads the pending register in the NVIC and returns the pending bit

+    for the specified interrupt.

+

+    \param [in]      IRQn  Interrupt number.

+

+    \return             0  Interrupt status is not pending.

+    \return             1  Interrupt status is pending.

+ */

+__STATIC_INLINE uint32_t NVIC_GetPendingIRQ(IRQn_Type IRQn)

+{
+  /* Test the IRQn bit in the Set Pending register and normalise to 0/1. */
+  return((uint32_t)(((NVIC->ISPR[0] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
+}

+

+

+/** \brief  Set Pending Interrupt

+

+    The function sets the pending bit of an external interrupt.

+

+    \param [in]      IRQn  Interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_SetPendingIRQ(IRQn_Type IRQn)

+{
+  /* Set Pending register: writing a 1 marks this IRQ pending in software. */
+  NVIC->ISPR[0] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}

+

+

+/** \brief  Clear Pending Interrupt

+

+    The function clears the pending bit of an external interrupt.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_ClearPendingIRQ(IRQn_Type IRQn)

+{
+  /* Clear Pending register: writing a 1 removes this IRQ's pending state. */
+  NVIC->ICPR[0] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));
+}

+

+

+/** \brief  Set Interrupt Priority

+

+    The function sets the priority of an interrupt.

+

+    \note The priority cannot be set for every core interrupt.

+

+    \param [in]      IRQn  Interrupt number.

+    \param [in]  priority  Priority to set.

+ */

+__STATIC_INLINE void NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)

+{
+  /* Priority registers are word-accessible only under ARMv6-M (see the
+     _BIT_SHIFT/_SHP_IDX/_IP_IDX macros above), so the target byte is updated
+     with a read-modify-write of its containing word.  Negative IRQn values
+     are core exceptions (SCB->SHP); non-negative are external IRQs (NVIC->IP).
+     The priority is shifted into the top __NVIC_PRIO_BITS of the byte. */
+  if((int32_t)(IRQn) < 0) {
+    SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
+       (((priority << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
+  }
+  else {
+    NVIC->IP[_IP_IDX(IRQn)]  = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)]  & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
+       (((priority << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
+  }
+}

+

+

+/** \brief  Get Interrupt Priority

+

+    The function reads the priority of an interrupt. The interrupt

+    number can be positive to specify an external (device specific)

+    interrupt, or negative to specify an internal (core) interrupt.

+

+

+    \param [in]   IRQn  Interrupt number.

+    \return             Interrupt Priority. Value is aligned automatically to the implemented

+                        priority bits of the microcontroller.

+ */

+__STATIC_INLINE uint32_t NVIC_GetPriority(IRQn_Type IRQn)

+{

+  /* Inverse of NVIC_SetPriority: extract the byte for this IRQn from the
+     containing word, then shift down so only the implemented
+     __NVIC_PRIO_BITS are returned.  Negative IRQn = core exception. */
+  if((int32_t)(IRQn) < 0) {
+    return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8 - __NVIC_PRIO_BITS)));
+  }
+  else {
+    return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8 - __NVIC_PRIO_BITS)));
+  }
+}

+

+

+/** \brief  System Reset

+

+    The function initiates a system reset request to reset the MCU.

+ */

+__STATIC_INLINE void NVIC_SystemReset(void)

+{
+  __DSB();                                                     /* Ensure all outstanding memory accesses included
+                                                                  buffered write are completed before reset */
+  /* AIRCR writes take effect only with the 0x5FA key in the VECTKEY field;
+     SYSRESETREQ asks the system to perform a reset. */
+  SCB->AIRCR  = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |
+                 SCB_AIRCR_SYSRESETREQ_Msk);
+  __DSB();                                                     /* Ensure completion of memory access */
+  while(1) { __NOP(); }                                        /* wait until reset */
+}

+

+/*@} end of CMSIS_Core_NVICFunctions */

+

+

+

+/* ##################################    SysTick function  ############################################ */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_SysTickFunctions SysTick Functions

+    \brief      Functions that configure the System.

+  @{

+ */

+

+#if (__Vendor_SysTickConfig == 0)

+

+/** \brief  System Tick Configuration

+

+    The function initializes the System Timer and its interrupt, and starts the System Tick Timer.

+    Counter is in free running mode to generate periodic interrupts.

+

+    \param [in]  ticks  Number of ticks between two interrupts.

+

+    \return          0  Function succeeded.

+    \return          1  Function failed.

+

+    \note     When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the

+    function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>

+    must contain a vendor-specific implementation of this function.

+

+ */

+__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)

+{
+  /* The reload register holds ticks-1 and is only 24 bits wide
+     (SysTick_LOAD_RELOAD_Msk), so larger periods are rejected up front. */
+  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) {return (1UL);}      /* Reload value impossible */
+
+  SysTick->LOAD  = (uint32_t)(ticks - 1UL);                         /* set reload register */
+  /* Lowest urgency: (1 << __NVIC_PRIO_BITS) - 1 is the largest (least
+     urgent) priority value the core implements. */
+  NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
+  SysTick->VAL   = 0UL;                                             /* Load the SysTick Counter Value */
+  SysTick->CTRL  = SysTick_CTRL_CLKSOURCE_Msk |
+                   SysTick_CTRL_TICKINT_Msk   |
+                   SysTick_CTRL_ENABLE_Msk;                         /* Enable SysTick IRQ and SysTick Timer */
+  return (0UL);                                                     /* Function successful */
+}

+

+#endif

+

+/*@} end of CMSIS_Core_SysTickFunctions */

+

+

+

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __CORE_CM0PLUS_H_DEPENDANT */

+

+#endif /* __CMSIS_GENERIC */

diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm3.h b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm3.h
new file mode 100644
index 0000000..d324f9b
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm3.h
@@ -0,0 +1,1693 @@
+/**************************************************************************//**

+ * @file     core_cm3.h

+ * @brief    CMSIS Cortex-M3 Core Peripheral Access Layer Header File

+ * @version  V4.10

+ * @date     18. March 2015

+ *

+ * @note

+ *

+ ******************************************************************************/

+/* Copyright (c) 2009 - 2015 ARM LIMITED

+

+   All rights reserved.

+   Redistribution and use in source and binary forms, with or without

+   modification, are permitted provided that the following conditions are met:

+   - Redistributions of source code must retain the above copyright

+     notice, this list of conditions and the following disclaimer.

+   - Redistributions in binary form must reproduce the above copyright

+     notice, this list of conditions and the following disclaimer in the

+     documentation and/or other materials provided with the distribution.

+   - Neither the name of ARM nor the names of its contributors may be used

+     to endorse or promote products derived from this software without

+     specific prior written permission.

+   *

+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"

+   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE

+   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE

+   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE

+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR

+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF

+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS

+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN

+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)

+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE

+   POSSIBILITY OF SUCH DAMAGE.

+   ---------------------------------------------------------------------------*/

+

+

+#if defined ( __ICCARM__ )

+ #pragma system_include  /* treat file as system include file for MISRA check */

+#endif

+

+#ifndef __CORE_CM3_H_GENERIC

+#define __CORE_CM3_H_GENERIC

+

+#ifdef __cplusplus

+ extern "C" {

+#endif

+

+/** \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions

+  CMSIS violates the following MISRA-C:2004 rules:

+

+   \li Required Rule 8.5, object/function definition in header file.<br>

+     Function definitions in header files are used to allow 'inlining'.

+

+   \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>

+     Unions are used for effective representation of core registers.

+

+   \li Advisory Rule 19.7, Function-like macro defined.<br>

+     Function-like macros are used to allow more efficient code.

+ */

+

+

+/*******************************************************************************

+ *                 CMSIS definitions

+ ******************************************************************************/

+/** \ingroup Cortex_M3

+  @{

+ */

+

+/*  CMSIS CM3 definitions */

+#define __CM3_CMSIS_VERSION_MAIN  (0x04)                                   /*!< [31:16] CMSIS HAL main version   */

+#define __CM3_CMSIS_VERSION_SUB   (0x00)                                   /*!< [15:0]  CMSIS HAL sub version    */

+#define __CM3_CMSIS_VERSION       ((__CM3_CMSIS_VERSION_MAIN << 16) | \

+                                    __CM3_CMSIS_VERSION_SUB          )     /*!< CMSIS HAL version number         */

+

+#define __CORTEX_M                (0x03)                                   /*!< Cortex-M Core                    */

+

+

+#if   defined ( __CC_ARM )

+  #define __ASM            __asm                                      /*!< asm keyword for ARM Compiler          */

+  #define __INLINE         __inline                                   /*!< inline keyword for ARM Compiler       */

+  #define __STATIC_INLINE  static __inline

+

+#elif defined ( __GNUC__ )

+  #define __ASM            __asm                                      /*!< asm keyword for GNU Compiler          */

+  #define __INLINE         inline                                     /*!< inline keyword for GNU Compiler       */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __ICCARM__ )

+  #define __ASM            __asm                                      /*!< asm keyword for IAR Compiler          */

+  #define __INLINE         inline                                     /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __TMS470__ )

+  #define __ASM            __asm                                      /*!< asm keyword for TI CCS Compiler       */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __TASKING__ )

+  #define __ASM            __asm                                      /*!< asm keyword for TASKING Compiler      */

+  #define __INLINE         inline                                     /*!< inline keyword for TASKING Compiler   */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __CSMC__ )

+  #define __packed

+  #define __ASM            _asm                                      /*!< asm keyword for COSMIC Compiler      */

+  #define __INLINE         inline                                    /*use -pc99 on compile line !< inline keyword for COSMIC Compiler   */

+  #define __STATIC_INLINE  static inline

+

+#endif

+

+/** __FPU_USED indicates whether an FPU is used or not.

+    This core does not support an FPU at all

+*/

+#define __FPU_USED       0

+

+#if defined ( __CC_ARM )

+  #if defined __TARGET_FPU_VFP

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __GNUC__ )

+  #if defined (__VFP_FP__) && !defined(__SOFTFP__)

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __ICCARM__ )

+  #if defined __ARMVFP__

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __TMS470__ )

+  #if defined __TI__VFP_SUPPORT____

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __TASKING__ )

+  #if defined __FPU_VFP__

+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __CSMC__ )		/* Cosmic */

+  #if ( __CSMC__ & 0x400)		// FPU present for parser

+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+#endif

+

+#include <stdint.h>                      /* standard types definitions                      */

+#include <core_cmInstr.h>                /* Core Instruction Access                         */

+#include <core_cmFunc.h>                 /* Core Function Access                            */

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __CORE_CM3_H_GENERIC */

+

+#ifndef __CMSIS_GENERIC

+

+#ifndef __CORE_CM3_H_DEPENDANT

+#define __CORE_CM3_H_DEPENDANT

+

+#ifdef __cplusplus

+ extern "C" {

+#endif

+

+/* check device defines and use defaults */

+#if defined __CHECK_DEVICE_DEFINES

+  #ifndef __CM3_REV

+    #define __CM3_REV               0x0200

+    #warning "__CM3_REV not defined in device header file; using default!"

+  #endif

+

+  #ifndef __MPU_PRESENT

+    #define __MPU_PRESENT             0

+    #warning "__MPU_PRESENT not defined in device header file; using default!"

+  #endif

+

+  #ifndef __NVIC_PRIO_BITS

+    #define __NVIC_PRIO_BITS          4

+    #warning "__NVIC_PRIO_BITS not defined in device header file; using default!"

+  #endif

+

+  #ifndef __Vendor_SysTickConfig

+    #define __Vendor_SysTickConfig    0

+    #warning "__Vendor_SysTickConfig not defined in device header file; using default!"

+  #endif

+#endif

+

+/* IO definitions (access restrictions to peripheral registers) */

+/**

+    \defgroup CMSIS_glob_defs CMSIS Global Defines

+

+    <strong>IO Type Qualifiers</strong> are used

+    \li to specify the access to peripheral variables.

+    \li for automatic generation of peripheral register debug information.

+*/

+#ifdef __cplusplus

+  #define   __I     volatile             /*!< Defines 'read only' permissions                 */

+#else

+  #define   __I     volatile const       /*!< Defines 'read only' permissions                 */

+#endif

+#define     __O     volatile             /*!< Defines 'write only' permissions                */

+#define     __IO    volatile             /*!< Defines 'read / write' permissions              */

+

+/*@} end of group Cortex_M3 */

+

+

+

+/*******************************************************************************

+ *                 Register Abstraction

+  Core Register contain:

+  - Core Register

+  - Core NVIC Register

+  - Core SCB Register

+  - Core SysTick Register

+  - Core Debug Register

+  - Core MPU Register

+ ******************************************************************************/

+/** \defgroup CMSIS_core_register Defines and Type Definitions

+    \brief Type definitions and defines for Cortex-M processor based devices.

+*/

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_CORE  Status and Control Registers

+    \brief  Core Register type definitions.

+  @{

+ */

+

+/** \brief  Union type to access the Application Program Status Register (APSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t _reserved0:27;              /*!< bit:  0..26  Reserved                           */

+    uint32_t Q:1;                        /*!< bit:     27  Saturation condition flag          */

+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag       */

+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag          */

+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag           */

+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag       */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} APSR_Type;

+

+/* APSR Register Definitions */

+#define APSR_N_Pos                         31                                             /*!< APSR: N Position */

+#define APSR_N_Msk                         (1UL << APSR_N_Pos)                            /*!< APSR: N Mask */

+

+#define APSR_Z_Pos                         30                                             /*!< APSR: Z Position */

+#define APSR_Z_Msk                         (1UL << APSR_Z_Pos)                            /*!< APSR: Z Mask */

+

+#define APSR_C_Pos                         29                                             /*!< APSR: C Position */

+#define APSR_C_Msk                         (1UL << APSR_C_Pos)                            /*!< APSR: C Mask */

+

+#define APSR_V_Pos                         28                                             /*!< APSR: V Position */

+#define APSR_V_Msk                         (1UL << APSR_V_Pos)                            /*!< APSR: V Mask */

+

+#define APSR_Q_Pos                         27                                             /*!< APSR: Q Position */

+#define APSR_Q_Msk                         (1UL << APSR_Q_Pos)                            /*!< APSR: Q Mask */

+

+

+/** \brief  Union type to access the Interrupt Program Status Register (IPSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */

+    uint32_t _reserved0:23;              /*!< bit:  9..31  Reserved                           */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} IPSR_Type;

+

+/* IPSR Register Definitions */

+#define IPSR_ISR_Pos                        0                                             /*!< IPSR: ISR Position */

+#define IPSR_ISR_Msk                       (0x1FFUL /*<< IPSR_ISR_Pos*/)                  /*!< IPSR: ISR Mask */

+

+

+/** \brief  Union type to access the Special-Purpose Program Status Registers (xPSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */

+    uint32_t _reserved0:15;              /*!< bit:  9..23  Reserved                           */

+    uint32_t T:1;                        /*!< bit:     24  Thumb bit        (read 0)          */

+    uint32_t IT:2;                       /*!< bit: 25..26  saved IT state   (read 0)          */

+    uint32_t Q:1;                        /*!< bit:     27  Saturation condition flag          */

+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag       */

+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag          */

+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag           */

+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag       */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} xPSR_Type;

+

+/* xPSR Register Definitions */

+#define xPSR_N_Pos                         31                                             /*!< xPSR: N Position */

+#define xPSR_N_Msk                         (1UL << xPSR_N_Pos)                            /*!< xPSR: N Mask */

+

+#define xPSR_Z_Pos                         30                                             /*!< xPSR: Z Position */

+#define xPSR_Z_Msk                         (1UL << xPSR_Z_Pos)                            /*!< xPSR: Z Mask */

+

+#define xPSR_C_Pos                         29                                             /*!< xPSR: C Position */

+#define xPSR_C_Msk                         (1UL << xPSR_C_Pos)                            /*!< xPSR: C Mask */

+

+#define xPSR_V_Pos                         28                                             /*!< xPSR: V Position */

+#define xPSR_V_Msk                         (1UL << xPSR_V_Pos)                            /*!< xPSR: V Mask */

+

+#define xPSR_Q_Pos                         27                                             /*!< xPSR: Q Position */

+#define xPSR_Q_Msk                         (1UL << xPSR_Q_Pos)                            /*!< xPSR: Q Mask */

+

+#define xPSR_IT_Pos                        25                                             /*!< xPSR: IT Position */

+#define xPSR_IT_Msk                        (3UL << xPSR_IT_Pos)                           /*!< xPSR: IT Mask */

+

+#define xPSR_T_Pos                         24                                             /*!< xPSR: T Position */

+#define xPSR_T_Msk                         (1UL << xPSR_T_Pos)                            /*!< xPSR: T Mask */

+

+#define xPSR_ISR_Pos                        0                                             /*!< xPSR: ISR Position */

+#define xPSR_ISR_Msk                       (0x1FFUL /*<< xPSR_ISR_Pos*/)                  /*!< xPSR: ISR Mask */

+

+

+/** \brief  Union type to access the Control Registers (CONTROL).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t nPRIV:1;                    /*!< bit:      0  Execution privilege in Thread mode */

+    uint32_t SPSEL:1;                    /*!< bit:      1  Stack to be used                   */

+    uint32_t _reserved1:30;              /*!< bit:  2..31  Reserved                           */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} CONTROL_Type;

+

+/* CONTROL Register Definitions */

+#define CONTROL_SPSEL_Pos                   1                                             /*!< CONTROL: SPSEL Position */

+#define CONTROL_SPSEL_Msk                  (1UL << CONTROL_SPSEL_Pos)                     /*!< CONTROL: SPSEL Mask */

+

+#define CONTROL_nPRIV_Pos                   0                                             /*!< CONTROL: nPRIV Position */

+#define CONTROL_nPRIV_Msk                  (1UL /*<< CONTROL_nPRIV_Pos*/)                 /*!< CONTROL: nPRIV Mask */

+

+/*@} end of group CMSIS_CORE */

+

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_NVIC  Nested Vectored Interrupt Controller (NVIC)

+    \brief      Type definitions for the NVIC Registers

+  @{

+ */

+

+/** \brief  Structure type to access the Nested Vectored Interrupt Controller (NVIC).

+ */

+typedef struct

+{

+  __IO uint32_t ISER[8];                 /*!< Offset: 0x000 (R/W)  Interrupt Set Enable Register           */

+       uint32_t RESERVED0[24];

+  __IO uint32_t ICER[8];                 /*!< Offset: 0x080 (R/W)  Interrupt Clear Enable Register         */

+       uint32_t RSERVED1[24];

+  __IO uint32_t ISPR[8];                 /*!< Offset: 0x100 (R/W)  Interrupt Set Pending Register          */

+       uint32_t RESERVED2[24];

+  __IO uint32_t ICPR[8];                 /*!< Offset: 0x180 (R/W)  Interrupt Clear Pending Register        */

+       uint32_t RESERVED3[24];

+  __IO uint32_t IABR[8];                 /*!< Offset: 0x200 (R/W)  Interrupt Active bit Register           */

+       uint32_t RESERVED4[56];

+  __IO uint8_t  IP[240];                 /*!< Offset: 0x300 (R/W)  Interrupt Priority Register (8Bit wide) */

+       uint32_t RESERVED5[644];

+  __O  uint32_t STIR;                    /*!< Offset: 0xE00 ( /W)  Software Trigger Interrupt Register     */

+}  NVIC_Type;

+

+/* Software Triggered Interrupt Register Definitions */

+#define NVIC_STIR_INTID_Pos                 0                                          /*!< STIR: INTLINESNUM Position */

+#define NVIC_STIR_INTID_Msk                (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/)        /*!< STIR: INTLINESNUM Mask */

+

+/*@} end of group CMSIS_NVIC */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SCB     System Control Block (SCB)

+    \brief      Type definitions for the System Control Block Registers

+  @{

+ */

+

+/** \brief  Structure type to access the System Control Block (SCB).

+ */

+typedef struct

+{

+  __I  uint32_t CPUID;                   /*!< Offset: 0x000 (R/ )  CPUID Base Register                                   */

+  __IO uint32_t ICSR;                    /*!< Offset: 0x004 (R/W)  Interrupt Control and State Register                  */

+  __IO uint32_t VTOR;                    /*!< Offset: 0x008 (R/W)  Vector Table Offset Register                          */

+  __IO uint32_t AIRCR;                   /*!< Offset: 0x00C (R/W)  Application Interrupt and Reset Control Register      */

+  __IO uint32_t SCR;                     /*!< Offset: 0x010 (R/W)  System Control Register                               */

+  __IO uint32_t CCR;                     /*!< Offset: 0x014 (R/W)  Configuration Control Register                        */

+  __IO uint8_t  SHP[12];                 /*!< Offset: 0x018 (R/W)  System Handlers Priority Registers (4-7, 8-11, 12-15) */

+  __IO uint32_t SHCSR;                   /*!< Offset: 0x024 (R/W)  System Handler Control and State Register             */

+  __IO uint32_t CFSR;                    /*!< Offset: 0x028 (R/W)  Configurable Fault Status Register                    */

+  __IO uint32_t HFSR;                    /*!< Offset: 0x02C (R/W)  HardFault Status Register                             */

+  __IO uint32_t DFSR;                    /*!< Offset: 0x030 (R/W)  Debug Fault Status Register                           */

+  __IO uint32_t MMFAR;                   /*!< Offset: 0x034 (R/W)  MemManage Fault Address Register                      */

+  __IO uint32_t BFAR;                    /*!< Offset: 0x038 (R/W)  BusFault Address Register                             */

+  __IO uint32_t AFSR;                    /*!< Offset: 0x03C (R/W)  Auxiliary Fault Status Register                       */

+  __I  uint32_t PFR[2];                  /*!< Offset: 0x040 (R/ )  Processor Feature Register                            */

+  __I  uint32_t DFR;                     /*!< Offset: 0x048 (R/ )  Debug Feature Register                                */

+  __I  uint32_t ADR;                     /*!< Offset: 0x04C (R/ )  Auxiliary Feature Register                            */

+  __I  uint32_t MMFR[4];                 /*!< Offset: 0x050 (R/ )  Memory Model Feature Register                         */

+  __I  uint32_t ISAR[5];                 /*!< Offset: 0x060 (R/ )  Instruction Set Attributes Register                   */

+       uint32_t RESERVED0[5];

+  __IO uint32_t CPACR;                   /*!< Offset: 0x088 (R/W)  Coprocessor Access Control Register                   */

+} SCB_Type;

+

+/* SCB CPUID Register Definitions */

+#define SCB_CPUID_IMPLEMENTER_Pos          24                                             /*!< SCB CPUID: IMPLEMENTER Position */

+#define SCB_CPUID_IMPLEMENTER_Msk          (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos)          /*!< SCB CPUID: IMPLEMENTER Mask */

+

+#define SCB_CPUID_VARIANT_Pos              20                                             /*!< SCB CPUID: VARIANT Position */

+#define SCB_CPUID_VARIANT_Msk              (0xFUL << SCB_CPUID_VARIANT_Pos)               /*!< SCB CPUID: VARIANT Mask */

+

+#define SCB_CPUID_ARCHITECTURE_Pos         16                                             /*!< SCB CPUID: ARCHITECTURE Position */

+#define SCB_CPUID_ARCHITECTURE_Msk         (0xFUL << SCB_CPUID_ARCHITECTURE_Pos)          /*!< SCB CPUID: ARCHITECTURE Mask */

+

+#define SCB_CPUID_PARTNO_Pos                4                                             /*!< SCB CPUID: PARTNO Position */

+#define SCB_CPUID_PARTNO_Msk               (0xFFFUL << SCB_CPUID_PARTNO_Pos)              /*!< SCB CPUID: PARTNO Mask */

+

+#define SCB_CPUID_REVISION_Pos              0                                             /*!< SCB CPUID: REVISION Position */

+#define SCB_CPUID_REVISION_Msk             (0xFUL /*<< SCB_CPUID_REVISION_Pos*/)          /*!< SCB CPUID: REVISION Mask */

+

+/* SCB Interrupt Control State Register Definitions */

+#define SCB_ICSR_NMIPENDSET_Pos            31                                             /*!< SCB ICSR: NMIPENDSET Position */

+#define SCB_ICSR_NMIPENDSET_Msk            (1UL << SCB_ICSR_NMIPENDSET_Pos)               /*!< SCB ICSR: NMIPENDSET Mask */

+

+#define SCB_ICSR_PENDSVSET_Pos             28                                             /*!< SCB ICSR: PENDSVSET Position */

+#define SCB_ICSR_PENDSVSET_Msk             (1UL << SCB_ICSR_PENDSVSET_Pos)                /*!< SCB ICSR: PENDSVSET Mask */

+

+#define SCB_ICSR_PENDSVCLR_Pos             27                                             /*!< SCB ICSR: PENDSVCLR Position */

+#define SCB_ICSR_PENDSVCLR_Msk             (1UL << SCB_ICSR_PENDSVCLR_Pos)                /*!< SCB ICSR: PENDSVCLR Mask */

+

+#define SCB_ICSR_PENDSTSET_Pos             26                                             /*!< SCB ICSR: PENDSTSET Position */

+#define SCB_ICSR_PENDSTSET_Msk             (1UL << SCB_ICSR_PENDSTSET_Pos)                /*!< SCB ICSR: PENDSTSET Mask */

+

+#define SCB_ICSR_PENDSTCLR_Pos             25                                             /*!< SCB ICSR: PENDSTCLR Position */

+#define SCB_ICSR_PENDSTCLR_Msk             (1UL << SCB_ICSR_PENDSTCLR_Pos)                /*!< SCB ICSR: PENDSTCLR Mask */

+

+#define SCB_ICSR_ISRPREEMPT_Pos            23                                             /*!< SCB ICSR: ISRPREEMPT Position */

+#define SCB_ICSR_ISRPREEMPT_Msk            (1UL << SCB_ICSR_ISRPREEMPT_Pos)               /*!< SCB ICSR: ISRPREEMPT Mask */

+

+#define SCB_ICSR_ISRPENDING_Pos            22                                             /*!< SCB ICSR: ISRPENDING Position */

+#define SCB_ICSR_ISRPENDING_Msk            (1UL << SCB_ICSR_ISRPENDING_Pos)               /*!< SCB ICSR: ISRPENDING Mask */

+

+#define SCB_ICSR_VECTPENDING_Pos           12                                             /*!< SCB ICSR: VECTPENDING Position */

+#define SCB_ICSR_VECTPENDING_Msk           (0x1FFUL << SCB_ICSR_VECTPENDING_Pos)          /*!< SCB ICSR: VECTPENDING Mask */

+

+#define SCB_ICSR_RETTOBASE_Pos             11                                             /*!< SCB ICSR: RETTOBASE Position */

+#define SCB_ICSR_RETTOBASE_Msk             (1UL << SCB_ICSR_RETTOBASE_Pos)                /*!< SCB ICSR: RETTOBASE Mask */

+

+#define SCB_ICSR_VECTACTIVE_Pos             0                                             /*!< SCB ICSR: VECTACTIVE Position */

+#define SCB_ICSR_VECTACTIVE_Msk            (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/)       /*!< SCB ICSR: VECTACTIVE Mask */

+

+/* SCB Vector Table Offset Register Definitions */

+#if (__CM3_REV < 0x0201)                   /* core r2p1 */

+#define SCB_VTOR_TBLBASE_Pos               29                                             /*!< SCB VTOR: TBLBASE Position */

+#define SCB_VTOR_TBLBASE_Msk               (1UL << SCB_VTOR_TBLBASE_Pos)                  /*!< SCB VTOR: TBLBASE Mask */

+

+#define SCB_VTOR_TBLOFF_Pos                 7                                             /*!< SCB VTOR: TBLOFF Position */

+#define SCB_VTOR_TBLOFF_Msk                (0x3FFFFFUL << SCB_VTOR_TBLOFF_Pos)            /*!< SCB VTOR: TBLOFF Mask */

+#else

+#define SCB_VTOR_TBLOFF_Pos                 7                                             /*!< SCB VTOR: TBLOFF Position */

+#define SCB_VTOR_TBLOFF_Msk                (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos)           /*!< SCB VTOR: TBLOFF Mask */

+#endif

+

+/* SCB Application Interrupt and Reset Control Register Definitions */

+#define SCB_AIRCR_VECTKEY_Pos              16                                             /*!< SCB AIRCR: VECTKEY Position */

+#define SCB_AIRCR_VECTKEY_Msk              (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos)            /*!< SCB AIRCR: VECTKEY Mask */

+

+#define SCB_AIRCR_VECTKEYSTAT_Pos          16                                             /*!< SCB AIRCR: VECTKEYSTAT Position */

+#define SCB_AIRCR_VECTKEYSTAT_Msk          (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos)        /*!< SCB AIRCR: VECTKEYSTAT Mask */

+

+#define SCB_AIRCR_ENDIANESS_Pos            15                                             /*!< SCB AIRCR: ENDIANESS Position */

+#define SCB_AIRCR_ENDIANESS_Msk            (1UL << SCB_AIRCR_ENDIANESS_Pos)               /*!< SCB AIRCR: ENDIANESS Mask */

+

+#define SCB_AIRCR_PRIGROUP_Pos              8                                             /*!< SCB AIRCR: PRIGROUP Position */

+#define SCB_AIRCR_PRIGROUP_Msk             (7UL << SCB_AIRCR_PRIGROUP_Pos)                /*!< SCB AIRCR: PRIGROUP Mask */

+

+#define SCB_AIRCR_SYSRESETREQ_Pos           2                                             /*!< SCB AIRCR: SYSRESETREQ Position */

+#define SCB_AIRCR_SYSRESETREQ_Msk          (1UL << SCB_AIRCR_SYSRESETREQ_Pos)             /*!< SCB AIRCR: SYSRESETREQ Mask */

+

+#define SCB_AIRCR_VECTCLRACTIVE_Pos         1                                             /*!< SCB AIRCR: VECTCLRACTIVE Position */

+#define SCB_AIRCR_VECTCLRACTIVE_Msk        (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos)           /*!< SCB AIRCR: VECTCLRACTIVE Mask */

+

+#define SCB_AIRCR_VECTRESET_Pos             0                                             /*!< SCB AIRCR: VECTRESET Position */

+#define SCB_AIRCR_VECTRESET_Msk            (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/)           /*!< SCB AIRCR: VECTRESET Mask */

+

+/* SCB System Control Register Definitions */

+#define SCB_SCR_SEVONPEND_Pos               4                                             /*!< SCB SCR: SEVONPEND Position */

+#define SCB_SCR_SEVONPEND_Msk              (1UL << SCB_SCR_SEVONPEND_Pos)                 /*!< SCB SCR: SEVONPEND Mask */

+

+#define SCB_SCR_SLEEPDEEP_Pos               2                                             /*!< SCB SCR: SLEEPDEEP Position */

+#define SCB_SCR_SLEEPDEEP_Msk              (1UL << SCB_SCR_SLEEPDEEP_Pos)                 /*!< SCB SCR: SLEEPDEEP Mask */

+

+#define SCB_SCR_SLEEPONEXIT_Pos             1                                             /*!< SCB SCR: SLEEPONEXIT Position */

+#define SCB_SCR_SLEEPONEXIT_Msk            (1UL << SCB_SCR_SLEEPONEXIT_Pos)               /*!< SCB SCR: SLEEPONEXIT Mask */

+

+/* SCB Configuration Control Register Definitions */

+#define SCB_CCR_STKALIGN_Pos                9                                             /*!< SCB CCR: STKALIGN Position */

+#define SCB_CCR_STKALIGN_Msk               (1UL << SCB_CCR_STKALIGN_Pos)                  /*!< SCB CCR: STKALIGN Mask */

+

+#define SCB_CCR_BFHFNMIGN_Pos               8                                             /*!< SCB CCR: BFHFNMIGN Position */

+#define SCB_CCR_BFHFNMIGN_Msk              (1UL << SCB_CCR_BFHFNMIGN_Pos)                 /*!< SCB CCR: BFHFNMIGN Mask */

+

+#define SCB_CCR_DIV_0_TRP_Pos               4                                             /*!< SCB CCR: DIV_0_TRP Position */

+#define SCB_CCR_DIV_0_TRP_Msk              (1UL << SCB_CCR_DIV_0_TRP_Pos)                 /*!< SCB CCR: DIV_0_TRP Mask */

+

+#define SCB_CCR_UNALIGN_TRP_Pos             3                                             /*!< SCB CCR: UNALIGN_TRP Position */

+#define SCB_CCR_UNALIGN_TRP_Msk            (1UL << SCB_CCR_UNALIGN_TRP_Pos)               /*!< SCB CCR: UNALIGN_TRP Mask */

+

+#define SCB_CCR_USERSETMPEND_Pos            1                                             /*!< SCB CCR: USERSETMPEND Position */

+#define SCB_CCR_USERSETMPEND_Msk           (1UL << SCB_CCR_USERSETMPEND_Pos)              /*!< SCB CCR: USERSETMPEND Mask */

+

+#define SCB_CCR_NONBASETHRDENA_Pos          0                                             /*!< SCB CCR: NONBASETHRDENA Position */

+#define SCB_CCR_NONBASETHRDENA_Msk         (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/)        /*!< SCB CCR: NONBASETHRDENA Mask */

+

+/* NOTE(review): for any field whose bit position is 0, the "<< ..._Pos" shift
+   is written inside a comment in the mask expression rather than emitted as a
+   shift by zero; the mask value is identical and the Pos/Msk pattern stays
+   uniform across all definitions. This idiom recurs throughout this file. */
+
+/* SCB System Handler Control and State Register Definitions */

+#define SCB_SHCSR_USGFAULTENA_Pos          18                                             /*!< SCB SHCSR: USGFAULTENA Position */

+#define SCB_SHCSR_USGFAULTENA_Msk          (1UL << SCB_SHCSR_USGFAULTENA_Pos)             /*!< SCB SHCSR: USGFAULTENA Mask */

+

+#define SCB_SHCSR_BUSFAULTENA_Pos          17                                             /*!< SCB SHCSR: BUSFAULTENA Position */

+#define SCB_SHCSR_BUSFAULTENA_Msk          (1UL << SCB_SHCSR_BUSFAULTENA_Pos)             /*!< SCB SHCSR: BUSFAULTENA Mask */

+

+#define SCB_SHCSR_MEMFAULTENA_Pos          16                                             /*!< SCB SHCSR: MEMFAULTENA Position */

+#define SCB_SHCSR_MEMFAULTENA_Msk          (1UL << SCB_SHCSR_MEMFAULTENA_Pos)             /*!< SCB SHCSR: MEMFAULTENA Mask */

+

+#define SCB_SHCSR_SVCALLPENDED_Pos         15                                             /*!< SCB SHCSR: SVCALLPENDED Position */

+#define SCB_SHCSR_SVCALLPENDED_Msk         (1UL << SCB_SHCSR_SVCALLPENDED_Pos)            /*!< SCB SHCSR: SVCALLPENDED Mask */

+

+#define SCB_SHCSR_BUSFAULTPENDED_Pos       14                                             /*!< SCB SHCSR: BUSFAULTPENDED Position */

+#define SCB_SHCSR_BUSFAULTPENDED_Msk       (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos)          /*!< SCB SHCSR: BUSFAULTPENDED Mask */

+

+#define SCB_SHCSR_MEMFAULTPENDED_Pos       13                                             /*!< SCB SHCSR: MEMFAULTPENDED Position */

+#define SCB_SHCSR_MEMFAULTPENDED_Msk       (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos)          /*!< SCB SHCSR: MEMFAULTPENDED Mask */

+

+#define SCB_SHCSR_USGFAULTPENDED_Pos       12                                             /*!< SCB SHCSR: USGFAULTPENDED Position */

+#define SCB_SHCSR_USGFAULTPENDED_Msk       (1UL << SCB_SHCSR_USGFAULTPENDED_Pos)          /*!< SCB SHCSR: USGFAULTPENDED Mask */

+

+#define SCB_SHCSR_SYSTICKACT_Pos           11                                             /*!< SCB SHCSR: SYSTICKACT Position */

+#define SCB_SHCSR_SYSTICKACT_Msk           (1UL << SCB_SHCSR_SYSTICKACT_Pos)              /*!< SCB SHCSR: SYSTICKACT Mask */

+

+#define SCB_SHCSR_PENDSVACT_Pos            10                                             /*!< SCB SHCSR: PENDSVACT Position */

+#define SCB_SHCSR_PENDSVACT_Msk            (1UL << SCB_SHCSR_PENDSVACT_Pos)               /*!< SCB SHCSR: PENDSVACT Mask */

+

+#define SCB_SHCSR_MONITORACT_Pos            8                                             /*!< SCB SHCSR: MONITORACT Position */

+#define SCB_SHCSR_MONITORACT_Msk           (1UL << SCB_SHCSR_MONITORACT_Pos)              /*!< SCB SHCSR: MONITORACT Mask */

+

+#define SCB_SHCSR_SVCALLACT_Pos             7                                             /*!< SCB SHCSR: SVCALLACT Position */

+#define SCB_SHCSR_SVCALLACT_Msk            (1UL << SCB_SHCSR_SVCALLACT_Pos)               /*!< SCB SHCSR: SVCALLACT Mask */

+

+#define SCB_SHCSR_USGFAULTACT_Pos           3                                             /*!< SCB SHCSR: USGFAULTACT Position */

+#define SCB_SHCSR_USGFAULTACT_Msk          (1UL << SCB_SHCSR_USGFAULTACT_Pos)             /*!< SCB SHCSR: USGFAULTACT Mask */

+

+#define SCB_SHCSR_BUSFAULTACT_Pos           1                                             /*!< SCB SHCSR: BUSFAULTACT Position */

+#define SCB_SHCSR_BUSFAULTACT_Msk          (1UL << SCB_SHCSR_BUSFAULTACT_Pos)             /*!< SCB SHCSR: BUSFAULTACT Mask */

+

+#define SCB_SHCSR_MEMFAULTACT_Pos           0                                             /*!< SCB SHCSR: MEMFAULTACT Position */

+#define SCB_SHCSR_MEMFAULTACT_Msk          (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/)         /*!< SCB SHCSR: MEMFAULTACT Mask */

+

+/* SCB Configurable Fault Status Registers Definitions */

+#define SCB_CFSR_USGFAULTSR_Pos            16                                             /*!< SCB CFSR: Usage Fault Status Register Position */

+#define SCB_CFSR_USGFAULTSR_Msk            (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos)          /*!< SCB CFSR: Usage Fault Status Register Mask */

+

+#define SCB_CFSR_BUSFAULTSR_Pos             8                                             /*!< SCB CFSR: Bus Fault Status Register Position */

+#define SCB_CFSR_BUSFAULTSR_Msk            (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos)            /*!< SCB CFSR: Bus Fault Status Register Mask */

+

+#define SCB_CFSR_MEMFAULTSR_Pos             0                                             /*!< SCB CFSR: Memory Manage Fault Status Register Position */

+#define SCB_CFSR_MEMFAULTSR_Msk            (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/)        /*!< SCB CFSR: Memory Manage Fault Status Register Mask */

+

+/* SCB Hard Fault Status Registers Definitions */

+#define SCB_HFSR_DEBUGEVT_Pos              31                                             /*!< SCB HFSR: DEBUGEVT Position */

+#define SCB_HFSR_DEBUGEVT_Msk              (1UL << SCB_HFSR_DEBUGEVT_Pos)                 /*!< SCB HFSR: DEBUGEVT Mask */

+

+#define SCB_HFSR_FORCED_Pos                30                                             /*!< SCB HFSR: FORCED Position */

+#define SCB_HFSR_FORCED_Msk                (1UL << SCB_HFSR_FORCED_Pos)                   /*!< SCB HFSR: FORCED Mask */

+

+#define SCB_HFSR_VECTTBL_Pos                1                                             /*!< SCB HFSR: VECTTBL Position */

+#define SCB_HFSR_VECTTBL_Msk               (1UL << SCB_HFSR_VECTTBL_Pos)                  /*!< SCB HFSR: VECTTBL Mask */

+

+/* SCB Debug Fault Status Register Definitions */

+#define SCB_DFSR_EXTERNAL_Pos               4                                             /*!< SCB DFSR: EXTERNAL Position */

+#define SCB_DFSR_EXTERNAL_Msk              (1UL << SCB_DFSR_EXTERNAL_Pos)                 /*!< SCB DFSR: EXTERNAL Mask */

+

+#define SCB_DFSR_VCATCH_Pos                 3                                             /*!< SCB DFSR: VCATCH Position */

+#define SCB_DFSR_VCATCH_Msk                (1UL << SCB_DFSR_VCATCH_Pos)                   /*!< SCB DFSR: VCATCH Mask */

+

+#define SCB_DFSR_DWTTRAP_Pos                2                                             /*!< SCB DFSR: DWTTRAP Position */

+#define SCB_DFSR_DWTTRAP_Msk               (1UL << SCB_DFSR_DWTTRAP_Pos)                  /*!< SCB DFSR: DWTTRAP Mask */

+

+#define SCB_DFSR_BKPT_Pos                   1                                             /*!< SCB DFSR: BKPT Position */

+#define SCB_DFSR_BKPT_Msk                  (1UL << SCB_DFSR_BKPT_Pos)                     /*!< SCB DFSR: BKPT Mask */

+

+#define SCB_DFSR_HALTED_Pos                 0                                             /*!< SCB DFSR: HALTED Position */

+#define SCB_DFSR_HALTED_Msk                (1UL /*<< SCB_DFSR_HALTED_Pos*/)               /*!< SCB DFSR: HALTED Mask */

+

+/*@} end of group CMSIS_SCB */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB)

+    \brief      Type definitions for the System Control and ID Register not in the SCB

+  @{

+ */

+

+/** \brief  Structure type to access the System Control and ID Register not in the SCB.

+ */

+typedef struct

+{

+       uint32_t RESERVED0[1];

+  __I  uint32_t ICTR;                    /*!< Offset: 0x004 (R/ )  Interrupt Controller Type Register      */

+  /* ACTLR is only declared when __CM3_REV >= 0x200; for earlier core
+     revisions the word at offset 0x008 is kept as a reserved placeholder
+     so the structure layout is identical either way. */
+#if ((defined __CM3_REV) && (__CM3_REV >= 0x200))

+  __IO uint32_t ACTLR;                   /*!< Offset: 0x008 (R/W)  Auxiliary Control Register      */

+#else

+       uint32_t RESERVED1[1];

+#endif

+} SCnSCB_Type;

+

+/* Interrupt Controller Type Register Definitions */

+#define SCnSCB_ICTR_INTLINESNUM_Pos         0                                          /*!< ICTR: INTLINESNUM Position */

+#define SCnSCB_ICTR_INTLINESNUM_Msk        (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/)  /*!< ICTR: INTLINESNUM Mask */

+

+/* Auxiliary Control Register Definitions */

+

+#define SCnSCB_ACTLR_DISFOLD_Pos            2                                          /*!< ACTLR: DISFOLD Position */

+#define SCnSCB_ACTLR_DISFOLD_Msk           (1UL << SCnSCB_ACTLR_DISFOLD_Pos)           /*!< ACTLR: DISFOLD Mask */

+

+#define SCnSCB_ACTLR_DISDEFWBUF_Pos         1                                          /*!< ACTLR: DISDEFWBUF Position */

+#define SCnSCB_ACTLR_DISDEFWBUF_Msk        (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos)        /*!< ACTLR: DISDEFWBUF Mask */

+

+#define SCnSCB_ACTLR_DISMCYCINT_Pos         0                                          /*!< ACTLR: DISMCYCINT Position */

+#define SCnSCB_ACTLR_DISMCYCINT_Msk        (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/)    /*!< ACTLR: DISMCYCINT Mask */

+

+/*@} end of group CMSIS_SCnotSCB */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SysTick     System Tick Timer (SysTick)

+    \brief      Type definitions for the System Timer Registers.

+  @{

+ */

+

+/** \brief  Structure type to access the System Timer (SysTick).

+ */

+typedef struct

+{

+  __IO uint32_t CTRL;                    /*!< Offset: 0x000 (R/W)  SysTick Control and Status Register */

+  __IO uint32_t LOAD;                    /*!< Offset: 0x004 (R/W)  SysTick Reload Value Register       */

+  __IO uint32_t VAL;                     /*!< Offset: 0x008 (R/W)  SysTick Current Value Register      */

+  __I  uint32_t CALIB;                   /*!< Offset: 0x00C (R/ )  SysTick Calibration Register        */

+} SysTick_Type;

+

+/* NOTE: the RELOAD, CURRENT and TENMS fields below are 24-bit wide, as shown
+   by their 0xFFFFFFUL masks; the single-bit fields use 1UL masks. */
+
+/* SysTick Control / Status Register Definitions */

+#define SysTick_CTRL_COUNTFLAG_Pos         16                                             /*!< SysTick CTRL: COUNTFLAG Position */

+#define SysTick_CTRL_COUNTFLAG_Msk         (1UL << SysTick_CTRL_COUNTFLAG_Pos)            /*!< SysTick CTRL: COUNTFLAG Mask */

+

+#define SysTick_CTRL_CLKSOURCE_Pos          2                                             /*!< SysTick CTRL: CLKSOURCE Position */

+#define SysTick_CTRL_CLKSOURCE_Msk         (1UL << SysTick_CTRL_CLKSOURCE_Pos)            /*!< SysTick CTRL: CLKSOURCE Mask */

+

+#define SysTick_CTRL_TICKINT_Pos            1                                             /*!< SysTick CTRL: TICKINT Position */

+#define SysTick_CTRL_TICKINT_Msk           (1UL << SysTick_CTRL_TICKINT_Pos)              /*!< SysTick CTRL: TICKINT Mask */

+

+#define SysTick_CTRL_ENABLE_Pos             0                                             /*!< SysTick CTRL: ENABLE Position */

+#define SysTick_CTRL_ENABLE_Msk            (1UL /*<< SysTick_CTRL_ENABLE_Pos*/)           /*!< SysTick CTRL: ENABLE Mask */

+

+/* SysTick Reload Register Definitions */

+#define SysTick_LOAD_RELOAD_Pos             0                                             /*!< SysTick LOAD: RELOAD Position */

+#define SysTick_LOAD_RELOAD_Msk            (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/)    /*!< SysTick LOAD: RELOAD Mask */

+

+/* SysTick Current Register Definitions */

+#define SysTick_VAL_CURRENT_Pos             0                                             /*!< SysTick VAL: CURRENT Position */

+#define SysTick_VAL_CURRENT_Msk            (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/)    /*!< SysTick VAL: CURRENT Mask */

+

+/* SysTick Calibration Register Definitions */

+#define SysTick_CALIB_NOREF_Pos            31                                             /*!< SysTick CALIB: NOREF Position */

+#define SysTick_CALIB_NOREF_Msk            (1UL << SysTick_CALIB_NOREF_Pos)               /*!< SysTick CALIB: NOREF Mask */

+

+#define SysTick_CALIB_SKEW_Pos             30                                             /*!< SysTick CALIB: SKEW Position */

+#define SysTick_CALIB_SKEW_Msk             (1UL << SysTick_CALIB_SKEW_Pos)                /*!< SysTick CALIB: SKEW Mask */

+

+#define SysTick_CALIB_TENMS_Pos             0                                             /*!< SysTick CALIB: TENMS Position */

+#define SysTick_CALIB_TENMS_Msk            (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/)    /*!< SysTick CALIB: TENMS Mask */

+

+/*@} end of group CMSIS_SysTick */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_ITM     Instrumentation Trace Macrocell (ITM)

+    \brief      Type definitions for the Instrumentation Trace Macrocell (ITM)

+  @{

+ */

+

+/** \brief  Structure type to access the Instrumentation Trace Macrocell Register (ITM).

+ */

+typedef struct

+{

+  /* Each PORT[n] entry is a write-only union, so the same stimulus port can
+     be written with an 8-, 16- or 32-bit access. */
+  __O  union

+  {

+    __O  uint8_t    u8;                  /*!< Offset: 0x000 ( /W)  ITM Stimulus Port 8-bit                   */

+    __O  uint16_t   u16;                 /*!< Offset: 0x000 ( /W)  ITM Stimulus Port 16-bit                  */

+    __O  uint32_t   u32;                 /*!< Offset: 0x000 ( /W)  ITM Stimulus Port 32-bit                  */

+  }  PORT [32];                          /*!< Offset: 0x000 ( /W)  ITM Stimulus Port Registers               */

+       uint32_t RESERVED0[864];

+  __IO uint32_t TER;                     /*!< Offset: 0xE00 (R/W)  ITM Trace Enable Register                 */

+       uint32_t RESERVED1[15];

+  __IO uint32_t TPR;                     /*!< Offset: 0xE40 (R/W)  ITM Trace Privilege Register              */

+       uint32_t RESERVED2[15];

+  __IO uint32_t TCR;                     /*!< Offset: 0xE80 (R/W)  ITM Trace Control Register                */

+       uint32_t RESERVED3[29];

+  __O  uint32_t IWR;                     /*!< Offset: 0xEF8 ( /W)  ITM Integration Write Register            */

+  __I  uint32_t IRR;                     /*!< Offset: 0xEFC (R/ )  ITM Integration Read Register             */

+  __IO uint32_t IMCR;                    /*!< Offset: 0xF00 (R/W)  ITM Integration Mode Control Register     */

+       uint32_t RESERVED4[43];

+  __O  uint32_t LAR;                     /*!< Offset: 0xFB0 ( /W)  ITM Lock Access Register                  */

+  __I  uint32_t LSR;                     /*!< Offset: 0xFB4 (R/ )  ITM Lock Status Register                  */

+       uint32_t RESERVED5[6];

+  __I  uint32_t PID4;                    /*!< Offset: 0xFD0 (R/ )  ITM Peripheral Identification Register #4 */

+  __I  uint32_t PID5;                    /*!< Offset: 0xFD4 (R/ )  ITM Peripheral Identification Register #5 */

+  __I  uint32_t PID6;                    /*!< Offset: 0xFD8 (R/ )  ITM Peripheral Identification Register #6 */

+  __I  uint32_t PID7;                    /*!< Offset: 0xFDC (R/ )  ITM Peripheral Identification Register #7 */

+  __I  uint32_t PID0;                    /*!< Offset: 0xFE0 (R/ )  ITM Peripheral Identification Register #0 */

+  __I  uint32_t PID1;                    /*!< Offset: 0xFE4 (R/ )  ITM Peripheral Identification Register #1 */

+  __I  uint32_t PID2;                    /*!< Offset: 0xFE8 (R/ )  ITM Peripheral Identification Register #2 */

+  __I  uint32_t PID3;                    /*!< Offset: 0xFEC (R/ )  ITM Peripheral Identification Register #3 */

+  __I  uint32_t CID0;                    /*!< Offset: 0xFF0 (R/ )  ITM Component  Identification Register #0 */

+  __I  uint32_t CID1;                    /*!< Offset: 0xFF4 (R/ )  ITM Component  Identification Register #1 */

+  __I  uint32_t CID2;                    /*!< Offset: 0xFF8 (R/ )  ITM Component  Identification Register #2 */

+  __I  uint32_t CID3;                    /*!< Offset: 0xFFC (R/ )  ITM Component  Identification Register #3 */

+} ITM_Type;

+

+/* ITM Trace Privilege Register Definitions */

+#define ITM_TPR_PRIVMASK_Pos                0                                             /*!< ITM TPR: PRIVMASK Position */

+#define ITM_TPR_PRIVMASK_Msk               (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/)            /*!< ITM TPR: PRIVMASK Mask */

+

+/* ITM Trace Control Register Definitions */

+#define ITM_TCR_BUSY_Pos                   23                                             /*!< ITM TCR: BUSY Position */

+#define ITM_TCR_BUSY_Msk                   (1UL << ITM_TCR_BUSY_Pos)                      /*!< ITM TCR: BUSY Mask */

+

+#define ITM_TCR_TraceBusID_Pos             16                                             /*!< ITM TCR: ATBID Position */

+#define ITM_TCR_TraceBusID_Msk             (0x7FUL << ITM_TCR_TraceBusID_Pos)             /*!< ITM TCR: ATBID Mask */

+

+#define ITM_TCR_GTSFREQ_Pos                10                                             /*!< ITM TCR: Global timestamp frequency Position */

+#define ITM_TCR_GTSFREQ_Msk                (3UL << ITM_TCR_GTSFREQ_Pos)                   /*!< ITM TCR: Global timestamp frequency Mask */

+

+#define ITM_TCR_TSPrescale_Pos              8                                             /*!< ITM TCR: TSPrescale Position */

+#define ITM_TCR_TSPrescale_Msk             (3UL << ITM_TCR_TSPrescale_Pos)                /*!< ITM TCR: TSPrescale Mask */

+

+#define ITM_TCR_SWOENA_Pos                  4                                             /*!< ITM TCR: SWOENA Position */

+#define ITM_TCR_SWOENA_Msk                 (1UL << ITM_TCR_SWOENA_Pos)                    /*!< ITM TCR: SWOENA Mask */

+

+#define ITM_TCR_DWTENA_Pos                  3                                             /*!< ITM TCR: DWTENA Position */

+#define ITM_TCR_DWTENA_Msk                 (1UL << ITM_TCR_DWTENA_Pos)                    /*!< ITM TCR: DWTENA Mask */

+

+#define ITM_TCR_SYNCENA_Pos                 2                                             /*!< ITM TCR: SYNCENA Position */

+#define ITM_TCR_SYNCENA_Msk                (1UL << ITM_TCR_SYNCENA_Pos)                   /*!< ITM TCR: SYNCENA Mask */

+

+#define ITM_TCR_TSENA_Pos                   1                                             /*!< ITM TCR: TSENA Position */

+#define ITM_TCR_TSENA_Msk                  (1UL << ITM_TCR_TSENA_Pos)                     /*!< ITM TCR: TSENA Mask */

+

+#define ITM_TCR_ITMENA_Pos                  0                                             /*!< ITM TCR: ITM Enable bit Position */

+#define ITM_TCR_ITMENA_Msk                 (1UL /*<< ITM_TCR_ITMENA_Pos*/)                /*!< ITM TCR: ITM Enable bit Mask */

+

+/* ITM Integration Write Register Definitions */

+#define ITM_IWR_ATVALIDM_Pos                0                                             /*!< ITM IWR: ATVALIDM Position */

+#define ITM_IWR_ATVALIDM_Msk               (1UL /*<< ITM_IWR_ATVALIDM_Pos*/)              /*!< ITM IWR: ATVALIDM Mask */

+

+/* ITM Integration Read Register Definitions */

+#define ITM_IRR_ATREADYM_Pos                0                                             /*!< ITM IRR: ATREADYM Position */

+#define ITM_IRR_ATREADYM_Msk               (1UL /*<< ITM_IRR_ATREADYM_Pos*/)              /*!< ITM IRR: ATREADYM Mask */

+

+/* ITM Integration Mode Control Register Definitions */

+#define ITM_IMCR_INTEGRATION_Pos            0                                             /*!< ITM IMCR: INTEGRATION Position */

+#define ITM_IMCR_INTEGRATION_Msk           (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/)          /*!< ITM IMCR: INTEGRATION Mask */

+

+/* ITM Lock Status Register Definitions */

+#define ITM_LSR_ByteAcc_Pos                 2                                             /*!< ITM LSR: ByteAcc Position */

+#define ITM_LSR_ByteAcc_Msk                (1UL << ITM_LSR_ByteAcc_Pos)                   /*!< ITM LSR: ByteAcc Mask */

+

+#define ITM_LSR_Access_Pos                  1                                             /*!< ITM LSR: Access Position */

+#define ITM_LSR_Access_Msk                 (1UL << ITM_LSR_Access_Pos)                    /*!< ITM LSR: Access Mask */

+

+#define ITM_LSR_Present_Pos                 0                                             /*!< ITM LSR: Present Position */

+#define ITM_LSR_Present_Msk                (1UL /*<< ITM_LSR_Present_Pos*/)               /*!< ITM LSR: Present Mask */

+

+/*@}*/ /* end of group CMSIS_ITM */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_DWT     Data Watchpoint and Trace (DWT)

+    \brief      Type definitions for the Data Watchpoint and Trace (DWT)

+  @{

+ */

+

+/** \brief  Structure type to access the Data Watchpoint and Trace Register (DWT).

+ */

+typedef struct

+{

+  __IO uint32_t CTRL;                    /*!< Offset: 0x000 (R/W)  Control Register                          */

+  __IO uint32_t CYCCNT;                  /*!< Offset: 0x004 (R/W)  Cycle Count Register                      */

+  __IO uint32_t CPICNT;                  /*!< Offset: 0x008 (R/W)  CPI Count Register                        */

+  __IO uint32_t EXCCNT;                  /*!< Offset: 0x00C (R/W)  Exception Overhead Count Register         */

+  __IO uint32_t SLEEPCNT;                /*!< Offset: 0x010 (R/W)  Sleep Count Register                      */

+  __IO uint32_t LSUCNT;                  /*!< Offset: 0x014 (R/W)  LSU Count Register                        */

+  __IO uint32_t FOLDCNT;                 /*!< Offset: 0x018 (R/W)  Folded-instruction Count Register         */

+  __I  uint32_t PCSR;                    /*!< Offset: 0x01C (R/ )  Program Counter Sample Register           */

+  /* Four comparator register sets follow (COMP/MASK/FUNCTION 0..3), each set
+     occupying four words at 0x20, 0x30, 0x40 and 0x50 with a reserved word
+     at the end of each set. */
+  __IO uint32_t COMP0;                   /*!< Offset: 0x020 (R/W)  Comparator Register 0                     */

+  __IO uint32_t MASK0;                   /*!< Offset: 0x024 (R/W)  Mask Register 0                           */

+  __IO uint32_t FUNCTION0;               /*!< Offset: 0x028 (R/W)  Function Register 0                       */

+       uint32_t RESERVED0[1];

+  __IO uint32_t COMP1;                   /*!< Offset: 0x030 (R/W)  Comparator Register 1                     */

+  __IO uint32_t MASK1;                   /*!< Offset: 0x034 (R/W)  Mask Register 1                           */

+  __IO uint32_t FUNCTION1;               /*!< Offset: 0x038 (R/W)  Function Register 1                       */

+       uint32_t RESERVED1[1];

+  __IO uint32_t COMP2;                   /*!< Offset: 0x040 (R/W)  Comparator Register 2                     */

+  __IO uint32_t MASK2;                   /*!< Offset: 0x044 (R/W)  Mask Register 2                           */

+  __IO uint32_t FUNCTION2;               /*!< Offset: 0x048 (R/W)  Function Register 2                       */

+       uint32_t RESERVED2[1];

+  __IO uint32_t COMP3;                   /*!< Offset: 0x050 (R/W)  Comparator Register 3                     */

+  __IO uint32_t MASK3;                   /*!< Offset: 0x054 (R/W)  Mask Register 3                           */

+  __IO uint32_t FUNCTION3;               /*!< Offset: 0x058 (R/W)  Function Register 3                       */

+} DWT_Type;

+

+/* DWT Control Register Definitions */

+#define DWT_CTRL_NUMCOMP_Pos               28                                          /*!< DWT CTRL: NUMCOMP Position */

+#define DWT_CTRL_NUMCOMP_Msk               (0xFUL << DWT_CTRL_NUMCOMP_Pos)             /*!< DWT CTRL: NUMCOMP Mask */

+

+#define DWT_CTRL_NOTRCPKT_Pos              27                                          /*!< DWT CTRL: NOTRCPKT Position */

+#define DWT_CTRL_NOTRCPKT_Msk              (0x1UL << DWT_CTRL_NOTRCPKT_Pos)            /*!< DWT CTRL: NOTRCPKT Mask */

+

+#define DWT_CTRL_NOEXTTRIG_Pos             26                                          /*!< DWT CTRL: NOEXTTRIG Position */

+#define DWT_CTRL_NOEXTTRIG_Msk             (0x1UL << DWT_CTRL_NOEXTTRIG_Pos)           /*!< DWT CTRL: NOEXTTRIG Mask */

+

+#define DWT_CTRL_NOCYCCNT_Pos              25                                          /*!< DWT CTRL: NOCYCCNT Position */

+#define DWT_CTRL_NOCYCCNT_Msk              (0x1UL << DWT_CTRL_NOCYCCNT_Pos)            /*!< DWT CTRL: NOCYCCNT Mask */

+

+#define DWT_CTRL_NOPRFCNT_Pos              24                                          /*!< DWT CTRL: NOPRFCNT Position */

+#define DWT_CTRL_NOPRFCNT_Msk              (0x1UL << DWT_CTRL_NOPRFCNT_Pos)            /*!< DWT CTRL: NOPRFCNT Mask */

+

+#define DWT_CTRL_CYCEVTENA_Pos             22                                          /*!< DWT CTRL: CYCEVTENA Position */

+#define DWT_CTRL_CYCEVTENA_Msk             (0x1UL << DWT_CTRL_CYCEVTENA_Pos)           /*!< DWT CTRL: CYCEVTENA Mask */

+

+#define DWT_CTRL_FOLDEVTENA_Pos            21                                          /*!< DWT CTRL: FOLDEVTENA Position */

+#define DWT_CTRL_FOLDEVTENA_Msk            (0x1UL << DWT_CTRL_FOLDEVTENA_Pos)          /*!< DWT CTRL: FOLDEVTENA Mask */

+

+#define DWT_CTRL_LSUEVTENA_Pos             20                                          /*!< DWT CTRL: LSUEVTENA Position */

+#define DWT_CTRL_LSUEVTENA_Msk             (0x1UL << DWT_CTRL_LSUEVTENA_Pos)           /*!< DWT CTRL: LSUEVTENA Mask */

+

+#define DWT_CTRL_SLEEPEVTENA_Pos           19                                          /*!< DWT CTRL: SLEEPEVTENA Position */

+#define DWT_CTRL_SLEEPEVTENA_Msk           (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos)         /*!< DWT CTRL: SLEEPEVTENA Mask */

+

+#define DWT_CTRL_EXCEVTENA_Pos             18                                          /*!< DWT CTRL: EXCEVTENA Position */

+#define DWT_CTRL_EXCEVTENA_Msk             (0x1UL << DWT_CTRL_EXCEVTENA_Pos)           /*!< DWT CTRL: EXCEVTENA Mask */

+

+#define DWT_CTRL_CPIEVTENA_Pos             17                                          /*!< DWT CTRL: CPIEVTENA Position */

+#define DWT_CTRL_CPIEVTENA_Msk             (0x1UL << DWT_CTRL_CPIEVTENA_Pos)           /*!< DWT CTRL: CPIEVTENA Mask */

+

+#define DWT_CTRL_EXCTRCENA_Pos             16                                          /*!< DWT CTRL: EXCTRCENA Position */

+#define DWT_CTRL_EXCTRCENA_Msk             (0x1UL << DWT_CTRL_EXCTRCENA_Pos)           /*!< DWT CTRL: EXCTRCENA Mask */

+

+#define DWT_CTRL_PCSAMPLENA_Pos            12                                          /*!< DWT CTRL: PCSAMPLENA Position */

+#define DWT_CTRL_PCSAMPLENA_Msk            (0x1UL << DWT_CTRL_PCSAMPLENA_Pos)          /*!< DWT CTRL: PCSAMPLENA Mask */

+

+#define DWT_CTRL_SYNCTAP_Pos               10                                          /*!< DWT CTRL: SYNCTAP Position */

+#define DWT_CTRL_SYNCTAP_Msk               (0x3UL << DWT_CTRL_SYNCTAP_Pos)             /*!< DWT CTRL: SYNCTAP Mask */

+

+#define DWT_CTRL_CYCTAP_Pos                 9                                          /*!< DWT CTRL: CYCTAP Position */

+#define DWT_CTRL_CYCTAP_Msk                (0x1UL << DWT_CTRL_CYCTAP_Pos)              /*!< DWT CTRL: CYCTAP Mask */

+

+#define DWT_CTRL_POSTINIT_Pos               5                                          /*!< DWT CTRL: POSTINIT Position */

+#define DWT_CTRL_POSTINIT_Msk              (0xFUL << DWT_CTRL_POSTINIT_Pos)            /*!< DWT CTRL: POSTINIT Mask */

+

+#define DWT_CTRL_POSTPRESET_Pos             1                                          /*!< DWT CTRL: POSTPRESET Position */

+#define DWT_CTRL_POSTPRESET_Msk            (0xFUL << DWT_CTRL_POSTPRESET_Pos)          /*!< DWT CTRL: POSTPRESET Mask */

+

+#define DWT_CTRL_CYCCNTENA_Pos              0                                          /*!< DWT CTRL: CYCCNTENA Position */

+#define DWT_CTRL_CYCCNTENA_Msk             (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/)       /*!< DWT CTRL: CYCCNTENA Mask */

+

+/* DWT CPI Count Register Definitions */

+#define DWT_CPICNT_CPICNT_Pos               0                                          /*!< DWT CPICNT: CPICNT Position */

+#define DWT_CPICNT_CPICNT_Msk              (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/)       /*!< DWT CPICNT: CPICNT Mask */

+

+/* DWT Exception Overhead Count Register Definitions */

+#define DWT_EXCCNT_EXCCNT_Pos               0                                          /*!< DWT EXCCNT: EXCCNT Position */

+#define DWT_EXCCNT_EXCCNT_Msk              (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/)       /*!< DWT EXCCNT: EXCCNT Mask */

+

+/* DWT Sleep Count Register Definitions */

+#define DWT_SLEEPCNT_SLEEPCNT_Pos           0                                          /*!< DWT SLEEPCNT: SLEEPCNT Position */

+#define DWT_SLEEPCNT_SLEEPCNT_Msk          (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/)   /*!< DWT SLEEPCNT: SLEEPCNT Mask */

+

+/* DWT LSU Count Register Definitions */

+#define DWT_LSUCNT_LSUCNT_Pos               0                                          /*!< DWT LSUCNT: LSUCNT Position */

+#define DWT_LSUCNT_LSUCNT_Msk              (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/)       /*!< DWT LSUCNT: LSUCNT Mask */

+

+/* DWT Folded-instruction Count Register Definitions */

+#define DWT_FOLDCNT_FOLDCNT_Pos             0                                          /*!< DWT FOLDCNT: FOLDCNT Position */

+#define DWT_FOLDCNT_FOLDCNT_Msk            (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/)     /*!< DWT FOLDCNT: FOLDCNT Mask */

+

+/* DWT Comparator Mask Register Definitions */

+#define DWT_MASK_MASK_Pos                   0                                          /*!< DWT MASK: MASK Position */

+#define DWT_MASK_MASK_Msk                  (0x1FUL /*<< DWT_MASK_MASK_Pos*/)           /*!< DWT MASK: MASK Mask */

+

+/* DWT Comparator Function Register Definitions */

+#define DWT_FUNCTION_MATCHED_Pos           24                                          /*!< DWT FUNCTION: MATCHED Position */

+#define DWT_FUNCTION_MATCHED_Msk           (0x1UL << DWT_FUNCTION_MATCHED_Pos)         /*!< DWT FUNCTION: MATCHED Mask */

+

+#define DWT_FUNCTION_DATAVADDR1_Pos        16                                          /*!< DWT FUNCTION: DATAVADDR1 Position */

+#define DWT_FUNCTION_DATAVADDR1_Msk        (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos)      /*!< DWT FUNCTION: DATAVADDR1 Mask */

+

+#define DWT_FUNCTION_DATAVADDR0_Pos        12                                          /*!< DWT FUNCTION: DATAVADDR0 Position */

+#define DWT_FUNCTION_DATAVADDR0_Msk        (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos)      /*!< DWT FUNCTION: DATAVADDR0 Mask */

+

+#define DWT_FUNCTION_DATAVSIZE_Pos         10                                          /*!< DWT FUNCTION: DATAVSIZE Position */

+#define DWT_FUNCTION_DATAVSIZE_Msk         (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos)       /*!< DWT FUNCTION: DATAVSIZE Mask */

+

+#define DWT_FUNCTION_LNK1ENA_Pos            9                                          /*!< DWT FUNCTION: LNK1ENA Position */

+#define DWT_FUNCTION_LNK1ENA_Msk           (0x1UL << DWT_FUNCTION_LNK1ENA_Pos)         /*!< DWT FUNCTION: LNK1ENA Mask */

+

+#define DWT_FUNCTION_DATAVMATCH_Pos         8                                          /*!< DWT FUNCTION: DATAVMATCH Position */

+#define DWT_FUNCTION_DATAVMATCH_Msk        (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos)      /*!< DWT FUNCTION: DATAVMATCH Mask */

+

+#define DWT_FUNCTION_CYCMATCH_Pos           7                                          /*!< DWT FUNCTION: CYCMATCH Position */

+#define DWT_FUNCTION_CYCMATCH_Msk          (0x1UL << DWT_FUNCTION_CYCMATCH_Pos)        /*!< DWT FUNCTION: CYCMATCH Mask */

+

+#define DWT_FUNCTION_EMITRANGE_Pos          5                                          /*!< DWT FUNCTION: EMITRANGE Position */

+#define DWT_FUNCTION_EMITRANGE_Msk         (0x1UL << DWT_FUNCTION_EMITRANGE_Pos)       /*!< DWT FUNCTION: EMITRANGE Mask */

+

+#define DWT_FUNCTION_FUNCTION_Pos           0                                          /*!< DWT FUNCTION: FUNCTION Position */

+#define DWT_FUNCTION_FUNCTION_Msk          (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/)    /*!< DWT FUNCTION: FUNCTION Mask */

+

+/*@}*/ /* end of group CMSIS_DWT */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_TPI     Trace Port Interface (TPI)

+    \brief      Type definitions for the Trace Port Interface (TPI)

+  @{

+ */

+

+/** \brief  Structure type to access the Trace Port Interface Register (TPI).

+ */

+typedef struct

+{

+  /* NOTE(review): SSPSR is declared __IO although its description says (R/ );
+     this matches the upstream CMSIS definition — confirm before changing. */
+  __IO uint32_t SSPSR;                   /*!< Offset: 0x000 (R/ )  Supported Parallel Port Size Register     */

+  __IO uint32_t CSPSR;                   /*!< Offset: 0x004 (R/W)  Current Parallel Port Size Register */

+       uint32_t RESERVED0[2];

+  __IO uint32_t ACPR;                    /*!< Offset: 0x010 (R/W)  Asynchronous Clock Prescaler Register */

+       uint32_t RESERVED1[55];

+  __IO uint32_t SPPR;                    /*!< Offset: 0x0F0 (R/W)  Selected Pin Protocol Register */

+       uint32_t RESERVED2[131];

+  __I  uint32_t FFSR;                    /*!< Offset: 0x300 (R/ )  Formatter and Flush Status Register */

+  __IO uint32_t FFCR;                    /*!< Offset: 0x304 (R/W)  Formatter and Flush Control Register */

+  __I  uint32_t FSCR;                    /*!< Offset: 0x308 (R/ )  Formatter Synchronization Counter Register */

+       uint32_t RESERVED3[759];

+  __I  uint32_t TRIGGER;                 /*!< Offset: 0xEE8 (R/ )  TRIGGER */

+  __I  uint32_t FIFO0;                   /*!< Offset: 0xEEC (R/ )  Integration ETM Data */

+  __I  uint32_t ITATBCTR2;               /*!< Offset: 0xEF0 (R/ )  ITATBCTR2 */

+       uint32_t RESERVED4[1];

+  __I  uint32_t ITATBCTR0;               /*!< Offset: 0xEF8 (R/ )  ITATBCTR0 */

+  __I  uint32_t FIFO1;                   /*!< Offset: 0xEFC (R/ )  Integration ITM Data */

+  __IO uint32_t ITCTRL;                  /*!< Offset: 0xF00 (R/W)  Integration Mode Control */

+       uint32_t RESERVED5[39];

+  __IO uint32_t CLAIMSET;                /*!< Offset: 0xFA0 (R/W)  Claim tag set */

+  __IO uint32_t CLAIMCLR;                /*!< Offset: 0xFA4 (R/W)  Claim tag clear */

+  /* NOTE(review): reserved-member numbering skips RESERVED6 here; this gap
+     exists in the upstream CMSIS header as well and is harmless. */
+       uint32_t RESERVED7[8];

+  __I  uint32_t DEVID;                   /*!< Offset: 0xFC8 (R/ )  TPIU_DEVID */

+  __I  uint32_t DEVTYPE;                 /*!< Offset: 0xFCC (R/ )  TPIU_DEVTYPE */

+} TPI_Type;

+

+/* TPI Asynchronous Clock Prescaler Register Definitions */

+#define TPI_ACPR_PRESCALER_Pos              0                                          /*!< TPI ACPR: PRESCALER Position */

+#define TPI_ACPR_PRESCALER_Msk             (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/)    /*!< TPI ACPR: PRESCALER Mask */

+

+/* TPI Selected Pin Protocol Register Definitions */

+#define TPI_SPPR_TXMODE_Pos                 0                                          /*!< TPI SPPR: TXMODE Position */

+#define TPI_SPPR_TXMODE_Msk                (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/)          /*!< TPI SPPR: TXMODE Mask */

+

+/* TPI Formatter and Flush Status Register Definitions */

+#define TPI_FFSR_FtNonStop_Pos              3                                          /*!< TPI FFSR: FtNonStop Position */

+#define TPI_FFSR_FtNonStop_Msk             (0x1UL << TPI_FFSR_FtNonStop_Pos)           /*!< TPI FFSR: FtNonStop Mask */

+

+#define TPI_FFSR_TCPresent_Pos              2                                          /*!< TPI FFSR: TCPresent Position */

+#define TPI_FFSR_TCPresent_Msk             (0x1UL << TPI_FFSR_TCPresent_Pos)           /*!< TPI FFSR: TCPresent Mask */

+

+#define TPI_FFSR_FtStopped_Pos              1                                          /*!< TPI FFSR: FtStopped Position */

+#define TPI_FFSR_FtStopped_Msk             (0x1UL << TPI_FFSR_FtStopped_Pos)           /*!< TPI FFSR: FtStopped Mask */

+

+#define TPI_FFSR_FlInProg_Pos               0                                          /*!< TPI FFSR: FlInProg Position */

+#define TPI_FFSR_FlInProg_Msk              (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/)        /*!< TPI FFSR: FlInProg Mask */

+

+/* TPI Formatter and Flush Control Register Definitions */

+#define TPI_FFCR_TrigIn_Pos                 8                                          /*!< TPI FFCR: TrigIn Position */

+#define TPI_FFCR_TrigIn_Msk                (0x1UL << TPI_FFCR_TrigIn_Pos)              /*!< TPI FFCR: TrigIn Mask */

+

+#define TPI_FFCR_EnFCont_Pos                1                                          /*!< TPI FFCR: EnFCont Position */

+#define TPI_FFCR_EnFCont_Msk               (0x1UL << TPI_FFCR_EnFCont_Pos)             /*!< TPI FFCR: EnFCont Mask */

+

+/* TPI TRIGGER Register Definitions */

+#define TPI_TRIGGER_TRIGGER_Pos             0                                          /*!< TPI TRIGGER: TRIGGER Position */

+#define TPI_TRIGGER_TRIGGER_Msk            (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/)      /*!< TPI TRIGGER: TRIGGER Mask */

+

+/* TPI Integration ETM Data Register Definitions (FIFO0) */

+#define TPI_FIFO0_ITM_ATVALID_Pos          29                                          /*!< TPI FIFO0: ITM_ATVALID Position */

+#define TPI_FIFO0_ITM_ATVALID_Msk          (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos)        /*!< TPI FIFO0: ITM_ATVALID Mask */

+

+#define TPI_FIFO0_ITM_bytecount_Pos        27                                          /*!< TPI FIFO0: ITM_bytecount Position */

+#define TPI_FIFO0_ITM_bytecount_Msk        (0x3UL << TPI_FIFO0_ITM_bytecount_Pos)      /*!< TPI FIFO0: ITM_bytecount Mask */

+

+#define TPI_FIFO0_ETM_ATVALID_Pos          26                                          /*!< TPI FIFO0: ETM_ATVALID Position */

+#define TPI_FIFO0_ETM_ATVALID_Msk          (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos)        /*!< TPI FIFO0: ETM_ATVALID Mask */

+

+#define TPI_FIFO0_ETM_bytecount_Pos        24                                          /*!< TPI FIFO0: ETM_bytecount Position */

+#define TPI_FIFO0_ETM_bytecount_Msk        (0x3UL << TPI_FIFO0_ETM_bytecount_Pos)      /*!< TPI FIFO0: ETM_bytecount Mask */

+

+#define TPI_FIFO0_ETM2_Pos                 16                                          /*!< TPI FIFO0: ETM2 Position */

+#define TPI_FIFO0_ETM2_Msk                 (0xFFUL << TPI_FIFO0_ETM2_Pos)              /*!< TPI FIFO0: ETM2 Mask */

+

+#define TPI_FIFO0_ETM1_Pos                  8                                          /*!< TPI FIFO0: ETM1 Position */

+#define TPI_FIFO0_ETM1_Msk                 (0xFFUL << TPI_FIFO0_ETM1_Pos)              /*!< TPI FIFO0: ETM1 Mask */

+

+#define TPI_FIFO0_ETM0_Pos                  0                                          /*!< TPI FIFO0: ETM0 Position */

+#define TPI_FIFO0_ETM0_Msk                 (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/)          /*!< TPI FIFO0: ETM0 Mask */

+

+/* TPI ITATBCTR2 Register Definitions */

+#define TPI_ITATBCTR2_ATREADY_Pos           0                                          /*!< TPI ITATBCTR2: ATREADY Position */

+#define TPI_ITATBCTR2_ATREADY_Msk          (0x1UL /*<< TPI_ITATBCTR2_ATREADY_Pos*/)    /*!< TPI ITATBCTR2: ATREADY Mask */

+

+/* TPI Integration ITM Data Register Definitions (FIFO1) */

+#define TPI_FIFO1_ITM_ATVALID_Pos          29                                          /*!< TPI FIFO1: ITM_ATVALID Position */

+#define TPI_FIFO1_ITM_ATVALID_Msk          (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos)        /*!< TPI FIFO1: ITM_ATVALID Mask */

+

+#define TPI_FIFO1_ITM_bytecount_Pos        27                                          /*!< TPI FIFO1: ITM_bytecount Position */

+#define TPI_FIFO1_ITM_bytecount_Msk        (0x3UL << TPI_FIFO1_ITM_bytecount_Pos)      /*!< TPI FIFO1: ITM_bytecount Mask */

+

+#define TPI_FIFO1_ETM_ATVALID_Pos          26                                          /*!< TPI FIFO1: ETM_ATVALID Position */

+#define TPI_FIFO1_ETM_ATVALID_Msk          (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos)        /*!< TPI FIFO1: ETM_ATVALID Mask */

+

+#define TPI_FIFO1_ETM_bytecount_Pos        24                                          /*!< TPI FIFO1: ETM_bytecount Position */

+#define TPI_FIFO1_ETM_bytecount_Msk        (0x3UL << TPI_FIFO1_ETM_bytecount_Pos)      /*!< TPI FIFO1: ETM_bytecount Mask */

+

+#define TPI_FIFO1_ITM2_Pos                 16                                          /*!< TPI FIFO1: ITM2 Position */

+#define TPI_FIFO1_ITM2_Msk                 (0xFFUL << TPI_FIFO1_ITM2_Pos)              /*!< TPI FIFO1: ITM2 Mask */

+

+#define TPI_FIFO1_ITM1_Pos                  8                                          /*!< TPI FIFO1: ITM1 Position */

+#define TPI_FIFO1_ITM1_Msk                 (0xFFUL << TPI_FIFO1_ITM1_Pos)              /*!< TPI FIFO1: ITM1 Mask */

+

+#define TPI_FIFO1_ITM0_Pos                  0                                          /*!< TPI FIFO1: ITM0 Position */

+#define TPI_FIFO1_ITM0_Msk                 (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/)          /*!< TPI FIFO1: ITM0 Mask */

+

+/* TPI ITATBCTR0 Register Definitions */

+#define TPI_ITATBCTR0_ATREADY_Pos           0                                          /*!< TPI ITATBCTR0: ATREADY Position */

+#define TPI_ITATBCTR0_ATREADY_Msk          (0x1UL /*<< TPI_ITATBCTR0_ATREADY_Pos*/)    /*!< TPI ITATBCTR0: ATREADY Mask */

+

+/* TPI Integration Mode Control Register Definitions */

+#define TPI_ITCTRL_Mode_Pos                 0                                          /*!< TPI ITCTRL: Mode Position */

+#define TPI_ITCTRL_Mode_Msk                (0x1UL /*<< TPI_ITCTRL_Mode_Pos*/)          /*!< TPI ITCTRL: Mode Mask */

+

+/* TPI DEVID Register Definitions */

+#define TPI_DEVID_NRZVALID_Pos             11                                          /*!< TPI DEVID: NRZVALID Position */

+#define TPI_DEVID_NRZVALID_Msk             (0x1UL << TPI_DEVID_NRZVALID_Pos)           /*!< TPI DEVID: NRZVALID Mask */

+

+#define TPI_DEVID_MANCVALID_Pos            10                                          /*!< TPI DEVID: MANCVALID Position */

+#define TPI_DEVID_MANCVALID_Msk            (0x1UL << TPI_DEVID_MANCVALID_Pos)          /*!< TPI DEVID: MANCVALID Mask */

+

+#define TPI_DEVID_PTINVALID_Pos             9                                          /*!< TPI DEVID: PTINVALID Position */

+#define TPI_DEVID_PTINVALID_Msk            (0x1UL << TPI_DEVID_PTINVALID_Pos)          /*!< TPI DEVID: PTINVALID Mask */

+

+#define TPI_DEVID_MinBufSz_Pos              6                                          /*!< TPI DEVID: MinBufSz Position */

+#define TPI_DEVID_MinBufSz_Msk             (0x7UL << TPI_DEVID_MinBufSz_Pos)           /*!< TPI DEVID: MinBufSz Mask */

+

+#define TPI_DEVID_AsynClkIn_Pos             5                                          /*!< TPI DEVID: AsynClkIn Position */

+#define TPI_DEVID_AsynClkIn_Msk            (0x1UL << TPI_DEVID_AsynClkIn_Pos)          /*!< TPI DEVID: AsynClkIn Mask */

+

+#define TPI_DEVID_NrTraceInput_Pos          0                                          /*!< TPI DEVID: NrTraceInput Position */

+#define TPI_DEVID_NrTraceInput_Msk         (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/)  /*!< TPI DEVID: NrTraceInput Mask */

+

+/* TPI DEVTYPE Register Definitions */

+#define TPI_DEVTYPE_MajorType_Pos           4                                          /*!< TPI DEVTYPE: MajorType Position */

+#define TPI_DEVTYPE_MajorType_Msk          (0xFUL << TPI_DEVTYPE_MajorType_Pos)        /*!< TPI DEVTYPE: MajorType Mask */

+

+#define TPI_DEVTYPE_SubType_Pos             0                                          /*!< TPI DEVTYPE: SubType Position */

+#define TPI_DEVTYPE_SubType_Msk            (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/)      /*!< TPI DEVTYPE: SubType Mask */

+

+/*@}*/ /* end of group CMSIS_TPI */

+

+

#if (__MPU_PRESENT == 1)
/** \ingroup  CMSIS_core_register
    \defgroup CMSIS_MPU     Memory Protection Unit (MPU)
    \brief      Type definitions for the Memory Protection Unit (MPU)
  @{
 */

/** \brief  Structure type to access the Memory Protection Unit (MPU).

    Register layout mirrors the MPU block at MPU_BASE (SCS_BASE + 0x0D90);
    member order and types must not change.
 */
typedef struct
{
  __I  uint32_t TYPE;                    /*!< Offset: 0x000 (R/ )  MPU Type Register                              */
  __IO uint32_t CTRL;                    /*!< Offset: 0x004 (R/W)  MPU Control Register                           */
  __IO uint32_t RNR;                     /*!< Offset: 0x008 (R/W)  MPU Region Number Register                     */
  __IO uint32_t RBAR;                    /*!< Offset: 0x00C (R/W)  MPU Region Base Address Register               */
  __IO uint32_t RASR;                    /*!< Offset: 0x010 (R/W)  MPU Region Attribute and Size Register         */
  __IO uint32_t RBAR_A1;                 /*!< Offset: 0x014 (R/W)  MPU Alias 1 Region Base Address Register       */
  __IO uint32_t RASR_A1;                 /*!< Offset: 0x018 (R/W)  MPU Alias 1 Region Attribute and Size Register */
  __IO uint32_t RBAR_A2;                 /*!< Offset: 0x01C (R/W)  MPU Alias 2 Region Base Address Register       */
  __IO uint32_t RASR_A2;                 /*!< Offset: 0x020 (R/W)  MPU Alias 2 Region Attribute and Size Register */
  __IO uint32_t RBAR_A3;                 /*!< Offset: 0x024 (R/W)  MPU Alias 3 Region Base Address Register       */
  __IO uint32_t RASR_A3;                 /*!< Offset: 0x028 (R/W)  MPU Alias 3 Region Attribute and Size Register */
} MPU_Type;

/* MPU Type Register */
#define MPU_TYPE_IREGION_Pos               16                                             /*!< MPU TYPE: IREGION Position */
#define MPU_TYPE_IREGION_Msk               (0xFFUL << MPU_TYPE_IREGION_Pos)               /*!< MPU TYPE: IREGION Mask */

#define MPU_TYPE_DREGION_Pos                8                                             /*!< MPU TYPE: DREGION Position */
#define MPU_TYPE_DREGION_Msk               (0xFFUL << MPU_TYPE_DREGION_Pos)               /*!< MPU TYPE: DREGION Mask */

#define MPU_TYPE_SEPARATE_Pos               0                                             /*!< MPU TYPE: SEPARATE Position */
#define MPU_TYPE_SEPARATE_Msk              (1UL /*<< MPU_TYPE_SEPARATE_Pos*/)             /*!< MPU TYPE: SEPARATE Mask */

/* MPU Control Register */
#define MPU_CTRL_PRIVDEFENA_Pos             2                                             /*!< MPU CTRL: PRIVDEFENA Position */
#define MPU_CTRL_PRIVDEFENA_Msk            (1UL << MPU_CTRL_PRIVDEFENA_Pos)               /*!< MPU CTRL: PRIVDEFENA Mask */

#define MPU_CTRL_HFNMIENA_Pos               1                                             /*!< MPU CTRL: HFNMIENA Position */
#define MPU_CTRL_HFNMIENA_Msk              (1UL << MPU_CTRL_HFNMIENA_Pos)                 /*!< MPU CTRL: HFNMIENA Mask */

#define MPU_CTRL_ENABLE_Pos                 0                                             /*!< MPU CTRL: ENABLE Position */
#define MPU_CTRL_ENABLE_Msk                (1UL /*<< MPU_CTRL_ENABLE_Pos*/)               /*!< MPU CTRL: ENABLE Mask */

/* MPU Region Number Register */
#define MPU_RNR_REGION_Pos                  0                                             /*!< MPU RNR: REGION Position */
#define MPU_RNR_REGION_Msk                 (0xFFUL /*<< MPU_RNR_REGION_Pos*/)             /*!< MPU RNR: REGION Mask */

/* MPU Region Base Address Register */
#define MPU_RBAR_ADDR_Pos                   5                                             /*!< MPU RBAR: ADDR Position */
#define MPU_RBAR_ADDR_Msk                  (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos)             /*!< MPU RBAR: ADDR Mask */

#define MPU_RBAR_VALID_Pos                  4                                             /*!< MPU RBAR: VALID Position */
#define MPU_RBAR_VALID_Msk                 (1UL << MPU_RBAR_VALID_Pos)                    /*!< MPU RBAR: VALID Mask */

#define MPU_RBAR_REGION_Pos                 0                                             /*!< MPU RBAR: REGION Position */
#define MPU_RBAR_REGION_Msk                (0xFUL /*<< MPU_RBAR_REGION_Pos*/)             /*!< MPU RBAR: REGION Mask */

/* MPU Region Attribute and Size Register */
#define MPU_RASR_ATTRS_Pos                 16                                             /*!< MPU RASR: MPU Region Attribute field Position */
#define MPU_RASR_ATTRS_Msk                 (0xFFFFUL << MPU_RASR_ATTRS_Pos)               /*!< MPU RASR: MPU Region Attribute field Mask */

#define MPU_RASR_XN_Pos                    28                                             /*!< MPU RASR: ATTRS.XN Position */
#define MPU_RASR_XN_Msk                    (1UL << MPU_RASR_XN_Pos)                       /*!< MPU RASR: ATTRS.XN Mask */

#define MPU_RASR_AP_Pos                    24                                             /*!< MPU RASR: ATTRS.AP Position */
#define MPU_RASR_AP_Msk                    (0x7UL << MPU_RASR_AP_Pos)                     /*!< MPU RASR: ATTRS.AP Mask */

#define MPU_RASR_TEX_Pos                   19                                             /*!< MPU RASR: ATTRS.TEX Position */
#define MPU_RASR_TEX_Msk                   (0x7UL << MPU_RASR_TEX_Pos)                    /*!< MPU RASR: ATTRS.TEX Mask */

#define MPU_RASR_S_Pos                     18                                             /*!< MPU RASR: ATTRS.S Position */
#define MPU_RASR_S_Msk                     (1UL << MPU_RASR_S_Pos)                        /*!< MPU RASR: ATTRS.S Mask */

#define MPU_RASR_C_Pos                     17                                             /*!< MPU RASR: ATTRS.C Position */
#define MPU_RASR_C_Msk                     (1UL << MPU_RASR_C_Pos)                        /*!< MPU RASR: ATTRS.C Mask */

#define MPU_RASR_B_Pos                     16                                             /*!< MPU RASR: ATTRS.B Position */
#define MPU_RASR_B_Msk                     (1UL << MPU_RASR_B_Pos)                        /*!< MPU RASR: ATTRS.B Mask */

#define MPU_RASR_SRD_Pos                    8                                             /*!< MPU RASR: Sub-Region Disable Position */
#define MPU_RASR_SRD_Msk                   (0xFFUL << MPU_RASR_SRD_Pos)                   /*!< MPU RASR: Sub-Region Disable Mask */

#define MPU_RASR_SIZE_Pos                   1                                             /*!< MPU RASR: Region Size Field Position */
#define MPU_RASR_SIZE_Msk                  (0x1FUL << MPU_RASR_SIZE_Pos)                  /*!< MPU RASR: Region Size Field Mask */

#define MPU_RASR_ENABLE_Pos                 0                                             /*!< MPU RASR: Region enable bit Position */
#define MPU_RASR_ENABLE_Msk                (1UL /*<< MPU_RASR_ENABLE_Pos*/)               /*!< MPU RASR: Region enable bit Mask */

/*@} end of group CMSIS_MPU */
#endif

+

+

/** \ingroup  CMSIS_core_register
    \defgroup CMSIS_CoreDebug       Core Debug Registers (CoreDebug)
    \brief      Type definitions for the Core Debug Registers
  @{
 */

/** \brief  Structure type to access the Core Debug Register (CoreDebug).

    Register layout mirrors the block at CoreDebug_BASE (0xE000EDF0);
    member order and types must not change.
 */
typedef struct
{
  __IO uint32_t DHCSR;                   /*!< Offset: 0x000 (R/W)  Debug Halting Control and Status Register    */
  __O  uint32_t DCRSR;                   /*!< Offset: 0x004 ( /W)  Debug Core Register Selector Register        */
  __IO uint32_t DCRDR;                   /*!< Offset: 0x008 (R/W)  Debug Core Register Data Register            */
  __IO uint32_t DEMCR;                   /*!< Offset: 0x00C (R/W)  Debug Exception and Monitor Control Register */
} CoreDebug_Type;

/* Debug Halting Control and Status Register */
/* NOTE: DBGKEY (bits [31:16] on write) intentionally shares positions with the
   S_* status fields, which occupy the upper half-word on read. */
#define CoreDebug_DHCSR_DBGKEY_Pos         16                                             /*!< CoreDebug DHCSR: DBGKEY Position */
#define CoreDebug_DHCSR_DBGKEY_Msk         (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos)       /*!< CoreDebug DHCSR: DBGKEY Mask */

#define CoreDebug_DHCSR_S_RESET_ST_Pos     25                                             /*!< CoreDebug DHCSR: S_RESET_ST Position */
#define CoreDebug_DHCSR_S_RESET_ST_Msk     (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos)        /*!< CoreDebug DHCSR: S_RESET_ST Mask */

#define CoreDebug_DHCSR_S_RETIRE_ST_Pos    24                                             /*!< CoreDebug DHCSR: S_RETIRE_ST Position */
#define CoreDebug_DHCSR_S_RETIRE_ST_Msk    (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos)       /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */

#define CoreDebug_DHCSR_S_LOCKUP_Pos       19                                             /*!< CoreDebug DHCSR: S_LOCKUP Position */
#define CoreDebug_DHCSR_S_LOCKUP_Msk       (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos)          /*!< CoreDebug DHCSR: S_LOCKUP Mask */

#define CoreDebug_DHCSR_S_SLEEP_Pos        18                                             /*!< CoreDebug DHCSR: S_SLEEP Position */
#define CoreDebug_DHCSR_S_SLEEP_Msk        (1UL << CoreDebug_DHCSR_S_SLEEP_Pos)           /*!< CoreDebug DHCSR: S_SLEEP Mask */

#define CoreDebug_DHCSR_S_HALT_Pos         17                                             /*!< CoreDebug DHCSR: S_HALT Position */
#define CoreDebug_DHCSR_S_HALT_Msk         (1UL << CoreDebug_DHCSR_S_HALT_Pos)            /*!< CoreDebug DHCSR: S_HALT Mask */

#define CoreDebug_DHCSR_S_REGRDY_Pos       16                                             /*!< CoreDebug DHCSR: S_REGRDY Position */
#define CoreDebug_DHCSR_S_REGRDY_Msk       (1UL << CoreDebug_DHCSR_S_REGRDY_Pos)          /*!< CoreDebug DHCSR: S_REGRDY Mask */

#define CoreDebug_DHCSR_C_SNAPSTALL_Pos     5                                             /*!< CoreDebug DHCSR: C_SNAPSTALL Position */
#define CoreDebug_DHCSR_C_SNAPSTALL_Msk    (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos)       /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */

#define CoreDebug_DHCSR_C_MASKINTS_Pos      3                                             /*!< CoreDebug DHCSR: C_MASKINTS Position */
#define CoreDebug_DHCSR_C_MASKINTS_Msk     (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos)        /*!< CoreDebug DHCSR: C_MASKINTS Mask */

#define CoreDebug_DHCSR_C_STEP_Pos          2                                             /*!< CoreDebug DHCSR: C_STEP Position */
#define CoreDebug_DHCSR_C_STEP_Msk         (1UL << CoreDebug_DHCSR_C_STEP_Pos)            /*!< CoreDebug DHCSR: C_STEP Mask */

#define CoreDebug_DHCSR_C_HALT_Pos          1                                             /*!< CoreDebug DHCSR: C_HALT Position */
#define CoreDebug_DHCSR_C_HALT_Msk         (1UL << CoreDebug_DHCSR_C_HALT_Pos)            /*!< CoreDebug DHCSR: C_HALT Mask */

#define CoreDebug_DHCSR_C_DEBUGEN_Pos       0                                             /*!< CoreDebug DHCSR: C_DEBUGEN Position */
#define CoreDebug_DHCSR_C_DEBUGEN_Msk      (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/)     /*!< CoreDebug DHCSR: C_DEBUGEN Mask */

/* Debug Core Register Selector Register */
#define CoreDebug_DCRSR_REGWnR_Pos         16                                             /*!< CoreDebug DCRSR: REGWnR Position */
#define CoreDebug_DCRSR_REGWnR_Msk         (1UL << CoreDebug_DCRSR_REGWnR_Pos)            /*!< CoreDebug DCRSR: REGWnR Mask */

#define CoreDebug_DCRSR_REGSEL_Pos          0                                             /*!< CoreDebug DCRSR: REGSEL Position */
#define CoreDebug_DCRSR_REGSEL_Msk         (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/)     /*!< CoreDebug DCRSR: REGSEL Mask */

/* Debug Exception and Monitor Control Register */
#define CoreDebug_DEMCR_TRCENA_Pos         24                                             /*!< CoreDebug DEMCR: TRCENA Position */
#define CoreDebug_DEMCR_TRCENA_Msk         (1UL << CoreDebug_DEMCR_TRCENA_Pos)            /*!< CoreDebug DEMCR: TRCENA Mask */

#define CoreDebug_DEMCR_MON_REQ_Pos        19                                             /*!< CoreDebug DEMCR: MON_REQ Position */
#define CoreDebug_DEMCR_MON_REQ_Msk        (1UL << CoreDebug_DEMCR_MON_REQ_Pos)           /*!< CoreDebug DEMCR: MON_REQ Mask */

#define CoreDebug_DEMCR_MON_STEP_Pos       18                                             /*!< CoreDebug DEMCR: MON_STEP Position */
#define CoreDebug_DEMCR_MON_STEP_Msk       (1UL << CoreDebug_DEMCR_MON_STEP_Pos)          /*!< CoreDebug DEMCR: MON_STEP Mask */

#define CoreDebug_DEMCR_MON_PEND_Pos       17                                             /*!< CoreDebug DEMCR: MON_PEND Position */
#define CoreDebug_DEMCR_MON_PEND_Msk       (1UL << CoreDebug_DEMCR_MON_PEND_Pos)          /*!< CoreDebug DEMCR: MON_PEND Mask */

#define CoreDebug_DEMCR_MON_EN_Pos         16                                             /*!< CoreDebug DEMCR: MON_EN Position */
#define CoreDebug_DEMCR_MON_EN_Msk         (1UL << CoreDebug_DEMCR_MON_EN_Pos)            /*!< CoreDebug DEMCR: MON_EN Mask */

#define CoreDebug_DEMCR_VC_HARDERR_Pos     10                                             /*!< CoreDebug DEMCR: VC_HARDERR Position */
#define CoreDebug_DEMCR_VC_HARDERR_Msk     (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos)        /*!< CoreDebug DEMCR: VC_HARDERR Mask */

#define CoreDebug_DEMCR_VC_INTERR_Pos       9                                             /*!< CoreDebug DEMCR: VC_INTERR Position */
#define CoreDebug_DEMCR_VC_INTERR_Msk      (1UL << CoreDebug_DEMCR_VC_INTERR_Pos)         /*!< CoreDebug DEMCR: VC_INTERR Mask */

#define CoreDebug_DEMCR_VC_BUSERR_Pos       8                                             /*!< CoreDebug DEMCR: VC_BUSERR Position */
#define CoreDebug_DEMCR_VC_BUSERR_Msk      (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos)         /*!< CoreDebug DEMCR: VC_BUSERR Mask */

#define CoreDebug_DEMCR_VC_STATERR_Pos      7                                             /*!< CoreDebug DEMCR: VC_STATERR Position */
#define CoreDebug_DEMCR_VC_STATERR_Msk     (1UL << CoreDebug_DEMCR_VC_STATERR_Pos)        /*!< CoreDebug DEMCR: VC_STATERR Mask */

#define CoreDebug_DEMCR_VC_CHKERR_Pos       6                                             /*!< CoreDebug DEMCR: VC_CHKERR Position */
#define CoreDebug_DEMCR_VC_CHKERR_Msk      (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos)         /*!< CoreDebug DEMCR: VC_CHKERR Mask */

#define CoreDebug_DEMCR_VC_NOCPERR_Pos      5                                             /*!< CoreDebug DEMCR: VC_NOCPERR Position */
#define CoreDebug_DEMCR_VC_NOCPERR_Msk     (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos)        /*!< CoreDebug DEMCR: VC_NOCPERR Mask */

#define CoreDebug_DEMCR_VC_MMERR_Pos        4                                             /*!< CoreDebug DEMCR: VC_MMERR Position */
#define CoreDebug_DEMCR_VC_MMERR_Msk       (1UL << CoreDebug_DEMCR_VC_MMERR_Pos)          /*!< CoreDebug DEMCR: VC_MMERR Mask */

#define CoreDebug_DEMCR_VC_CORERESET_Pos    0                                             /*!< CoreDebug DEMCR: VC_CORERESET Position */
#define CoreDebug_DEMCR_VC_CORERESET_Msk   (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/)  /*!< CoreDebug DEMCR: VC_CORERESET Mask */

/*@} end of group CMSIS_CoreDebug */

+

+

/** \ingroup    CMSIS_core_register
    \defgroup   CMSIS_core_base     Core Definitions
    \brief      Definitions for base addresses, unions, and structures.
  @{
 */

/* Memory mapping of Cortex-M3 Hardware.  The peripheral access macros below
   cast these fixed addresses to the register-structure types defined above. */
#define SCS_BASE            (0xE000E000UL)                            /*!< System Control Space Base Address  */
#define ITM_BASE            (0xE0000000UL)                            /*!< ITM Base Address                   */
#define DWT_BASE            (0xE0001000UL)                            /*!< DWT Base Address                   */
#define TPI_BASE            (0xE0040000UL)                            /*!< TPI Base Address                   */
#define CoreDebug_BASE      (0xE000EDF0UL)                            /*!< Core Debug Base Address            */
#define SysTick_BASE        (SCS_BASE +  0x0010UL)                    /*!< SysTick Base Address               */
#define NVIC_BASE           (SCS_BASE +  0x0100UL)                    /*!< NVIC Base Address                  */
#define SCB_BASE            (SCS_BASE +  0x0D00UL)                    /*!< System Control Block Base Address  */

#define SCnSCB              ((SCnSCB_Type    *)     SCS_BASE      )   /*!< System control Register not in SCB */
#define SCB                 ((SCB_Type       *)     SCB_BASE      )   /*!< SCB configuration struct           */
#define SysTick             ((SysTick_Type   *)     SysTick_BASE  )   /*!< SysTick configuration struct       */
#define NVIC                ((NVIC_Type      *)     NVIC_BASE     )   /*!< NVIC configuration struct          */
#define ITM                 ((ITM_Type       *)     ITM_BASE      )   /*!< ITM configuration struct           */
#define DWT                 ((DWT_Type       *)     DWT_BASE      )   /*!< DWT configuration struct           */
#define TPI                 ((TPI_Type       *)     TPI_BASE      )   /*!< TPI configuration struct           */
#define CoreDebug           ((CoreDebug_Type *)     CoreDebug_BASE)   /*!< Core Debug configuration struct    */

#if (__MPU_PRESENT == 1)
  #define MPU_BASE          (SCS_BASE +  0x0D90UL)                    /*!< Memory Protection Unit             */
  #define MPU               ((MPU_Type       *)     MPU_BASE      )   /*!< Memory Protection Unit             */
#endif

/*@} */

+

+

+

+/*******************************************************************************

+ *                Hardware Abstraction Layer

+  Core Function Interface contains:

+  - Core NVIC Functions

+  - Core SysTick Functions

+  - Core Debug Functions

+  - Core Register Access Functions

+ ******************************************************************************/

+/** \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference

+*/

+

+

+

+/* ##########################   NVIC functions  #################################### */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_NVICFunctions NVIC Functions

+    \brief      Functions that manage interrupts and exceptions via the NVIC.

+    @{

+ */

+

+/** \brief  Set Priority Grouping

+

+  The function sets the priority grouping field using the required unlock sequence.

+  The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field.

+  Only values from 0..7 are used.

+  In case of a conflict between priority grouping and available

+  priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.

+

+    \param [in]      PriorityGroup  Priority grouping field.

+ */

+__STATIC_INLINE void NVIC_SetPriorityGrouping(uint32_t PriorityGroup)

+{

+  uint32_t reg_value;

+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);             /* only values 0..7 are used          */

+

+  reg_value  =  SCB->AIRCR;                                                   /* read old register configuration    */

+  reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk));             /* clear bits to change               */

+  reg_value  =  (reg_value                                   |

+                ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |

+                (PriorityGroupTmp << 8)                       );              /* Insert write key and priorty group */

+  SCB->AIRCR =  reg_value;

+}

+

+

+/** \brief  Get Priority Grouping

+

+  The function reads the priority grouping field from the NVIC Interrupt Controller.

+

+    \return                Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field).

+ */

+__STATIC_INLINE uint32_t NVIC_GetPriorityGrouping(void)

+{

+  return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos));

+}

+

+

+/** \brief  Enable External Interrupt

+

+    The function enables a device-specific interrupt in the NVIC interrupt controller.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_EnableIRQ(IRQn_Type IRQn)

+{

+  NVIC->ISER[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Disable External Interrupt

+

+    The function disables a device-specific interrupt in the NVIC interrupt controller.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_DisableIRQ(IRQn_Type IRQn)

+{

+  NVIC->ICER[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Get Pending Interrupt

+

+    The function reads the pending register in the NVIC and returns the pending bit

+    for the specified interrupt.

+

+    \param [in]      IRQn  Interrupt number.

+

+    \return             0  Interrupt status is not pending.

+    \return             1  Interrupt status is pending.

+ */

+__STATIC_INLINE uint32_t NVIC_GetPendingIRQ(IRQn_Type IRQn)

+{

+  return((uint32_t)(((NVIC->ISPR[(((uint32_t)(int32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));

+}

+

+

+/** \brief  Set Pending Interrupt

+

+    The function sets the pending bit of an external interrupt.

+

+    \param [in]      IRQn  Interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_SetPendingIRQ(IRQn_Type IRQn)

+{

+  NVIC->ISPR[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Clear Pending Interrupt

+

+    The function clears the pending bit of an external interrupt.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_ClearPendingIRQ(IRQn_Type IRQn)

+{

+  NVIC->ICPR[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Get Active Interrupt

+

+    The function reads the active register in NVIC and returns the active bit.

+

+    \param [in]      IRQn  Interrupt number.

+

+    \return             0  Interrupt status is not active.

+    \return             1  Interrupt status is active.

+ */

+__STATIC_INLINE uint32_t NVIC_GetActive(IRQn_Type IRQn)

+{

+  return((uint32_t)(((NVIC->IABR[(((uint32_t)(int32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));

+}

+

+

+/** \brief  Set Interrupt Priority

+

+    The function sets the priority of an interrupt.

+

+    \note The priority cannot be set for every core interrupt.

+

+    \param [in]      IRQn  Interrupt number.

+    \param [in]  priority  Priority to set.

+ */

+__STATIC_INLINE void NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)

+{

+  if((int32_t)IRQn < 0) {

+    SCB->SHP[(((uint32_t)(int32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL);

+  }

+  else {

+    NVIC->IP[((uint32_t)(int32_t)IRQn)]               = (uint8_t)((priority << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL);

+  }

+}

+

+

+/** \brief  Get Interrupt Priority

+

+    The function reads the priority of an interrupt. The interrupt

+    number can be positive to specify an external (device specific)

+    interrupt, or negative to specify an internal (core) interrupt.

+

+

+    \param [in]   IRQn  Interrupt number.

+    \return             Interrupt Priority. Value is aligned automatically to the implemented

+                        priority bits of the microcontroller.

+ */

+__STATIC_INLINE uint32_t NVIC_GetPriority(IRQn_Type IRQn)

+{

+

+  if((int32_t)IRQn < 0) {

+    return(((uint32_t)SCB->SHP[(((uint32_t)(int32_t)IRQn) & 0xFUL)-4UL] >> (8 - __NVIC_PRIO_BITS)));

+  }

+  else {

+    return(((uint32_t)NVIC->IP[((uint32_t)(int32_t)IRQn)]               >> (8 - __NVIC_PRIO_BITS)));

+  }

+}

+

+

+/** \brief  Encode Priority

+

+    The function encodes the priority for an interrupt with the given priority group,

+    preemptive priority value, and subpriority value.

+    In case of a conflict between priority grouping and available

+    priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.

+

+    \param [in]     PriorityGroup  Used priority group.

+    \param [in]   PreemptPriority  Preemptive priority value (starting from 0).

+    \param [in]       SubPriority  Subpriority value (starting from 0).

+    \return                        Encoded priority. Value can be used in the function \ref NVIC_SetPriority().

+ */

+__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority)

+{

+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);   /* only values 0..7 are used          */

+  uint32_t PreemptPriorityBits;

+  uint32_t SubPriorityBits;

+

+  PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);

+  SubPriorityBits     = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));

+

+  return (

+           ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) |

+           ((SubPriority     & (uint32_t)((1UL << (SubPriorityBits    )) - 1UL)))

+         );

+}

+

+

+/** \brief  Decode Priority

+

+    The function decodes an interrupt priority value with a given priority group to

+    preemptive priority value and subpriority value.

+    In case of a conflict between priority grouping and available

+    priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set.

+

+    \param [in]         Priority   Priority value, which can be retrieved with the function \ref NVIC_GetPriority().

+    \param [in]     PriorityGroup  Used priority group.

+    \param [out] pPreemptPriority  Preemptive priority value (starting from 0).

+    \param [out]     pSubPriority  Subpriority value (starting from 0).

+ */

+__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* pPreemptPriority, uint32_t* pSubPriority)

+{

+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);   /* only values 0..7 are used          */

+  uint32_t PreemptPriorityBits;

+  uint32_t SubPriorityBits;

+

+  PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);

+  SubPriorityBits     = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));

+

+  *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL);

+  *pSubPriority     = (Priority                   ) & (uint32_t)((1UL << (SubPriorityBits    )) - 1UL);

+}

+

+

+/** \brief  System Reset

+

+    The function initiates a system reset request to reset the MCU.

+ */

+__STATIC_INLINE void NVIC_SystemReset(void)

+{

+  __DSB();                                                          /* Ensure all outstanding memory accesses including

+                                                                       buffered write are completed before reset */

+  SCB->AIRCR  = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos)    |

+                           (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) |

+                            SCB_AIRCR_SYSRESETREQ_Msk    );         /* Keep priority group unchanged */

+  __DSB();                                                          /* Ensure completion of memory access */

+  while(1) { __NOP(); }                                             /* wait until reset */

+}

+

+/*@} end of CMSIS_Core_NVICFunctions */

+

+

+

+/* ##################################    SysTick function  ############################################ */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_SysTickFunctions SysTick Functions

+    \brief      Functions that configure the System.

+  @{

+ */

+

+#if (__Vendor_SysTickConfig == 0)

+

+/** \brief  System Tick Configuration

+

+    The function initializes the System Timer and its interrupt, and starts the System Tick Timer.

+    Counter is in free running mode to generate periodic interrupts.

+

+    \param [in]  ticks  Number of ticks between two interrupts.

+

+    \return          0  Function succeeded.

+    \return          1  Function failed.

+

+    \note     When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the

+    function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>

+    must contain a vendor-specific implementation of this function.

+

+ */

+__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)

+{

+  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) { return (1UL); }    /* Reload value impossible */

+

+  SysTick->LOAD  = (uint32_t)(ticks - 1UL);                         /* set reload register */

+  NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */

+  SysTick->VAL   = 0UL;                                             /* Load the SysTick Counter Value */

+  SysTick->CTRL  = SysTick_CTRL_CLKSOURCE_Msk |

+                   SysTick_CTRL_TICKINT_Msk   |

+                   SysTick_CTRL_ENABLE_Msk;                         /* Enable SysTick IRQ and SysTick Timer */

+  return (0UL);                                                     /* Function successful */

+}

+

+#endif

+

+/*@} end of CMSIS_Core_SysTickFunctions */

+

+

+

+/* ##################################### Debug In/Output function ########################################### */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_core_DebugFunctions ITM Functions

+    \brief   Functions that access the ITM debug interface.

+  @{

+ */

+

+extern volatile int32_t ITM_RxBuffer;                    /*!< External variable to receive characters.                         */

+#define                 ITM_RXBUFFER_EMPTY    0x5AA55AA5 /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */

+

+

+/** \brief  ITM Send Character

+

+    The function transmits a character via the ITM channel 0, and

+    \li Just returns when no debugger is connected that has booked the output.

+    \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted.

+

+    \param [in]     ch  Character to transmit.

+

+    \returns            Character to transmit.

+ */

+__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch)

+{

+  if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&      /* ITM enabled */

+      ((ITM->TER & 1UL               ) != 0UL)   )     /* ITM Port #0 enabled */

+  {

+    while (ITM->PORT[0].u32 == 0UL) { __NOP(); }

+    ITM->PORT[0].u8 = (uint8_t)ch;

+  }

+  return (ch);

+}

+

+

+/** \brief  ITM Receive Character

+

+    The function inputs a character via the external variable \ref ITM_RxBuffer.

+

+    \return             Received character.

+    \return         -1  No character pending.

+ */

+__STATIC_INLINE int32_t ITM_ReceiveChar (void) {

+  int32_t ch = -1;                           /* no character available */

+

+  if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) {

+    ch = ITM_RxBuffer;

+    ITM_RxBuffer = ITM_RXBUFFER_EMPTY;       /* ready for next character */

+  }

+

+  return (ch);

+}

+

+

+/** \brief  ITM Check Character

+

+    The function checks whether a character is pending for reading in the variable \ref ITM_RxBuffer.

+

+    \return          0  No character available.

+    \return          1  Character available.

+ */

+__STATIC_INLINE int32_t ITM_CheckChar (void) {

+

+  if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) {

+    return (0);                                 /* no character available */

+  } else {

+    return (1);                                 /*    character available */

+  }

+}

+

+/*@} end of CMSIS_core_DebugFunctions */

+

+

+

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __CORE_CM3_H_DEPENDANT */

+

+#endif /* __CMSIS_GENERIC */

diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm4.h b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm4.h
new file mode 100644
index 0000000..544d414
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm4.h
@@ -0,0 +1,1858 @@
+/**************************************************************************//**

+ * @file     core_cm4.h

+ * @brief    CMSIS Cortex-M4 Core Peripheral Access Layer Header File

+ * @version  V4.10

+ * @date     18. March 2015

+ *

+ * @note

+ *

+ ******************************************************************************/

+/* Copyright (c) 2009 - 2015 ARM LIMITED

+

+   All rights reserved.

+   Redistribution and use in source and binary forms, with or without

+   modification, are permitted provided that the following conditions are met:

+   - Redistributions of source code must retain the above copyright

+     notice, this list of conditions and the following disclaimer.

+   - Redistributions in binary form must reproduce the above copyright

+     notice, this list of conditions and the following disclaimer in the

+     documentation and/or other materials provided with the distribution.

+   - Neither the name of ARM nor the names of its contributors may be used

+     to endorse or promote products derived from this software without

+     specific prior written permission.

+   *

+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"

+   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE

+   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE

+   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE

+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR

+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF

+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS

+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN

+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)

+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE

+   POSSIBILITY OF SUCH DAMAGE.

+   ---------------------------------------------------------------------------*/

+

+

+#if defined ( __ICCARM__ )

+ #pragma system_include  /* treat file as system include file for MISRA check */

+#endif

+

+#ifndef __CORE_CM4_H_GENERIC

+#define __CORE_CM4_H_GENERIC

+

+#ifdef __cplusplus

+ extern "C" {

+#endif

+

+/** \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions

+  CMSIS violates the following MISRA-C:2004 rules:

+

+   \li Required Rule 8.5, object/function definition in header file.<br>

+     Function definitions in header files are used to allow 'inlining'.

+

+   \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>

+     Unions are used for effective representation of core registers.

+

+   \li Advisory Rule 19.7, Function-like macro defined.<br>

+     Function-like macros are used to allow more efficient code.

+ */

+

+

+/*******************************************************************************

+ *                 CMSIS definitions

+ ******************************************************************************/

+/** \ingroup Cortex_M4

+  @{

+ */

+

+/*  CMSIS CM4 definitions */

+#define __CM4_CMSIS_VERSION_MAIN  (0x04)                                   /*!< [31:16] CMSIS HAL main version   */

+#define __CM4_CMSIS_VERSION_SUB   (0x00)                                   /*!< [15:0]  CMSIS HAL sub version    */

+#define __CM4_CMSIS_VERSION       ((__CM4_CMSIS_VERSION_MAIN << 16) | \

+                                    __CM4_CMSIS_VERSION_SUB          )     /*!< CMSIS HAL version number         */

+

+#define __CORTEX_M                (0x04)                                   /*!< Cortex-M Core                    */

+

+

+#if   defined ( __CC_ARM )

+  #define __ASM            __asm                                      /*!< asm keyword for ARM Compiler          */

+  #define __INLINE         __inline                                   /*!< inline keyword for ARM Compiler       */

+  #define __STATIC_INLINE  static __inline

+

+#elif defined ( __GNUC__ )

+  #define __ASM            __asm                                      /*!< asm keyword for GNU Compiler          */

+  #define __INLINE         inline                                     /*!< inline keyword for GNU Compiler       */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __ICCARM__ )

+  #define __ASM            __asm                                      /*!< asm keyword for IAR Compiler          */

+  #define __INLINE         inline                                     /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __TMS470__ )

+  #define __ASM            __asm                                      /*!< asm keyword for TI CCS Compiler       */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __TASKING__ )

+  #define __ASM            __asm                                      /*!< asm keyword for TASKING Compiler      */

+  #define __INLINE         inline                                     /*!< inline keyword for TASKING Compiler   */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __CSMC__ )

+  #define __packed

+  #define __ASM            _asm                                      /*!< asm keyword for COSMIC Compiler      */

+  #define __INLINE         inline                                    /*use -pc99 on compile line !< inline keyword for COSMIC Compiler   */

+  #define __STATIC_INLINE  static inline

+

+#endif

+

+/** __FPU_USED indicates whether an FPU is used or not.

+    For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions.

+*/

+#if defined ( __CC_ARM )

+  #if defined __TARGET_FPU_VFP

+    #if (__FPU_PRESENT == 1)

+      #define __FPU_USED       1

+    #else

+      #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+      #define __FPU_USED       0

+    #endif

+  #else

+    #define __FPU_USED         0

+  #endif

+

+#elif defined ( __GNUC__ )

+  #if defined (__VFP_FP__) && !defined(__SOFTFP__)

+    #if (__FPU_PRESENT == 1)

+      #define __FPU_USED       1

+    #else

+      #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+      #define __FPU_USED       0

+    #endif

+  #else

+    #define __FPU_USED         0

+  #endif

+

+#elif defined ( __ICCARM__ )

+  #if defined __ARMVFP__

+    #if (__FPU_PRESENT == 1)

+      #define __FPU_USED       1

+    #else

+      #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+      #define __FPU_USED       0

+    #endif

+  #else

+    #define __FPU_USED         0

+  #endif

+

+#elif defined ( __TMS470__ )

+  #if defined __TI_VFP_SUPPORT__

+    #if (__FPU_PRESENT == 1)

+      #define __FPU_USED       1

+    #else

+      #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+      #define __FPU_USED       0

+    #endif

+  #else

+    #define __FPU_USED         0

+  #endif

+

+#elif defined ( __TASKING__ )

+  #if defined __FPU_VFP__

+    #if (__FPU_PRESENT == 1)

+      #define __FPU_USED       1

+    #else

+      #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+      #define __FPU_USED       0

+    #endif

+  #else

+    #define __FPU_USED         0

+  #endif

+

+#elif defined ( __CSMC__ )		/* Cosmic */

+  #if ( __CSMC__ & 0x400)		// FPU present for parser

+    #if (__FPU_PRESENT == 1)

+      #define __FPU_USED       1

+    #else

+      #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+      #define __FPU_USED       0

+    #endif

+  #else

+    #define __FPU_USED         0

+  #endif

+#endif

+

+#include <stdint.h>                      /* standard types definitions                      */

+#include <core_cmInstr.h>                /* Core Instruction Access                         */

+#include <core_cmFunc.h>                 /* Core Function Access                            */

+#include <core_cmSimd.h>                 /* Compiler specific SIMD Intrinsics               */

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __CORE_CM4_H_GENERIC */

+

+#ifndef __CMSIS_GENERIC

+

+#ifndef __CORE_CM4_H_DEPENDANT

+#define __CORE_CM4_H_DEPENDANT

+

+#ifdef __cplusplus

+ extern "C" {

+#endif

+

+/* check device defines and use defaults */

+#if defined __CHECK_DEVICE_DEFINES

+  #ifndef __CM4_REV

+    #define __CM4_REV               0x0000

+    #warning "__CM4_REV not defined in device header file; using default!"

+  #endif

+

+  #ifndef __FPU_PRESENT

+    #define __FPU_PRESENT             0

+    #warning "__FPU_PRESENT not defined in device header file; using default!"

+  #endif

+

+  #ifndef __MPU_PRESENT

+    #define __MPU_PRESENT             0

+    #warning "__MPU_PRESENT not defined in device header file; using default!"

+  #endif

+

+  #ifndef __NVIC_PRIO_BITS

+    #define __NVIC_PRIO_BITS          4

+    #warning "__NVIC_PRIO_BITS not defined in device header file; using default!"

+  #endif

+

+  #ifndef __Vendor_SysTickConfig

+    #define __Vendor_SysTickConfig    0

+    #warning "__Vendor_SysTickConfig not defined in device header file; using default!"

+  #endif

+#endif

+

+/* IO definitions (access restrictions to peripheral registers) */

+/**

+    \defgroup CMSIS_glob_defs CMSIS Global Defines

+

+    <strong>IO Type Qualifiers</strong> are used

+    \li to specify the access to peripheral variables.

+    \li for automatic generation of peripheral register debug information.

+*/

+#ifdef __cplusplus

+  #define   __I     volatile             /*!< Defines 'read only' permissions                 */

+#else

+  #define   __I     volatile const       /*!< Defines 'read only' permissions                 */

+#endif

+#define     __O     volatile             /*!< Defines 'write only' permissions                */

+#define     __IO    volatile             /*!< Defines 'read / write' permissions              */

+

+/*@} end of group Cortex_M4 */

+

+

+

+/*******************************************************************************

+ *                 Register Abstraction

+  Core Register contain:

+  - Core Register

+  - Core NVIC Register

+  - Core SCB Register

+  - Core SysTick Register

+  - Core Debug Register

+  - Core MPU Register

+  - Core FPU Register

+ ******************************************************************************/

+/** \defgroup CMSIS_core_register Defines and Type Definitions

+    \brief Type definitions and defines for Cortex-M processor based devices.

+*/

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_CORE  Status and Control Registers

+    \brief  Core Register type definitions.

+  @{

+ */

+

+/** \brief  Union type to access the Application Program Status Register (APSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t _reserved0:16;              /*!< bit:  0..15  Reserved                           */

+    uint32_t GE:4;                       /*!< bit: 16..19  Greater than or Equal flags        */

+    uint32_t _reserved1:7;               /*!< bit: 20..26  Reserved                           */

+    uint32_t Q:1;                        /*!< bit:     27  Saturation condition flag          */

+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag       */

+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag          */

+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag           */

+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag       */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} APSR_Type;

+

+/* APSR Register Definitions */

+#define APSR_N_Pos                         31                                             /*!< APSR: N Position */

+#define APSR_N_Msk                         (1UL << APSR_N_Pos)                            /*!< APSR: N Mask */

+

+#define APSR_Z_Pos                         30                                             /*!< APSR: Z Position */

+#define APSR_Z_Msk                         (1UL << APSR_Z_Pos)                            /*!< APSR: Z Mask */

+

+#define APSR_C_Pos                         29                                             /*!< APSR: C Position */

+#define APSR_C_Msk                         (1UL << APSR_C_Pos)                            /*!< APSR: C Mask */

+

+#define APSR_V_Pos                         28                                             /*!< APSR: V Position */

+#define APSR_V_Msk                         (1UL << APSR_V_Pos)                            /*!< APSR: V Mask */

+

+#define APSR_Q_Pos                         27                                             /*!< APSR: Q Position */

+#define APSR_Q_Msk                         (1UL << APSR_Q_Pos)                            /*!< APSR: Q Mask */

+

+#define APSR_GE_Pos                        16                                             /*!< APSR: GE Position */

+#define APSR_GE_Msk                        (0xFUL << APSR_GE_Pos)                         /*!< APSR: GE Mask */

+

+

+/** \brief  Union type to access the Interrupt Program Status Register (IPSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */

+    uint32_t _reserved0:23;              /*!< bit:  9..31  Reserved                           */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} IPSR_Type;

+

+/* IPSR Register Definitions */

+#define IPSR_ISR_Pos                        0                                             /*!< IPSR: ISR Position */

+#define IPSR_ISR_Msk                       (0x1FFUL /*<< IPSR_ISR_Pos*/)                  /*!< IPSR: ISR Mask */

+

+

+/** \brief  Union type to access the Special-Purpose Program Status Registers (xPSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */

+    uint32_t _reserved0:7;               /*!< bit:  9..15  Reserved                           */

+    uint32_t GE:4;                       /*!< bit: 16..19  Greater than or Equal flags        */

+    uint32_t _reserved1:4;               /*!< bit: 20..23  Reserved                           */

+    uint32_t T:1;                        /*!< bit:     24  Thumb bit        (read 0)          */

+    uint32_t IT:2;                       /*!< bit: 25..26  saved IT state   (read 0)          */

+    uint32_t Q:1;                        /*!< bit:     27  Saturation condition flag          */

+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag       */

+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag          */

+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag           */

+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag       */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} xPSR_Type;

+

+/* xPSR Register Definitions */

+#define xPSR_N_Pos                         31                                             /*!< xPSR: N Position */

+#define xPSR_N_Msk                         (1UL << xPSR_N_Pos)                            /*!< xPSR: N Mask */

+

+#define xPSR_Z_Pos                         30                                             /*!< xPSR: Z Position */

+#define xPSR_Z_Msk                         (1UL << xPSR_Z_Pos)                            /*!< xPSR: Z Mask */

+

+#define xPSR_C_Pos                         29                                             /*!< xPSR: C Position */

+#define xPSR_C_Msk                         (1UL << xPSR_C_Pos)                            /*!< xPSR: C Mask */

+

+#define xPSR_V_Pos                         28                                             /*!< xPSR: V Position */

+#define xPSR_V_Msk                         (1UL << xPSR_V_Pos)                            /*!< xPSR: V Mask */

+

+#define xPSR_Q_Pos                         27                                             /*!< xPSR: Q Position */

+#define xPSR_Q_Msk                         (1UL << xPSR_Q_Pos)                            /*!< xPSR: Q Mask */

+

+#define xPSR_IT_Pos                        25                                             /*!< xPSR: IT Position */

+#define xPSR_IT_Msk                        (3UL << xPSR_IT_Pos)                           /*!< xPSR: IT Mask */

+

+#define xPSR_T_Pos                         24                                             /*!< xPSR: T Position */

+#define xPSR_T_Msk                         (1UL << xPSR_T_Pos)                            /*!< xPSR: T Mask */

+

+#define xPSR_GE_Pos                        16                                             /*!< xPSR: GE Position */

+#define xPSR_GE_Msk                        (0xFUL << xPSR_GE_Pos)                         /*!< xPSR: GE Mask */

+

+#define xPSR_ISR_Pos                        0                                             /*!< xPSR: ISR Position */

+#define xPSR_ISR_Msk                       (0x1FFUL /*<< xPSR_ISR_Pos*/)                  /*!< xPSR: ISR Mask */

+

+

+/** \brief  Union type to access the Control Registers (CONTROL).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t nPRIV:1;                    /*!< bit:      0  Execution privilege in Thread mode */

+    uint32_t SPSEL:1;                    /*!< bit:      1  Stack to be used                   */

+    uint32_t FPCA:1;                     /*!< bit:      2  FP extension active flag           */

+    uint32_t _reserved0:29;              /*!< bit:  3..31  Reserved                           */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} CONTROL_Type;

+

+/* CONTROL Register Definitions */

+#define CONTROL_FPCA_Pos                    2                                             /*!< CONTROL: FPCA Position */

+#define CONTROL_FPCA_Msk                   (1UL << CONTROL_FPCA_Pos)                      /*!< CONTROL: FPCA Mask */

+

+#define CONTROL_SPSEL_Pos                   1                                             /*!< CONTROL: SPSEL Position */

+#define CONTROL_SPSEL_Msk                  (1UL << CONTROL_SPSEL_Pos)                     /*!< CONTROL: SPSEL Mask */

+

+#define CONTROL_nPRIV_Pos                   0                                             /*!< CONTROL: nPRIV Position */

+#define CONTROL_nPRIV_Msk                  (1UL /*<< CONTROL_nPRIV_Pos*/)                 /*!< CONTROL: nPRIV Mask */

+

+/*@} end of group CMSIS_CORE */

+

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_NVIC  Nested Vectored Interrupt Controller (NVIC)

+    \brief      Type definitions for the NVIC Registers

+  @{

+ */

+

+/** \brief  Structure type to access the Nested Vectored Interrupt Controller (NVIC).

+ */

+typedef struct

+{

+  __IO uint32_t ISER[8];                 /*!< Offset: 0x000 (R/W)  Interrupt Set Enable Register           */

+       uint32_t RESERVED0[24];

+  __IO uint32_t ICER[8];                 /*!< Offset: 0x080 (R/W)  Interrupt Clear Enable Register         */

+       uint32_t RSERVED1[24];

+  __IO uint32_t ISPR[8];                 /*!< Offset: 0x100 (R/W)  Interrupt Set Pending Register          */

+       uint32_t RESERVED2[24];

+  __IO uint32_t ICPR[8];                 /*!< Offset: 0x180 (R/W)  Interrupt Clear Pending Register        */

+       uint32_t RESERVED3[24];

+  __IO uint32_t IABR[8];                 /*!< Offset: 0x200 (R/W)  Interrupt Active bit Register           */

+       uint32_t RESERVED4[56];

+  __IO uint8_t  IP[240];                 /*!< Offset: 0x300 (R/W)  Interrupt Priority Register (8Bit wide) */

+       uint32_t RESERVED5[644];

+  __O  uint32_t STIR;                    /*!< Offset: 0xE00 ( /W)  Software Trigger Interrupt Register     */

+}  NVIC_Type;

+

+/* Software Triggered Interrupt Register Definitions */

+#define NVIC_STIR_INTID_Pos                 0                                          /*!< STIR: INTLINESNUM Position */

+#define NVIC_STIR_INTID_Msk                (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/)        /*!< STIR: INTLINESNUM Mask */

+

+/*@} end of group CMSIS_NVIC */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SCB     System Control Block (SCB)

+    \brief      Type definitions for the System Control Block Registers

+  @{

+ */

+

+/** \brief  Structure type to access the System Control Block (SCB).

+
+    NOTE(review): memory-mapped hardware register layout — member order,

+    widths and the RESERVED0/ISAR padding encode the exact register

+    offsets shown in the per-member comments; do not reorder or repack.

+ */

+typedef struct

+{

+  __I  uint32_t CPUID;                   /*!< Offset: 0x000 (R/ )  CPUID Base Register                                   */

+  __IO uint32_t ICSR;                    /*!< Offset: 0x004 (R/W)  Interrupt Control and State Register                  */

+  __IO uint32_t VTOR;                    /*!< Offset: 0x008 (R/W)  Vector Table Offset Register                          */

+  __IO uint32_t AIRCR;                   /*!< Offset: 0x00C (R/W)  Application Interrupt and Reset Control Register      */

+  __IO uint32_t SCR;                     /*!< Offset: 0x010 (R/W)  System Control Register                               */

+  __IO uint32_t CCR;                     /*!< Offset: 0x014 (R/W)  Configuration Control Register                        */

+  __IO uint8_t  SHP[12];                 /*!< Offset: 0x018 (R/W)  System Handlers Priority Registers (4-7, 8-11, 12-15) */

+  __IO uint32_t SHCSR;                   /*!< Offset: 0x024 (R/W)  System Handler Control and State Register             */

+  __IO uint32_t CFSR;                    /*!< Offset: 0x028 (R/W)  Configurable Fault Status Register                    */

+  __IO uint32_t HFSR;                    /*!< Offset: 0x02C (R/W)  HardFault Status Register                             */

+  __IO uint32_t DFSR;                    /*!< Offset: 0x030 (R/W)  Debug Fault Status Register                           */

+  __IO uint32_t MMFAR;                   /*!< Offset: 0x034 (R/W)  MemManage Fault Address Register                      */

+  __IO uint32_t BFAR;                    /*!< Offset: 0x038 (R/W)  BusFault Address Register                             */

+  __IO uint32_t AFSR;                    /*!< Offset: 0x03C (R/W)  Auxiliary Fault Status Register                       */

+  __I  uint32_t PFR[2];                  /*!< Offset: 0x040 (R/ )  Processor Feature Register                            */

+  __I  uint32_t DFR;                     /*!< Offset: 0x048 (R/ )  Debug Feature Register                                */

+  __I  uint32_t ADR;                     /*!< Offset: 0x04C (R/ )  Auxiliary Feature Register                            */

+  __I  uint32_t MMFR[4];                 /*!< Offset: 0x050 (R/ )  Memory Model Feature Register                         */

+  __I  uint32_t ISAR[5];                 /*!< Offset: 0x060 (R/ )  Instruction Set Attributes Register                   */

+       uint32_t RESERVED0[5];           /* pads 0x074..0x087 so CPACR lands at offset 0x088 */

+  __IO uint32_t CPACR;                   /*!< Offset: 0x088 (R/W)  Coprocessor Access Control Register                   */

+} SCB_Type;

+

+/* SCB CPUID Register Definitions */

+#define SCB_CPUID_IMPLEMENTER_Pos          24                                             /*!< SCB CPUID: IMPLEMENTER Position */

+#define SCB_CPUID_IMPLEMENTER_Msk          (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos)          /*!< SCB CPUID: IMPLEMENTER Mask */

+

+#define SCB_CPUID_VARIANT_Pos              20                                             /*!< SCB CPUID: VARIANT Position */

+#define SCB_CPUID_VARIANT_Msk              (0xFUL << SCB_CPUID_VARIANT_Pos)               /*!< SCB CPUID: VARIANT Mask */

+

+#define SCB_CPUID_ARCHITECTURE_Pos         16                                             /*!< SCB CPUID: ARCHITECTURE Position */

+#define SCB_CPUID_ARCHITECTURE_Msk         (0xFUL << SCB_CPUID_ARCHITECTURE_Pos)          /*!< SCB CPUID: ARCHITECTURE Mask */

+

+#define SCB_CPUID_PARTNO_Pos                4                                             /*!< SCB CPUID: PARTNO Position */

+#define SCB_CPUID_PARTNO_Msk               (0xFFFUL << SCB_CPUID_PARTNO_Pos)              /*!< SCB CPUID: PARTNO Mask */

+

+#define SCB_CPUID_REVISION_Pos              0                                             /*!< SCB CPUID: REVISION Position */

+#define SCB_CPUID_REVISION_Msk             (0xFUL /*<< SCB_CPUID_REVISION_Pos*/)          /*!< SCB CPUID: REVISION Mask */

+

+/* SCB Interrupt Control State Register Definitions */

+#define SCB_ICSR_NMIPENDSET_Pos            31                                             /*!< SCB ICSR: NMIPENDSET Position */

+#define SCB_ICSR_NMIPENDSET_Msk            (1UL << SCB_ICSR_NMIPENDSET_Pos)               /*!< SCB ICSR: NMIPENDSET Mask */

+

+#define SCB_ICSR_PENDSVSET_Pos             28                                             /*!< SCB ICSR: PENDSVSET Position */

+#define SCB_ICSR_PENDSVSET_Msk             (1UL << SCB_ICSR_PENDSVSET_Pos)                /*!< SCB ICSR: PENDSVSET Mask */

+

+#define SCB_ICSR_PENDSVCLR_Pos             27                                             /*!< SCB ICSR: PENDSVCLR Position */

+#define SCB_ICSR_PENDSVCLR_Msk             (1UL << SCB_ICSR_PENDSVCLR_Pos)                /*!< SCB ICSR: PENDSVCLR Mask */

+

+#define SCB_ICSR_PENDSTSET_Pos             26                                             /*!< SCB ICSR: PENDSTSET Position */

+#define SCB_ICSR_PENDSTSET_Msk             (1UL << SCB_ICSR_PENDSTSET_Pos)                /*!< SCB ICSR: PENDSTSET Mask */

+

+#define SCB_ICSR_PENDSTCLR_Pos             25                                             /*!< SCB ICSR: PENDSTCLR Position */

+#define SCB_ICSR_PENDSTCLR_Msk             (1UL << SCB_ICSR_PENDSTCLR_Pos)                /*!< SCB ICSR: PENDSTCLR Mask */

+

+#define SCB_ICSR_ISRPREEMPT_Pos            23                                             /*!< SCB ICSR: ISRPREEMPT Position */

+#define SCB_ICSR_ISRPREEMPT_Msk            (1UL << SCB_ICSR_ISRPREEMPT_Pos)               /*!< SCB ICSR: ISRPREEMPT Mask */

+

+#define SCB_ICSR_ISRPENDING_Pos            22                                             /*!< SCB ICSR: ISRPENDING Position */

+#define SCB_ICSR_ISRPENDING_Msk            (1UL << SCB_ICSR_ISRPENDING_Pos)               /*!< SCB ICSR: ISRPENDING Mask */

+

+#define SCB_ICSR_VECTPENDING_Pos           12                                             /*!< SCB ICSR: VECTPENDING Position */

+#define SCB_ICSR_VECTPENDING_Msk           (0x1FFUL << SCB_ICSR_VECTPENDING_Pos)          /*!< SCB ICSR: VECTPENDING Mask */

+

+#define SCB_ICSR_RETTOBASE_Pos             11                                             /*!< SCB ICSR: RETTOBASE Position */

+#define SCB_ICSR_RETTOBASE_Msk             (1UL << SCB_ICSR_RETTOBASE_Pos)                /*!< SCB ICSR: RETTOBASE Mask */

+

+#define SCB_ICSR_VECTACTIVE_Pos             0                                             /*!< SCB ICSR: VECTACTIVE Position */

+#define SCB_ICSR_VECTACTIVE_Msk            (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/)       /*!< SCB ICSR: VECTACTIVE Mask */

+

+/* SCB Vector Table Offset Register Definitions */

+#define SCB_VTOR_TBLOFF_Pos                 7                                             /*!< SCB VTOR: TBLOFF Position */

+#define SCB_VTOR_TBLOFF_Msk                (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos)           /*!< SCB VTOR: TBLOFF Mask */

+

+/* SCB Application Interrupt and Reset Control Register Definitions */

+#define SCB_AIRCR_VECTKEY_Pos              16                                             /*!< SCB AIRCR: VECTKEY Position */

+#define SCB_AIRCR_VECTKEY_Msk              (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos)            /*!< SCB AIRCR: VECTKEY Mask */

+

+#define SCB_AIRCR_VECTKEYSTAT_Pos          16                                             /*!< SCB AIRCR: VECTKEYSTAT Position */

+#define SCB_AIRCR_VECTKEYSTAT_Msk          (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos)        /*!< SCB AIRCR: VECTKEYSTAT Mask */

+

+#define SCB_AIRCR_ENDIANESS_Pos            15                                             /*!< SCB AIRCR: ENDIANESS Position */

+#define SCB_AIRCR_ENDIANESS_Msk            (1UL << SCB_AIRCR_ENDIANESS_Pos)               /*!< SCB AIRCR: ENDIANESS Mask */

+

+#define SCB_AIRCR_PRIGROUP_Pos              8                                             /*!< SCB AIRCR: PRIGROUP Position */

+#define SCB_AIRCR_PRIGROUP_Msk             (7UL << SCB_AIRCR_PRIGROUP_Pos)                /*!< SCB AIRCR: PRIGROUP Mask */

+

+#define SCB_AIRCR_SYSRESETREQ_Pos           2                                             /*!< SCB AIRCR: SYSRESETREQ Position */

+#define SCB_AIRCR_SYSRESETREQ_Msk          (1UL << SCB_AIRCR_SYSRESETREQ_Pos)             /*!< SCB AIRCR: SYSRESETREQ Mask */

+

+#define SCB_AIRCR_VECTCLRACTIVE_Pos         1                                             /*!< SCB AIRCR: VECTCLRACTIVE Position */

+#define SCB_AIRCR_VECTCLRACTIVE_Msk        (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos)           /*!< SCB AIRCR: VECTCLRACTIVE Mask */

+

+#define SCB_AIRCR_VECTRESET_Pos             0                                             /*!< SCB AIRCR: VECTRESET Position */

+#define SCB_AIRCR_VECTRESET_Msk            (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/)           /*!< SCB AIRCR: VECTRESET Mask */

+

+/* SCB System Control Register Definitions */

+#define SCB_SCR_SEVONPEND_Pos               4                                             /*!< SCB SCR: SEVONPEND Position */

+#define SCB_SCR_SEVONPEND_Msk              (1UL << SCB_SCR_SEVONPEND_Pos)                 /*!< SCB SCR: SEVONPEND Mask */

+

+#define SCB_SCR_SLEEPDEEP_Pos               2                                             /*!< SCB SCR: SLEEPDEEP Position */

+#define SCB_SCR_SLEEPDEEP_Msk              (1UL << SCB_SCR_SLEEPDEEP_Pos)                 /*!< SCB SCR: SLEEPDEEP Mask */

+

+#define SCB_SCR_SLEEPONEXIT_Pos             1                                             /*!< SCB SCR: SLEEPONEXIT Position */

+#define SCB_SCR_SLEEPONEXIT_Msk            (1UL << SCB_SCR_SLEEPONEXIT_Pos)               /*!< SCB SCR: SLEEPONEXIT Mask */

+

+/* SCB Configuration Control Register Definitions */

+#define SCB_CCR_STKALIGN_Pos                9                                             /*!< SCB CCR: STKALIGN Position */

+#define SCB_CCR_STKALIGN_Msk               (1UL << SCB_CCR_STKALIGN_Pos)                  /*!< SCB CCR: STKALIGN Mask */

+

+#define SCB_CCR_BFHFNMIGN_Pos               8                                             /*!< SCB CCR: BFHFNMIGN Position */

+#define SCB_CCR_BFHFNMIGN_Msk              (1UL << SCB_CCR_BFHFNMIGN_Pos)                 /*!< SCB CCR: BFHFNMIGN Mask */

+

+#define SCB_CCR_DIV_0_TRP_Pos               4                                             /*!< SCB CCR: DIV_0_TRP Position */

+#define SCB_CCR_DIV_0_TRP_Msk              (1UL << SCB_CCR_DIV_0_TRP_Pos)                 /*!< SCB CCR: DIV_0_TRP Mask */

+

+#define SCB_CCR_UNALIGN_TRP_Pos             3                                             /*!< SCB CCR: UNALIGN_TRP Position */

+#define SCB_CCR_UNALIGN_TRP_Msk            (1UL << SCB_CCR_UNALIGN_TRP_Pos)               /*!< SCB CCR: UNALIGN_TRP Mask */

+

+#define SCB_CCR_USERSETMPEND_Pos            1                                             /*!< SCB CCR: USERSETMPEND Position */

+#define SCB_CCR_USERSETMPEND_Msk           (1UL << SCB_CCR_USERSETMPEND_Pos)              /*!< SCB CCR: USERSETMPEND Mask */

+

+#define SCB_CCR_NONBASETHRDENA_Pos          0                                             /*!< SCB CCR: NONBASETHRDENA Position */

+#define SCB_CCR_NONBASETHRDENA_Msk         (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/)        /*!< SCB CCR: NONBASETHRDENA Mask */

+

+/* SCB System Handler Control and State Register Definitions */

+#define SCB_SHCSR_USGFAULTENA_Pos          18                                             /*!< SCB SHCSR: USGFAULTENA Position */

+#define SCB_SHCSR_USGFAULTENA_Msk          (1UL << SCB_SHCSR_USGFAULTENA_Pos)             /*!< SCB SHCSR: USGFAULTENA Mask */

+

+#define SCB_SHCSR_BUSFAULTENA_Pos          17                                             /*!< SCB SHCSR: BUSFAULTENA Position */

+#define SCB_SHCSR_BUSFAULTENA_Msk          (1UL << SCB_SHCSR_BUSFAULTENA_Pos)             /*!< SCB SHCSR: BUSFAULTENA Mask */

+

+#define SCB_SHCSR_MEMFAULTENA_Pos          16                                             /*!< SCB SHCSR: MEMFAULTENA Position */

+#define SCB_SHCSR_MEMFAULTENA_Msk          (1UL << SCB_SHCSR_MEMFAULTENA_Pos)             /*!< SCB SHCSR: MEMFAULTENA Mask */

+

+#define SCB_SHCSR_SVCALLPENDED_Pos         15                                             /*!< SCB SHCSR: SVCALLPENDED Position */

+#define SCB_SHCSR_SVCALLPENDED_Msk         (1UL << SCB_SHCSR_SVCALLPENDED_Pos)            /*!< SCB SHCSR: SVCALLPENDED Mask */

+

+#define SCB_SHCSR_BUSFAULTPENDED_Pos       14                                             /*!< SCB SHCSR: BUSFAULTPENDED Position */

+#define SCB_SHCSR_BUSFAULTPENDED_Msk       (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos)          /*!< SCB SHCSR: BUSFAULTPENDED Mask */

+

+#define SCB_SHCSR_MEMFAULTPENDED_Pos       13                                             /*!< SCB SHCSR: MEMFAULTPENDED Position */

+#define SCB_SHCSR_MEMFAULTPENDED_Msk       (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos)          /*!< SCB SHCSR: MEMFAULTPENDED Mask */

+

+#define SCB_SHCSR_USGFAULTPENDED_Pos       12                                             /*!< SCB SHCSR: USGFAULTPENDED Position */

+#define SCB_SHCSR_USGFAULTPENDED_Msk       (1UL << SCB_SHCSR_USGFAULTPENDED_Pos)          /*!< SCB SHCSR: USGFAULTPENDED Mask */

+

+#define SCB_SHCSR_SYSTICKACT_Pos           11                                             /*!< SCB SHCSR: SYSTICKACT Position */

+#define SCB_SHCSR_SYSTICKACT_Msk           (1UL << SCB_SHCSR_SYSTICKACT_Pos)              /*!< SCB SHCSR: SYSTICKACT Mask */

+

+#define SCB_SHCSR_PENDSVACT_Pos            10                                             /*!< SCB SHCSR: PENDSVACT Position */

+#define SCB_SHCSR_PENDSVACT_Msk            (1UL << SCB_SHCSR_PENDSVACT_Pos)               /*!< SCB SHCSR: PENDSVACT Mask */

+

+#define SCB_SHCSR_MONITORACT_Pos            8                                             /*!< SCB SHCSR: MONITORACT Position */

+#define SCB_SHCSR_MONITORACT_Msk           (1UL << SCB_SHCSR_MONITORACT_Pos)              /*!< SCB SHCSR: MONITORACT Mask */

+

+#define SCB_SHCSR_SVCALLACT_Pos             7                                             /*!< SCB SHCSR: SVCALLACT Position */

+#define SCB_SHCSR_SVCALLACT_Msk            (1UL << SCB_SHCSR_SVCALLACT_Pos)               /*!< SCB SHCSR: SVCALLACT Mask */

+

+#define SCB_SHCSR_USGFAULTACT_Pos           3                                             /*!< SCB SHCSR: USGFAULTACT Position */

+#define SCB_SHCSR_USGFAULTACT_Msk          (1UL << SCB_SHCSR_USGFAULTACT_Pos)             /*!< SCB SHCSR: USGFAULTACT Mask */

+

+#define SCB_SHCSR_BUSFAULTACT_Pos           1                                             /*!< SCB SHCSR: BUSFAULTACT Position */

+#define SCB_SHCSR_BUSFAULTACT_Msk          (1UL << SCB_SHCSR_BUSFAULTACT_Pos)             /*!< SCB SHCSR: BUSFAULTACT Mask */

+

+#define SCB_SHCSR_MEMFAULTACT_Pos           0                                             /*!< SCB SHCSR: MEMFAULTACT Position */

+#define SCB_SHCSR_MEMFAULTACT_Msk          (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/)         /*!< SCB SHCSR: MEMFAULTACT Mask */

+

+/* SCB Configurable Fault Status Registers Definitions */

+#define SCB_CFSR_USGFAULTSR_Pos            16                                             /*!< SCB CFSR: Usage Fault Status Register Position */

+#define SCB_CFSR_USGFAULTSR_Msk            (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos)          /*!< SCB CFSR: Usage Fault Status Register Mask */

+

+#define SCB_CFSR_BUSFAULTSR_Pos             8                                             /*!< SCB CFSR: Bus Fault Status Register Position */

+#define SCB_CFSR_BUSFAULTSR_Msk            (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos)            /*!< SCB CFSR: Bus Fault Status Register Mask */

+

+#define SCB_CFSR_MEMFAULTSR_Pos             0                                             /*!< SCB CFSR: Memory Manage Fault Status Register Position */

+#define SCB_CFSR_MEMFAULTSR_Msk            (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/)        /*!< SCB CFSR: Memory Manage Fault Status Register Mask */

+

+/* SCB Hard Fault Status Registers Definitions */

+#define SCB_HFSR_DEBUGEVT_Pos              31                                             /*!< SCB HFSR: DEBUGEVT Position */

+#define SCB_HFSR_DEBUGEVT_Msk              (1UL << SCB_HFSR_DEBUGEVT_Pos)                 /*!< SCB HFSR: DEBUGEVT Mask */

+

+#define SCB_HFSR_FORCED_Pos                30                                             /*!< SCB HFSR: FORCED Position */

+#define SCB_HFSR_FORCED_Msk                (1UL << SCB_HFSR_FORCED_Pos)                   /*!< SCB HFSR: FORCED Mask */

+

+#define SCB_HFSR_VECTTBL_Pos                1                                             /*!< SCB HFSR: VECTTBL Position */

+#define SCB_HFSR_VECTTBL_Msk               (1UL << SCB_HFSR_VECTTBL_Pos)                  /*!< SCB HFSR: VECTTBL Mask */

+

+/* SCB Debug Fault Status Register Definitions */

+#define SCB_DFSR_EXTERNAL_Pos               4                                             /*!< SCB DFSR: EXTERNAL Position */

+#define SCB_DFSR_EXTERNAL_Msk              (1UL << SCB_DFSR_EXTERNAL_Pos)                 /*!< SCB DFSR: EXTERNAL Mask */

+

+#define SCB_DFSR_VCATCH_Pos                 3                                             /*!< SCB DFSR: VCATCH Position */

+#define SCB_DFSR_VCATCH_Msk                (1UL << SCB_DFSR_VCATCH_Pos)                   /*!< SCB DFSR: VCATCH Mask */

+

+#define SCB_DFSR_DWTTRAP_Pos                2                                             /*!< SCB DFSR: DWTTRAP Position */

+#define SCB_DFSR_DWTTRAP_Msk               (1UL << SCB_DFSR_DWTTRAP_Pos)                  /*!< SCB DFSR: DWTTRAP Mask */

+

+#define SCB_DFSR_BKPT_Pos                   1                                             /*!< SCB DFSR: BKPT Position */

+#define SCB_DFSR_BKPT_Msk                  (1UL << SCB_DFSR_BKPT_Pos)                     /*!< SCB DFSR: BKPT Mask */

+

+#define SCB_DFSR_HALTED_Pos                 0                                             /*!< SCB DFSR: HALTED Position */

+#define SCB_DFSR_HALTED_Msk                (1UL /*<< SCB_DFSR_HALTED_Pos*/)               /*!< SCB DFSR: HALTED Mask */

+

+/*@} end of group CMSIS_SCB */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB)

+    \brief      Type definitions for the System Control and ID Register not in the SCB

+  @{

+ */

+

+/** \brief  Structure type to access the System Control and ID Register not in the SCB.

+
+    NOTE(review): memory-mapped hardware register layout — the leading

+    RESERVED0 word places ICTR at offset 0x004 as documented below;

+    member order and widths must not be changed.

+ */

+typedef struct

+{

+       uint32_t RESERVED0[1];           /* offset 0x000 is reserved in this block */

+  __I  uint32_t ICTR;                    /*!< Offset: 0x004 (R/ )  Interrupt Controller Type Register      */

+  __IO uint32_t ACTLR;                   /*!< Offset: 0x008 (R/W)  Auxiliary Control Register              */

+} SCnSCB_Type;

+

+/* Interrupt Controller Type Register Definitions */

+#define SCnSCB_ICTR_INTLINESNUM_Pos         0                                          /*!< ICTR: INTLINESNUM Position */

+#define SCnSCB_ICTR_INTLINESNUM_Msk        (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/)  /*!< ICTR: INTLINESNUM Mask */

+

+/* Auxiliary Control Register Definitions */

+#define SCnSCB_ACTLR_DISOOFP_Pos            9                                          /*!< ACTLR: DISOOFP Position */

+#define SCnSCB_ACTLR_DISOOFP_Msk           (1UL << SCnSCB_ACTLR_DISOOFP_Pos)           /*!< ACTLR: DISOOFP Mask */

+

+#define SCnSCB_ACTLR_DISFPCA_Pos            8                                          /*!< ACTLR: DISFPCA Position */

+#define SCnSCB_ACTLR_DISFPCA_Msk           (1UL << SCnSCB_ACTLR_DISFPCA_Pos)           /*!< ACTLR: DISFPCA Mask */

+

+#define SCnSCB_ACTLR_DISFOLD_Pos            2                                          /*!< ACTLR: DISFOLD Position */

+#define SCnSCB_ACTLR_DISFOLD_Msk           (1UL << SCnSCB_ACTLR_DISFOLD_Pos)           /*!< ACTLR: DISFOLD Mask */

+

+#define SCnSCB_ACTLR_DISDEFWBUF_Pos         1                                          /*!< ACTLR: DISDEFWBUF Position */

+#define SCnSCB_ACTLR_DISDEFWBUF_Msk        (1UL << SCnSCB_ACTLR_DISDEFWBUF_Pos)        /*!< ACTLR: DISDEFWBUF Mask */

+

+#define SCnSCB_ACTLR_DISMCYCINT_Pos         0                                          /*!< ACTLR: DISMCYCINT Position */

+#define SCnSCB_ACTLR_DISMCYCINT_Msk        (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/)    /*!< ACTLR: DISMCYCINT Mask */

+

+/*@} end of group CMSIS_SCnotSCB */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SysTick     System Tick Timer (SysTick)

+    \brief      Type definitions for the System Timer Registers.

+  @{

+ */

+

+/** \brief  Structure type to access the System Timer (SysTick).

+
+    NOTE(review): memory-mapped hardware register layout; the four

+    consecutive 32-bit members match the offsets in the comments below

+    and must not be reordered.

+ */

+typedef struct

+{

+  __IO uint32_t CTRL;                    /*!< Offset: 0x000 (R/W)  SysTick Control and Status Register */

+  __IO uint32_t LOAD;                    /*!< Offset: 0x004 (R/W)  SysTick Reload Value Register       */

+  __IO uint32_t VAL;                     /*!< Offset: 0x008 (R/W)  SysTick Current Value Register      */

+  __I  uint32_t CALIB;                   /*!< Offset: 0x00C (R/ )  SysTick Calibration Register        */

+} SysTick_Type;

+

+/* SysTick Control / Status Register Definitions */

+#define SysTick_CTRL_COUNTFLAG_Pos         16                                             /*!< SysTick CTRL: COUNTFLAG Position */

+#define SysTick_CTRL_COUNTFLAG_Msk         (1UL << SysTick_CTRL_COUNTFLAG_Pos)            /*!< SysTick CTRL: COUNTFLAG Mask */

+

+#define SysTick_CTRL_CLKSOURCE_Pos          2                                             /*!< SysTick CTRL: CLKSOURCE Position */

+#define SysTick_CTRL_CLKSOURCE_Msk         (1UL << SysTick_CTRL_CLKSOURCE_Pos)            /*!< SysTick CTRL: CLKSOURCE Mask */

+

+#define SysTick_CTRL_TICKINT_Pos            1                                             /*!< SysTick CTRL: TICKINT Position */

+#define SysTick_CTRL_TICKINT_Msk           (1UL << SysTick_CTRL_TICKINT_Pos)              /*!< SysTick CTRL: TICKINT Mask */

+

+#define SysTick_CTRL_ENABLE_Pos             0                                             /*!< SysTick CTRL: ENABLE Position */

+#define SysTick_CTRL_ENABLE_Msk            (1UL /*<< SysTick_CTRL_ENABLE_Pos*/)           /*!< SysTick CTRL: ENABLE Mask */

+

+/* SysTick Reload Register Definitions */

+#define SysTick_LOAD_RELOAD_Pos             0                                             /*!< SysTick LOAD: RELOAD Position */

+#define SysTick_LOAD_RELOAD_Msk            (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/)    /*!< SysTick LOAD: RELOAD Mask */

+

+/* SysTick Current Register Definitions */

+#define SysTick_VAL_CURRENT_Pos             0                                             /*!< SysTick VAL: CURRENT Position */

+#define SysTick_VAL_CURRENT_Msk            (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/)    /*!< SysTick VAL: CURRENT Mask */

+

+/* SysTick Calibration Register Definitions */

+#define SysTick_CALIB_NOREF_Pos            31                                             /*!< SysTick CALIB: NOREF Position */

+#define SysTick_CALIB_NOREF_Msk            (1UL << SysTick_CALIB_NOREF_Pos)               /*!< SysTick CALIB: NOREF Mask */

+

+#define SysTick_CALIB_SKEW_Pos             30                                             /*!< SysTick CALIB: SKEW Position */

+#define SysTick_CALIB_SKEW_Msk             (1UL << SysTick_CALIB_SKEW_Pos)                /*!< SysTick CALIB: SKEW Mask */

+

+#define SysTick_CALIB_TENMS_Pos             0                                             /*!< SysTick CALIB: TENMS Position */

+#define SysTick_CALIB_TENMS_Msk            (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/)    /*!< SysTick CALIB: TENMS Mask */

+

+/*@} end of group CMSIS_SysTick */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_ITM     Instrumentation Trace Macrocell (ITM)

+    \brief      Type definitions for the Instrumentation Trace Macrocell (ITM)

+  @{

+ */

+

+/** \brief  Structure type to access the Instrumentation Trace Macrocell Register (ITM).

+
+    NOTE(review): memory-mapped hardware register layout. The RESERVEDn

+    arrays are offset padding (e.g. RESERVED0[864] words advance from the

+    end of PORT[] at 0x080 to TER at 0xE00); do not resize or reorder.

+ */

+typedef struct

+{

+  __O  union

+  {

+    __O  uint8_t    u8;                  /*!< Offset: 0x000 ( /W)  ITM Stimulus Port 8-bit                   */

+    __O  uint16_t   u16;                 /*!< Offset: 0x000 ( /W)  ITM Stimulus Port 16-bit                  */

+    __O  uint32_t   u32;                 /*!< Offset: 0x000 ( /W)  ITM Stimulus Port 32-bit                  */

+  }  PORT [32];                          /*!< Offset: 0x000 ( /W)  ITM Stimulus Port Registers               */

+       uint32_t RESERVED0[864];         /* pads 0x080..0xDFF */

+  __IO uint32_t TER;                     /*!< Offset: 0xE00 (R/W)  ITM Trace Enable Register                 */

+       uint32_t RESERVED1[15];          /* pads 0xE04..0xE3F */

+  __IO uint32_t TPR;                     /*!< Offset: 0xE40 (R/W)  ITM Trace Privilege Register              */

+       uint32_t RESERVED2[15];          /* pads 0xE44..0xE7F */

+  __IO uint32_t TCR;                     /*!< Offset: 0xE80 (R/W)  ITM Trace Control Register                */

+       uint32_t RESERVED3[29];          /* pads 0xE84..0xEF7 */

+  __O  uint32_t IWR;                     /*!< Offset: 0xEF8 ( /W)  ITM Integration Write Register            */

+  __I  uint32_t IRR;                     /*!< Offset: 0xEFC (R/ )  ITM Integration Read Register             */

+  __IO uint32_t IMCR;                    /*!< Offset: 0xF00 (R/W)  ITM Integration Mode Control Register     */

+       uint32_t RESERVED4[43];          /* pads 0xF04..0xFAF */

+  __O  uint32_t LAR;                     /*!< Offset: 0xFB0 ( /W)  ITM Lock Access Register                  */

+  __I  uint32_t LSR;                     /*!< Offset: 0xFB4 (R/ )  ITM Lock Status Register                  */

+       uint32_t RESERVED5[6];           /* pads 0xFB8..0xFCF */

+  __I  uint32_t PID4;                    /*!< Offset: 0xFD0 (R/ )  ITM Peripheral Identification Register #4 */

+  __I  uint32_t PID5;                    /*!< Offset: 0xFD4 (R/ )  ITM Peripheral Identification Register #5 */

+  __I  uint32_t PID6;                    /*!< Offset: 0xFD8 (R/ )  ITM Peripheral Identification Register #6 */

+  __I  uint32_t PID7;                    /*!< Offset: 0xFDC (R/ )  ITM Peripheral Identification Register #7 */

+  __I  uint32_t PID0;                    /*!< Offset: 0xFE0 (R/ )  ITM Peripheral Identification Register #0 */

+  __I  uint32_t PID1;                    /*!< Offset: 0xFE4 (R/ )  ITM Peripheral Identification Register #1 */

+  __I  uint32_t PID2;                    /*!< Offset: 0xFE8 (R/ )  ITM Peripheral Identification Register #2 */

+  __I  uint32_t PID3;                    /*!< Offset: 0xFEC (R/ )  ITM Peripheral Identification Register #3 */

+  __I  uint32_t CID0;                    /*!< Offset: 0xFF0 (R/ )  ITM Component  Identification Register #0 */

+  __I  uint32_t CID1;                    /*!< Offset: 0xFF4 (R/ )  ITM Component  Identification Register #1 */

+  __I  uint32_t CID2;                    /*!< Offset: 0xFF8 (R/ )  ITM Component  Identification Register #2 */

+  __I  uint32_t CID3;                    /*!< Offset: 0xFFC (R/ )  ITM Component  Identification Register #3 */

+} ITM_Type;

+

+/* ITM Trace Privilege Register Definitions */

+#define ITM_TPR_PRIVMASK_Pos                0                                             /*!< ITM TPR: PRIVMASK Position */

+#define ITM_TPR_PRIVMASK_Msk               (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/)            /*!< ITM TPR: PRIVMASK Mask */

+

+/* ITM Trace Control Register Definitions */

+#define ITM_TCR_BUSY_Pos                   23                                             /*!< ITM TCR: BUSY Position */

+#define ITM_TCR_BUSY_Msk                   (1UL << ITM_TCR_BUSY_Pos)                      /*!< ITM TCR: BUSY Mask */

+

+#define ITM_TCR_TraceBusID_Pos             16                                             /*!< ITM TCR: ATBID Position */

+#define ITM_TCR_TraceBusID_Msk             (0x7FUL << ITM_TCR_TraceBusID_Pos)             /*!< ITM TCR: ATBID Mask */

+

+#define ITM_TCR_GTSFREQ_Pos                10                                             /*!< ITM TCR: Global timestamp frequency Position */

+#define ITM_TCR_GTSFREQ_Msk                (3UL << ITM_TCR_GTSFREQ_Pos)                   /*!< ITM TCR: Global timestamp frequency Mask */

+

+#define ITM_TCR_TSPrescale_Pos              8                                             /*!< ITM TCR: TSPrescale Position */

+#define ITM_TCR_TSPrescale_Msk             (3UL << ITM_TCR_TSPrescale_Pos)                /*!< ITM TCR: TSPrescale Mask */

+

+#define ITM_TCR_SWOENA_Pos                  4                                             /*!< ITM TCR: SWOENA Position */

+#define ITM_TCR_SWOENA_Msk                 (1UL << ITM_TCR_SWOENA_Pos)                    /*!< ITM TCR: SWOENA Mask */

+

+#define ITM_TCR_DWTENA_Pos                  3                                             /*!< ITM TCR: DWTENA Position */

+#define ITM_TCR_DWTENA_Msk                 (1UL << ITM_TCR_DWTENA_Pos)                    /*!< ITM TCR: DWTENA Mask */

+

+#define ITM_TCR_SYNCENA_Pos                 2                                             /*!< ITM TCR: SYNCENA Position */

+#define ITM_TCR_SYNCENA_Msk                (1UL << ITM_TCR_SYNCENA_Pos)                   /*!< ITM TCR: SYNCENA Mask */

+

+#define ITM_TCR_TSENA_Pos                   1                                             /*!< ITM TCR: TSENA Position */

+#define ITM_TCR_TSENA_Msk                  (1UL << ITM_TCR_TSENA_Pos)                     /*!< ITM TCR: TSENA Mask */

+

+#define ITM_TCR_ITMENA_Pos                  0                                             /*!< ITM TCR: ITM Enable bit Position */

+#define ITM_TCR_ITMENA_Msk                 (1UL /*<< ITM_TCR_ITMENA_Pos*/)                /*!< ITM TCR: ITM Enable bit Mask */

+

+/* ITM Integration Write Register Definitions */

+#define ITM_IWR_ATVALIDM_Pos                0                                             /*!< ITM IWR: ATVALIDM Position */

+#define ITM_IWR_ATVALIDM_Msk               (1UL /*<< ITM_IWR_ATVALIDM_Pos*/)              /*!< ITM IWR: ATVALIDM Mask */

+

+/* ITM Integration Read Register Definitions */

+#define ITM_IRR_ATREADYM_Pos                0                                             /*!< ITM IRR: ATREADYM Position */

+#define ITM_IRR_ATREADYM_Msk               (1UL /*<< ITM_IRR_ATREADYM_Pos*/)              /*!< ITM IRR: ATREADYM Mask */

+

+/* ITM Integration Mode Control Register Definitions */

+#define ITM_IMCR_INTEGRATION_Pos            0                                             /*!< ITM IMCR: INTEGRATION Position */

+#define ITM_IMCR_INTEGRATION_Msk           (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/)          /*!< ITM IMCR: INTEGRATION Mask */

+

+/* ITM Lock Status Register Definitions */

+#define ITM_LSR_ByteAcc_Pos                 2                                             /*!< ITM LSR: ByteAcc Position */

+#define ITM_LSR_ByteAcc_Msk                (1UL << ITM_LSR_ByteAcc_Pos)                   /*!< ITM LSR: ByteAcc Mask */

+

+#define ITM_LSR_Access_Pos                  1                                             /*!< ITM LSR: Access Position */

+#define ITM_LSR_Access_Msk                 (1UL << ITM_LSR_Access_Pos)                    /*!< ITM LSR: Access Mask */

+

+#define ITM_LSR_Present_Pos                 0                                             /*!< ITM LSR: Present Position */

+#define ITM_LSR_Present_Msk                (1UL /*<< ITM_LSR_Present_Pos*/)               /*!< ITM LSR: Present Mask */

+

+/*@}*/ /* end of group CMSIS_ITM */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_DWT     Data Watchpoint and Trace (DWT)

+    \brief      Type definitions for the Data Watchpoint and Trace (DWT)

+  @{

+ */

+

+/** \brief  Structure type to access the Data Watchpoint and Trace Register (DWT).

+
+    NOTE(review): memory-mapped hardware register layout. Each of the four

+    comparator banks (COMPn/MASKn/FUNCTIONn) is followed by one reserved

+    word so the next bank starts on a 0x10 boundary; do not repack.

+ */

+typedef struct

+{

+  __IO uint32_t CTRL;                    /*!< Offset: 0x000 (R/W)  Control Register                          */

+  __IO uint32_t CYCCNT;                  /*!< Offset: 0x004 (R/W)  Cycle Count Register                      */

+  __IO uint32_t CPICNT;                  /*!< Offset: 0x008 (R/W)  CPI Count Register                        */

+  __IO uint32_t EXCCNT;                  /*!< Offset: 0x00C (R/W)  Exception Overhead Count Register         */

+  __IO uint32_t SLEEPCNT;                /*!< Offset: 0x010 (R/W)  Sleep Count Register                      */

+  __IO uint32_t LSUCNT;                  /*!< Offset: 0x014 (R/W)  LSU Count Register                        */

+  __IO uint32_t FOLDCNT;                 /*!< Offset: 0x018 (R/W)  Folded-instruction Count Register         */

+  __I  uint32_t PCSR;                    /*!< Offset: 0x01C (R/ )  Program Counter Sample Register           */

+  __IO uint32_t COMP0;                   /*!< Offset: 0x020 (R/W)  Comparator Register 0                     */

+  __IO uint32_t MASK0;                   /*!< Offset: 0x024 (R/W)  Mask Register 0                           */

+  __IO uint32_t FUNCTION0;               /*!< Offset: 0x028 (R/W)  Function Register 0                       */

+       uint32_t RESERVED0[1];           /* pads 0x02C */

+  __IO uint32_t COMP1;                   /*!< Offset: 0x030 (R/W)  Comparator Register 1                     */

+  __IO uint32_t MASK1;                   /*!< Offset: 0x034 (R/W)  Mask Register 1                           */

+  __IO uint32_t FUNCTION1;               /*!< Offset: 0x038 (R/W)  Function Register 1                       */

+       uint32_t RESERVED1[1];           /* pads 0x03C */

+  __IO uint32_t COMP2;                   /*!< Offset: 0x040 (R/W)  Comparator Register 2                     */

+  __IO uint32_t MASK2;                   /*!< Offset: 0x044 (R/W)  Mask Register 2                           */

+  __IO uint32_t FUNCTION2;               /*!< Offset: 0x048 (R/W)  Function Register 2                       */

+       uint32_t RESERVED2[1];           /* pads 0x04C */

+  __IO uint32_t COMP3;                   /*!< Offset: 0x050 (R/W)  Comparator Register 3                     */

+  __IO uint32_t MASK3;                   /*!< Offset: 0x054 (R/W)  Mask Register 3                           */

+  __IO uint32_t FUNCTION3;               /*!< Offset: 0x058 (R/W)  Function Register 3                       */

+} DWT_Type;

+

+/* DWT Control Register Definitions */

+#define DWT_CTRL_NUMCOMP_Pos               28                                          /*!< DWT CTRL: NUMCOMP Position */

+#define DWT_CTRL_NUMCOMP_Msk               (0xFUL << DWT_CTRL_NUMCOMP_Pos)             /*!< DWT CTRL: NUMCOMP Mask */

+

+#define DWT_CTRL_NOTRCPKT_Pos              27                                          /*!< DWT CTRL: NOTRCPKT Position */

+#define DWT_CTRL_NOTRCPKT_Msk              (0x1UL << DWT_CTRL_NOTRCPKT_Pos)            /*!< DWT CTRL: NOTRCPKT Mask */

+

+#define DWT_CTRL_NOEXTTRIG_Pos             26                                          /*!< DWT CTRL: NOEXTTRIG Position */

+#define DWT_CTRL_NOEXTTRIG_Msk             (0x1UL << DWT_CTRL_NOEXTTRIG_Pos)           /*!< DWT CTRL: NOEXTTRIG Mask */

+

+#define DWT_CTRL_NOCYCCNT_Pos              25                                          /*!< DWT CTRL: NOCYCCNT Position */

+#define DWT_CTRL_NOCYCCNT_Msk              (0x1UL << DWT_CTRL_NOCYCCNT_Pos)            /*!< DWT CTRL: NOCYCCNT Mask */

+

+#define DWT_CTRL_NOPRFCNT_Pos              24                                          /*!< DWT CTRL: NOPRFCNT Position */

+#define DWT_CTRL_NOPRFCNT_Msk              (0x1UL << DWT_CTRL_NOPRFCNT_Pos)            /*!< DWT CTRL: NOPRFCNT Mask */

+

+#define DWT_CTRL_CYCEVTENA_Pos             22                                          /*!< DWT CTRL: CYCEVTENA Position */

+#define DWT_CTRL_CYCEVTENA_Msk             (0x1UL << DWT_CTRL_CYCEVTENA_Pos)           /*!< DWT CTRL: CYCEVTENA Mask */

+

+#define DWT_CTRL_FOLDEVTENA_Pos            21                                          /*!< DWT CTRL: FOLDEVTENA Position */

+#define DWT_CTRL_FOLDEVTENA_Msk            (0x1UL << DWT_CTRL_FOLDEVTENA_Pos)          /*!< DWT CTRL: FOLDEVTENA Mask */

+

+#define DWT_CTRL_LSUEVTENA_Pos             20                                          /*!< DWT CTRL: LSUEVTENA Position */

+#define DWT_CTRL_LSUEVTENA_Msk             (0x1UL << DWT_CTRL_LSUEVTENA_Pos)           /*!< DWT CTRL: LSUEVTENA Mask */

+

+#define DWT_CTRL_SLEEPEVTENA_Pos           19                                          /*!< DWT CTRL: SLEEPEVTENA Position */

+#define DWT_CTRL_SLEEPEVTENA_Msk           (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos)         /*!< DWT CTRL: SLEEPEVTENA Mask */

+

+#define DWT_CTRL_EXCEVTENA_Pos             18                                          /*!< DWT CTRL: EXCEVTENA Position */

+#define DWT_CTRL_EXCEVTENA_Msk             (0x1UL << DWT_CTRL_EXCEVTENA_Pos)           /*!< DWT CTRL: EXCEVTENA Mask */

+

+#define DWT_CTRL_CPIEVTENA_Pos             17                                          /*!< DWT CTRL: CPIEVTENA Position */

+#define DWT_CTRL_CPIEVTENA_Msk             (0x1UL << DWT_CTRL_CPIEVTENA_Pos)           /*!< DWT CTRL: CPIEVTENA Mask */

+

+#define DWT_CTRL_EXCTRCENA_Pos             16                                          /*!< DWT CTRL: EXCTRCENA Position */

+#define DWT_CTRL_EXCTRCENA_Msk             (0x1UL << DWT_CTRL_EXCTRCENA_Pos)           /*!< DWT CTRL: EXCTRCENA Mask */

+

+#define DWT_CTRL_PCSAMPLENA_Pos            12                                          /*!< DWT CTRL: PCSAMPLENA Position */

+#define DWT_CTRL_PCSAMPLENA_Msk            (0x1UL << DWT_CTRL_PCSAMPLENA_Pos)          /*!< DWT CTRL: PCSAMPLENA Mask */

+

+#define DWT_CTRL_SYNCTAP_Pos               10                                          /*!< DWT CTRL: SYNCTAP Position */

+#define DWT_CTRL_SYNCTAP_Msk               (0x3UL << DWT_CTRL_SYNCTAP_Pos)             /*!< DWT CTRL: SYNCTAP Mask */

+

+#define DWT_CTRL_CYCTAP_Pos                 9                                          /*!< DWT CTRL: CYCTAP Position */

+#define DWT_CTRL_CYCTAP_Msk                (0x1UL << DWT_CTRL_CYCTAP_Pos)              /*!< DWT CTRL: CYCTAP Mask */

+

+#define DWT_CTRL_POSTINIT_Pos               5                                          /*!< DWT CTRL: POSTINIT Position */

+#define DWT_CTRL_POSTINIT_Msk              (0xFUL << DWT_CTRL_POSTINIT_Pos)            /*!< DWT CTRL: POSTINIT Mask */

+

+#define DWT_CTRL_POSTPRESET_Pos             1                                          /*!< DWT CTRL: POSTPRESET Position */

+#define DWT_CTRL_POSTPRESET_Msk            (0xFUL << DWT_CTRL_POSTPRESET_Pos)          /*!< DWT CTRL: POSTPRESET Mask */

+

+#define DWT_CTRL_CYCCNTENA_Pos              0                                          /*!< DWT CTRL: CYCCNTENA Position */

+#define DWT_CTRL_CYCCNTENA_Msk             (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/)       /*!< DWT CTRL: CYCCNTENA Mask */

+

+/* DWT CPI Count Register Definitions */

+#define DWT_CPICNT_CPICNT_Pos               0                                          /*!< DWT CPICNT: CPICNT Position */

+#define DWT_CPICNT_CPICNT_Msk              (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/)       /*!< DWT CPICNT: CPICNT Mask */

+

+/* DWT Exception Overhead Count Register Definitions */

+#define DWT_EXCCNT_EXCCNT_Pos               0                                          /*!< DWT EXCCNT: EXCCNT Position */

+#define DWT_EXCCNT_EXCCNT_Msk              (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/)       /*!< DWT EXCCNT: EXCCNT Mask */

+

+/* DWT Sleep Count Register Definitions */

+#define DWT_SLEEPCNT_SLEEPCNT_Pos           0                                          /*!< DWT SLEEPCNT: SLEEPCNT Position */

+#define DWT_SLEEPCNT_SLEEPCNT_Msk          (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/)   /*!< DWT SLEEPCNT: SLEEPCNT Mask */

+

+/* DWT LSU Count Register Definitions */

+#define DWT_LSUCNT_LSUCNT_Pos               0                                          /*!< DWT LSUCNT: LSUCNT Position */

+#define DWT_LSUCNT_LSUCNT_Msk              (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/)       /*!< DWT LSUCNT: LSUCNT Mask */

+

+/* DWT Folded-instruction Count Register Definitions */

+#define DWT_FOLDCNT_FOLDCNT_Pos             0                                          /*!< DWT FOLDCNT: FOLDCNT Position */

+#define DWT_FOLDCNT_FOLDCNT_Msk            (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/)     /*!< DWT FOLDCNT: FOLDCNT Mask */

+

+/* DWT Comparator Mask Register Definitions */

+#define DWT_MASK_MASK_Pos                   0                                          /*!< DWT MASK: MASK Position */

+#define DWT_MASK_MASK_Msk                  (0x1FUL /*<< DWT_MASK_MASK_Pos*/)           /*!< DWT MASK: MASK Mask */

+

+/* DWT Comparator Function Register Definitions */

+#define DWT_FUNCTION_MATCHED_Pos           24                                          /*!< DWT FUNCTION: MATCHED Position */

+#define DWT_FUNCTION_MATCHED_Msk           (0x1UL << DWT_FUNCTION_MATCHED_Pos)         /*!< DWT FUNCTION: MATCHED Mask */

+

+#define DWT_FUNCTION_DATAVADDR1_Pos        16                                          /*!< DWT FUNCTION: DATAVADDR1 Position */

+#define DWT_FUNCTION_DATAVADDR1_Msk        (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos)      /*!< DWT FUNCTION: DATAVADDR1 Mask */

+

+#define DWT_FUNCTION_DATAVADDR0_Pos        12                                          /*!< DWT FUNCTION: DATAVADDR0 Position */

+#define DWT_FUNCTION_DATAVADDR0_Msk        (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos)      /*!< DWT FUNCTION: DATAVADDR0 Mask */

+

+#define DWT_FUNCTION_DATAVSIZE_Pos         10                                          /*!< DWT FUNCTION: DATAVSIZE Position */

+#define DWT_FUNCTION_DATAVSIZE_Msk         (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos)       /*!< DWT FUNCTION: DATAVSIZE Mask */

+

+#define DWT_FUNCTION_LNK1ENA_Pos            9                                          /*!< DWT FUNCTION: LNK1ENA Position */

+#define DWT_FUNCTION_LNK1ENA_Msk           (0x1UL << DWT_FUNCTION_LNK1ENA_Pos)         /*!< DWT FUNCTION: LNK1ENA Mask */

+

+#define DWT_FUNCTION_DATAVMATCH_Pos         8                                          /*!< DWT FUNCTION: DATAVMATCH Position */

+#define DWT_FUNCTION_DATAVMATCH_Msk        (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos)      /*!< DWT FUNCTION: DATAVMATCH Mask */

+

+#define DWT_FUNCTION_CYCMATCH_Pos           7                                          /*!< DWT FUNCTION: CYCMATCH Position */

+#define DWT_FUNCTION_CYCMATCH_Msk          (0x1UL << DWT_FUNCTION_CYCMATCH_Pos)        /*!< DWT FUNCTION: CYCMATCH Mask */

+

+#define DWT_FUNCTION_EMITRANGE_Pos          5                                          /*!< DWT FUNCTION: EMITRANGE Position */

+#define DWT_FUNCTION_EMITRANGE_Msk         (0x1UL << DWT_FUNCTION_EMITRANGE_Pos)       /*!< DWT FUNCTION: EMITRANGE Mask */

+

+#define DWT_FUNCTION_FUNCTION_Pos           0                                          /*!< DWT FUNCTION: FUNCTION Position */

+#define DWT_FUNCTION_FUNCTION_Msk          (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/)    /*!< DWT FUNCTION: FUNCTION Mask */

+

+/*@}*/ /* end of group CMSIS_DWT */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_TPI     Trace Port Interface (TPI)

+    \brief      Type definitions for the Trace Port Interface (TPI)

+  @{

+ */

+

+/** \brief  Structure type to access the Trace Port Interface Register (TPI).

+ */

+typedef struct
+{
+  /* Note: RESERVEDn arrays pad the map so each register lands at the offset
+     given in its trailing comment; do not reorder or resize these fields. */
+  __IO uint32_t SSPSR;                   /*!< Offset: 0x000 (R/ )  Supported Parallel Port Size Register     */
+  __IO uint32_t CSPSR;                   /*!< Offset: 0x004 (R/W)  Current Parallel Port Size Register */
+       uint32_t RESERVED0[2];
+  __IO uint32_t ACPR;                    /*!< Offset: 0x010 (R/W)  Asynchronous Clock Prescaler Register */
+       uint32_t RESERVED1[55];
+  __IO uint32_t SPPR;                    /*!< Offset: 0x0F0 (R/W)  Selected Pin Protocol Register */
+       uint32_t RESERVED2[131];
+  __I  uint32_t FFSR;                    /*!< Offset: 0x300 (R/ )  Formatter and Flush Status Register */
+  __IO uint32_t FFCR;                    /*!< Offset: 0x304 (R/W)  Formatter and Flush Control Register */
+  __I  uint32_t FSCR;                    /*!< Offset: 0x308 (R/ )  Formatter Synchronization Counter Register */
+       uint32_t RESERVED3[759];
+  __I  uint32_t TRIGGER;                 /*!< Offset: 0xEE8 (R/ )  TRIGGER */
+  __I  uint32_t FIFO0;                   /*!< Offset: 0xEEC (R/ )  Integration ETM Data */
+  __I  uint32_t ITATBCTR2;               /*!< Offset: 0xEF0 (R/ )  ITATBCTR2 */
+       uint32_t RESERVED4[1];
+  __I  uint32_t ITATBCTR0;               /*!< Offset: 0xEF8 (R/ )  ITATBCTR0 */
+  __I  uint32_t FIFO1;                   /*!< Offset: 0xEFC (R/ )  Integration ITM Data */
+  __IO uint32_t ITCTRL;                  /*!< Offset: 0xF00 (R/W)  Integration Mode Control */
+       uint32_t RESERVED5[39];
+  __IO uint32_t CLAIMSET;                /*!< Offset: 0xFA0 (R/W)  Claim tag set */
+  __IO uint32_t CLAIMCLR;                /*!< Offset: 0xFA4 (R/W)  Claim tag clear */
+       uint32_t RESERVED7[8];
+  __I  uint32_t DEVID;                   /*!< Offset: 0xFC8 (R/ )  TPIU_DEVID */
+  __I  uint32_t DEVTYPE;                 /*!< Offset: 0xFCC (R/ )  TPIU_DEVTYPE */
+} TPI_Type;
+
+/* TPI Asynchronous Clock Prescaler Register Definitions */
+#define TPI_ACPR_PRESCALER_Pos              0                                          /*!< TPI ACPR: PRESCALER Position */
+#define TPI_ACPR_PRESCALER_Msk             (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/)    /*!< TPI ACPR: PRESCALER Mask */
+
+/* TPI Selected Pin Protocol Register Definitions */
+#define TPI_SPPR_TXMODE_Pos                 0                                          /*!< TPI SPPR: TXMODE Position */
+#define TPI_SPPR_TXMODE_Msk                (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/)          /*!< TPI SPPR: TXMODE Mask */
+
+/* TPI Formatter and Flush Status Register Definitions */
+#define TPI_FFSR_FtNonStop_Pos              3                                          /*!< TPI FFSR: FtNonStop Position */
+#define TPI_FFSR_FtNonStop_Msk             (0x1UL << TPI_FFSR_FtNonStop_Pos)           /*!< TPI FFSR: FtNonStop Mask */
+
+#define TPI_FFSR_TCPresent_Pos              2                                          /*!< TPI FFSR: TCPresent Position */
+#define TPI_FFSR_TCPresent_Msk             (0x1UL << TPI_FFSR_TCPresent_Pos)           /*!< TPI FFSR: TCPresent Mask */
+
+#define TPI_FFSR_FtStopped_Pos              1                                          /*!< TPI FFSR: FtStopped Position */
+#define TPI_FFSR_FtStopped_Msk             (0x1UL << TPI_FFSR_FtStopped_Pos)           /*!< TPI FFSR: FtStopped Mask */
+
+#define TPI_FFSR_FlInProg_Pos               0                                          /*!< TPI FFSR: FlInProg Position */
+#define TPI_FFSR_FlInProg_Msk              (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/)        /*!< TPI FFSR: FlInProg Mask */
+
+/* TPI Formatter and Flush Control Register Definitions */
+#define TPI_FFCR_TrigIn_Pos                 8                                          /*!< TPI FFCR: TrigIn Position */
+#define TPI_FFCR_TrigIn_Msk                (0x1UL << TPI_FFCR_TrigIn_Pos)              /*!< TPI FFCR: TrigIn Mask */
+
+#define TPI_FFCR_EnFCont_Pos                1                                          /*!< TPI FFCR: EnFCont Position */
+#define TPI_FFCR_EnFCont_Msk               (0x1UL << TPI_FFCR_EnFCont_Pos)             /*!< TPI FFCR: EnFCont Mask */
+
+/* TPI TRIGGER Register Definitions */
+#define TPI_TRIGGER_TRIGGER_Pos             0                                          /*!< TPI TRIGGER: TRIGGER Position */
+#define TPI_TRIGGER_TRIGGER_Msk            (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/)      /*!< TPI TRIGGER: TRIGGER Mask */
+
+/* TPI Integration ETM Data Register Definitions (FIFO0) */
+/* NOTE(review): the ITM_ATVALID/ETM_ATVALID masks below (and the FIFO1
+   equivalents) are two bits wide (0x3UL) although each flag sits at a single
+   bit position; later CMSIS releases narrow these masks to 0x1UL. Confirm
+   against the TPIU integration-register description before relying on the
+   upper bit of either mask. */
+#define TPI_FIFO0_ITM_ATVALID_Pos          29                                          /*!< TPI FIFO0: ITM_ATVALID Position */
+#define TPI_FIFO0_ITM_ATVALID_Msk          (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos)        /*!< TPI FIFO0: ITM_ATVALID Mask */
+
+#define TPI_FIFO0_ITM_bytecount_Pos        27                                          /*!< TPI FIFO0: ITM_bytecount Position */
+#define TPI_FIFO0_ITM_bytecount_Msk        (0x3UL << TPI_FIFO0_ITM_bytecount_Pos)      /*!< TPI FIFO0: ITM_bytecount Mask */
+
+#define TPI_FIFO0_ETM_ATVALID_Pos          26                                          /*!< TPI FIFO0: ETM_ATVALID Position */
+#define TPI_FIFO0_ETM_ATVALID_Msk          (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos)        /*!< TPI FIFO0: ETM_ATVALID Mask */
+
+#define TPI_FIFO0_ETM_bytecount_Pos        24                                          /*!< TPI FIFO0: ETM_bytecount Position */
+#define TPI_FIFO0_ETM_bytecount_Msk        (0x3UL << TPI_FIFO0_ETM_bytecount_Pos)      /*!< TPI FIFO0: ETM_bytecount Mask */
+
+#define TPI_FIFO0_ETM2_Pos                 16                                          /*!< TPI FIFO0: ETM2 Position */
+#define TPI_FIFO0_ETM2_Msk                 (0xFFUL << TPI_FIFO0_ETM2_Pos)              /*!< TPI FIFO0: ETM2 Mask */
+
+#define TPI_FIFO0_ETM1_Pos                  8                                          /*!< TPI FIFO0: ETM1 Position */
+#define TPI_FIFO0_ETM1_Msk                 (0xFFUL << TPI_FIFO0_ETM1_Pos)              /*!< TPI FIFO0: ETM1 Mask */
+
+#define TPI_FIFO0_ETM0_Pos                  0                                          /*!< TPI FIFO0: ETM0 Position */
+#define TPI_FIFO0_ETM0_Msk                 (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/)          /*!< TPI FIFO0: ETM0 Mask */
+
+/* TPI ITATBCTR2 Register Definitions */
+#define TPI_ITATBCTR2_ATREADY_Pos           0                                          /*!< TPI ITATBCTR2: ATREADY Position */
+#define TPI_ITATBCTR2_ATREADY_Msk          (0x1UL /*<< TPI_ITATBCTR2_ATREADY_Pos*/)    /*!< TPI ITATBCTR2: ATREADY Mask */
+
+/* TPI Integration ITM Data Register Definitions (FIFO1) */
+#define TPI_FIFO1_ITM_ATVALID_Pos          29                                          /*!< TPI FIFO1: ITM_ATVALID Position */
+#define TPI_FIFO1_ITM_ATVALID_Msk          (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos)        /*!< TPI FIFO1: ITM_ATVALID Mask */
+
+#define TPI_FIFO1_ITM_bytecount_Pos        27                                          /*!< TPI FIFO1: ITM_bytecount Position */
+#define TPI_FIFO1_ITM_bytecount_Msk        (0x3UL << TPI_FIFO1_ITM_bytecount_Pos)      /*!< TPI FIFO1: ITM_bytecount Mask */
+
+#define TPI_FIFO1_ETM_ATVALID_Pos          26                                          /*!< TPI FIFO1: ETM_ATVALID Position */
+#define TPI_FIFO1_ETM_ATVALID_Msk          (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos)        /*!< TPI FIFO1: ETM_ATVALID Mask */
+
+#define TPI_FIFO1_ETM_bytecount_Pos        24                                          /*!< TPI FIFO1: ETM_bytecount Position */
+#define TPI_FIFO1_ETM_bytecount_Msk        (0x3UL << TPI_FIFO1_ETM_bytecount_Pos)      /*!< TPI FIFO1: ETM_bytecount Mask */
+
+#define TPI_FIFO1_ITM2_Pos                 16                                          /*!< TPI FIFO1: ITM2 Position */
+#define TPI_FIFO1_ITM2_Msk                 (0xFFUL << TPI_FIFO1_ITM2_Pos)              /*!< TPI FIFO1: ITM2 Mask */
+
+#define TPI_FIFO1_ITM1_Pos                  8                                          /*!< TPI FIFO1: ITM1 Position */
+#define TPI_FIFO1_ITM1_Msk                 (0xFFUL << TPI_FIFO1_ITM1_Pos)              /*!< TPI FIFO1: ITM1 Mask */
+
+#define TPI_FIFO1_ITM0_Pos                  0                                          /*!< TPI FIFO1: ITM0 Position */
+#define TPI_FIFO1_ITM0_Msk                 (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/)          /*!< TPI FIFO1: ITM0 Mask */
+
+/* TPI ITATBCTR0 Register Definitions */
+#define TPI_ITATBCTR0_ATREADY_Pos           0                                          /*!< TPI ITATBCTR0: ATREADY Position */
+#define TPI_ITATBCTR0_ATREADY_Msk          (0x1UL /*<< TPI_ITATBCTR0_ATREADY_Pos*/)    /*!< TPI ITATBCTR0: ATREADY Mask */
+
+/* TPI Integration Mode Control Register Definitions */
+#define TPI_ITCTRL_Mode_Pos                 0                                          /*!< TPI ITCTRL: Mode Position */
+#define TPI_ITCTRL_Mode_Msk                (0x1UL /*<< TPI_ITCTRL_Mode_Pos*/)          /*!< TPI ITCTRL: Mode Mask */
+
+/* TPI DEVID Register Definitions */
+#define TPI_DEVID_NRZVALID_Pos             11                                          /*!< TPI DEVID: NRZVALID Position */
+#define TPI_DEVID_NRZVALID_Msk             (0x1UL << TPI_DEVID_NRZVALID_Pos)           /*!< TPI DEVID: NRZVALID Mask */
+
+#define TPI_DEVID_MANCVALID_Pos            10                                          /*!< TPI DEVID: MANCVALID Position */
+#define TPI_DEVID_MANCVALID_Msk            (0x1UL << TPI_DEVID_MANCVALID_Pos)          /*!< TPI DEVID: MANCVALID Mask */
+
+#define TPI_DEVID_PTINVALID_Pos             9                                          /*!< TPI DEVID: PTINVALID Position */
+#define TPI_DEVID_PTINVALID_Msk            (0x1UL << TPI_DEVID_PTINVALID_Pos)          /*!< TPI DEVID: PTINVALID Mask */
+
+#define TPI_DEVID_MinBufSz_Pos              6                                          /*!< TPI DEVID: MinBufSz Position */
+#define TPI_DEVID_MinBufSz_Msk             (0x7UL << TPI_DEVID_MinBufSz_Pos)           /*!< TPI DEVID: MinBufSz Mask */
+
+#define TPI_DEVID_AsynClkIn_Pos             5                                          /*!< TPI DEVID: AsynClkIn Position */
+#define TPI_DEVID_AsynClkIn_Msk            (0x1UL << TPI_DEVID_AsynClkIn_Pos)          /*!< TPI DEVID: AsynClkIn Mask */
+
+#define TPI_DEVID_NrTraceInput_Pos          0                                          /*!< TPI DEVID: NrTraceInput Position */
+#define TPI_DEVID_NrTraceInput_Msk         (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/)  /*!< TPI DEVID: NrTraceInput Mask */
+
+/* TPI DEVTYPE Register Definitions */
+#define TPI_DEVTYPE_MajorType_Pos           4                                          /*!< TPI DEVTYPE: MajorType Position */
+#define TPI_DEVTYPE_MajorType_Msk          (0xFUL << TPI_DEVTYPE_MajorType_Pos)        /*!< TPI DEVTYPE: MajorType Mask */
+
+#define TPI_DEVTYPE_SubType_Pos             0                                          /*!< TPI DEVTYPE: SubType Position */
+#define TPI_DEVTYPE_SubType_Msk            (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/)      /*!< TPI DEVTYPE: SubType Mask */

+

+/*@}*/ /* end of group CMSIS_TPI */

+

+

+#if (__MPU_PRESENT == 1)
+/** \ingroup  CMSIS_core_register
+    \defgroup CMSIS_MPU     Memory Protection Unit (MPU)
+    \brief      Type definitions for the Memory Protection Unit (MPU)
+  @{
+ */
+
+/** \brief  Structure type to access the Memory Protection Unit (MPU).
+ */
+typedef struct
+{
+  __I  uint32_t TYPE;                    /*!< Offset: 0x000 (R/ )  MPU Type Register                              */
+  __IO uint32_t CTRL;                    /*!< Offset: 0x004 (R/W)  MPU Control Register                           */
+  __IO uint32_t RNR;                     /*!< Offset: 0x008 (R/W)  MPU Region RNRber Register                     */
+  __IO uint32_t RBAR;                    /*!< Offset: 0x00C (R/W)  MPU Region Base Address Register               */
+  __IO uint32_t RASR;                    /*!< Offset: 0x010 (R/W)  MPU Region Attribute and Size Register         */
+  /* RBAR/RASR alias registers: additional views of the region registers so
+     several regions can be programmed with one multi-word store sequence. */
+  __IO uint32_t RBAR_A1;                 /*!< Offset: 0x014 (R/W)  MPU Alias 1 Region Base Address Register       */
+  __IO uint32_t RASR_A1;                 /*!< Offset: 0x018 (R/W)  MPU Alias 1 Region Attribute and Size Register */
+  __IO uint32_t RBAR_A2;                 /*!< Offset: 0x01C (R/W)  MPU Alias 2 Region Base Address Register       */
+  __IO uint32_t RASR_A2;                 /*!< Offset: 0x020 (R/W)  MPU Alias 2 Region Attribute and Size Register */
+  __IO uint32_t RBAR_A3;                 /*!< Offset: 0x024 (R/W)  MPU Alias 3 Region Base Address Register       */
+  __IO uint32_t RASR_A3;                 /*!< Offset: 0x028 (R/W)  MPU Alias 3 Region Attribute and Size Register */
+} MPU_Type;
+
+/* MPU Type Register */
+#define MPU_TYPE_IREGION_Pos               16                                             /*!< MPU TYPE: IREGION Position */
+#define MPU_TYPE_IREGION_Msk               (0xFFUL << MPU_TYPE_IREGION_Pos)               /*!< MPU TYPE: IREGION Mask */
+
+#define MPU_TYPE_DREGION_Pos                8                                             /*!< MPU TYPE: DREGION Position */
+#define MPU_TYPE_DREGION_Msk               (0xFFUL << MPU_TYPE_DREGION_Pos)               /*!< MPU TYPE: DREGION Mask */
+
+#define MPU_TYPE_SEPARATE_Pos               0                                             /*!< MPU TYPE: SEPARATE Position */
+#define MPU_TYPE_SEPARATE_Msk              (1UL /*<< MPU_TYPE_SEPARATE_Pos*/)             /*!< MPU TYPE: SEPARATE Mask */
+
+/* MPU Control Register */
+#define MPU_CTRL_PRIVDEFENA_Pos             2                                             /*!< MPU CTRL: PRIVDEFENA Position */
+#define MPU_CTRL_PRIVDEFENA_Msk            (1UL << MPU_CTRL_PRIVDEFENA_Pos)               /*!< MPU CTRL: PRIVDEFENA Mask */
+
+#define MPU_CTRL_HFNMIENA_Pos               1                                             /*!< MPU CTRL: HFNMIENA Position */
+#define MPU_CTRL_HFNMIENA_Msk              (1UL << MPU_CTRL_HFNMIENA_Pos)                 /*!< MPU CTRL: HFNMIENA Mask */
+
+#define MPU_CTRL_ENABLE_Pos                 0                                             /*!< MPU CTRL: ENABLE Position */
+#define MPU_CTRL_ENABLE_Msk                (1UL /*<< MPU_CTRL_ENABLE_Pos*/)               /*!< MPU CTRL: ENABLE Mask */
+
+/* MPU Region Number Register */
+#define MPU_RNR_REGION_Pos                  0                                             /*!< MPU RNR: REGION Position */
+#define MPU_RNR_REGION_Msk                 (0xFFUL /*<< MPU_RNR_REGION_Pos*/)             /*!< MPU RNR: REGION Mask */
+
+/* MPU Region Base Address Register */
+#define MPU_RBAR_ADDR_Pos                   5                                             /*!< MPU RBAR: ADDR Position */
+#define MPU_RBAR_ADDR_Msk                  (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos)             /*!< MPU RBAR: ADDR Mask */
+
+#define MPU_RBAR_VALID_Pos                  4                                             /*!< MPU RBAR: VALID Position */
+#define MPU_RBAR_VALID_Msk                 (1UL << MPU_RBAR_VALID_Pos)                    /*!< MPU RBAR: VALID Mask */
+
+#define MPU_RBAR_REGION_Pos                 0                                             /*!< MPU RBAR: REGION Position */
+#define MPU_RBAR_REGION_Msk                (0xFUL /*<< MPU_RBAR_REGION_Pos*/)             /*!< MPU RBAR: REGION Mask */
+
+/* MPU Region Attribute and Size Register */
+/* ATTRS covers the whole attribute field (bits 31:16); the XN/AP/TEX/S/C/B
+   definitions that follow address individual sub-fields within that range. */
+#define MPU_RASR_ATTRS_Pos                 16                                             /*!< MPU RASR: MPU Region Attribute field Position */
+#define MPU_RASR_ATTRS_Msk                 (0xFFFFUL << MPU_RASR_ATTRS_Pos)               /*!< MPU RASR: MPU Region Attribute field Mask */
+
+#define MPU_RASR_XN_Pos                    28                                             /*!< MPU RASR: ATTRS.XN Position */
+#define MPU_RASR_XN_Msk                    (1UL << MPU_RASR_XN_Pos)                       /*!< MPU RASR: ATTRS.XN Mask */
+
+#define MPU_RASR_AP_Pos                    24                                             /*!< MPU RASR: ATTRS.AP Position */
+#define MPU_RASR_AP_Msk                    (0x7UL << MPU_RASR_AP_Pos)                     /*!< MPU RASR: ATTRS.AP Mask */
+
+#define MPU_RASR_TEX_Pos                   19                                             /*!< MPU RASR: ATTRS.TEX Position */
+#define MPU_RASR_TEX_Msk                   (0x7UL << MPU_RASR_TEX_Pos)                    /*!< MPU RASR: ATTRS.TEX Mask */
+
+#define MPU_RASR_S_Pos                     18                                             /*!< MPU RASR: ATTRS.S Position */
+#define MPU_RASR_S_Msk                     (1UL << MPU_RASR_S_Pos)                        /*!< MPU RASR: ATTRS.S Mask */
+
+#define MPU_RASR_C_Pos                     17                                             /*!< MPU RASR: ATTRS.C Position */
+#define MPU_RASR_C_Msk                     (1UL << MPU_RASR_C_Pos)                        /*!< MPU RASR: ATTRS.C Mask */
+
+#define MPU_RASR_B_Pos                     16                                             /*!< MPU RASR: ATTRS.B Position */
+#define MPU_RASR_B_Msk                     (1UL << MPU_RASR_B_Pos)                        /*!< MPU RASR: ATTRS.B Mask */
+
+#define MPU_RASR_SRD_Pos                    8                                             /*!< MPU RASR: Sub-Region Disable Position */
+#define MPU_RASR_SRD_Msk                   (0xFFUL << MPU_RASR_SRD_Pos)                   /*!< MPU RASR: Sub-Region Disable Mask */
+
+#define MPU_RASR_SIZE_Pos                   1                                             /*!< MPU RASR: Region Size Field Position */
+#define MPU_RASR_SIZE_Msk                  (0x1FUL << MPU_RASR_SIZE_Pos)                  /*!< MPU RASR: Region Size Field Mask */
+
+#define MPU_RASR_ENABLE_Pos                 0                                             /*!< MPU RASR: Region enable bit Position */
+#define MPU_RASR_ENABLE_Msk                (1UL /*<< MPU_RASR_ENABLE_Pos*/)               /*!< MPU RASR: Region enable bit Disable Mask */
+
+/*@} end of group CMSIS_MPU */
+#endif

+

+

+#if (__FPU_PRESENT == 1)

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_FPU     Floating Point Unit (FPU)

+    \brief      Type definitions for the Floating Point Unit (FPU)

+  @{

+ */

+

+/** \brief  Structure type to access the Floating Point Unit (FPU).

+ */

+typedef struct
+{
+       uint32_t RESERVED0[1];            /* pads offset 0x000 so FPCCR lands at 0x004 */
+  __IO uint32_t FPCCR;                   /*!< Offset: 0x004 (R/W)  Floating-Point Context Control Register               */
+  __IO uint32_t FPCAR;                   /*!< Offset: 0x008 (R/W)  Floating-Point Context Address Register               */
+  __IO uint32_t FPDSCR;                  /*!< Offset: 0x00C (R/W)  Floating-Point Default Status Control Register        */
+  __I  uint32_t MVFR0;                   /*!< Offset: 0x010 (R/ )  Media and FP Feature Register 0                       */
+  __I  uint32_t MVFR1;                   /*!< Offset: 0x014 (R/ )  Media and FP Feature Register 1                       */
+} FPU_Type;

+

+/* Floating-Point Context Control Register */
+#define FPU_FPCCR_ASPEN_Pos                31                                             /*!< FPCCR: ASPEN bit Position */
+#define FPU_FPCCR_ASPEN_Msk                (1UL << FPU_FPCCR_ASPEN_Pos)                   /*!< FPCCR: ASPEN bit Mask */
+
+#define FPU_FPCCR_LSPEN_Pos                30                                             /*!< FPCCR: LSPEN Position */
+#define FPU_FPCCR_LSPEN_Msk                (1UL << FPU_FPCCR_LSPEN_Pos)                   /*!< FPCCR: LSPEN bit Mask */
+
+#define FPU_FPCCR_MONRDY_Pos                8                                             /*!< FPCCR: MONRDY Position */
+#define FPU_FPCCR_MONRDY_Msk               (1UL << FPU_FPCCR_MONRDY_Pos)                  /*!< FPCCR: MONRDY bit Mask */
+
+#define FPU_FPCCR_BFRDY_Pos                 6                                             /*!< FPCCR: BFRDY Position */
+#define FPU_FPCCR_BFRDY_Msk                (1UL << FPU_FPCCR_BFRDY_Pos)                   /*!< FPCCR: BFRDY bit Mask */
+
+#define FPU_FPCCR_MMRDY_Pos                 5                                             /*!< FPCCR: MMRDY Position */
+#define FPU_FPCCR_MMRDY_Msk                (1UL << FPU_FPCCR_MMRDY_Pos)                   /*!< FPCCR: MMRDY bit Mask */
+
+#define FPU_FPCCR_HFRDY_Pos                 4                                             /*!< FPCCR: HFRDY Position */
+#define FPU_FPCCR_HFRDY_Msk                (1UL << FPU_FPCCR_HFRDY_Pos)                   /*!< FPCCR: HFRDY bit Mask */
+
+#define FPU_FPCCR_THREAD_Pos                3                                             /*!< FPCCR: processor mode bit Position */
+#define FPU_FPCCR_THREAD_Msk               (1UL << FPU_FPCCR_THREAD_Pos)                  /*!< FPCCR: processor mode active bit Mask */
+
+#define FPU_FPCCR_USER_Pos                  1                                             /*!< FPCCR: privilege level bit Position */
+#define FPU_FPCCR_USER_Msk                 (1UL << FPU_FPCCR_USER_Pos)                    /*!< FPCCR: privilege level bit Mask */
+
+#define FPU_FPCCR_LSPACT_Pos                0                                             /*!< FPCCR: Lazy state preservation active bit Position */
+#define FPU_FPCCR_LSPACT_Msk               (1UL /*<< FPU_FPCCR_LSPACT_Pos*/)              /*!< FPCCR: Lazy state preservation active bit Mask */
+
+/* Floating-Point Context Address Register */
+#define FPU_FPCAR_ADDRESS_Pos               3                                             /*!< FPCAR: ADDRESS bit Position */
+#define FPU_FPCAR_ADDRESS_Msk              (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos)        /*!< FPCAR: ADDRESS bit Mask */
+
+/* Floating-Point Default Status Control Register */
+#define FPU_FPDSCR_AHP_Pos                 26                                             /*!< FPDSCR: AHP bit Position */
+#define FPU_FPDSCR_AHP_Msk                 (1UL << FPU_FPDSCR_AHP_Pos)                    /*!< FPDSCR: AHP bit Mask */
+
+#define FPU_FPDSCR_DN_Pos                  25                                             /*!< FPDSCR: DN bit Position */
+#define FPU_FPDSCR_DN_Msk                  (1UL << FPU_FPDSCR_DN_Pos)                     /*!< FPDSCR: DN bit Mask */
+
+#define FPU_FPDSCR_FZ_Pos                  24                                             /*!< FPDSCR: FZ bit Position */
+#define FPU_FPDSCR_FZ_Msk                  (1UL << FPU_FPDSCR_FZ_Pos)                     /*!< FPDSCR: FZ bit Mask */
+
+/* RMode is a 2-bit rounding-mode field, hence the 3UL (two-bit) mask. */
+#define FPU_FPDSCR_RMode_Pos               22                                             /*!< FPDSCR: RMode bit Position */
+#define FPU_FPDSCR_RMode_Msk               (3UL << FPU_FPDSCR_RMode_Pos)                  /*!< FPDSCR: RMode bit Mask */
+
+/* Media and FP Feature Register 0 */
+#define FPU_MVFR0_FP_rounding_modes_Pos    28                                             /*!< MVFR0: FP rounding modes bits Position */
+#define FPU_MVFR0_FP_rounding_modes_Msk    (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos)     /*!< MVFR0: FP rounding modes bits Mask */
+
+#define FPU_MVFR0_Short_vectors_Pos        24                                             /*!< MVFR0: Short vectors bits Position */
+#define FPU_MVFR0_Short_vectors_Msk        (0xFUL << FPU_MVFR0_Short_vectors_Pos)         /*!< MVFR0: Short vectors bits Mask */
+
+#define FPU_MVFR0_Square_root_Pos          20                                             /*!< MVFR0: Square root bits Position */
+#define FPU_MVFR0_Square_root_Msk          (0xFUL << FPU_MVFR0_Square_root_Pos)           /*!< MVFR0: Square root bits Mask */
+
+#define FPU_MVFR0_Divide_Pos               16                                             /*!< MVFR0: Divide bits Position */
+#define FPU_MVFR0_Divide_Msk               (0xFUL << FPU_MVFR0_Divide_Pos)                /*!< MVFR0: Divide bits Mask */
+
+#define FPU_MVFR0_FP_excep_trapping_Pos    12                                             /*!< MVFR0: FP exception trapping bits Position */
+#define FPU_MVFR0_FP_excep_trapping_Msk    (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos)     /*!< MVFR0: FP exception trapping bits Mask */

+

+#define FPU_MVFR0_Double_precision_Pos      8                                             /*!< MVFR0: Double-precision bits Position */

+#define FPU_MVFR0_Double_precision_Msk     (0xFUL << FPU_MVFR0_Double_precision_Pos)      /*!< MVFR0: Double-precision bits Mask */

+

+#define FPU_MVFR0_Single_precision_Pos      4                                             /*!< MVFR0: Single-precision bits Position */

+#define FPU_MVFR0_Single_precision_Msk     (0xFUL << FPU_MVFR0_Single_precision_Pos)      /*!< MVFR0: Single-precision bits Mask */

+

+#define FPU_MVFR0_A_SIMD_registers_Pos      0                                             /*!< MVFR0: A_SIMD registers bits Position */

+#define FPU_MVFR0_A_SIMD_registers_Msk     (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/)  /*!< MVFR0: A_SIMD registers bits Mask */

+

+/* Media and FP Feature Register 1 */

+#define FPU_MVFR1_FP_fused_MAC_Pos         28                                             /*!< MVFR1: FP fused MAC bits Position */

+#define FPU_MVFR1_FP_fused_MAC_Msk         (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos)          /*!< MVFR1: FP fused MAC bits Mask */

+

+#define FPU_MVFR1_FP_HPFP_Pos              24                                             /*!< MVFR1: FP HPFP bits Position */

+#define FPU_MVFR1_FP_HPFP_Msk              (0xFUL << FPU_MVFR1_FP_HPFP_Pos)               /*!< MVFR1: FP HPFP bits Mask */

+

+#define FPU_MVFR1_D_NaN_mode_Pos            4                                             /*!< MVFR1: D_NaN mode bits Position */

+#define FPU_MVFR1_D_NaN_mode_Msk           (0xFUL << FPU_MVFR1_D_NaN_mode_Pos)            /*!< MVFR1: D_NaN mode bits Mask */

+

+#define FPU_MVFR1_FtZ_mode_Pos              0                                             /*!< MVFR1: FtZ mode bits Position */

+#define FPU_MVFR1_FtZ_mode_Msk             (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/)          /*!< MVFR1: FtZ mode bits Mask */

+

+/*@} end of group CMSIS_FPU */

+#endif

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_CoreDebug       Core Debug Registers (CoreDebug)

+    \brief      Type definitions for the Core Debug Registers

+  @{

+ */

+

+/** \brief  Structure type to access the Core Debug Register (CoreDebug).

+ */

/* Register overlay for the Core Debug block (base 0xE000EDF0).
   __IO / __O are the CMSIS volatile access qualifiers (read-write / write-only)
   defined earlier in this header family. */
typedef struct
{
  __IO uint32_t DHCSR;                   /*!< Offset: 0x000 (R/W)  Debug Halting Control and Status Register    */
  __O  uint32_t DCRSR;                   /*!< Offset: 0x004 ( /W)  Debug Core Register Selector Register        */
  __IO uint32_t DCRDR;                   /*!< Offset: 0x008 (R/W)  Debug Core Register Data Register            */
  __IO uint32_t DEMCR;                   /*!< Offset: 0x00C (R/W)  Debug Exception and Monitor Control Register */
} CoreDebug_Type;

+

+/* Debug Halting Control and Status Register */

+#define CoreDebug_DHCSR_DBGKEY_Pos         16                                             /*!< CoreDebug DHCSR: DBGKEY Position */

+#define CoreDebug_DHCSR_DBGKEY_Msk         (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos)       /*!< CoreDebug DHCSR: DBGKEY Mask */

+

+#define CoreDebug_DHCSR_S_RESET_ST_Pos     25                                             /*!< CoreDebug DHCSR: S_RESET_ST Position */

+#define CoreDebug_DHCSR_S_RESET_ST_Msk     (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos)        /*!< CoreDebug DHCSR: S_RESET_ST Mask */

+

+#define CoreDebug_DHCSR_S_RETIRE_ST_Pos    24                                             /*!< CoreDebug DHCSR: S_RETIRE_ST Position */

+#define CoreDebug_DHCSR_S_RETIRE_ST_Msk    (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos)       /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */

+

+#define CoreDebug_DHCSR_S_LOCKUP_Pos       19                                             /*!< CoreDebug DHCSR: S_LOCKUP Position */

+#define CoreDebug_DHCSR_S_LOCKUP_Msk       (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos)          /*!< CoreDebug DHCSR: S_LOCKUP Mask */

+

+#define CoreDebug_DHCSR_S_SLEEP_Pos        18                                             /*!< CoreDebug DHCSR: S_SLEEP Position */

+#define CoreDebug_DHCSR_S_SLEEP_Msk        (1UL << CoreDebug_DHCSR_S_SLEEP_Pos)           /*!< CoreDebug DHCSR: S_SLEEP Mask */

+

+#define CoreDebug_DHCSR_S_HALT_Pos         17                                             /*!< CoreDebug DHCSR: S_HALT Position */

+#define CoreDebug_DHCSR_S_HALT_Msk         (1UL << CoreDebug_DHCSR_S_HALT_Pos)            /*!< CoreDebug DHCSR: S_HALT Mask */

+

+#define CoreDebug_DHCSR_S_REGRDY_Pos       16                                             /*!< CoreDebug DHCSR: S_REGRDY Position */

+#define CoreDebug_DHCSR_S_REGRDY_Msk       (1UL << CoreDebug_DHCSR_S_REGRDY_Pos)          /*!< CoreDebug DHCSR: S_REGRDY Mask */

+

+#define CoreDebug_DHCSR_C_SNAPSTALL_Pos     5                                             /*!< CoreDebug DHCSR: C_SNAPSTALL Position */

+#define CoreDebug_DHCSR_C_SNAPSTALL_Msk    (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos)       /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */

+

+#define CoreDebug_DHCSR_C_MASKINTS_Pos      3                                             /*!< CoreDebug DHCSR: C_MASKINTS Position */

+#define CoreDebug_DHCSR_C_MASKINTS_Msk     (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos)        /*!< CoreDebug DHCSR: C_MASKINTS Mask */

+

+#define CoreDebug_DHCSR_C_STEP_Pos          2                                             /*!< CoreDebug DHCSR: C_STEP Position */

+#define CoreDebug_DHCSR_C_STEP_Msk         (1UL << CoreDebug_DHCSR_C_STEP_Pos)            /*!< CoreDebug DHCSR: C_STEP Mask */

+

+#define CoreDebug_DHCSR_C_HALT_Pos          1                                             /*!< CoreDebug DHCSR: C_HALT Position */

+#define CoreDebug_DHCSR_C_HALT_Msk         (1UL << CoreDebug_DHCSR_C_HALT_Pos)            /*!< CoreDebug DHCSR: C_HALT Mask */

+

+#define CoreDebug_DHCSR_C_DEBUGEN_Pos       0                                             /*!< CoreDebug DHCSR: C_DEBUGEN Position */

+#define CoreDebug_DHCSR_C_DEBUGEN_Msk      (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/)     /*!< CoreDebug DHCSR: C_DEBUGEN Mask */

+

+/* Debug Core Register Selector Register */

+#define CoreDebug_DCRSR_REGWnR_Pos         16                                             /*!< CoreDebug DCRSR: REGWnR Position */

+#define CoreDebug_DCRSR_REGWnR_Msk         (1UL << CoreDebug_DCRSR_REGWnR_Pos)            /*!< CoreDebug DCRSR: REGWnR Mask */

+

+#define CoreDebug_DCRSR_REGSEL_Pos          0                                             /*!< CoreDebug DCRSR: REGSEL Position */

+#define CoreDebug_DCRSR_REGSEL_Msk         (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/)     /*!< CoreDebug DCRSR: REGSEL Mask */

+

+/* Debug Exception and Monitor Control Register */

+#define CoreDebug_DEMCR_TRCENA_Pos         24                                             /*!< CoreDebug DEMCR: TRCENA Position */

+#define CoreDebug_DEMCR_TRCENA_Msk         (1UL << CoreDebug_DEMCR_TRCENA_Pos)            /*!< CoreDebug DEMCR: TRCENA Mask */

+

+#define CoreDebug_DEMCR_MON_REQ_Pos        19                                             /*!< CoreDebug DEMCR: MON_REQ Position */

+#define CoreDebug_DEMCR_MON_REQ_Msk        (1UL << CoreDebug_DEMCR_MON_REQ_Pos)           /*!< CoreDebug DEMCR: MON_REQ Mask */

+

+#define CoreDebug_DEMCR_MON_STEP_Pos       18                                             /*!< CoreDebug DEMCR: MON_STEP Position */

+#define CoreDebug_DEMCR_MON_STEP_Msk       (1UL << CoreDebug_DEMCR_MON_STEP_Pos)          /*!< CoreDebug DEMCR: MON_STEP Mask */

+

+#define CoreDebug_DEMCR_MON_PEND_Pos       17                                             /*!< CoreDebug DEMCR: MON_PEND Position */

+#define CoreDebug_DEMCR_MON_PEND_Msk       (1UL << CoreDebug_DEMCR_MON_PEND_Pos)          /*!< CoreDebug DEMCR: MON_PEND Mask */

+

+#define CoreDebug_DEMCR_MON_EN_Pos         16                                             /*!< CoreDebug DEMCR: MON_EN Position */

+#define CoreDebug_DEMCR_MON_EN_Msk         (1UL << CoreDebug_DEMCR_MON_EN_Pos)            /*!< CoreDebug DEMCR: MON_EN Mask */

+

+#define CoreDebug_DEMCR_VC_HARDERR_Pos     10                                             /*!< CoreDebug DEMCR: VC_HARDERR Position */

+#define CoreDebug_DEMCR_VC_HARDERR_Msk     (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos)        /*!< CoreDebug DEMCR: VC_HARDERR Mask */

+

+#define CoreDebug_DEMCR_VC_INTERR_Pos       9                                             /*!< CoreDebug DEMCR: VC_INTERR Position */

+#define CoreDebug_DEMCR_VC_INTERR_Msk      (1UL << CoreDebug_DEMCR_VC_INTERR_Pos)         /*!< CoreDebug DEMCR: VC_INTERR Mask */

+

+#define CoreDebug_DEMCR_VC_BUSERR_Pos       8                                             /*!< CoreDebug DEMCR: VC_BUSERR Position */

+#define CoreDebug_DEMCR_VC_BUSERR_Msk      (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos)         /*!< CoreDebug DEMCR: VC_BUSERR Mask */

+

+#define CoreDebug_DEMCR_VC_STATERR_Pos      7                                             /*!< CoreDebug DEMCR: VC_STATERR Position */

+#define CoreDebug_DEMCR_VC_STATERR_Msk     (1UL << CoreDebug_DEMCR_VC_STATERR_Pos)        /*!< CoreDebug DEMCR: VC_STATERR Mask */

+

+#define CoreDebug_DEMCR_VC_CHKERR_Pos       6                                             /*!< CoreDebug DEMCR: VC_CHKERR Position */

+#define CoreDebug_DEMCR_VC_CHKERR_Msk      (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos)         /*!< CoreDebug DEMCR: VC_CHKERR Mask */

+

+#define CoreDebug_DEMCR_VC_NOCPERR_Pos      5                                             /*!< CoreDebug DEMCR: VC_NOCPERR Position */

+#define CoreDebug_DEMCR_VC_NOCPERR_Msk     (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos)        /*!< CoreDebug DEMCR: VC_NOCPERR Mask */

+

+#define CoreDebug_DEMCR_VC_MMERR_Pos        4                                             /*!< CoreDebug DEMCR: VC_MMERR Position */

+#define CoreDebug_DEMCR_VC_MMERR_Msk       (1UL << CoreDebug_DEMCR_VC_MMERR_Pos)          /*!< CoreDebug DEMCR: VC_MMERR Mask */

+

+#define CoreDebug_DEMCR_VC_CORERESET_Pos    0                                             /*!< CoreDebug DEMCR: VC_CORERESET Position */

+#define CoreDebug_DEMCR_VC_CORERESET_Msk   (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/)  /*!< CoreDebug DEMCR: VC_CORERESET Mask */

+

+/*@} end of group CMSIS_CoreDebug */

+

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_core_base     Core Definitions

+    \brief      Definitions for base addresses, unions, and structures.

+  @{

+ */

+

+/* Memory mapping of Cortex-M4 Hardware */

+#define SCS_BASE            (0xE000E000UL)                            /*!< System Control Space Base Address  */

+#define ITM_BASE            (0xE0000000UL)                            /*!< ITM Base Address                   */

+#define DWT_BASE            (0xE0001000UL)                            /*!< DWT Base Address                   */

+#define TPI_BASE            (0xE0040000UL)                            /*!< TPI Base Address                   */

+#define CoreDebug_BASE      (0xE000EDF0UL)                            /*!< Core Debug Base Address            */

+#define SysTick_BASE        (SCS_BASE +  0x0010UL)                    /*!< SysTick Base Address               */

+#define NVIC_BASE           (SCS_BASE +  0x0100UL)                    /*!< NVIC Base Address                  */

+#define SCB_BASE            (SCS_BASE +  0x0D00UL)                    /*!< System Control Block Base Address  */

+

+#define SCnSCB              ((SCnSCB_Type    *)     SCS_BASE      )   /*!< System control Register not in SCB */

+#define SCB                 ((SCB_Type       *)     SCB_BASE      )   /*!< SCB configuration struct           */

+#define SysTick             ((SysTick_Type   *)     SysTick_BASE  )   /*!< SysTick configuration struct       */

+#define NVIC                ((NVIC_Type      *)     NVIC_BASE     )   /*!< NVIC configuration struct          */

+#define ITM                 ((ITM_Type       *)     ITM_BASE      )   /*!< ITM configuration struct           */

+#define DWT                 ((DWT_Type       *)     DWT_BASE      )   /*!< DWT configuration struct           */

+#define TPI                 ((TPI_Type       *)     TPI_BASE      )   /*!< TPI configuration struct           */

+#define CoreDebug           ((CoreDebug_Type *)     CoreDebug_BASE)   /*!< Core Debug configuration struct    */

+

+#if (__MPU_PRESENT == 1)

+  #define MPU_BASE          (SCS_BASE +  0x0D90UL)                    /*!< Memory Protection Unit             */

+  #define MPU               ((MPU_Type       *)     MPU_BASE      )   /*!< Memory Protection Unit             */

+#endif

+

+#if (__FPU_PRESENT == 1)

+  #define FPU_BASE          (SCS_BASE +  0x0F30UL)                    /*!< Floating Point Unit                */

+  #define FPU               ((FPU_Type       *)     FPU_BASE      )   /*!< Floating Point Unit                */

+#endif

+

+/*@} */

+

+

+

+/*******************************************************************************

+ *                Hardware Abstraction Layer

+  Core Function Interface contains:

+  - Core NVIC Functions

+  - Core SysTick Functions

+  - Core Debug Functions

+  - Core Register Access Functions

+ ******************************************************************************/

+/** \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference

+*/

+

+

+

+/* ##########################   NVIC functions  #################################### */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_NVICFunctions NVIC Functions

+    \brief      Functions that manage interrupts and exceptions via the NVIC.

+    @{

+ */

+

+/** \brief  Set Priority Grouping

+

+  The function sets the priority grouping field using the required unlock sequence.

+  The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field.

+  Only values from 0..7 are used.

+  In case of a conflict between priority grouping and available

+  priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.

+

+    \param [in]      PriorityGroup  Priority grouping field.

+ */

+__STATIC_INLINE void NVIC_SetPriorityGrouping(uint32_t PriorityGroup)

+{

+  uint32_t reg_value;

+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);             /* only values 0..7 are used          */

+

+  reg_value  =  SCB->AIRCR;                                                   /* read old register configuration    */

+  reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk));             /* clear bits to change               */

+  reg_value  =  (reg_value                                   |

+                ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |

+                (PriorityGroupTmp << 8)                       );              /* Insert write key and priorty group */

+  SCB->AIRCR =  reg_value;

+}

+

+

+/** \brief  Get Priority Grouping

+

+  The function reads the priority grouping field from the NVIC Interrupt Controller.

+

+    \return                Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field).

+ */

+__STATIC_INLINE uint32_t NVIC_GetPriorityGrouping(void)

+{

+  return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos));

+}

+

+

+/** \brief  Enable External Interrupt

+

+    The function enables a device-specific interrupt in the NVIC interrupt controller.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_EnableIRQ(IRQn_Type IRQn)

+{

+  NVIC->ISER[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Disable External Interrupt

+

+    The function disables a device-specific interrupt in the NVIC interrupt controller.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_DisableIRQ(IRQn_Type IRQn)

+{

+  NVIC->ICER[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Get Pending Interrupt

+

+    The function reads the pending register in the NVIC and returns the pending bit

+    for the specified interrupt.

+

+    \param [in]      IRQn  Interrupt number.

+

+    \return             0  Interrupt status is not pending.

+    \return             1  Interrupt status is pending.

+ */

+__STATIC_INLINE uint32_t NVIC_GetPendingIRQ(IRQn_Type IRQn)

+{

+  return((uint32_t)(((NVIC->ISPR[(((uint32_t)(int32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));

+}

+

+

+/** \brief  Set Pending Interrupt

+

+    The function sets the pending bit of an external interrupt.

+

+    \param [in]      IRQn  Interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_SetPendingIRQ(IRQn_Type IRQn)

+{

+  NVIC->ISPR[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Clear Pending Interrupt

+

+    The function clears the pending bit of an external interrupt.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_ClearPendingIRQ(IRQn_Type IRQn)

+{

+  NVIC->ICPR[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Get Active Interrupt

+

+    The function reads the active register in NVIC and returns the active bit.

+

+    \param [in]      IRQn  Interrupt number.

+

+    \return             0  Interrupt status is not active.

+    \return             1  Interrupt status is active.

+ */

+__STATIC_INLINE uint32_t NVIC_GetActive(IRQn_Type IRQn)

+{

+  return((uint32_t)(((NVIC->IABR[(((uint32_t)(int32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));

+}

+

+

+/** \brief  Set Interrupt Priority

+

+    The function sets the priority of an interrupt.

+

+    \note The priority cannot be set for every core interrupt.

+

+    \param [in]      IRQn  Interrupt number.

+    \param [in]  priority  Priority to set.

+ */

+__STATIC_INLINE void NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)

+{

+  if((int32_t)IRQn < 0) {

+    SCB->SHP[(((uint32_t)(int32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL);

+  }

+  else {

+    NVIC->IP[((uint32_t)(int32_t)IRQn)]               = (uint8_t)((priority << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL);

+  }

+}

+

+

+/** \brief  Get Interrupt Priority

+

+    The function reads the priority of an interrupt. The interrupt

+    number can be positive to specify an external (device specific)

+    interrupt, or negative to specify an internal (core) interrupt.

+

+

+    \param [in]   IRQn  Interrupt number.

+    \return             Interrupt Priority. Value is aligned automatically to the implemented

+                        priority bits of the microcontroller.

+ */

+__STATIC_INLINE uint32_t NVIC_GetPriority(IRQn_Type IRQn)

+{

+

+  if((int32_t)IRQn < 0) {

+    return(((uint32_t)SCB->SHP[(((uint32_t)(int32_t)IRQn) & 0xFUL)-4UL] >> (8 - __NVIC_PRIO_BITS)));

+  }

+  else {

+    return(((uint32_t)NVIC->IP[((uint32_t)(int32_t)IRQn)]               >> (8 - __NVIC_PRIO_BITS)));

+  }

+}

+

+

+/** \brief  Encode Priority

+

+    The function encodes the priority for an interrupt with the given priority group,

+    preemptive priority value, and subpriority value.

+    In case of a conflict between priority grouping and available

+    priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.

+

+    \param [in]     PriorityGroup  Used priority group.

+    \param [in]   PreemptPriority  Preemptive priority value (starting from 0).

+    \param [in]       SubPriority  Subpriority value (starting from 0).

+    \return                        Encoded priority. Value can be used in the function \ref NVIC_SetPriority().

+ */

+__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority)

+{

+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);   /* only values 0..7 are used          */

+  uint32_t PreemptPriorityBits;

+  uint32_t SubPriorityBits;

+

+  PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);

+  SubPriorityBits     = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));

+

+  return (

+           ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) |

+           ((SubPriority     & (uint32_t)((1UL << (SubPriorityBits    )) - 1UL)))

+         );

+}

+

+

+/** \brief  Decode Priority

+

+    The function decodes an interrupt priority value with a given priority group to

+    preemptive priority value and subpriority value.

+    In case of a conflict between priority grouping and available

+    priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set.

+

+    \param [in]         Priority   Priority value, which can be retrieved with the function \ref NVIC_GetPriority().

+    \param [in]     PriorityGroup  Used priority group.

+    \param [out] pPreemptPriority  Preemptive priority value (starting from 0).

+    \param [out]     pSubPriority  Subpriority value (starting from 0).

+ */

+__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* pPreemptPriority, uint32_t* pSubPriority)

+{

+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);   /* only values 0..7 are used          */

+  uint32_t PreemptPriorityBits;

+  uint32_t SubPriorityBits;

+

+  PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);

+  SubPriorityBits     = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));

+

+  *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL);

+  *pSubPriority     = (Priority                   ) & (uint32_t)((1UL << (SubPriorityBits    )) - 1UL);

+}

+

+

+/** \brief  System Reset

+

+    The function initiates a system reset request to reset the MCU.

+ */

/* Requests a system reset by setting AIRCR.SYSRESETREQ (with the mandatory
   0x5FA VECTKEY write), preserving the current PRIGROUP field, then spins
   until the reset takes effect.  Does not return. */
__STATIC_INLINE void NVIC_SystemReset(void)
{
  __DSB();                                                          /* Ensure all outstanding memory accesses included
                                                                       buffered write are completed before reset */
  SCB->AIRCR  = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos)    |
                           (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) |
                            SCB_AIRCR_SYSRESETREQ_Msk    );         /* Keep priority group unchanged */
  __DSB();                                                          /* Ensure completion of memory access */
  while(1) { __NOP(); }                                             /* wait until reset */
}

+

+/*@} end of CMSIS_Core_NVICFunctions */

+

+

+

+/* ##################################    SysTick function  ############################################ */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_SysTickFunctions SysTick Functions

+    \brief      Functions that configure the System.

+  @{

+ */

+

+#if (__Vendor_SysTickConfig == 0)

+

+/** \brief  System Tick Configuration

+

+    The function initializes the System Timer and its interrupt, and starts the System Tick Timer.

+    Counter is in free running mode to generate periodic interrupts.

+

+    \param [in]  ticks  Number of ticks between two interrupts.

+

+    \return          0  Function succeeded.

+    \return          1  Function failed.

+

+    \note     When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the

+    function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>

+    must contain a vendor-specific implementation of this function.

+

+ */

/* Programs the SysTick timer for a periodic interrupt every `ticks` core
   clocks and starts it.  Returns 0 on success, 1 if `ticks` exceeds the
   24-bit reload range.  The SysTick exception is given the lowest
   implemented priority. */
__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
{
  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) { return (1UL); }    /* Reload value impossible */

  SysTick->LOAD  = (uint32_t)(ticks - 1UL);                         /* set reload register */
  NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
  SysTick->VAL   = 0UL;                                             /* Load the SysTick Counter Value */
  /* CLKSOURCE selects the core clock; TICKINT enables the exception;
     ENABLE starts the counter — all set in one write. */
  SysTick->CTRL  = SysTick_CTRL_CLKSOURCE_Msk |
                   SysTick_CTRL_TICKINT_Msk   |
                   SysTick_CTRL_ENABLE_Msk;                         /* Enable SysTick IRQ and SysTick Timer */
  return (0UL);                                                     /* Function successful */
}

+

+#endif

+

+/*@} end of CMSIS_Core_SysTickFunctions */

+

+

+

+/* ##################################### Debug In/Output function ########################################### */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_core_DebugFunctions ITM Functions

+    \brief   Functions that access the ITM debug interface.

+  @{

+ */

+

+extern volatile int32_t ITM_RxBuffer;                    /*!< External variable to receive characters.                         */

+#define                 ITM_RXBUFFER_EMPTY    0x5AA55AA5 /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */

+

+

+/** \brief  ITM Send Character

+

+    The function transmits a character via the ITM channel 0, and

+    \li Just returns when no debugger is connected that has booked the output.

+    \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted.

+

+    \param [in]     ch  Character to transmit.

+

+    \returns            Character to transmit.

+ */

/* Transmits one character on ITM stimulus port 0 and returns it unchanged.
   If the ITM or port 0 is not enabled (no debugger has booked the output),
   the character is silently dropped.  Otherwise blocks — busy-waiting on the
   port's FIFO-ready flag — until the previous character has drained. */
__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch)
{
  if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&      /* ITM enabled */
      ((ITM->TER & 1UL               ) != 0UL)   )     /* ITM Port #0 enabled */
  {
    /* Reading the port returns 0 while the FIFO is full; spin until ready. */
    while (ITM->PORT[0].u32 == 0UL) { __NOP(); }
    ITM->PORT[0].u8 = (uint8_t)ch;                     /* byte write emits one character */
  }
  return (ch);
}

+

+

+/** \brief  ITM Receive Character

+

+    The function inputs a character via the external variable \ref ITM_RxBuffer.

+

+    \return             Received character.

+    \return         -1  No character pending.

+ */

+__STATIC_INLINE int32_t ITM_ReceiveChar (void) {

+  int32_t ch = -1;                           /* no character available */

+

+  if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) {

+    ch = ITM_RxBuffer;

+    ITM_RxBuffer = ITM_RXBUFFER_EMPTY;       /* ready for next character */

+  }

+

+  return (ch);

+}

+

+

+/** \brief  ITM Check Character

+

+    The function checks whether a character is pending for reading in the variable \ref ITM_RxBuffer.

+

+    \return          0  No character available.

+    \return          1  Character available.

+ */

+__STATIC_INLINE int32_t ITM_CheckChar (void) {

+

+  if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) {

+    return (0);                                 /* no character available */

+  } else {

+    return (1);                                 /*    character available */

+  }

+}

+

+/*@} end of CMSIS_core_DebugFunctions */

+

+

+

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __CORE_CM4_H_DEPENDANT */

+

+#endif /* __CMSIS_GENERIC */

diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm4_simd.h b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm4_simd.h
new file mode 100644
index 0000000..3bc7906
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm4_simd.h
@@ -0,0 +1,649 @@
+/**************************************************************************//**
+ * @file     core_cm4_simd.h
+ * @brief    CMSIS Cortex-M4 SIMD Header File
+ * @version  V3.01
+ * @date     06. March 2012
+ *
+ * @note
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * @par
+ * ARM Limited (ARM) is supplying this software for use with Cortex-M
+ * processor based microcontrollers.  This file can be freely distributed
+ * within development tools that are supporting such ARM based processors.
+ *
+ * @par
+ * THIS SOFTWARE IS PROVIDED "AS IS".  NO WARRANTIES, WHETHER EXPRESS, IMPLIED
+ * OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE APPLY TO THIS SOFTWARE.
+ * ARM SHALL NOT, IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL, OR
+ * CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
+ *
+ ******************************************************************************/
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#ifndef __CORE_CM4_SIMD_H
+#define __CORE_CM4_SIMD_H
+
+
+/*******************************************************************************
+ *                Hardware Abstraction Layer
+ ******************************************************************************/
+
+
+/* ###################  Compiler specific Intrinsics  ########################### */
+/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
+  Access to dedicated SIMD instructions
+  @{
+*/
+
+#if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
+/* ARM armcc specific functions */
+
+/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
+#define __SADD8                           __sadd8
+#define __QADD8                           __qadd8
+#define __SHADD8                          __shadd8
+#define __UADD8                           __uadd8
+#define __UQADD8                          __uqadd8
+#define __UHADD8                          __uhadd8
+#define __SSUB8                           __ssub8
+#define __QSUB8                           __qsub8
+#define __SHSUB8                          __shsub8
+#define __USUB8                           __usub8
+#define __UQSUB8                          __uqsub8
+#define __UHSUB8                          __uhsub8
+#define __SADD16                          __sadd16
+#define __QADD16                          __qadd16
+#define __SHADD16                         __shadd16
+#define __UADD16                          __uadd16
+#define __UQADD16                         __uqadd16
+#define __UHADD16                         __uhadd16
+#define __SSUB16                          __ssub16
+#define __QSUB16                          __qsub16
+#define __SHSUB16                         __shsub16
+#define __USUB16                          __usub16
+#define __UQSUB16                         __uqsub16
+#define __UHSUB16                         __uhsub16
+#define __SASX                            __sasx
+#define __QASX                            __qasx
+#define __SHASX                           __shasx
+#define __UASX                            __uasx
+#define __UQASX                           __uqasx
+#define __UHASX                           __uhasx
+#define __SSAX                            __ssax
+#define __QSAX                            __qsax
+#define __SHSAX                           __shsax
+#define __USAX                            __usax
+#define __UQSAX                           __uqsax
+#define __UHSAX                           __uhsax
+#define __USAD8                           __usad8
+#define __USADA8                          __usada8
+#define __SSAT16                          __ssat16
+#define __USAT16                          __usat16
+#define __UXTB16                          __uxtb16
+#define __UXTAB16                         __uxtab16
+#define __SXTB16                          __sxtb16
+#define __SXTAB16                         __sxtab16
+#define __SMUAD                           __smuad
+#define __SMUADX                          __smuadx
+#define __SMLAD                           __smlad
+#define __SMLADX                          __smladx
+#define __SMLALD                          __smlald
+#define __SMLALDX                         __smlaldx
+#define __SMUSD                           __smusd
+#define __SMUSDX                          __smusdx
+#define __SMLSD                           __smlsd
+#define __SMLSDX                          __smlsdx
+#define __SMLSLD                          __smlsld
+#define __SMLSLDX                         __smlsldx
+#define __SEL                             __sel
+#define __QADD                            __qadd
+#define __QSUB                            __qsub
+
+#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
+                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )
+
+#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
+                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )
+
+
+/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
+
+
+
+#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
+/* IAR iccarm specific functions */
+
+/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
+#include <cmsis_iar.h>
+
+/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
+
+
+
+#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
+/* TI CCS specific functions */
+
+/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
+#include <cmsis_ccs.h>
+
+/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
+
+
+
+#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
+/* GNU gcc specific functions */
+
+/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
+{
+  uint32_t result;
+
+  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+#define __SSAT16(ARG1,ARG2) \
+({                          \
+  uint32_t __RES, __ARG1 = (ARG1); \
+  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
+  __RES; \
+ })
+
+#define __USAT16(ARG1,ARG2) \
+({                          \
+  uint32_t __RES, __ARG1 = (ARG1); \
+  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
+  __RES; \
+ })
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
+{
+  uint32_t result;
+
+  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
+{
+  uint32_t result;
+
+  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+  uint32_t result;
+
+  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+  uint32_t result;
+
+  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+#define __SMLALD(ARG1,ARG2,ARG3) \
+({ \
+  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
+  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
+  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
+ })
+
+#define __SMLALDX(ARG1,ARG2,ARG3) \
+({ \
+  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
+  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
+  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
+ })
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+  uint32_t result;
+
+  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+  uint32_t result;
+
+  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+  return(result);
+}
+
+#define __SMLSLD(ARG1,ARG2,ARG3) \
+({ \
+  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((ARG3) >> 32), __ARG3_L = (uint32_t)((ARG3) & 0xFFFFFFFFUL); \
+  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
+  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
+ })
+
+#define __SMLSLDX(ARG1,ARG2,ARG3) \
+({ \
+  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((ARG3) >> 32), __ARG3_L = (uint32_t)((ARG3) & 0xFFFFFFFFUL); \
+  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
+  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
+ })
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
+{
+  uint32_t result;
+
+  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+  return(result);
+}
+
+#define __PKHBT(ARG1,ARG2,ARG3) \
+({                          \
+  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
+  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
+  __RES; \
+ })
+
+#define __PKHTB(ARG1,ARG2,ARG3) \
+({                          \
+  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
+  if (ARG3 == 0) \
+    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2)  ); \
+  else \
+    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
+  __RES; \
+ })
+
+/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
+
+
+
+#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
+/* TASKING carm specific functions */
+
+
+/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
+/* not yet supported */
+/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
+
+
+#endif
+
+/*@} end of group CMSIS_SIMD_intrinsics */
+
+
+#endif /* __CORE_CM4_SIMD_H */
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm7.h b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm7.h
new file mode 100644
index 0000000..cb19b9f
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cm7.h
@@ -0,0 +1,2397 @@
+/**************************************************************************//**

+ * @file     core_cm7.h

+ * @brief    CMSIS Cortex-M7 Core Peripheral Access Layer Header File

+ * @version  V4.10

+ * @date     18. March 2015

+ *

+ * @note

+ *

+ ******************************************************************************/

+/* Copyright (c) 2009 - 2015 ARM LIMITED

+

+   All rights reserved.

+   Redistribution and use in source and binary forms, with or without

+   modification, are permitted provided that the following conditions are met:

+   - Redistributions of source code must retain the above copyright

+     notice, this list of conditions and the following disclaimer.

+   - Redistributions in binary form must reproduce the above copyright

+     notice, this list of conditions and the following disclaimer in the

+     documentation and/or other materials provided with the distribution.

+   - Neither the name of ARM nor the names of its contributors may be used

+     to endorse or promote products derived from this software without

+     specific prior written permission.

+   *

+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"

+   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE

+   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE

+   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE

+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR

+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF

+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS

+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN

+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)

+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE

+   POSSIBILITY OF SUCH DAMAGE.

+   ---------------------------------------------------------------------------*/

+

+

+#if defined ( __ICCARM__ )

+ #pragma system_include  /* treat file as system include file for MISRA check */

+#endif

+

+#ifndef __CORE_CM7_H_GENERIC

+#define __CORE_CM7_H_GENERIC

+

+#ifdef __cplusplus

+ extern "C" {

+#endif

+

+/** \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions

+  CMSIS violates the following MISRA-C:2004 rules:

+

+   \li Required Rule 8.5, object/function definition in header file.<br>

+     Function definitions in header files are used to allow 'inlining'.

+

+   \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>

+     Unions are used for effective representation of core registers.

+

+   \li Advisory Rule 19.7, Function-like macro defined.<br>

+     Function-like macros are used to allow more efficient code.

+ */

+

+

+/*******************************************************************************

+ *                 CMSIS definitions

+ ******************************************************************************/

+/** \ingroup Cortex_M7

+  @{

+ */

+

+/*  CMSIS CM7 definitions */

+#define __CM7_CMSIS_VERSION_MAIN  (0x04)                                   /*!< [31:16] CMSIS HAL main version   */

+#define __CM7_CMSIS_VERSION_SUB   (0x00)                                   /*!< [15:0]  CMSIS HAL sub version    */

+#define __CM7_CMSIS_VERSION       ((__CM7_CMSIS_VERSION_MAIN << 16) | \

+                                    __CM7_CMSIS_VERSION_SUB          )     /*!< CMSIS HAL version number         */

+

+#define __CORTEX_M                (0x07)                                   /*!< Cortex-M Core                    */

+

+

+#if   defined ( __CC_ARM )

+  #define __ASM            __asm                                      /*!< asm keyword for ARM Compiler          */

+  #define __INLINE         __inline                                   /*!< inline keyword for ARM Compiler       */

+  #define __STATIC_INLINE  static __inline

+

+#elif defined ( __GNUC__ )

+  #define __ASM            __asm                                      /*!< asm keyword for GNU Compiler          */

+  #define __INLINE         inline                                     /*!< inline keyword for GNU Compiler       */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __ICCARM__ )

+  #define __ASM            __asm                                      /*!< asm keyword for IAR Compiler          */

+  #define __INLINE         inline                                     /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __TMS470__ )

+  #define __ASM            __asm                                      /*!< asm keyword for TI CCS Compiler       */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __TASKING__ )

+  #define __ASM            __asm                                      /*!< asm keyword for TASKING Compiler      */

+  #define __INLINE         inline                                     /*!< inline keyword for TASKING Compiler   */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __CSMC__ )

+  #define __packed

+  #define __ASM            _asm                                      /*!< asm keyword for COSMIC Compiler      */

+  #define __INLINE         inline                                    /*use -pc99 on compile line !< inline keyword for COSMIC Compiler   */

+  #define __STATIC_INLINE  static inline

+

+#endif

+

+/** __FPU_USED indicates whether an FPU is used or not.

+    For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions.

+*/

+#if defined ( __CC_ARM )

+  #if defined __TARGET_FPU_VFP

+    #if (__FPU_PRESENT == 1)

+      #define __FPU_USED       1

+    #else

+      #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+      #define __FPU_USED       0

+    #endif

+  #else

+    #define __FPU_USED         0

+  #endif

+

+#elif defined ( __GNUC__ )

+  #if defined (__VFP_FP__) && !defined(__SOFTFP__)

+    #if (__FPU_PRESENT == 1)

+      #define __FPU_USED       1

+    #else

+      #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+      #define __FPU_USED       0

+    #endif

+  #else

+    #define __FPU_USED         0

+  #endif

+

+#elif defined ( __ICCARM__ )

+  #if defined __ARMVFP__

+    #if (__FPU_PRESENT == 1)

+      #define __FPU_USED       1

+    #else

+      #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+      #define __FPU_USED       0

+    #endif

+  #else

+    #define __FPU_USED         0

+  #endif

+

+#elif defined ( __TMS470__ )

+  #if defined __TI_VFP_SUPPORT__

+    #if (__FPU_PRESENT == 1)

+      #define __FPU_USED       1

+    #else

+      #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+      #define __FPU_USED       0

+    #endif

+  #else

+    #define __FPU_USED         0

+  #endif

+

+#elif defined ( __TASKING__ )

+  #if defined __FPU_VFP__

+    #if (__FPU_PRESENT == 1)

+      #define __FPU_USED       1

+    #else

+      #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+      #define __FPU_USED       0

+    #endif

+  #else

+    #define __FPU_USED         0

+  #endif

+

+#elif defined ( __CSMC__ )		/* Cosmic */

+  #if ( __CSMC__ & 0x400)		// FPU present for parser

+    #if (__FPU_PRESENT == 1)

+      #define __FPU_USED       1

+    #else

+      #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+      #define __FPU_USED       0

+    #endif

+  #else

+    #define __FPU_USED         0

+  #endif

+#endif

+

+#include <stdint.h>                      /* standard types definitions                      */

+#include <core_cmInstr.h>                /* Core Instruction Access                         */

+#include <core_cmFunc.h>                 /* Core Function Access                            */

+#include <core_cmSimd.h>                 /* Compiler specific SIMD Intrinsics               */

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __CORE_CM7_H_GENERIC */

+

+#ifndef __CMSIS_GENERIC

+

+#ifndef __CORE_CM7_H_DEPENDANT

+#define __CORE_CM7_H_DEPENDANT

+

+#ifdef __cplusplus

+ extern "C" {

+#endif

+

+/* check device defines and use defaults */

+#if defined __CHECK_DEVICE_DEFINES

+  #ifndef __CM7_REV

+    #define __CM7_REV               0x0000

+    #warning "__CM7_REV not defined in device header file; using default!"

+  #endif

+

+  #ifndef __FPU_PRESENT

+    #define __FPU_PRESENT             0

+    #warning "__FPU_PRESENT not defined in device header file; using default!"

+  #endif

+

+  #ifndef __MPU_PRESENT

+    #define __MPU_PRESENT             0

+    #warning "__MPU_PRESENT not defined in device header file; using default!"

+  #endif

+

+  #ifndef __ICACHE_PRESENT

+    #define __ICACHE_PRESENT          0

+    #warning "__ICACHE_PRESENT not defined in device header file; using default!"

+  #endif

+

+  #ifndef __DCACHE_PRESENT

+    #define __DCACHE_PRESENT          0

+    #warning "__DCACHE_PRESENT not defined in device header file; using default!"

+  #endif

+

+  #ifndef __DTCM_PRESENT

+    #define __DTCM_PRESENT            0

+    #warning "__DTCM_PRESENT        not defined in device header file; using default!"

+  #endif

+

+  #ifndef __NVIC_PRIO_BITS

+    #define __NVIC_PRIO_BITS          3

+    #warning "__NVIC_PRIO_BITS not defined in device header file; using default!"

+  #endif

+

+  #ifndef __Vendor_SysTickConfig

+    #define __Vendor_SysTickConfig    0

+    #warning "__Vendor_SysTickConfig not defined in device header file; using default!"

+  #endif

+#endif

+

+/* IO definitions (access restrictions to peripheral registers) */

+/**

+    \defgroup CMSIS_glob_defs CMSIS Global Defines

+

+    <strong>IO Type Qualifiers</strong> are used

+    \li to specify the access to peripheral variables.

+    \li for automatic generation of peripheral register debug information.

+*/

+#ifdef __cplusplus

+  #define   __I     volatile             /*!< Defines 'read only' permissions                 */

+#else

+  #define   __I     volatile const       /*!< Defines 'read only' permissions                 */

+#endif

+#define     __O     volatile             /*!< Defines 'write only' permissions                */

+#define     __IO    volatile             /*!< Defines 'read / write' permissions              */

+

+/*@} end of group Cortex_M7 */

+

+

+

+/*******************************************************************************

+ *                 Register Abstraction

+  Core Register contain:

+  - Core Register

+  - Core NVIC Register

+  - Core SCB Register

+  - Core SysTick Register

+  - Core Debug Register

+  - Core MPU Register

+  - Core FPU Register

+ ******************************************************************************/

+/** \defgroup CMSIS_core_register Defines and Type Definitions

+    \brief Type definitions and defines for Cortex-M processor based devices.

+*/

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_CORE  Status and Control Registers

+    \brief  Core Register type definitions.

+  @{

+ */

+

+/** \brief  Union type to access the Application Program Status Register (APSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t _reserved0:16;              /*!< bit:  0..15  Reserved                           */

+    uint32_t GE:4;                       /*!< bit: 16..19  Greater than or Equal flags        */

+    uint32_t _reserved1:7;               /*!< bit: 20..26  Reserved                           */

+    uint32_t Q:1;                        /*!< bit:     27  Saturation condition flag          */

+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag       */

+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag          */

+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag           */

+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag       */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} APSR_Type;

+

+/* APSR Register Definitions */

+#define APSR_N_Pos                         31                                             /*!< APSR: N Position */

+#define APSR_N_Msk                         (1UL << APSR_N_Pos)                            /*!< APSR: N Mask */

+

+#define APSR_Z_Pos                         30                                             /*!< APSR: Z Position */

+#define APSR_Z_Msk                         (1UL << APSR_Z_Pos)                            /*!< APSR: Z Mask */

+

+#define APSR_C_Pos                         29                                             /*!< APSR: C Position */

+#define APSR_C_Msk                         (1UL << APSR_C_Pos)                            /*!< APSR: C Mask */

+

+#define APSR_V_Pos                         28                                             /*!< APSR: V Position */

+#define APSR_V_Msk                         (1UL << APSR_V_Pos)                            /*!< APSR: V Mask */

+

+#define APSR_Q_Pos                         27                                             /*!< APSR: Q Position */

+#define APSR_Q_Msk                         (1UL << APSR_Q_Pos)                            /*!< APSR: Q Mask */

+

+#define APSR_GE_Pos                        16                                             /*!< APSR: GE Position */

+#define APSR_GE_Msk                        (0xFUL << APSR_GE_Pos)                         /*!< APSR: GE Mask */

+

+

+/** \brief  Union type to access the Interrupt Program Status Register (IPSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */

+    uint32_t _reserved0:23;              /*!< bit:  9..31  Reserved                           */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} IPSR_Type;

+

+/* IPSR Register Definitions */

+#define IPSR_ISR_Pos                        0                                             /*!< IPSR: ISR Position */

+#define IPSR_ISR_Msk                       (0x1FFUL /*<< IPSR_ISR_Pos*/)                  /*!< IPSR: ISR Mask */

+

+

+/** \brief  Union type to access the Special-Purpose Program Status Registers (xPSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */

+    uint32_t _reserved0:7;               /*!< bit:  9..15  Reserved                           */

+    uint32_t GE:4;                       /*!< bit: 16..19  Greater than or Equal flags        */

+    uint32_t _reserved1:4;               /*!< bit: 20..23  Reserved                           */

+    uint32_t T:1;                        /*!< bit:     24  Thumb bit        (read 0)          */

+    uint32_t IT:2;                       /*!< bit: 25..26  saved IT state   (read 0)          */

+    uint32_t Q:1;                        /*!< bit:     27  Saturation condition flag          */

+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag       */

+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag          */

+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag           */

+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag       */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} xPSR_Type;

+

+/* xPSR Register Definitions */

+#define xPSR_N_Pos                         31                                             /*!< xPSR: N Position */

+#define xPSR_N_Msk                         (1UL << xPSR_N_Pos)                            /*!< xPSR: N Mask */

+

+#define xPSR_Z_Pos                         30                                             /*!< xPSR: Z Position */

+#define xPSR_Z_Msk                         (1UL << xPSR_Z_Pos)                            /*!< xPSR: Z Mask */

+

+#define xPSR_C_Pos                         29                                             /*!< xPSR: C Position */

+#define xPSR_C_Msk                         (1UL << xPSR_C_Pos)                            /*!< xPSR: C Mask */

+

+#define xPSR_V_Pos                         28                                             /*!< xPSR: V Position */

+#define xPSR_V_Msk                         (1UL << xPSR_V_Pos)                            /*!< xPSR: V Mask */

+

+#define xPSR_Q_Pos                         27                                             /*!< xPSR: Q Position */

+#define xPSR_Q_Msk                         (1UL << xPSR_Q_Pos)                            /*!< xPSR: Q Mask */

+

+#define xPSR_IT_Pos                        25                                             /*!< xPSR: IT Position */

+#define xPSR_IT_Msk                        (3UL << xPSR_IT_Pos)                           /*!< xPSR: IT Mask */

+

+#define xPSR_T_Pos                         24                                             /*!< xPSR: T Position */

+#define xPSR_T_Msk                         (1UL << xPSR_T_Pos)                            /*!< xPSR: T Mask */

+

+#define xPSR_GE_Pos                        16                                             /*!< xPSR: GE Position */

+#define xPSR_GE_Msk                        (0xFUL << xPSR_GE_Pos)                         /*!< xPSR: GE Mask */

+

+#define xPSR_ISR_Pos                        0                                             /*!< xPSR: ISR Position */

+#define xPSR_ISR_Msk                       (0x1FFUL /*<< xPSR_ISR_Pos*/)                  /*!< xPSR: ISR Mask */

+

+

+/** \brief  Union type to access the Control Registers (CONTROL).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t nPRIV:1;                    /*!< bit:      0  Execution privilege in Thread mode */

+    uint32_t SPSEL:1;                    /*!< bit:      1  Stack to be used                   */

+    uint32_t FPCA:1;                     /*!< bit:      2  FP extension active flag           */

+    uint32_t _reserved0:29;              /*!< bit:  3..31  Reserved                           */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} CONTROL_Type;

+

+/* CONTROL Register Definitions */

+#define CONTROL_FPCA_Pos                    2                                             /*!< CONTROL: FPCA Position */

+#define CONTROL_FPCA_Msk                   (1UL << CONTROL_FPCA_Pos)                      /*!< CONTROL: FPCA Mask */

+

+#define CONTROL_SPSEL_Pos                   1                                             /*!< CONTROL: SPSEL Position */

+#define CONTROL_SPSEL_Msk                  (1UL << CONTROL_SPSEL_Pos)                     /*!< CONTROL: SPSEL Mask */

+

+#define CONTROL_nPRIV_Pos                   0                                             /*!< CONTROL: nPRIV Position */

+#define CONTROL_nPRIV_Msk                  (1UL /*<< CONTROL_nPRIV_Pos*/)                 /*!< CONTROL: nPRIV Mask */

+

+/*@} end of group CMSIS_CORE */

+

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_NVIC  Nested Vectored Interrupt Controller (NVIC)

+    \brief      Type definitions for the NVIC Registers

+  @{

+ */

+

+/** \brief  Structure type to access the Nested Vectored Interrupt Controller (NVIC).

+ */

+typedef struct
+{
+  __IO uint32_t ISER[8];                 /*!< Offset: 0x000 (R/W)  Interrupt Set Enable Register           */
+       uint32_t RESERVED0[24];
+  __IO uint32_t ICER[8];                 /*!< Offset: 0x080 (R/W)  Interrupt Clear Enable Register         */
+       uint32_t RSERVED1[24];            /* NOTE(review): 'RSERVED1' (sic) — misspelling exists in upstream CMSIS; kept as-is since renaming the field would break source compatibility */
+  __IO uint32_t ISPR[8];                 /*!< Offset: 0x100 (R/W)  Interrupt Set Pending Register          */
+       uint32_t RESERVED2[24];
+  __IO uint32_t ICPR[8];                 /*!< Offset: 0x180 (R/W)  Interrupt Clear Pending Register        */
+       uint32_t RESERVED3[24];
+  __IO uint32_t IABR[8];                 /*!< Offset: 0x200 (R/W)  Interrupt Active bit Register           */
+       uint32_t RESERVED4[56];
+  __IO uint8_t  IP[240];                 /*!< Offset: 0x300 (R/W)  Interrupt Priority Register (8Bit wide) */
+       uint32_t RESERVED5[644];
+  __O  uint32_t STIR;                    /*!< Offset: 0xE00 ( /W)  Software Trigger Interrupt Register     */
+}  NVIC_Type;

+

+/* Software Triggered Interrupt Register Definitions */

+#define NVIC_STIR_INTID_Pos                 0                                          /*!< STIR: INTLINESNUM Position */

+#define NVIC_STIR_INTID_Msk                (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/)        /*!< STIR: INTLINESNUM Mask */

+

+/*@} end of group CMSIS_NVIC */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SCB     System Control Block (SCB)

+    \brief      Type definitions for the System Control Block Registers

+  @{

+ */

+

+/** \brief  Structure type to access the System Control Block (SCB).

+ */

+typedef struct

+{

+  __I  uint32_t CPUID;                   /*!< Offset: 0x000 (R/ )  CPUID Base Register                                   */

+  __IO uint32_t ICSR;                    /*!< Offset: 0x004 (R/W)  Interrupt Control and State Register                  */

+  __IO uint32_t VTOR;                    /*!< Offset: 0x008 (R/W)  Vector Table Offset Register                          */

+  __IO uint32_t AIRCR;                   /*!< Offset: 0x00C (R/W)  Application Interrupt and Reset Control Register      */

+  __IO uint32_t SCR;                     /*!< Offset: 0x010 (R/W)  System Control Register                               */

+  __IO uint32_t CCR;                     /*!< Offset: 0x014 (R/W)  Configuration Control Register                        */

+  __IO uint8_t  SHPR[12];                /*!< Offset: 0x018 (R/W)  System Handlers Priority Registers (4-7, 8-11, 12-15) */

+  __IO uint32_t SHCSR;                   /*!< Offset: 0x024 (R/W)  System Handler Control and State Register             */

+  __IO uint32_t CFSR;                    /*!< Offset: 0x028 (R/W)  Configurable Fault Status Register                    */

+  __IO uint32_t HFSR;                    /*!< Offset: 0x02C (R/W)  HardFault Status Register                             */

+  __IO uint32_t DFSR;                    /*!< Offset: 0x030 (R/W)  Debug Fault Status Register                           */

+  __IO uint32_t MMFAR;                   /*!< Offset: 0x034 (R/W)  MemManage Fault Address Register                      */

+  __IO uint32_t BFAR;                    /*!< Offset: 0x038 (R/W)  BusFault Address Register                             */

+  __IO uint32_t AFSR;                    /*!< Offset: 0x03C (R/W)  Auxiliary Fault Status Register                       */

+  __I  uint32_t ID_PFR[2];               /*!< Offset: 0x040 (R/ )  Processor Feature Register                            */

+  __I  uint32_t ID_DFR;                  /*!< Offset: 0x048 (R/ )  Debug Feature Register                                */

+  __I  uint32_t ID_AFR;                  /*!< Offset: 0x04C (R/ )  Auxiliary Feature Register                            */

+  __I  uint32_t ID_MFR[4];               /*!< Offset: 0x050 (R/ )  Memory Model Feature Register                         */

+  __I  uint32_t ID_ISAR[5];              /*!< Offset: 0x060 (R/ )  Instruction Set Attributes Register                   */

+       uint32_t RESERVED0[1];

+  __I  uint32_t CLIDR;                   /*!< Offset: 0x078 (R/ )  Cache Level ID register                               */

+  __I  uint32_t CTR;                     /*!< Offset: 0x07C (R/ )  Cache Type register                                   */

+  __I  uint32_t CCSIDR;                  /*!< Offset: 0x080 (R/ )  Cache Size ID Register                                */

+  __IO uint32_t CSSELR;                  /*!< Offset: 0x084 (R/W)  Cache Size Selection Register                         */

+  __IO uint32_t CPACR;                   /*!< Offset: 0x088 (R/W)  Coprocessor Access Control Register                   */

+       uint32_t RESERVED3[93];

+  __O  uint32_t STIR;                    /*!< Offset: 0x200 ( /W)  Software Triggered Interrupt Register                 */

+       uint32_t RESERVED4[15];

+  __I  uint32_t MVFR0;                   /*!< Offset: 0x240 (R/ )  Media and VFP Feature Register 0                      */

+  __I  uint32_t MVFR1;                   /*!< Offset: 0x244 (R/ )  Media and VFP Feature Register 1                      */

+  __I  uint32_t MVFR2;                   /*!< Offset: 0x248 (R/ )  Media and VFP Feature Register 2                      */

+       uint32_t RESERVED5[1];

+  __O  uint32_t ICIALLU;                 /*!< Offset: 0x250 ( /W)  I-Cache Invalidate All to PoU                         */

+       uint32_t RESERVED6[1];

+  __O  uint32_t ICIMVAU;                 /*!< Offset: 0x258 ( /W)  I-Cache Invalidate by MVA to PoU                      */

+  __O  uint32_t DCIMVAC;                 /*!< Offset: 0x25C ( /W)  D-Cache Invalidate by MVA to PoC                      */

+  __O  uint32_t DCISW;                   /*!< Offset: 0x260 ( /W)  D-Cache Invalidate by Set-way                         */

+  __O  uint32_t DCCMVAU;                 /*!< Offset: 0x264 ( /W)  D-Cache Clean by MVA to PoU                           */

+  __O  uint32_t DCCMVAC;                 /*!< Offset: 0x268 ( /W)  D-Cache Clean by MVA to PoC                           */

+  __O  uint32_t DCCSW;                   /*!< Offset: 0x26C ( /W)  D-Cache Clean by Set-way                              */

+  __O  uint32_t DCCIMVAC;                /*!< Offset: 0x270 ( /W)  D-Cache Clean and Invalidate by MVA to PoC            */

+  __O  uint32_t DCCISW;                  /*!< Offset: 0x274 ( /W)  D-Cache Clean and Invalidate by Set-way               */

+       uint32_t RESERVED7[6];

+  __IO uint32_t ITCMCR;                  /*!< Offset: 0x290 (R/W)  Instruction Tightly-Coupled Memory Control Register   */

+  __IO uint32_t DTCMCR;                  /*!< Offset: 0x294 (R/W)  Data Tightly-Coupled Memory Control Registers         */

+  __IO uint32_t AHBPCR;                  /*!< Offset: 0x298 (R/W)  AHBP Control Register                                 */

+  __IO uint32_t CACR;                    /*!< Offset: 0x29C (R/W)  L1 Cache Control Register                             */

+  __IO uint32_t AHBSCR;                  /*!< Offset: 0x2A0 (R/W)  AHB Slave Control Register                            */

+       uint32_t RESERVED8[1];

+  __IO uint32_t ABFSR;                   /*!< Offset: 0x2A8 (R/W)  Auxiliary Bus Fault Status Register                   */

+} SCB_Type;

+

+/* SCB CPUID Register Definitions */

+#define SCB_CPUID_IMPLEMENTER_Pos          24                                             /*!< SCB CPUID: IMPLEMENTER Position */

+#define SCB_CPUID_IMPLEMENTER_Msk          (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos)          /*!< SCB CPUID: IMPLEMENTER Mask */

+

+#define SCB_CPUID_VARIANT_Pos              20                                             /*!< SCB CPUID: VARIANT Position */

+#define SCB_CPUID_VARIANT_Msk              (0xFUL << SCB_CPUID_VARIANT_Pos)               /*!< SCB CPUID: VARIANT Mask */

+

+#define SCB_CPUID_ARCHITECTURE_Pos         16                                             /*!< SCB CPUID: ARCHITECTURE Position */

+#define SCB_CPUID_ARCHITECTURE_Msk         (0xFUL << SCB_CPUID_ARCHITECTURE_Pos)          /*!< SCB CPUID: ARCHITECTURE Mask */

+

+#define SCB_CPUID_PARTNO_Pos                4                                             /*!< SCB CPUID: PARTNO Position */

+#define SCB_CPUID_PARTNO_Msk               (0xFFFUL << SCB_CPUID_PARTNO_Pos)              /*!< SCB CPUID: PARTNO Mask */

+

+#define SCB_CPUID_REVISION_Pos              0                                             /*!< SCB CPUID: REVISION Position */

+#define SCB_CPUID_REVISION_Msk             (0xFUL /*<< SCB_CPUID_REVISION_Pos*/)          /*!< SCB CPUID: REVISION Mask */

+

+/* SCB Interrupt Control State Register Definitions */

+#define SCB_ICSR_NMIPENDSET_Pos            31                                             /*!< SCB ICSR: NMIPENDSET Position */

+#define SCB_ICSR_NMIPENDSET_Msk            (1UL << SCB_ICSR_NMIPENDSET_Pos)               /*!< SCB ICSR: NMIPENDSET Mask */

+

+#define SCB_ICSR_PENDSVSET_Pos             28                                             /*!< SCB ICSR: PENDSVSET Position */

+#define SCB_ICSR_PENDSVSET_Msk             (1UL << SCB_ICSR_PENDSVSET_Pos)                /*!< SCB ICSR: PENDSVSET Mask */

+

+#define SCB_ICSR_PENDSVCLR_Pos             27                                             /*!< SCB ICSR: PENDSVCLR Position */

+#define SCB_ICSR_PENDSVCLR_Msk             (1UL << SCB_ICSR_PENDSVCLR_Pos)                /*!< SCB ICSR: PENDSVCLR Mask */

+

+#define SCB_ICSR_PENDSTSET_Pos             26                                             /*!< SCB ICSR: PENDSTSET Position */

+#define SCB_ICSR_PENDSTSET_Msk             (1UL << SCB_ICSR_PENDSTSET_Pos)                /*!< SCB ICSR: PENDSTSET Mask */

+

+#define SCB_ICSR_PENDSTCLR_Pos             25                                             /*!< SCB ICSR: PENDSTCLR Position */

+#define SCB_ICSR_PENDSTCLR_Msk             (1UL << SCB_ICSR_PENDSTCLR_Pos)                /*!< SCB ICSR: PENDSTCLR Mask */

+

+#define SCB_ICSR_ISRPREEMPT_Pos            23                                             /*!< SCB ICSR: ISRPREEMPT Position */

+#define SCB_ICSR_ISRPREEMPT_Msk            (1UL << SCB_ICSR_ISRPREEMPT_Pos)               /*!< SCB ICSR: ISRPREEMPT Mask */

+

+#define SCB_ICSR_ISRPENDING_Pos            22                                             /*!< SCB ICSR: ISRPENDING Position */

+#define SCB_ICSR_ISRPENDING_Msk            (1UL << SCB_ICSR_ISRPENDING_Pos)               /*!< SCB ICSR: ISRPENDING Mask */

+

+#define SCB_ICSR_VECTPENDING_Pos           12                                             /*!< SCB ICSR: VECTPENDING Position */

+#define SCB_ICSR_VECTPENDING_Msk           (0x1FFUL << SCB_ICSR_VECTPENDING_Pos)          /*!< SCB ICSR: VECTPENDING Mask */

+

+#define SCB_ICSR_RETTOBASE_Pos             11                                             /*!< SCB ICSR: RETTOBASE Position */

+#define SCB_ICSR_RETTOBASE_Msk             (1UL << SCB_ICSR_RETTOBASE_Pos)                /*!< SCB ICSR: RETTOBASE Mask */

+

+#define SCB_ICSR_VECTACTIVE_Pos             0                                             /*!< SCB ICSR: VECTACTIVE Position */

+#define SCB_ICSR_VECTACTIVE_Msk            (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/)       /*!< SCB ICSR: VECTACTIVE Mask */

+

+/* SCB Vector Table Offset Register Definitions */

+#define SCB_VTOR_TBLOFF_Pos                 7                                             /*!< SCB VTOR: TBLOFF Position */

+#define SCB_VTOR_TBLOFF_Msk                (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos)           /*!< SCB VTOR: TBLOFF Mask */

+

+/* SCB Application Interrupt and Reset Control Register Definitions */

+#define SCB_AIRCR_VECTKEY_Pos              16                                             /*!< SCB AIRCR: VECTKEY Position */

+#define SCB_AIRCR_VECTKEY_Msk              (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos)            /*!< SCB AIRCR: VECTKEY Mask */

+

+#define SCB_AIRCR_VECTKEYSTAT_Pos          16                                             /*!< SCB AIRCR: VECTKEYSTAT Position */

+#define SCB_AIRCR_VECTKEYSTAT_Msk          (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos)        /*!< SCB AIRCR: VECTKEYSTAT Mask */

+

+#define SCB_AIRCR_ENDIANESS_Pos            15                                             /*!< SCB AIRCR: ENDIANESS Position */

+#define SCB_AIRCR_ENDIANESS_Msk            (1UL << SCB_AIRCR_ENDIANESS_Pos)               /*!< SCB AIRCR: ENDIANESS Mask */

+

+#define SCB_AIRCR_PRIGROUP_Pos              8                                             /*!< SCB AIRCR: PRIGROUP Position */

+#define SCB_AIRCR_PRIGROUP_Msk             (7UL << SCB_AIRCR_PRIGROUP_Pos)                /*!< SCB AIRCR: PRIGROUP Mask */

+

+#define SCB_AIRCR_SYSRESETREQ_Pos           2                                             /*!< SCB AIRCR: SYSRESETREQ Position */

+#define SCB_AIRCR_SYSRESETREQ_Msk          (1UL << SCB_AIRCR_SYSRESETREQ_Pos)             /*!< SCB AIRCR: SYSRESETREQ Mask */

+

+#define SCB_AIRCR_VECTCLRACTIVE_Pos         1                                             /*!< SCB AIRCR: VECTCLRACTIVE Position */

+#define SCB_AIRCR_VECTCLRACTIVE_Msk        (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos)           /*!< SCB AIRCR: VECTCLRACTIVE Mask */

+

+#define SCB_AIRCR_VECTRESET_Pos             0                                             /*!< SCB AIRCR: VECTRESET Position */

+#define SCB_AIRCR_VECTRESET_Msk            (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/)           /*!< SCB AIRCR: VECTRESET Mask */

+

+/* SCB System Control Register Definitions */

+#define SCB_SCR_SEVONPEND_Pos               4                                             /*!< SCB SCR: SEVONPEND Position */

+#define SCB_SCR_SEVONPEND_Msk              (1UL << SCB_SCR_SEVONPEND_Pos)                 /*!< SCB SCR: SEVONPEND Mask */

+

+#define SCB_SCR_SLEEPDEEP_Pos               2                                             /*!< SCB SCR: SLEEPDEEP Position */

+#define SCB_SCR_SLEEPDEEP_Msk              (1UL << SCB_SCR_SLEEPDEEP_Pos)                 /*!< SCB SCR: SLEEPDEEP Mask */

+

+#define SCB_SCR_SLEEPONEXIT_Pos             1                                             /*!< SCB SCR: SLEEPONEXIT Position */

+#define SCB_SCR_SLEEPONEXIT_Msk            (1UL << SCB_SCR_SLEEPONEXIT_Pos)               /*!< SCB SCR: SLEEPONEXIT Mask */

+

+/* SCB Configuration Control Register Definitions */

+#define SCB_CCR_BP_Pos                      18                                            /*!< SCB CCR: Branch prediction enable bit Position */

+#define SCB_CCR_BP_Msk                     (1UL << SCB_CCR_BP_Pos)                        /*!< SCB CCR: Branch prediction enable bit Mask */

+

+#define SCB_CCR_IC_Pos                      17                                            /*!< SCB CCR: Instruction cache enable bit Position */

+#define SCB_CCR_IC_Msk                     (1UL << SCB_CCR_IC_Pos)                        /*!< SCB CCR: Instruction cache enable bit Mask */

+

+#define SCB_CCR_DC_Pos                      16                                            /*!< SCB CCR: Cache enable bit Position */

+#define SCB_CCR_DC_Msk                     (1UL << SCB_CCR_DC_Pos)                        /*!< SCB CCR: Cache enable bit Mask */

+

+#define SCB_CCR_STKALIGN_Pos                9                                             /*!< SCB CCR: STKALIGN Position */

+#define SCB_CCR_STKALIGN_Msk               (1UL << SCB_CCR_STKALIGN_Pos)                  /*!< SCB CCR: STKALIGN Mask */

+

+#define SCB_CCR_BFHFNMIGN_Pos               8                                             /*!< SCB CCR: BFHFNMIGN Position */

+#define SCB_CCR_BFHFNMIGN_Msk              (1UL << SCB_CCR_BFHFNMIGN_Pos)                 /*!< SCB CCR: BFHFNMIGN Mask */

+

+#define SCB_CCR_DIV_0_TRP_Pos               4                                             /*!< SCB CCR: DIV_0_TRP Position */

+#define SCB_CCR_DIV_0_TRP_Msk              (1UL << SCB_CCR_DIV_0_TRP_Pos)                 /*!< SCB CCR: DIV_0_TRP Mask */

+

+#define SCB_CCR_UNALIGN_TRP_Pos             3                                             /*!< SCB CCR: UNALIGN_TRP Position */

+#define SCB_CCR_UNALIGN_TRP_Msk            (1UL << SCB_CCR_UNALIGN_TRP_Pos)               /*!< SCB CCR: UNALIGN_TRP Mask */

+

+#define SCB_CCR_USERSETMPEND_Pos            1                                             /*!< SCB CCR: USERSETMPEND Position */

+#define SCB_CCR_USERSETMPEND_Msk           (1UL << SCB_CCR_USERSETMPEND_Pos)              /*!< SCB CCR: USERSETMPEND Mask */

+

+#define SCB_CCR_NONBASETHRDENA_Pos          0                                             /*!< SCB CCR: NONBASETHRDENA Position */

+#define SCB_CCR_NONBASETHRDENA_Msk         (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/)        /*!< SCB CCR: NONBASETHRDENA Mask */

+

+/* SCB System Handler Control and State Register Definitions */

+#define SCB_SHCSR_USGFAULTENA_Pos          18                                             /*!< SCB SHCSR: USGFAULTENA Position */

+#define SCB_SHCSR_USGFAULTENA_Msk          (1UL << SCB_SHCSR_USGFAULTENA_Pos)             /*!< SCB SHCSR: USGFAULTENA Mask */

+

+#define SCB_SHCSR_BUSFAULTENA_Pos          17                                             /*!< SCB SHCSR: BUSFAULTENA Position */

+#define SCB_SHCSR_BUSFAULTENA_Msk          (1UL << SCB_SHCSR_BUSFAULTENA_Pos)             /*!< SCB SHCSR: BUSFAULTENA Mask */

+

+#define SCB_SHCSR_MEMFAULTENA_Pos          16                                             /*!< SCB SHCSR: MEMFAULTENA Position */

+#define SCB_SHCSR_MEMFAULTENA_Msk          (1UL << SCB_SHCSR_MEMFAULTENA_Pos)             /*!< SCB SHCSR: MEMFAULTENA Mask */

+

+#define SCB_SHCSR_SVCALLPENDED_Pos         15                                             /*!< SCB SHCSR: SVCALLPENDED Position */

+#define SCB_SHCSR_SVCALLPENDED_Msk         (1UL << SCB_SHCSR_SVCALLPENDED_Pos)            /*!< SCB SHCSR: SVCALLPENDED Mask */

+

+#define SCB_SHCSR_BUSFAULTPENDED_Pos       14                                             /*!< SCB SHCSR: BUSFAULTPENDED Position */

+#define SCB_SHCSR_BUSFAULTPENDED_Msk       (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos)          /*!< SCB SHCSR: BUSFAULTPENDED Mask */

+

+#define SCB_SHCSR_MEMFAULTPENDED_Pos       13                                             /*!< SCB SHCSR: MEMFAULTPENDED Position */

+#define SCB_SHCSR_MEMFAULTPENDED_Msk       (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos)          /*!< SCB SHCSR: MEMFAULTPENDED Mask */

+

+#define SCB_SHCSR_USGFAULTPENDED_Pos       12                                             /*!< SCB SHCSR: USGFAULTPENDED Position */

+#define SCB_SHCSR_USGFAULTPENDED_Msk       (1UL << SCB_SHCSR_USGFAULTPENDED_Pos)          /*!< SCB SHCSR: USGFAULTPENDED Mask */

+

+#define SCB_SHCSR_SYSTICKACT_Pos           11                                             /*!< SCB SHCSR: SYSTICKACT Position */

+#define SCB_SHCSR_SYSTICKACT_Msk           (1UL << SCB_SHCSR_SYSTICKACT_Pos)              /*!< SCB SHCSR: SYSTICKACT Mask */

+

+#define SCB_SHCSR_PENDSVACT_Pos            10                                             /*!< SCB SHCSR: PENDSVACT Position */

+#define SCB_SHCSR_PENDSVACT_Msk            (1UL << SCB_SHCSR_PENDSVACT_Pos)               /*!< SCB SHCSR: PENDSVACT Mask */

+

+#define SCB_SHCSR_MONITORACT_Pos            8                                             /*!< SCB SHCSR: MONITORACT Position */

+#define SCB_SHCSR_MONITORACT_Msk           (1UL << SCB_SHCSR_MONITORACT_Pos)              /*!< SCB SHCSR: MONITORACT Mask */

+

+#define SCB_SHCSR_SVCALLACT_Pos             7                                             /*!< SCB SHCSR: SVCALLACT Position */

+#define SCB_SHCSR_SVCALLACT_Msk            (1UL << SCB_SHCSR_SVCALLACT_Pos)               /*!< SCB SHCSR: SVCALLACT Mask */

+

+#define SCB_SHCSR_USGFAULTACT_Pos           3                                             /*!< SCB SHCSR: USGFAULTACT Position */

+#define SCB_SHCSR_USGFAULTACT_Msk          (1UL << SCB_SHCSR_USGFAULTACT_Pos)             /*!< SCB SHCSR: USGFAULTACT Mask */

+

+#define SCB_SHCSR_BUSFAULTACT_Pos           1                                             /*!< SCB SHCSR: BUSFAULTACT Position */

+#define SCB_SHCSR_BUSFAULTACT_Msk          (1UL << SCB_SHCSR_BUSFAULTACT_Pos)             /*!< SCB SHCSR: BUSFAULTACT Mask */

+

+#define SCB_SHCSR_MEMFAULTACT_Pos           0                                             /*!< SCB SHCSR: MEMFAULTACT Position */

+#define SCB_SHCSR_MEMFAULTACT_Msk          (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/)         /*!< SCB SHCSR: MEMFAULTACT Mask */

+

+/* SCB Configurable Fault Status Registers Definitions */

+#define SCB_CFSR_USGFAULTSR_Pos            16                                             /*!< SCB CFSR: Usage Fault Status Register Position */

+#define SCB_CFSR_USGFAULTSR_Msk            (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos)          /*!< SCB CFSR: Usage Fault Status Register Mask */

+

+#define SCB_CFSR_BUSFAULTSR_Pos             8                                             /*!< SCB CFSR: Bus Fault Status Register Position */

+#define SCB_CFSR_BUSFAULTSR_Msk            (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos)            /*!< SCB CFSR: Bus Fault Status Register Mask */

+

+#define SCB_CFSR_MEMFAULTSR_Pos             0                                             /*!< SCB CFSR: Memory Manage Fault Status Register Position */

+#define SCB_CFSR_MEMFAULTSR_Msk            (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/)        /*!< SCB CFSR: Memory Manage Fault Status Register Mask */

+

+/* SCB Hard Fault Status Registers Definitions */

+#define SCB_HFSR_DEBUGEVT_Pos              31                                             /*!< SCB HFSR: DEBUGEVT Position */

+#define SCB_HFSR_DEBUGEVT_Msk              (1UL << SCB_HFSR_DEBUGEVT_Pos)                 /*!< SCB HFSR: DEBUGEVT Mask */

+

+#define SCB_HFSR_FORCED_Pos                30                                             /*!< SCB HFSR: FORCED Position */

+#define SCB_HFSR_FORCED_Msk                (1UL << SCB_HFSR_FORCED_Pos)                   /*!< SCB HFSR: FORCED Mask */

+

+#define SCB_HFSR_VECTTBL_Pos                1                                             /*!< SCB HFSR: VECTTBL Position */

+#define SCB_HFSR_VECTTBL_Msk               (1UL << SCB_HFSR_VECTTBL_Pos)                  /*!< SCB HFSR: VECTTBL Mask */

+

+/* SCB Debug Fault Status Register Definitions */

+#define SCB_DFSR_EXTERNAL_Pos               4                                             /*!< SCB DFSR: EXTERNAL Position */

+#define SCB_DFSR_EXTERNAL_Msk              (1UL << SCB_DFSR_EXTERNAL_Pos)                 /*!< SCB DFSR: EXTERNAL Mask */

+

+#define SCB_DFSR_VCATCH_Pos                 3                                             /*!< SCB DFSR: VCATCH Position */

+#define SCB_DFSR_VCATCH_Msk                (1UL << SCB_DFSR_VCATCH_Pos)                   /*!< SCB DFSR: VCATCH Mask */

+

+#define SCB_DFSR_DWTTRAP_Pos                2                                             /*!< SCB DFSR: DWTTRAP Position */

+#define SCB_DFSR_DWTTRAP_Msk               (1UL << SCB_DFSR_DWTTRAP_Pos)                  /*!< SCB DFSR: DWTTRAP Mask */

+

+#define SCB_DFSR_BKPT_Pos                   1                                             /*!< SCB DFSR: BKPT Position */

+#define SCB_DFSR_BKPT_Msk                  (1UL << SCB_DFSR_BKPT_Pos)                     /*!< SCB DFSR: BKPT Mask */

+

+#define SCB_DFSR_HALTED_Pos                 0                                             /*!< SCB DFSR: HALTED Position */

+#define SCB_DFSR_HALTED_Msk                (1UL /*<< SCB_DFSR_HALTED_Pos*/)               /*!< SCB DFSR: HALTED Mask */

+

+/* Cache Level ID register */

+#define SCB_CLIDR_LOUU_Pos                 27                                             /*!< SCB CLIDR: LoUU Position */

+#define SCB_CLIDR_LOUU_Msk                 (7UL << SCB_CLIDR_LOUU_Pos)                    /*!< SCB CLIDR: LoUU Mask */

+

+#define SCB_CLIDR_LOC_Pos                  24                                             /*!< SCB CLIDR: LoC Position */

+/* Bug fix: mask previously shifted by SCB_CLIDR_FORMAT_Pos, which is not defined
+   anywhere in this header; LoC occupies CLIDR[26:24], so the shift must use
+   SCB_CLIDR_LOC_Pos (matches the correction made in later CMSIS releases). */
+#define SCB_CLIDR_LOC_Msk                  (7UL << SCB_CLIDR_LOC_Pos)                     /*!< SCB CLIDR: LoC Mask */

+

+/* Cache Type register */

+#define SCB_CTR_FORMAT_Pos                 29                                             /*!< SCB CTR: Format Position */

+#define SCB_CTR_FORMAT_Msk                 (7UL << SCB_CTR_FORMAT_Pos)                    /*!< SCB CTR: Format Mask */

+

+#define SCB_CTR_CWG_Pos                    24                                             /*!< SCB CTR: CWG Position */

+#define SCB_CTR_CWG_Msk                    (0xFUL << SCB_CTR_CWG_Pos)                     /*!< SCB CTR: CWG Mask */

+

+#define SCB_CTR_ERG_Pos                    20                                             /*!< SCB CTR: ERG Position */

+#define SCB_CTR_ERG_Msk                    (0xFUL << SCB_CTR_ERG_Pos)                     /*!< SCB CTR: ERG Mask */

+

+#define SCB_CTR_DMINLINE_Pos               16                                             /*!< SCB CTR: DminLine Position */

+#define SCB_CTR_DMINLINE_Msk               (0xFUL << SCB_CTR_DMINLINE_Pos)                /*!< SCB CTR: DminLine Mask */

+

+#define SCB_CTR_IMINLINE_Pos                0                                             /*!< SCB CTR: ImInLine Position */

+#define SCB_CTR_IMINLINE_Msk               (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/)            /*!< SCB CTR: ImInLine Mask */

+

+/* Cache Size ID Register */

+#define SCB_CCSIDR_WT_Pos                  31                                             /*!< SCB CCSIDR: WT Position */

+#define SCB_CCSIDR_WT_Msk                  (7UL << SCB_CCSIDR_WT_Pos)                     /*!< SCB CCSIDR: WT Mask */

+

+#define SCB_CCSIDR_WB_Pos                  30                                             /*!< SCB CCSIDR: WB Position */

+#define SCB_CCSIDR_WB_Msk                  (7UL << SCB_CCSIDR_WB_Pos)                     /*!< SCB CCSIDR: WB Mask */

+

+#define SCB_CCSIDR_RA_Pos                  29                                             /*!< SCB CCSIDR: RA Position */

+#define SCB_CCSIDR_RA_Msk                  (7UL << SCB_CCSIDR_RA_Pos)                     /*!< SCB CCSIDR: RA Mask */

+

+#define SCB_CCSIDR_WA_Pos                  28                                             /*!< SCB CCSIDR: WA Position */

+#define SCB_CCSIDR_WA_Msk                  (7UL << SCB_CCSIDR_WA_Pos)                     /*!< SCB CCSIDR: WA Mask */

+

+#define SCB_CCSIDR_NUMSETS_Pos             13                                             /*!< SCB CCSIDR: NumSets Position */

+#define SCB_CCSIDR_NUMSETS_Msk             (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos)           /*!< SCB CCSIDR: NumSets Mask */

+

+#define SCB_CCSIDR_ASSOCIATIVITY_Pos        3                                             /*!< SCB CCSIDR: Associativity Position */

+#define SCB_CCSIDR_ASSOCIATIVITY_Msk       (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos)      /*!< SCB CCSIDR: Associativity Mask */

+

+#define SCB_CCSIDR_LINESIZE_Pos             0                                             /*!< SCB CCSIDR: LineSize Position */

+#define SCB_CCSIDR_LINESIZE_Msk            (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/)           /*!< SCB CCSIDR: LineSize Mask */

+

+/* Cache Size Selection Register */

+#define SCB_CSSELR_LEVEL_Pos                1                                             /*!< SCB CSSELR: Level Position */

+#define SCB_CSSELR_LEVEL_Msk               (7UL << SCB_CSSELR_LEVEL_Pos)                  /*!< SCB CSSELR: Level Mask */

+

+#define SCB_CSSELR_IND_Pos                  0                                             /*!< SCB CSSELR: InD Position */

+#define SCB_CSSELR_IND_Msk                 (1UL /*<< SCB_CSSELR_IND_Pos*/)                /*!< SCB CSSELR: InD Mask */

+

+/* SCB Software Triggered Interrupt Register */

+#define SCB_STIR_INTID_Pos                  0                                             /*!< SCB STIR: INTID Position */

+#define SCB_STIR_INTID_Msk                 (0x1FFUL /*<< SCB_STIR_INTID_Pos*/)            /*!< SCB STIR: INTID Mask */

+

+/* Instruction Tightly-Coupled Memory Control Register*/

+#define SCB_ITCMCR_SZ_Pos                   3                                             /*!< SCB ITCMCR: SZ Position */

+#define SCB_ITCMCR_SZ_Msk                  (0xFUL << SCB_ITCMCR_SZ_Pos)                   /*!< SCB ITCMCR: SZ Mask */

+

+#define SCB_ITCMCR_RETEN_Pos                2                                             /*!< SCB ITCMCR: RETEN Position */

+#define SCB_ITCMCR_RETEN_Msk               (1UL << SCB_ITCMCR_RETEN_Pos)                  /*!< SCB ITCMCR: RETEN Mask */

+

+#define SCB_ITCMCR_RMW_Pos                  1                                             /*!< SCB ITCMCR: RMW Position */

+#define SCB_ITCMCR_RMW_Msk                 (1UL << SCB_ITCMCR_RMW_Pos)                    /*!< SCB ITCMCR: RMW Mask */

+

+#define SCB_ITCMCR_EN_Pos                   0                                             /*!< SCB ITCMCR: EN Position */

+#define SCB_ITCMCR_EN_Msk                  (1UL /*<< SCB_ITCMCR_EN_Pos*/)                 /*!< SCB ITCMCR: EN Mask */

+

+/* Data Tightly-Coupled Memory Control Registers */

+#define SCB_DTCMCR_SZ_Pos                   3                                             /*!< SCB DTCMCR: SZ Position */

+#define SCB_DTCMCR_SZ_Msk                  (0xFUL << SCB_DTCMCR_SZ_Pos)                   /*!< SCB DTCMCR: SZ Mask */

+

+#define SCB_DTCMCR_RETEN_Pos                2                                             /*!< SCB DTCMCR: RETEN Position */

+#define SCB_DTCMCR_RETEN_Msk               (1UL << SCB_DTCMCR_RETEN_Pos)                   /*!< SCB DTCMCR: RETEN Mask */

+

+#define SCB_DTCMCR_RMW_Pos                  1                                             /*!< SCB DTCMCR: RMW Position */

+#define SCB_DTCMCR_RMW_Msk                 (1UL << SCB_DTCMCR_RMW_Pos)                    /*!< SCB DTCMCR: RMW Mask */

+

+#define SCB_DTCMCR_EN_Pos                   0                                             /*!< SCB DTCMCR: EN Position */

+#define SCB_DTCMCR_EN_Msk                  (1UL /*<< SCB_DTCMCR_EN_Pos*/)                 /*!< SCB DTCMCR: EN Mask */

+

+/* AHBP Control Register */

+#define SCB_AHBPCR_SZ_Pos                   1                                             /*!< SCB AHBPCR: SZ Position */

+#define SCB_AHBPCR_SZ_Msk                  (7UL << SCB_AHBPCR_SZ_Pos)                     /*!< SCB AHBPCR: SZ Mask */

+

+#define SCB_AHBPCR_EN_Pos                   0                                             /*!< SCB AHBPCR: EN Position */

+#define SCB_AHBPCR_EN_Msk                  (1UL /*<< SCB_AHBPCR_EN_Pos*/)                 /*!< SCB AHBPCR: EN Mask */

+

+/* L1 Cache Control Register */

+#define SCB_CACR_FORCEWT_Pos                2                                             /*!< SCB CACR: FORCEWT Position */

+#define SCB_CACR_FORCEWT_Msk               (1UL << SCB_CACR_FORCEWT_Pos)                  /*!< SCB CACR: FORCEWT Mask */

+

+#define SCB_CACR_ECCEN_Pos                  1                                             /*!< SCB CACR: ECCEN Position */

+#define SCB_CACR_ECCEN_Msk                 (1UL << SCB_CACR_ECCEN_Pos)                    /*!< SCB CACR: ECCEN Mask */

+

+#define SCB_CACR_SIWT_Pos                   0                                             /*!< SCB CACR: SIWT Position */

+#define SCB_CACR_SIWT_Msk                  (1UL /*<< SCB_CACR_SIWT_Pos*/)                 /*!< SCB CACR: SIWT Mask */

+

+/* AHBS control register */

+#define SCB_AHBSCR_INITCOUNT_Pos           11                                             /*!< SCB AHBSCR: INITCOUNT Position */

+/* Bug fix: shift used SCB_AHBPCR_INITCOUNT_Pos (AHBP register prefix, macro never
+   defined); the AHBSCR position defined immediately above is the intended one. */
+#define SCB_AHBSCR_INITCOUNT_Msk           (0x1FUL << SCB_AHBSCR_INITCOUNT_Pos)           /*!< SCB AHBSCR: INITCOUNT Mask */

+

+#define SCB_AHBSCR_TPRI_Pos                 2                                             /*!< SCB AHBSCR: TPRI Position */

+/* Bug fix: shift used SCB_AHBPCR_TPRI_Pos (AHBP register prefix, macro never
+   defined); the AHBSCR position defined immediately above is the intended one. */
+#define SCB_AHBSCR_TPRI_Msk                (0x1FFUL << SCB_AHBSCR_TPRI_Pos)               /*!< SCB AHBSCR: TPRI Mask */

+

+#define SCB_AHBSCR_CTL_Pos                  0                                             /*!< SCB AHBSCR: CTL Position*/

+/* Comment fix: the (intentionally commented-out, shift-by-zero) reference named
+   SCB_AHBPCR_CTL_Pos; corrected to the AHBSCR macro defined above. */
+#define SCB_AHBSCR_CTL_Msk                 (3UL /*<< SCB_AHBSCR_CTL_Pos*/)                /*!< SCB AHBSCR: CTL Mask */

+

+/* Auxiliary Bus Fault Status Register */

+#define SCB_ABFSR_AXIMTYPE_Pos              8                                             /*!< SCB ABFSR: AXIMTYPE Position*/

+#define SCB_ABFSR_AXIMTYPE_Msk             (3UL << SCB_ABFSR_AXIMTYPE_Pos)                /*!< SCB ABFSR: AXIMTYPE Mask */

+

+#define SCB_ABFSR_EPPB_Pos                  4                                             /*!< SCB ABFSR: EPPB Position*/

+#define SCB_ABFSR_EPPB_Msk                 (1UL << SCB_ABFSR_EPPB_Pos)                    /*!< SCB ABFSR: EPPB Mask */

+

+#define SCB_ABFSR_AXIM_Pos                  3                                             /*!< SCB ABFSR: AXIM Position*/

+#define SCB_ABFSR_AXIM_Msk                 (1UL << SCB_ABFSR_AXIM_Pos)                    /*!< SCB ABFSR: AXIM Mask */

+

+#define SCB_ABFSR_AHBP_Pos                  2                                             /*!< SCB ABFSR: AHBP Position*/

+#define SCB_ABFSR_AHBP_Msk                 (1UL << SCB_ABFSR_AHBP_Pos)                    /*!< SCB ABFSR: AHBP Mask */

+

+#define SCB_ABFSR_DTCM_Pos                  1                                             /*!< SCB ABFSR: DTCM Position*/

+#define SCB_ABFSR_DTCM_Msk                 (1UL << SCB_ABFSR_DTCM_Pos)                    /*!< SCB ABFSR: DTCM Mask */

+

+#define SCB_ABFSR_ITCM_Pos                  0                                             /*!< SCB ABFSR: ITCM Position*/

+#define SCB_ABFSR_ITCM_Msk                 (1UL /*<< SCB_ABFSR_ITCM_Pos*/)                /*!< SCB ABFSR: ITCM Mask */

+

+/*@} end of group CMSIS_SCB */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB)

+    \brief      Type definitions for the System Control and ID Register not in the SCB

+  @{

+ */

+

+/** \brief  Structure type to access the System Control and ID Register not in the SCB.

+ */

+typedef struct
+/* Memory-mapped register overlay: member order and padding are fixed by the
+   hardware register map — do not reorder or repack. */
+{

+       uint32_t RESERVED0[1];            /* pad 0x000..0x003 (ICTR starts at 0x004) */

+  __I  uint32_t ICTR;                    /*!< Offset: 0x004 (R/ )  Interrupt Controller Type Register      */

+  __IO uint32_t ACTLR;                   /*!< Offset: 0x008 (R/W)  Auxiliary Control Register              */

+} SCnSCB_Type;

+

+/* Interrupt Controller Type Register Definitions */

+#define SCnSCB_ICTR_INTLINESNUM_Pos         0                                          /*!< ICTR: INTLINESNUM Position */

+#define SCnSCB_ICTR_INTLINESNUM_Msk        (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/)  /*!< ICTR: INTLINESNUM Mask */

+

+/* Auxiliary Control Register Definitions */

+#define SCnSCB_ACTLR_DISITMATBFLUSH_Pos    12                                          /*!< ACTLR: DISITMATBFLUSH Position */

+#define SCnSCB_ACTLR_DISITMATBFLUSH_Msk    (1UL << SCnSCB_ACTLR_DISITMATBFLUSH_Pos)    /*!< ACTLR: DISITMATBFLUSH Mask */

+

+#define SCnSCB_ACTLR_DISRAMODE_Pos         11                                          /*!< ACTLR: DISRAMODE Position */

+#define SCnSCB_ACTLR_DISRAMODE_Msk         (1UL << SCnSCB_ACTLR_DISRAMODE_Pos)         /*!< ACTLR: DISRAMODE Mask */

+

+#define SCnSCB_ACTLR_FPEXCODIS_Pos         10                                          /*!< ACTLR: FPEXCODIS Position */

+#define SCnSCB_ACTLR_FPEXCODIS_Msk         (1UL << SCnSCB_ACTLR_FPEXCODIS_Pos)         /*!< ACTLR: FPEXCODIS Mask */

+

+#define SCnSCB_ACTLR_DISFOLD_Pos            2                                          /*!< ACTLR: DISFOLD Position */

+#define SCnSCB_ACTLR_DISFOLD_Msk           (1UL << SCnSCB_ACTLR_DISFOLD_Pos)           /*!< ACTLR: DISFOLD Mask */

+

+#define SCnSCB_ACTLR_DISMCYCINT_Pos         0                                          /*!< ACTLR: DISMCYCINT Position */

+#define SCnSCB_ACTLR_DISMCYCINT_Msk        (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/)    /*!< ACTLR: DISMCYCINT Mask */

+

+/*@} end of group CMSIS_SCnotSCB */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SysTick     System Tick Timer (SysTick)

+    \brief      Type definitions for the System Timer Registers.

+  @{

+ */

+

+/** \brief  Structure type to access the System Timer (SysTick).

+ */

+typedef struct
+/* Memory-mapped register overlay: member order and offsets mirror the SysTick
+   hardware register map — do not reorder members. */
+{

+  __IO uint32_t CTRL;                    /*!< Offset: 0x000 (R/W)  SysTick Control and Status Register */

+  __IO uint32_t LOAD;                    /*!< Offset: 0x004 (R/W)  SysTick Reload Value Register       */

+  __IO uint32_t VAL;                     /*!< Offset: 0x008 (R/W)  SysTick Current Value Register      */

+  __I  uint32_t CALIB;                   /*!< Offset: 0x00C (R/ )  SysTick Calibration Register        */

+} SysTick_Type;

+

+/* SysTick Control / Status Register Definitions */

+#define SysTick_CTRL_COUNTFLAG_Pos         16                                             /*!< SysTick CTRL: COUNTFLAG Position */

+#define SysTick_CTRL_COUNTFLAG_Msk         (1UL << SysTick_CTRL_COUNTFLAG_Pos)            /*!< SysTick CTRL: COUNTFLAG Mask */

+

+#define SysTick_CTRL_CLKSOURCE_Pos          2                                             /*!< SysTick CTRL: CLKSOURCE Position */

+#define SysTick_CTRL_CLKSOURCE_Msk         (1UL << SysTick_CTRL_CLKSOURCE_Pos)            /*!< SysTick CTRL: CLKSOURCE Mask */

+

+#define SysTick_CTRL_TICKINT_Pos            1                                             /*!< SysTick CTRL: TICKINT Position */

+#define SysTick_CTRL_TICKINT_Msk           (1UL << SysTick_CTRL_TICKINT_Pos)              /*!< SysTick CTRL: TICKINT Mask */

+

+#define SysTick_CTRL_ENABLE_Pos             0                                             /*!< SysTick CTRL: ENABLE Position */

+#define SysTick_CTRL_ENABLE_Msk            (1UL /*<< SysTick_CTRL_ENABLE_Pos*/)           /*!< SysTick CTRL: ENABLE Mask */

+

+/* SysTick Reload Register Definitions */

+#define SysTick_LOAD_RELOAD_Pos             0                                             /*!< SysTick LOAD: RELOAD Position */

+#define SysTick_LOAD_RELOAD_Msk            (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/)    /*!< SysTick LOAD: RELOAD Mask */

+

+/* SysTick Current Register Definitions */

+#define SysTick_VAL_CURRENT_Pos             0                                             /*!< SysTick VAL: CURRENT Position */

+#define SysTick_VAL_CURRENT_Msk            (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/)    /*!< SysTick VAL: CURRENT Mask */

+

+/* SysTick Calibration Register Definitions */

+#define SysTick_CALIB_NOREF_Pos            31                                             /*!< SysTick CALIB: NOREF Position */

+#define SysTick_CALIB_NOREF_Msk            (1UL << SysTick_CALIB_NOREF_Pos)               /*!< SysTick CALIB: NOREF Mask */

+

+#define SysTick_CALIB_SKEW_Pos             30                                             /*!< SysTick CALIB: SKEW Position */

+#define SysTick_CALIB_SKEW_Msk             (1UL << SysTick_CALIB_SKEW_Pos)                /*!< SysTick CALIB: SKEW Mask */

+

+#define SysTick_CALIB_TENMS_Pos             0                                             /*!< SysTick CALIB: TENMS Position */

+#define SysTick_CALIB_TENMS_Msk            (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/)    /*!< SysTick CALIB: TENMS Mask */

+

+/*@} end of group CMSIS_SysTick */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_ITM     Instrumentation Trace Macrocell (ITM)

+    \brief      Type definitions for the Instrumentation Trace Macrocell (ITM)

+  @{

+ */

+

+/** \brief  Structure type to access the Instrumentation Trace Macrocell Register (ITM).

+ */

+typedef struct
+/* Memory-mapped register overlay: member order and the RESERVED pads are sized
+   so each member lands on the hardware offset noted in its comment. */
+{

+  __O  union
+  {

+    __O  uint8_t    u8;                  /*!< Offset: 0x000 ( /W)  ITM Stimulus Port 8-bit                   */

+    __O  uint16_t   u16;                 /*!< Offset: 0x000 ( /W)  ITM Stimulus Port 16-bit                  */

+    __O  uint32_t   u32;                 /*!< Offset: 0x000 ( /W)  ITM Stimulus Port 32-bit                  */

+  }  PORT [32];                          /*!< Offset: 0x000 ( /W)  ITM Stimulus Port Registers               */

+       uint32_t RESERVED0[864];          /* pad 0x080..0xDFF */

+  __IO uint32_t TER;                     /*!< Offset: 0xE00 (R/W)  ITM Trace Enable Register                 */

+       uint32_t RESERVED1[15];           /* pad 0xE04..0xE3F */

+  __IO uint32_t TPR;                     /*!< Offset: 0xE40 (R/W)  ITM Trace Privilege Register              */

+       uint32_t RESERVED2[15];           /* pad 0xE44..0xE7F */

+  __IO uint32_t TCR;                     /*!< Offset: 0xE80 (R/W)  ITM Trace Control Register                */

+       uint32_t RESERVED3[29];           /* pad 0xE84..0xEF7 */

+  __O  uint32_t IWR;                     /*!< Offset: 0xEF8 ( /W)  ITM Integration Write Register            */

+  __I  uint32_t IRR;                     /*!< Offset: 0xEFC (R/ )  ITM Integration Read Register             */

+  __IO uint32_t IMCR;                    /*!< Offset: 0xF00 (R/W)  ITM Integration Mode Control Register     */

+       uint32_t RESERVED4[43];           /* pad 0xF04..0xFAF */

+  __O  uint32_t LAR;                     /*!< Offset: 0xFB0 ( /W)  ITM Lock Access Register                  */

+  __I  uint32_t LSR;                     /*!< Offset: 0xFB4 (R/ )  ITM Lock Status Register                  */

+       uint32_t RESERVED5[6];            /* pad 0xFB8..0xFCF */

+  __I  uint32_t PID4;                    /*!< Offset: 0xFD0 (R/ )  ITM Peripheral Identification Register #4 */

+  __I  uint32_t PID5;                    /*!< Offset: 0xFD4 (R/ )  ITM Peripheral Identification Register #5 */

+  __I  uint32_t PID6;                    /*!< Offset: 0xFD8 (R/ )  ITM Peripheral Identification Register #6 */

+  __I  uint32_t PID7;                    /*!< Offset: 0xFDC (R/ )  ITM Peripheral Identification Register #7 */

+  __I  uint32_t PID0;                    /*!< Offset: 0xFE0 (R/ )  ITM Peripheral Identification Register #0 */

+  __I  uint32_t PID1;                    /*!< Offset: 0xFE4 (R/ )  ITM Peripheral Identification Register #1 */

+  __I  uint32_t PID2;                    /*!< Offset: 0xFE8 (R/ )  ITM Peripheral Identification Register #2 */

+  __I  uint32_t PID3;                    /*!< Offset: 0xFEC (R/ )  ITM Peripheral Identification Register #3 */

+  __I  uint32_t CID0;                    /*!< Offset: 0xFF0 (R/ )  ITM Component  Identification Register #0 */

+  __I  uint32_t CID1;                    /*!< Offset: 0xFF4 (R/ )  ITM Component  Identification Register #1 */

+  __I  uint32_t CID2;                    /*!< Offset: 0xFF8 (R/ )  ITM Component  Identification Register #2 */

+  __I  uint32_t CID3;                    /*!< Offset: 0xFFC (R/ )  ITM Component  Identification Register #3 */

+} ITM_Type;

+

+/* ITM Trace Privilege Register Definitions */

+#define ITM_TPR_PRIVMASK_Pos                0                                             /*!< ITM TPR: PRIVMASK Position */

+#define ITM_TPR_PRIVMASK_Msk               (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/)            /*!< ITM TPR: PRIVMASK Mask */

+

+/* ITM Trace Control Register Definitions */

+#define ITM_TCR_BUSY_Pos                   23                                             /*!< ITM TCR: BUSY Position */

+#define ITM_TCR_BUSY_Msk                   (1UL << ITM_TCR_BUSY_Pos)                      /*!< ITM TCR: BUSY Mask */

+

+#define ITM_TCR_TraceBusID_Pos             16                                             /*!< ITM TCR: ATBID Position */

+#define ITM_TCR_TraceBusID_Msk             (0x7FUL << ITM_TCR_TraceBusID_Pos)             /*!< ITM TCR: ATBID Mask */

+

+#define ITM_TCR_GTSFREQ_Pos                10                                             /*!< ITM TCR: Global timestamp frequency Position */

+#define ITM_TCR_GTSFREQ_Msk                (3UL << ITM_TCR_GTSFREQ_Pos)                   /*!< ITM TCR: Global timestamp frequency Mask */

+

+#define ITM_TCR_TSPrescale_Pos              8                                             /*!< ITM TCR: TSPrescale Position */

+#define ITM_TCR_TSPrescale_Msk             (3UL << ITM_TCR_TSPrescale_Pos)                /*!< ITM TCR: TSPrescale Mask */

+

+#define ITM_TCR_SWOENA_Pos                  4                                             /*!< ITM TCR: SWOENA Position */

+#define ITM_TCR_SWOENA_Msk                 (1UL << ITM_TCR_SWOENA_Pos)                    /*!< ITM TCR: SWOENA Mask */

+

+#define ITM_TCR_DWTENA_Pos                  3                                             /*!< ITM TCR: DWTENA Position */

+#define ITM_TCR_DWTENA_Msk                 (1UL << ITM_TCR_DWTENA_Pos)                    /*!< ITM TCR: DWTENA Mask */

+

+#define ITM_TCR_SYNCENA_Pos                 2                                             /*!< ITM TCR: SYNCENA Position */

+#define ITM_TCR_SYNCENA_Msk                (1UL << ITM_TCR_SYNCENA_Pos)                   /*!< ITM TCR: SYNCENA Mask */

+

+#define ITM_TCR_TSENA_Pos                   1                                             /*!< ITM TCR: TSENA Position */

+#define ITM_TCR_TSENA_Msk                  (1UL << ITM_TCR_TSENA_Pos)                     /*!< ITM TCR: TSENA Mask */

+

+#define ITM_TCR_ITMENA_Pos                  0                                             /*!< ITM TCR: ITM Enable bit Position */

+#define ITM_TCR_ITMENA_Msk                 (1UL /*<< ITM_TCR_ITMENA_Pos*/)                /*!< ITM TCR: ITM Enable bit Mask */

+

+/* ITM Integration Write Register Definitions */

+#define ITM_IWR_ATVALIDM_Pos                0                                             /*!< ITM IWR: ATVALIDM Position */

+#define ITM_IWR_ATVALIDM_Msk               (1UL /*<< ITM_IWR_ATVALIDM_Pos*/)              /*!< ITM IWR: ATVALIDM Mask */

+

+/* ITM Integration Read Register Definitions */

+#define ITM_IRR_ATREADYM_Pos                0                                             /*!< ITM IRR: ATREADYM Position */

+#define ITM_IRR_ATREADYM_Msk               (1UL /*<< ITM_IRR_ATREADYM_Pos*/)              /*!< ITM IRR: ATREADYM Mask */

+

+/* ITM Integration Mode Control Register Definitions */

+#define ITM_IMCR_INTEGRATION_Pos            0                                             /*!< ITM IMCR: INTEGRATION Position */

+#define ITM_IMCR_INTEGRATION_Msk           (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/)          /*!< ITM IMCR: INTEGRATION Mask */

+

+/* ITM Lock Status Register Definitions */

+#define ITM_LSR_ByteAcc_Pos                 2                                             /*!< ITM LSR: ByteAcc Position */

+#define ITM_LSR_ByteAcc_Msk                (1UL << ITM_LSR_ByteAcc_Pos)                   /*!< ITM LSR: ByteAcc Mask */

+

+#define ITM_LSR_Access_Pos                  1                                             /*!< ITM LSR: Access Position */

+#define ITM_LSR_Access_Msk                 (1UL << ITM_LSR_Access_Pos)                    /*!< ITM LSR: Access Mask */

+

+#define ITM_LSR_Present_Pos                 0                                             /*!< ITM LSR: Present Position */

+#define ITM_LSR_Present_Msk                (1UL /*<< ITM_LSR_Present_Pos*/)               /*!< ITM LSR: Present Mask */

+

+/*@}*/ /* end of group CMSIS_ITM */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_DWT     Data Watchpoint and Trace (DWT)

+    \brief      Type definitions for the Data Watchpoint and Trace (DWT)

+  @{

+ */

+

+/** \brief  Structure type to access the Data Watchpoint and Trace Register (DWT).

+ */

+typedef struct
+/* Memory-mapped register overlay: four COMP/MASK/FUNCTION comparator groups at
+   0x10-byte stride, then a large pad so LAR/LSR land at 0xFB0/0xFB4. */
+{

+  __IO uint32_t CTRL;                    /*!< Offset: 0x000 (R/W)  Control Register                          */

+  __IO uint32_t CYCCNT;                  /*!< Offset: 0x004 (R/W)  Cycle Count Register                      */

+  __IO uint32_t CPICNT;                  /*!< Offset: 0x008 (R/W)  CPI Count Register                        */

+  __IO uint32_t EXCCNT;                  /*!< Offset: 0x00C (R/W)  Exception Overhead Count Register         */

+  __IO uint32_t SLEEPCNT;                /*!< Offset: 0x010 (R/W)  Sleep Count Register                      */

+  __IO uint32_t LSUCNT;                  /*!< Offset: 0x014 (R/W)  LSU Count Register                        */

+  __IO uint32_t FOLDCNT;                 /*!< Offset: 0x018 (R/W)  Folded-instruction Count Register         */

+  __I  uint32_t PCSR;                    /*!< Offset: 0x01C (R/ )  Program Counter Sample Register           */

+  __IO uint32_t COMP0;                   /*!< Offset: 0x020 (R/W)  Comparator Register 0                     */

+  __IO uint32_t MASK0;                   /*!< Offset: 0x024 (R/W)  Mask Register 0                           */

+  __IO uint32_t FUNCTION0;               /*!< Offset: 0x028 (R/W)  Function Register 0                       */

+       uint32_t RESERVED0[1];            /* pad 0x02C..0x02F */

+  __IO uint32_t COMP1;                   /*!< Offset: 0x030 (R/W)  Comparator Register 1                     */

+  __IO uint32_t MASK1;                   /*!< Offset: 0x034 (R/W)  Mask Register 1                           */

+  __IO uint32_t FUNCTION1;               /*!< Offset: 0x038 (R/W)  Function Register 1                       */

+       uint32_t RESERVED1[1];            /* pad 0x03C..0x03F */

+  __IO uint32_t COMP2;                   /*!< Offset: 0x040 (R/W)  Comparator Register 2                     */

+  __IO uint32_t MASK2;                   /*!< Offset: 0x044 (R/W)  Mask Register 2                           */

+  __IO uint32_t FUNCTION2;               /*!< Offset: 0x048 (R/W)  Function Register 2                       */

+       uint32_t RESERVED2[1];            /* pad 0x04C..0x04F */

+  __IO uint32_t COMP3;                   /*!< Offset: 0x050 (R/W)  Comparator Register 3                     */

+  __IO uint32_t MASK3;                   /*!< Offset: 0x054 (R/W)  Mask Register 3                           */

+  __IO uint32_t FUNCTION3;               /*!< Offset: 0x058 (R/W)  Function Register 3                       */

+       uint32_t RESERVED3[981];          /* pad 0x05C..0xFAF */

+  __O  uint32_t LAR;                     /*!< Offset: 0xFB0 (  W)  Lock Access Register                      */

+  __I  uint32_t LSR;                     /*!< Offset: 0xFB4 (R  )  Lock Status Register                      */

+} DWT_Type;

+

+/* DWT Control Register Definitions */

+#define DWT_CTRL_NUMCOMP_Pos               28                                          /*!< DWT CTRL: NUMCOMP Position */

+#define DWT_CTRL_NUMCOMP_Msk               (0xFUL << DWT_CTRL_NUMCOMP_Pos)             /*!< DWT CTRL: NUMCOMP Mask */

+

+#define DWT_CTRL_NOTRCPKT_Pos              27                                          /*!< DWT CTRL: NOTRCPKT Position */

+#define DWT_CTRL_NOTRCPKT_Msk              (0x1UL << DWT_CTRL_NOTRCPKT_Pos)            /*!< DWT CTRL: NOTRCPKT Mask */

+

+#define DWT_CTRL_NOEXTTRIG_Pos             26                                          /*!< DWT CTRL: NOEXTTRIG Position */

+#define DWT_CTRL_NOEXTTRIG_Msk             (0x1UL << DWT_CTRL_NOEXTTRIG_Pos)           /*!< DWT CTRL: NOEXTTRIG Mask */

+

+#define DWT_CTRL_NOCYCCNT_Pos              25                                          /*!< DWT CTRL: NOCYCCNT Position */

+#define DWT_CTRL_NOCYCCNT_Msk              (0x1UL << DWT_CTRL_NOCYCCNT_Pos)            /*!< DWT CTRL: NOCYCCNT Mask */

+

+#define DWT_CTRL_NOPRFCNT_Pos              24                                          /*!< DWT CTRL: NOPRFCNT Position */

+#define DWT_CTRL_NOPRFCNT_Msk              (0x1UL << DWT_CTRL_NOPRFCNT_Pos)            /*!< DWT CTRL: NOPRFCNT Mask */

+

+#define DWT_CTRL_CYCEVTENA_Pos             22                                          /*!< DWT CTRL: CYCEVTENA Position */

+#define DWT_CTRL_CYCEVTENA_Msk             (0x1UL << DWT_CTRL_CYCEVTENA_Pos)           /*!< DWT CTRL: CYCEVTENA Mask */

+

+#define DWT_CTRL_FOLDEVTENA_Pos            21                                          /*!< DWT CTRL: FOLDEVTENA Position */

+#define DWT_CTRL_FOLDEVTENA_Msk            (0x1UL << DWT_CTRL_FOLDEVTENA_Pos)          /*!< DWT CTRL: FOLDEVTENA Mask */

+

+#define DWT_CTRL_LSUEVTENA_Pos             20                                          /*!< DWT CTRL: LSUEVTENA Position */

+#define DWT_CTRL_LSUEVTENA_Msk             (0x1UL << DWT_CTRL_LSUEVTENA_Pos)           /*!< DWT CTRL: LSUEVTENA Mask */

+

+#define DWT_CTRL_SLEEPEVTENA_Pos           19                                          /*!< DWT CTRL: SLEEPEVTENA Position */

+#define DWT_CTRL_SLEEPEVTENA_Msk           (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos)         /*!< DWT CTRL: SLEEPEVTENA Mask */

+

+#define DWT_CTRL_EXCEVTENA_Pos             18                                          /*!< DWT CTRL: EXCEVTENA Position */

+#define DWT_CTRL_EXCEVTENA_Msk             (0x1UL << DWT_CTRL_EXCEVTENA_Pos)           /*!< DWT CTRL: EXCEVTENA Mask */

+

+#define DWT_CTRL_CPIEVTENA_Pos             17                                          /*!< DWT CTRL: CPIEVTENA Position */

+#define DWT_CTRL_CPIEVTENA_Msk             (0x1UL << DWT_CTRL_CPIEVTENA_Pos)           /*!< DWT CTRL: CPIEVTENA Mask */

+

+#define DWT_CTRL_EXCTRCENA_Pos             16                                          /*!< DWT CTRL: EXCTRCENA Position */

+#define DWT_CTRL_EXCTRCENA_Msk             (0x1UL << DWT_CTRL_EXCTRCENA_Pos)           /*!< DWT CTRL: EXCTRCENA Mask */

+

+#define DWT_CTRL_PCSAMPLENA_Pos            12                                          /*!< DWT CTRL: PCSAMPLENA Position */

+#define DWT_CTRL_PCSAMPLENA_Msk            (0x1UL << DWT_CTRL_PCSAMPLENA_Pos)          /*!< DWT CTRL: PCSAMPLENA Mask */

+

+#define DWT_CTRL_SYNCTAP_Pos               10                                          /*!< DWT CTRL: SYNCTAP Position */

+#define DWT_CTRL_SYNCTAP_Msk               (0x3UL << DWT_CTRL_SYNCTAP_Pos)             /*!< DWT CTRL: SYNCTAP Mask */

+

+#define DWT_CTRL_CYCTAP_Pos                 9                                          /*!< DWT CTRL: CYCTAP Position */

+#define DWT_CTRL_CYCTAP_Msk                (0x1UL << DWT_CTRL_CYCTAP_Pos)              /*!< DWT CTRL: CYCTAP Mask */

+

+#define DWT_CTRL_POSTINIT_Pos               5                                          /*!< DWT CTRL: POSTINIT Position */

+#define DWT_CTRL_POSTINIT_Msk              (0xFUL << DWT_CTRL_POSTINIT_Pos)            /*!< DWT CTRL: POSTINIT Mask */

+

+#define DWT_CTRL_POSTPRESET_Pos             1                                          /*!< DWT CTRL: POSTPRESET Position */

+#define DWT_CTRL_POSTPRESET_Msk            (0xFUL << DWT_CTRL_POSTPRESET_Pos)          /*!< DWT CTRL: POSTPRESET Mask */

+

+#define DWT_CTRL_CYCCNTENA_Pos              0                                          /*!< DWT CTRL: CYCCNTENA Position */

+#define DWT_CTRL_CYCCNTENA_Msk             (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/)       /*!< DWT CTRL: CYCCNTENA Mask */

+

+/* DWT CPI Count Register Definitions */

+#define DWT_CPICNT_CPICNT_Pos               0                                          /*!< DWT CPICNT: CPICNT Position */

+#define DWT_CPICNT_CPICNT_Msk              (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/)       /*!< DWT CPICNT: CPICNT Mask */

+

+/* DWT Exception Overhead Count Register Definitions */

+#define DWT_EXCCNT_EXCCNT_Pos               0                                          /*!< DWT EXCCNT: EXCCNT Position */

+#define DWT_EXCCNT_EXCCNT_Msk              (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/)       /*!< DWT EXCCNT: EXCCNT Mask */

+

+/* DWT Sleep Count Register Definitions */

+#define DWT_SLEEPCNT_SLEEPCNT_Pos           0                                          /*!< DWT SLEEPCNT: SLEEPCNT Position */

+#define DWT_SLEEPCNT_SLEEPCNT_Msk          (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/)   /*!< DWT SLEEPCNT: SLEEPCNT Mask */

+

+/* DWT LSU Count Register Definitions */

+#define DWT_LSUCNT_LSUCNT_Pos               0                                          /*!< DWT LSUCNT: LSUCNT Position */

+#define DWT_LSUCNT_LSUCNT_Msk              (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/)       /*!< DWT LSUCNT: LSUCNT Mask */

+

+/* DWT Folded-instruction Count Register Definitions */

+#define DWT_FOLDCNT_FOLDCNT_Pos             0                                          /*!< DWT FOLDCNT: FOLDCNT Position */

+#define DWT_FOLDCNT_FOLDCNT_Msk            (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/)     /*!< DWT FOLDCNT: FOLDCNT Mask */

+

+/* DWT Comparator Mask Register Definitions */

+#define DWT_MASK_MASK_Pos                   0                                          /*!< DWT MASK: MASK Position */

+#define DWT_MASK_MASK_Msk                  (0x1FUL /*<< DWT_MASK_MASK_Pos*/)           /*!< DWT MASK: MASK Mask */

+

+/* DWT Comparator Function Register Definitions */

+#define DWT_FUNCTION_MATCHED_Pos           24                                          /*!< DWT FUNCTION: MATCHED Position */

+#define DWT_FUNCTION_MATCHED_Msk           (0x1UL << DWT_FUNCTION_MATCHED_Pos)         /*!< DWT FUNCTION: MATCHED Mask */

+

+#define DWT_FUNCTION_DATAVADDR1_Pos        16                                          /*!< DWT FUNCTION: DATAVADDR1 Position */

+#define DWT_FUNCTION_DATAVADDR1_Msk        (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos)      /*!< DWT FUNCTION: DATAVADDR1 Mask */

+

+#define DWT_FUNCTION_DATAVADDR0_Pos        12                                          /*!< DWT FUNCTION: DATAVADDR0 Position */

+#define DWT_FUNCTION_DATAVADDR0_Msk        (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos)      /*!< DWT FUNCTION: DATAVADDR0 Mask */

+

+#define DWT_FUNCTION_DATAVSIZE_Pos         10                                          /*!< DWT FUNCTION: DATAVSIZE Position */

+#define DWT_FUNCTION_DATAVSIZE_Msk         (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos)       /*!< DWT FUNCTION: DATAVSIZE Mask */

+

+#define DWT_FUNCTION_LNK1ENA_Pos            9                                          /*!< DWT FUNCTION: LNK1ENA Position */

+#define DWT_FUNCTION_LNK1ENA_Msk           (0x1UL << DWT_FUNCTION_LNK1ENA_Pos)         /*!< DWT FUNCTION: LNK1ENA Mask */

+

+#define DWT_FUNCTION_DATAVMATCH_Pos         8                                          /*!< DWT FUNCTION: DATAVMATCH Position */

+#define DWT_FUNCTION_DATAVMATCH_Msk        (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos)      /*!< DWT FUNCTION: DATAVMATCH Mask */

+

+#define DWT_FUNCTION_CYCMATCH_Pos           7                                          /*!< DWT FUNCTION: CYCMATCH Position */

+#define DWT_FUNCTION_CYCMATCH_Msk          (0x1UL << DWT_FUNCTION_CYCMATCH_Pos)        /*!< DWT FUNCTION: CYCMATCH Mask */

+

+#define DWT_FUNCTION_EMITRANGE_Pos          5                                          /*!< DWT FUNCTION: EMITRANGE Position */

+#define DWT_FUNCTION_EMITRANGE_Msk         (0x1UL << DWT_FUNCTION_EMITRANGE_Pos)       /*!< DWT FUNCTION: EMITRANGE Mask */

+

+#define DWT_FUNCTION_FUNCTION_Pos           0                                          /*!< DWT FUNCTION: FUNCTION Position */

+#define DWT_FUNCTION_FUNCTION_Msk          (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/)    /*!< DWT FUNCTION: FUNCTION Mask */

+

+/*@}*/ /* end of group CMSIS_DWT */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_TPI     Trace Port Interface (TPI)

+    \brief      Type definitions for the Trace Port Interface (TPI)

+  @{

+ */

+

+/** \brief  Structure type to access the Trace Port Interface Register (TPI).

+

+    Memory-mapped register layout of the TPIU.  The RESERVEDn word arrays pad

+    the structure so that each named member lands on the byte offset given in

+    its trailing comment (e.g. RESERVED0[2] covers 0x008-0x00C so ACPR sits at

+    0x010).  Do not reorder members or resize the pads.

+    NOTE(review): the vendor header names the pad after CLAIMCLR "RESERVED7"

+    (there is no RESERVED6) -- kept verbatim to match upstream CMSIS.

+ */

+typedef struct

+{

+  __IO uint32_t SSPSR;                   /*!< Offset: 0x000 (R/ )  Supported Parallel Port Size Register     */

+  __IO uint32_t CSPSR;                   /*!< Offset: 0x004 (R/W)  Current Parallel Port Size Register */

+       uint32_t RESERVED0[2];

+  __IO uint32_t ACPR;                    /*!< Offset: 0x010 (R/W)  Asynchronous Clock Prescaler Register */

+       uint32_t RESERVED1[55];

+  __IO uint32_t SPPR;                    /*!< Offset: 0x0F0 (R/W)  Selected Pin Protocol Register */

+       uint32_t RESERVED2[131];

+  __I  uint32_t FFSR;                    /*!< Offset: 0x300 (R/ )  Formatter and Flush Status Register */

+  __IO uint32_t FFCR;                    /*!< Offset: 0x304 (R/W)  Formatter and Flush Control Register */

+  __I  uint32_t FSCR;                    /*!< Offset: 0x308 (R/ )  Formatter Synchronization Counter Register */

+       uint32_t RESERVED3[759];

+  __I  uint32_t TRIGGER;                 /*!< Offset: 0xEE8 (R/ )  TRIGGER */

+  __I  uint32_t FIFO0;                   /*!< Offset: 0xEEC (R/ )  Integration ETM Data */

+  __I  uint32_t ITATBCTR2;               /*!< Offset: 0xEF0 (R/ )  ITATBCTR2 */

+       uint32_t RESERVED4[1];

+  __I  uint32_t ITATBCTR0;               /*!< Offset: 0xEF8 (R/ )  ITATBCTR0 */

+  __I  uint32_t FIFO1;                   /*!< Offset: 0xEFC (R/ )  Integration ITM Data */

+  __IO uint32_t ITCTRL;                  /*!< Offset: 0xF00 (R/W)  Integration Mode Control */

+       uint32_t RESERVED5[39];

+  __IO uint32_t CLAIMSET;                /*!< Offset: 0xFA0 (R/W)  Claim tag set */

+  __IO uint32_t CLAIMCLR;                /*!< Offset: 0xFA4 (R/W)  Claim tag clear */

+       uint32_t RESERVED7[8];

+  __I  uint32_t DEVID;                   /*!< Offset: 0xFC8 (R/ )  TPIU_DEVID */

+  __I  uint32_t DEVTYPE;                 /*!< Offset: 0xFCC (R/ )  TPIU_DEVTYPE */

+} TPI_Type;

+

+/* TPI Asynchronous Clock Prescaler Register Definitions */

+#define TPI_ACPR_PRESCALER_Pos              0                                          /*!< TPI ACPR: PRESCALER Position */

+#define TPI_ACPR_PRESCALER_Msk             (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/)    /*!< TPI ACPR: PRESCALER Mask */

+

+/* TPI Selected Pin Protocol Register Definitions */

+#define TPI_SPPR_TXMODE_Pos                 0                                          /*!< TPI SPPR: TXMODE Position */

+#define TPI_SPPR_TXMODE_Msk                (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/)          /*!< TPI SPPR: TXMODE Mask */

+

+/* TPI Formatter and Flush Status Register Definitions */

+#define TPI_FFSR_FtNonStop_Pos              3                                          /*!< TPI FFSR: FtNonStop Position */

+#define TPI_FFSR_FtNonStop_Msk             (0x1UL << TPI_FFSR_FtNonStop_Pos)           /*!< TPI FFSR: FtNonStop Mask */

+

+#define TPI_FFSR_TCPresent_Pos              2                                          /*!< TPI FFSR: TCPresent Position */

+#define TPI_FFSR_TCPresent_Msk             (0x1UL << TPI_FFSR_TCPresent_Pos)           /*!< TPI FFSR: TCPresent Mask */

+

+#define TPI_FFSR_FtStopped_Pos              1                                          /*!< TPI FFSR: FtStopped Position */

+#define TPI_FFSR_FtStopped_Msk             (0x1UL << TPI_FFSR_FtStopped_Pos)           /*!< TPI FFSR: FtStopped Mask */

+

+#define TPI_FFSR_FlInProg_Pos               0                                          /*!< TPI FFSR: FlInProg Position */

+#define TPI_FFSR_FlInProg_Msk              (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/)        /*!< TPI FFSR: FlInProg Mask */

+

+/* TPI Formatter and Flush Control Register Definitions */

+#define TPI_FFCR_TrigIn_Pos                 8                                          /*!< TPI FFCR: TrigIn Position */

+#define TPI_FFCR_TrigIn_Msk                (0x1UL << TPI_FFCR_TrigIn_Pos)              /*!< TPI FFCR: TrigIn Mask */

+

+#define TPI_FFCR_EnFCont_Pos                1                                          /*!< TPI FFCR: EnFCont Position */

+#define TPI_FFCR_EnFCont_Msk               (0x1UL << TPI_FFCR_EnFCont_Pos)             /*!< TPI FFCR: EnFCont Mask */

+

+/* TPI TRIGGER Register Definitions */

+#define TPI_TRIGGER_TRIGGER_Pos             0                                          /*!< TPI TRIGGER: TRIGGER Position */

+#define TPI_TRIGGER_TRIGGER_Msk            (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/)      /*!< TPI TRIGGER: TRIGGER Mask */

+

+/* TPI Integration ETM Data Register Definitions (FIFO0) */

+#define TPI_FIFO0_ITM_ATVALID_Pos          29                                          /*!< TPI FIFO0: ITM_ATVALID Position */

+#define TPI_FIFO0_ITM_ATVALID_Msk          (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos)        /*!< TPI FIFO0: ITM_ATVALID Mask */

+

+#define TPI_FIFO0_ITM_bytecount_Pos        27                                          /*!< TPI FIFO0: ITM_bytecount Position */

+#define TPI_FIFO0_ITM_bytecount_Msk        (0x3UL << TPI_FIFO0_ITM_bytecount_Pos)      /*!< TPI FIFO0: ITM_bytecount Mask */

+

+#define TPI_FIFO0_ETM_ATVALID_Pos          26                                          /*!< TPI FIFO0: ETM_ATVALID Position */

+#define TPI_FIFO0_ETM_ATVALID_Msk          (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos)        /*!< TPI FIFO0: ETM_ATVALID Mask */

+

+#define TPI_FIFO0_ETM_bytecount_Pos        24                                          /*!< TPI FIFO0: ETM_bytecount Position */

+#define TPI_FIFO0_ETM_bytecount_Msk        (0x3UL << TPI_FIFO0_ETM_bytecount_Pos)      /*!< TPI FIFO0: ETM_bytecount Mask */

+

+#define TPI_FIFO0_ETM2_Pos                 16                                          /*!< TPI FIFO0: ETM2 Position */

+#define TPI_FIFO0_ETM2_Msk                 (0xFFUL << TPI_FIFO0_ETM2_Pos)              /*!< TPI FIFO0: ETM2 Mask */

+

+#define TPI_FIFO0_ETM1_Pos                  8                                          /*!< TPI FIFO0: ETM1 Position */

+#define TPI_FIFO0_ETM1_Msk                 (0xFFUL << TPI_FIFO0_ETM1_Pos)              /*!< TPI FIFO0: ETM1 Mask */

+

+#define TPI_FIFO0_ETM0_Pos                  0                                          /*!< TPI FIFO0: ETM0 Position */

+#define TPI_FIFO0_ETM0_Msk                 (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/)          /*!< TPI FIFO0: ETM0 Mask */

+

+/* TPI ITATBCTR2 Register Definitions */

+#define TPI_ITATBCTR2_ATREADY_Pos           0                                          /*!< TPI ITATBCTR2: ATREADY Position */

+#define TPI_ITATBCTR2_ATREADY_Msk          (0x1UL /*<< TPI_ITATBCTR2_ATREADY_Pos*/)    /*!< TPI ITATBCTR2: ATREADY Mask */

+

+/* TPI Integration ITM Data Register Definitions (FIFO1) */

+#define TPI_FIFO1_ITM_ATVALID_Pos          29                                          /*!< TPI FIFO1: ITM_ATVALID Position */

+#define TPI_FIFO1_ITM_ATVALID_Msk          (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos)        /*!< TPI FIFO1: ITM_ATVALID Mask */

+

+#define TPI_FIFO1_ITM_bytecount_Pos        27                                          /*!< TPI FIFO1: ITM_bytecount Position */

+#define TPI_FIFO1_ITM_bytecount_Msk        (0x3UL << TPI_FIFO1_ITM_bytecount_Pos)      /*!< TPI FIFO1: ITM_bytecount Mask */

+

+#define TPI_FIFO1_ETM_ATVALID_Pos          26                                          /*!< TPI FIFO1: ETM_ATVALID Position */

+#define TPI_FIFO1_ETM_ATVALID_Msk          (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos)        /*!< TPI FIFO1: ETM_ATVALID Mask */

+

+#define TPI_FIFO1_ETM_bytecount_Pos        24                                          /*!< TPI FIFO1: ETM_bytecount Position */

+#define TPI_FIFO1_ETM_bytecount_Msk        (0x3UL << TPI_FIFO1_ETM_bytecount_Pos)      /*!< TPI FIFO1: ETM_bytecount Mask */

+

+#define TPI_FIFO1_ITM2_Pos                 16                                          /*!< TPI FIFO1: ITM2 Position */

+#define TPI_FIFO1_ITM2_Msk                 (0xFFUL << TPI_FIFO1_ITM2_Pos)              /*!< TPI FIFO1: ITM2 Mask */

+

+#define TPI_FIFO1_ITM1_Pos                  8                                          /*!< TPI FIFO1: ITM1 Position */

+#define TPI_FIFO1_ITM1_Msk                 (0xFFUL << TPI_FIFO1_ITM1_Pos)              /*!< TPI FIFO1: ITM1 Mask */

+

+#define TPI_FIFO1_ITM0_Pos                  0                                          /*!< TPI FIFO1: ITM0 Position */

+#define TPI_FIFO1_ITM0_Msk                 (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/)          /*!< TPI FIFO1: ITM0 Mask */

+

+/* TPI ITATBCTR0 Register Definitions */

+#define TPI_ITATBCTR0_ATREADY_Pos           0                                          /*!< TPI ITATBCTR0: ATREADY Position */

+#define TPI_ITATBCTR0_ATREADY_Msk          (0x1UL /*<< TPI_ITATBCTR0_ATREADY_Pos*/)    /*!< TPI ITATBCTR0: ATREADY Mask */

+

+/* TPI Integration Mode Control Register Definitions */

+#define TPI_ITCTRL_Mode_Pos                 0                                          /*!< TPI ITCTRL: Mode Position */

+#define TPI_ITCTRL_Mode_Msk                (0x1UL /*<< TPI_ITCTRL_Mode_Pos*/)          /*!< TPI ITCTRL: Mode Mask */

+

+/* TPI DEVID Register Definitions */

+#define TPI_DEVID_NRZVALID_Pos             11                                          /*!< TPI DEVID: NRZVALID Position */

+#define TPI_DEVID_NRZVALID_Msk             (0x1UL << TPI_DEVID_NRZVALID_Pos)           /*!< TPI DEVID: NRZVALID Mask */

+

+#define TPI_DEVID_MANCVALID_Pos            10                                          /*!< TPI DEVID: MANCVALID Position */

+#define TPI_DEVID_MANCVALID_Msk            (0x1UL << TPI_DEVID_MANCVALID_Pos)          /*!< TPI DEVID: MANCVALID Mask */

+

+#define TPI_DEVID_PTINVALID_Pos             9                                          /*!< TPI DEVID: PTINVALID Position */

+#define TPI_DEVID_PTINVALID_Msk            (0x1UL << TPI_DEVID_PTINVALID_Pos)          /*!< TPI DEVID: PTINVALID Mask */

+

+#define TPI_DEVID_MinBufSz_Pos              6                                          /*!< TPI DEVID: MinBufSz Position */

+#define TPI_DEVID_MinBufSz_Msk             (0x7UL << TPI_DEVID_MinBufSz_Pos)           /*!< TPI DEVID: MinBufSz Mask */

+

+#define TPI_DEVID_AsynClkIn_Pos             5                                          /*!< TPI DEVID: AsynClkIn Position */

+#define TPI_DEVID_AsynClkIn_Msk            (0x1UL << TPI_DEVID_AsynClkIn_Pos)          /*!< TPI DEVID: AsynClkIn Mask */

+

+#define TPI_DEVID_NrTraceInput_Pos          0                                          /*!< TPI DEVID: NrTraceInput Position */

+#define TPI_DEVID_NrTraceInput_Msk         (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/)  /*!< TPI DEVID: NrTraceInput Mask */

+

+/* TPI DEVTYPE Register Definitions */

+#define TPI_DEVTYPE_MajorType_Pos           4                                          /*!< TPI DEVTYPE: MajorType Position */

+#define TPI_DEVTYPE_MajorType_Msk          (0xFUL << TPI_DEVTYPE_MajorType_Pos)        /*!< TPI DEVTYPE: MajorType Mask */

+

+#define TPI_DEVTYPE_SubType_Pos             0                                          /*!< TPI DEVTYPE: SubType Position */

+#define TPI_DEVTYPE_SubType_Msk            (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/)      /*!< TPI DEVTYPE: SubType Mask */

+

+/*@}*/ /* end of group CMSIS_TPI */

+

+

+#if (__MPU_PRESENT == 1)

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_MPU     Memory Protection Unit (MPU)

+    \brief      Type definitions for the Memory Protection Unit (MPU)

+  @{

+ */

+

+/** \brief  Structure type to access the Memory Protection Unit (MPU).

+

+    Memory-mapped MPU register layout.  The RBAR_An/RASR_An pairs are the

+    alias views of the base-address / attribute-and-size registers; member

+    order fixes the hardware byte offsets shown in the trailing comments.

+ */

+typedef struct

+{

+  __I  uint32_t TYPE;                    /*!< Offset: 0x000 (R/ )  MPU Type Register                              */

+  __IO uint32_t CTRL;                    /*!< Offset: 0x004 (R/W)  MPU Control Register                           */

+  __IO uint32_t RNR;                     /*!< Offset: 0x008 (R/W)  MPU Region Number Register                     */

+  __IO uint32_t RBAR;                    /*!< Offset: 0x00C (R/W)  MPU Region Base Address Register               */

+  __IO uint32_t RASR;                    /*!< Offset: 0x010 (R/W)  MPU Region Attribute and Size Register         */

+  __IO uint32_t RBAR_A1;                 /*!< Offset: 0x014 (R/W)  MPU Alias 1 Region Base Address Register       */

+  __IO uint32_t RASR_A1;                 /*!< Offset: 0x018 (R/W)  MPU Alias 1 Region Attribute and Size Register */

+  __IO uint32_t RBAR_A2;                 /*!< Offset: 0x01C (R/W)  MPU Alias 2 Region Base Address Register       */

+  __IO uint32_t RASR_A2;                 /*!< Offset: 0x020 (R/W)  MPU Alias 2 Region Attribute and Size Register */

+  __IO uint32_t RBAR_A3;                 /*!< Offset: 0x024 (R/W)  MPU Alias 3 Region Base Address Register       */

+  __IO uint32_t RASR_A3;                 /*!< Offset: 0x028 (R/W)  MPU Alias 3 Region Attribute and Size Register */

+} MPU_Type;

+

+/* MPU Type Register */

+#define MPU_TYPE_IREGION_Pos               16                                             /*!< MPU TYPE: IREGION Position */

+#define MPU_TYPE_IREGION_Msk               (0xFFUL << MPU_TYPE_IREGION_Pos)               /*!< MPU TYPE: IREGION Mask */

+

+#define MPU_TYPE_DREGION_Pos                8                                             /*!< MPU TYPE: DREGION Position */

+#define MPU_TYPE_DREGION_Msk               (0xFFUL << MPU_TYPE_DREGION_Pos)               /*!< MPU TYPE: DREGION Mask */

+

+#define MPU_TYPE_SEPARATE_Pos               0                                             /*!< MPU TYPE: SEPARATE Position */

+#define MPU_TYPE_SEPARATE_Msk              (1UL /*<< MPU_TYPE_SEPARATE_Pos*/)             /*!< MPU TYPE: SEPARATE Mask */

+

+/* MPU Control Register */

+#define MPU_CTRL_PRIVDEFENA_Pos             2                                             /*!< MPU CTRL: PRIVDEFENA Position */

+#define MPU_CTRL_PRIVDEFENA_Msk            (1UL << MPU_CTRL_PRIVDEFENA_Pos)               /*!< MPU CTRL: PRIVDEFENA Mask */

+

+#define MPU_CTRL_HFNMIENA_Pos               1                                             /*!< MPU CTRL: HFNMIENA Position */

+#define MPU_CTRL_HFNMIENA_Msk              (1UL << MPU_CTRL_HFNMIENA_Pos)                 /*!< MPU CTRL: HFNMIENA Mask */

+

+#define MPU_CTRL_ENABLE_Pos                 0                                             /*!< MPU CTRL: ENABLE Position */

+#define MPU_CTRL_ENABLE_Msk                (1UL /*<< MPU_CTRL_ENABLE_Pos*/)               /*!< MPU CTRL: ENABLE Mask */

+

+/* MPU Region Number Register */

+#define MPU_RNR_REGION_Pos                  0                                             /*!< MPU RNR: REGION Position */

+#define MPU_RNR_REGION_Msk                 (0xFFUL /*<< MPU_RNR_REGION_Pos*/)             /*!< MPU RNR: REGION Mask */

+

+/* MPU Region Base Address Register */

+#define MPU_RBAR_ADDR_Pos                   5                                             /*!< MPU RBAR: ADDR Position */

+#define MPU_RBAR_ADDR_Msk                  (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos)             /*!< MPU RBAR: ADDR Mask */

+

+#define MPU_RBAR_VALID_Pos                  4                                             /*!< MPU RBAR: VALID Position */

+#define MPU_RBAR_VALID_Msk                 (1UL << MPU_RBAR_VALID_Pos)                    /*!< MPU RBAR: VALID Mask */

+

+#define MPU_RBAR_REGION_Pos                 0                                             /*!< MPU RBAR: REGION Position */

+#define MPU_RBAR_REGION_Msk                (0xFUL /*<< MPU_RBAR_REGION_Pos*/)             /*!< MPU RBAR: REGION Mask */

+

+/* MPU Region Attribute and Size Register */

+#define MPU_RASR_ATTRS_Pos                 16                                             /*!< MPU RASR: MPU Region Attribute field Position */

+#define MPU_RASR_ATTRS_Msk                 (0xFFFFUL << MPU_RASR_ATTRS_Pos)               /*!< MPU RASR: MPU Region Attribute field Mask */

+

+#define MPU_RASR_XN_Pos                    28                                             /*!< MPU RASR: ATTRS.XN Position */

+#define MPU_RASR_XN_Msk                    (1UL << MPU_RASR_XN_Pos)                       /*!< MPU RASR: ATTRS.XN Mask */

+

+#define MPU_RASR_AP_Pos                    24                                             /*!< MPU RASR: ATTRS.AP Position */

+#define MPU_RASR_AP_Msk                    (0x7UL << MPU_RASR_AP_Pos)                     /*!< MPU RASR: ATTRS.AP Mask */

+

+#define MPU_RASR_TEX_Pos                   19                                             /*!< MPU RASR: ATTRS.TEX Position */

+#define MPU_RASR_TEX_Msk                   (0x7UL << MPU_RASR_TEX_Pos)                    /*!< MPU RASR: ATTRS.TEX Mask */

+

+#define MPU_RASR_S_Pos                     18                                             /*!< MPU RASR: ATTRS.S Position */

+#define MPU_RASR_S_Msk                     (1UL << MPU_RASR_S_Pos)                        /*!< MPU RASR: ATTRS.S Mask */

+

+#define MPU_RASR_C_Pos                     17                                             /*!< MPU RASR: ATTRS.C Position */

+#define MPU_RASR_C_Msk                     (1UL << MPU_RASR_C_Pos)                        /*!< MPU RASR: ATTRS.C Mask */

+

+#define MPU_RASR_B_Pos                     16                                             /*!< MPU RASR: ATTRS.B Position */

+#define MPU_RASR_B_Msk                     (1UL << MPU_RASR_B_Pos)                        /*!< MPU RASR: ATTRS.B Mask */

+

+#define MPU_RASR_SRD_Pos                    8                                             /*!< MPU RASR: Sub-Region Disable Position */

+#define MPU_RASR_SRD_Msk                   (0xFFUL << MPU_RASR_SRD_Pos)                   /*!< MPU RASR: Sub-Region Disable Mask */

+

+#define MPU_RASR_SIZE_Pos                   1                                             /*!< MPU RASR: Region Size Field Position */

+#define MPU_RASR_SIZE_Msk                  (0x1FUL << MPU_RASR_SIZE_Pos)                  /*!< MPU RASR: Region Size Field Mask */

+

+#define MPU_RASR_ENABLE_Pos                 0                                             /*!< MPU RASR: Region enable bit Position */

+#define MPU_RASR_ENABLE_Msk                (1UL /*<< MPU_RASR_ENABLE_Pos*/)               /*!< MPU RASR: Region enable bit Disable Mask */

+

+/*@} end of group CMSIS_MPU */

+#endif

+

+

+#if (__FPU_PRESENT == 1)

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_FPU     Floating Point Unit (FPU)

+    \brief      Type definitions for the Floating Point Unit (FPU)

+  @{

+ */

+

+/** \brief  Structure type to access the Floating Point Unit (FPU).

+

+    Memory-mapped FPU register layout.  RESERVED0[1] pads offset 0x000 so

+    that FPCCR lands at 0x004 as required by the offsets in the trailing

+    comments; member order must not change.

+ */

+typedef struct

+{

+       uint32_t RESERVED0[1];

+  __IO uint32_t FPCCR;                   /*!< Offset: 0x004 (R/W)  Floating-Point Context Control Register               */

+  __IO uint32_t FPCAR;                   /*!< Offset: 0x008 (R/W)  Floating-Point Context Address Register               */

+  __IO uint32_t FPDSCR;                  /*!< Offset: 0x00C (R/W)  Floating-Point Default Status Control Register        */

+  __I  uint32_t MVFR0;                   /*!< Offset: 0x010 (R/ )  Media and FP Feature Register 0                       */

+  __I  uint32_t MVFR1;                   /*!< Offset: 0x014 (R/ )  Media and FP Feature Register 1                       */

+  __I  uint32_t MVFR2;                   /*!< Offset: 0x018 (R/ )  Media and FP Feature Register 2                       */

+} FPU_Type;

+

+/* Floating-Point Context Control Register */

+#define FPU_FPCCR_ASPEN_Pos                31                                             /*!< FPCCR: ASPEN bit Position */

+#define FPU_FPCCR_ASPEN_Msk                (1UL << FPU_FPCCR_ASPEN_Pos)                   /*!< FPCCR: ASPEN bit Mask */

+

+#define FPU_FPCCR_LSPEN_Pos                30                                             /*!< FPCCR: LSPEN Position */

+#define FPU_FPCCR_LSPEN_Msk                (1UL << FPU_FPCCR_LSPEN_Pos)                   /*!< FPCCR: LSPEN bit Mask */

+

+#define FPU_FPCCR_MONRDY_Pos                8                                             /*!< FPCCR: MONRDY Position */

+#define FPU_FPCCR_MONRDY_Msk               (1UL << FPU_FPCCR_MONRDY_Pos)                  /*!< FPCCR: MONRDY bit Mask */

+

+#define FPU_FPCCR_BFRDY_Pos                 6                                             /*!< FPCCR: BFRDY Position */

+#define FPU_FPCCR_BFRDY_Msk                (1UL << FPU_FPCCR_BFRDY_Pos)                   /*!< FPCCR: BFRDY bit Mask */

+

+#define FPU_FPCCR_MMRDY_Pos                 5                                             /*!< FPCCR: MMRDY Position */

+#define FPU_FPCCR_MMRDY_Msk                (1UL << FPU_FPCCR_MMRDY_Pos)                   /*!< FPCCR: MMRDY bit Mask */

+

+#define FPU_FPCCR_HFRDY_Pos                 4                                             /*!< FPCCR: HFRDY Position */

+#define FPU_FPCCR_HFRDY_Msk                (1UL << FPU_FPCCR_HFRDY_Pos)                   /*!< FPCCR: HFRDY bit Mask */

+

+#define FPU_FPCCR_THREAD_Pos                3                                             /*!< FPCCR: processor mode bit Position */

+#define FPU_FPCCR_THREAD_Msk               (1UL << FPU_FPCCR_THREAD_Pos)                  /*!< FPCCR: processor mode active bit Mask */

+

+#define FPU_FPCCR_USER_Pos                  1                                             /*!< FPCCR: privilege level bit Position */

+#define FPU_FPCCR_USER_Msk                 (1UL << FPU_FPCCR_USER_Pos)                    /*!< FPCCR: privilege level bit Mask */

+

+#define FPU_FPCCR_LSPACT_Pos                0                                             /*!< FPCCR: Lazy state preservation active bit Position */

+#define FPU_FPCCR_LSPACT_Msk               (1UL /*<< FPU_FPCCR_LSPACT_Pos*/)              /*!< FPCCR: Lazy state preservation active bit Mask */

+

+/* Floating-Point Context Address Register */

+#define FPU_FPCAR_ADDRESS_Pos               3                                             /*!< FPCAR: ADDRESS bit Position */

+#define FPU_FPCAR_ADDRESS_Msk              (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos)        /*!< FPCAR: ADDRESS bit Mask */

+

+/* Floating-Point Default Status Control Register */

+#define FPU_FPDSCR_AHP_Pos                 26                                             /*!< FPDSCR: AHP bit Position */

+#define FPU_FPDSCR_AHP_Msk                 (1UL << FPU_FPDSCR_AHP_Pos)                    /*!< FPDSCR: AHP bit Mask */

+

+#define FPU_FPDSCR_DN_Pos                  25                                             /*!< FPDSCR: DN bit Position */

+#define FPU_FPDSCR_DN_Msk                  (1UL << FPU_FPDSCR_DN_Pos)                     /*!< FPDSCR: DN bit Mask */

+

+#define FPU_FPDSCR_FZ_Pos                  24                                             /*!< FPDSCR: FZ bit Position */

+#define FPU_FPDSCR_FZ_Msk                  (1UL << FPU_FPDSCR_FZ_Pos)                     /*!< FPDSCR: FZ bit Mask */

+

+#define FPU_FPDSCR_RMode_Pos               22                                             /*!< FPDSCR: RMode bit Position */

+#define FPU_FPDSCR_RMode_Msk               (3UL << FPU_FPDSCR_RMode_Pos)                  /*!< FPDSCR: RMode bit Mask */

+

+/* Media and FP Feature Register 0 */

+#define FPU_MVFR0_FP_rounding_modes_Pos    28                                             /*!< MVFR0: FP rounding modes bits Position */

+#define FPU_MVFR0_FP_rounding_modes_Msk    (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos)     /*!< MVFR0: FP rounding modes bits Mask */

+

+#define FPU_MVFR0_Short_vectors_Pos        24                                             /*!< MVFR0: Short vectors bits Position */

+#define FPU_MVFR0_Short_vectors_Msk        (0xFUL << FPU_MVFR0_Short_vectors_Pos)         /*!< MVFR0: Short vectors bits Mask */

+

+#define FPU_MVFR0_Square_root_Pos          20                                             /*!< MVFR0: Square root bits Position */

+#define FPU_MVFR0_Square_root_Msk          (0xFUL << FPU_MVFR0_Square_root_Pos)           /*!< MVFR0: Square root bits Mask */

+

+#define FPU_MVFR0_Divide_Pos               16                                             /*!< MVFR0: Divide bits Position */

+#define FPU_MVFR0_Divide_Msk               (0xFUL << FPU_MVFR0_Divide_Pos)                /*!< MVFR0: Divide bits Mask */

+

+#define FPU_MVFR0_FP_excep_trapping_Pos    12                                             /*!< MVFR0: FP exception trapping bits Position */

+#define FPU_MVFR0_FP_excep_trapping_Msk    (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos)     /*!< MVFR0: FP exception trapping bits Mask */

+

+#define FPU_MVFR0_Double_precision_Pos      8                                             /*!< MVFR0: Double-precision bits Position */

+#define FPU_MVFR0_Double_precision_Msk     (0xFUL << FPU_MVFR0_Double_precision_Pos)      /*!< MVFR0: Double-precision bits Mask */

+

+#define FPU_MVFR0_Single_precision_Pos      4                                             /*!< MVFR0: Single-precision bits Position */

+#define FPU_MVFR0_Single_precision_Msk     (0xFUL << FPU_MVFR0_Single_precision_Pos)      /*!< MVFR0: Single-precision bits Mask */

+

+#define FPU_MVFR0_A_SIMD_registers_Pos      0                                             /*!< MVFR0: A_SIMD registers bits Position */

+#define FPU_MVFR0_A_SIMD_registers_Msk     (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/)  /*!< MVFR0: A_SIMD registers bits Mask */

+

+/* Media and FP Feature Register 1 */

+#define FPU_MVFR1_FP_fused_MAC_Pos         28                                             /*!< MVFR1: FP fused MAC bits Position */

+#define FPU_MVFR1_FP_fused_MAC_Msk         (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos)          /*!< MVFR1: FP fused MAC bits Mask */

+

+#define FPU_MVFR1_FP_HPFP_Pos              24                                             /*!< MVFR1: FP HPFP bits Position */

+#define FPU_MVFR1_FP_HPFP_Msk              (0xFUL << FPU_MVFR1_FP_HPFP_Pos)               /*!< MVFR1: FP HPFP bits Mask */

+

+#define FPU_MVFR1_D_NaN_mode_Pos            4                                             /*!< MVFR1: D_NaN mode bits Position */

+#define FPU_MVFR1_D_NaN_mode_Msk           (0xFUL << FPU_MVFR1_D_NaN_mode_Pos)            /*!< MVFR1: D_NaN mode bits Mask */

+

+#define FPU_MVFR1_FtZ_mode_Pos              0                                             /*!< MVFR1: FtZ mode bits Position */

+#define FPU_MVFR1_FtZ_mode_Msk             (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/)          /*!< MVFR1: FtZ mode bits Mask */

+

+/* Media and FP Feature Register 2 */

+

+/*@} end of group CMSIS_FPU */

+#endif

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_CoreDebug       Core Debug Registers (CoreDebug)

+    \brief      Type definitions for the Core Debug Registers

+  @{

+ */

+

+/** \brief  Structure type to access the Core Debug Register (CoreDebug).

+

+    Memory-mapped core debug register block.  DCRSR is write-only (__O);

+    the remaining members are read/write as indicated in the trailing

+    comments.  Member order fixes the hardware byte offsets.

+ */

+typedef struct

+{

+  __IO uint32_t DHCSR;                   /*!< Offset: 0x000 (R/W)  Debug Halting Control and Status Register    */

+  __O  uint32_t DCRSR;                   /*!< Offset: 0x004 ( /W)  Debug Core Register Selector Register        */

+  __IO uint32_t DCRDR;                   /*!< Offset: 0x008 (R/W)  Debug Core Register Data Register            */

+  __IO uint32_t DEMCR;                   /*!< Offset: 0x00C (R/W)  Debug Exception and Monitor Control Register */

+} CoreDebug_Type;

+

+/* Debug Halting Control and Status Register */

+#define CoreDebug_DHCSR_DBGKEY_Pos         16                                             /*!< CoreDebug DHCSR: DBGKEY Position */

+#define CoreDebug_DHCSR_DBGKEY_Msk         (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos)       /*!< CoreDebug DHCSR: DBGKEY Mask */

+

+#define CoreDebug_DHCSR_S_RESET_ST_Pos     25                                             /*!< CoreDebug DHCSR: S_RESET_ST Position */

+#define CoreDebug_DHCSR_S_RESET_ST_Msk     (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos)        /*!< CoreDebug DHCSR: S_RESET_ST Mask */

+

+#define CoreDebug_DHCSR_S_RETIRE_ST_Pos    24                                             /*!< CoreDebug DHCSR: S_RETIRE_ST Position */

+#define CoreDebug_DHCSR_S_RETIRE_ST_Msk    (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos)       /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */

+

+#define CoreDebug_DHCSR_S_LOCKUP_Pos       19                                             /*!< CoreDebug DHCSR: S_LOCKUP Position */

+#define CoreDebug_DHCSR_S_LOCKUP_Msk       (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos)          /*!< CoreDebug DHCSR: S_LOCKUP Mask */

+

+#define CoreDebug_DHCSR_S_SLEEP_Pos        18                                             /*!< CoreDebug DHCSR: S_SLEEP Position */

+#define CoreDebug_DHCSR_S_SLEEP_Msk        (1UL << CoreDebug_DHCSR_S_SLEEP_Pos)           /*!< CoreDebug DHCSR: S_SLEEP Mask */

+

+#define CoreDebug_DHCSR_S_HALT_Pos         17                                             /*!< CoreDebug DHCSR: S_HALT Position */

+#define CoreDebug_DHCSR_S_HALT_Msk         (1UL << CoreDebug_DHCSR_S_HALT_Pos)            /*!< CoreDebug DHCSR: S_HALT Mask */

+

+#define CoreDebug_DHCSR_S_REGRDY_Pos       16                                             /*!< CoreDebug DHCSR: S_REGRDY Position */

+#define CoreDebug_DHCSR_S_REGRDY_Msk       (1UL << CoreDebug_DHCSR_S_REGRDY_Pos)          /*!< CoreDebug DHCSR: S_REGRDY Mask */

+

+#define CoreDebug_DHCSR_C_SNAPSTALL_Pos     5                                             /*!< CoreDebug DHCSR: C_SNAPSTALL Position */

+#define CoreDebug_DHCSR_C_SNAPSTALL_Msk    (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos)       /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */

+

+#define CoreDebug_DHCSR_C_MASKINTS_Pos      3                                             /*!< CoreDebug DHCSR: C_MASKINTS Position */

+#define CoreDebug_DHCSR_C_MASKINTS_Msk     (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos)        /*!< CoreDebug DHCSR: C_MASKINTS Mask */

+

+#define CoreDebug_DHCSR_C_STEP_Pos          2                                             /*!< CoreDebug DHCSR: C_STEP Position */

+#define CoreDebug_DHCSR_C_STEP_Msk         (1UL << CoreDebug_DHCSR_C_STEP_Pos)            /*!< CoreDebug DHCSR: C_STEP Mask */

+

+#define CoreDebug_DHCSR_C_HALT_Pos          1                                             /*!< CoreDebug DHCSR: C_HALT Position */

+#define CoreDebug_DHCSR_C_HALT_Msk         (1UL << CoreDebug_DHCSR_C_HALT_Pos)            /*!< CoreDebug DHCSR: C_HALT Mask */

+

+#define CoreDebug_DHCSR_C_DEBUGEN_Pos       0                                             /*!< CoreDebug DHCSR: C_DEBUGEN Position */

+#define CoreDebug_DHCSR_C_DEBUGEN_Msk      (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/)     /*!< CoreDebug DHCSR: C_DEBUGEN Mask */

+

+/* Debug Core Register Selector Register */

+#define CoreDebug_DCRSR_REGWnR_Pos         16                                             /*!< CoreDebug DCRSR: REGWnR Position */

+#define CoreDebug_DCRSR_REGWnR_Msk         (1UL << CoreDebug_DCRSR_REGWnR_Pos)            /*!< CoreDebug DCRSR: REGWnR Mask */

+

+#define CoreDebug_DCRSR_REGSEL_Pos          0                                             /*!< CoreDebug DCRSR: REGSEL Position */

+#define CoreDebug_DCRSR_REGSEL_Msk         (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/)     /*!< CoreDebug DCRSR: REGSEL Mask */

+

+/* Debug Exception and Monitor Control Register */

+#define CoreDebug_DEMCR_TRCENA_Pos         24                                             /*!< CoreDebug DEMCR: TRCENA Position */

+#define CoreDebug_DEMCR_TRCENA_Msk         (1UL << CoreDebug_DEMCR_TRCENA_Pos)            /*!< CoreDebug DEMCR: TRCENA Mask */

+

+#define CoreDebug_DEMCR_MON_REQ_Pos        19                                             /*!< CoreDebug DEMCR: MON_REQ Position */

+#define CoreDebug_DEMCR_MON_REQ_Msk        (1UL << CoreDebug_DEMCR_MON_REQ_Pos)           /*!< CoreDebug DEMCR: MON_REQ Mask */

+

+#define CoreDebug_DEMCR_MON_STEP_Pos       18                                             /*!< CoreDebug DEMCR: MON_STEP Position */

+#define CoreDebug_DEMCR_MON_STEP_Msk       (1UL << CoreDebug_DEMCR_MON_STEP_Pos)          /*!< CoreDebug DEMCR: MON_STEP Mask */

+

+#define CoreDebug_DEMCR_MON_PEND_Pos       17                                             /*!< CoreDebug DEMCR: MON_PEND Position */

+#define CoreDebug_DEMCR_MON_PEND_Msk       (1UL << CoreDebug_DEMCR_MON_PEND_Pos)          /*!< CoreDebug DEMCR: MON_PEND Mask */

+

+#define CoreDebug_DEMCR_MON_EN_Pos         16                                             /*!< CoreDebug DEMCR: MON_EN Position */

+#define CoreDebug_DEMCR_MON_EN_Msk         (1UL << CoreDebug_DEMCR_MON_EN_Pos)            /*!< CoreDebug DEMCR: MON_EN Mask */

+

+#define CoreDebug_DEMCR_VC_HARDERR_Pos     10                                             /*!< CoreDebug DEMCR: VC_HARDERR Position */

+#define CoreDebug_DEMCR_VC_HARDERR_Msk     (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos)        /*!< CoreDebug DEMCR: VC_HARDERR Mask */

+

+#define CoreDebug_DEMCR_VC_INTERR_Pos       9                                             /*!< CoreDebug DEMCR: VC_INTERR Position */

+#define CoreDebug_DEMCR_VC_INTERR_Msk      (1UL << CoreDebug_DEMCR_VC_INTERR_Pos)         /*!< CoreDebug DEMCR: VC_INTERR Mask */

+

+#define CoreDebug_DEMCR_VC_BUSERR_Pos       8                                             /*!< CoreDebug DEMCR: VC_BUSERR Position */

+#define CoreDebug_DEMCR_VC_BUSERR_Msk      (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos)         /*!< CoreDebug DEMCR: VC_BUSERR Mask */

+

+#define CoreDebug_DEMCR_VC_STATERR_Pos      7                                             /*!< CoreDebug DEMCR: VC_STATERR Position */

+#define CoreDebug_DEMCR_VC_STATERR_Msk     (1UL << CoreDebug_DEMCR_VC_STATERR_Pos)        /*!< CoreDebug DEMCR: VC_STATERR Mask */

+

+#define CoreDebug_DEMCR_VC_CHKERR_Pos       6                                             /*!< CoreDebug DEMCR: VC_CHKERR Position */

+#define CoreDebug_DEMCR_VC_CHKERR_Msk      (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos)         /*!< CoreDebug DEMCR: VC_CHKERR Mask */

+

+#define CoreDebug_DEMCR_VC_NOCPERR_Pos      5                                             /*!< CoreDebug DEMCR: VC_NOCPERR Position */

+#define CoreDebug_DEMCR_VC_NOCPERR_Msk     (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos)        /*!< CoreDebug DEMCR: VC_NOCPERR Mask */

+

+#define CoreDebug_DEMCR_VC_MMERR_Pos        4                                             /*!< CoreDebug DEMCR: VC_MMERR Position */

+#define CoreDebug_DEMCR_VC_MMERR_Msk       (1UL << CoreDebug_DEMCR_VC_MMERR_Pos)          /*!< CoreDebug DEMCR: VC_MMERR Mask */

+

+#define CoreDebug_DEMCR_VC_CORERESET_Pos    0                                             /*!< CoreDebug DEMCR: VC_CORERESET Position */

+#define CoreDebug_DEMCR_VC_CORERESET_Msk   (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/)  /*!< CoreDebug DEMCR: VC_CORERESET Mask */

+

+/*@} end of group CMSIS_CoreDebug */

+

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_core_base     Core Definitions

+    \brief      Definitions for base addresses, unions, and structures.

+  @{

+ */

+

+/* Memory mapping of Cortex-M4 Hardware */

+#define SCS_BASE            (0xE000E000UL)                            /*!< System Control Space Base Address  */

+#define ITM_BASE            (0xE0000000UL)                            /*!< ITM Base Address                   */

+#define DWT_BASE            (0xE0001000UL)                            /*!< DWT Base Address                   */

+#define TPI_BASE            (0xE0040000UL)                            /*!< TPI Base Address                   */

+#define CoreDebug_BASE      (0xE000EDF0UL)                            /*!< Core Debug Base Address            */

+#define SysTick_BASE        (SCS_BASE +  0x0010UL)                    /*!< SysTick Base Address               */

+#define NVIC_BASE           (SCS_BASE +  0x0100UL)                    /*!< NVIC Base Address                  */

+#define SCB_BASE            (SCS_BASE +  0x0D00UL)                    /*!< System Control Block Base Address  */

+

+#define SCnSCB              ((SCnSCB_Type    *)     SCS_BASE      )   /*!< System control Register not in SCB */

+#define SCB                 ((SCB_Type       *)     SCB_BASE      )   /*!< SCB configuration struct           */

+#define SysTick             ((SysTick_Type   *)     SysTick_BASE  )   /*!< SysTick configuration struct       */

+#define NVIC                ((NVIC_Type      *)     NVIC_BASE     )   /*!< NVIC configuration struct          */

+#define ITM                 ((ITM_Type       *)     ITM_BASE      )   /*!< ITM configuration struct           */

+#define DWT                 ((DWT_Type       *)     DWT_BASE      )   /*!< DWT configuration struct           */

+#define TPI                 ((TPI_Type       *)     TPI_BASE      )   /*!< TPI configuration struct           */

+#define CoreDebug           ((CoreDebug_Type *)     CoreDebug_BASE)   /*!< Core Debug configuration struct    */

+

+#if (__MPU_PRESENT == 1)

+  #define MPU_BASE          (SCS_BASE +  0x0D90UL)                    /*!< Memory Protection Unit             */

+  #define MPU               ((MPU_Type       *)     MPU_BASE      )   /*!< Memory Protection Unit             */

+#endif

+

+#if (__FPU_PRESENT == 1)

+  #define FPU_BASE          (SCS_BASE +  0x0F30UL)                    /*!< Floating Point Unit                */

+  #define FPU               ((FPU_Type       *)     FPU_BASE      )   /*!< Floating Point Unit                */

+#endif

+

+/*@} */

+

+

+

+/*******************************************************************************

+ *                Hardware Abstraction Layer

+  Core Function Interface contains:

+  - Core NVIC Functions

+  - Core SysTick Functions

+  - Core Debug Functions

+  - Core Register Access Functions

+ ******************************************************************************/

+/** \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference

+*/

+

+

+

+/* ##########################   NVIC functions  #################################### */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_NVICFunctions NVIC Functions

+    \brief      Functions that manage interrupts and exceptions via the NVIC.

+    @{

+ */

+

+/** \brief  Set Priority Grouping

+

+  The function sets the priority grouping field using the required unlock sequence.

+  The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field.

+  Only values from 0..7 are used.

+  In case of a conflict between priority grouping and available

+  priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.

+

+    \param [in]      PriorityGroup  Priority grouping field.

+ */

+__STATIC_INLINE void NVIC_SetPriorityGrouping(uint32_t PriorityGroup)

+{

+  uint32_t reg_value;

+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);             /* only values 0..7 are used          */

+

+  reg_value  =  SCB->AIRCR;                                                   /* read old register configuration    */

+  reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk));             /* clear bits to change               */

+  reg_value  =  (reg_value                                   |

+                ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |

+                (PriorityGroupTmp << 8)                       );              /* Insert write key and priorty group */

+  SCB->AIRCR =  reg_value;

+}

+

+

+/** \brief  Get Priority Grouping

+

+  The function reads the priority grouping field from the NVIC Interrupt Controller.

+

+    \return                Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field).

+ */

+__STATIC_INLINE uint32_t NVIC_GetPriorityGrouping(void)

+{

+  return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos));

+}

+

+

+/** \brief  Enable External Interrupt

+

+    The function enables a device-specific interrupt in the NVIC interrupt controller.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_EnableIRQ(IRQn_Type IRQn)

+{

+  NVIC->ISER[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Disable External Interrupt

+

+    The function disables a device-specific interrupt in the NVIC interrupt controller.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_DisableIRQ(IRQn_Type IRQn)

+{

+  NVIC->ICER[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Get Pending Interrupt

+

+    The function reads the pending register in the NVIC and returns the pending bit

+    for the specified interrupt.

+

+    \param [in]      IRQn  Interrupt number.

+

+    \return             0  Interrupt status is not pending.

+    \return             1  Interrupt status is pending.

+ */

+__STATIC_INLINE uint32_t NVIC_GetPendingIRQ(IRQn_Type IRQn)

+{

+  return((uint32_t)(((NVIC->ISPR[(((uint32_t)(int32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));

+}

+

+

+/** \brief  Set Pending Interrupt

+

+    The function sets the pending bit of an external interrupt.

+

+    \param [in]      IRQn  Interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_SetPendingIRQ(IRQn_Type IRQn)

+{

+  NVIC->ISPR[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Clear Pending Interrupt

+

+    The function clears the pending bit of an external interrupt.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_ClearPendingIRQ(IRQn_Type IRQn)

+{

+  NVIC->ICPR[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Get Active Interrupt

+

+    The function reads the active register in NVIC and returns the active bit.

+

+    \param [in]      IRQn  Interrupt number.

+

+    \return             0  Interrupt status is not active.

+    \return             1  Interrupt status is active.

+ */

+__STATIC_INLINE uint32_t NVIC_GetActive(IRQn_Type IRQn)

+{

+  return((uint32_t)(((NVIC->IABR[(((uint32_t)(int32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));

+}

+

+

+/** \brief  Set Interrupt Priority

+

+    The function sets the priority of an interrupt.

+

+    \note The priority cannot be set for every core interrupt.

+

+    \param [in]      IRQn  Interrupt number.

+    \param [in]  priority  Priority to set.

+ */

+__STATIC_INLINE void NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)

+{

+  if((int32_t)IRQn < 0) {

+    SCB->SHPR[(((uint32_t)(int32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL);

+  }

+  else {

+    NVIC->IP[((uint32_t)(int32_t)IRQn)]                = (uint8_t)((priority << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL);

+  }

+}

+

+

+/** \brief  Get Interrupt Priority

+

+    The function reads the priority of an interrupt. The interrupt

+    number can be positive to specify an external (device specific)

+    interrupt, or negative to specify an internal (core) interrupt.

+

+

+    \param [in]   IRQn  Interrupt number.

+    \return             Interrupt Priority. Value is aligned automatically to the implemented

+                        priority bits of the microcontroller.

+ */

+__STATIC_INLINE uint32_t NVIC_GetPriority(IRQn_Type IRQn)

+{

+

+  if((int32_t)IRQn < 0) {

+    return(((uint32_t)SCB->SHPR[(((uint32_t)(int32_t)IRQn) & 0xFUL)-4UL] >> (8 - __NVIC_PRIO_BITS)));

+  }

+  else {

+    return(((uint32_t)NVIC->IP[((uint32_t)(int32_t)IRQn)]               >> (8 - __NVIC_PRIO_BITS)));

+  }

+}

+

+

+/** \brief  Encode Priority

+

+    The function encodes the priority for an interrupt with the given priority group,

+    preemptive priority value, and subpriority value.

+    In case of a conflict between priority grouping and available

+    priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.

+

+    \param [in]     PriorityGroup  Used priority group.

+    \param [in]   PreemptPriority  Preemptive priority value (starting from 0).

+    \param [in]       SubPriority  Subpriority value (starting from 0).

+    \return                        Encoded priority. Value can be used in the function \ref NVIC_SetPriority().

+ */

+__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority)

+{

+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);   /* only values 0..7 are used          */

+  uint32_t PreemptPriorityBits;

+  uint32_t SubPriorityBits;

+

+  PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);

+  SubPriorityBits     = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));

+

+  return (

+           ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) |

+           ((SubPriority     & (uint32_t)((1UL << (SubPriorityBits    )) - 1UL)))

+         );

+}

+

+

+/** \brief  Decode Priority

+

+    The function decodes an interrupt priority value with a given priority group to

+    preemptive priority value and subpriority value.

+    In case of a conflict between priority grouping and available

+    priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set.

+

+    \param [in]         Priority   Priority value, which can be retrieved with the function \ref NVIC_GetPriority().

+    \param [in]     PriorityGroup  Used priority group.

+    \param [out] pPreemptPriority  Preemptive priority value (starting from 0).

+    \param [out]     pSubPriority  Subpriority value (starting from 0).

+ */

+__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* pPreemptPriority, uint32_t* pSubPriority)

+{

+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);   /* only values 0..7 are used          */

+  uint32_t PreemptPriorityBits;

+  uint32_t SubPriorityBits;

+

+  PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);

+  SubPriorityBits     = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));

+

+  *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL);

+  *pSubPriority     = (Priority                   ) & (uint32_t)((1UL << (SubPriorityBits    )) - 1UL);

+}

+

+

+/** \brief  System Reset

+

+    The function initiates a system reset request to reset the MCU.

+ */

__STATIC_INLINE void NVIC_SystemReset(void)
{
  __DSB();                                                          /* Ensure all outstanding memory accesses, including
                                                                       buffered writes, are completed before reset */
  SCB->AIRCR  = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos)    |  /* 0x5FA is the mandatory AIRCR write key (VECTKEY) */
                           (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) |
                            SCB_AIRCR_SYSRESETREQ_Msk    );         /* Keep priority group unchanged */
  __DSB();                                                          /* Ensure completion of memory access */
  while(1) { __NOP(); }                                             /* spin until the reset takes effect */
}

+

+/*@} end of CMSIS_Core_NVICFunctions */

+

+

+/* ##########################  FPU functions  #################################### */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_FpuFunctions FPU Functions

+    \brief      Function that provides FPU type.

+    @{

+ */

+

+/**

+  \fn          uint32_t SCB_GetFPUType(void)

+  \brief       get FPU type

+  \returns

+   - \b  0: No FPU

+   - \b  1: Single precision FPU

+   - \b  2: Double + Single precision FPU

+ */

+__STATIC_INLINE uint32_t SCB_GetFPUType(void)

+{

+  uint32_t mvfr0;

+

+  mvfr0 = SCB->MVFR0;

+  if        ((mvfr0 & 0x00000FF0UL) == 0x220UL) {

+    return 2UL;           // Double + Single precision FPU

+  } else if ((mvfr0 & 0x00000FF0UL) == 0x020UL) {

+    return 1UL;           // Single precision FPU

+  } else {

+    return 0UL;           // No FPU

+  }

+}

+

+

+/*@} end of CMSIS_Core_FpuFunctions */

+

+

+

+/* ##########################  Cache functions  #################################### */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_CacheFunctions Cache Functions

+    \brief      Functions that configure Instruction and Data cache.

+    @{

+ */

+

/* Cache Size ID Register Macros */
/* Decode fields of SCB->CCSIDR: associativity (max way index), number of
   sets (max set index), and the encoded line-size value used to build
   set/way cache-maintenance operands. LINESIZE sits at bit 0, so its
   shift is commented out. */
#define CCSIDR_WAYS(x)         (((x) & SCB_CCSIDR_ASSOCIATIVITY_Msk) >> SCB_CCSIDR_ASSOCIATIVITY_Pos)
#define CCSIDR_SETS(x)         (((x) & SCB_CCSIDR_NUMSETS_Msk      ) >> SCB_CCSIDR_NUMSETS_Pos      )
#define CCSIDR_LSSHIFT(x)      (((x) & SCB_CCSIDR_LINESIZE_Msk     ) /*>> SCB_CCSIDR_LINESIZE_Pos*/ )

+

+

+/** \brief Enable I-Cache

+

+    The function turns on I-Cache

+  */

__STATIC_INLINE void SCB_EnableICache (void)
{
  #if (__ICACHE_PRESENT == 1)
    __DSB();                                /* barriers: complete outstanding accesses first */
    __ISB();
    SCB->ICIALLU = 0UL;                     // invalidate I-Cache
    SCB->CCR |=  (uint32_t)SCB_CCR_IC_Msk;  // enable I-Cache
    __DSB();                                /* ensure the cache enable is visible */
    __ISB();                                /* flush the pipeline after the change */
  #endif
}

+

+

+/** \brief Disable I-Cache

+

+    The function turns off I-Cache

+  */

__STATIC_INLINE void SCB_DisableICache (void)
{
  #if (__ICACHE_PRESENT == 1)
    __DSB();                                /* barriers: complete outstanding accesses first */
    __ISB();
    SCB->CCR &= ~(uint32_t)SCB_CCR_IC_Msk;  // disable I-Cache
    SCB->ICIALLU = 0UL;                     // invalidate I-Cache
    __DSB();                                /* ensure the disable/invalidate completed */
    __ISB();                                /* flush the pipeline after the change */
  #endif
}

+

+

+/** \brief Invalidate I-Cache

+

+    The function invalidates I-Cache

+  */

__STATIC_INLINE void SCB_InvalidateICache (void)
{
  #if (__ICACHE_PRESENT == 1)
    __DSB();                                /* barriers: complete outstanding accesses first */
    __ISB();
    SCB->ICIALLU = 0UL;                     /* invalidate the entire I-Cache */
    __DSB();                                /* ensure the invalidate completed */
    __ISB();                                /* flush the pipeline */
  #endif
}

+

+

+/** \brief Enable D-Cache

+

+    The function turns on D-Cache

+  */

__STATIC_INLINE void SCB_EnableDCache (void)
{
  #if (__DCACHE_PRESENT == 1)
    uint32_t ccsidr, sshift, wshift, sw;
    uint32_t sets, ways;

    SCB->CSSELR = (0UL << 1) | 0UL;         // Level 1 data cache
    ccsidr  = SCB->CCSIDR;                  /* geometry of the selected cache */
    sets    = (uint32_t)(CCSIDR_SETS(ccsidr));          /* max set index */
    sshift  = (uint32_t)(CCSIDR_LSSHIFT(ccsidr) + 4UL); /* set-index shift derived from the encoded line size */
    ways    = (uint32_t)(CCSIDR_WAYS(ccsidr));          /* max way index */
    wshift  = (uint32_t)((uint32_t)__CLZ(ways) & 0x1FUL); /* way index goes in the top bits of DCISW */

    __DSB();

    do {                                   // invalidate D-Cache, one set/way operand at a time
         uint32_t tmpways = ways;
         do {
              sw = ((tmpways << wshift) | (sets << sshift));
              SCB->DCISW = sw;
            } while(tmpways--);
        } while(sets--);
    __DSB();

    SCB->CCR |=  (uint32_t)SCB_CCR_DC_Msk;   // enable D-Cache (only after it is fully invalidated)

    __DSB();
    __ISB();
  #endif
}

+

+

+/** \brief Disable D-Cache

+

+    The function turns off D-Cache

+  */

__STATIC_INLINE void SCB_DisableDCache (void)
{
  #if (__DCACHE_PRESENT == 1)
    uint32_t ccsidr, sshift, wshift, sw;
    uint32_t sets, ways;

    SCB->CSSELR = (0UL << 1) | 0UL;         // Level 1 data cache
    ccsidr  = SCB->CCSIDR;                  /* geometry of the selected cache */
    sets    = (uint32_t)(CCSIDR_SETS(ccsidr));          /* max set index */
    sshift  = (uint32_t)(CCSIDR_LSSHIFT(ccsidr) + 4UL); /* set-index shift derived from the encoded line size */
    ways    = (uint32_t)(CCSIDR_WAYS(ccsidr));          /* max way index */
    wshift  = (uint32_t)((uint32_t)__CLZ(ways) & 0x1FUL); /* way index goes in the top bits of DCCISW */

    __DSB();

    SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk;  // disable D-Cache before cleaning it out

    do {                                    // clean & invalidate D-Cache by set/way
         uint32_t tmpways = ways;
         do {
              sw = ((tmpways << wshift) | (sets << sshift));
              SCB->DCCISW = sw;
            } while(tmpways--);
        } while(sets--);

    __DSB();
    __ISB();
  #endif
}

+

+

+/** \brief Invalidate D-Cache

+

+    The function invalidates D-Cache

+  */

__STATIC_INLINE void SCB_InvalidateDCache (void)
{
  #if (__DCACHE_PRESENT == 1)
    uint32_t ccsidr, sshift, wshift, sw;
    uint32_t sets, ways;

    SCB->CSSELR = (0UL << 1) | 0UL;         // Level 1 data cache
    ccsidr  = SCB->CCSIDR;                  /* geometry of the selected cache */
    sets    = (uint32_t)(CCSIDR_SETS(ccsidr));          /* max set index */
    sshift  = (uint32_t)(CCSIDR_LSSHIFT(ccsidr) + 4UL); /* set-index shift derived from the encoded line size */
    ways    = (uint32_t)(CCSIDR_WAYS(ccsidr));          /* max way index */
    wshift  = (uint32_t)((uint32_t)__CLZ(ways) & 0x1FUL); /* way index goes in the top bits of DCISW */

    __DSB();

    do {                                    // invalidate D-Cache by set/way (dirty lines are discarded)
         uint32_t tmpways = ways;
         do {
              sw = ((tmpways << wshift) | (sets << sshift));
              SCB->DCISW = sw;
            } while(tmpways--);
        } while(sets--);

    __DSB();
    __ISB();
  #endif
}

+

+

+/** \brief Clean D-Cache

+

+    The function cleans D-Cache

+  */

__STATIC_INLINE void SCB_CleanDCache (void)
{
  #if (__DCACHE_PRESENT == 1)
    uint32_t ccsidr, sshift, wshift, sw;
    uint32_t sets, ways;

    SCB->CSSELR = (0UL << 1) | 0UL;         // Level 1 data cache
    ccsidr  = SCB->CCSIDR;                  /* geometry of the selected cache */
    sets    = (uint32_t)(CCSIDR_SETS(ccsidr));          /* max set index */
    sshift  = (uint32_t)(CCSIDR_LSSHIFT(ccsidr) + 4UL); /* set-index shift derived from the encoded line size */
    ways    = (uint32_t)(CCSIDR_WAYS(ccsidr));          /* max way index */
    wshift  = (uint32_t)((uint32_t)__CLZ(ways) & 0x1FUL); /* way index goes in the top bits of DCCSW */

    __DSB();

    do {                                    // clean D-Cache by set/way (write back dirty lines, keep them valid)
         uint32_t tmpways = ways;
         do {
              sw = ((tmpways << wshift) | (sets << sshift));
              SCB->DCCSW = sw;
            } while(tmpways--);
        } while(sets--);

    __DSB();
    __ISB();
  #endif
}

+

+

+/** \brief Clean & Invalidate D-Cache

+

+    The function cleans and Invalidates D-Cache

+  */

__STATIC_INLINE void SCB_CleanInvalidateDCache (void)
{
  #if (__DCACHE_PRESENT == 1)
    uint32_t ccsidr, sshift, wshift, sw;
    uint32_t sets, ways;

    SCB->CSSELR = (0UL << 1) | 0UL;         // Level 1 data cache
    ccsidr  = SCB->CCSIDR;                  /* geometry of the selected cache */
    sets    = (uint32_t)(CCSIDR_SETS(ccsidr));          /* max set index */
    sshift  = (uint32_t)(CCSIDR_LSSHIFT(ccsidr) + 4UL); /* set-index shift derived from the encoded line size */
    ways    = (uint32_t)(CCSIDR_WAYS(ccsidr));          /* max way index */
    wshift  = (uint32_t)((uint32_t)__CLZ(ways) & 0x1FUL); /* way index goes in the top bits of DCCISW */

    __DSB();

    do {                                    // clean & invalidate D-Cache by set/way
         uint32_t tmpways = ways;
         do {
              sw = ((tmpways << wshift) | (sets << sshift));
              SCB->DCCISW = sw;
            } while(tmpways--);
        } while(sets--);

    __DSB();
    __ISB();
  #endif
}

+

+

+/**

+  \fn          void SCB_InvalidateDCache_by_Addr(volatile uint32_t *addr, int32_t dsize)

+  \brief       D-Cache Invalidate by address

+  \param[in]   addr    address (aligned to 32-byte boundary)

+  \param[in]   dsize   size of memory block (in number of bytes)

+*/

+__STATIC_INLINE void SCB_InvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize)

+{

+  #if (__DCACHE_PRESENT == 1)

+    int32_t  op_size = dsize;

+    uint32_t op_addr = (uint32_t)addr;

+    uint32_t linesize = 32UL;               // in Cortex-M7 size of cache line is fixed to 8 words (32 bytes)

+

+    __DSB();

+

+    while (op_size > 0) {

+      SCB->DCIMVAC = op_addr;

+      op_addr +=          linesize;

+      op_size -= (int32_t)linesize;

+    }

+

+    __DSB();

+    __ISB();

+  #endif

+}

+

+

+/**

+  \fn          void SCB_CleanDCache_by_Addr(volatile uint32_t *addr, int32_t dsize)

+  \brief       D-Cache Clean by address

+  \param[in]   addr    address (aligned to 32-byte boundary)

+  \param[in]   dsize   size of memory block (in number of bytes)

+*/

+__STATIC_INLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize)

+{

+  #if (__DCACHE_PRESENT == 1)

+    int32_t  op_size = dsize;

+    uint32_t op_addr = (uint32_t) addr;

+    uint32_t linesize = 32UL;               // in Cortex-M7 size of cache line is fixed to 8 words (32 bytes)

+

+    __DSB();

+

+    while (op_size > 0) {

+      SCB->DCCMVAC = op_addr;

+      op_addr +=          linesize;

+      op_size -= (int32_t)linesize;

+    }

+

+    __DSB();

+    __ISB();

+  #endif

+}

+

+

+/**

+  \fn          void SCB_CleanInvalidateDCache_by_Addr(volatile uint32_t *addr, int32_t dsize)

+  \brief       D-Cache Clean and Invalidate by address

+  \param[in]   addr    address (aligned to 32-byte boundary)

+  \param[in]   dsize   size of memory block (in number of bytes)

+*/

+__STATIC_INLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize)

+{

+  #if (__DCACHE_PRESENT == 1)

+    int32_t  op_size = dsize;

+    uint32_t op_addr = (uint32_t) addr;

+    uint32_t linesize = 32UL;               // in Cortex-M7 size of cache line is fixed to 8 words (32 bytes)

+

+    __DSB();

+

+    while (op_size > 0) {

+      SCB->DCCIMVAC = op_addr;

+      op_addr +=          linesize;

+      op_size -= (int32_t)linesize;

+    }

+

+    __DSB();

+    __ISB();

+  #endif

+}

+

+

+/*@} end of CMSIS_Core_CacheFunctions */

+

+

+

+/* ##################################    SysTick function  ############################################ */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_SysTickFunctions SysTick Functions

+    \brief      Functions that configure the System.

+  @{

+ */

+

+#if (__Vendor_SysTickConfig == 0)

+

+/** \brief  System Tick Configuration

+

+    The function initializes the System Timer and its interrupt, and starts the System Tick Timer.

+    Counter is in free running mode to generate periodic interrupts.

+

+    \param [in]  ticks  Number of ticks between two interrupts.

+

+    \return          0  Function succeeded.

+    \return          1  Function failed.

+

+    \note     When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the

+    function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>

+    must contain a vendor-specific implementation of this function.

+

+ */

+__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)

+{

+  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) { return (1UL); }    /* Reload value impossible */

+

+  SysTick->LOAD  = (uint32_t)(ticks - 1UL);                         /* set reload register */

+  NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */

+  SysTick->VAL   = 0UL;                                             /* Load the SysTick Counter Value */

+  SysTick->CTRL  = SysTick_CTRL_CLKSOURCE_Msk |

+                   SysTick_CTRL_TICKINT_Msk   |

+                   SysTick_CTRL_ENABLE_Msk;                         /* Enable SysTick IRQ and SysTick Timer */

+  return (0UL);                                                     /* Function successful */

+}

+

+#endif

+

+/*@} end of CMSIS_Core_SysTickFunctions */

+

+

+

+/* ##################################### Debug In/Output function ########################################### */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_core_DebugFunctions ITM Functions

+    \brief   Functions that access the ITM debug interface.

+  @{

+ */

+

+extern volatile int32_t ITM_RxBuffer;                    /*!< External variable to receive characters.                         */

+#define                 ITM_RXBUFFER_EMPTY    0x5AA55AA5 /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */

+

+

+/** \brief  ITM Send Character

+

+    The function transmits a character via the ITM channel 0, and

+    \li Just returns when no debugger is connected that has booked the output.

+    \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted.

+

+    \param [in]     ch  Character to transmit.

+

+    \returns            Character to transmit.

+ */

+__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch)

+{

+  if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&      /* ITM enabled */

+      ((ITM->TER & 1UL               ) != 0UL)   )     /* ITM Port #0 enabled */

+  {

+    while (ITM->PORT[0].u32 == 0UL) { __NOP(); }

+    ITM->PORT[0].u8 = (uint8_t)ch;

+  }

+  return (ch);

+}

+

+

+/** \brief  ITM Receive Character

+

+    The function inputs a character via the external variable \ref ITM_RxBuffer.

+

+    \return             Received character.

+    \return         -1  No character pending.

+ */

+__STATIC_INLINE int32_t ITM_ReceiveChar (void) {

+  int32_t ch = -1;                           /* no character available */

+

+  if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) {

+    ch = ITM_RxBuffer;

+    ITM_RxBuffer = ITM_RXBUFFER_EMPTY;       /* ready for next character */

+  }

+

+  return (ch);

+}

+

+

+/** \brief  ITM Check Character

+

+    The function checks whether a character is pending for reading in the variable \ref ITM_RxBuffer.

+

+    \return          0  No character available.

+    \return          1  Character available.

+ */

+__STATIC_INLINE int32_t ITM_CheckChar (void) {

+

+  if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) {

+    return (0);                                 /* no character available */

+  } else {

+    return (1);                                 /*    character available */

+  }

+}

+

+/*@} end of CMSIS_core_DebugFunctions */

+

+

+

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __CORE_CM7_H_DEPENDANT */

+

+#endif /* __CMSIS_GENERIC */

diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cmFunc.h b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cmFunc.h
new file mode 100644
index 0000000..e3c057e
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cmFunc.h
@@ -0,0 +1,664 @@
+/**************************************************************************//**

+ * @file     core_cmFunc.h

+ * @brief    CMSIS Cortex-M Core Function Access Header File

+ * @version  V4.10

+ * @date     18. March 2015

+ *

+ * @note

+ *

+ ******************************************************************************/

+/* Copyright (c) 2009 - 2015 ARM LIMITED

+

+   All rights reserved.

+   Redistribution and use in source and binary forms, with or without

+   modification, are permitted provided that the following conditions are met:

+   - Redistributions of source code must retain the above copyright

+     notice, this list of conditions and the following disclaimer.

+   - Redistributions in binary form must reproduce the above copyright

+     notice, this list of conditions and the following disclaimer in the

+     documentation and/or other materials provided with the distribution.

+   - Neither the name of ARM nor the names of its contributors may be used

+     to endorse or promote products derived from this software without

+     specific prior written permission.

+   *

+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"

+   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE

+   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE

+   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE

+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR

+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF

+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS

+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN

+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)

+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE

+   POSSIBILITY OF SUCH DAMAGE.

+   ---------------------------------------------------------------------------*/

+

+

+#ifndef __CORE_CMFUNC_H

+#define __CORE_CMFUNC_H

+

+

+/* ###########################  Core Function Access  ########################### */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions

+  @{

+ */

+

+#if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/

+/* ARM armcc specific functions */

+

+#if (__ARMCC_VERSION < 400677)

+  #error "Please use ARM Compiler Toolchain V4.0.677 or later!"

+#endif

+

+/* intrinsic void __enable_irq();     */

+/* intrinsic void __disable_irq();    */

+

+/** \brief  Get Control Register

+

+    This function returns the content of the Control Register.

+

+    \return               Control Register value

+ */

+__STATIC_INLINE uint32_t __get_CONTROL(void)

+{

+  register uint32_t __regControl         __ASM("control");

+  return(__regControl);

+}

+

+

+/** \brief  Set Control Register

+

+    This function writes the given value to the Control Register.

+

+    \param [in]    control  Control Register value to set

+ */

+__STATIC_INLINE void __set_CONTROL(uint32_t control)

+{

+  register uint32_t __regControl         __ASM("control");

+  __regControl = control;

+}

+

+

+/** \brief  Get IPSR Register

+

+    This function returns the content of the IPSR Register.

+

+    \return               IPSR Register value

+ */

+__STATIC_INLINE uint32_t __get_IPSR(void)

+{

+  register uint32_t __regIPSR          __ASM("ipsr");

+  return(__regIPSR);

+}

+

+

+/** \brief  Get APSR Register

+

+    This function returns the content of the APSR Register.

+

+    \return               APSR Register value

+ */

+__STATIC_INLINE uint32_t __get_APSR(void)

+{

+  register uint32_t __regAPSR          __ASM("apsr");

+  return(__regAPSR);

+}

+

+

+/** \brief  Get xPSR Register

+

+    This function returns the content of the xPSR Register.

+

+    \return               xPSR Register value

+ */

+__STATIC_INLINE uint32_t __get_xPSR(void)

+{

+  register uint32_t __regXPSR          __ASM("xpsr");

+  return(__regXPSR);

+}

+

+

+/** \brief  Get Process Stack Pointer

+

+    This function returns the current value of the Process Stack Pointer (PSP).

+

+    \return               PSP Register value

+ */

+__STATIC_INLINE uint32_t __get_PSP(void)

+{

+  register uint32_t __regProcessStackPointer  __ASM("psp");

+  return(__regProcessStackPointer);

+}

+

+

+/** \brief  Set Process Stack Pointer

+

+    This function assigns the given value to the Process Stack Pointer (PSP).

+

+    \param [in]    topOfProcStack  Process Stack Pointer value to set

+ */

+__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)

+{

+  register uint32_t __regProcessStackPointer  __ASM("psp");

+  __regProcessStackPointer = topOfProcStack;

+}

+

+

+/** \brief  Get Main Stack Pointer

+

+    This function returns the current value of the Main Stack Pointer (MSP).

+

+    \return               MSP Register value

+ */

+__STATIC_INLINE uint32_t __get_MSP(void)

+{

+  register uint32_t __regMainStackPointer     __ASM("msp");

+  return(__regMainStackPointer);

+}

+

+

+/** \brief  Set Main Stack Pointer

+

+    This function assigns the given value to the Main Stack Pointer (MSP).

+

+    \param [in]    topOfMainStack  Main Stack Pointer value to set

+ */

+__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)

+{

+  register uint32_t __regMainStackPointer     __ASM("msp");

+  __regMainStackPointer = topOfMainStack;

+}

+

+

+/** \brief  Get Priority Mask

+

+    This function returns the current state of the priority mask bit from the Priority Mask Register.

+

+    \return               Priority Mask value

+ */

+__STATIC_INLINE uint32_t __get_PRIMASK(void)

+{

+  register uint32_t __regPriMask         __ASM("primask");

+  return(__regPriMask);

+}

+

+

+/** \brief  Set Priority Mask

+

+    This function assigns the given value to the Priority Mask Register.

+

+    \param [in]    priMask  Priority Mask

+ */

+__STATIC_INLINE void __set_PRIMASK(uint32_t priMask)

+{

+  register uint32_t __regPriMask         __ASM("primask");

+  __regPriMask = (priMask);

+}

+

+

+#if       (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300)

+

+/** \brief  Enable FIQ

+

+    This function enables FIQ interrupts by clearing the F-bit in the CPSR.

+    Can only be executed in Privileged modes.

+ */

+#define __enable_fault_irq                __enable_fiq

+

+

+/** \brief  Disable FIQ

+

+    This function disables FIQ interrupts by setting the F-bit in the CPSR.

+    Can only be executed in Privileged modes.

+ */

+#define __disable_fault_irq               __disable_fiq

+

+

+/** \brief  Get Base Priority

+

+    This function returns the current value of the Base Priority register.

+

+    \return               Base Priority register value

+ */

+__STATIC_INLINE uint32_t  __get_BASEPRI(void)

+{

+  register uint32_t __regBasePri         __ASM("basepri");

+  return(__regBasePri);

+}

+

+

+/** \brief  Set Base Priority

+

+    This function assigns the given value to the Base Priority register.

+

+    \param [in]    basePri  Base Priority value to set

+ */

+__STATIC_INLINE void __set_BASEPRI(uint32_t basePri)

+{

+  register uint32_t __regBasePri         __ASM("basepri");

+  __regBasePri = (basePri & 0xff);

+}

+

+

+/** \brief  Set Base Priority with condition

+

+    This function assigns the given value to the Base Priority register only if BASEPRI masking is disabled,

+    or the new value increases the BASEPRI priority level.

+

+    \param [in]    basePri  Base Priority value to set

+ */

+__STATIC_INLINE void __set_BASEPRI_MAX(uint32_t basePri)

+{

+  register uint32_t __regBasePriMax      __ASM("basepri_max");

+  __regBasePriMax = (basePri & 0xff);

+}

+

+

+/** \brief  Get Fault Mask

+

+    This function returns the current value of the Fault Mask register.

+

+    \return               Fault Mask register value

+ */

+__STATIC_INLINE uint32_t __get_FAULTMASK(void)

+{

+  register uint32_t __regFaultMask       __ASM("faultmask");

+  return(__regFaultMask);

+}

+

+

+/** \brief  Set Fault Mask

+

+    This function assigns the given value to the Fault Mask register.

+

+    \param [in]    faultMask  Fault Mask value to set

+ */

+__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)

+{

+  register uint32_t __regFaultMask       __ASM("faultmask");

+  __regFaultMask = (faultMask & (uint32_t)1);

+}

+

+#endif /* (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300) */

+

+

+#if       (__CORTEX_M == 0x04) || (__CORTEX_M == 0x07)

+

+/** \brief  Get FPSCR

+

+    This function returns the current value of the Floating Point Status/Control register.

+

+    \return               Floating Point Status/Control register value

+ */

+__STATIC_INLINE uint32_t __get_FPSCR(void)

+{

+#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)

+  register uint32_t __regfpscr         __ASM("fpscr");

+  return(__regfpscr);

+#else

+   return(0);

+#endif

+}

+

+

+/** \brief  Set FPSCR

+

+    This function assigns the given value to the Floating Point Status/Control register.

+

+    \param [in]    fpscr  Floating Point Status/Control value to set

+ */

+__STATIC_INLINE void __set_FPSCR(uint32_t fpscr)

+{

+#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)

+  register uint32_t __regfpscr         __ASM("fpscr");

+  __regfpscr = (fpscr);

+#endif

+}

+

+#endif /* (__CORTEX_M == 0x04) || (__CORTEX_M == 0x07) */

+

+

+#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/

+/* GNU gcc specific functions */

+

+/** \brief  Enable IRQ Interrupts

+

+  This function enables IRQ interrupts by clearing the I-bit in the CPSR.

+  Can only be executed in Privileged modes.

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)

+{

+  __ASM volatile ("cpsie i" : : : "memory");

+}

+

+

+/** \brief  Disable IRQ Interrupts

+

+  This function disables IRQ interrupts by setting the I-bit in the CPSR.

+  Can only be executed in Privileged modes.

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_irq(void)

+{

+  __ASM volatile ("cpsid i" : : : "memory");

+}

+

+

+/** \brief  Get Control Register

+

+    This function returns the content of the Control Register.

+

+    \return               Control Register value

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CONTROL(void)

+{

+  uint32_t result;

+

+  __ASM volatile ("MRS %0, control" : "=r" (result) );

+  return(result);

+}

+

+

+/** \brief  Set Control Register

+

+    This function writes the given value to the Control Register.

+

+    \param [in]    control  Control Register value to set

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CONTROL(uint32_t control)

+{

+  __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");

+}

+

+

+/** \brief  Get IPSR Register

+

+    This function returns the content of the IPSR Register.

+

+    \return               IPSR Register value

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_IPSR(void)

+{

+  uint32_t result;

+

+  __ASM volatile ("MRS %0, ipsr" : "=r" (result) );

+  return(result);

+}

+

+

+/** \brief  Get APSR Register

+

+    This function returns the content of the APSR Register.

+

+    \return               APSR Register value

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void)

+{

+  uint32_t result;

+

+  __ASM volatile ("MRS %0, apsr" : "=r" (result) );

+  return(result);

+}

+

+

+/** \brief  Get xPSR Register

+

+    This function returns the content of the xPSR Register.

+

+    \return               xPSR Register value

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_xPSR(void)

+{

+  uint32_t result;

+

+  __ASM volatile ("MRS %0, xpsr" : "=r" (result) );

+  return(result);

+}

+

+

+/** \brief  Get Process Stack Pointer

+

+    This function returns the current value of the Process Stack Pointer (PSP).

+

+    \return               PSP Register value

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PSP(void)

+{

+  register uint32_t result;

+

+  __ASM volatile ("MRS %0, psp\n"  : "=r" (result) );

+  return(result);

+}

+

+

+/** \brief  Set Process Stack Pointer

+

+    This function assigns the given value to the Process Stack Pointer (PSP).

+

+    \param [in]    topOfProcStack  Process Stack Pointer value to set

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)

+{

+  __ASM volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) : "sp");

+}

+

+

+/** \brief  Get Main Stack Pointer

+

+    This function returns the current value of the Main Stack Pointer (MSP).

+

+    \return               MSP Register value

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_MSP(void)

+{

+  register uint32_t result;

+

+  __ASM volatile ("MRS %0, msp\n" : "=r" (result) );

+  return(result);

+}

+

+

+/** \brief  Set Main Stack Pointer

+

+    This function assigns the given value to the Main Stack Pointer (MSP).

+

+    \param [in]    topOfMainStack  Main Stack Pointer value to set

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)

+{

+  __ASM volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) : "sp");

+}

+

+

+/** \brief  Get Priority Mask

+

+    This function returns the current state of the priority mask bit from the Priority Mask Register.

+

+    \return               Priority Mask value

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PRIMASK(void)

+{

+  uint32_t result;

+

+  __ASM volatile ("MRS %0, primask" : "=r" (result) );

+  return(result);

+}

+

+

+/** \brief  Set Priority Mask

+

+    This function assigns the given value to the Priority Mask Register.

+

+    \param [in]    priMask  Priority Mask

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PRIMASK(uint32_t priMask)

+{

+  __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");

+}

+

+

+#if       (__CORTEX_M >= 0x03)

+

+/** \brief  Enable FIQ

+

+    This function enables FIQ interrupts by clearing the F-bit in the CPSR.

+    Can only be executed in Privileged modes.

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_fault_irq(void)

+{

+  __ASM volatile ("cpsie f" : : : "memory");

+}

+

+

+/** \brief  Disable FIQ

+

+    This function disables FIQ interrupts by setting the F-bit in the CPSR.

+    Can only be executed in Privileged modes.

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_fault_irq(void)

+{

+  __ASM volatile ("cpsid f" : : : "memory");

+}

+

+

+/** \brief  Get Base Priority

+

+    This function returns the current value of the Base Priority register.

+

+    \return               Base Priority register value

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_BASEPRI(void)

+{

+  uint32_t result;

+

+  __ASM volatile ("MRS %0, basepri" : "=r" (result) );

+  return(result);

+}

+

+

+/** \brief  Set Base Priority

+

+    This function assigns the given value to the Base Priority register.

+

+    \param [in]    basePri  Base Priority value to set

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI(uint32_t value)

+{

+  __ASM volatile ("MSR basepri, %0" : : "r" (value) : "memory");

+}

+

+

+/** \brief  Set Base Priority with condition

+

+    This function assigns the given value to the Base Priority register only if BASEPRI masking is disabled,

+    or the new value increases the BASEPRI priority level.

+

+    \param [in]    basePri  Base Priority value to set

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI_MAX(uint32_t value)

+{

+  __ASM volatile ("MSR basepri_max, %0" : : "r" (value) : "memory");

+}

+

+

+/** \brief  Get Fault Mask

+

+    This function returns the current value of the Fault Mask register.

+

+    \return               Fault Mask register value

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FAULTMASK(void)

+{

+  uint32_t result;

+

+  __ASM volatile ("MRS %0, faultmask" : "=r" (result) );

+  return(result);

+}

+

+

+/** \brief  Set Fault Mask

+

+    This function assigns the given value to the Fault Mask register.

+

+    \param [in]    faultMask  Fault Mask value to set

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)

+{

+  __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");

+}

+

+#endif /* (__CORTEX_M >= 0x03) */

+

+

+#if       (__CORTEX_M == 0x04) || (__CORTEX_M == 0x07)

+

+/** \brief  Get FPSCR

+

+    This function returns the current value of the Floating Point Status/Control register.

+

+    \return               Floating Point Status/Control register value

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)

+{

+#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)

+  uint32_t result;

+

+  /* Empty asm statement works as a scheduling barrier */

+  __ASM volatile ("");

+  __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );

+  __ASM volatile ("");

+  return(result);

+#else

+   return(0);

+#endif

+}

+

+

+/** \brief  Set FPSCR

+

+    This function assigns the given value to the Floating Point Status/Control register.

+

+    \param [in]    fpscr  Floating Point Status/Control value to set

+ */

+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)

+{

+#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)

+  /* Empty asm statement works as a scheduling barrier */

+  __ASM volatile ("");

+  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc");

+  __ASM volatile ("");

+#endif

+}

+

+#endif /* (__CORTEX_M == 0x04) || (__CORTEX_M == 0x07) */

+

+

+#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/

+/* IAR iccarm specific functions */

+#include <cmsis_iar.h>

+

+

+#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/

+/* TI CCS specific functions */

+#include <cmsis_ccs.h>

+

+

+#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/

+/* TASKING carm specific functions */

+/*

+ * The CMSIS functions have been implemented as intrinsics in the compiler.

+ * Please use "carm -?i" to get an up to date list of all intrinsics,

+ * Including the CMSIS ones.

+ */

+

+

+#elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/

+/* Cosmic specific functions */

+#include <cmsis_csm.h>

+

+#endif

+

+/*@} end of CMSIS_Core_RegAccFunctions */

+

+#endif /* __CORE_CMFUNC_H */

diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cmInstr.h b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cmInstr.h
new file mode 100644
index 0000000..c8e045f
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cmInstr.h
@@ -0,0 +1,916 @@
+/**************************************************************************//**

+ * @file     core_cmInstr.h

+ * @brief    CMSIS Cortex-M Core Instruction Access Header File

+ * @version  V4.10

+ * @date     18. March 2015

+ *

+ * @note

+ *

+ ******************************************************************************/

+/* Copyright (c) 2009 - 2014 ARM LIMITED

+

+   All rights reserved.

+   Redistribution and use in source and binary forms, with or without

+   modification, are permitted provided that the following conditions are met:

+   - Redistributions of source code must retain the above copyright

+     notice, this list of conditions and the following disclaimer.

+   - Redistributions in binary form must reproduce the above copyright

+     notice, this list of conditions and the following disclaimer in the

+     documentation and/or other materials provided with the distribution.

+   - Neither the name of ARM nor the names of its contributors may be used

+     to endorse or promote products derived from this software without

+     specific prior written permission.

+   *

+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"

+   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE

+   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE

+   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE

+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR

+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF

+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS

+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN

+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)

+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE

+   POSSIBILITY OF SUCH DAMAGE.

+   ---------------------------------------------------------------------------*/

+

+

+#ifndef __CORE_CMINSTR_H

+#define __CORE_CMINSTR_H

+

+

+/* ##########################  Core Instruction Access  ######################### */

+/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface

+  Access to dedicated instructions

+  @{

+*/

+

+#if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/

+/* ARM armcc specific functions */

+

+#if (__ARMCC_VERSION < 400677)

+  #error "Please use ARM Compiler Toolchain V4.0.677 or later!"

+#endif

+

+

+/** \brief  No Operation

+

+    No Operation does nothing. This instruction can be used for code alignment purposes.

+ */

+#define __NOP                             __nop

+

+

+/** \brief  Wait For Interrupt

+

+    Wait For Interrupt is a hint instruction that suspends execution

+    until one of a number of events occurs.

+ */

+#define __WFI                             __wfi

+

+

+/** \brief  Wait For Event

+

+    Wait For Event is a hint instruction that permits the processor to enter

+    a low-power state until one of a number of events occurs.

+ */

+#define __WFE                             __wfe

+

+

+/** \brief  Send Event

+

+    Send Event is a hint instruction. It causes an event to be signaled to the CPU.

+ */

+#define __SEV                             __sev

+

+

+/** \brief  Instruction Synchronization Barrier

+

+    Instruction Synchronization Barrier flushes the pipeline in the processor,

+    so that all instructions following the ISB are fetched from cache or

+    memory, after the instruction has been completed.

+ */

+#define __ISB() do {\

+                   __schedule_barrier();\

+                   __isb(0xF);\

+                   __schedule_barrier();\

+                } while (0)

+

+/** \brief  Data Synchronization Barrier

+

+    This function acts as a special kind of Data Memory Barrier.

+    It completes when all explicit memory accesses before this instruction complete.

+ */

+#define __DSB() do {\

+                   __schedule_barrier();\

+                   __dsb(0xF);\

+                   __schedule_barrier();\

+                } while (0)

+

+/** \brief  Data Memory Barrier

+

+    This function ensures the apparent order of the explicit memory operations before

+    and after the instruction, without ensuring their completion.

+ */

+#define __DMB() do {\

+                   __schedule_barrier();\

+                   __dmb(0xF);\

+                   __schedule_barrier();\

+                } while (0)

+

+/** \brief  Reverse byte order (32 bit)

+

+    This function reverses the byte order in integer value.

+

+    \param [in]    value  Value to reverse

+    \return               Reversed value

+ */

+#define __REV                             __rev

+

+

+/** \brief  Reverse byte order (16 bit)

+

+    This function reverses the byte order in two unsigned short values.

+

+    \param [in]    value  Value to reverse

+    \return               Reversed value

+ */

+#ifndef __NO_EMBEDDED_ASM

+__attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value)

+{

+  rev16 r0, r0

+  bx lr

+}

+#endif

+

+/** \brief  Reverse byte order in signed short value

+

+    This function reverses the byte order in a signed short value with sign extension to integer.

+

+    \param [in]    value  Value to reverse

+    \return               Reversed value

+ */

+#ifndef __NO_EMBEDDED_ASM

+__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int32_t __REVSH(int32_t value)

+{

+  revsh r0, r0

+  bx lr

+}

+#endif

+

+

+/** \brief  Rotate Right in unsigned value (32 bit)

+

+    This function Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.

+

+    \param [in]    value  Value to rotate

+    \param [in]    shift  Number of Bits to rotate

+    \return               Rotated value

+ */

+#define __ROR                             __ror

+

+

+/** \brief  Breakpoint

+

+    This function causes the processor to enter Debug state.

+    Debug tools can use this to investigate system state when the instruction at a particular address is reached.

+

+    \param [in]    value  is ignored by the processor.

+                   If required, a debugger can use it to store additional information about the breakpoint.

+ */

+#define __BKPT(value)                       __breakpoint(value)

+

+

+/** \brief  Reverse bit order of value

+

+    This function reverses the bit order of the given value.

+

+    \param [in]    value  Value to reverse

+    \return               Reversed value

+ */

+#if       (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300)

+  #define __RBIT                          __rbit

+#else

+__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)

+{

+  uint32_t result;

+  int32_t s = 4 /*sizeof(v)*/ * 8 - 1; // extra shift needed at end

+

+  result = value;                      // r will be reversed bits of v; first get LSB of v

+  for (value >>= 1; value; value >>= 1)

+  {

+    result <<= 1;

+    result |= value & 1;

+    s--;

+  }

+  result <<= s;                       // shift when v's highest bits are zero

+  return(result);

+}

+#endif

+

+

+/** \brief  Count leading zeros

+

+    This function counts the number of leading zeros of a data value.

+

+    \param [in]  value  Value to count the leading zeros

+    \return             number of leading zeros in value

+ */

+#define __CLZ                             __clz

+

+

+#if       (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300)

+

+/** \brief  LDR Exclusive (8 bit)

+

+    This function executes an exclusive LDR instruction for 8 bit value.

+

+    \param [in]    ptr  Pointer to data

+    \return             value of type uint8_t at (*ptr)

+ */

+#define __LDREXB(ptr)                     ((uint8_t ) __ldrex(ptr))

+

+

+/** \brief  LDR Exclusive (16 bit)

+

+    This function executes an exclusive LDR instruction for 16 bit values.

+

+    \param [in]    ptr  Pointer to data

+    \return        value of type uint16_t at (*ptr)

+ */

+#define __LDREXH(ptr)                     ((uint16_t) __ldrex(ptr))

+

+

+/** \brief  LDR Exclusive (32 bit)

+

+    This function executes an exclusive LDR instruction for 32 bit values.

+

+    \param [in]    ptr  Pointer to data

+    \return        value of type uint32_t at (*ptr)

+ */

+#define __LDREXW(ptr)                     ((uint32_t ) __ldrex(ptr))

+

+

+/** \brief  STR Exclusive (8 bit)

+

+    This function executes an exclusive STR instruction for 8 bit values.

+

+    \param [in]  value  Value to store

+    \param [in]    ptr  Pointer to location

+    \return          0  Function succeeded

+    \return          1  Function failed

+ */

+#define __STREXB(value, ptr)              __strex(value, ptr)

+

+

+/** \brief  STR Exclusive (16 bit)

+

+    This function executes an exclusive STR instruction for 16 bit values.

+

+    \param [in]  value  Value to store

+    \param [in]    ptr  Pointer to location

+    \return          0  Function succeeded

+    \return          1  Function failed

+ */

+#define __STREXH(value, ptr)              __strex(value, ptr)

+

+

+/** \brief  STR Exclusive (32 bit)

+

+    This function executes an exclusive STR instruction for 32 bit values.

+

+    \param [in]  value  Value to store

+    \param [in]    ptr  Pointer to location

+    \return          0  Function succeeded

+    \return          1  Function failed

+ */

+#define __STREXW(value, ptr)              __strex(value, ptr)

+

+

+/** \brief  Remove the exclusive lock

+

+    This function removes the exclusive lock which is created by LDREX.

+

+ */

+#define __CLREX                           __clrex

+

+

+/** \brief  Signed Saturate

+

+    This function saturates a signed value.

+

+    \param [in]  value  Value to be saturated

+    \param [in]    sat  Bit position to saturate to (1..32)

+    \return             Saturated value

+ */

+#define __SSAT                            __ssat

+

+

+/** \brief  Unsigned Saturate

+

+    This function saturates an unsigned value.

+

+    \param [in]  value  Value to be saturated

+    \param [in]    sat  Bit position to saturate to (0..31)

+    \return             Saturated value

+ */

+#define __USAT                            __usat

+

+

+/** \brief  Rotate Right with Extend (32 bit)

+

+    This function moves each bit of a bitstring right by one bit.

+    The carry input is shifted in at the left end of the bitstring.

+

+    \param [in]    value  Value to rotate

+    \return               Rotated value

+ */

+#ifndef __NO_EMBEDDED_ASM

+__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value)

+{

+  rrx r0, r0

+  bx lr

+}

+#endif

+

+

+/** \brief  LDRT Unprivileged (8 bit)

+

+    This function executes an Unprivileged LDRT instruction for 8 bit value.

+

+    \param [in]    ptr  Pointer to data

+    \return             value of type uint8_t at (*ptr)

+ */

+#define __LDRBT(ptr)                      ((uint8_t )  __ldrt(ptr))

+

+

+/** \brief  LDRT Unprivileged (16 bit)

+

+    This function executes an Unprivileged LDRT instruction for 16 bit values.

+

+    \param [in]    ptr  Pointer to data

+    \return        value of type uint16_t at (*ptr)

+ */

+#define __LDRHT(ptr)                      ((uint16_t)  __ldrt(ptr))

+

+

+/** \brief  LDRT Unprivileged (32 bit)

+

+    This function executes an Unprivileged LDRT instruction for 32 bit values.

+

+    \param [in]    ptr  Pointer to data

+    \return        value of type uint32_t at (*ptr)

+ */

+#define __LDRT(ptr)                       ((uint32_t ) __ldrt(ptr))

+

+

+/** \brief  STRT Unprivileged (8 bit)

+

+    This function executes an Unprivileged STRT instruction for 8 bit values.

+

+    \param [in]  value  Value to store

+    \param [in]    ptr  Pointer to location

+ */

+#define __STRBT(value, ptr)               __strt(value, ptr)

+

+

+/** \brief  STRT Unprivileged (16 bit)

+

+    This function executes an Unprivileged STRT instruction for 16 bit values.

+

+    \param [in]  value  Value to store

+    \param [in]    ptr  Pointer to location

+ */

+#define __STRHT(value, ptr)               __strt(value, ptr)

+

+

+/** \brief  STRT Unprivileged (32 bit)

+

+    This function executes an Unprivileged STRT instruction for 32 bit values.

+

+    \param [in]  value  Value to store

+    \param [in]    ptr  Pointer to location

+ */

+#define __STRT(value, ptr)                __strt(value, ptr)

+

+#endif /* (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300) */

+

+

+#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/

+/* GNU gcc specific functions */

+

+/* Define macros for porting to both thumb1 and thumb2.

+ * For thumb1, use low register (r0-r7), specified by constrant "l"

+ * Otherwise, use general registers, specified by constrant "r" */

+#if defined (__thumb__) && !defined (__thumb2__)

+#define __CMSIS_GCC_OUT_REG(r) "=l" (r)

+#define __CMSIS_GCC_USE_REG(r) "l" (r)

+#else

+#define __CMSIS_GCC_OUT_REG(r) "=r" (r)

+#define __CMSIS_GCC_USE_REG(r) "r" (r)

+#endif

+

+/** \brief  No Operation

+

+    No Operation does nothing. This instruction can be used for code alignment purposes.

+ */

+__attribute__((always_inline)) __STATIC_INLINE void __NOP(void)

+{

+  __ASM volatile ("nop");

+}

+

+

+/** \brief  Wait For Interrupt

+

+    Wait For Interrupt is a hint instruction that suspends execution

+    until one of a number of events occurs.

+ */

+__attribute__((always_inline)) __STATIC_INLINE void __WFI(void)

+{

+  __ASM volatile ("wfi");

+}

+

+

+/** \brief  Wait For Event

+

+    Wait For Event is a hint instruction that permits the processor to enter

+    a low-power state until one of a number of events occurs.

+ */

+__attribute__((always_inline)) __STATIC_INLINE void __WFE(void)

+{

+  __ASM volatile ("wfe");

+}

+

+

+/** \brief  Send Event

+

+    Send Event is a hint instruction. It causes an event to be signaled to the CPU.

+ */

+__attribute__((always_inline)) __STATIC_INLINE void __SEV(void)

+{

+  __ASM volatile ("sev");

+}

+

+

+/** \brief  Instruction Synchronization Barrier

+

+    Instruction Synchronization Barrier flushes the pipeline in the processor,

+    so that all instructions following the ISB are fetched from cache or

+    memory, after the instruction has been completed.

+ */

+__attribute__((always_inline)) __STATIC_INLINE void __ISB(void)

+{

+  __ASM volatile ("isb 0xF":::"memory");

+}

+

+

+/** \brief  Data Synchronization Barrier

+

+    This function acts as a special kind of Data Memory Barrier.

+    It completes when all explicit memory accesses before this instruction complete.

+ */

+__attribute__((always_inline)) __STATIC_INLINE void __DSB(void)

+{

+  __ASM volatile ("dsb 0xF":::"memory");

+}

+

+

+/** \brief  Data Memory Barrier

+

+    This function ensures the apparent order of the explicit memory operations before

+    and after the instruction, without ensuring their completion.

+ */

+__attribute__((always_inline)) __STATIC_INLINE void __DMB(void)

+{

+  __ASM volatile ("dmb 0xF":::"memory");

+}

+

+

+/** \brief  Reverse byte order (32 bit)

+

+    This function reverses the byte order in integer value.

+

+    \param [in]    value  Value to reverse

+    \return               Reversed value

+ */

+__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV(uint32_t value)

+{

+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)

+  return __builtin_bswap32(value);

+#else

+  uint32_t result;

+

+  __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );

+  return(result);

+#endif

+}

+

+

+/** \brief  Reverse byte order (16 bit)

+

+    This function reverses the byte order in two unsigned short values.

+

+    \param [in]    value  Value to reverse

+    \return               Reversed value

+ */

+__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV16(uint32_t value)

+{

+  uint32_t result;

+

+  __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );

+  return(result);

+}

+

+

+/** \brief  Reverse byte order in signed short value

+

+    This function reverses the byte order in a signed short value with sign extension to integer.

+

+    \param [in]    value  Value to reverse

+    \return               Reversed value

+ */

+__attribute__((always_inline)) __STATIC_INLINE int32_t __REVSH(int32_t value)

+{

+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)

+  return (short)__builtin_bswap16(value);

+#else

+  uint32_t result;

+

+  __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );

+  return(result);

+#endif

+}

+

+

+/** \brief  Rotate Right in unsigned value (32 bit)

+

+    This function Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.

+

+    \param [in]    value  Value to rotate

+    \param [in]    value  Number of Bits to rotate

+    \return               Rotated value

+ */

+__attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)

+{

+  return (op1 >> op2) | (op1 << (32 - op2));

+}

+

+

+/** \brief  Breakpoint

+

+    This function causes the processor to enter Debug state.

+    Debug tools can use this to investigate system state when the instruction at a particular address is reached.

+

+    \param [in]    value  is ignored by the processor.

+                   If required, a debugger can use it to store additional information about the breakpoint.

+ */

+#define __BKPT(value)                       __ASM volatile ("bkpt "#value)

+

+

+/** \brief  Reverse bit order of value

+

+    This function reverses the bit order of the given value.

+

+    \param [in]    value  Value to reverse

+    \return               Reversed value

+ */

+__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)

+{

+  uint32_t result;

+

+#if       (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300)

+   __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );

+#else

+  int32_t s = 4 /*sizeof(v)*/ * 8 - 1; // extra shift needed at end

+

+  result = value;                      // r will be reversed bits of v; first get LSB of v

+  for (value >>= 1; value; value >>= 1)

+  {

+    result <<= 1;

+    result |= value & 1;

+    s--;

+  }

+  result <<= s;                       // shift when v's highest bits are zero

+#endif

+  return(result);

+}

+

+

+/** \brief  Count leading zeros

+

+    This function counts the number of leading zeros of a data value.

+

+    \param [in]  value  Value to count the leading zeros

+    \return             number of leading zeros in value

+ */

+#define __CLZ             __builtin_clz

+

+

+#if       (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300)

+

+/** \brief  LDR Exclusive (8 bit)

+

+    This function executes a exclusive LDR instruction for 8 bit value.

+

+    \param [in]    ptr  Pointer to data

+    \return             value of type uint8_t at (*ptr)

+ */

+__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr)

+{

+    uint32_t result;

+

+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)

+   __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );

+#else

+    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not

+       accepted by assembler. So has to use following less efficient pattern.

+    */

+   __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );

+#endif

+   return ((uint8_t) result);    /* Add explicit type cast here */

+}

+

+

+/** \brief  LDR Exclusive (16 bit)

+

+    This function executes a exclusive LDR instruction for 16 bit values.

+

+    \param [in]    ptr  Pointer to data

+    \return        value of type uint16_t at (*ptr)

+ */

+__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr)

+{

+    uint32_t result;

+

+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)

+   __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );

+#else

+    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not

+       accepted by assembler. So has to use following less efficient pattern.

+    */

+   __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );

+#endif

+   return ((uint16_t) result);    /* Add explicit type cast here */

+}

+

+

+/** \brief  LDR Exclusive (32 bit)

+

+    This function executes a exclusive LDR instruction for 32 bit values.

+

+    \param [in]    ptr  Pointer to data

+    \return        value of type uint32_t at (*ptr)

+ */

+__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr)

+{

+    uint32_t result;

+

+   __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );

+   return(result);

+}

+

+

+/** \brief  STR Exclusive (8 bit)

+

+    This function executes a exclusive STR instruction for 8 bit values.

+

+    \param [in]  value  Value to store

+    \param [in]    ptr  Pointer to location

+    \return          0  Function succeeded

+    \return          1  Function failed

+ */

+__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)

+{

+   uint32_t result;

+

+   __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );

+   return(result);

+}

+

+

+/** \brief  STR Exclusive (16 bit)

+

+    This function executes a exclusive STR instruction for 16 bit values.

+

+    \param [in]  value  Value to store

+    \param [in]    ptr  Pointer to location

+    \return          0  Function succeeded

+    \return          1  Function failed

+ */

+__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)

+{

+   uint32_t result;

+

+   __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );

+   return(result);

+}

+

+

+/** \brief  STR Exclusive (32 bit)

+

+    This function executes a exclusive STR instruction for 32 bit values.

+

+    \param [in]  value  Value to store

+    \param [in]    ptr  Pointer to location

+    \return          0  Function succeeded

+    \return          1  Function failed

+ */

+__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)

+{

+   uint32_t result;

+

+   __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );

+   return(result);

+}

+

+

+/** \brief  Remove the exclusive lock

+

+    This function removes the exclusive lock which is created by LDREX.

+

+ */

+__attribute__((always_inline)) __STATIC_INLINE void __CLREX(void)

+{

+  __ASM volatile ("clrex" ::: "memory");

+}

+

+

+/** \brief  Signed Saturate

+

+    This function saturates a signed value.

+

+    \param [in]  value  Value to be saturated

+    \param [in]    sat  Bit position to saturate to (1..32)

+    \return             Saturated value

+ */

+#define __SSAT(ARG1,ARG2) \

+({                          \

+  uint32_t __RES, __ARG1 = (ARG1); \

+  __ASM ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \

+  __RES; \

+ })

+

+

+/** \brief  Unsigned Saturate

+

+    This function saturates an unsigned value.

+

+    \param [in]  value  Value to be saturated

+    \param [in]    sat  Bit position to saturate to (0..31)

+    \return             Saturated value

+ */

+#define __USAT(ARG1,ARG2) \

+({                          \

+  uint32_t __RES, __ARG1 = (ARG1); \

+  __ASM ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \

+  __RES; \

+ })

+

+

+/** \brief  Rotate Right with Extend (32 bit)

+

+    This function moves each bit of a bitstring right by one bit.

+    The carry input is shifted in at the left end of the bitstring.

+

+    \param [in]    value  Value to rotate

+    \return               Rotated value

+ */

+__attribute__((always_inline)) __STATIC_INLINE uint32_t __RRX(uint32_t value)

+{

+  uint32_t result;

+

+  __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );

+  return(result);

+}

+

+

+/** \brief  LDRT Unprivileged (8 bit)

+

+    This function executes a Unprivileged LDRT instruction for 8 bit value.

+

+    \param [in]    ptr  Pointer to data

+    \return             value of type uint8_t at (*ptr)

+ */

+__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr)

+{

+    uint32_t result;

+

+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)

+   __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*addr) );

+#else

+    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not

+       accepted by assembler. So has to use following less efficient pattern.

+    */

+   __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );

+#endif

+   return ((uint8_t) result);    /* Add explicit type cast here */

+}

+

+

+/** \brief  LDRT Unprivileged (16 bit)

+

+    This function executes a Unprivileged LDRT instruction for 16 bit values.

+

+    \param [in]    ptr  Pointer to data

+    \return        value of type uint16_t at (*ptr)

+ */

+__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr)

+{

+    uint32_t result;

+

+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)

+   __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*addr) );

+#else

+    /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not

+       accepted by assembler. So has to use following less efficient pattern.

+    */

+   __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );

+#endif

+   return ((uint16_t) result);    /* Add explicit type cast here */

+}

+

+

+/** \brief  LDRT Unprivileged (32 bit)

+

+    This function executes a Unprivileged LDRT instruction for 32 bit values.

+

+    \param [in]    ptr  Pointer to data

+    \return        value of type uint32_t at (*ptr)

+ */

+__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr)

+{

+    uint32_t result;

+

+   __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*addr) );

+   return(result);

+}

+

+

+/** \brief  STRT Unprivileged (8 bit)

+

+    This function executes a Unprivileged STRT instruction for 8 bit values.

+

+    \param [in]  value  Value to store

+    \param [in]    ptr  Pointer to location

+ */

+__attribute__((always_inline)) __STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr)

+{

+   __ASM volatile ("strbt %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );

+}

+

+

+/** \brief  STRT Unprivileged (16 bit)

+

+    This function executes a Unprivileged STRT instruction for 16 bit values.

+

+    \param [in]  value  Value to store

+    \param [in]    ptr  Pointer to location

+ */

+__attribute__((always_inline)) __STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr)

+{

+   __ASM volatile ("strht %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );

+}

+

+

+/** \brief  STRT Unprivileged (32 bit)

+

+    This function executes a Unprivileged STRT instruction for 32 bit values.

+

+    \param [in]  value  Value to store

+    \param [in]    ptr  Pointer to location

+ */

+__attribute__((always_inline)) __STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr)

+{

+   __ASM volatile ("strt %1, %0" : "=Q" (*addr) : "r" (value) );

+}

+

+#endif /* (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300) */

+

+

+#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/

+/* IAR iccarm specific functions */

+#include <cmsis_iar.h>

+

+

+#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/

+/* TI CCS specific functions */

+#include <cmsis_ccs.h>

+

+

+#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/

+/* TASKING carm specific functions */

+/*

+ * The CMSIS functions have been implemented as intrinsics in the compiler.

+ * Please use "carm -?i" to get an up to date list of all intrinsics,

+ * Including the CMSIS ones.

+ */

+

+

+#elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/

+/* Cosmic specific functions */

+#include <cmsis_csm.h>

+

+#endif

+

+/*@}*/ /* end of group CMSIS_Core_InstructionInterface */

+

+#endif /* __CORE_CMINSTR_H */

diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cmSimd.h b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cmSimd.h
new file mode 100644
index 0000000..fd7214e
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_cmSimd.h
@@ -0,0 +1,697 @@
+/**************************************************************************//**

+ * @file     core_cmSimd.h

+ * @brief    CMSIS Cortex-M SIMD Header File

+ * @version  V4.10

+ * @date     18. March 2015

+ *

+ * @note

+ *

+ ******************************************************************************/

+/* Copyright (c) 2009 - 2014 ARM LIMITED

+

+   All rights reserved.

+   Redistribution and use in source and binary forms, with or without

+   modification, are permitted provided that the following conditions are met:

+   - Redistributions of source code must retain the above copyright

+     notice, this list of conditions and the following disclaimer.

+   - Redistributions in binary form must reproduce the above copyright

+     notice, this list of conditions and the following disclaimer in the

+     documentation and/or other materials provided with the distribution.

+   - Neither the name of ARM nor the names of its contributors may be used

+     to endorse or promote products derived from this software without

+     specific prior written permission.

+   *

+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"

+   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE

+   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE

+   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE

+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR

+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF

+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS

+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN

+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)

+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE

+   POSSIBILITY OF SUCH DAMAGE.

+   ---------------------------------------------------------------------------*/

+

+

+#if defined ( __ICCARM__ )

+ #pragma system_include  /* treat file as system include file for MISRA check */

+#endif

+

+#ifndef __CORE_CMSIMD_H

+#define __CORE_CMSIMD_H

+

+#ifdef __cplusplus

+ extern "C" {

+#endif

+

+

+/*******************************************************************************

+ *                Hardware Abstraction Layer

+ ******************************************************************************/

+

+

+/* ###################  Compiler specific Intrinsics  ########################### */

+/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics

+  Access to dedicated SIMD instructions

+  @{

+*/

+

+#if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/

+/* ARM armcc specific functions */

+#define __SADD8                           __sadd8

+#define __QADD8                           __qadd8

+#define __SHADD8                          __shadd8

+#define __UADD8                           __uadd8

+#define __UQADD8                          __uqadd8

+#define __UHADD8                          __uhadd8

+#define __SSUB8                           __ssub8

+#define __QSUB8                           __qsub8

+#define __SHSUB8                          __shsub8

+#define __USUB8                           __usub8

+#define __UQSUB8                          __uqsub8

+#define __UHSUB8                          __uhsub8

+#define __SADD16                          __sadd16

+#define __QADD16                          __qadd16

+#define __SHADD16                         __shadd16

+#define __UADD16                          __uadd16

+#define __UQADD16                         __uqadd16

+#define __UHADD16                         __uhadd16

+#define __SSUB16                          __ssub16

+#define __QSUB16                          __qsub16

+#define __SHSUB16                         __shsub16

+#define __USUB16                          __usub16

+#define __UQSUB16                         __uqsub16

+#define __UHSUB16                         __uhsub16

+#define __SASX                            __sasx

+#define __QASX                            __qasx

+#define __SHASX                           __shasx

+#define __UASX                            __uasx

+#define __UQASX                           __uqasx

+#define __UHASX                           __uhasx

+#define __SSAX                            __ssax

+#define __QSAX                            __qsax

+#define __SHSAX                           __shsax

+#define __USAX                            __usax

+#define __UQSAX                           __uqsax

+#define __UHSAX                           __uhsax

+#define __USAD8                           __usad8

+#define __USADA8                          __usada8

+#define __SSAT16                          __ssat16

+#define __USAT16                          __usat16

+#define __UXTB16                          __uxtb16

+#define __UXTAB16                         __uxtab16

+#define __SXTB16                          __sxtb16

+#define __SXTAB16                         __sxtab16

+#define __SMUAD                           __smuad

+#define __SMUADX                          __smuadx

+#define __SMLAD                           __smlad

+#define __SMLADX                          __smladx

+#define __SMLALD                          __smlald

+#define __SMLALDX                         __smlaldx

+#define __SMUSD                           __smusd

+#define __SMUSDX                          __smusdx

+#define __SMLSD                           __smlsd

+#define __SMLSDX                          __smlsdx

+#define __SMLSLD                          __smlsld

+#define __SMLSLDX                         __smlsldx

+#define __SEL                             __sel

+#define __QADD                            __qadd

+#define __QSUB                            __qsub

+

+#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \

+                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

+

+#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \

+                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )

+

+#define __SMMLA(ARG1,ARG2,ARG3)          ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \

+                                                      ((int64_t)(ARG3) << 32)      ) >> 32))

+

+

+#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/

+/* GNU gcc specific functions */

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)

+{

+  uint32_t result;

+

+  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );

+  return(result);

+}

+

+#define __SSAT16(ARG1,ARG2) \

+({                          \

+  uint32_t __RES, __ARG1 = (ARG1); \

+  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \

+  __RES; \

+ })

+

+#define __USAT16(ARG1,ARG2) \

+({                          \

+  uint32_t __RES, __ARG1 = (ARG1); \

+  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \

+  __RES; \

+ })

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)

+{

+  uint32_t result;

+

+  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)

+{

+  uint32_t result;

+

+  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)

+{

+  uint32_t result;

+

+  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)

+{

+  uint32_t result;

+

+  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)

+{

+  union llreg_u{

+    uint32_t w32[2];

+    uint64_t w64;

+  } llr;

+  llr.w64 = acc;

+

+#ifndef __ARMEB__   // Little endian

+  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );

+#else               // Big endian

+  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );

+#endif

+

+  return(llr.w64);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)

+{

+  union llreg_u{

+    uint32_t w32[2];

+    uint64_t w64;

+  } llr;

+  llr.w64 = acc;

+

+#ifndef __ARMEB__   // Little endian

+  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );

+#else               // Big endian

+  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );

+#endif

+

+  return(llr.w64);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)

+{

+  uint32_t result;

+

+  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)

+{

+  uint32_t result;

+

+  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)

+{

+  union llreg_u{

+    uint32_t w32[2];

+    uint64_t w64;

+  } llr;

+  llr.w64 = acc;

+

+#ifndef __ARMEB__   // Little endian

+  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );

+#else               // Big endian

+  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );

+#endif

+

+  return(llr.w64);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)

+{

+  union llreg_u{

+    uint32_t w32[2];

+    uint64_t w64;

+  } llr;

+  llr.w64 = acc;

+

+#ifndef __ARMEB__   // Little endian

+  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );

+#else               // Big endian

+  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );

+#endif

+

+  return(llr.w64);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)

+{

+  uint32_t result;

+

+  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );

+  return(result);

+}

+

+#define __PKHBT(ARG1,ARG2,ARG3) \

+({                          \

+  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \

+  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \

+  __RES; \

+ })

+

+#define __PKHTB(ARG1,ARG2,ARG3) \

+({                          \

+  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \

+  if (ARG3 == 0) \

+    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2)  ); \

+  else \

+    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \

+  __RES; \

+ })

+

+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)

+{

+ int32_t result;

+

+ __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r"  (op1), "r" (op2), "r" (op3) );

+ return(result);

+}

+

+

+#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/

+/* IAR iccarm specific functions */

+#include <cmsis_iar.h>

+

+

+#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/

+/* TI CCS specific functions */

+#include <cmsis_ccs.h>

+

+

+#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/

+/* TASKING carm specific functions */

+/* not yet supported */

+

+

+#elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/

+/* Cosmic specific functions */

+#include <cmsis_csm.h>

+

+#endif

+

+/*@} end of group CMSIS_SIMD_intrinsics */

+

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __CORE_CMSIMD_H */

diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_sc000.h b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_sc000.h
new file mode 100644
index 0000000..c442606
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_sc000.h
@@ -0,0 +1,864 @@
+/**************************************************************************//**

+ * @file     core_sc000.h

+ * @brief    CMSIS SC000 Core Peripheral Access Layer Header File

+ * @version  V4.10

+ * @date     18. March 2015

+ *

+ * @note

+ *

+ ******************************************************************************/

+/* Copyright (c) 2009 - 2015 ARM LIMITED

+

+   All rights reserved.

+   Redistribution and use in source and binary forms, with or without

+   modification, are permitted provided that the following conditions are met:

+   - Redistributions of source code must retain the above copyright

+     notice, this list of conditions and the following disclaimer.

+   - Redistributions in binary form must reproduce the above copyright

+     notice, this list of conditions and the following disclaimer in the

+     documentation and/or other materials provided with the distribution.

+   - Neither the name of ARM nor the names of its contributors may be used

+     to endorse or promote products derived from this software without

+     specific prior written permission.

+   *

+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"

+   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE

+   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE

+   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE

+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR

+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF

+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS

+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN

+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)

+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE

+   POSSIBILITY OF SUCH DAMAGE.

+   ---------------------------------------------------------------------------*/

+

+

+#if defined ( __ICCARM__ )

+ #pragma system_include  /* treat file as system include file for MISRA check */

+#endif

+

+#ifndef __CORE_SC000_H_GENERIC

+#define __CORE_SC000_H_GENERIC

+

+#ifdef __cplusplus

+ extern "C" {

+#endif

+

+/** \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions

+  CMSIS violates the following MISRA-C:2004 rules:

+

+   \li Required Rule 8.5, object/function definition in header file.<br>

+     Function definitions in header files are used to allow 'inlining'.

+

+   \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>

+     Unions are used for effective representation of core registers.

+

+   \li Advisory Rule 19.7, Function-like macro defined.<br>

+     Function-like macros are used to allow more efficient code.

+ */

+

+

+/*******************************************************************************

+ *                 CMSIS definitions

+ ******************************************************************************/

+/** \ingroup SC000

+  @{

+ */

+

+/*  CMSIS SC000 definitions */

+#define __SC000_CMSIS_VERSION_MAIN  (0x04)                                   /*!< [31:16] CMSIS HAL main version */

+#define __SC000_CMSIS_VERSION_SUB   (0x00)                                   /*!< [15:0]  CMSIS HAL sub version  */

+#define __SC000_CMSIS_VERSION       ((__SC000_CMSIS_VERSION_MAIN << 16) | \

+                                      __SC000_CMSIS_VERSION_SUB          )   /*!< CMSIS HAL version number       */

+

+#define __CORTEX_SC                 (000)                                       /*!< Cortex secure core             */

+

+

+#if   defined ( __CC_ARM )

+  #define __ASM            __asm                                      /*!< asm keyword for ARM Compiler          */

+  #define __INLINE         __inline                                   /*!< inline keyword for ARM Compiler       */

+  #define __STATIC_INLINE  static __inline

+

+#elif defined ( __GNUC__ )

+  #define __ASM            __asm                                      /*!< asm keyword for GNU Compiler          */

+  #define __INLINE         inline                                     /*!< inline keyword for GNU Compiler       */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __ICCARM__ )

+  #define __ASM            __asm                                      /*!< asm keyword for IAR Compiler          */

+  #define __INLINE         inline                                     /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __TMS470__ )

+  #define __ASM            __asm                                      /*!< asm keyword for TI CCS Compiler       */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __TASKING__ )

+  #define __ASM            __asm                                      /*!< asm keyword for TASKING Compiler      */

+  #define __INLINE         inline                                     /*!< inline keyword for TASKING Compiler   */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __CSMC__ )

+  #define __packed

+  #define __ASM            _asm                                      /*!< asm keyword for COSMIC Compiler      */

+  #define __INLINE         inline                                    /*use -pc99 on compile line !< inline keyword for COSMIC Compiler   */

+  #define __STATIC_INLINE  static inline

+

+#endif

+

+/** __FPU_USED indicates whether an FPU is used or not.

+    This core does not support an FPU at all

+*/

+#define __FPU_USED       0

+

+#if defined ( __CC_ARM )

+  #if defined __TARGET_FPU_VFP

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __GNUC__ )

+  #if defined (__VFP_FP__) && !defined(__SOFTFP__)

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __ICCARM__ )

+  #if defined __ARMVFP__

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __TMS470__ )

+  #if defined __TI__VFP_SUPPORT____

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __TASKING__ )

+  #if defined __FPU_VFP__

+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __CSMC__ )		/* Cosmic */

+  #if ( __CSMC__ & 0x400)		// FPU present for parser

+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+#endif

+

+#include <stdint.h>                      /* standard types definitions                      */

+#include <core_cmInstr.h>                /* Core Instruction Access                         */

+#include <core_cmFunc.h>                 /* Core Function Access                            */

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __CORE_SC000_H_GENERIC */

+

+#ifndef __CMSIS_GENERIC

+

+#ifndef __CORE_SC000_H_DEPENDANT

+#define __CORE_SC000_H_DEPENDANT

+

+#ifdef __cplusplus

+ extern "C" {

+#endif

+

+/* check device defines and use defaults */

+#if defined __CHECK_DEVICE_DEFINES

+  #ifndef __SC000_REV

+    #define __SC000_REV             0x0000

+    #warning "__SC000_REV not defined in device header file; using default!"

+  #endif

+

+  #ifndef __MPU_PRESENT

+    #define __MPU_PRESENT             0

+    #warning "__MPU_PRESENT not defined in device header file; using default!"

+  #endif

+

+  #ifndef __NVIC_PRIO_BITS

+    #define __NVIC_PRIO_BITS          2

+    #warning "__NVIC_PRIO_BITS not defined in device header file; using default!"

+  #endif

+

+  #ifndef __Vendor_SysTickConfig

+    #define __Vendor_SysTickConfig    0

+    #warning "__Vendor_SysTickConfig not defined in device header file; using default!"

+  #endif

+#endif

+

+/* IO definitions (access restrictions to peripheral registers) */

+/**

+    \defgroup CMSIS_glob_defs CMSIS Global Defines

+

+    <strong>IO Type Qualifiers</strong> are used

+    \li to specify the access to peripheral variables.

+    \li for automatic generation of peripheral register debug information.

+*/

+#ifdef __cplusplus

+  #define   __I     volatile             /*!< Defines 'read only' permissions                 */

+#else

+  #define   __I     volatile const       /*!< Defines 'read only' permissions                 */

+#endif

+#define     __O     volatile             /*!< Defines 'write only' permissions                */

+#define     __IO    volatile             /*!< Defines 'read / write' permissions              */

+

+/*@} end of group SC000 */

+

+

+

+/*******************************************************************************

+ *                 Register Abstraction

+  Core Register contain:

+  - Core Register

+  - Core NVIC Register

+  - Core SCB Register

+  - Core SysTick Register

+  - Core MPU Register

+ ******************************************************************************/

+/** \defgroup CMSIS_core_register Defines and Type Definitions

+    \brief Type definitions and defines for Cortex-M processor based devices.

+*/

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_CORE  Status and Control Registers

+    \brief  Core Register type definitions.

+  @{

+ */

+

+/** \brief  Union type to access the Application Program Status Register (APSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t _reserved0:28;              /*!< bit:  0..27  Reserved                           */

+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag       */

+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag          */

+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag           */

+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag       */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} APSR_Type;

+

+/* APSR Register Definitions */

+#define APSR_N_Pos                         31                                             /*!< APSR: N Position */

+#define APSR_N_Msk                         (1UL << APSR_N_Pos)                            /*!< APSR: N Mask */

+

+#define APSR_Z_Pos                         30                                             /*!< APSR: Z Position */

+#define APSR_Z_Msk                         (1UL << APSR_Z_Pos)                            /*!< APSR: Z Mask */

+

+#define APSR_C_Pos                         29                                             /*!< APSR: C Position */

+#define APSR_C_Msk                         (1UL << APSR_C_Pos)                            /*!< APSR: C Mask */

+

+#define APSR_V_Pos                         28                                             /*!< APSR: V Position */

+#define APSR_V_Msk                         (1UL << APSR_V_Pos)                            /*!< APSR: V Mask */

+

+

+/** \brief  Union type to access the Interrupt Program Status Register (IPSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */

+    uint32_t _reserved0:23;              /*!< bit:  9..31  Reserved                           */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} IPSR_Type;

+

+/* IPSR Register Definitions */

+#define IPSR_ISR_Pos                        0                                             /*!< IPSR: ISR Position */

+#define IPSR_ISR_Msk                       (0x1FFUL /*<< IPSR_ISR_Pos*/)                  /*!< IPSR: ISR Mask */

+

+

+/** \brief  Union type to access the Special-Purpose Program Status Registers (xPSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */

+    uint32_t _reserved0:15;              /*!< bit:  9..23  Reserved                           */

+    uint32_t T:1;                        /*!< bit:     24  Thumb bit        (read 0)          */

+    uint32_t _reserved1:3;               /*!< bit: 25..27  Reserved                           */

+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag       */

+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag          */

+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag           */

+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag       */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} xPSR_Type;

+

+/* xPSR Register Definitions */

+#define xPSR_N_Pos                         31                                             /*!< xPSR: N Position */

+#define xPSR_N_Msk                         (1UL << xPSR_N_Pos)                            /*!< xPSR: N Mask */

+

+#define xPSR_Z_Pos                         30                                             /*!< xPSR: Z Position */

+#define xPSR_Z_Msk                         (1UL << xPSR_Z_Pos)                            /*!< xPSR: Z Mask */

+

+#define xPSR_C_Pos                         29                                             /*!< xPSR: C Position */

+#define xPSR_C_Msk                         (1UL << xPSR_C_Pos)                            /*!< xPSR: C Mask */

+

+#define xPSR_V_Pos                         28                                             /*!< xPSR: V Position */

+#define xPSR_V_Msk                         (1UL << xPSR_V_Pos)                            /*!< xPSR: V Mask */

+

+#define xPSR_T_Pos                         24                                             /*!< xPSR: T Position */

+#define xPSR_T_Msk                         (1UL << xPSR_T_Pos)                            /*!< xPSR: T Mask */

+

+#define xPSR_ISR_Pos                        0                                             /*!< xPSR: ISR Position */

+#define xPSR_ISR_Msk                       (0x1FFUL /*<< xPSR_ISR_Pos*/)                  /*!< xPSR: ISR Mask */

+

+

+/** \brief  Union type to access the Control Registers (CONTROL).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t _reserved0:1;               /*!< bit:      0  Reserved                           */

+    uint32_t SPSEL:1;                    /*!< bit:      1  Stack to be used                   */

+    uint32_t _reserved1:30;              /*!< bit:  2..31  Reserved                           */

+  } b;                                   /*!< Structure used for bit  access                  */

+  uint32_t w;                            /*!< Type      used for word access                  */

+} CONTROL_Type;

+

+/* CONTROL Register Definitions */

+#define CONTROL_SPSEL_Pos                   1                                             /*!< CONTROL: SPSEL Position */

+#define CONTROL_SPSEL_Msk                  (1UL << CONTROL_SPSEL_Pos)                     /*!< CONTROL: SPSEL Mask */

+

+/*@} end of group CMSIS_CORE */

+

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_NVIC  Nested Vectored Interrupt Controller (NVIC)

+    \brief      Type definitions for the NVIC Registers

+  @{

+ */

+

+/** \brief  Structure type to access the Nested Vectored Interrupt Controller (NVIC).

+ */

+typedef struct

+{

+  __IO uint32_t ISER[1];                 /*!< Offset: 0x000 (R/W)  Interrupt Set Enable Register           */

+       uint32_t RESERVED0[31];

+  __IO uint32_t ICER[1];                 /*!< Offset: 0x080 (R/W)  Interrupt Clear Enable Register          */

+       uint32_t RSERVED1[31];

+  __IO uint32_t ISPR[1];                 /*!< Offset: 0x100 (R/W)  Interrupt Set Pending Register           */

+       uint32_t RESERVED2[31];

+  __IO uint32_t ICPR[1];                 /*!< Offset: 0x180 (R/W)  Interrupt Clear Pending Register         */

+       uint32_t RESERVED3[31];

+       uint32_t RESERVED4[64];

+  __IO uint32_t IP[8];                   /*!< Offset: 0x300 (R/W)  Interrupt Priority Register              */

+}  NVIC_Type;

+

+/*@} end of group CMSIS_NVIC */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SCB     System Control Block (SCB)

+    \brief      Type definitions for the System Control Block Registers

+  @{

+ */

+

+/** \brief  Structure type to access the System Control Block (SCB).

+ */

+typedef struct

+{

+  __I  uint32_t CPUID;                   /*!< Offset: 0x000 (R/ )  CPUID Base Register                                   */

+  __IO uint32_t ICSR;                    /*!< Offset: 0x004 (R/W)  Interrupt Control and State Register                  */

+  __IO uint32_t VTOR;                    /*!< Offset: 0x008 (R/W)  Vector Table Offset Register                          */

+  __IO uint32_t AIRCR;                   /*!< Offset: 0x00C (R/W)  Application Interrupt and Reset Control Register      */

+  __IO uint32_t SCR;                     /*!< Offset: 0x010 (R/W)  System Control Register                               */

+  __IO uint32_t CCR;                     /*!< Offset: 0x014 (R/W)  Configuration Control Register                        */

+       uint32_t RESERVED0[1];

+  __IO uint32_t SHP[2];                  /*!< Offset: 0x01C (R/W)  System Handlers Priority Registers. [0] is RESERVED   */

+  __IO uint32_t SHCSR;                   /*!< Offset: 0x024 (R/W)  System Handler Control and State Register             */

+       uint32_t RESERVED1[154];

+  __IO uint32_t SFCR;                    /*!< Offset: 0x290 (R/W)  Security Features Control Register                    */

+} SCB_Type;

+

+/* SCB CPUID Register Definitions */

+#define SCB_CPUID_IMPLEMENTER_Pos          24                                             /*!< SCB CPUID: IMPLEMENTER Position */

+#define SCB_CPUID_IMPLEMENTER_Msk          (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos)          /*!< SCB CPUID: IMPLEMENTER Mask */

+

+#define SCB_CPUID_VARIANT_Pos              20                                             /*!< SCB CPUID: VARIANT Position */

+#define SCB_CPUID_VARIANT_Msk              (0xFUL << SCB_CPUID_VARIANT_Pos)               /*!< SCB CPUID: VARIANT Mask */

+

+#define SCB_CPUID_ARCHITECTURE_Pos         16                                             /*!< SCB CPUID: ARCHITECTURE Position */

+#define SCB_CPUID_ARCHITECTURE_Msk         (0xFUL << SCB_CPUID_ARCHITECTURE_Pos)          /*!< SCB CPUID: ARCHITECTURE Mask */

+

+#define SCB_CPUID_PARTNO_Pos                4                                             /*!< SCB CPUID: PARTNO Position */

+#define SCB_CPUID_PARTNO_Msk               (0xFFFUL << SCB_CPUID_PARTNO_Pos)              /*!< SCB CPUID: PARTNO Mask */

+

+#define SCB_CPUID_REVISION_Pos              0                                             /*!< SCB CPUID: REVISION Position */

+#define SCB_CPUID_REVISION_Msk             (0xFUL /*<< SCB_CPUID_REVISION_Pos*/)          /*!< SCB CPUID: REVISION Mask */

+

+/* SCB Interrupt Control State Register Definitions */

+#define SCB_ICSR_NMIPENDSET_Pos            31                                             /*!< SCB ICSR: NMIPENDSET Position */

+#define SCB_ICSR_NMIPENDSET_Msk            (1UL << SCB_ICSR_NMIPENDSET_Pos)               /*!< SCB ICSR: NMIPENDSET Mask */

+

+#define SCB_ICSR_PENDSVSET_Pos             28                                             /*!< SCB ICSR: PENDSVSET Position */

+#define SCB_ICSR_PENDSVSET_Msk             (1UL << SCB_ICSR_PENDSVSET_Pos)                /*!< SCB ICSR: PENDSVSET Mask */

+

+#define SCB_ICSR_PENDSVCLR_Pos             27                                             /*!< SCB ICSR: PENDSVCLR Position */

+#define SCB_ICSR_PENDSVCLR_Msk             (1UL << SCB_ICSR_PENDSVCLR_Pos)                /*!< SCB ICSR: PENDSVCLR Mask */

+

+#define SCB_ICSR_PENDSTSET_Pos             26                                             /*!< SCB ICSR: PENDSTSET Position */

+#define SCB_ICSR_PENDSTSET_Msk             (1UL << SCB_ICSR_PENDSTSET_Pos)                /*!< SCB ICSR: PENDSTSET Mask */

+

+#define SCB_ICSR_PENDSTCLR_Pos             25                                             /*!< SCB ICSR: PENDSTCLR Position */

+#define SCB_ICSR_PENDSTCLR_Msk             (1UL << SCB_ICSR_PENDSTCLR_Pos)                /*!< SCB ICSR: PENDSTCLR Mask */

+

+#define SCB_ICSR_ISRPREEMPT_Pos            23                                             /*!< SCB ICSR: ISRPREEMPT Position */

+#define SCB_ICSR_ISRPREEMPT_Msk            (1UL << SCB_ICSR_ISRPREEMPT_Pos)               /*!< SCB ICSR: ISRPREEMPT Mask */

+

+#define SCB_ICSR_ISRPENDING_Pos            22                                             /*!< SCB ICSR: ISRPENDING Position */

+#define SCB_ICSR_ISRPENDING_Msk            (1UL << SCB_ICSR_ISRPENDING_Pos)               /*!< SCB ICSR: ISRPENDING Mask */

+

+#define SCB_ICSR_VECTPENDING_Pos           12                                             /*!< SCB ICSR: VECTPENDING Position */

+#define SCB_ICSR_VECTPENDING_Msk           (0x1FFUL << SCB_ICSR_VECTPENDING_Pos)          /*!< SCB ICSR: VECTPENDING Mask */

+

+#define SCB_ICSR_VECTACTIVE_Pos             0                                             /*!< SCB ICSR: VECTACTIVE Position */

+#define SCB_ICSR_VECTACTIVE_Msk            (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/)       /*!< SCB ICSR: VECTACTIVE Mask */

+

+/* SCB Interrupt Control State Register Definitions */

+#define SCB_VTOR_TBLOFF_Pos                 7                                             /*!< SCB VTOR: TBLOFF Position */

+#define SCB_VTOR_TBLOFF_Msk                (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos)           /*!< SCB VTOR: TBLOFF Mask */

+

+/* SCB Application Interrupt and Reset Control Register Definitions */

+#define SCB_AIRCR_VECTKEY_Pos              16                                             /*!< SCB AIRCR: VECTKEY Position */

+#define SCB_AIRCR_VECTKEY_Msk              (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos)            /*!< SCB AIRCR: VECTKEY Mask */

+

+#define SCB_AIRCR_VECTKEYSTAT_Pos          16                                             /*!< SCB AIRCR: VECTKEYSTAT Position */

+#define SCB_AIRCR_VECTKEYSTAT_Msk          (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos)        /*!< SCB AIRCR: VECTKEYSTAT Mask */

+

+#define SCB_AIRCR_ENDIANESS_Pos            15                                             /*!< SCB AIRCR: ENDIANESS Position */

+#define SCB_AIRCR_ENDIANESS_Msk            (1UL << SCB_AIRCR_ENDIANESS_Pos)               /*!< SCB AIRCR: ENDIANESS Mask */

+

+#define SCB_AIRCR_SYSRESETREQ_Pos           2                                             /*!< SCB AIRCR: SYSRESETREQ Position */

+#define SCB_AIRCR_SYSRESETREQ_Msk          (1UL << SCB_AIRCR_SYSRESETREQ_Pos)             /*!< SCB AIRCR: SYSRESETREQ Mask */

+

+#define SCB_AIRCR_VECTCLRACTIVE_Pos         1                                             /*!< SCB AIRCR: VECTCLRACTIVE Position */

+#define SCB_AIRCR_VECTCLRACTIVE_Msk        (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos)           /*!< SCB AIRCR: VECTCLRACTIVE Mask */

+

+/* SCB System Control Register Definitions */

+#define SCB_SCR_SEVONPEND_Pos               4                                             /*!< SCB SCR: SEVONPEND Position */

+#define SCB_SCR_SEVONPEND_Msk              (1UL << SCB_SCR_SEVONPEND_Pos)                 /*!< SCB SCR: SEVONPEND Mask */

+

+#define SCB_SCR_SLEEPDEEP_Pos               2                                             /*!< SCB SCR: SLEEPDEEP Position */

+#define SCB_SCR_SLEEPDEEP_Msk              (1UL << SCB_SCR_SLEEPDEEP_Pos)                 /*!< SCB SCR: SLEEPDEEP Mask */

+

+#define SCB_SCR_SLEEPONEXIT_Pos             1                                             /*!< SCB SCR: SLEEPONEXIT Position */

+#define SCB_SCR_SLEEPONEXIT_Msk            (1UL << SCB_SCR_SLEEPONEXIT_Pos)               /*!< SCB SCR: SLEEPONEXIT Mask */

+

+/* SCB Configuration Control Register Definitions */

+#define SCB_CCR_STKALIGN_Pos                9                                             /*!< SCB CCR: STKALIGN Position */

+#define SCB_CCR_STKALIGN_Msk               (1UL << SCB_CCR_STKALIGN_Pos)                  /*!< SCB CCR: STKALIGN Mask */

+

+#define SCB_CCR_UNALIGN_TRP_Pos             3                                             /*!< SCB CCR: UNALIGN_TRP Position */

+#define SCB_CCR_UNALIGN_TRP_Msk            (1UL << SCB_CCR_UNALIGN_TRP_Pos)               /*!< SCB CCR: UNALIGN_TRP Mask */

+

+/* SCB System Handler Control and State Register Definitions */

+#define SCB_SHCSR_SVCALLPENDED_Pos         15                                             /*!< SCB SHCSR: SVCALLPENDED Position */

+#define SCB_SHCSR_SVCALLPENDED_Msk         (1UL << SCB_SHCSR_SVCALLPENDED_Pos)            /*!< SCB SHCSR: SVCALLPENDED Mask */

+

+/*@} end of group CMSIS_SCB */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB)

+    \brief      Type definitions for the System Control and ID Register not in the SCB

+  @{

+ */

+

+/** \brief  Structure type to access the System Control and ID Register not in the SCB.

+ */

+typedef struct

+{

+       uint32_t RESERVED0[2];

+  __IO uint32_t ACTLR;                   /*!< Offset: 0x008 (R/W)  Auxiliary Control Register      */

+} SCnSCB_Type;

+

+/* Auxiliary Control Register Definitions */

+#define SCnSCB_ACTLR_DISMCYCINT_Pos         0                                          /*!< ACTLR: DISMCYCINT Position */

+#define SCnSCB_ACTLR_DISMCYCINT_Msk        (1UL /*<< SCnSCB_ACTLR_DISMCYCINT_Pos*/)    /*!< ACTLR: DISMCYCINT Mask */

+

+/*@} end of group CMSIS_SCnotSCB */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SysTick     System Tick Timer (SysTick)

+    \brief      Type definitions for the System Timer Registers.

+  @{

+ */

+

+/** \brief  Structure type to access the System Timer (SysTick).

+ */

+typedef struct

+{

+  __IO uint32_t CTRL;                    /*!< Offset: 0x000 (R/W)  SysTick Control and Status Register */

+  __IO uint32_t LOAD;                    /*!< Offset: 0x004 (R/W)  SysTick Reload Value Register       */

+  __IO uint32_t VAL;                     /*!< Offset: 0x008 (R/W)  SysTick Current Value Register      */

+  __I  uint32_t CALIB;                   /*!< Offset: 0x00C (R/ )  SysTick Calibration Register        */

+} SysTick_Type;

+

+/* SysTick Control / Status Register Definitions */

+#define SysTick_CTRL_COUNTFLAG_Pos         16                                             /*!< SysTick CTRL: COUNTFLAG Position */

+#define SysTick_CTRL_COUNTFLAG_Msk         (1UL << SysTick_CTRL_COUNTFLAG_Pos)            /*!< SysTick CTRL: COUNTFLAG Mask */

+

+#define SysTick_CTRL_CLKSOURCE_Pos          2                                             /*!< SysTick CTRL: CLKSOURCE Position */

+#define SysTick_CTRL_CLKSOURCE_Msk         (1UL << SysTick_CTRL_CLKSOURCE_Pos)            /*!< SysTick CTRL: CLKSOURCE Mask */

+

+#define SysTick_CTRL_TICKINT_Pos            1                                             /*!< SysTick CTRL: TICKINT Position */

+#define SysTick_CTRL_TICKINT_Msk           (1UL << SysTick_CTRL_TICKINT_Pos)              /*!< SysTick CTRL: TICKINT Mask */

+

+#define SysTick_CTRL_ENABLE_Pos             0                                             /*!< SysTick CTRL: ENABLE Position */

+#define SysTick_CTRL_ENABLE_Msk            (1UL /*<< SysTick_CTRL_ENABLE_Pos*/)           /*!< SysTick CTRL: ENABLE Mask */

+

+/* SysTick Reload Register Definitions */

+#define SysTick_LOAD_RELOAD_Pos             0                                             /*!< SysTick LOAD: RELOAD Position */

+#define SysTick_LOAD_RELOAD_Msk            (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/)    /*!< SysTick LOAD: RELOAD Mask */

+

+/* SysTick Current Register Definitions */

+#define SysTick_VAL_CURRENT_Pos             0                                             /*!< SysTick VAL: CURRENT Position */

+#define SysTick_VAL_CURRENT_Msk            (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/)    /*!< SysTick VAL: CURRENT Mask */

+

+/* SysTick Calibration Register Definitions */

+#define SysTick_CALIB_NOREF_Pos            31                                             /*!< SysTick CALIB: NOREF Position */

+#define SysTick_CALIB_NOREF_Msk            (1UL << SysTick_CALIB_NOREF_Pos)               /*!< SysTick CALIB: NOREF Mask */

+

+#define SysTick_CALIB_SKEW_Pos             30                                             /*!< SysTick CALIB: SKEW Position */

+#define SysTick_CALIB_SKEW_Msk             (1UL << SysTick_CALIB_SKEW_Pos)                /*!< SysTick CALIB: SKEW Mask */

+

+#define SysTick_CALIB_TENMS_Pos             0                                             /*!< SysTick CALIB: TENMS Position */

+#define SysTick_CALIB_TENMS_Msk            (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/)    /*!< SysTick CALIB: TENMS Mask */

+

+/*@} end of group CMSIS_SysTick */

+

+#if (__MPU_PRESENT == 1)

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_MPU     Memory Protection Unit (MPU)

+    \brief      Type definitions for the Memory Protection Unit (MPU)

+  @{

+ */

+

+/** \brief  Structure type to access the Memory Protection Unit (MPU).

+ */

+typedef struct

+{

+  __I  uint32_t TYPE;                    /*!< Offset: 0x000 (R/ )  MPU Type Register                              */

+  __IO uint32_t CTRL;                    /*!< Offset: 0x004 (R/W)  MPU Control Register                           */

+  __IO uint32_t RNR;                     /*!< Offset: 0x008 (R/W)  MPU Region Number Register                     */

+  __IO uint32_t RBAR;                    /*!< Offset: 0x00C (R/W)  MPU Region Base Address Register               */

+  __IO uint32_t RASR;                    /*!< Offset: 0x010 (R/W)  MPU Region Attribute and Size Register         */

+} MPU_Type;

+

+/* MPU Type Register */

+#define MPU_TYPE_IREGION_Pos               16                                             /*!< MPU TYPE: IREGION Position */

+#define MPU_TYPE_IREGION_Msk               (0xFFUL << MPU_TYPE_IREGION_Pos)               /*!< MPU TYPE: IREGION Mask */

+

+#define MPU_TYPE_DREGION_Pos                8                                             /*!< MPU TYPE: DREGION Position */

+#define MPU_TYPE_DREGION_Msk               (0xFFUL << MPU_TYPE_DREGION_Pos)               /*!< MPU TYPE: DREGION Mask */

+

+#define MPU_TYPE_SEPARATE_Pos               0                                             /*!< MPU TYPE: SEPARATE Position */

+#define MPU_TYPE_SEPARATE_Msk              (1UL /*<< MPU_TYPE_SEPARATE_Pos*/)             /*!< MPU TYPE: SEPARATE Mask */

+

+/* MPU Control Register */

+#define MPU_CTRL_PRIVDEFENA_Pos             2                                             /*!< MPU CTRL: PRIVDEFENA Position */

+#define MPU_CTRL_PRIVDEFENA_Msk            (1UL << MPU_CTRL_PRIVDEFENA_Pos)               /*!< MPU CTRL: PRIVDEFENA Mask */

+

+#define MPU_CTRL_HFNMIENA_Pos               1                                             /*!< MPU CTRL: HFNMIENA Position */

+#define MPU_CTRL_HFNMIENA_Msk              (1UL << MPU_CTRL_HFNMIENA_Pos)                 /*!< MPU CTRL: HFNMIENA Mask */

+

+#define MPU_CTRL_ENABLE_Pos                 0                                             /*!< MPU CTRL: ENABLE Position */

+#define MPU_CTRL_ENABLE_Msk                (1UL /*<< MPU_CTRL_ENABLE_Pos*/)               /*!< MPU CTRL: ENABLE Mask */

+

+/* MPU Region Number Register */

+#define MPU_RNR_REGION_Pos                  0                                             /*!< MPU RNR: REGION Position */

+#define MPU_RNR_REGION_Msk                 (0xFFUL /*<< MPU_RNR_REGION_Pos*/)             /*!< MPU RNR: REGION Mask */

+

+/* MPU Region Base Address Register */

+#define MPU_RBAR_ADDR_Pos                   8                                             /*!< MPU RBAR: ADDR Position */

+#define MPU_RBAR_ADDR_Msk                  (0xFFFFFFUL << MPU_RBAR_ADDR_Pos)              /*!< MPU RBAR: ADDR Mask */

+

+#define MPU_RBAR_VALID_Pos                  4                                             /*!< MPU RBAR: VALID Position */

+#define MPU_RBAR_VALID_Msk                 (1UL << MPU_RBAR_VALID_Pos)                    /*!< MPU RBAR: VALID Mask */

+

+#define MPU_RBAR_REGION_Pos                 0                                             /*!< MPU RBAR: REGION Position */

+#define MPU_RBAR_REGION_Msk                (0xFUL /*<< MPU_RBAR_REGION_Pos*/)             /*!< MPU RBAR: REGION Mask */

+

+/* MPU Region Attribute and Size Register */

+#define MPU_RASR_ATTRS_Pos                 16                                             /*!< MPU RASR: MPU Region Attribute field Position */

+#define MPU_RASR_ATTRS_Msk                 (0xFFFFUL << MPU_RASR_ATTRS_Pos)               /*!< MPU RASR: MPU Region Attribute field Mask */

+

+#define MPU_RASR_XN_Pos                    28                                             /*!< MPU RASR: ATTRS.XN Position */

+#define MPU_RASR_XN_Msk                    (1UL << MPU_RASR_XN_Pos)                       /*!< MPU RASR: ATTRS.XN Mask */

+

+#define MPU_RASR_AP_Pos                    24                                             /*!< MPU RASR: ATTRS.AP Position */

+#define MPU_RASR_AP_Msk                    (0x7UL << MPU_RASR_AP_Pos)                     /*!< MPU RASR: ATTRS.AP Mask */

+

+#define MPU_RASR_TEX_Pos                   19                                             /*!< MPU RASR: ATTRS.TEX Position */

+#define MPU_RASR_TEX_Msk                   (0x7UL << MPU_RASR_TEX_Pos)                    /*!< MPU RASR: ATTRS.TEX Mask */

+

+#define MPU_RASR_S_Pos                     18                                             /*!< MPU RASR: ATTRS.S Position */

+#define MPU_RASR_S_Msk                     (1UL << MPU_RASR_S_Pos)                        /*!< MPU RASR: ATTRS.S Mask */

+

+#define MPU_RASR_C_Pos                     17                                             /*!< MPU RASR: ATTRS.C Position */

+#define MPU_RASR_C_Msk                     (1UL << MPU_RASR_C_Pos)                        /*!< MPU RASR: ATTRS.C Mask */

+

+#define MPU_RASR_B_Pos                     16                                             /*!< MPU RASR: ATTRS.B Position */

+#define MPU_RASR_B_Msk                     (1UL << MPU_RASR_B_Pos)                        /*!< MPU RASR: ATTRS.B Mask */

+

+#define MPU_RASR_SRD_Pos                    8                                             /*!< MPU RASR: Sub-Region Disable Position */

+#define MPU_RASR_SRD_Msk                   (0xFFUL << MPU_RASR_SRD_Pos)                   /*!< MPU RASR: Sub-Region Disable Mask */

+

+#define MPU_RASR_SIZE_Pos                   1                                             /*!< MPU RASR: Region Size Field Position */

+#define MPU_RASR_SIZE_Msk                  (0x1FUL << MPU_RASR_SIZE_Pos)                  /*!< MPU RASR: Region Size Field Mask */

+

+#define MPU_RASR_ENABLE_Pos                 0                                             /*!< MPU RASR: Region enable bit Position */

+#define MPU_RASR_ENABLE_Msk                (1UL /*<< MPU_RASR_ENABLE_Pos*/)               /*!< MPU RASR: Region enable bit Mask */

+

+/*@} end of group CMSIS_MPU */

+#endif

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_CoreDebug       Core Debug Registers (CoreDebug)

+    \brief      SC000 Core Debug Registers (DCB registers, SHCSR, and DFSR)

+                are only accessible over DAP and not via processor. Therefore

+                they are not covered by the Cortex-M0 header file.

+  @{

+ */

+/*@} end of group CMSIS_CoreDebug */

+

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_core_base     Core Definitions

+    \brief      Definitions for base addresses, unions, and structures.

+  @{

+ */

+

+/* Memory mapping of SC000 Hardware */

+#define SCS_BASE            (0xE000E000UL)                            /*!< System Control Space Base Address */

+#define SysTick_BASE        (SCS_BASE +  0x0010UL)                    /*!< SysTick Base Address              */

+#define NVIC_BASE           (SCS_BASE +  0x0100UL)                    /*!< NVIC Base Address                 */

+#define SCB_BASE            (SCS_BASE +  0x0D00UL)                    /*!< System Control Block Base Address */

+

+#define SCnSCB              ((SCnSCB_Type    *)     SCS_BASE      )   /*!< System control Register not in SCB */

+#define SCB                 ((SCB_Type       *)     SCB_BASE      )   /*!< SCB configuration struct           */

+#define SysTick             ((SysTick_Type   *)     SysTick_BASE  )   /*!< SysTick configuration struct       */

+#define NVIC                ((NVIC_Type      *)     NVIC_BASE     )   /*!< NVIC configuration struct          */

+

+#if (__MPU_PRESENT == 1)

+  #define MPU_BASE          (SCS_BASE +  0x0D90UL)                    /*!< Memory Protection Unit             */

+  #define MPU               ((MPU_Type       *)     MPU_BASE      )   /*!< Memory Protection Unit             */

+#endif

+

+/*@} */

+

+

+

+/*******************************************************************************

+ *                Hardware Abstraction Layer

+  Core Function Interface contains:

+  - Core NVIC Functions

+  - Core SysTick Functions

+  - Core Register Access Functions

+ ******************************************************************************/

+/** \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference

+*/

+

+

+

+/* ##########################   NVIC functions  #################################### */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_NVICFunctions NVIC Functions

+    \brief      Functions that manage interrupts and exceptions via the NVIC.

+    @{

+ */

+

+/* Interrupt Priorities are WORD accessible only under ARMv6M                   */

+/* The following MACROS handle generation of the register offset and byte masks */

+#define _BIT_SHIFT(IRQn)         (  ((((uint32_t)(int32_t)(IRQn))         )      &  0x03UL) * 8UL)

+#define _SHP_IDX(IRQn)           ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >>    2UL)      )

+#define _IP_IDX(IRQn)            (   (((uint32_t)(int32_t)(IRQn))                >>    2UL)      )

+

+

+/** \brief  Enable External Interrupt

+

+    The function enables a device-specific interrupt in the NVIC interrupt controller.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_EnableIRQ(IRQn_Type IRQn)

+{

+  NVIC->ISER[0] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Disable External Interrupt

+

+    The function disables a device-specific interrupt in the NVIC interrupt controller.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_DisableIRQ(IRQn_Type IRQn)

+{

+  NVIC->ICER[0] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Get Pending Interrupt

+

+    The function reads the pending register in the NVIC and returns the pending bit

+    for the specified interrupt.

+

+    \param [in]      IRQn  Interrupt number.

+

+    \return             0  Interrupt status is not pending.

+    \return             1  Interrupt status is pending.

+ */

+__STATIC_INLINE uint32_t NVIC_GetPendingIRQ(IRQn_Type IRQn)

+{

+  return((uint32_t)(((NVIC->ISPR[0] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));

+}

+

+

+/** \brief  Set Pending Interrupt

+

+    The function sets the pending bit of an external interrupt.

+

+    \param [in]      IRQn  Interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_SetPendingIRQ(IRQn_Type IRQn)

+{

+  NVIC->ISPR[0] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Clear Pending Interrupt

+

+    The function clears the pending bit of an external interrupt.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_ClearPendingIRQ(IRQn_Type IRQn)

+{

+  NVIC->ICPR[0] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Set Interrupt Priority

+

+    The function sets the priority of an interrupt.

+

+    \note The priority cannot be set for every core interrupt.

+

+    \param [in]      IRQn  Interrupt number.

+    \param [in]  priority  Priority to set.

+ */

+__STATIC_INLINE void NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)

+{

+  if((int32_t)(IRQn) < 0) {

+    SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |

+       (((priority << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));

+  }

+  else {

+    NVIC->IP[_IP_IDX(IRQn)]  = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)]  & ~(0xFFUL << _BIT_SHIFT(IRQn))) |

+       (((priority << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));

+  }

+}

+

+

+/** \brief  Get Interrupt Priority

+

+    The function reads the priority of an interrupt. The interrupt

+    number can be positive to specify an external (device specific)

+    interrupt, or negative to specify an internal (core) interrupt.

+

+

+    \param [in]   IRQn  Interrupt number.

+    \return             Interrupt Priority. Value is aligned automatically to the implemented

+                        priority bits of the microcontroller.

+ */

+__STATIC_INLINE uint32_t NVIC_GetPriority(IRQn_Type IRQn)

+{

+

+  if((int32_t)(IRQn) < 0) {

+    return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8 - __NVIC_PRIO_BITS)));

+  }

+  else {

+    return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8 - __NVIC_PRIO_BITS)));

+  }

+}

+

+

+/** \brief  System Reset

+

+    The function initiates a system reset request to reset the MCU.

+ */

+__STATIC_INLINE void NVIC_SystemReset(void)

+{

+  __DSB();                                                     /* Ensure all outstanding memory accesses included

+                                                                  buffered write are completed before reset */

+  SCB->AIRCR  = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |

+                 SCB_AIRCR_SYSRESETREQ_Msk);

+  __DSB();                                                     /* Ensure completion of memory access */

+  while(1) { __NOP(); }                                        /* wait until reset */

+}

+

+/*@} end of CMSIS_Core_NVICFunctions */

+

+

+

+/* ##################################    SysTick function  ############################################ */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_SysTickFunctions SysTick Functions

+    \brief      Functions that configure the System.

+  @{

+ */

+

+#if (__Vendor_SysTickConfig == 0)

+

+/** \brief  System Tick Configuration

+

+    The function initializes the System Timer and its interrupt, and starts the System Tick Timer.

+    Counter is in free running mode to generate periodic interrupts.

+

+    \param [in]  ticks  Number of ticks between two interrupts.

+

+    \return          0  Function succeeded.

+    \return          1  Function failed.

+

+    \note     When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the

+    function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>

+    must contain a vendor-specific implementation of this function.

+

+ */

+__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)

+{

+  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) {return (1UL);}      /* Reload value impossible */

+

+  SysTick->LOAD  = (uint32_t)(ticks - 1UL);                         /* set reload register */

+  NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */

+  SysTick->VAL   = 0UL;                                             /* Load the SysTick Counter Value */

+  SysTick->CTRL  = SysTick_CTRL_CLKSOURCE_Msk |

+                   SysTick_CTRL_TICKINT_Msk   |

+                   SysTick_CTRL_ENABLE_Msk;                         /* Enable SysTick IRQ and SysTick Timer */

+  return (0UL);                                                     /* Function successful */

+}

+

+#endif

+

+/*@} end of CMSIS_Core_SysTickFunctions */

+

+

+

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __CORE_SC000_H_DEPENDANT */

+

+#endif /* __CMSIS_GENERIC */

diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_sc300.h b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_sc300.h
new file mode 100644
index 0000000..d458d71
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Include/core_sc300.h
@@ -0,0 +1,1675 @@
+/**************************************************************************//**

+ * @file     core_sc300.h

+ * @brief    CMSIS SC300 Core Peripheral Access Layer Header File

+ * @version  V4.10

+ * @date     18. March 2015

+ *

+ * @note

+ *

+ ******************************************************************************/

+/* Copyright (c) 2009 - 2015 ARM LIMITED

+

+   All rights reserved.

+   Redistribution and use in source and binary forms, with or without

+   modification, are permitted provided that the following conditions are met:

+   - Redistributions of source code must retain the above copyright

+     notice, this list of conditions and the following disclaimer.

+   - Redistributions in binary form must reproduce the above copyright

+     notice, this list of conditions and the following disclaimer in the

+     documentation and/or other materials provided with the distribution.

+   - Neither the name of ARM nor the names of its contributors may be used

+     to endorse or promote products derived from this software without

+     specific prior written permission.

+   *

+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"

+   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE

+   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE

+   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE

+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR

+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF

+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS

+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN

+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)

+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE

+   POSSIBILITY OF SUCH DAMAGE.

+   ---------------------------------------------------------------------------*/

+

+

+#if defined ( __ICCARM__ )

+ #pragma system_include  /* treat file as system include file for MISRA check */

+#endif

+

+#ifndef __CORE_SC300_H_GENERIC

+#define __CORE_SC300_H_GENERIC

+

+#ifdef __cplusplus

+ extern "C" {

+#endif

+

+/** \page CMSIS_MISRA_Exceptions  MISRA-C:2004 Compliance Exceptions

+  CMSIS violates the following MISRA-C:2004 rules:

+

+   \li Required Rule 8.5, object/function definition in header file.<br>

+     Function definitions in header files are used to allow 'inlining'.

+

+   \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>

+     Unions are used for effective representation of core registers.

+

+   \li Advisory Rule 19.7, Function-like macro defined.<br>

+     Function-like macros are used to allow more efficient code.

+ */

+

+

+/*******************************************************************************

+ *                 CMSIS definitions

+ ******************************************************************************/

+/** \ingroup SC300

+  @{

+ */

+

+/*  CMSIS SC300 definitions */

+#define __SC300_CMSIS_VERSION_MAIN  (0x04)                                   /*!< [31:16] CMSIS HAL main version */

+#define __SC300_CMSIS_VERSION_SUB   (0x00)                                   /*!< [15:0]  CMSIS HAL sub version  */

+#define __SC300_CMSIS_VERSION       ((__SC300_CMSIS_VERSION_MAIN << 16) | \

+                                      __SC300_CMSIS_VERSION_SUB          )   /*!< CMSIS HAL version number       */

+

+#define __CORTEX_SC                 (300)                                     /*!< Cortex secure core             */

+

+

+#if   defined ( __CC_ARM )

+  #define __ASM            __asm                                      /*!< asm keyword for ARM Compiler          */

+  #define __INLINE         __inline                                   /*!< inline keyword for ARM Compiler       */

+  #define __STATIC_INLINE  static __inline

+

+#elif defined ( __GNUC__ )

+  #define __ASM            __asm                                      /*!< asm keyword for GNU Compiler          */

+  #define __INLINE         inline                                     /*!< inline keyword for GNU Compiler       */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __ICCARM__ )

+  #define __ASM            __asm                                      /*!< asm keyword for IAR Compiler          */

+  #define __INLINE         inline                                     /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __TMS470__ )

+  #define __ASM            __asm                                      /*!< asm keyword for TI CCS Compiler       */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __TASKING__ )

+  #define __ASM            __asm                                      /*!< asm keyword for TASKING Compiler      */

+  #define __INLINE         inline                                     /*!< inline keyword for TASKING Compiler   */

+  #define __STATIC_INLINE  static inline

+

+#elif defined ( __CSMC__ )

+  #define __packed

+  #define __ASM            _asm                                      /*!< asm keyword for COSMIC Compiler      */

+  #define __INLINE         inline                                    /*!< inline keyword for COSMIC Compiler; requires -pc99 on the compile line */

+  #define __STATIC_INLINE  static inline

+

+#endif

+

+/** __FPU_USED indicates whether an FPU is used or not.

+    This core does not support an FPU at all

+*/

+#define __FPU_USED       0

+

+#if defined ( __CC_ARM )

+  #if defined __TARGET_FPU_VFP

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __GNUC__ )

+  #if defined (__VFP_FP__) && !defined(__SOFTFP__)

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __ICCARM__ )

+  #if defined __ARMVFP__

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __TMS470__ )

+  #if defined __TI__VFP_SUPPORT____

+    #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __TASKING__ )

+  #if defined __FPU_VFP__

+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+

+#elif defined ( __CSMC__ )		/* Cosmic */

+  #if ( __CSMC__ & 0x400)		// FPU present for parser

+    #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"

+  #endif

+#endif

+

+#include <stdint.h>                      /* standard types definitions                      */

+#include <core_cmInstr.h>                /* Core Instruction Access                         */

+#include <core_cmFunc.h>                 /* Core Function Access                            */

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __CORE_SC300_H_GENERIC */

+

+#ifndef __CMSIS_GENERIC

+

+#ifndef __CORE_SC300_H_DEPENDANT

+#define __CORE_SC300_H_DEPENDANT

+

+#ifdef __cplusplus

+ extern "C" {

+#endif

+

+/* check device defines and use defaults */

+#if defined __CHECK_DEVICE_DEFINES

+  #ifndef __SC300_REV

+    #define __SC300_REV               0x0000

+    #warning "__SC300_REV not defined in device header file; using default!"

+  #endif

+

+  #ifndef __MPU_PRESENT

+    #define __MPU_PRESENT             0

+    #warning "__MPU_PRESENT not defined in device header file; using default!"

+  #endif

+

+  #ifndef __NVIC_PRIO_BITS

+    #define __NVIC_PRIO_BITS          4

+    #warning "__NVIC_PRIO_BITS not defined in device header file; using default!"

+  #endif

+

+  #ifndef __Vendor_SysTickConfig

+    #define __Vendor_SysTickConfig    0

+    #warning "__Vendor_SysTickConfig not defined in device header file; using default!"

+  #endif

+#endif

+

+/* IO definitions (access restrictions to peripheral registers) */

+/**

+    \defgroup CMSIS_glob_defs CMSIS Global Defines

+

+    <strong>IO Type Qualifiers</strong> are used

+    \li to specify the access to peripheral variables.

+    \li for automatic generation of peripheral register debug information.

+*/

+#ifdef __cplusplus

+  #define   __I     volatile             /*!< Defines 'read only' permissions                 */

+#else

+  #define   __I     volatile const       /*!< Defines 'read only' permissions                 */

+#endif

+#define     __O     volatile             /*!< Defines 'write only' permissions                */

+#define     __IO    volatile             /*!< Defines 'read / write' permissions              */

+

+/*@} end of group SC300 */

+

+

+

+/*******************************************************************************

+ *                 Register Abstraction

+  Core Register contain:

+  - Core Register

+  - Core NVIC Register

+  - Core SCB Register

+  - Core SysTick Register

+  - Core Debug Register

+  - Core MPU Register

+ ******************************************************************************/

+/** \defgroup CMSIS_core_register Defines and Type Definitions

+    \brief Type definitions and defines for Cortex-M processor based devices.

+*/

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_CORE  Status and Control Registers

+    \brief  Core Register type definitions.

+  @{

+ */

+

+/** \brief  Union type to access the Application Program Status Register (APSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t _reserved0:27;              /*!< bit:  0..26  Reserved                           */

+    uint32_t Q:1;                        /*!< bit:     27  Saturation condition flag          */

+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag       */

+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag          */

+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag           */

+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag       */

+  } b;                                   /*!< Structure used for bit-field access             */

+  uint32_t w;                            /*!< Type      used for whole-word access            */

+} APSR_Type;

+

+/* APSR Register Definitions (positions/masks mirror the APSR_Type bitfields above) */

+#define APSR_N_Pos                         31                                             /*!< APSR: N Position */

+#define APSR_N_Msk                         (1UL << APSR_N_Pos)                            /*!< APSR: N Mask */

+

+#define APSR_Z_Pos                         30                                             /*!< APSR: Z Position */

+#define APSR_Z_Msk                         (1UL << APSR_Z_Pos)                            /*!< APSR: Z Mask */

+

+#define APSR_C_Pos                         29                                             /*!< APSR: C Position */

+#define APSR_C_Msk                         (1UL << APSR_C_Pos)                            /*!< APSR: C Mask */

+

+#define APSR_V_Pos                         28                                             /*!< APSR: V Position */

+#define APSR_V_Msk                         (1UL << APSR_V_Pos)                            /*!< APSR: V Mask */

+

+#define APSR_Q_Pos                         27                                             /*!< APSR: Q Position */

+#define APSR_Q_Msk                         (1UL << APSR_Q_Pos)                            /*!< APSR: Q Mask */

+

+

+/** \brief  Union type to access the Interrupt Program Status Register (IPSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */

+    uint32_t _reserved0:23;              /*!< bit:  9..31  Reserved                           */

+  } b;                                   /*!< Structure used for bit-field access             */

+  uint32_t w;                            /*!< Type      used for whole-word access            */

+} IPSR_Type;

+

+/* IPSR Register Definitions (shift by *_Pos is elided in the mask because the position is 0) */

+#define IPSR_ISR_Pos                        0                                             /*!< IPSR: ISR Position */

+#define IPSR_ISR_Msk                       (0x1FFUL /*<< IPSR_ISR_Pos*/)                  /*!< IPSR: ISR Mask */

+

+

+/** \brief  Union type to access the Special-Purpose Program Status Registers (xPSR).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t ISR:9;                      /*!< bit:  0.. 8  Exception number                   */

+    uint32_t _reserved0:15;              /*!< bit:  9..23  Reserved                           */

+    uint32_t T:1;                        /*!< bit:     24  Thumb bit        (read 0)          */

+    uint32_t IT:2;                       /*!< bit: 25..26  saved IT state   (read 0)          */

+    uint32_t Q:1;                        /*!< bit:     27  Saturation condition flag          */

+    uint32_t V:1;                        /*!< bit:     28  Overflow condition code flag       */

+    uint32_t C:1;                        /*!< bit:     29  Carry condition code flag          */

+    uint32_t Z:1;                        /*!< bit:     30  Zero condition code flag           */

+    uint32_t N:1;                        /*!< bit:     31  Negative condition code flag       */

+  } b;                                   /*!< Structure used for bit-field access             */

+  uint32_t w;                            /*!< Type      used for whole-word access            */

+} xPSR_Type;

+

+/* xPSR Register Definitions (combined view: ISR field plus T/IT execution state and APSR condition flags) */

+#define xPSR_N_Pos                         31                                             /*!< xPSR: N Position */

+#define xPSR_N_Msk                         (1UL << xPSR_N_Pos)                            /*!< xPSR: N Mask */

+

+#define xPSR_Z_Pos                         30                                             /*!< xPSR: Z Position */

+#define xPSR_Z_Msk                         (1UL << xPSR_Z_Pos)                            /*!< xPSR: Z Mask */

+

+#define xPSR_C_Pos                         29                                             /*!< xPSR: C Position */

+#define xPSR_C_Msk                         (1UL << xPSR_C_Pos)                            /*!< xPSR: C Mask */

+

+#define xPSR_V_Pos                         28                                             /*!< xPSR: V Position */

+#define xPSR_V_Msk                         (1UL << xPSR_V_Pos)                            /*!< xPSR: V Mask */

+

+#define xPSR_Q_Pos                         27                                             /*!< xPSR: Q Position */

+#define xPSR_Q_Msk                         (1UL << xPSR_Q_Pos)                            /*!< xPSR: Q Mask */

+

+#define xPSR_IT_Pos                        25                                             /*!< xPSR: IT Position */

+#define xPSR_IT_Msk                        (3UL << xPSR_IT_Pos)                           /*!< xPSR: IT Mask */

+

+#define xPSR_T_Pos                         24                                             /*!< xPSR: T Position */

+#define xPSR_T_Msk                         (1UL << xPSR_T_Pos)                            /*!< xPSR: T Mask */

+

+#define xPSR_ISR_Pos                        0                                             /*!< xPSR: ISR Position */

+#define xPSR_ISR_Msk                       (0x1FFUL /*<< xPSR_ISR_Pos*/)                  /*!< xPSR: ISR Mask */

+

+

+/** \brief  Union type to access the Control Registers (CONTROL).

+ */

+typedef union

+{

+  struct

+  {

+    uint32_t nPRIV:1;                    /*!< bit:      0  Execution privilege in Thread mode */

+    uint32_t SPSEL:1;                    /*!< bit:      1  Stack to be used                   */

+    uint32_t _reserved1:30;              /*!< bit:  2..31  Reserved                           */

+  } b;                                   /*!< Structure used for bit-field access             */

+  uint32_t w;                            /*!< Type      used for whole-word access            */

+} CONTROL_Type;

+

+/* CONTROL Register Definitions (positions/masks mirror the CONTROL_Type bitfields above) */

+#define CONTROL_SPSEL_Pos                   1                                             /*!< CONTROL: SPSEL Position */

+#define CONTROL_SPSEL_Msk                  (1UL << CONTROL_SPSEL_Pos)                     /*!< CONTROL: SPSEL Mask */

+

+#define CONTROL_nPRIV_Pos                   0                                             /*!< CONTROL: nPRIV Position */

+#define CONTROL_nPRIV_Msk                  (1UL /*<< CONTROL_nPRIV_Pos*/)                 /*!< CONTROL: nPRIV Mask */

+

+/*@} end of group CMSIS_CORE */

+

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_NVIC  Nested Vectored Interrupt Controller (NVIC)

+    \brief      Type definitions for the NVIC Registers

+  @{

+ */

+

+/** \brief  Structure type to access the Nested Vectored Interrupt Controller (NVIC).

+ */

+typedef struct

+{

+  __IO uint32_t ISER[8];                 /*!< Offset: 0x000 (R/W)  Interrupt Set Enable Register           */

+       uint32_t RESERVED0[24];

+  __IO uint32_t ICER[8];                 /*!< Offset: 0x080 (R/W)  Interrupt Clear Enable Register         */

+       uint32_t RSERVED1[24];            /* NOTE(review): 'RSERVED1' (sic) is a long-standing CMSIS member name; do not rename — kept for source compatibility */

+  __IO uint32_t ISPR[8];                 /*!< Offset: 0x100 (R/W)  Interrupt Set Pending Register          */

+       uint32_t RESERVED2[24];

+  __IO uint32_t ICPR[8];                 /*!< Offset: 0x180 (R/W)  Interrupt Clear Pending Register        */

+       uint32_t RESERVED3[24];

+  __IO uint32_t IABR[8];                 /*!< Offset: 0x200 (R/W)  Interrupt Active bit Register           */

+       uint32_t RESERVED4[56];

+  __IO uint8_t  IP[240];                 /*!< Offset: 0x300 (R/W)  Interrupt Priority Register (8Bit wide) */

+       uint32_t RESERVED5[644];          /* pads from 0x3F0 up to STIR at offset 0xE00 */

+  __O  uint32_t STIR;                    /*!< Offset: 0xE00 ( /W)  Software Trigger Interrupt Register     */

+}  NVIC_Type;

+

+/* Software Triggered Interrupt Register (STIR) Definitions */

+#define NVIC_STIR_INTID_Pos                 0                                          /*!< STIR: INTLINESNUM Position */

+#define NVIC_STIR_INTID_Msk                (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/)        /*!< STIR: INTLINESNUM Mask */

+

+/*@} end of group CMSIS_NVIC */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SCB     System Control Block (SCB)

+    \brief      Type definitions for the System Control Block Registers

+  @{

+ */

+

+/** \brief  Structure type to access the System Control Block (SCB).

+ */

+typedef struct

+{

+  __I  uint32_t CPUID;                   /*!< Offset: 0x000 (R/ )  CPUID Base Register                                   */

+  __IO uint32_t ICSR;                    /*!< Offset: 0x004 (R/W)  Interrupt Control and State Register                  */

+  __IO uint32_t VTOR;                    /*!< Offset: 0x008 (R/W)  Vector Table Offset Register                          */

+  __IO uint32_t AIRCR;                   /*!< Offset: 0x00C (R/W)  Application Interrupt and Reset Control Register      */

+  __IO uint32_t SCR;                     /*!< Offset: 0x010 (R/W)  System Control Register                               */

+  __IO uint32_t CCR;                     /*!< Offset: 0x014 (R/W)  Configuration Control Register                        */

+  __IO uint8_t  SHP[12];                 /*!< Offset: 0x018 (R/W)  System Handlers Priority Registers (4-7, 8-11, 12-15) */

+  __IO uint32_t SHCSR;                   /*!< Offset: 0x024 (R/W)  System Handler Control and State Register             */

+  __IO uint32_t CFSR;                    /*!< Offset: 0x028 (R/W)  Configurable Fault Status Register                    */

+  __IO uint32_t HFSR;                    /*!< Offset: 0x02C (R/W)  HardFault Status Register                             */

+  __IO uint32_t DFSR;                    /*!< Offset: 0x030 (R/W)  Debug Fault Status Register                           */

+  __IO uint32_t MMFAR;                   /*!< Offset: 0x034 (R/W)  MemManage Fault Address Register                      */

+  __IO uint32_t BFAR;                    /*!< Offset: 0x038 (R/W)  BusFault Address Register                             */

+  __IO uint32_t AFSR;                    /*!< Offset: 0x03C (R/W)  Auxiliary Fault Status Register                       */

+  __I  uint32_t PFR[2];                  /*!< Offset: 0x040 (R/ )  Processor Feature Register                            */

+  __I  uint32_t DFR;                     /*!< Offset: 0x048 (R/ )  Debug Feature Register                                */

+  __I  uint32_t ADR;                     /*!< Offset: 0x04C (R/ )  Auxiliary Feature Register                            */

+  __I  uint32_t MMFR[4];                 /*!< Offset: 0x050 (R/ )  Memory Model Feature Register                         */

+  __I  uint32_t ISAR[5];                 /*!< Offset: 0x060 (R/ )  Instruction Set Attributes Register                   */

+       uint32_t RESERVED0[5];            /* pads 0x074..0x087 (CPACR follows at 0x088) */

+  __IO uint32_t CPACR;                   /*!< Offset: 0x088 (R/W)  Coprocessor Access Control Register                   */

+       uint32_t RESERVED1[129];          /* pads from 0x08C up to SFCR at offset 0x290 */

+  __IO uint32_t SFCR;                    /*!< Offset: 0x290 (R/W)  Security Features Control Register (SecurCore SC300-specific) */

+} SCB_Type;

+

+/* SCB CPUID Register Definitions (shifts by a *_Pos of 0 are elided but kept in comments throughout this section) */

+#define SCB_CPUID_IMPLEMENTER_Pos          24                                             /*!< SCB CPUID: IMPLEMENTER Position */

+#define SCB_CPUID_IMPLEMENTER_Msk          (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos)          /*!< SCB CPUID: IMPLEMENTER Mask */

+

+#define SCB_CPUID_VARIANT_Pos              20                                             /*!< SCB CPUID: VARIANT Position */

+#define SCB_CPUID_VARIANT_Msk              (0xFUL << SCB_CPUID_VARIANT_Pos)               /*!< SCB CPUID: VARIANT Mask */

+

+#define SCB_CPUID_ARCHITECTURE_Pos         16                                             /*!< SCB CPUID: ARCHITECTURE Position */

+#define SCB_CPUID_ARCHITECTURE_Msk         (0xFUL << SCB_CPUID_ARCHITECTURE_Pos)          /*!< SCB CPUID: ARCHITECTURE Mask */

+

+#define SCB_CPUID_PARTNO_Pos                4                                             /*!< SCB CPUID: PARTNO Position */

+#define SCB_CPUID_PARTNO_Msk               (0xFFFUL << SCB_CPUID_PARTNO_Pos)              /*!< SCB CPUID: PARTNO Mask */

+

+#define SCB_CPUID_REVISION_Pos              0                                             /*!< SCB CPUID: REVISION Position */

+#define SCB_CPUID_REVISION_Msk             (0xFUL /*<< SCB_CPUID_REVISION_Pos*/)          /*!< SCB CPUID: REVISION Mask */

+

+/* SCB Interrupt Control and State Register (ICSR) Definitions */

+#define SCB_ICSR_NMIPENDSET_Pos            31                                             /*!< SCB ICSR: NMIPENDSET Position */

+#define SCB_ICSR_NMIPENDSET_Msk            (1UL << SCB_ICSR_NMIPENDSET_Pos)               /*!< SCB ICSR: NMIPENDSET Mask */

+

+#define SCB_ICSR_PENDSVSET_Pos             28                                             /*!< SCB ICSR: PENDSVSET Position */

+#define SCB_ICSR_PENDSVSET_Msk             (1UL << SCB_ICSR_PENDSVSET_Pos)                /*!< SCB ICSR: PENDSVSET Mask */

+

+#define SCB_ICSR_PENDSVCLR_Pos             27                                             /*!< SCB ICSR: PENDSVCLR Position */

+#define SCB_ICSR_PENDSVCLR_Msk             (1UL << SCB_ICSR_PENDSVCLR_Pos)                /*!< SCB ICSR: PENDSVCLR Mask */

+

+#define SCB_ICSR_PENDSTSET_Pos             26                                             /*!< SCB ICSR: PENDSTSET Position */

+#define SCB_ICSR_PENDSTSET_Msk             (1UL << SCB_ICSR_PENDSTSET_Pos)                /*!< SCB ICSR: PENDSTSET Mask */

+

+#define SCB_ICSR_PENDSTCLR_Pos             25                                             /*!< SCB ICSR: PENDSTCLR Position */

+#define SCB_ICSR_PENDSTCLR_Msk             (1UL << SCB_ICSR_PENDSTCLR_Pos)                /*!< SCB ICSR: PENDSTCLR Mask */

+

+#define SCB_ICSR_ISRPREEMPT_Pos            23                                             /*!< SCB ICSR: ISRPREEMPT Position */

+#define SCB_ICSR_ISRPREEMPT_Msk            (1UL << SCB_ICSR_ISRPREEMPT_Pos)               /*!< SCB ICSR: ISRPREEMPT Mask */

+

+#define SCB_ICSR_ISRPENDING_Pos            22                                             /*!< SCB ICSR: ISRPENDING Position */

+#define SCB_ICSR_ISRPENDING_Msk            (1UL << SCB_ICSR_ISRPENDING_Pos)               /*!< SCB ICSR: ISRPENDING Mask */

+

+#define SCB_ICSR_VECTPENDING_Pos           12                                             /*!< SCB ICSR: VECTPENDING Position */

+#define SCB_ICSR_VECTPENDING_Msk           (0x1FFUL << SCB_ICSR_VECTPENDING_Pos)          /*!< SCB ICSR: VECTPENDING Mask */

+

+#define SCB_ICSR_RETTOBASE_Pos             11                                             /*!< SCB ICSR: RETTOBASE Position */

+#define SCB_ICSR_RETTOBASE_Msk             (1UL << SCB_ICSR_RETTOBASE_Pos)                /*!< SCB ICSR: RETTOBASE Mask */

+

+#define SCB_ICSR_VECTACTIVE_Pos             0                                             /*!< SCB ICSR: VECTACTIVE Position */

+#define SCB_ICSR_VECTACTIVE_Msk            (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/)       /*!< SCB ICSR: VECTACTIVE Mask */

+

+/* SCB Vector Table Offset Register (VTOR) Definitions */

+#define SCB_VTOR_TBLBASE_Pos               29                                             /*!< SCB VTOR: TBLBASE Position */

+#define SCB_VTOR_TBLBASE_Msk               (1UL << SCB_VTOR_TBLBASE_Pos)                  /*!< SCB VTOR: TBLBASE Mask */

+

+#define SCB_VTOR_TBLOFF_Pos                 7                                             /*!< SCB VTOR: TBLOFF Position */

+#define SCB_VTOR_TBLOFF_Msk                (0x3FFFFFUL << SCB_VTOR_TBLOFF_Pos)            /*!< SCB VTOR: TBLOFF Mask */

+

+/* SCB Application Interrupt and Reset Control Register (AIRCR) Definitions */

+#define SCB_AIRCR_VECTKEY_Pos              16                                             /*!< SCB AIRCR: VECTKEY Position */

+#define SCB_AIRCR_VECTKEY_Msk              (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos)            /*!< SCB AIRCR: VECTKEY Mask */

+

+#define SCB_AIRCR_VECTKEYSTAT_Pos          16                                             /*!< SCB AIRCR: VECTKEYSTAT Position */

+#define SCB_AIRCR_VECTKEYSTAT_Msk          (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos)        /*!< SCB AIRCR: VECTKEYSTAT Mask */

+

+#define SCB_AIRCR_ENDIANESS_Pos            15                                             /*!< SCB AIRCR: ENDIANESS Position */

+#define SCB_AIRCR_ENDIANESS_Msk            (1UL << SCB_AIRCR_ENDIANESS_Pos)               /*!< SCB AIRCR: ENDIANESS Mask */

+

+#define SCB_AIRCR_PRIGROUP_Pos              8                                             /*!< SCB AIRCR: PRIGROUP Position */

+#define SCB_AIRCR_PRIGROUP_Msk             (7UL << SCB_AIRCR_PRIGROUP_Pos)                /*!< SCB AIRCR: PRIGROUP Mask */

+

+#define SCB_AIRCR_SYSRESETREQ_Pos           2                                             /*!< SCB AIRCR: SYSRESETREQ Position */

+#define SCB_AIRCR_SYSRESETREQ_Msk          (1UL << SCB_AIRCR_SYSRESETREQ_Pos)             /*!< SCB AIRCR: SYSRESETREQ Mask */

+

+#define SCB_AIRCR_VECTCLRACTIVE_Pos         1                                             /*!< SCB AIRCR: VECTCLRACTIVE Position */

+#define SCB_AIRCR_VECTCLRACTIVE_Msk        (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos)           /*!< SCB AIRCR: VECTCLRACTIVE Mask */

+

+#define SCB_AIRCR_VECTRESET_Pos             0                                             /*!< SCB AIRCR: VECTRESET Position */

+#define SCB_AIRCR_VECTRESET_Msk            (1UL /*<< SCB_AIRCR_VECTRESET_Pos*/)           /*!< SCB AIRCR: VECTRESET Mask */

+

+/* SCB System Control Register (SCR) Definitions */

+#define SCB_SCR_SEVONPEND_Pos               4                                             /*!< SCB SCR: SEVONPEND Position */

+#define SCB_SCR_SEVONPEND_Msk              (1UL << SCB_SCR_SEVONPEND_Pos)                 /*!< SCB SCR: SEVONPEND Mask */

+

+#define SCB_SCR_SLEEPDEEP_Pos               2                                             /*!< SCB SCR: SLEEPDEEP Position */

+#define SCB_SCR_SLEEPDEEP_Msk              (1UL << SCB_SCR_SLEEPDEEP_Pos)                 /*!< SCB SCR: SLEEPDEEP Mask */

+

+#define SCB_SCR_SLEEPONEXIT_Pos             1                                             /*!< SCB SCR: SLEEPONEXIT Position */

+#define SCB_SCR_SLEEPONEXIT_Msk            (1UL << SCB_SCR_SLEEPONEXIT_Pos)               /*!< SCB SCR: SLEEPONEXIT Mask */

+

+/* SCB Configuration Control Register (CCR) Definitions */

+#define SCB_CCR_STKALIGN_Pos                9                                             /*!< SCB CCR: STKALIGN Position */

+#define SCB_CCR_STKALIGN_Msk               (1UL << SCB_CCR_STKALIGN_Pos)                  /*!< SCB CCR: STKALIGN Mask */

+

+#define SCB_CCR_BFHFNMIGN_Pos               8                                             /*!< SCB CCR: BFHFNMIGN Position */

+#define SCB_CCR_BFHFNMIGN_Msk              (1UL << SCB_CCR_BFHFNMIGN_Pos)                 /*!< SCB CCR: BFHFNMIGN Mask */

+

+#define SCB_CCR_DIV_0_TRP_Pos               4                                             /*!< SCB CCR: DIV_0_TRP Position */

+#define SCB_CCR_DIV_0_TRP_Msk              (1UL << SCB_CCR_DIV_0_TRP_Pos)                 /*!< SCB CCR: DIV_0_TRP Mask */

+

+#define SCB_CCR_UNALIGN_TRP_Pos             3                                             /*!< SCB CCR: UNALIGN_TRP Position */

+#define SCB_CCR_UNALIGN_TRP_Msk            (1UL << SCB_CCR_UNALIGN_TRP_Pos)               /*!< SCB CCR: UNALIGN_TRP Mask */

+

+#define SCB_CCR_USERSETMPEND_Pos            1                                             /*!< SCB CCR: USERSETMPEND Position */

+#define SCB_CCR_USERSETMPEND_Msk           (1UL << SCB_CCR_USERSETMPEND_Pos)              /*!< SCB CCR: USERSETMPEND Mask */

+

+#define SCB_CCR_NONBASETHRDENA_Pos          0                                             /*!< SCB CCR: NONBASETHRDENA Position */

+#define SCB_CCR_NONBASETHRDENA_Msk         (1UL /*<< SCB_CCR_NONBASETHRDENA_Pos*/)        /*!< SCB CCR: NONBASETHRDENA Mask */

+

+/* SCB System Handler Control and State Register (SHCSR) Definitions */

+#define SCB_SHCSR_USGFAULTENA_Pos          18                                             /*!< SCB SHCSR: USGFAULTENA Position */

+#define SCB_SHCSR_USGFAULTENA_Msk          (1UL << SCB_SHCSR_USGFAULTENA_Pos)             /*!< SCB SHCSR: USGFAULTENA Mask */

+

+#define SCB_SHCSR_BUSFAULTENA_Pos          17                                             /*!< SCB SHCSR: BUSFAULTENA Position */

+#define SCB_SHCSR_BUSFAULTENA_Msk          (1UL << SCB_SHCSR_BUSFAULTENA_Pos)             /*!< SCB SHCSR: BUSFAULTENA Mask */

+

+#define SCB_SHCSR_MEMFAULTENA_Pos          16                                             /*!< SCB SHCSR: MEMFAULTENA Position */

+#define SCB_SHCSR_MEMFAULTENA_Msk          (1UL << SCB_SHCSR_MEMFAULTENA_Pos)             /*!< SCB SHCSR: MEMFAULTENA Mask */

+

+#define SCB_SHCSR_SVCALLPENDED_Pos         15                                             /*!< SCB SHCSR: SVCALLPENDED Position */

+#define SCB_SHCSR_SVCALLPENDED_Msk         (1UL << SCB_SHCSR_SVCALLPENDED_Pos)            /*!< SCB SHCSR: SVCALLPENDED Mask */

+

+#define SCB_SHCSR_BUSFAULTPENDED_Pos       14                                             /*!< SCB SHCSR: BUSFAULTPENDED Position */

+#define SCB_SHCSR_BUSFAULTPENDED_Msk       (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos)          /*!< SCB SHCSR: BUSFAULTPENDED Mask */

+

+#define SCB_SHCSR_MEMFAULTPENDED_Pos       13                                             /*!< SCB SHCSR: MEMFAULTPENDED Position */

+#define SCB_SHCSR_MEMFAULTPENDED_Msk       (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos)          /*!< SCB SHCSR: MEMFAULTPENDED Mask */

+

+#define SCB_SHCSR_USGFAULTPENDED_Pos       12                                             /*!< SCB SHCSR: USGFAULTPENDED Position */

+#define SCB_SHCSR_USGFAULTPENDED_Msk       (1UL << SCB_SHCSR_USGFAULTPENDED_Pos)          /*!< SCB SHCSR: USGFAULTPENDED Mask */

+

+#define SCB_SHCSR_SYSTICKACT_Pos           11                                             /*!< SCB SHCSR: SYSTICKACT Position */

+#define SCB_SHCSR_SYSTICKACT_Msk           (1UL << SCB_SHCSR_SYSTICKACT_Pos)              /*!< SCB SHCSR: SYSTICKACT Mask */

+

+#define SCB_SHCSR_PENDSVACT_Pos            10                                             /*!< SCB SHCSR: PENDSVACT Position */

+#define SCB_SHCSR_PENDSVACT_Msk            (1UL << SCB_SHCSR_PENDSVACT_Pos)               /*!< SCB SHCSR: PENDSVACT Mask */

+

+#define SCB_SHCSR_MONITORACT_Pos            8                                             /*!< SCB SHCSR: MONITORACT Position */

+#define SCB_SHCSR_MONITORACT_Msk           (1UL << SCB_SHCSR_MONITORACT_Pos)              /*!< SCB SHCSR: MONITORACT Mask */

+

+#define SCB_SHCSR_SVCALLACT_Pos             7                                             /*!< SCB SHCSR: SVCALLACT Position */

+#define SCB_SHCSR_SVCALLACT_Msk            (1UL << SCB_SHCSR_SVCALLACT_Pos)               /*!< SCB SHCSR: SVCALLACT Mask */

+

+#define SCB_SHCSR_USGFAULTACT_Pos           3                                             /*!< SCB SHCSR: USGFAULTACT Position */

+#define SCB_SHCSR_USGFAULTACT_Msk          (1UL << SCB_SHCSR_USGFAULTACT_Pos)             /*!< SCB SHCSR: USGFAULTACT Mask */

+

+#define SCB_SHCSR_BUSFAULTACT_Pos           1                                             /*!< SCB SHCSR: BUSFAULTACT Position */

+#define SCB_SHCSR_BUSFAULTACT_Msk          (1UL << SCB_SHCSR_BUSFAULTACT_Pos)             /*!< SCB SHCSR: BUSFAULTACT Mask */

+

+#define SCB_SHCSR_MEMFAULTACT_Pos           0                                             /*!< SCB SHCSR: MEMFAULTACT Position */

+#define SCB_SHCSR_MEMFAULTACT_Msk          (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/)         /*!< SCB SHCSR: MEMFAULTACT Mask */

+

+/* SCB Configurable Fault Status Register (CFSR) Definitions */

+#define SCB_CFSR_USGFAULTSR_Pos            16                                             /*!< SCB CFSR: Usage Fault Status Register Position */

+#define SCB_CFSR_USGFAULTSR_Msk            (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos)          /*!< SCB CFSR: Usage Fault Status Register Mask */

+

+#define SCB_CFSR_BUSFAULTSR_Pos             8                                             /*!< SCB CFSR: Bus Fault Status Register Position */

+#define SCB_CFSR_BUSFAULTSR_Msk            (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos)            /*!< SCB CFSR: Bus Fault Status Register Mask */

+

+#define SCB_CFSR_MEMFAULTSR_Pos             0                                             /*!< SCB CFSR: Memory Manage Fault Status Register Position */

+#define SCB_CFSR_MEMFAULTSR_Msk            (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/)        /*!< SCB CFSR: Memory Manage Fault Status Register Mask */

+

+/* SCB HardFault Status Register (HFSR) Definitions */

+#define SCB_HFSR_DEBUGEVT_Pos              31                                             /*!< SCB HFSR: DEBUGEVT Position */

+#define SCB_HFSR_DEBUGEVT_Msk              (1UL << SCB_HFSR_DEBUGEVT_Pos)                 /*!< SCB HFSR: DEBUGEVT Mask */

+

+#define SCB_HFSR_FORCED_Pos                30                                             /*!< SCB HFSR: FORCED Position */

+#define SCB_HFSR_FORCED_Msk                (1UL << SCB_HFSR_FORCED_Pos)                   /*!< SCB HFSR: FORCED Mask */

+

+#define SCB_HFSR_VECTTBL_Pos                1                                             /*!< SCB HFSR: VECTTBL Position */

+#define SCB_HFSR_VECTTBL_Msk               (1UL << SCB_HFSR_VECTTBL_Pos)                  /*!< SCB HFSR: VECTTBL Mask */

+

+/* SCB Debug Fault Status Register (DFSR) Definitions */

+#define SCB_DFSR_EXTERNAL_Pos               4                                             /*!< SCB DFSR: EXTERNAL Position */

+#define SCB_DFSR_EXTERNAL_Msk              (1UL << SCB_DFSR_EXTERNAL_Pos)                 /*!< SCB DFSR: EXTERNAL Mask */

+

+#define SCB_DFSR_VCATCH_Pos                 3                                             /*!< SCB DFSR: VCATCH Position */

+#define SCB_DFSR_VCATCH_Msk                (1UL << SCB_DFSR_VCATCH_Pos)                   /*!< SCB DFSR: VCATCH Mask */

+

+#define SCB_DFSR_DWTTRAP_Pos                2                                             /*!< SCB DFSR: DWTTRAP Position */

+#define SCB_DFSR_DWTTRAP_Msk               (1UL << SCB_DFSR_DWTTRAP_Pos)                  /*!< SCB DFSR: DWTTRAP Mask */

+

+#define SCB_DFSR_BKPT_Pos                   1                                             /*!< SCB DFSR: BKPT Position */

+#define SCB_DFSR_BKPT_Msk                  (1UL << SCB_DFSR_BKPT_Pos)                     /*!< SCB DFSR: BKPT Mask */

+

+#define SCB_DFSR_HALTED_Pos                 0                                             /*!< SCB DFSR: HALTED Position */

+#define SCB_DFSR_HALTED_Msk                (1UL /*<< SCB_DFSR_HALTED_Pos*/)               /*!< SCB DFSR: HALTED Mask */

+

+/*@} end of group CMSIS_SCB */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB)

+    \brief      Type definitions for the System Control and ID Register not in the SCB

+  @{

+ */

+

+/** \brief  Structure type to access the System Control and ID Register not in the SCB.

+ */

+typedef struct

+{

+       uint32_t RESERVED0[1];

+  __I  uint32_t ICTR;                    /*!< Offset: 0x004 (R/ )  Interrupt Controller Type Register      */

+       uint32_t RESERVED1[1];

+} SCnSCB_Type;

+

+/* Interrupt Controller Type Register (ICTR) Definitions */

+#define SCnSCB_ICTR_INTLINESNUM_Pos         0                                          /*!< ICTR: INTLINESNUM Position */

+#define SCnSCB_ICTR_INTLINESNUM_Msk        (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/)  /*!< ICTR: INTLINESNUM Mask */

+

+/*@} end of group CMSIS_SCnSCB */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_SysTick     System Tick Timer (SysTick)

+    \brief      Type definitions for the System Timer Registers.

+  @{

+ */

+

+/** \brief  Structure type to access the System Timer (SysTick).

+ */

+typedef struct

+{

+  __IO uint32_t CTRL;                    /*!< Offset: 0x000 (R/W)  SysTick Control and Status Register */

+  __IO uint32_t LOAD;                    /*!< Offset: 0x004 (R/W)  SysTick Reload Value Register       */

+  __IO uint32_t VAL;                     /*!< Offset: 0x008 (R/W)  SysTick Current Value Register      */

+  __I  uint32_t CALIB;                   /*!< Offset: 0x00C (R/ )  SysTick Calibration Register        */

+} SysTick_Type;

+

+/* SysTick Control / Status Register (CTRL) Definitions */

+#define SysTick_CTRL_COUNTFLAG_Pos         16                                             /*!< SysTick CTRL: COUNTFLAG Position */

+#define SysTick_CTRL_COUNTFLAG_Msk         (1UL << SysTick_CTRL_COUNTFLAG_Pos)            /*!< SysTick CTRL: COUNTFLAG Mask */

+

+#define SysTick_CTRL_CLKSOURCE_Pos          2                                             /*!< SysTick CTRL: CLKSOURCE Position */

+#define SysTick_CTRL_CLKSOURCE_Msk         (1UL << SysTick_CTRL_CLKSOURCE_Pos)            /*!< SysTick CTRL: CLKSOURCE Mask */

+

+#define SysTick_CTRL_TICKINT_Pos            1                                             /*!< SysTick CTRL: TICKINT Position */

+#define SysTick_CTRL_TICKINT_Msk           (1UL << SysTick_CTRL_TICKINT_Pos)              /*!< SysTick CTRL: TICKINT Mask */

+

+#define SysTick_CTRL_ENABLE_Pos             0                                             /*!< SysTick CTRL: ENABLE Position */

+#define SysTick_CTRL_ENABLE_Msk            (1UL /*<< SysTick_CTRL_ENABLE_Pos*/)           /*!< SysTick CTRL: ENABLE Mask */

+

+/* SysTick Reload Register (LOAD) Definitions */

+#define SysTick_LOAD_RELOAD_Pos             0                                             /*!< SysTick LOAD: RELOAD Position */

+#define SysTick_LOAD_RELOAD_Msk            (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/)    /*!< SysTick LOAD: RELOAD Mask */

+

+/* SysTick Current Value Register (VAL) Definitions */

+#define SysTick_VAL_CURRENT_Pos             0                                             /*!< SysTick VAL: CURRENT Position */

+#define SysTick_VAL_CURRENT_Msk            (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/)    /*!< SysTick VAL: CURRENT Mask */

+

+/* SysTick Calibration Register (CALIB) Definitions */

+#define SysTick_CALIB_NOREF_Pos            31                                             /*!< SysTick CALIB: NOREF Position */

+#define SysTick_CALIB_NOREF_Msk            (1UL << SysTick_CALIB_NOREF_Pos)               /*!< SysTick CALIB: NOREF Mask */

+

+#define SysTick_CALIB_SKEW_Pos             30                                             /*!< SysTick CALIB: SKEW Position */

+#define SysTick_CALIB_SKEW_Msk             (1UL << SysTick_CALIB_SKEW_Pos)                /*!< SysTick CALIB: SKEW Mask */

+

+#define SysTick_CALIB_TENMS_Pos             0                                             /*!< SysTick CALIB: TENMS Position */

+#define SysTick_CALIB_TENMS_Msk            (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/)    /*!< SysTick CALIB: TENMS Mask */

+

+/*@} end of group CMSIS_SysTick */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_ITM     Instrumentation Trace Macrocell (ITM)

+    \brief      Type definitions for the Instrumentation Trace Macrocell (ITM)

+  @{

+ */

+

+/** \brief  Structure type to access the Instrumentation Trace Macrocell Register (ITM).

+ */

+typedef struct

+{

+  /* 32 write-only stimulus ports; each port accepts 8-, 16- or 32-bit writes  */

+  /* through the overlapping union members at the same offset.                 */

+  __O  union

+  {

+    __O  uint8_t    u8;                  /*!< Offset: 0x000 ( /W)  ITM Stimulus Port 8-bit                   */

+    __O  uint16_t   u16;                 /*!< Offset: 0x000 ( /W)  ITM Stimulus Port 16-bit                  */

+    __O  uint32_t   u32;                 /*!< Offset: 0x000 ( /W)  ITM Stimulus Port 32-bit                  */

+  }  PORT [32];                          /*!< Offset: 0x000 ( /W)  ITM Stimulus Port Registers               */

+       uint32_t RESERVED0[864];          /* padding: places TER at its architectural offset 0xE00 */

+  __IO uint32_t TER;                     /*!< Offset: 0xE00 (R/W)  ITM Trace Enable Register                 */

+       uint32_t RESERVED1[15];

+  __IO uint32_t TPR;                     /*!< Offset: 0xE40 (R/W)  ITM Trace Privilege Register              */

+       uint32_t RESERVED2[15];

+  __IO uint32_t TCR;                     /*!< Offset: 0xE80 (R/W)  ITM Trace Control Register                */

+       uint32_t RESERVED3[29];

+  __O  uint32_t IWR;                     /*!< Offset: 0xEF8 ( /W)  ITM Integration Write Register            */

+  __I  uint32_t IRR;                     /*!< Offset: 0xEFC (R/ )  ITM Integration Read Register             */

+  __IO uint32_t IMCR;                    /*!< Offset: 0xF00 (R/W)  ITM Integration Mode Control Register     */

+       uint32_t RESERVED4[43];

+  __O  uint32_t LAR;                     /*!< Offset: 0xFB0 ( /W)  ITM Lock Access Register                  */

+  __I  uint32_t LSR;                     /*!< Offset: 0xFB4 (R/ )  ITM Lock Status Register                  */

+       uint32_t RESERVED5[6];

+  __I  uint32_t PID4;                    /*!< Offset: 0xFD0 (R/ )  ITM Peripheral Identification Register #4 */

+  __I  uint32_t PID5;                    /*!< Offset: 0xFD4 (R/ )  ITM Peripheral Identification Register #5 */

+  __I  uint32_t PID6;                    /*!< Offset: 0xFD8 (R/ )  ITM Peripheral Identification Register #6 */

+  __I  uint32_t PID7;                    /*!< Offset: 0xFDC (R/ )  ITM Peripheral Identification Register #7 */

+  __I  uint32_t PID0;                    /*!< Offset: 0xFE0 (R/ )  ITM Peripheral Identification Register #0 */

+  __I  uint32_t PID1;                    /*!< Offset: 0xFE4 (R/ )  ITM Peripheral Identification Register #1 */

+  __I  uint32_t PID2;                    /*!< Offset: 0xFE8 (R/ )  ITM Peripheral Identification Register #2 */

+  __I  uint32_t PID3;                    /*!< Offset: 0xFEC (R/ )  ITM Peripheral Identification Register #3 */

+  __I  uint32_t CID0;                    /*!< Offset: 0xFF0 (R/ )  ITM Component  Identification Register #0 */

+  __I  uint32_t CID1;                    /*!< Offset: 0xFF4 (R/ )  ITM Component  Identification Register #1 */

+  __I  uint32_t CID2;                    /*!< Offset: 0xFF8 (R/ )  ITM Component  Identification Register #2 */

+  __I  uint32_t CID3;                    /*!< Offset: 0xFFC (R/ )  ITM Component  Identification Register #3 */

+} ITM_Type;

+

+/* ITM Trace Privilege Register Definitions */

+/* Note: for fields whose Position is 0 the "<< _Pos" shift is kept only as a  */

+/* comment, so the mask stays a plain constant (no shift-by-zero is emitted).  */

+#define ITM_TPR_PRIVMASK_Pos                0                                             /*!< ITM TPR: PRIVMASK Position */

+#define ITM_TPR_PRIVMASK_Msk               (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/)            /*!< ITM TPR: PRIVMASK Mask */

+

+/* ITM Trace Control Register Definitions */

+#define ITM_TCR_BUSY_Pos                   23                                             /*!< ITM TCR: BUSY Position */

+#define ITM_TCR_BUSY_Msk                   (1UL << ITM_TCR_BUSY_Pos)                      /*!< ITM TCR: BUSY Mask */

+

+#define ITM_TCR_TraceBusID_Pos             16                                             /*!< ITM TCR: ATBID Position */

+#define ITM_TCR_TraceBusID_Msk             (0x7FUL << ITM_TCR_TraceBusID_Pos)             /*!< ITM TCR: ATBID Mask */

+

+#define ITM_TCR_GTSFREQ_Pos                10                                             /*!< ITM TCR: Global timestamp frequency Position */

+#define ITM_TCR_GTSFREQ_Msk                (3UL << ITM_TCR_GTSFREQ_Pos)                   /*!< ITM TCR: Global timestamp frequency Mask */

+

+#define ITM_TCR_TSPrescale_Pos              8                                             /*!< ITM TCR: TSPrescale Position */

+#define ITM_TCR_TSPrescale_Msk             (3UL << ITM_TCR_TSPrescale_Pos)                /*!< ITM TCR: TSPrescale Mask */

+

+#define ITM_TCR_SWOENA_Pos                  4                                             /*!< ITM TCR: SWOENA Position */

+#define ITM_TCR_SWOENA_Msk                 (1UL << ITM_TCR_SWOENA_Pos)                    /*!< ITM TCR: SWOENA Mask */

+

+#define ITM_TCR_DWTENA_Pos                  3                                             /*!< ITM TCR: DWTENA Position */

+#define ITM_TCR_DWTENA_Msk                 (1UL << ITM_TCR_DWTENA_Pos)                    /*!< ITM TCR: DWTENA Mask */

+

+#define ITM_TCR_SYNCENA_Pos                 2                                             /*!< ITM TCR: SYNCENA Position */

+#define ITM_TCR_SYNCENA_Msk                (1UL << ITM_TCR_SYNCENA_Pos)                   /*!< ITM TCR: SYNCENA Mask */

+

+#define ITM_TCR_TSENA_Pos                   1                                             /*!< ITM TCR: TSENA Position */

+#define ITM_TCR_TSENA_Msk                  (1UL << ITM_TCR_TSENA_Pos)                     /*!< ITM TCR: TSENA Mask */

+

+#define ITM_TCR_ITMENA_Pos                  0                                             /*!< ITM TCR: ITM Enable bit Position */

+#define ITM_TCR_ITMENA_Msk                 (1UL /*<< ITM_TCR_ITMENA_Pos*/)                /*!< ITM TCR: ITM Enable bit Mask */

+

+/* ITM Integration Write Register Definitions */

+#define ITM_IWR_ATVALIDM_Pos                0                                             /*!< ITM IWR: ATVALIDM Position */

+#define ITM_IWR_ATVALIDM_Msk               (1UL /*<< ITM_IWR_ATVALIDM_Pos*/)              /*!< ITM IWR: ATVALIDM Mask */

+

+/* ITM Integration Read Register Definitions */

+#define ITM_IRR_ATREADYM_Pos                0                                             /*!< ITM IRR: ATREADYM Position */

+#define ITM_IRR_ATREADYM_Msk               (1UL /*<< ITM_IRR_ATREADYM_Pos*/)              /*!< ITM IRR: ATREADYM Mask */

+

+/* ITM Integration Mode Control Register Definitions */

+#define ITM_IMCR_INTEGRATION_Pos            0                                             /*!< ITM IMCR: INTEGRATION Position */

+#define ITM_IMCR_INTEGRATION_Msk           (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/)          /*!< ITM IMCR: INTEGRATION Mask */

+

+/* ITM Lock Status Register Definitions */

+#define ITM_LSR_ByteAcc_Pos                 2                                             /*!< ITM LSR: ByteAcc Position */

+#define ITM_LSR_ByteAcc_Msk                (1UL << ITM_LSR_ByteAcc_Pos)                   /*!< ITM LSR: ByteAcc Mask */

+

+#define ITM_LSR_Access_Pos                  1                                             /*!< ITM LSR: Access Position */

+#define ITM_LSR_Access_Msk                 (1UL << ITM_LSR_Access_Pos)                    /*!< ITM LSR: Access Mask */

+

+#define ITM_LSR_Present_Pos                 0                                             /*!< ITM LSR: Present Position */

+#define ITM_LSR_Present_Msk                (1UL /*<< ITM_LSR_Present_Pos*/)               /*!< ITM LSR: Present Mask */

+

+/*@}*/ /* end of group CMSIS_ITM */

+

+/*@}*/ /* end of group CMSIS_ITM */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_DWT     Data Watchpoint and Trace (DWT)

+    \brief      Type definitions for the Data Watchpoint and Trace (DWT)

+  @{

+ */

+

+/** \brief  Structure type to access the Data Watchpoint and Trace Register (DWT).

+ */

+typedef struct

+{

+  /* Profiling registers (0x000-0x01C) followed by four comparator register    */

+  /* sets; the number actually implemented is reported in CTRL[NUMCOMP].       */

+  __IO uint32_t CTRL;                    /*!< Offset: 0x000 (R/W)  Control Register                          */

+  __IO uint32_t CYCCNT;                  /*!< Offset: 0x004 (R/W)  Cycle Count Register                      */

+  __IO uint32_t CPICNT;                  /*!< Offset: 0x008 (R/W)  CPI Count Register                        */

+  __IO uint32_t EXCCNT;                  /*!< Offset: 0x00C (R/W)  Exception Overhead Count Register         */

+  __IO uint32_t SLEEPCNT;                /*!< Offset: 0x010 (R/W)  Sleep Count Register                      */

+  __IO uint32_t LSUCNT;                  /*!< Offset: 0x014 (R/W)  LSU Count Register                        */

+  __IO uint32_t FOLDCNT;                 /*!< Offset: 0x018 (R/W)  Folded-instruction Count Register         */

+  __I  uint32_t PCSR;                    /*!< Offset: 0x01C (R/ )  Program Counter Sample Register           */

+  __IO uint32_t COMP0;                   /*!< Offset: 0x020 (R/W)  Comparator Register 0                     */

+  __IO uint32_t MASK0;                   /*!< Offset: 0x024 (R/W)  Mask Register 0                           */

+  __IO uint32_t FUNCTION0;               /*!< Offset: 0x028 (R/W)  Function Register 0                       */

+       uint32_t RESERVED0[1];

+  __IO uint32_t COMP1;                   /*!< Offset: 0x030 (R/W)  Comparator Register 1                     */

+  __IO uint32_t MASK1;                   /*!< Offset: 0x034 (R/W)  Mask Register 1                           */

+  __IO uint32_t FUNCTION1;               /*!< Offset: 0x038 (R/W)  Function Register 1                       */

+       uint32_t RESERVED1[1];

+  __IO uint32_t COMP2;                   /*!< Offset: 0x040 (R/W)  Comparator Register 2                     */

+  __IO uint32_t MASK2;                   /*!< Offset: 0x044 (R/W)  Mask Register 2                           */

+  __IO uint32_t FUNCTION2;               /*!< Offset: 0x048 (R/W)  Function Register 2                       */

+       uint32_t RESERVED2[1];

+  __IO uint32_t COMP3;                   /*!< Offset: 0x050 (R/W)  Comparator Register 3                     */

+  __IO uint32_t MASK3;                   /*!< Offset: 0x054 (R/W)  Mask Register 3                           */

+  __IO uint32_t FUNCTION3;               /*!< Offset: 0x058 (R/W)  Function Register 3                       */

+} DWT_Type;

+

+/* DWT Control Register Definitions */

+/* Note: for fields whose Position is 0 the "<< _Pos" shift is kept only as a  */

+/* comment, so the mask stays a plain constant (no shift-by-zero is emitted).  */

+#define DWT_CTRL_NUMCOMP_Pos               28                                          /*!< DWT CTRL: NUMCOMP Position */

+#define DWT_CTRL_NUMCOMP_Msk               (0xFUL << DWT_CTRL_NUMCOMP_Pos)             /*!< DWT CTRL: NUMCOMP Mask */

+

+#define DWT_CTRL_NOTRCPKT_Pos              27                                          /*!< DWT CTRL: NOTRCPKT Position */

+#define DWT_CTRL_NOTRCPKT_Msk              (0x1UL << DWT_CTRL_NOTRCPKT_Pos)            /*!< DWT CTRL: NOTRCPKT Mask */

+

+#define DWT_CTRL_NOEXTTRIG_Pos             26                                          /*!< DWT CTRL: NOEXTTRIG Position */

+#define DWT_CTRL_NOEXTTRIG_Msk             (0x1UL << DWT_CTRL_NOEXTTRIG_Pos)           /*!< DWT CTRL: NOEXTTRIG Mask */

+

+#define DWT_CTRL_NOCYCCNT_Pos              25                                          /*!< DWT CTRL: NOCYCCNT Position */

+#define DWT_CTRL_NOCYCCNT_Msk              (0x1UL << DWT_CTRL_NOCYCCNT_Pos)            /*!< DWT CTRL: NOCYCCNT Mask */

+

+#define DWT_CTRL_NOPRFCNT_Pos              24                                          /*!< DWT CTRL: NOPRFCNT Position */

+#define DWT_CTRL_NOPRFCNT_Msk              (0x1UL << DWT_CTRL_NOPRFCNT_Pos)            /*!< DWT CTRL: NOPRFCNT Mask */

+

+#define DWT_CTRL_CYCEVTENA_Pos             22                                          /*!< DWT CTRL: CYCEVTENA Position */

+#define DWT_CTRL_CYCEVTENA_Msk             (0x1UL << DWT_CTRL_CYCEVTENA_Pos)           /*!< DWT CTRL: CYCEVTENA Mask */

+

+#define DWT_CTRL_FOLDEVTENA_Pos            21                                          /*!< DWT CTRL: FOLDEVTENA Position */

+#define DWT_CTRL_FOLDEVTENA_Msk            (0x1UL << DWT_CTRL_FOLDEVTENA_Pos)          /*!< DWT CTRL: FOLDEVTENA Mask */

+

+#define DWT_CTRL_LSUEVTENA_Pos             20                                          /*!< DWT CTRL: LSUEVTENA Position */

+#define DWT_CTRL_LSUEVTENA_Msk             (0x1UL << DWT_CTRL_LSUEVTENA_Pos)           /*!< DWT CTRL: LSUEVTENA Mask */

+

+#define DWT_CTRL_SLEEPEVTENA_Pos           19                                          /*!< DWT CTRL: SLEEPEVTENA Position */

+#define DWT_CTRL_SLEEPEVTENA_Msk           (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos)         /*!< DWT CTRL: SLEEPEVTENA Mask */

+

+#define DWT_CTRL_EXCEVTENA_Pos             18                                          /*!< DWT CTRL: EXCEVTENA Position */

+#define DWT_CTRL_EXCEVTENA_Msk             (0x1UL << DWT_CTRL_EXCEVTENA_Pos)           /*!< DWT CTRL: EXCEVTENA Mask */

+

+#define DWT_CTRL_CPIEVTENA_Pos             17                                          /*!< DWT CTRL: CPIEVTENA Position */

+#define DWT_CTRL_CPIEVTENA_Msk             (0x1UL << DWT_CTRL_CPIEVTENA_Pos)           /*!< DWT CTRL: CPIEVTENA Mask */

+

+#define DWT_CTRL_EXCTRCENA_Pos             16                                          /*!< DWT CTRL: EXCTRCENA Position */

+#define DWT_CTRL_EXCTRCENA_Msk             (0x1UL << DWT_CTRL_EXCTRCENA_Pos)           /*!< DWT CTRL: EXCTRCENA Mask */

+

+#define DWT_CTRL_PCSAMPLENA_Pos            12                                          /*!< DWT CTRL: PCSAMPLENA Position */

+#define DWT_CTRL_PCSAMPLENA_Msk            (0x1UL << DWT_CTRL_PCSAMPLENA_Pos)          /*!< DWT CTRL: PCSAMPLENA Mask */

+

+#define DWT_CTRL_SYNCTAP_Pos               10                                          /*!< DWT CTRL: SYNCTAP Position */

+#define DWT_CTRL_SYNCTAP_Msk               (0x3UL << DWT_CTRL_SYNCTAP_Pos)             /*!< DWT CTRL: SYNCTAP Mask */

+

+#define DWT_CTRL_CYCTAP_Pos                 9                                          /*!< DWT CTRL: CYCTAP Position */

+#define DWT_CTRL_CYCTAP_Msk                (0x1UL << DWT_CTRL_CYCTAP_Pos)              /*!< DWT CTRL: CYCTAP Mask */

+

+#define DWT_CTRL_POSTINIT_Pos               5                                          /*!< DWT CTRL: POSTINIT Position */

+#define DWT_CTRL_POSTINIT_Msk              (0xFUL << DWT_CTRL_POSTINIT_Pos)            /*!< DWT CTRL: POSTINIT Mask */

+

+#define DWT_CTRL_POSTPRESET_Pos             1                                          /*!< DWT CTRL: POSTPRESET Position */

+#define DWT_CTRL_POSTPRESET_Msk            (0xFUL << DWT_CTRL_POSTPRESET_Pos)          /*!< DWT CTRL: POSTPRESET Mask */

+

+#define DWT_CTRL_CYCCNTENA_Pos              0                                          /*!< DWT CTRL: CYCCNTENA Position */

+#define DWT_CTRL_CYCCNTENA_Msk             (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/)       /*!< DWT CTRL: CYCCNTENA Mask */

+

+/* DWT CPI Count Register Definitions */

+#define DWT_CPICNT_CPICNT_Pos               0                                          /*!< DWT CPICNT: CPICNT Position */

+#define DWT_CPICNT_CPICNT_Msk              (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/)       /*!< DWT CPICNT: CPICNT Mask */

+

+/* DWT Exception Overhead Count Register Definitions */

+#define DWT_EXCCNT_EXCCNT_Pos               0                                          /*!< DWT EXCCNT: EXCCNT Position */

+#define DWT_EXCCNT_EXCCNT_Msk              (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/)       /*!< DWT EXCCNT: EXCCNT Mask */

+

+/* DWT Sleep Count Register Definitions */

+#define DWT_SLEEPCNT_SLEEPCNT_Pos           0                                          /*!< DWT SLEEPCNT: SLEEPCNT Position */

+#define DWT_SLEEPCNT_SLEEPCNT_Msk          (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/)   /*!< DWT SLEEPCNT: SLEEPCNT Mask */

+

+/* DWT LSU Count Register Definitions */

+#define DWT_LSUCNT_LSUCNT_Pos               0                                          /*!< DWT LSUCNT: LSUCNT Position */

+#define DWT_LSUCNT_LSUCNT_Msk              (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/)       /*!< DWT LSUCNT: LSUCNT Mask */

+

+/* DWT Folded-instruction Count Register Definitions */

+#define DWT_FOLDCNT_FOLDCNT_Pos             0                                          /*!< DWT FOLDCNT: FOLDCNT Position */

+#define DWT_FOLDCNT_FOLDCNT_Msk            (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/)     /*!< DWT FOLDCNT: FOLDCNT Mask */

+

+/* DWT Comparator Mask Register Definitions */

+#define DWT_MASK_MASK_Pos                   0                                          /*!< DWT MASK: MASK Position */

+#define DWT_MASK_MASK_Msk                  (0x1FUL /*<< DWT_MASK_MASK_Pos*/)           /*!< DWT MASK: MASK Mask */

+

+/* DWT Comparator Function Register Definitions */

+#define DWT_FUNCTION_MATCHED_Pos           24                                          /*!< DWT FUNCTION: MATCHED Position */

+#define DWT_FUNCTION_MATCHED_Msk           (0x1UL << DWT_FUNCTION_MATCHED_Pos)         /*!< DWT FUNCTION: MATCHED Mask */

+

+#define DWT_FUNCTION_DATAVADDR1_Pos        16                                          /*!< DWT FUNCTION: DATAVADDR1 Position */

+#define DWT_FUNCTION_DATAVADDR1_Msk        (0xFUL << DWT_FUNCTION_DATAVADDR1_Pos)      /*!< DWT FUNCTION: DATAVADDR1 Mask */

+

+#define DWT_FUNCTION_DATAVADDR0_Pos        12                                          /*!< DWT FUNCTION: DATAVADDR0 Position */

+#define DWT_FUNCTION_DATAVADDR0_Msk        (0xFUL << DWT_FUNCTION_DATAVADDR0_Pos)      /*!< DWT FUNCTION: DATAVADDR0 Mask */

+

+#define DWT_FUNCTION_DATAVSIZE_Pos         10                                          /*!< DWT FUNCTION: DATAVSIZE Position */

+#define DWT_FUNCTION_DATAVSIZE_Msk         (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos)       /*!< DWT FUNCTION: DATAVSIZE Mask */

+

+#define DWT_FUNCTION_LNK1ENA_Pos            9                                          /*!< DWT FUNCTION: LNK1ENA Position */

+#define DWT_FUNCTION_LNK1ENA_Msk           (0x1UL << DWT_FUNCTION_LNK1ENA_Pos)         /*!< DWT FUNCTION: LNK1ENA Mask */

+

+#define DWT_FUNCTION_DATAVMATCH_Pos         8                                          /*!< DWT FUNCTION: DATAVMATCH Position */

+#define DWT_FUNCTION_DATAVMATCH_Msk        (0x1UL << DWT_FUNCTION_DATAVMATCH_Pos)      /*!< DWT FUNCTION: DATAVMATCH Mask */

+

+#define DWT_FUNCTION_CYCMATCH_Pos           7                                          /*!< DWT FUNCTION: CYCMATCH Position */

+#define DWT_FUNCTION_CYCMATCH_Msk          (0x1UL << DWT_FUNCTION_CYCMATCH_Pos)        /*!< DWT FUNCTION: CYCMATCH Mask */

+

+#define DWT_FUNCTION_EMITRANGE_Pos          5                                          /*!< DWT FUNCTION: EMITRANGE Position */

+#define DWT_FUNCTION_EMITRANGE_Msk         (0x1UL << DWT_FUNCTION_EMITRANGE_Pos)       /*!< DWT FUNCTION: EMITRANGE Mask */

+

+#define DWT_FUNCTION_FUNCTION_Pos           0                                          /*!< DWT FUNCTION: FUNCTION Position */

+#define DWT_FUNCTION_FUNCTION_Msk          (0xFUL /*<< DWT_FUNCTION_FUNCTION_Pos*/)    /*!< DWT FUNCTION: FUNCTION Mask */

+

+/*@}*/ /* end of group CMSIS_DWT */

+

+/*@}*/ /* end of group CMSIS_DWT */

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_TPI     Trace Port Interface (TPI)

+    \brief      Type definitions for the Trace Port Interface (TPI)

+  @{

+ */

+

+/** \brief  Structure type to access the Trace Port Interface Register (TPI).

+ */

+typedef struct

+{

+  /* SSPSR is architecturally read-only (access column "(R/ )"); it was        */

+  /* declared __IO here, contradicting its own annotation. Corrected to __I,   */

+  /* matching the fix ARM applied in CMSIS 5 (__IM). Width-preserving change.  */

+  __I  uint32_t SSPSR;                   /*!< Offset: 0x000 (R/ )  Supported Parallel Port Size Register     */

+  __IO uint32_t CSPSR;                   /*!< Offset: 0x004 (R/W)  Current Parallel Port Size Register */

+       uint32_t RESERVED0[2];

+  __IO uint32_t ACPR;                    /*!< Offset: 0x010 (R/W)  Asynchronous Clock Prescaler Register */

+       uint32_t RESERVED1[55];

+  __IO uint32_t SPPR;                    /*!< Offset: 0x0F0 (R/W)  Selected Pin Protocol Register */

+       uint32_t RESERVED2[131];

+  __I  uint32_t FFSR;                    /*!< Offset: 0x300 (R/ )  Formatter and Flush Status Register */

+  __IO uint32_t FFCR;                    /*!< Offset: 0x304 (R/W)  Formatter and Flush Control Register */

+  __I  uint32_t FSCR;                    /*!< Offset: 0x308 (R/ )  Formatter Synchronization Counter Register */

+       uint32_t RESERVED3[759];

+  __I  uint32_t TRIGGER;                 /*!< Offset: 0xEE8 (R/ )  TRIGGER */

+  __I  uint32_t FIFO0;                   /*!< Offset: 0xEEC (R/ )  Integration ETM Data */

+  __I  uint32_t ITATBCTR2;               /*!< Offset: 0xEF0 (R/ )  ITATBCTR2 */

+       uint32_t RESERVED4[1];

+  __I  uint32_t ITATBCTR0;               /*!< Offset: 0xEF8 (R/ )  ITATBCTR0 */

+  __I  uint32_t FIFO1;                   /*!< Offset: 0xEFC (R/ )  Integration ITM Data */

+  __IO uint32_t ITCTRL;                  /*!< Offset: 0xF00 (R/W)  Integration Mode Control */

+       uint32_t RESERVED5[39];

+  __IO uint32_t CLAIMSET;                /*!< Offset: 0xFA0 (R/W)  Claim tag set */

+  __IO uint32_t CLAIMCLR;                /*!< Offset: 0xFA4 (R/W)  Claim tag clear */

+       uint32_t RESERVED7[8];            /* NOTE(review): RESERVED6 is skipped in the upstream CMSIS numbering; kept as-is */

+  __I  uint32_t DEVID;                   /*!< Offset: 0xFC8 (R/ )  TPIU_DEVID */

+  __I  uint32_t DEVTYPE;                 /*!< Offset: 0xFCC (R/ )  TPIU_DEVTYPE */

+} TPI_Type;

+

+/* TPI Asynchronous Clock Prescaler Register Definitions */

+/* Note: for fields whose Position is 0 the "<< _Pos" shift is kept only as a  */

+/* comment, so the mask stays a plain constant (no shift-by-zero is emitted).  */

+#define TPI_ACPR_PRESCALER_Pos              0                                          /*!< TPI ACPR: PRESCALER Position */

+#define TPI_ACPR_PRESCALER_Msk             (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/)    /*!< TPI ACPR: PRESCALER Mask */

+

+/* TPI Selected Pin Protocol Register Definitions */

+#define TPI_SPPR_TXMODE_Pos                 0                                          /*!< TPI SPPR: TXMODE Position */

+#define TPI_SPPR_TXMODE_Msk                (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/)          /*!< TPI SPPR: TXMODE Mask */

+

+/* TPI Formatter and Flush Status Register Definitions */

+#define TPI_FFSR_FtNonStop_Pos              3                                          /*!< TPI FFSR: FtNonStop Position */

+#define TPI_FFSR_FtNonStop_Msk             (0x1UL << TPI_FFSR_FtNonStop_Pos)           /*!< TPI FFSR: FtNonStop Mask */

+

+#define TPI_FFSR_TCPresent_Pos              2                                          /*!< TPI FFSR: TCPresent Position */

+#define TPI_FFSR_TCPresent_Msk             (0x1UL << TPI_FFSR_TCPresent_Pos)           /*!< TPI FFSR: TCPresent Mask */

+

+#define TPI_FFSR_FtStopped_Pos              1                                          /*!< TPI FFSR: FtStopped Position */

+#define TPI_FFSR_FtStopped_Msk             (0x1UL << TPI_FFSR_FtStopped_Pos)           /*!< TPI FFSR: FtStopped Mask */

+

+#define TPI_FFSR_FlInProg_Pos               0                                          /*!< TPI FFSR: FlInProg Position */

+#define TPI_FFSR_FlInProg_Msk              (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/)        /*!< TPI FFSR: FlInProg Mask */

+

+/* TPI Formatter and Flush Control Register Definitions */

+#define TPI_FFCR_TrigIn_Pos                 8                                          /*!< TPI FFCR: TrigIn Position */

+#define TPI_FFCR_TrigIn_Msk                (0x1UL << TPI_FFCR_TrigIn_Pos)              /*!< TPI FFCR: TrigIn Mask */

+

+#define TPI_FFCR_EnFCont_Pos                1                                          /*!< TPI FFCR: EnFCont Position */

+#define TPI_FFCR_EnFCont_Msk               (0x1UL << TPI_FFCR_EnFCont_Pos)             /*!< TPI FFCR: EnFCont Mask */

+

+/* TPI TRIGGER Register Definitions */

+#define TPI_TRIGGER_TRIGGER_Pos             0                                          /*!< TPI TRIGGER: TRIGGER Position */

+#define TPI_TRIGGER_TRIGGER_Msk            (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/)      /*!< TPI TRIGGER: TRIGGER Mask */

+

+/* TPI Integration ETM Data Register Definitions (FIFO0) */

+#define TPI_FIFO0_ITM_ATVALID_Pos          29                                          /*!< TPI FIFO0: ITM_ATVALID Position */

+#define TPI_FIFO0_ITM_ATVALID_Msk          (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos)        /*!< TPI FIFO0: ITM_ATVALID Mask */

+

+#define TPI_FIFO0_ITM_bytecount_Pos        27                                          /*!< TPI FIFO0: ITM_bytecount Position */

+#define TPI_FIFO0_ITM_bytecount_Msk        (0x3UL << TPI_FIFO0_ITM_bytecount_Pos)      /*!< TPI FIFO0: ITM_bytecount Mask */

+

+#define TPI_FIFO0_ETM_ATVALID_Pos          26                                          /*!< TPI FIFO0: ETM_ATVALID Position */

+#define TPI_FIFO0_ETM_ATVALID_Msk          (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos)        /*!< TPI FIFO0: ETM_ATVALID Mask */

+

+#define TPI_FIFO0_ETM_bytecount_Pos        24                                          /*!< TPI FIFO0: ETM_bytecount Position */

+#define TPI_FIFO0_ETM_bytecount_Msk        (0x3UL << TPI_FIFO0_ETM_bytecount_Pos)      /*!< TPI FIFO0: ETM_bytecount Mask */

+

+#define TPI_FIFO0_ETM2_Pos                 16                                          /*!< TPI FIFO0: ETM2 Position */

+#define TPI_FIFO0_ETM2_Msk                 (0xFFUL << TPI_FIFO0_ETM2_Pos)              /*!< TPI FIFO0: ETM2 Mask */

+

+#define TPI_FIFO0_ETM1_Pos                  8                                          /*!< TPI FIFO0: ETM1 Position */

+#define TPI_FIFO0_ETM1_Msk                 (0xFFUL << TPI_FIFO0_ETM1_Pos)              /*!< TPI FIFO0: ETM1 Mask */

+

+#define TPI_FIFO0_ETM0_Pos                  0                                          /*!< TPI FIFO0: ETM0 Position */

+#define TPI_FIFO0_ETM0_Msk                 (0xFFUL /*<< TPI_FIFO0_ETM0_Pos*/)          /*!< TPI FIFO0: ETM0 Mask */

+

+/* TPI ITATBCTR2 Register Definitions */

+#define TPI_ITATBCTR2_ATREADY_Pos           0                                          /*!< TPI ITATBCTR2: ATREADY Position */

+#define TPI_ITATBCTR2_ATREADY_Msk          (0x1UL /*<< TPI_ITATBCTR2_ATREADY_Pos*/)    /*!< TPI ITATBCTR2: ATREADY Mask */

+

+/* TPI Integration ITM Data Register Definitions (FIFO1) */

+#define TPI_FIFO1_ITM_ATVALID_Pos          29                                          /*!< TPI FIFO1: ITM_ATVALID Position */

+#define TPI_FIFO1_ITM_ATVALID_Msk          (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos)        /*!< TPI FIFO1: ITM_ATVALID Mask */

+

+#define TPI_FIFO1_ITM_bytecount_Pos        27                                          /*!< TPI FIFO1: ITM_bytecount Position */

+#define TPI_FIFO1_ITM_bytecount_Msk        (0x3UL << TPI_FIFO1_ITM_bytecount_Pos)      /*!< TPI FIFO1: ITM_bytecount Mask */

+

+#define TPI_FIFO1_ETM_ATVALID_Pos          26                                          /*!< TPI FIFO1: ETM_ATVALID Position */

+#define TPI_FIFO1_ETM_ATVALID_Msk          (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos)        /*!< TPI FIFO1: ETM_ATVALID Mask */

+

+#define TPI_FIFO1_ETM_bytecount_Pos        24                                          /*!< TPI FIFO1: ETM_bytecount Position */

+#define TPI_FIFO1_ETM_bytecount_Msk        (0x3UL << TPI_FIFO1_ETM_bytecount_Pos)      /*!< TPI FIFO1: ETM_bytecount Mask */

+

+#define TPI_FIFO1_ITM2_Pos                 16                                          /*!< TPI FIFO1: ITM2 Position */

+#define TPI_FIFO1_ITM2_Msk                 (0xFFUL << TPI_FIFO1_ITM2_Pos)              /*!< TPI FIFO1: ITM2 Mask */

+

+#define TPI_FIFO1_ITM1_Pos                  8                                          /*!< TPI FIFO1: ITM1 Position */

+#define TPI_FIFO1_ITM1_Msk                 (0xFFUL << TPI_FIFO1_ITM1_Pos)              /*!< TPI FIFO1: ITM1 Mask */

+

+#define TPI_FIFO1_ITM0_Pos                  0                                          /*!< TPI FIFO1: ITM0 Position */

+#define TPI_FIFO1_ITM0_Msk                 (0xFFUL /*<< TPI_FIFO1_ITM0_Pos*/)          /*!< TPI FIFO1: ITM0 Mask */

+

+/* TPI ITATBCTR0 Register Definitions */

+#define TPI_ITATBCTR0_ATREADY_Pos           0                                          /*!< TPI ITATBCTR0: ATREADY Position */

+#define TPI_ITATBCTR0_ATREADY_Msk          (0x1UL /*<< TPI_ITATBCTR0_ATREADY_Pos*/)    /*!< TPI ITATBCTR0: ATREADY Mask */

+

+/* TPI Integration Mode Control Register Definitions */

+#define TPI_ITCTRL_Mode_Pos                 0                                          /*!< TPI ITCTRL: Mode Position */

+#define TPI_ITCTRL_Mode_Msk                (0x1UL /*<< TPI_ITCTRL_Mode_Pos*/)          /*!< TPI ITCTRL: Mode Mask */

+

+/* TPI DEVID Register Definitions */

+#define TPI_DEVID_NRZVALID_Pos             11                                          /*!< TPI DEVID: NRZVALID Position */

+#define TPI_DEVID_NRZVALID_Msk             (0x1UL << TPI_DEVID_NRZVALID_Pos)           /*!< TPI DEVID: NRZVALID Mask */

+

+#define TPI_DEVID_MANCVALID_Pos            10                                          /*!< TPI DEVID: MANCVALID Position */

+#define TPI_DEVID_MANCVALID_Msk            (0x1UL << TPI_DEVID_MANCVALID_Pos)          /*!< TPI DEVID: MANCVALID Mask */

+

+#define TPI_DEVID_PTINVALID_Pos             9                                          /*!< TPI DEVID: PTINVALID Position */

+#define TPI_DEVID_PTINVALID_Msk            (0x1UL << TPI_DEVID_PTINVALID_Pos)          /*!< TPI DEVID: PTINVALID Mask */

+

+#define TPI_DEVID_MinBufSz_Pos              6                                          /*!< TPI DEVID: MinBufSz Position */

+#define TPI_DEVID_MinBufSz_Msk             (0x7UL << TPI_DEVID_MinBufSz_Pos)           /*!< TPI DEVID: MinBufSz Mask */

+

+#define TPI_DEVID_AsynClkIn_Pos             5                                          /*!< TPI DEVID: AsynClkIn Position */

+#define TPI_DEVID_AsynClkIn_Msk            (0x1UL << TPI_DEVID_AsynClkIn_Pos)          /*!< TPI DEVID: AsynClkIn Mask */

+

+#define TPI_DEVID_NrTraceInput_Pos          0                                          /*!< TPI DEVID: NrTraceInput Position */

+#define TPI_DEVID_NrTraceInput_Msk         (0x1FUL /*<< TPI_DEVID_NrTraceInput_Pos*/)  /*!< TPI DEVID: NrTraceInput Mask */

+

+/* TPI DEVTYPE Register Definitions */

+#define TPI_DEVTYPE_MajorType_Pos           4                                          /*!< TPI DEVTYPE: MajorType Position */

+#define TPI_DEVTYPE_MajorType_Msk          (0xFUL << TPI_DEVTYPE_MajorType_Pos)        /*!< TPI DEVTYPE: MajorType Mask */

+

+#define TPI_DEVTYPE_SubType_Pos             0                                          /*!< TPI DEVTYPE: SubType Position */

+#define TPI_DEVTYPE_SubType_Msk            (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/)      /*!< TPI DEVTYPE: SubType Mask */

+

+/*@}*/ /* end of group CMSIS_TPI */

+

+/*@}*/ /* end of group CMSIS_TPI */

+

+

+#if (__MPU_PRESENT == 1)

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_MPU     Memory Protection Unit (MPU)

+    \brief      Type definitions for the Memory Protection Unit (MPU)

+  @{

+ */

+

+/** \brief  Structure type to access the Memory Protection Unit (MPU).

+ */

+typedef struct

+{

+  /* The RBAR_An/RASR_An pairs are aliases of RBAR/RASR for adjacent regions,  */

+  /* allowing several regions to be programmed with a single burst of writes.  */

+  __I  uint32_t TYPE;                    /*!< Offset: 0x000 (R/ )  MPU Type Register                              */

+  __IO uint32_t CTRL;                    /*!< Offset: 0x004 (R/W)  MPU Control Register                           */

+  __IO uint32_t RNR;                     /*!< Offset: 0x008 (R/W)  MPU Region RNRber Register                     */

+  __IO uint32_t RBAR;                    /*!< Offset: 0x00C (R/W)  MPU Region Base Address Register               */

+  __IO uint32_t RASR;                    /*!< Offset: 0x010 (R/W)  MPU Region Attribute and Size Register         */

+  __IO uint32_t RBAR_A1;                 /*!< Offset: 0x014 (R/W)  MPU Alias 1 Region Base Address Register       */

+  __IO uint32_t RASR_A1;                 /*!< Offset: 0x018 (R/W)  MPU Alias 1 Region Attribute and Size Register */

+  __IO uint32_t RBAR_A2;                 /*!< Offset: 0x01C (R/W)  MPU Alias 2 Region Base Address Register       */

+  __IO uint32_t RASR_A2;                 /*!< Offset: 0x020 (R/W)  MPU Alias 2 Region Attribute and Size Register */

+  __IO uint32_t RBAR_A3;                 /*!< Offset: 0x024 (R/W)  MPU Alias 3 Region Base Address Register       */

+  __IO uint32_t RASR_A3;                 /*!< Offset: 0x028 (R/W)  MPU Alias 3 Region Attribute and Size Register */

+} MPU_Type;

+

+/* MPU Type Register */

+/* Note: for fields whose Position is 0 the "<< _Pos" shift is kept only as a  */

+/* comment, so the mask stays a plain constant (no shift-by-zero is emitted).  */

+#define MPU_TYPE_IREGION_Pos               16                                             /*!< MPU TYPE: IREGION Position */

+#define MPU_TYPE_IREGION_Msk               (0xFFUL << MPU_TYPE_IREGION_Pos)               /*!< MPU TYPE: IREGION Mask */

+

+#define MPU_TYPE_DREGION_Pos                8                                             /*!< MPU TYPE: DREGION Position */

+#define MPU_TYPE_DREGION_Msk               (0xFFUL << MPU_TYPE_DREGION_Pos)               /*!< MPU TYPE: DREGION Mask */

+

+#define MPU_TYPE_SEPARATE_Pos               0                                             /*!< MPU TYPE: SEPARATE Position */

+#define MPU_TYPE_SEPARATE_Msk              (1UL /*<< MPU_TYPE_SEPARATE_Pos*/)             /*!< MPU TYPE: SEPARATE Mask */

+

+/* MPU Control Register */

+#define MPU_CTRL_PRIVDEFENA_Pos             2                                             /*!< MPU CTRL: PRIVDEFENA Position */

+#define MPU_CTRL_PRIVDEFENA_Msk            (1UL << MPU_CTRL_PRIVDEFENA_Pos)               /*!< MPU CTRL: PRIVDEFENA Mask */

+

+#define MPU_CTRL_HFNMIENA_Pos               1                                             /*!< MPU CTRL: HFNMIENA Position */

+#define MPU_CTRL_HFNMIENA_Msk              (1UL << MPU_CTRL_HFNMIENA_Pos)                 /*!< MPU CTRL: HFNMIENA Mask */

+

+#define MPU_CTRL_ENABLE_Pos                 0                                             /*!< MPU CTRL: ENABLE Position */

+#define MPU_CTRL_ENABLE_Msk                (1UL /*<< MPU_CTRL_ENABLE_Pos*/)               /*!< MPU CTRL: ENABLE Mask */

+

+/* MPU Region Number Register */

+#define MPU_RNR_REGION_Pos                  0                                             /*!< MPU RNR: REGION Position */

+#define MPU_RNR_REGION_Msk                 (0xFFUL /*<< MPU_RNR_REGION_Pos*/)             /*!< MPU RNR: REGION Mask */

+

+/* MPU Region Base Address Register */

+#define MPU_RBAR_ADDR_Pos                   5                                             /*!< MPU RBAR: ADDR Position */

+#define MPU_RBAR_ADDR_Msk                  (0x7FFFFFFUL << MPU_RBAR_ADDR_Pos)             /*!< MPU RBAR: ADDR Mask */

+

+#define MPU_RBAR_VALID_Pos                  4                                             /*!< MPU RBAR: VALID Position */

+#define MPU_RBAR_VALID_Msk                 (1UL << MPU_RBAR_VALID_Pos)                    /*!< MPU RBAR: VALID Mask */

+

+#define MPU_RBAR_REGION_Pos                 0                                             /*!< MPU RBAR: REGION Position */

+#define MPU_RBAR_REGION_Msk                (0xFUL /*<< MPU_RBAR_REGION_Pos*/)             /*!< MPU RBAR: REGION Mask */

+

+/* MPU Region Attribute and Size Register */

+#define MPU_RASR_ATTRS_Pos                 16                                             /*!< MPU RASR: MPU Region Attribute field Position */

+#define MPU_RASR_ATTRS_Msk                 (0xFFFFUL << MPU_RASR_ATTRS_Pos)               /*!< MPU RASR: MPU Region Attribute field Mask */

+

+#define MPU_RASR_XN_Pos                    28                                             /*!< MPU RASR: ATTRS.XN Position */

+#define MPU_RASR_XN_Msk                    (1UL << MPU_RASR_XN_Pos)                       /*!< MPU RASR: ATTRS.XN Mask */

+

+#define MPU_RASR_AP_Pos                    24                                             /*!< MPU RASR: ATTRS.AP Position */

+#define MPU_RASR_AP_Msk                    (0x7UL << MPU_RASR_AP_Pos)                     /*!< MPU RASR: ATTRS.AP Mask */

+

+#define MPU_RASR_TEX_Pos                   19                                             /*!< MPU RASR: ATTRS.TEX Position */

+#define MPU_RASR_TEX_Msk                   (0x7UL << MPU_RASR_TEX_Pos)                    /*!< MPU RASR: ATTRS.TEX Mask */

+

+#define MPU_RASR_S_Pos                     18                                             /*!< MPU RASR: ATTRS.S Position */

+#define MPU_RASR_S_Msk                     (1UL << MPU_RASR_S_Pos)                        /*!< MPU RASR: ATTRS.S Mask */

+

+#define MPU_RASR_C_Pos                     17                                             /*!< MPU RASR: ATTRS.C Position */

+#define MPU_RASR_C_Msk                     (1UL << MPU_RASR_C_Pos)                        /*!< MPU RASR: ATTRS.C Mask */

+

+#define MPU_RASR_B_Pos                     16                                             /*!< MPU RASR: ATTRS.B Position */

+#define MPU_RASR_B_Msk                     (1UL << MPU_RASR_B_Pos)                        /*!< MPU RASR: ATTRS.B Mask */

+

+#define MPU_RASR_SRD_Pos                    8                                             /*!< MPU RASR: Sub-Region Disable Position */

+#define MPU_RASR_SRD_Msk                   (0xFFUL << MPU_RASR_SRD_Pos)                   /*!< MPU RASR: Sub-Region Disable Mask */

+

+#define MPU_RASR_SIZE_Pos                   1                                             /*!< MPU RASR: Region Size Field Position */

+#define MPU_RASR_SIZE_Msk                  (0x1FUL << MPU_RASR_SIZE_Pos)                  /*!< MPU RASR: Region Size Field Mask */

+

+#define MPU_RASR_ENABLE_Pos                 0                                             /*!< MPU RASR: Region enable bit Position */

+#define MPU_RASR_ENABLE_Msk                (1UL /*<< MPU_RASR_ENABLE_Pos*/)               /*!< MPU RASR: Region enable bit Disable Mask */

+

+/*@} end of group CMSIS_MPU */

+#endif

+

+

+/** \ingroup  CMSIS_core_register

+    \defgroup CMSIS_CoreDebug       Core Debug Registers (CoreDebug)

+    \brief      Type definitions for the Core Debug Registers

+  @{

+ */

+

+/** \brief  Structure type to access the Core Debug Register (CoreDebug).

+ */

+typedef struct

+{

+  __IO uint32_t DHCSR;                   /*!< Offset: 0x000 (R/W)  Debug Halting Control and Status Register    */

+  __O  uint32_t DCRSR;                   /*!< Offset: 0x004 ( /W)  Debug Core Register Selector Register        */

+  __IO uint32_t DCRDR;                   /*!< Offset: 0x008 (R/W)  Debug Core Register Data Register            */

+  __IO uint32_t DEMCR;                   /*!< Offset: 0x00C (R/W)  Debug Exception and Monitor Control Register */

+} CoreDebug_Type;

+

+/* Debug Halting Control and Status Register */

+#define CoreDebug_DHCSR_DBGKEY_Pos         16                                             /*!< CoreDebug DHCSR: DBGKEY Position */

+#define CoreDebug_DHCSR_DBGKEY_Msk         (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos)       /*!< CoreDebug DHCSR: DBGKEY Mask */

+

+#define CoreDebug_DHCSR_S_RESET_ST_Pos     25                                             /*!< CoreDebug DHCSR: S_RESET_ST Position */

+#define CoreDebug_DHCSR_S_RESET_ST_Msk     (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos)        /*!< CoreDebug DHCSR: S_RESET_ST Mask */

+

+#define CoreDebug_DHCSR_S_RETIRE_ST_Pos    24                                             /*!< CoreDebug DHCSR: S_RETIRE_ST Position */

+#define CoreDebug_DHCSR_S_RETIRE_ST_Msk    (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos)       /*!< CoreDebug DHCSR: S_RETIRE_ST Mask */

+

+#define CoreDebug_DHCSR_S_LOCKUP_Pos       19                                             /*!< CoreDebug DHCSR: S_LOCKUP Position */

+#define CoreDebug_DHCSR_S_LOCKUP_Msk       (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos)          /*!< CoreDebug DHCSR: S_LOCKUP Mask */

+

+#define CoreDebug_DHCSR_S_SLEEP_Pos        18                                             /*!< CoreDebug DHCSR: S_SLEEP Position */

+#define CoreDebug_DHCSR_S_SLEEP_Msk        (1UL << CoreDebug_DHCSR_S_SLEEP_Pos)           /*!< CoreDebug DHCSR: S_SLEEP Mask */

+

+#define CoreDebug_DHCSR_S_HALT_Pos         17                                             /*!< CoreDebug DHCSR: S_HALT Position */

+#define CoreDebug_DHCSR_S_HALT_Msk         (1UL << CoreDebug_DHCSR_S_HALT_Pos)            /*!< CoreDebug DHCSR: S_HALT Mask */

+

+#define CoreDebug_DHCSR_S_REGRDY_Pos       16                                             /*!< CoreDebug DHCSR: S_REGRDY Position */

+#define CoreDebug_DHCSR_S_REGRDY_Msk       (1UL << CoreDebug_DHCSR_S_REGRDY_Pos)          /*!< CoreDebug DHCSR: S_REGRDY Mask */

+

+#define CoreDebug_DHCSR_C_SNAPSTALL_Pos     5                                             /*!< CoreDebug DHCSR: C_SNAPSTALL Position */

+#define CoreDebug_DHCSR_C_SNAPSTALL_Msk    (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos)       /*!< CoreDebug DHCSR: C_SNAPSTALL Mask */

+

+#define CoreDebug_DHCSR_C_MASKINTS_Pos      3                                             /*!< CoreDebug DHCSR: C_MASKINTS Position */

+#define CoreDebug_DHCSR_C_MASKINTS_Msk     (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos)        /*!< CoreDebug DHCSR: C_MASKINTS Mask */

+

+#define CoreDebug_DHCSR_C_STEP_Pos          2                                             /*!< CoreDebug DHCSR: C_STEP Position */

+#define CoreDebug_DHCSR_C_STEP_Msk         (1UL << CoreDebug_DHCSR_C_STEP_Pos)            /*!< CoreDebug DHCSR: C_STEP Mask */

+

+#define CoreDebug_DHCSR_C_HALT_Pos          1                                             /*!< CoreDebug DHCSR: C_HALT Position */

+#define CoreDebug_DHCSR_C_HALT_Msk         (1UL << CoreDebug_DHCSR_C_HALT_Pos)            /*!< CoreDebug DHCSR: C_HALT Mask */

+

+#define CoreDebug_DHCSR_C_DEBUGEN_Pos       0                                             /*!< CoreDebug DHCSR: C_DEBUGEN Position */

+#define CoreDebug_DHCSR_C_DEBUGEN_Msk      (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/)     /*!< CoreDebug DHCSR: C_DEBUGEN Mask */

+

+/* Debug Core Register Selector Register */

+#define CoreDebug_DCRSR_REGWnR_Pos         16                                             /*!< CoreDebug DCRSR: REGWnR Position */

+#define CoreDebug_DCRSR_REGWnR_Msk         (1UL << CoreDebug_DCRSR_REGWnR_Pos)            /*!< CoreDebug DCRSR: REGWnR Mask */

+

+#define CoreDebug_DCRSR_REGSEL_Pos          0                                             /*!< CoreDebug DCRSR: REGSEL Position */

+#define CoreDebug_DCRSR_REGSEL_Msk         (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/)     /*!< CoreDebug DCRSR: REGSEL Mask */

+

+/* Debug Exception and Monitor Control Register */

+#define CoreDebug_DEMCR_TRCENA_Pos         24                                             /*!< CoreDebug DEMCR: TRCENA Position */

+#define CoreDebug_DEMCR_TRCENA_Msk         (1UL << CoreDebug_DEMCR_TRCENA_Pos)            /*!< CoreDebug DEMCR: TRCENA Mask */

+

+#define CoreDebug_DEMCR_MON_REQ_Pos        19                                             /*!< CoreDebug DEMCR: MON_REQ Position */

+#define CoreDebug_DEMCR_MON_REQ_Msk        (1UL << CoreDebug_DEMCR_MON_REQ_Pos)           /*!< CoreDebug DEMCR: MON_REQ Mask */

+

+#define CoreDebug_DEMCR_MON_STEP_Pos       18                                             /*!< CoreDebug DEMCR: MON_STEP Position */

+#define CoreDebug_DEMCR_MON_STEP_Msk       (1UL << CoreDebug_DEMCR_MON_STEP_Pos)          /*!< CoreDebug DEMCR: MON_STEP Mask */

+

+#define CoreDebug_DEMCR_MON_PEND_Pos       17                                             /*!< CoreDebug DEMCR: MON_PEND Position */

+#define CoreDebug_DEMCR_MON_PEND_Msk       (1UL << CoreDebug_DEMCR_MON_PEND_Pos)          /*!< CoreDebug DEMCR: MON_PEND Mask */

+

+#define CoreDebug_DEMCR_MON_EN_Pos         16                                             /*!< CoreDebug DEMCR: MON_EN Position */

+#define CoreDebug_DEMCR_MON_EN_Msk         (1UL << CoreDebug_DEMCR_MON_EN_Pos)            /*!< CoreDebug DEMCR: MON_EN Mask */

+

+#define CoreDebug_DEMCR_VC_HARDERR_Pos     10                                             /*!< CoreDebug DEMCR: VC_HARDERR Position */

+#define CoreDebug_DEMCR_VC_HARDERR_Msk     (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos)        /*!< CoreDebug DEMCR: VC_HARDERR Mask */

+

+#define CoreDebug_DEMCR_VC_INTERR_Pos       9                                             /*!< CoreDebug DEMCR: VC_INTERR Position */

+#define CoreDebug_DEMCR_VC_INTERR_Msk      (1UL << CoreDebug_DEMCR_VC_INTERR_Pos)         /*!< CoreDebug DEMCR: VC_INTERR Mask */

+

+#define CoreDebug_DEMCR_VC_BUSERR_Pos       8                                             /*!< CoreDebug DEMCR: VC_BUSERR Position */

+#define CoreDebug_DEMCR_VC_BUSERR_Msk      (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos)         /*!< CoreDebug DEMCR: VC_BUSERR Mask */

+

+#define CoreDebug_DEMCR_VC_STATERR_Pos      7                                             /*!< CoreDebug DEMCR: VC_STATERR Position */

+#define CoreDebug_DEMCR_VC_STATERR_Msk     (1UL << CoreDebug_DEMCR_VC_STATERR_Pos)        /*!< CoreDebug DEMCR: VC_STATERR Mask */

+

+#define CoreDebug_DEMCR_VC_CHKERR_Pos       6                                             /*!< CoreDebug DEMCR: VC_CHKERR Position */

+#define CoreDebug_DEMCR_VC_CHKERR_Msk      (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos)         /*!< CoreDebug DEMCR: VC_CHKERR Mask */

+

+#define CoreDebug_DEMCR_VC_NOCPERR_Pos      5                                             /*!< CoreDebug DEMCR: VC_NOCPERR Position */

+#define CoreDebug_DEMCR_VC_NOCPERR_Msk     (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos)        /*!< CoreDebug DEMCR: VC_NOCPERR Mask */

+

+#define CoreDebug_DEMCR_VC_MMERR_Pos        4                                             /*!< CoreDebug DEMCR: VC_MMERR Position */

+#define CoreDebug_DEMCR_VC_MMERR_Msk       (1UL << CoreDebug_DEMCR_VC_MMERR_Pos)          /*!< CoreDebug DEMCR: VC_MMERR Mask */

+

+#define CoreDebug_DEMCR_VC_CORERESET_Pos    0                                             /*!< CoreDebug DEMCR: VC_CORERESET Position */

+#define CoreDebug_DEMCR_VC_CORERESET_Msk   (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/)  /*!< CoreDebug DEMCR: VC_CORERESET Mask */

+

+/*@} end of group CMSIS_CoreDebug */

+

+

+/** \ingroup    CMSIS_core_register

+    \defgroup   CMSIS_core_base     Core Definitions

+    \brief      Definitions for base addresses, unions, and structures.

+  @{

+ */

+

+/* Memory mapping of Cortex-M3 Hardware */

+#define SCS_BASE            (0xE000E000UL)                            /*!< System Control Space Base Address  */

+#define ITM_BASE            (0xE0000000UL)                            /*!< ITM Base Address                   */

+#define DWT_BASE            (0xE0001000UL)                            /*!< DWT Base Address                   */

+#define TPI_BASE            (0xE0040000UL)                            /*!< TPI Base Address                   */

+#define CoreDebug_BASE      (0xE000EDF0UL)                            /*!< Core Debug Base Address            */

+#define SysTick_BASE        (SCS_BASE +  0x0010UL)                    /*!< SysTick Base Address               */

+#define NVIC_BASE           (SCS_BASE +  0x0100UL)                    /*!< NVIC Base Address                  */

+#define SCB_BASE            (SCS_BASE +  0x0D00UL)                    /*!< System Control Block Base Address  */

+

+#define SCnSCB              ((SCnSCB_Type    *)     SCS_BASE      )   /*!< System control Register not in SCB */

+#define SCB                 ((SCB_Type       *)     SCB_BASE      )   /*!< SCB configuration struct           */

+#define SysTick             ((SysTick_Type   *)     SysTick_BASE  )   /*!< SysTick configuration struct       */

+#define NVIC                ((NVIC_Type      *)     NVIC_BASE     )   /*!< NVIC configuration struct          */

+#define ITM                 ((ITM_Type       *)     ITM_BASE      )   /*!< ITM configuration struct           */

+#define DWT                 ((DWT_Type       *)     DWT_BASE      )   /*!< DWT configuration struct           */

+#define TPI                 ((TPI_Type       *)     TPI_BASE      )   /*!< TPI configuration struct           */

+#define CoreDebug           ((CoreDebug_Type *)     CoreDebug_BASE)   /*!< Core Debug configuration struct    */

+

+#if (__MPU_PRESENT == 1)

+  #define MPU_BASE          (SCS_BASE +  0x0D90UL)                    /*!< Memory Protection Unit             */

+  #define MPU               ((MPU_Type       *)     MPU_BASE      )   /*!< Memory Protection Unit             */

+#endif

+

+/*@} */

+

+

+

+/*******************************************************************************

+ *                Hardware Abstraction Layer

+  Core Function Interface contains:

+  - Core NVIC Functions

+  - Core SysTick Functions

+  - Core Debug Functions

+  - Core Register Access Functions

+ ******************************************************************************/

+/** \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference

+*/

+

+

+

+/* ##########################   NVIC functions  #################################### */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_NVICFunctions NVIC Functions

+    \brief      Functions that manage interrupts and exceptions via the NVIC.

+    @{

+ */

+

+/** \brief  Set Priority Grouping

+

+  The function sets the priority grouping field using the required unlock sequence.

+  The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field.

+  Only values from 0..7 are used.

+  In case of a conflict between priority grouping and available

+  priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.

+

+    \param [in]      PriorityGroup  Priority grouping field.

+ */

+__STATIC_INLINE void NVIC_SetPriorityGrouping(uint32_t PriorityGroup)

+{

+  uint32_t reg_value;

+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);             /* only values 0..7 are used          */

+

+  reg_value  =  SCB->AIRCR;                                                   /* read old register configuration    */

+  reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk));             /* clear bits to change               */

+  reg_value  =  (reg_value                                   |

+                ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |

+                (PriorityGroupTmp << 8)                       );              /* Insert write key and priorty group */

+  SCB->AIRCR =  reg_value;

+}

+

+

+/** \brief  Get Priority Grouping

+

+  The function reads the priority grouping field from the NVIC Interrupt Controller.

+

+    \return                Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field).

+ */

+__STATIC_INLINE uint32_t NVIC_GetPriorityGrouping(void)

+{

+  return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos));

+}

+

+

+/** \brief  Enable External Interrupt

+

+    The function enables a device-specific interrupt in the NVIC interrupt controller.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_EnableIRQ(IRQn_Type IRQn)

+{

+  NVIC->ISER[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Disable External Interrupt

+

+    The function disables a device-specific interrupt in the NVIC interrupt controller.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_DisableIRQ(IRQn_Type IRQn)

+{

+  NVIC->ICER[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Get Pending Interrupt

+

+    The function reads the pending register in the NVIC and returns the pending bit

+    for the specified interrupt.

+

+    \param [in]      IRQn  Interrupt number.

+

+    \return             0  Interrupt status is not pending.

+    \return             1  Interrupt status is pending.

+ */

+__STATIC_INLINE uint32_t NVIC_GetPendingIRQ(IRQn_Type IRQn)

+{

+  return((uint32_t)(((NVIC->ISPR[(((uint32_t)(int32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));

+}

+

+

+/** \brief  Set Pending Interrupt

+

+    The function sets the pending bit of an external interrupt.

+

+    \param [in]      IRQn  Interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_SetPendingIRQ(IRQn_Type IRQn)

+{

+  NVIC->ISPR[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Clear Pending Interrupt

+

+    The function clears the pending bit of an external interrupt.

+

+    \param [in]      IRQn  External interrupt number. Value cannot be negative.

+ */

+__STATIC_INLINE void NVIC_ClearPendingIRQ(IRQn_Type IRQn)

+{

+  NVIC->ICPR[(((uint32_t)(int32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL));

+}

+

+

+/** \brief  Get Active Interrupt

+

+    The function reads the active register in NVIC and returns the active bit.

+

+    \param [in]      IRQn  Interrupt number.

+

+    \return             0  Interrupt status is not active.

+    \return             1  Interrupt status is active.

+ */

+__STATIC_INLINE uint32_t NVIC_GetActive(IRQn_Type IRQn)

+{

+  return((uint32_t)(((NVIC->IABR[(((uint32_t)(int32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)(int32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));

+}

+

+

+/** \brief  Set Interrupt Priority

+

+    The function sets the priority of an interrupt.

+

+    \note The priority cannot be set for every core interrupt.

+

+    \param [in]      IRQn  Interrupt number.

+    \param [in]  priority  Priority to set.

+ */

+__STATIC_INLINE void NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)

+{

+  if((int32_t)IRQn < 0) {

+    SCB->SHP[(((uint32_t)(int32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL);

+  }

+  else {

+    NVIC->IP[((uint32_t)(int32_t)IRQn)]               = (uint8_t)((priority << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL);

+  }

+}

+

+

+/** \brief  Get Interrupt Priority

+

+    The function reads the priority of an interrupt. The interrupt

+    number can be positive to specify an external (device specific)

+    interrupt, or negative to specify an internal (core) interrupt.

+

+

+    \param [in]   IRQn  Interrupt number.

+    \return             Interrupt Priority. Value is aligned automatically to the implemented

+                        priority bits of the microcontroller.

+ */

+__STATIC_INLINE uint32_t NVIC_GetPriority(IRQn_Type IRQn)

+{

+

+  if((int32_t)IRQn < 0) {

+    return(((uint32_t)SCB->SHP[(((uint32_t)(int32_t)IRQn) & 0xFUL)-4UL] >> (8 - __NVIC_PRIO_BITS)));

+  }

+  else {

+    return(((uint32_t)NVIC->IP[((uint32_t)(int32_t)IRQn)]               >> (8 - __NVIC_PRIO_BITS)));

+  }

+}

+

+

+/** \brief  Encode Priority

+

+    The function encodes the priority for an interrupt with the given priority group,

+    preemptive priority value, and subpriority value.

+    In case of a conflict between priority grouping and available

+    priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.

+

+    \param [in]     PriorityGroup  Used priority group.

+    \param [in]   PreemptPriority  Preemptive priority value (starting from 0).

+    \param [in]       SubPriority  Subpriority value (starting from 0).

+    \return                        Encoded priority. Value can be used in the function \ref NVIC_SetPriority().

+ */

+__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority)

+{

+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);   /* only values 0..7 are used          */

+  uint32_t PreemptPriorityBits;

+  uint32_t SubPriorityBits;

+

+  PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);

+  SubPriorityBits     = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));

+

+  return (

+           ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) |

+           ((SubPriority     & (uint32_t)((1UL << (SubPriorityBits    )) - 1UL)))

+         );

+}

+

+

+/** \brief  Decode Priority

+

+    The function decodes an interrupt priority value with a given priority group to

+    preemptive priority value and subpriority value.

+    In case of a conflict between priority grouping and available

+    priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set.

+

+    \param [in]         Priority   Priority value, which can be retrieved with the function \ref NVIC_GetPriority().

+    \param [in]     PriorityGroup  Used priority group.

+    \param [out] pPreemptPriority  Preemptive priority value (starting from 0).

+    \param [out]     pSubPriority  Subpriority value (starting from 0).

+ */

+__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* pPreemptPriority, uint32_t* pSubPriority)

+{

+  uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL);   /* only values 0..7 are used          */

+  uint32_t PreemptPriorityBits;

+  uint32_t SubPriorityBits;

+

+  PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);

+  SubPriorityBits     = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));

+

+  *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL);

+  *pSubPriority     = (Priority                   ) & (uint32_t)((1UL << (SubPriorityBits    )) - 1UL);

+}

+

+

+/** \brief  System Reset

+

+    The function initiates a system reset request to reset the MCU.

+ */

+__STATIC_INLINE void NVIC_SystemReset(void)

+{

+  __DSB();                                                          /* Ensure all outstanding memory accesses included

+                                                                       buffered write are completed before reset */

+  SCB->AIRCR  = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos)    |

+                           (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) |

+                            SCB_AIRCR_SYSRESETREQ_Msk    );         /* Keep priority group unchanged */

+  __DSB();                                                          /* Ensure completion of memory access */

+  while(1) { __NOP(); }                                             /* wait until reset */

+}

+

+/*@} end of CMSIS_Core_NVICFunctions */

+

+

+

+/* ##################################    SysTick function  ############################################ */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_Core_SysTickFunctions SysTick Functions

+    \brief      Functions that configure the System.

+  @{

+ */

+

+#if (__Vendor_SysTickConfig == 0)

+

+/** \brief  System Tick Configuration

+

+    The function initializes the System Timer and its interrupt, and starts the System Tick Timer.

+    Counter is in free running mode to generate periodic interrupts.

+

+    \param [in]  ticks  Number of ticks between two interrupts.

+

+    \return          0  Function succeeded.

+    \return          1  Function failed.

+

+    \note     When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the

+    function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>

+    must contain a vendor-specific implementation of this function.

+

+ */

+__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)

+{

+  if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) { return (1UL); }    /* Reload value impossible */

+

+  SysTick->LOAD  = (uint32_t)(ticks - 1UL);                         /* set reload register */

+  NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */

+  SysTick->VAL   = 0UL;                                             /* Load the SysTick Counter Value */

+  SysTick->CTRL  = SysTick_CTRL_CLKSOURCE_Msk |

+                   SysTick_CTRL_TICKINT_Msk   |

+                   SysTick_CTRL_ENABLE_Msk;                         /* Enable SysTick IRQ and SysTick Timer */

+  return (0UL);                                                     /* Function successful */

+}

+

+#endif

+

+/*@} end of CMSIS_Core_SysTickFunctions */

+

+

+

+/* ##################################### Debug In/Output function ########################################### */

+/** \ingroup  CMSIS_Core_FunctionInterface

+    \defgroup CMSIS_core_DebugFunctions ITM Functions

+    \brief   Functions that access the ITM debug interface.

+  @{

+ */

+

+extern volatile int32_t ITM_RxBuffer;                    /*!< External variable to receive characters.                         */

+#define                 ITM_RXBUFFER_EMPTY    0x5AA55AA5 /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */

+

+

+/** \brief  ITM Send Character

+

+    The function transmits a character via the ITM channel 0, and

+    \li Just returns when no debugger is connected that has booked the output.

+    \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted.

+

+    \param [in]     ch  Character to transmit.

+

+    \returns            Character to transmit.

+ */

+__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch)

+{

+  if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&      /* ITM enabled */

+      ((ITM->TER & 1UL               ) != 0UL)   )     /* ITM Port #0 enabled */

+  {

+    while (ITM->PORT[0].u32 == 0UL) { __NOP(); }

+    ITM->PORT[0].u8 = (uint8_t)ch;

+  }

+  return (ch);

+}

+

+

+/** \brief  ITM Receive Character

+

+    The function inputs a character via the external variable \ref ITM_RxBuffer.

+

+    \return             Received character.

+    \return         -1  No character pending.

+ */

+__STATIC_INLINE int32_t ITM_ReceiveChar (void) {

+  int32_t ch = -1;                           /* no character available */

+

+  if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) {

+    ch = ITM_RxBuffer;

+    ITM_RxBuffer = ITM_RXBUFFER_EMPTY;       /* ready for next character */

+  }

+

+  return (ch);

+}

+

+

+/** \brief  ITM Check Character

+

+    The function checks whether a character is pending for reading in the variable \ref ITM_RxBuffer.

+

+    \return          0  No character available.

+    \return          1  Character available.

+ */

+__STATIC_INLINE int32_t ITM_CheckChar (void) {

+

+  if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) {

+    return (0);                                 /* no character available */

+  } else {

+    return (1);                                 /*    character available */

+  }

+}

+

+/*@} end of CMSIS_core_DebugFunctions */

+

+

+

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __CORE_SC300_H_DEPENDANT */

+

+#endif /* __CMSIS_GENERIC */

diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Lib/libarm_cortexM0l_math.a b/src/bsp/lk/arch/arm/arm-m/CMSIS/Lib/libarm_cortexM0l_math.a
new file mode 100644
index 0000000..c0c7b83
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Lib/libarm_cortexM0l_math.a
Binary files differ
diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Lib/libarm_cortexM3l_math.a b/src/bsp/lk/arch/arm/arm-m/CMSIS/Lib/libarm_cortexM3l_math.a
new file mode 100644
index 0000000..0c90942
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Lib/libarm_cortexM3l_math.a
Binary files differ
diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Lib/libarm_cortexM4l_math.a b/src/bsp/lk/arch/arm/arm-m/CMSIS/Lib/libarm_cortexM4l_math.a
new file mode 100644
index 0000000..724971a
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Lib/libarm_cortexM4l_math.a
Binary files differ
diff --git a/src/bsp/lk/arch/arm/arm-m/CMSIS/Lib/libarm_cortexM4lf_math.a b/src/bsp/lk/arch/arm/arm-m/CMSIS/Lib/libarm_cortexM4lf_math.a
new file mode 100644
index 0000000..83c1dba
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/CMSIS/Lib/libarm_cortexM4lf_math.a
Binary files differ
diff --git a/src/bsp/lk/arch/arm/arm-m/arch.c b/src/bsp/lk/arch/arm/arm-m/arch.c
new file mode 100644
index 0000000..dddfbf5
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/arch.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2012-2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <debug.h>
+#include <arch.h>
+#include <arch/ops.h>
+#include <arch/arm.h>
+#include <kernel/thread.h>
+#include <kernel/debug.h>
+#include <platform.h>
+#include <arch/arm/cm.h>
+#include <target.h>
+
+extern void *vectab;
+
+#if ARM_CM_DYNAMIC_PRIORITY_SIZE
+unsigned int arm_cm_num_irq_pri_bits;
+unsigned int arm_cm_irq_pri_mask;
+#endif
+
+void arch_early_init(void)
+{
+
+    arch_disable_ints();
+
+#if     (__CORTEX_M >= 0x03) || (CORTEX_SC >= 300)
+    uint i;
+    /* set the vector table base */
+    SCB->VTOR = (uint32_t)&vectab;
+
+#if ARM_CM_DYNAMIC_PRIORITY_SIZE
+    /* number of priorities */
+    for (i=0; i < 7; i++) {
+        __set_BASEPRI(1 << i);
+        if (__get_BASEPRI() != 0)
+            break;
+    }
+    arm_cm_num_irq_pri_bits = 8 - i;
+    arm_cm_irq_pri_mask = ~((1 << i) - 1) & 0xff;
+#endif
+
+    /* clear any pending interrupts and set all the vectors to medium priority */
+    uint groups = (SCnSCB->ICTR & 0xf) + 1;
+    for (i = 0; i < groups; i++) {
+        NVIC->ICER[i] = 0xffffffff;
+        NVIC->ICPR[i] = 0xffffffff;
+        for (uint j = 0; j < 32; j++) {
+            NVIC_SetPriority(i*32 + j, arm_cm_medium_priority());
+        }
+    }
+
+    /* leave BASEPRI at 0 */
+    __set_BASEPRI(0);
+
+    /* set priority grouping to 0 */
+    NVIC_SetPriorityGrouping(0);
+
+    /* enable certain faults */
+    SCB->SHCSR |= (SCB_SHCSR_USGFAULTENA_Msk | SCB_SHCSR_BUSFAULTENA_Msk | SCB_SHCSR_MEMFAULTENA_Msk);
+
+    /* set the svc and pendsv priority level to pretty low */
+#endif
+    NVIC_SetPriority(SVCall_IRQn, arm_cm_lowest_priority());
+    NVIC_SetPriority(PendSV_IRQn, arm_cm_lowest_priority());
+
+    /* set systick and debugmonitor to medium priority */
+    NVIC_SetPriority(SysTick_IRQn, arm_cm_medium_priority());
+
+#if (__CORTEX_M >= 0x03)
+    NVIC_SetPriority(DebugMonitor_IRQn, arm_cm_medium_priority());
+#endif
+
+#if ARM_WITH_CACHE
+    arch_enable_cache(UCACHE);
+#endif
+}
+
+void arch_init(void)
+{
+#if ENABLE_CYCLE_COUNTER
+    *REG32(SCB_DEMCR) |= 0x01000000; // global trace enable
+    *REG32(DWT_CYCCNT) = 0;
+    *REG32(DWT_CTRL) |= 1; // enable cycle counter
+#endif
+}
+
+void arch_quiesce(void)
+{
+}
+
+void arch_idle(void)
+{
+    __asm__ volatile("wfi");
+}
+
+#if     (__CORTEX_M >= 0x03) || (CORTEX_SC >= 300)
+
+void _arm_cm_set_irqpri(uint32_t pri)
+{
+    if (pri == 0) {
+        __disable_irq(); // cpsid i
+        __set_BASEPRI(0);
+    } else if (pri >= 256) {
+        __set_BASEPRI(0);
+        __enable_irq();
+    } else {
+        uint32_t _pri = pri & arm_cm_irq_pri_mask;
+
+        if (_pri == 0)
+            __set_BASEPRI(1 << (8 - arm_cm_num_irq_pri_bits));
+        else
+            __set_BASEPRI(_pri);
+        __enable_irq(); // cpsie i
+    }
+}
+#endif
+
+
+void arm_cm_irq_entry(void)
+{
+    // Set PRIMASK to 1
+    // This is so that later calls to arch_ints_disabled() return true while we're inside the int handler
+    // Note: this will probably screw up future efforts to stack higher priority interrupts since we're setting
+    // the cpu to essentially max interrupt priority here. Will have to rethink it then.
+    __disable_irq();
+
+    THREAD_STATS_INC(interrupts);
+    KEVLOG_IRQ_ENTER(__get_IPSR());
+
+    target_set_debug_led(1, true);
+}
+
+void arm_cm_irq_exit(bool reschedule)
+{
+    target_set_debug_led(1, false);
+
+    if (reschedule)
+        arm_cm_trigger_preempt();
+
+    KEVLOG_IRQ_EXIT(__get_IPSR());
+    
+    __enable_irq(); // clear PRIMASK
+}
+
+void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3)
+{
+    PANIC_UNIMPLEMENTED;
+}
diff --git a/src/bsp/lk/arch/arm/arm-m/cache.c b/src/bsp/lk/arch/arm/arm-m/cache.c
new file mode 100644
index 0000000..4699fbd
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/cache.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <debug.h>
+#include <arch.h>
+#include <arch/ops.h>
+#include <arch/arm.h>
+#include <kernel/thread.h>
+#include <kernel/debug.h>
+#include <platform.h>
+#include <arch/arm/cm.h>
+
+#if ARM_WITH_CACHE
+
+/* cache flushing routines for cortex-m cores that support it */
+
+void arch_disable_cache(uint flags)
+{
+    if (flags & DCACHE)
+        SCB_DisableDCache();
+
+    if (flags & ICACHE)
+        SCB_DisableICache();
+}
+
+void arch_enable_cache(uint flags)
+{
+    if (flags & DCACHE)
+        SCB_EnableDCache();
+
+    if (flags & ICACHE)
+        SCB_EnableICache();
+}
+
+/* clean (writeback) data in the data cache on the range */
+void arch_clean_cache_range(addr_t start, size_t len)
+{
+    addr_t end = start + len;
+
+    /* align the start address on CACHE_LINE boundary */
+    start &= ~(CACHE_LINE - 1);
+
+    SCB_CleanDCache_by_Addr((uint32_t *)start, end - start);
+}
+
+/* clean (writeback) and then evict data from the data cache on the range */
+void arch_clean_invalidate_cache_range(addr_t start, size_t len)
+{
+    addr_t end = start + len;
+
+    /* align the start address on CACHE_LINE boundary */
+    start &= ~(CACHE_LINE - 1);
+
+    SCB_CleanInvalidateDCache_by_Addr((uint32_t *)start, end - start);
+}
+
+/* evict data from the data cache on the range */
+void arch_invalidate_cache_range(addr_t start, size_t len)
+{
+    addr_t end = start + len;
+
+    /* align the start address on CACHE_LINE boundary */
+    start &= ~(CACHE_LINE - 1);
+
+    SCB_InvalidateDCache_by_Addr((uint32_t *)start, end - start);
+}
+
+/*
+ * clean (writeback) data on the range and then throw away the instruction cache,
+ * ensuring that new instructions fetched from the range are not stale.
+ */
+void arch_sync_cache_range(addr_t start, size_t len)
+{
+    /* flush the dcache and invalidate the icache, ensuring fresh instructions */
+    arch_clean_cache_range(start, len);
+    SCB_InvalidateICache();
+}
+
+#else
+
+/* doesn't support cache flush, just nop */
+
+void arch_disable_cache(uint flags)
+{
+}
+
+void arch_enable_cache(uint flags)
+{
+}
+
+/* clean (writeback) data in the data cache on the range */
+void arch_clean_cache_range(addr_t start, size_t len)
+{
+}
+
+/* clean (writeback) and then evict data from the data cache on the range */
+void arch_clean_invalidate_cache_range(addr_t start, size_t len)
+{
+}
+
+/* evict data from the data cache on the range */
+void arch_invalidate_cache_range(addr_t start, size_t len)
+{
+}
+
+/*
+ * clean (writeback) data on the range and then throw away the instruction cache,
+ * ensuring that new instructions fetched from the range are not stale.
+ */
+void arch_sync_cache_range(addr_t start, size_t len)
+{
+}
+
+#endif // !ARM_WITH_CACHE
+
diff --git a/src/bsp/lk/arch/arm/arm-m/exceptions.c b/src/bsp/lk/arch/arm/arm-m/exceptions.c
new file mode 100644
index 0000000..d58d2bd
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/exceptions.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2012-2013 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <debug.h>
+#include <stdio.h>
+#include <compiler.h>
+#include <stdint.h>
+#include <kernel/thread.h>
+#include <arch/arm/cm.h>
+#include <platform.h>
+
+static void dump_frame(const struct arm_cm_exception_frame *frame)
+{
+
+    printf("exception frame at %p\n", frame);
+    printf("\tr0  0x%08x r1  0x%08x r2  0x%08x r3 0x%08x r4 0x%08x\n",
+           frame->r0, frame->r1, frame->r2, frame->r3, frame->r4);
+    printf("\tr5  0x%08x r6  0x%08x r7  0x%08x r8 0x%08x r9 0x%08x\n",
+           frame->r5, frame->r6, frame->r7, frame->r8, frame->r9);
+    printf("\tr10 0x%08x r11 0x%08x r12 0x%08x\n",
+           frame->r10, frame->r11, frame->r12);
+    printf("\tlr  0x%08x pc  0x%08x psr 0x%08x\n",
+           frame->lr, frame->pc, frame->psr);
+}
+
+static void hardfault(struct arm_cm_exception_frame *frame)
+{
+    printf("hardfault: ");
+    dump_frame(frame);
+
+#if     (__CORTEX_M >= 0X03) || (__CORTEX_SC >= 300)
+    printf("HFSR 0x%x\n", SCB->HFSR);
+#endif
+
+    platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
+}
+
+static void memmanage(struct arm_cm_exception_frame *frame)
+{
+    printf("memmanage: ");
+    dump_frame(frame);
+
+#if     (__CORTEX_M >= 0X03) || (__CORTEX_SC >= 300)
+    uint32_t mmfsr = SCB->CFSR & 0xff;
+
+    if (mmfsr & (1<<0)) { // IACCVIOL
+        printf("instruction fault\n");
+    }
+    if (mmfsr & (1<<1)) { // DACCVIOL
+        printf("data fault\n");
+    }
+    if (mmfsr & (1<<3)) { // MUNSTKERR
+        printf("fault on exception return\n");
+    }
+    if (mmfsr & (1<<4)) { // MSTKERR
+        printf("fault on exception entry\n");
+    }
+    if (mmfsr & (1<<5)) { // MLSPERR
+        printf("fault on lazy fpu preserve\n");
+    }
+    if (mmfsr & (1<<7)) { // MMARVALID
+        printf("fault address 0x%x\n", SCB->MMFAR);
+    }
+#endif
+    platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
+}
+
+
+static void usagefault(struct arm_cm_exception_frame *frame)
+{
+    printf("usagefault: ");
+    dump_frame(frame);
+
+    platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
+}
+
+static void busfault(struct arm_cm_exception_frame *frame)
+{
+    printf("busfault: ");
+    dump_frame(frame);
+
+    platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
+}
+
+/* raw exception vectors */
+
+void _nmi(void)
+{
+    printf("nmi\n");
+    platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
+}
+#if     (__CORTEX_M >= 0X03) || (__CORTEX_SC >= 300)
+
+__NAKED void _hardfault(void)
+{
+    __asm__ volatile(
+        "push	{r4-r11};"
+        "mov	r0, sp;"
+        "b		%0;"
+        :: "i" (hardfault)
+    );
+    __UNREACHABLE;
+}
+
+void _memmanage(void)
+{
+    __asm__ volatile(
+        "push	{r4-r11};"
+        "mov	r0, sp;"
+        "b		%0;"
+        :: "i" (memmanage)
+    );
+    __UNREACHABLE;
+}
+
+void _busfault(void)
+{
+    __asm__ volatile(
+        "push	{r4-r11};"
+        "mov	r0, sp;"
+        "b		%0;"
+        :: "i" (busfault)
+    );
+    __UNREACHABLE;
+}
+
+void _usagefault(void)
+{
+    __asm__ volatile(
+        "push	{r4-r11};"
+        "mov	r0, sp;"
+        "b		%0;"
+        :: "i" (usagefault)
+    );
+    __UNREACHABLE;
+}
+#else
+
+__NAKED void _hardfault(void)
+{
+    struct arm_cm_exception_frame * frame;
+    __asm__ volatile(
+        "push	{r4-r7};"
+        "mov   r4, r8;"
+        "mov   r5, r9;"
+        "mov   r6, r10;"
+        "mov   r7, r11;"
+        "push   {r4-r7};"
+        "mov	%0, sp;"
+        : "=r" (frame):
+    );
+
+    printf("hardfault: ");
+    dump_frame(frame);
+
+    platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
+    __UNREACHABLE;
+}
+
+void _memmanage(void)
+{
+    struct arm_cm_exception_frame * frame;
+    __asm__ volatile(
+        "push	{r4-r7};"
+        "mov   r4, r8;"
+        "mov   r5, r9;"
+        "mov   r6, r10;"
+        "mov   r7, r11;"
+        "push   {r4-r7};"
+        "mov	%0, sp;"
+        : "=r" (frame):
+    );
+    printf("memmanage: ");
+    dump_frame(frame);
+
+    platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
+    __UNREACHABLE;
+}
+
+void _busfault(void)
+{
+    struct arm_cm_exception_frame * frame;
+    __asm__ volatile(
+        "push	{r4-r7};"
+        "mov   r4, r8;"
+        "mov   r5, r9;"
+        "mov   r6, r10;"
+        "mov   r7, r11;"
+        "push   {r4-r7};"
+        "mov	%0, sp;"
+        : "=r" (frame):
+    );
+    printf("busfault: ");
+    dump_frame(frame);
+
+    platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
+    __UNREACHABLE;
+}
+
+void _usagefault(void)
+{
+    struct arm_cm_exception_frame * frame;
+    __asm__ volatile(
+        "push	{r4-r7};"
+        "mov   r4, r8;"
+        "mov   r5, r9;"
+        "mov   r6, r10;"
+        "mov   r7, r11;"
+        "push   {r4-r7};"
+        "mov	%0, sp;"
+        : "=r" (frame):
+    );
+    printf("usagefault: ");
+    dump_frame(frame);
+    platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
+    __UNREACHABLE;
+}
+#endif
+/* systick handler */
+void __WEAK _systick(void)
+{
+    printf("systick\n");
+    platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
+}
+
+void __WEAK _debugmonitor(void)
+{
+    printf("debugmonitor\n");
+    platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
+}
diff --git a/src/bsp/lk/arch/arm/arm-m/include/arch/arch_thread.h b/src/bsp/lk/arch/arm/arm-m/include/arch/arch_thread.h
new file mode 100644
index 0000000..4e2b839
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/include/arch/arch_thread.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2008-2012 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __ARM_M_ARCH_THREAD_H
+#define __ARM_M_ARCH_THREAD_H
+
+#include <stdbool.h>
+#include <sys/types.h>
+
+struct arch_thread {
+    vaddr_t sp;
+    bool was_preempted;
+};
+
+#endif
+
diff --git a/src/bsp/lk/arch/arm/arm-m/include/arch/arm/cm.h b/src/bsp/lk/arch/arm/arm-m/include/arch/arm/cm.h
new file mode 100644
index 0000000..d3db3ac
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/include/arch/arm/cm.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2012-2013 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __ARCH_ARM_CM_H
+#define __ARCH_ARM_CM_H
+
+/* support header for all cortex-m class cpus */
+
+#include <compiler.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <platform/platform_cm.h>
+
+#if ARM_CPU_CORTEX_M0
+#include <core_cm0.h>
+#elif ARM_CPU_CORTEX_M0_PLUS
+#include <core_cm0plus.h>
+#elif ARM_CPU_CORTEX_M3
+#include <core_cm3.h>
+#elif ARM_CPU_CORTEX_M4
+#include <core_cm4.h>
+#elif ARM_CPU_CORTEX_M7
+#include <core_cm7.h>
+#else
+#error "unknown cortex-m core"
+#endif
+
+/* registers dealing with the cycle counter */
+#define DWT_CTRL (0xE0001000)
+#define DWT_CYCCNT (0xE0001004)
+#define SCB_DEMCR (0xE000EDFC)
+
+struct arm_cm_exception_frame {
+    uint32_t r4;
+    uint32_t r5;
+    uint32_t r6;
+    uint32_t r7;
+    uint32_t r8;
+    uint32_t r9;
+    uint32_t r10;
+    uint32_t r11;
+    uint32_t r0;
+    uint32_t r1;
+    uint32_t r2;
+    uint32_t r3;
+    uint32_t r12;
+    uint32_t lr;
+    uint32_t pc;
+    uint32_t psr;
+};
+
+struct arm_cm_exception_frame_short {
+    uint32_t r0;
+    uint32_t r1;
+    uint32_t r2;
+    uint32_t r3;
+    uint32_t r12;
+    uint32_t lr;
+    uint32_t pc;
+    uint32_t psr;
+};
+
+struct arm_cm_exception_frame_long {
+    uint32_t r4;
+    uint32_t r5;
+    uint32_t r6;
+    uint32_t r7;
+    uint32_t r8;
+    uint32_t r9;
+    uint32_t r10;
+    uint32_t r11;
+    uint32_t lr;
+    uint32_t r0;
+    uint32_t r1;
+    uint32_t r2;
+    uint32_t r3;
+    uint32_t r12;
+    uint32_t exc_lr;
+    uint32_t pc;
+    uint32_t psr;
+};
+
+#if ARM_CM_DYNAMIC_PRIORITY_SIZE
+extern unsigned int arm_cm_num_irq_pri_bits;
+extern unsigned int arm_cm_irq_pri_mask;
+#else
+/* if we don't want to calculate the number of priority bits, then assume
+ * the cpu implements 3 (8 priority levels), which is the minimum according to spec.
+ */
+#ifndef __NVIC_PRIO_BITS
+#define __NVIC_PRIO_BITS 3
+#endif
+static const unsigned int arm_cm_num_irq_pri_bits = __NVIC_PRIO_BITS;
+static const unsigned int arm_cm_irq_pri_mask = ~((1 << __NVIC_PRIO_BITS) - 1) & 0xff;
+#endif
+
+#if     (__CORTEX_M >= 0x03) || (CORTEX_SC >= 300)
+
+void _arm_cm_set_irqpri(uint32_t pri);
+
+static void arm_cm_set_irqpri(uint32_t pri)
+{
+    if (__ISCONSTANT(pri)) {
+        if (pri == 0) {
+            __disable_irq(); // cpsid i
+            __set_BASEPRI(0);
+        } else if (pri >= 256) {
+            __set_BASEPRI(0);
+            __enable_irq();
+        } else {
+            uint32_t _pri = pri & arm_cm_irq_pri_mask;
+
+            if (_pri == 0)
+                __set_BASEPRI(1 << (8 - arm_cm_num_irq_pri_bits));
+            else
+                __set_BASEPRI(_pri);
+            __enable_irq(); // cpsie i
+        }
+    } else {
+        _arm_cm_set_irqpri(pri);
+    }
+}
+#endif
+
+static inline uint32_t arm_cm_highest_priority(void)
+{
+    return 0;
+}
+
+static inline uint32_t arm_cm_lowest_priority(void)
+{
+    return (1 << arm_cm_num_irq_pri_bits) - 1;
+}
+
+static inline uint32_t arm_cm_medium_priority(void)
+{
+    return (1 << (arm_cm_num_irq_pri_bits - 1));
+}
+
+#if     (__CORTEX_M >= 0x03) || (CORTEX_SC >= 300)
+static inline void arm_cm_trigger_interrupt(int vector)
+{
+    NVIC->STIR = vector;
+}
+#endif
+
+
+static inline void arm_cm_trigger_preempt(void)
+{
+    SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
+}
+
+
+
+/* systick */
+void arm_cm_systick_init(uint32_t mhz);
+/* extern void _systick(void); // override this */
+
+/* interrupt glue */
+/*
+ * Platform code should put this as the first and last line of their irq handlers.
+ * Pass true to reschedule to request a preempt.
+ */
+void arm_cm_irq_entry(void);
+void arm_cm_irq_exit(bool reschedule);
+
+#endif
+
diff --git a/src/bsp/lk/arch/arm/arm-m/spin_cycles.c b/src/bsp/lk/arch/arm/arm-m/spin_cycles.c
new file mode 100644
index 0000000..01e07f0
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/spin_cycles.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2013 Google Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <compiler.h>
+#include <kernel/debug.h>
+
+__ALIGNED(8) __NAKED
+#if     (__CORTEX_M >= 0x03) || (CORTEX_SC >= 300)
+
+void spin_cycles(uint32_t cycles)
+{
+    asm (
+        /* 4 cycles per loop, subtract out 8 cycles for the overhead of the next
+         * 4 instructions, plus the call into and return from the function.
+         * Then, add 3 then >> 2 to round up to the number of loop iterations.
+         */
+        "subs r1, %[cycles], #5\n"
+        "asrs r1, r1, #2\n"
+        "ble .Ldone\n"
+
+        /* Padding to stay aligned on an 8 byte boundary, also has the added
+         * advantage of normalizing the overhead (1+1+2 cycles if the branch is
+         * taken, or 1+1+1+1 cycles if the branch is skipped and the nop is
+         * executed)
+         */
+        "nop\n"
+
+        /* Main delay loop.
+         * sub is 1 cycle
+         * nop is 1 cycle
+         * branch is 2 cycles
+         */
+        ".Lloop:\n"
+        "subs r1, r1, #1\n"
+        "nop\n"
+        "bne .Lloop\n"
+
+        ".Ldone:\n"
+        "bx lr\n"
+        :                       /* no output */
+        : [cycles] "r" (cycles) /* input is cycles */
+        : "r1"                  /* r1 gets clobbered */
+    );
+}
+
+#else
+/* Cortex-M0 & Cortex-M0+    */
+void spin_cycles(uint32_t cycles)
+{
+    asm (
+        /* 4 cycles per loop, subtract out 8 cycles for the overhead of the next
+         * 4 instructions, plus the call into and return from the function.
+         * Then, add 3 then >> 2 to round up to the number of loop iterations.
+         */
+        "sub r1, %[cycles], #5\n"
+        "asr r1, r1, #2\n"
+        "cmp r1, #0\n"
+        "ble .Ldone\n"
+
+        /* Padding to stay aligned on an 8 byte boundary, also has the added
+         * advantage of normalizing the overhead (1+1+2 cycles if the branch is
+         * take, or 1+1+1+1 cycles if the branch is skipped and the nop is
+         * executed)
+         */
+        "nop\n"
+
+        /* Main delay loop.
+         * sub is 1 cycle
+         * nop is 1 cycle
+         * branch is 2 cycles
+         */
+        ".Lloop:\n"
+        "sub r1, r1, #1\n"
+        "cmp r1,#0\n"
+        "bne .Lloop\n"
+
+        ".Ldone:\n"
+        "bx lr\n"
+        :                       /* no output */
+        : [cycles] "r" (cycles) /* input is cycles */
+        : "r1"                  /* r1 gets clobbered */
+    );
+}
+#endif
diff --git a/src/bsp/lk/arch/arm/arm-m/start.c b/src/bsp/lk/arch/arm/arm-m/start.c
new file mode 100644
index 0000000..323231b
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/start.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2012 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <debug.h>
+#include <compiler.h>
+#include <stdint.h>
+
+/* externals */
+extern unsigned int __data_start_rom, __data_start, __data_end;
+extern unsigned int __bss_start, __bss_end;
+
+extern void lk_main(void) __NO_RETURN __EXTERNALLY_VISIBLE;
+
+void _start(void)
+{
+    /* copy data from rom */
+    if (&__data_start != &__data_start_rom) {
+        unsigned int *src = &__data_start_rom;
+        unsigned int *dest = &__data_start;
+
+        while (dest != &__data_end)
+            *dest++ = *src++;
+    }
+
+    /* zero out bss */
+    unsigned int *bss = &__bss_start;
+    while (bss != &__bss_end)
+        *bss++ = 0;
+
+    lk_main();
+}
diff --git a/src/bsp/lk/arch/arm/arm-m/systick/rules.mk b/src/bsp/lk/arch/arm/arm-m/systick/rules.mk
new file mode 100644
index 0000000..8e9c116
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/systick/rules.mk
@@ -0,0 +1,8 @@
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE := $(LOCAL_DIR)
+
+MODULE_SRCS += \
+	$(LOCAL_DIR)/systick.c
+
+include make/module.mk
diff --git a/src/bsp/lk/arch/arm/arm-m/systick/systick.c b/src/bsp/lk/arch/arm/arm-m/systick/systick.c
new file mode 100644
index 0000000..b4fdda5
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/systick/systick.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2012-2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Generic systick timer support for providing system time (current_time(), current_time_hires()),
+ * and a monotonic timer for the kernel.
+ */
+
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <debug.h>
+#include <assert.h>
+#include <trace.h>
+#include <err.h>
+#include <kernel/thread.h>
+#include <arch/arm.h>
+#include <arch/arm/cm.h>
+#include <platform.h>
+#include <platform/timer.h>
+
+#define LOCAL_TRACE 0
+
+static volatile uint64_t ticks;
+static uint32_t tick_rate = 0;
+static uint32_t tick_rate_mhz = 0;
+static lk_time_t tick_interval_ms;
+static lk_bigtime_t tick_interval_us;
+
+static platform_timer_callback cb;
+static void *cb_args;
+
+static void arm_cm_systick_set_periodic(lk_time_t period)
+{
+    LTRACEF("clk_freq %u, period %u\n", tick_rate, (uint)period);
+
+    uint32_t ticks = tick_rate / (1000 / period);
+    LTRACEF("ticks %d\n", ticks);
+
+    SysTick->LOAD = (ticks & SysTick_LOAD_RELOAD_Msk) - 1;
+    SysTick->VAL = 0;
+    SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | SysTick_CTRL_TICKINT_Msk | SysTick_CTRL_ENABLE_Msk;
+}
+
+static void arm_cm_systick_cancel_periodic(void)
+{
+    SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
+}
+
+/* main systick irq handler */
+void _systick(void)
+{
+    ticks++;
+
+    arm_cm_irq_entry();
+
+    bool resched = false;
+    if (cb) {
+        lk_time_t now = current_time();
+        if (cb(cb_args, now) == INT_RESCHEDULE)
+            resched = true;
+    }
+
+    arm_cm_irq_exit(resched);
+}
+
+status_t platform_set_periodic_timer(platform_timer_callback callback, void *arg, lk_time_t interval)
+{
+    LTRACEF("callback %p, arg %p, interval %u\n", callback, arg, interval);
+
+    DEBUG_ASSERT(tick_rate != 0 && tick_rate_mhz != 0);
+
+    cb = callback;
+    cb_args = arg;
+
+    tick_interval_ms = interval;
+    tick_interval_us = interval * 1000;
+    arm_cm_systick_set_periodic(interval);
+
+    return NO_ERROR;
+}
+
+lk_time_t current_time(void)
+{
+    uint32_t reload = SysTick->LOAD  & SysTick_LOAD_RELOAD_Msk;
+
+    uint64_t t;
+    uint32_t delta;
+    do {
+        t = ticks;
+        delta = (volatile uint32_t)SysTick->VAL;
+        DMB;
+    } while (ticks != t);
+
+    /* convert ticks to msec */
+    delta = (reload - delta) / (tick_rate_mhz * 1000);
+    lk_time_t res = (t * tick_interval_ms) + delta;
+
+    return res;
+}
+
+lk_bigtime_t current_time_hires(void)
+{
+    uint32_t reload = SysTick->LOAD  & SysTick_LOAD_RELOAD_Msk;
+
+    uint64_t t;
+    uint32_t delta;
+    do {
+        t = ticks;
+        delta = (volatile uint32_t)SysTick->VAL;
+        DMB;
+    } while (ticks != t);
+
+    /* convert ticks to usec */
+    delta = (reload - delta) / tick_rate_mhz;
+    lk_bigtime_t res = (t * tick_interval_us) + delta;
+
+    return res;
+}
+
+void arm_cm_systick_init(uint32_t mhz)
+{
+    tick_rate = mhz;
+    tick_rate_mhz = mhz / 1000000;
+}
diff --git a/src/bsp/lk/arch/arm/arm-m/thread.c b/src/bsp/lk/arch/arm/arm-m/thread.c
new file mode 100644
index 0000000..c0a1cc4
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/thread.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2012 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <debug.h>
+#include <trace.h>
+#include <assert.h>
+#include <kernel/thread.h>
+#include <arch/arm.h>
+#include <arch/arm/cm.h>
+
+#define LOCAL_TRACE 0
+
+/* Registers saved by the software context-switch paths in this file.
+ * The layout must match the push/pop sequences below (callee-saved
+ * r4-r11 plus the return address in lr, lr at the highest address). */
+struct arm_cm_context_switch_frame {
+    uint32_t r4;
+    uint32_t r5;
+    uint32_t r6;
+    uint32_t r7;
+    uint32_t r8;
+    uint32_t r9;
+    uint32_t r10;
+    uint32_t r11;
+    uint32_t lr;
+};
+
+/* since we're implicitly uniprocessor, store a pointer to the current thread here */
+thread_t *_current_thread;
+
+/* First code run in a new thread's context: drops the thread lock held
+ * across the reschedule, enables interrupts, runs the thread's entry
+ * routine, and exits the thread with its return value. Never returns. */
+static void initial_thread_func(void) __NO_RETURN;
+static void initial_thread_func(void)
+{
+    int ret;
+
+    LTRACEF("thread %p calling %p with arg %p\n", _current_thread, _current_thread->entry, _current_thread->arg);
+#if LOCAL_TRACE
+    dump_thread(_current_thread);
+#endif
+
+    /* release the thread lock that was implicitly held across the reschedule */
+    spin_unlock(&thread_lock);
+    arch_enable_ints();
+
+    ret = _current_thread->entry(_current_thread->arg);
+
+    LTRACEF("thread %p exiting with %d\n", _current_thread, ret);
+
+    thread_exit(ret);
+}
+
+/* Build the initial context-switch frame at the top of a new thread's
+ * stack so the first switch into it "returns" into initial_thread_func. */
+void arch_thread_initialize(struct thread *t)
+{
+    LTRACEF("thread %p, stack %p\n", t, t->stack);
+
+    /* find the top of the stack and align it on an 8 byte boundary */
+    uint32_t *sp = (void *)ROUNDDOWN((vaddr_t)t->stack + t->stack_size, 8);
+
+    /* carve a context switch frame out of the top of the stack */
+    struct arm_cm_context_switch_frame *frame = (void *)sp;
+    frame--;
+
+    /* arrange for lr to point to our starting routine */
+    frame->lr = (uint32_t)&initial_thread_func;
+
+    t->arch.sp = (addr_t)frame;
+    t->arch.was_preempted = false;
+}
+
+/* Non-NULL while a pendsv preemption is in progress; points at the
+ * exception frame of the thread being preempted. Consumed (and cleared)
+ * by arch_context_switch(). */
+volatile struct arm_cm_exception_frame_long *preempt_frame;
+
+/* C half of the pendsv handler: records the preempted frame and lets the
+ * scheduler decide whether to switch threads. Runs with the frame built
+ * by the naked _pendsv wrapper below. */
+static void pendsv(struct arm_cm_exception_frame_long *frame)
+{
+    arch_disable_ints();
+
+    LTRACEF("preempting thread %p (%s)\n", _current_thread, _current_thread->name);
+
+    /* save the iframe the pendsv fired on and hit the preemption code */
+    preempt_frame = frame;
+    thread_preempt();
+
+    LTRACEF("fell through\n");
+
+    /* if we got here, there wasn't anything to switch to, so just fall through and exit */
+    preempt_frame = NULL;
+
+    arch_enable_ints();
+}
+
+/*
+ * raw pendsv exception handler, triggered by interrupt glue to schedule
+ * a preemption check. Saves the callee-saved registers on top of the
+ * hardware exception frame (forming an arm_cm_exception_frame_long),
+ * calls pendsv() with a pointer to it, then restores and returns.
+ * The pre-v7M path shuffles r8-r11 through low registers because
+ * Thumb-1 push/pop can only address r0-r7/lr.
+ */
+__NAKED void _pendsv(void)
+{
+    __asm__ volatile(
+#if       (__CORTEX_M >= 0x03)
+
+        /* NOTE(review): the v7M path uses "%0" while the v6M path uses
+         * "%c0" for the same "i" operand; with an "i" constraint plain
+         * %0 normally expands with a leading '#' — verify this branch
+         * actually assembles on v7M builds. */
+        "push	{ r4-r11, lr };"
+        "mov	r0, sp;"
+        "bl		%0;"
+        "pop	{ r4-r11, lr };"
+        "bx		lr;"
+#else
+        "push   { lr };"
+        "mov    r0, r8;"
+        "mov    r1, r9;"
+        "mov    r2, r10;"
+        "mov    r3, r11;"
+        "push   { r0-r3 };"
+        "push   { r4-r7 };"
+        "mov	r0, sp;"
+        "bl     %c0;"
+        "pop    { r4-r7 };"
+        "pop    { r0-r3 };"
+        "mov    r8 , r0;"
+        "mov    r9 , r1;"
+        "mov    r10, r2;"
+        "mov    r11, r3;"
+        "pop    { r0 };"
+        "mov    lr, r0;"
+        "bx     lr;"
+#endif
+        :: "i" (pendsv)
+    );
+    __UNREACHABLE;
+}
+/*
+ * svc handler, used to hard switch the cpu into exception mode to return
+ * to a preempted thread. Expects r4 to hold the address of the long
+ * exception frame to restore (set up by _half_save_and_svc before the
+ * "svc #0"); it becomes the new sp and the saved registers are popped.
+ */
+__NAKED void _svc(void)
+{
+    __asm__ volatile(
+        /* load the pointer to the original exception frame we want to restore */
+#if       (__CORTEX_M >= 0x03)
+        "mov	sp, r4;"
+        "pop	{ r4-r11, lr };"
+        "bx		lr;"
+#else
+        "mov	sp, r4;"
+        "pop    { r4-r7 };"
+        "pop    { r0-r3 };"
+        "mov    r8 , r0;"
+        "mov    r9 , r1;"
+        "mov    r10, r2;"
+        "mov    r11, r3;"
+        "pop	{ pc };"
+#endif
+    );
+}
+
+/* Save the current (non-preempted) thread's callee-saved state at *fromsp,
+ * switch sp to tosp, then issue "svc #0" so the _svc handler can restore
+ * the destination thread's full exception frame from handler mode.
+ * r4 carries tosp into the svc handler. */
+__NAKED static void _half_save_and_svc(vaddr_t *fromsp, vaddr_t tosp)
+{
+    __asm__ volatile(
+#if       (__CORTEX_M >= 0x03)
+
+        "push	{ r4-r11, lr };"
+        "str	sp, [r0];"
+
+        /* make sure we load the destination sp here before we reenable interrupts */
+        "mov	sp, r1;"
+
+        "clrex;"
+        "cpsie 	i;"
+
+        "mov	r4, r1;"
+        "svc #0;" /* make a svc call to get us into handler mode */
+
+#else
+        /* v6M: spill r8-r11 via low registers (Thumb-1 push limitation) */
+        "push   { lr };"
+        "mov    r2, r10;"
+        "mov    r3, r11;"
+        "push   { r2-r3 };"
+        "mov    r2, r8;"
+        "mov    r3, r9;"
+        "push   { r2-r3 };"
+        "push   { r4-r7 };"
+
+        "mov    r3, sp;"
+        "str	r3, [r0];"
+        "mov	sp, r1;"
+        "cpsie 	i;"
+
+        "mov	r4, r1;"
+        "svc #0;"           /* make a svc call to get us into handler mode */
+#endif
+    );
+}
+
+/* simple scenario where both the to and from thread yielded (neither was
+ * preempted): save callee-saved state at *fromsp, adopt tosp, and pop the
+ * destination thread's frame directly — no exception machinery needed. */
+__NAKED static void _arch_non_preempt_context_switch(vaddr_t *fromsp, vaddr_t tosp)
+{
+    __asm__ volatile(
+#if       (__CORTEX_M >= 0x03)
+        "push	{ r4-r11, lr };"
+        "str	sp, [r0];"
+
+        "mov	sp, r1;"
+        "pop	{ r4-r11, lr };"
+        "clrex;"
+        "bx		lr;"
+#else
+        /* v6M: spill/restore r8-r11 via low registers (Thumb-1 limitation) */
+        "push   { lr };"
+        "mov    r2, r10;"
+        "mov    r3, r11;"
+        "push   { r2-r3 };"
+        "mov    r2, r8;"
+        "mov    r3, r9;"
+        "push   { r2-r3 };"
+        "push   { r4-r7 };"
+
+        "mov    r3, sp;"
+        "str	r3, [r0];"
+        "mov	sp, r1;"
+
+        "pop    { r4-r7 };"
+        "pop    { r0-r3 };"
+        "mov    r8 , r0;"
+        "mov    r9 , r1;"
+        "mov    r10, r2;"
+        "mov    r11, r3;"
+        "pop    { pc };"
+#endif
+    );
+}
+
+/* Landing pad used when exception-returning from a pendsv into a thread
+ * that was not itself preempted: the fake hardware frame built in
+ * arch_context_switch() points pc here, and this pops the thread's saved
+ * context-switch frame to resume it in thread mode. */
+__NAKED static void _thread_mode_bounce(void)
+{
+    __asm__ volatile(
+#if       (__CORTEX_M >= 0x03)
+        "pop	{ r4-r11, lr };"
+        "bx		lr;"
+#else
+        "pop    { r4-r7 };"
+        "pop    { r0-r3 };"
+        "mov    r8 , r0;"
+        "mov    r9 , r1;"
+        "mov    r10, r2;"
+        "mov    r11, r3;"
+        "pop    { pc };"
+#endif
+    );
+    __UNREACHABLE;
+}
+
+/*
+ * The raw context switch routine. Called by the scheduler when it decides to switch.
+ * Called either in the context of a thread yielding or blocking (interrupts disabled,
+ * on the system stack), or inside the pendsv handler on a thread that is being preempted
+ * (interrupts disabled, in handler mode). If preempt_frame is set the thread
+ * is being preempted.
+ *
+ * Four cases, by (old preempted?, new preempted?):
+ *  - (yes, yes): restore the new thread's long iframe inline below.
+ *  - (yes, no):  fabricate a hardware exception frame that "returns" into
+ *                _thread_mode_bounce, then exception-return via 0xfffffff9
+ *                (handler -> thread mode, main stack).
+ *  - (no, yes):  _half_save_and_svc — needs an svc to reach handler mode.
+ *  - (no, no):   _arch_non_preempt_context_switch — plain register swap.
+ */
+void arch_context_switch(struct thread *oldthread, struct thread *newthread)
+{
+    LTRACE_ENTRY;
+
+    /* if preempt_frame is set, we are being preempted */
+    if (preempt_frame) {
+        oldthread->arch.was_preempted = true;
+        oldthread->arch.sp = (addr_t)preempt_frame;
+        preempt_frame = NULL;
+
+        LTRACEF("we're preempted, new %d\n", newthread->arch.was_preempted);
+        if (newthread->arch.was_preempted) {
+            /* return directly to the preempted thread's iframe */
+            __asm__ volatile(
+                "mov	sp, %0;"
+#if       (__CORTEX_M >= 0x03)
+                "cpsie	i;"
+                "pop	{ r4-r11, lr };"
+                "clrex;"
+                "bx		lr;"
+#else
+                "cpsie	i;"
+                "pop    { r4-r7 };"
+                "pop    { r0-r3 };"
+                "mov    r8 , r0;"
+                "mov    r9 , r1;"
+                "mov    r10, r2;"
+                "mov    r11, r3;"
+                "pop    { pc };"
+#endif
+                :: "r"(newthread->arch.sp)
+            );
+            __UNREACHABLE;
+        } else {
+            /* we're inside a pendsv, switching to a user mode thread */
+            /* set up a fake frame to exception return to */
+            struct arm_cm_exception_frame_short *frame = (void *)newthread->arch.sp;
+            frame--;
+
+            frame->pc = (uint32_t)&_thread_mode_bounce;
+            frame->psr = (1 << 24); /* thread bit set, IPSR 0 */
+            frame->r0 = frame->r1 =  frame->r2 = frame->r3 = frame->r12 = frame->lr = 99; /* 99: recognizable garbage, nothing reads these */
+
+            LTRACEF("iretting to user space\n");
+            //hexdump(frame, sizeof(*frame) + 64);
+
+            __asm__ volatile(
+#if       (__CORTEX_M >= 0x03)
+		"clrex;"
+#endif
+                "mov	sp, %0;"
+                "bx		%1;" /* 0xfffffff9 = EXC_RETURN: thread mode, main stack */
+                :: "r"(frame), "r"(0xfffffff9)
+            );
+            __UNREACHABLE;
+        }
+    } else {
+        oldthread->arch.was_preempted = false;
+
+        if (newthread->arch.was_preempted) {
+            LTRACEF("not being preempted, but switching to preempted thread\n");
+            _half_save_and_svc(&oldthread->arch.sp, newthread->arch.sp);
+        } else {
+            /* fast path, both sides did not preempt */
+            _arch_non_preempt_context_switch(&oldthread->arch.sp, newthread->arch.sp);
+        }
+    }
+
+}
+
+/* Print the arch-specific state (saved sp and preemption flag) of a
+ * thread that is not currently running. */
+void arch_dump_thread(thread_t *t)
+{
+    if (t->state == THREAD_RUNNING)
+        return;
+
+    dprintf(INFO, "\tarch: ");
+    dprintf(INFO, "sp 0x%lx, was preempted %u\n", t->arch.sp, t->arch.was_preempted);
+}
+
+
diff --git a/src/bsp/lk/arch/arm/arm-m/vectab.c b/src/bsp/lk/arch/arm/arm-m/vectab.c
new file mode 100644
index 0000000..690c55a
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm-m/vectab.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2012 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <compiler.h>
+#include <stdint.h>
+
+/*
+ * Make a nice 8 byte aligned stack to run on before the threading system is up.
+ * Put it in the .bss.prebss.* section to make sure it doesn't get wiped
+ * when bss is cleared a little ways into boot.
+ */
+static uint8_t initial_stack[1024] __SECTION(".bss.prebss.initial_stack") __ALIGNED(8);
+
+extern void _start(void);
+extern void _nmi(void);
+extern void _hardfault(void);
+extern void _memmanage(void);
+extern void _busfault(void);
+extern void _usagefault(void);
+extern void _svc(void);
+extern void _debugmonitor(void);
+extern void _pendsv(void);
+extern void _systick(void);
+
+#if defined(WITH_DEBUGGER_INFO)
+extern struct __debugger_info__ _debugger_info;
+#endif
+
+/* Cortex-M vector table, placed at the start of the image. Entry 0 is
+ * the initial sp, entry 1 the reset handler; the remaining slots are the
+ * architectural system exceptions. Reserved slots 7-8 are optionally
+ * repurposed for a debugger signature + info pointer. */
+const void * const __SECTION(".text.boot.vectab1") vectab[] = {
+    /* arm exceptions */
+    initial_stack + sizeof(initial_stack),
+    _start,
+    _nmi, // nmi
+    _hardfault, // hard fault
+    _memmanage, // mem manage
+    _busfault, // bus fault
+    _usagefault, // usage fault
+    0, // reserved
+#if defined(WITH_DEBUGGER_INFO)
+    (void*) 0x52474244, // 'DBGR' magic consumed by external debugger tooling
+    &_debugger_info,
+#else
+    0, // reserved
+    0, // reserved
+#endif
+    0, // reserved
+    _svc, // svcall
+    _debugmonitor, // debug monitor
+    0, // reserved
+    _pendsv, // pendsv
+    _systick, // systick
+};
+
+
+
diff --git a/src/bsp/lk/arch/arm/arm/arch.c b/src/bsp/lk/arch/arm/arm/arch.c
new file mode 100644
index 0000000..607c0f6
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm/arch.c
@@ -0,0 +1,446 @@
+/*
+ * Copyright (c) 2008-2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <compiler.h>
+#include <debug.h>
+#include <trace.h>
+#include <stdlib.h>
+#include <err.h>
+#include <trace.h>
+#include <stdio.h>
+#include <reg.h>
+#include <arch.h>
+#include <arch/ops.h>
+#include <arch/mmu.h>
+#include <arch/arm.h>
+#include <arch/arm/mmu.h>
+#include <arch/mp.h>
+#include <kernel/spinlock.h>
+#include <kernel/thread.h>
+#include <lk/main.h>
+#include <lk/init.h>
+#include <platform.h>
+#include <target.h>
+#include <kernel/thread.h>
+
+#define LOCAL_TRACE 0
+
+#if WITH_DEV_TIMER_ARM_CORTEX_A9
+#include <dev/timer/arm_cortex_a9.h>
+#endif
+#if WITH_DEV_INTERRUPT_ARM_GIC
+#include <dev/interrupt/arm_gic.h>
+#endif
+#if WITH_DEV_CACHE_PL310
+#include <dev/cache/pl310.h>
+#endif
+
+/* initial and abort stacks */
+uint8_t abort_stack[ARCH_DEFAULT_STACK_SIZE * SMP_MAX_CPUS] __CPU_ALIGN;
+
+static void arm_basic_setup(void);
+static void spinlock_test(void);
+static void spinlock_test_secondary(void);
+
+#if WITH_SMP
+/* smp boot lock */
+spin_lock_t arm_boot_cpu_lock = 1;
+volatile int secondaries_to_init = 0;
+__WEAK const uint8_t *linear_cpuid_map = NULL;
+#endif
+
+/* Early arch bring-up: runs with caches disabled, configures the core
+ * (sctlr/actlr), optionally enables SCU snooping and sets up early MMU
+ * mappings, then turns the caches back on. */
+void arch_early_init(void)
+{
+    /* turn off the cache */
+    arch_disable_cache(UCACHE);
+#if WITH_DEV_CACHE_PL310
+    pl310_set_enable(false);
+#endif
+
+    arm_basic_setup();
+
+#if WITH_SMP && ARM_CPU_CORTEX_A9
+    /* enable snoop control */
+    addr_t scu_base = arm_read_cbar();
+    *REG32(scu_base) |= (1<<0); /* enable SCU */
+#endif
+
+#if ARM_WITH_MMU
+    arm_mmu_early_init();
+
+    platform_init_mmu_mappings();
+#endif
+
+    /* turn the cache back on */
+#if WITH_DEV_CACHE_PL310
+    pl310_set_enable(true);
+#endif
+    arch_enable_cache(UCACHE);
+}
+
+/* Later arch init: on SMP builds, discovers the secondary cpu count
+ * (SCU config on A9, L2CTLR on A7/A15, SMP_MAX_CPUS otherwise), releases
+ * the secondaries held on arm_boot_cpu_lock, optionally waits for them,
+ * then finishes MMU initialization. */
+void arch_init(void)
+{
+#if WITH_SMP
+    arch_mp_init_percpu();
+
+    LTRACEF("midr 0x%x\n", arm_read_midr());
+    LTRACEF("sctlr 0x%x\n", arm_read_sctlr());
+    LTRACEF("actlr 0x%x\n", arm_read_actlr());
+#if ARM_CPU_CORTEX_A9
+    LTRACEF("cbar 0x%x\n", arm_read_cbar());
+#endif
+    LTRACEF("mpidr 0x%x\n", arm_read_mpidr());
+    LTRACEF("ttbcr 0x%x\n", arm_read_ttbcr());
+    LTRACEF("ttbr0 0x%x\n", arm_read_ttbr0());
+    LTRACEF("dacr 0x%x\n", arm_read_dacr());
+#if ARM_CPU_CORTEX_A7
+    LTRACEF("l2ctlr 0x%x\n", arm_read_l2ctlr());
+    LTRACEF("l2ectlr 0x%x\n", arm_read_l2ectlr());
+#endif
+
+#if ARM_CPU_CORTEX_A9
+    /* SCU configuration register: low 2 bits = number of cpus - 1 */
+    addr_t scu_base = arm_read_cbar();
+    uint32_t scu_config = *REG32(scu_base + 4);
+    secondaries_to_init = scu_config & 0x3;
+#elif ARM_CPU_CORTEX_A7 || ARM_CPU_CORTEX_A15
+    uint32_t l2ctlr = arm_read_l2ctlr();
+    secondaries_to_init = (l2ctlr >> 24);
+#else
+    secondaries_to_init = SMP_MAX_CPUS - 1; /* TODO: get count from somewhere else, or add cpus as they boot */
+#endif
+
+    lk_init_secondary_cpus(secondaries_to_init);
+
+    /* in platforms where the cpus have already been started, go ahead and wake up all the
+     * secondary cpus here.
+     */
+    dprintf(SPEW, "releasing %d secondary cpu%c\n", secondaries_to_init, secondaries_to_init != 1 ? 's' : ' ');
+
+    /* release the secondary cpus */
+    spin_unlock(&arm_boot_cpu_lock);
+
+    /* flush the release of the lock, since the secondary cpus are running without cache on */
+    arch_clean_cache_range((addr_t)&arm_boot_cpu_lock, sizeof(arm_boot_cpu_lock));
+
+#if ARM_ARCH_WAIT_FOR_SECONDARIES
+    /* wait for secondary cpus to boot before arm_mmu_init below, which will remove
+     * temporary boot mappings
+     * TODO: find a cleaner way to do this than this #define
+     */
+    while (secondaries_to_init > 0) {
+        /* woken by the sev each secondary issues in arm_secondary_entry */
+        __asm__ volatile("wfe");
+    }
+#endif
+#endif // WITH_SMP
+
+    //spinlock_test();
+
+#if ARM_WITH_MMU
+    /* finish intializing the mmu */
+    arm_mmu_init();
+#endif
+}
+
+#if WITH_SMP
+/* C entry point for secondary cpus after the asm boot stub. Sanity-checks
+ * the cpu number, configures the core and its L1 caches, runs the early
+ * per-cpu init levels, signals the boot cpu, and enters the scheduler. */
+void arm_secondary_entry(uint asm_cpu_num)
+{
+    uint cpu = arch_curr_cpu_num();
+    /* mismatch between asm-assigned and arch-reported cpu number: park this cpu */
+    if (cpu != asm_cpu_num)
+        return;
+
+    arm_basic_setup();
+
+    /* enable the local L1 cache */
+    //arch_enable_cache(UCACHE);
+
+    // XXX may not be safe, but just hard enable i and d cache here
+    // at the moment cannot rely on arch_enable_cache not dumping the L2
+    uint32_t sctlr = arm_read_sctlr();
+    sctlr |= (1<<12) | (1<<2); // enable i and dcache
+    arm_write_sctlr(sctlr);
+
+    /* run early secondary cpu init routines up to the threading level */
+    lk_init_level(LK_INIT_FLAG_SECONDARY_CPUS, LK_INIT_LEVEL_EARLIEST, LK_INIT_LEVEL_THREADING - 1);
+
+    arch_mp_init_percpu();
+
+    LTRACEF("cpu num %d\n", cpu);
+    LTRACEF("sctlr 0x%x\n", arm_read_sctlr());
+    LTRACEF("actlr 0x%x\n", arm_read_actlr());
+
+    /* we're done, tell the main cpu we're up */
+    atomic_add(&secondaries_to_init, -1);
+    smp_mb();
+    /* wake the boot cpu out of its wfe wait in arch_init() */
+    __asm__ volatile("sev");
+
+    lk_secondary_cpu_entry();
+}
+#endif
+
+/* Per-cpu core configuration common to boot and secondary cpus: tunes
+ * sctlr/actlr, optionally enables the cycle counter and VFP access, and
+ * points the vector base at the kernel's exception vectors. */
+static void arm_basic_setup(void)
+{
+    uint32_t sctlr = arm_read_sctlr();
+
+    /* ARMV7 bits */
+    sctlr &= ~(1<<10); /* swp disable */
+    sctlr |=  (1<<11); /* enable program flow prediction */
+    sctlr &= ~(1<<14); /* random cache/tlb replacement */
+    sctlr &= ~(1<<25); /* E bit set to 0 on exception */
+    sctlr &= ~(1<<30); /* no thumb exceptions */
+
+    arm_write_sctlr(sctlr);
+
+    uint32_t actlr = arm_read_actlr();
+#if ARM_CPU_CORTEX_A9
+    actlr |= (1<<2); /* enable dcache prefetch */
+#if WITH_DEV_CACHE_PL310
+    actlr |= (1<<7); /* L2 exclusive cache */
+    actlr |= (1<<3); /* L2 write full line of zeroes */
+    actlr |= (1<<1); /* L2 prefetch hint enable */
+#endif
+#if WITH_SMP
+    /* enable smp mode, cache and tlb broadcast */
+    actlr |= (1<<6) | (1<<0);
+#endif
+#endif // ARM_CPU_CORTEX_A9
+#if ARM_CPU_CORTEX_A7
+#if WITH_SMP
+    /* enable smp mode */
+    actlr |= (1<<6);
+#endif
+#endif // ARM_CPU_CORTEX_A7
+
+    arm_write_actlr(actlr);
+
+#if ENABLE_CYCLE_COUNTER && ARM_ISA_ARMV7
+    /* enable the cycle count register (PMCR / PMCNTENSET via cp15 c9) */
+    uint32_t en;
+    __asm__ volatile("mrc	p15, 0, %0, c9, c12, 0" : "=r" (en));
+    en &= ~(1<<3); /* cycle count every cycle */
+    en |= 1; /* enable all performance counters */
+    __asm__ volatile("mcr	p15, 0, %0, c9, c12, 0" :: "r" (en));
+
+    /* enable cycle counter */
+    en = (1<<31);
+    __asm__ volatile("mcr	p15, 0, %0, c9, c12, 1" :: "r" (en));
+#endif
+
+#if ARM_WITH_VFP
+    /* enable cp10 and cp11 */
+    uint32_t val = arm_read_cpacr();
+    val |= (3<<22)|(3<<20);
+    arm_write_cpacr(val);
+
+    /* set enable bit in fpexc */
+    __asm__ volatile("mrc  p10, 7, %0, c8, c0, 0" : "=r" (val));
+    val |= (1<<30);
+    __asm__ volatile("mcr  p10, 7, %0, c8, c0, 0" :: "r" (val));
+
+    /* make sure the fpu starts off disabled */
+    arm_fpu_set_enable(false);
+#endif
+
+    /* set the vector base to our exception vectors so we dont need to double map at 0 */
+#if ARM_ISA_ARMV7
+    arm_write_vbar(KERNEL_BASE + KERNEL_LOAD_OFFSET);
+#endif
+}
+
+/* Undo the per-cpu tweaks made in arm_basic_setup() ahead of a chain
+ * load: disable the performance/cycle counters and restore actlr to its
+ * reset value where we know it. */
+void arch_quiesce(void)
+{
+#if ENABLE_CYCLE_COUNTER
+#if ARM_ISA_ARMV7
+    /* disable the cycle count and performance counters */
+    uint32_t en;
+    __asm__ volatile("mrc	p15, 0, %0, c9, c12, 0" : "=r" (en));
+    en &= ~1; /* disable all performance counters */
+    __asm__ volatile("mcr	p15, 0, %0, c9, c12, 0" :: "r" (en));
+
+    /* disable cycle counter */
+    en = 0;
+    __asm__ volatile("mcr	p15, 0, %0, c9, c12, 1" :: "r" (en));
+#endif
+#if ARM_CPU_ARM1136
+    /* disable the cycle count and performance counters */
+    uint32_t en;
+    __asm__ volatile("mrc	p15, 0, %0, c15, c12, 0" : "=r" (en));
+    en &= ~1; /* disable all performance counters */
+    __asm__ volatile("mcr	p15, 0, %0, c15, c12, 0" :: "r" (en));
+#endif
+#endif
+
+    uint32_t actlr = arm_read_actlr();
+#if ARM_CPU_CORTEX_A9
+    actlr = 0; /* put the aux control register back to default */
+#endif // ARM_CPU_CORTEX_A9
+    arm_write_actlr(actlr);
+}
+
+#if ARM_ISA_ARMV7
+/* virtual to physical translation using the ATS1CPR coprocessor op.
+ * Runs with interrupts disabled so the PAR read pairs with our own
+ * translation request. Returns ERR_NOT_FOUND if the walk faulted
+ * (PAR bit 0 set); otherwise stores the physical address through pa
+ * (page frame from PAR plus the low 12 bits of va) and returns NO_ERROR. */
+status_t arm_vtop(addr_t va, addr_t *pa)
+{
+    spin_lock_saved_state_t irqstate;
+
+    arch_interrupt_save(&irqstate, SPIN_LOCK_FLAG_INTERRUPTS);
+
+    arm_write_ats1cpr(va & ~(PAGE_SIZE-1));
+    uint32_t par = arm_read_par();
+
+    arch_interrupt_restore(irqstate, SPIN_LOCK_FLAG_INTERRUPTS);
+
+    if (par & 1)
+        return ERR_NOT_FOUND;
+
+    if (pa) {
+        *pa = (par & 0xfffff000) | (va & 0xfff);
+    }
+
+    return NO_ERROR;
+}
+#endif
+
+/* Shut the system down and branch into another image at 'entry' with four
+ * arguments. With the VM enabled, translates both the target entry point
+ * and the asm chain-load stub to physical addresses and identity-maps
+ * around the stub so it survives the MMU being turned off. Never returns. */
+void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3)
+{
+    LTRACEF("entry %p, args 0x%lx 0x%lx 0x%lx 0x%lx\n", entry, arg0, arg1, arg2, arg3);
+
+    /* we are going to shut down the system, start by disabling interrupts */
+    arch_disable_ints();
+
+    /* give target and platform a chance to put hardware into a suitable
+     * state for chain loading.
+     */
+    target_quiesce();
+    platform_quiesce();
+
+    paddr_t entry_pa;
+    paddr_t loader_pa;
+
+#if WITH_KERNEL_VM
+    /* get the physical address of the entry point we're going to branch to */
+    if (arm_vtop((addr_t)entry, &entry_pa) < 0) {
+        panic("error translating entry physical address\n");
+    }
+
+    /* add the low bits of the virtual address back */
+    entry_pa |= ((addr_t)entry & 0xfff);
+
+    LTRACEF("entry pa 0x%lx\n", entry_pa);
+
+    /* figure out the mapping for the chain load routine */
+    if (arm_vtop((addr_t)&arm_chain_load, &loader_pa) < 0) {
+        panic("error translating loader physical address\n");
+    }
+
+    /* add the low bits of the virtual address back */
+    loader_pa |= ((addr_t)&arm_chain_load & 0xfff);
+
+    paddr_t loader_pa_section = ROUNDDOWN(loader_pa, SECTION_SIZE);
+
+    LTRACEF("loader address %p, phys 0x%lx, surrounding large page 0x%lx\n",
+            &arm_chain_load, loader_pa, loader_pa_section);
+
+    /* using large pages, map around the target location */
+    arch_mmu_map(loader_pa_section, loader_pa_section, (2 * SECTION_SIZE / PAGE_SIZE), 0);
+#else
+    /* for non vm case, just branch directly into it */
+    entry_pa = (paddr_t)entry;
+    loader_pa = (paddr_t)&arm_chain_load;
+#endif
+
+    LTRACEF("disabling instruction/data cache\n");
+    arch_disable_cache(UCACHE);
+#if WITH_DEV_CACHE_PL310
+    pl310_set_enable(false);
+#endif
+
+    /* put the booting cpu back into close to a default state */
+    arch_quiesce();
+
+    LTRACEF("branching to physical address of loader\n");
+
+    /* branch to the physical address version of the chain loader routine */
+    void (*loader)(paddr_t entry, ulong, ulong, ulong, ulong) __NO_RETURN = (void *)loader_pa;
+    loader(entry_pa, arg0, arg1, arg2, arg3);
+}
+
+static spin_lock_t lock = 0;
+
+/* Boot-cpu half of a manual SMP spinlock smoke test (normally disabled;
+ * see the commented-out call in arch_init): hold the lock for a while so
+ * the secondary-cpu half can contend for it. */
+static void spinlock_test(void)
+{
+    TRACE_ENTRY;
+
+    spin_lock_saved_state_t state;
+    spin_lock_irqsave(&lock, state);
+
+    TRACEF("cpu0: i have the lock\n");
+    spin(1000000);
+    TRACEF("cpu0: releasing it\n");
+
+    spin_unlock_irqrestore(&lock, state);
+
+    spin(1000000);
+}
+
+/* Secondary-cpu half of the manual spinlock smoke test: delay so cpu0
+ * grabs the lock first, then contend for it and release it. */
+static void spinlock_test_secondary(void)
+{
+    TRACE_ENTRY;
+
+    spin(500000);
+    spin_lock_saved_state_t state;
+    spin_lock_irqsave(&lock, state);
+
+    TRACEF("cpu1: i have the lock\n");
+    spin(250000);
+    TRACEF("cpu1: releasing it\n");
+
+    spin_unlock_irqrestore(&lock, state);
+}
+
+/* switch to user mode, set the user stack pointer to user_stack_top, put the svc stack pointer to the top of the kernel stack.
+ * Bit 0 of entry_point selects Thumb state via SPSR. Never returns. */
+void arch_enter_uspace(vaddr_t entry_point, vaddr_t user_stack_top)
+{
+    DEBUG_ASSERT(IS_ALIGNED(user_stack_top, 8));
+
+    thread_t *ct = get_current_thread();
+
+    vaddr_t kernel_stack_top = (uintptr_t)ct->stack + ct->stack_size;
+    kernel_stack_top = ROUNDDOWN(kernel_stack_top, 8);
+
+    uint32_t spsr = CPSR_MODE_USR;
+    spsr |= (entry_point & 1) ? CPSR_THUMB : 0;
+
+    arch_disable_ints();
+
+    asm volatile(
+        /* ldm ^ form writes the user-mode sp banked register */
+        "ldmia  %[ustack], { sp }^;"
+        "msr	spsr, %[spsr];"
+        "mov	sp, %[kstack];"
+        /* movs pc copies spsr -> cpsr, completing the mode switch */
+        "movs	pc, %[entry];"
+        :
+        : [ustack]"r"(&user_stack_top),
+        [kstack]"r"(kernel_stack_top),
+        [entry]"r"(entry_point),
+        [spsr]"r"(spsr)
+        : "memory");
+    __UNREACHABLE;
+}
diff --git a/src/bsp/lk/arch/arm/arm/asm.S b/src/bsp/lk/arch/arm/arm/asm.S
new file mode 100644
index 0000000..bcfa0be
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm/asm.S
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2008 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+#include <arch/arm/cores.h>
+
+    /* context switch frame is as follows:
+     * lr
+     * r11
+     * r10
+     * r9
+     * r8
+     * r7
+     * r6
+     * r5
+     * r4
+     */
+/* arm_context_switch(addr_t *old_sp, addr_t new_sp)
+ * Saves callee-saved registers on the current stack, stores the old sp
+ * through r0, clears any exclusive-monitor state owned by the outgoing
+ * thread, then adopts new_sp and restores the incoming thread's frame. */
+FUNCTION(arm_context_switch)
+    /* save non callee trashed supervisor registers */
+    /* spsr and user mode registers are saved and restored in the iframe by exceptions.S */
+    push    { r4-r11, lr }
+
+    /* save old sp */
+    str     sp, [r0]
+
+    /* clear any exclusive locks that the old thread holds */
+#if ARM_ARCH_LEVEL >= 7
+    /* can clear it directly */
+    clrex
+#elif ARM_ARCH_LEVEL == 6
+    /* have to do a fake strex to clear it (ARMv6 has no clrex) */
+    ldr     r0, =strex_spot
+    strex   r3, r2, [r0]
+#endif
+
+    /* load new regs */
+    mov     sp, r1
+    pop     { r4-r11, lr }
+    bx      lr
+
+.ltorg
+
+#if ARM_ARCH_LEVEL == 6
+.data
+strex_spot:
+    .word   0
+#endif
+
+.text
+
+/* arm_save_mode_regs(uint32_t *regs)
+ * Dumps the banked sp (r13) and lr (r14) of each processor mode into the
+ * buffer at r0, in the order usr, fiq, irq, svc, abt, und, sys (14 words
+ * total), cycling through the modes with cps and restoring the original
+ * mode from the saved cpsr before returning. */
+FUNCTION(arm_save_mode_regs)
+    mrs     r1, cpsr
+
+    stmia   r0, { r13, r14 }^ /* usr */
+    add     r0, #8
+
+    cps     #0x11   /* fiq */
+    str     r13, [r0], #4
+    str     r14, [r0], #4
+
+    cps     #0x12   /* irq */
+    str     r13, [r0], #4
+    str     r14, [r0], #4
+
+    cps     #0x13   /* svc */
+    str     r13, [r0], #4
+    str     r14, [r0], #4
+
+    cps     #0x17   /* abt */
+    str     r13, [r0], #4
+    str     r14, [r0], #4
+
+    cps     #0x1b   /* und */
+    str     r13, [r0], #4
+    str     r14, [r0], #4
+
+    cps     #0x1f   /* sys */
+    str     r13, [r0], #4
+    str     r14, [r0], #4
+
+    /* restore the caller's mode */
+    msr     cpsr_c, r1
+
+    bx      lr
+
+.text
+
+/* void arm_chain_load(paddr_t entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3) __NO_RETURN; */
+/* shut down the system, branching into the secondary system: shifts the
+ * four args into r0-r3 (arg3 comes from the stack per AAPCS), disables
+ * the MMU when the kernel VM was on, and jumps to the physical entry. */
+FUNCTION(arm_chain_load)
+    /* shuffle the args around */
+    mov     r4, r0      /* r4 = entry point */
+    mov     r0, r1
+    mov     r1, r2
+    mov     r2, r3
+    ldr     r3, [sp]    /* 5th C argument lives on the stack */
+
+#if WITH_KERNEL_VM
+/* The MMU is initialized and running at this point, so we'll need to
+ * make sure we can disable it and continue to run. The caller should
+ * have built a identity map for us and branched to our identity mapping,
+ * so it will be safe to just disable the mmu and branch to the entry
+ * point in physical space.
+ */
+    /* Read SCTLR */
+    mrc     p15, 0, r12, c1, c0, 0
+
+    /* Turn off the MMU */
+    bic     r12, #0x1
+
+    /* Write back SCTLR */
+    mcr     p15, 0, r12, c1, c0, 0
+    isb
+
+#endif // WITH_KERNEL_VM
+
+    /* call the entry point */
+    bx      r4
diff --git a/src/bsp/lk/arch/arm/arm/cache-ops.S b/src/bsp/lk/arch/arm/arm/cache-ops.S
new file mode 100644
index 0000000..6ae02d4
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm/cache-ops.S
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2008-2012 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+#include <arch/ops.h>
+#include <arch/defines.h>
+
+.text
+
+#if ARM_WITH_CACHE
+
+/* low level cache routines for various cpu families */
+
+#if ARM_CPU_ARM1136
+
+/* void arch_disable_cache(uint flags) */
+/* void arch_disable_cache(uint flags)
+ *
+ * ARM1136 variant: turn off the dcache and/or icache per the DCACHE/ICACHE
+ * bits in flags. The dcache is cleaned & invalidated after being disabled
+ * so dirty lines are not lost; the icache is invalidated after being
+ * disabled. Interrupts are masked for the duration and the caller's cpsr
+ * is restored on exit.
+ */
+FUNCTION(arch_disable_cache)
+    mov     r12, #0                     // zero register for SBZ cp15 operands
+    mrs     r3, cpsr                    // save the old interrupt state
+    cpsid   iaf                         // interrupts disabled
+
+.Ldcache_disable:
+    tst     r0, #DCACHE
+    beq     .Licache_disable
+    mrc     p15, 0, r1, c1, c0, 0       // cr1
+    tst     r1, #(1<<2)                 // is the dcache already disabled?
+    beq     .Licache_disable
+
+    bic     r1, #(1<<2)
+    mcr     p15, 0, r1, c1, c0, 0       // disable dcache
+
+    mcr     p15, 0, r12, c7, c14, 0     // clean & invalidate dcache
+    // data sync barrier (formerly drain write buffer). The operand is
+    // Should-Be-Zero per the ARM1136 TRM; use the zero register instead
+    // of r0, which still holds the flags argument.
+    mcr     p15, 0, r12, c7, c10, 4
+
+.Licache_disable:
+    tst     r0, #ICACHE
+    beq     .Ldone_disable
+
+    mrc     p15, 0, r1, c1, c0, 0       // cr1
+    bic     r1, #(1<<12)
+    mcr     p15, 0, r1, c1, c0, 0       // disable icache
+
+    mcr     p15, 0, r12, c7, c5, 0      // invalidate icache
+
+.Ldone_disable:
+    msr     cpsr, r3                    // restore interrupt state
+    bx      lr
+
+/* void arch_enable_cache(uint flags) */
+/* void arch_enable_cache(uint flags)
+ *
+ * ARM1136 variant: turn on the dcache and/or icache per the DCACHE/ICACHE
+ * bits in flags. Each cache is invalidated before being enabled so no
+ * stale lines can hit. Interrupts are masked for the duration.
+ */
+FUNCTION(arch_enable_cache)
+    mov     r12, #0                     // zero register
+    mrs     r3, cpsr                    // save the old interrupt state
+    cpsid   iaf                         // interrupts disabled
+
+.Ldcache_enable:
+    tst     r0, #DCACHE
+    beq     .Licache_enable
+    mrc     p15, 0, r1, c1, c0, 0       // cr1
+    tst     r1, #(1<<2)                 // is the dcache already enabled?
+    bne     .Licache_enable
+
+    mcr     p15, 0, r12, c7, c6, 0      // invalidate dcache before turning it on
+
+    orr     r1, #(1<<2)
+    mcr     p15, 0, r1, c1, c0, 0       // enable dcache
+
+.Licache_enable:
+    tst     r0, #ICACHE
+    beq     .Ldone_enable
+
+    mcr     p15, 0, r12, c7, c5, 0      // invalidate icache before turning it on
+
+    mrc     p15, 0, r1, c1, c0, 0       // cr1
+    orr     r1, #(1<<12)
+    mcr     p15, 0, r1, c1, c0, 0       // enable icache
+
+.Ldone_enable:
+    msr     cpsr, r3                    // restore interrupt state
+    bx      lr
+
+#elif ARM_ISA_ARMV7
+
+/* void arch_disable_cache(uint flags) */
+/* void arch_disable_cache(uint flags)
+ *
+ * ARMv7 variant: disable the dcache and/or icache per the DCACHE/ICACHE
+ * bits in flags. If the dcache was on it is cleaned & invalidated by
+ * set/way after being disabled; if it was already off, all levels are
+ * invalidated instead so nothing stale survives. The icache is always
+ * invalidated at the end. Interrupts are masked for the duration.
+ * Flags/cpsr live in r7/r8 because the bl'd helpers below trash r0-r6,
+ * r9-r11 and must not touch the stack.
+ */
+FUNCTION(arch_disable_cache)
+    stmfd   sp!, {r4-r11, lr}
+
+    mov     r7, r0                      // save flags
+
+    mrs     r8, cpsr                    // save the old interrupt state
+    cpsid   iaf                         // interrupts disabled
+
+.Ldcache_disable:
+    tst     r7, #DCACHE
+    beq     .Licache_disable
+    mrc     p15, 0, r0, c1, c0, 0       // cr1
+    tst     r0, #(1<<2)                 // is the dcache already disabled?
+    beq     .Ldcache_already_disabled
+
+    bic     r0, #(1<<2)
+    mcr     p15, 0, r0, c1, c0, 0       // disable dcache
+
+    // flush and invalidate the dcache
+    // NOTE: trashes a bunch of registers, can't be spilling stuff to the stack
+    bl      flush_invalidate_cache_v7
+
+    b       .Ldcache_disable_L2
+
+.Ldcache_already_disabled:
+    // make sure all of the caches are invalidated
+    // NOTE: trashes a bunch of registers, can't be spilling stuff to the stack
+    bl      invalidate_cache_v7
+
+.Ldcache_disable_L2:
+
+#if ARM_WITH_L2
+    // disable the L2, if present
+    mrc     p15, 0, r0, c1, c0, 1       // aux cr1
+    bic     r0, #(1<<1)
+    mcr     p15, 0, r0, c1, c0, 1       // disable L2 dcache
+#endif
+
+.Licache_disable:
+    tst     r7, #ICACHE
+    beq     .Ldone_disable
+
+    mrc     p15, 0, r0, c1, c0, 0       // cr1
+    bic     r0, #(1<<12)
+    mcr     p15, 0, r0, c1, c0, 0       // disable icache
+
+.Ldone_disable:
+    // make sure the icache is always invalidated
+    mov     r0, #0
+    mcr     p15, 0, r0, c7, c5, 0       // invalidate icache to PoU
+
+    msr     cpsr, r8                    // restore interrupt state
+    ldmfd   sp!, {r4-r11, pc}
+
+/* void arch_enable_cache(uint flags) */
+/* void arch_enable_cache(uint flags)
+ *
+ * ARMv7 variant: enable the dcache and/or icache per the DCACHE/ICACHE
+ * bits in flags. All cache levels are invalidated by set/way before the
+ * dcache is switched on; the icache is invalidated to PoU before being
+ * switched on. Interrupts are masked for the duration.
+ */
+FUNCTION(arch_enable_cache)
+    stmfd   sp!, {r4-r12, lr}
+
+    mov     r7, r0                      // save flags
+
+    mrs     r8, cpsr                    // save the old interrupt state
+    cpsid   iaf                         // interrupts disabled
+
+.Ldcache_enable:
+    tst     r7, #DCACHE
+    beq     .Licache_enable
+    mrc     p15, 0, r0, c1, c0, 0       // cr1
+    tst     r0, #(1<<2)                 // is the dcache already enabled?
+    bne     .Licache_enable
+
+    // invalidate L1 and L2
+    // NOTE: trashes a bunch of registers, can't be spilling stuff to the stack
+    bl      invalidate_cache_v7
+
+#if ARM_WITH_L2
+    // enable the L2, if present
+    mrc     p15, 0, r0, c1, c0, 1       // aux cr1
+    orr     r0, #(1<<1)
+    mcr     p15, 0, r0, c1, c0, 1       // enable L2 dcache
+#endif
+
+    mrc     p15, 0, r0, c1, c0, 0       // cr1
+    orr     r0, #(1<<2)
+    mcr     p15, 0, r0, c1, c0, 0       // enable dcache
+
+.Licache_enable:
+    tst     r7, #ICACHE
+    beq     .Ldone_enable
+
+    mov     r0, #0
+    mcr     p15, 0, r0, c7, c5, 0       // invalidate icache to PoU
+
+    mrc     p15, 0, r0, c1, c0, 0       // cr1
+    orr     r0, #(1<<12)
+    mcr     p15, 0, r0, c1, c0, 0       // enable icache
+
+.Ldone_enable:
+    isb                                 // ensure the SCTLR writes have taken effect
+    msr     cpsr, r8                    // restore interrupt state
+    ldmfd   sp!, {r4-r12, pc}
+
+// flush & invalidate cache routine, trashes r0-r6, r9-r11
+flush_invalidate_cache_v7:
+    /* from ARMv7 manual, B2-17 */
+    dmb
+    MRC     p15, 1, R0, c0, c0, 1       // Read CLIDR
+    ANDS    R3, R0, #0x7000000
+    MOV     R3, R3, LSR #23             // Cache level value (naturally aligned)
+    BEQ     .Lfinished
+    MOV     R10, #0
+.Loop1:
+    ADD     R2, R10, R10, LSR #1        // Work out 3xcachelevel
+    MOV     R1, R0, LSR R2              // bottom 3 bits are the Cache type for this level
+    AND     R1, R1, #7                  // get those 3 bits alone
+    CMP     R1, #2
+    BLT     .Lskip                      // no cache or only instruction cache at this level
+    MCR     p15, 2, R10, c0, c0, 0      // write the Cache Size selection register
+    isb                                 // ISB to sync the change to the CacheSizeID reg
+    MRC     p15, 1, R1, c0, c0, 0       // reads current Cache Size ID register
+    AND     R2, R1, #0x7                // extract the line length field
+    ADD     R2, R2, #4                  // add 4 for the line length offset (log2 16 bytes)
+    LDR     R4, =0x3FF
+    ANDS    R4, R4, R1, LSR #3          // R4 is the max number on the way size (right aligned)
+    CLZ     R5, R4                      // R5 is the bit position of the way size increment
+    LDR     R6, =0x00007FFF
+    ANDS    R6, R6, R1, LSR #13         // R6 is the max number of the index size (right aligned)
+.Loop2:
+    MOV     R9, R4                      // R9 working copy of the max way size (right aligned)
+.Loop3:
+    ORR     R11, R10, R9, LSL R5        // factor in the way number and cache number into R11
+    ORR     R11, R11, R6, LSL R2        // factor in the index number
+    MCR     p15, 0, R11, c7, c14, 2     // clean & invalidate by set/way
+    SUBS    R9, R9, #1                  // decrement the way number
+    BGE     .Loop3
+    SUBS    R6, R6, #1                  // decrement the index
+    BGE     .Loop2
+.Lskip:
+    ADD     R10, R10, #2                    // increment the cache number
+    CMP     R3, R10
+    BGT     .Loop1
+
+.Lfinished:
+    mov     r10, #0
+    mcr     p15, 2, r10, c0, c0, 0      // select cache level 0
+    dsb
+    isb
+
+    bx      lr
+
+// invalidate cache routine, trashes r0-r6, r9-r11
+invalidate_cache_v7:
+    /* from ARMv7 manual, B2-17 */
+    dmb
+    MRC     p15, 1, R0, c0, c0, 1       // Read CLIDR
+    ANDS    R3, R0, #0x7000000
+    MOV     R3, R3, LSR #23             // Cache level value (naturally aligned)
+    BEQ     .Lfinished_invalidate
+    MOV     R10, #0
+.Loop1_invalidate:
+    ADD     R2, R10, R10, LSR #1        // Work out 3xcachelevel
+    MOV     R1, R0, LSR R2              // bottom 3 bits are the Cache type for this level
+    AND     R1, R1, #7                  // get those 3 bits alone
+    CMP     R1, #2
+    BLT     .Lskip_invalidate           // no cache or only instruction cache at this level
+    MCR     p15, 2, R10, c0, c0, 0      // write the Cache Size selection register
+    isb                                 // ISB to sync the change to the CacheSizeID reg
+    MRC     p15, 1, R1, c0, c0, 0       // reads current Cache Size ID register
+    AND     R2, R1, #0x7                // extract the line length field
+    ADD     R2, R2, #4                  // add 4 for the line length offset (log2 16 bytes)
+    LDR     R4, =0x3FF
+    ANDS    R4, R4, R1, LSR #3          // R4 is the max number on the way size (right aligned)
+    CLZ     R5, R4                      // R5 is the bit position of the way size increment
+    LDR     R6, =0x00007FFF
+    ANDS    R6, R6, R1, LSR #13         // R6 is the max number of the index size (right aligned)
+.Loop2_invalidate:
+    MOV     R9, R4                      // R9 working copy of the max way size (right aligned)
+.Loop3_invalidate:
+    ORR     R11, R10, R9, LSL R5        // factor in the way number and cache number into R11
+    ORR     R11, R11, R6, LSL R2        // factor in the index number
+    MCR     p15, 0, R11, c7, c6, 2      // invalidate by set/way
+    SUBS    R9, R9, #1                  // decrement the way number
+    BGE     .Loop3_invalidate
+    SUBS    R6, R6, #1                  // decrement the index
+    BGE     .Loop2_invalidate
+.Lskip_invalidate:
+    ADD     R10, R10, #2                // increment the cache number
+    CMP     R3, R10
+    BGT     .Loop1_invalidate
+
+.Lfinished_invalidate:
+    dsb
+    mov     r10, #0
+    mcr     p15, 2, r10, c0, c0, 0      // select cache level 0
+    isb
+
+    bx      lr
+
+#else
+#error unhandled cpu
+#endif
+
+#if ARM_CPU_ARM926 || ARM_CPU_ARM1136 || ARM_ISA_ARMV7
+/* shared cache flush routines */
+
+    /* void arch_flush_cache_range(addr_t start, size_t len); */
+    /* void arch_flush_cache_range(addr_t start, size_t len); */
+    /* Clean (write back) the dcache to PoC by MVA for [start, start+len).
+     * start is rounded down to a cache line; the loop body runs before the
+     * end compare, so at least one line is cleaned even for len == 0.
+     * If a PL310 outer cache is present, tail-calls into its clean routine
+     * with the original start address (len survives in r1). */
+FUNCTION(arch_clean_cache_range)
+#if ARM_WITH_CP15
+    mov     r3, r0                      // save the start address
+    add     r2, r0, r1                  // calculate the end address
+    bic     r0, #(CACHE_LINE-1)         // align the start with a cache line
+0:
+    mcr     p15, 0, r0, c7, c10, 1      // clean cache to PoC by MVA
+    add     r0, #CACHE_LINE
+    cmp     r0, r2
+    blo     0b
+
+#if ARM_ISA_ARMV7
+    dsb
+#else
+    mov     r0, #0
+    mcr     p15, 0, r0, c7, c10, 4      // data sync barrier
+#endif
+#endif
+#if WITH_DEV_CACHE_PL310
+    mov     r0, r3                      // put the start address back
+    b       pl310_clean_range           // tail call; pl310 routine does the return
+#else
+    bx      lr
+#endif
+
+    /* void arch_flush_invalidate_cache_range(addr_t start, size_t len); */
+    /* void arch_flush_invalidate_cache_range(addr_t start, size_t len); */
+    /* Clean & invalidate the dcache to PoC by MVA for [start, start+len).
+     * Same structure as arch_clean_cache_range above: line-aligns start,
+     * always touches at least one line, then barriers and optionally
+     * tail-calls the PL310 outer-cache routine. */
+FUNCTION(arch_clean_invalidate_cache_range)
+#if ARM_WITH_CP15
+    mov     r3, r0                      // save the start address
+    add     r2, r0, r1                  // calculate the end address
+    bic     r0, #(CACHE_LINE-1)         // align the start with a cache line
+0:
+    mcr     p15, 0, r0, c7, c14, 1      // clean & invalidate dcache to PoC by MVA
+    add     r0, r0, #CACHE_LINE
+    cmp     r0, r2
+    blo     0b
+
+#if ARM_ISA_ARMV7
+    dsb
+#else
+    mov     r0, #0
+    mcr     p15, 0, r0, c7, c10, 4      // data sync barrier
+#endif
+#endif
+#if WITH_DEV_CACHE_PL310
+    mov     r0, r3                      // put the start address back
+    b       pl310_clean_invalidate_range // tail call; pl310 routine does the return
+#else
+    bx      lr
+#endif
+
+    /* void arch_invalidate_cache_range(addr_t start, size_t len); */
+    /* void arch_invalidate_cache_range(addr_t start, size_t len); */
+    /* Invalidate (discard, no write-back) the dcache to PoC by MVA for
+     * [start, start+len). Dirty data in the range is lost; callers must
+     * only use this on buffers about to be overwritten (e.g. DMA receive).
+     * Unaligned start/end means lines partially outside the range are
+     * also discarded. */
+FUNCTION(arch_invalidate_cache_range)
+#if ARM_WITH_CP15
+    mov     r3, r0                      // save the start address
+    add     r2, r0, r1                  // calculate the end address
+    bic     r0, #(CACHE_LINE-1)         // align the start with a cache line
+0:
+    mcr     p15, 0, r0, c7, c6, 1       // invalidate dcache to PoC by MVA
+    add     r0, r0, #CACHE_LINE
+    cmp     r0, r2
+    blo     0b
+
+#if ARM_ISA_ARMV7
+    dsb
+#else
+    mov     r0, #0
+    mcr     p15, 0, r0, c7, c10, 4      // data sync barrier
+#endif
+#endif
+#if WITH_DEV_CACHE_PL310
+    mov     r0, r3                      // put the start address back
+    b       pl310_invalidate_range      // tail call; pl310 routine does the return
+#else
+    bx      lr
+#endif
+
+    /* void arch_sync_cache_range(addr_t start, size_t len); */
+    /* void arch_sync_cache_range(addr_t start, size_t len); */
+    /* Make newly written code visible to instruction fetch: clean the
+     * dcache for the range, then invalidate the whole icache to PoU.
+     * NOTE(review): no dsb/isb (or branch predictor flush) is issued after
+     * the icache invalidate - confirm callers do their own synchronization
+     * before executing from the range. */
+FUNCTION(arch_sync_cache_range)
+    push    { r14 }
+    bl      arch_clean_cache_range
+
+    mov     r0, #0
+    mcr     p15, 0, r0, c7, c5, 0       // invalidate icache to PoU
+
+    pop     { pc }
+
+#endif // ARM_CPU_...
+
+#else
+
+/* no cache: provide the same public entry points as above as no-ops so
+ * callers don't need to know whether the target has a cache at all */
+
+FUNCTION(arch_disable_cache)
+    bx      lr
+
+FUNCTION(arch_enable_cache)
+    bx      lr
+
+FUNCTION(arch_clean_cache_range)
+    bx      lr
+
+FUNCTION(arch_clean_invalidate_cache_range)
+    bx      lr
+
+FUNCTION(arch_sync_cache_range)
+    bx      lr
+
+#endif // ARM_WITH_CACHE
diff --git a/src/bsp/lk/arch/arm/arm/cache.c b/src/bsp/lk/arch/arm/arm/cache.c
new file mode 100644
index 0000000..0a403b5
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm/cache.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2008 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
diff --git a/src/bsp/lk/arch/arm/arm/debug.c b/src/bsp/lk/arch/arm/arm/debug.c
new file mode 100644
index 0000000..01c39bc
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm/debug.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <trace.h>
+#include <err.h>
+#include <malloc.h>
+#include <lk/init.h>
+#include <arch/arm.h>
+#include <arch/arm/dcc.h>
+#include <kernel/thread.h>
+#include <kernel/mutex.h>
+#include <platform.h>
+
+/* per-channel state for the DCC (debug comms channel) receive worker */
+struct dcc_state {
+    dcc_rx_callback_t rx_callback; /* invoked by the worker for each received word */
+    mutex_t  lock;                 /* NOTE(review): initialized but not used anywhere in this file */
+    thread_t *worker;              /* polling thread created by arm_dcc_enable() */
+};
+
+#define SLOW_POLL_RATE 100
+#define FAST_POLL_TIMEOUT 5
+
+/* Worker thread: polls the DCC for incoming words forever.
+ *
+ * Two-speed polling: normally sleeps SLOW_POLL_RATE ms between checks;
+ * after receiving a word it spins without sleeping (fast poll) until
+ * FAST_POLL_TIMEOUT ms pass with nothing received, then drops back to
+ * the slow rate. Each received word is handed to dcc->rx_callback.
+ * Never returns; the trailing return 0 only satisfies the thread
+ * entry-point signature.
+ */
+static int dcc_worker_entry(void *arg)
+{
+    struct dcc_state *dcc = (struct dcc_state *)arg;
+    lk_time_t fast_poll_start;
+    bool fast_poll;
+
+    fast_poll = false;
+    for (;;) {
+        // wait for a bit if we're in slow poll mode
+        if (!fast_poll) {
+            thread_sleep(SLOW_POLL_RATE);
+        }
+
+        if (arm_dcc_read_available()) {
+            uint32_t val = arm_read_dbgdtrrxint();
+
+            dcc->rx_callback(val);
+
+            // we just received something, so go to a faster poll rate
+            fast_poll = true;
+            fast_poll_start = current_time();
+        } else {
+            // didn't see anything
+            if (fast_poll && current_time() - fast_poll_start >= FAST_POLL_TIMEOUT) {
+                fast_poll = false; // go back to slow poll
+            }
+        }
+    }
+
+    return 0;  // unreachable
+}
+
+/**
+ * Start the DCC receive worker.
+ *
+ * Allocates the per-channel state, records the rx callback and spawns a
+ * thread that polls the DCC for incoming words (see dcc_worker_entry).
+ * The state block is intentionally never freed on success; the worker
+ * owns it for the lifetime of the system.
+ *
+ * @param rx_callback called from the worker thread for every received word.
+ * @return NO_ERROR on success; ERR_NO_MEMORY if the state allocation or
+ *         the worker thread creation fails.
+ */
+status_t arm_dcc_enable(dcc_rx_callback_t rx_callback)
+{
+    struct dcc_state *state = malloc(sizeof(struct dcc_state));
+    if (!state)
+        return ERR_NO_MEMORY;
+
+    state->rx_callback = rx_callback;
+    mutex_init(&state->lock);
+
+    state->worker = thread_create("dcc worker", dcc_worker_entry, state, DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
+    if (!state->worker) {
+        // thread_create failed: don't resume a NULL thread or leak the state
+        free(state);
+        return ERR_NO_MEMORY;
+    }
+    thread_resume(state->worker);
+
+    return NO_ERROR;
+}
+
+/* Return true when the DCC receive register holds an unread word,
+ * i.e. the RXfull bit (bit 30) of DBGDSCR is set. */
+bool arm_dcc_read_available(void)
+{
+    const uint32_t dscr = arm_read_dbgdscr();
+
+    return (dscr & (1 << 30)) != 0; // bit 30: rx full
+}
+
+/* Read up to len 32-bit words from the DCC into buf.
+ *
+ * len and the return value are counts of uint32_t words, not bytes.
+ * timeout is in lk_time_t units (ms): 0 means non-blocking - drain
+ * whatever is immediately available and return; nonzero means keep
+ * polling until the deadline passes with the rx register empty.
+ * Returns the number of words actually read (possibly 0).
+ */
+ssize_t arm_dcc_read(uint32_t *buf, size_t len, lk_time_t timeout)
+{
+    lk_time_t start = 0;
+
+    if (timeout != 0)
+        start = current_time();
+
+    ssize_t count = 0;
+    while (count < (ssize_t)len) {
+
+        uint32_t dscr = arm_read_dbgdscr();
+        if (dscr & (1<<30)) { // rx full
+            uint32_t val = arm_read_dbgdtrrxint();
+            *buf++ = val;
+
+            count++;
+        } else {
+            // nothing waiting: give up immediately (timeout 0) or once the deadline passes
+            if (timeout == 0 || current_time() - start >= timeout) {
+                break;
+            }
+        }
+    }
+
+    return count;
+}
+
+/* Write up to len 32-bit words from buf to the DCC.
+ *
+ * len and the return value are counts of uint32_t words, not bytes.
+ * timeout semantics match arm_dcc_read: 0 = non-blocking, otherwise poll
+ * until the deadline passes with the tx register still full.
+ * Returns the number of words actually written (possibly 0).
+ */
+ssize_t arm_dcc_write(const uint32_t *buf, size_t len, lk_time_t timeout)
+{
+    lk_time_t start = 0;
+
+    if (timeout != 0)
+        start = current_time();
+
+    ssize_t count = 0;
+    while (count < (ssize_t)len) {
+
+        uint32_t dscr = arm_read_dbgdscr();
+        if ((dscr & (1<<29)) == 0) { // tx empty
+            // NOTE(review): the tx path writes via the "dtrrxint" accessor;
+            // confirm against arch/arm/dcc.h that this maps to the
+            // CPU->debugger transmit register and is not a naming mixup.
+            arm_write_dbgdtrrxint(*buf);
+            count++;
+            buf++;
+        } else {
+            // tx still full: give up immediately (timeout 0) or once the deadline passes
+            if (timeout == 0 || current_time() - start >= timeout) {
+                break;
+            }
+        }
+    }
+
+    return count;
+}
+
+#if WITH_LIB_CONSOLE
+#include <lib/console.h>
+#include <string.h>
+
+/* Test rx callback for the console command below: tallies 4 bytes per
+ * received word and prints the running total every 1000 bytes. */
+static void dcc_rx_callback(uint32_t val)
+{
+    static int bytes_seen = 0;
+
+    bytes_seen += 4;
+    if (bytes_seen % 1000 == 0) {
+        printf("count %d\n", bytes_seen);
+    }
+}
+
+/* Console command: exercise the DCC.
+ *   dcc start          - enable the rx worker (once)
+ *   dcc write <str>... - send each argument's characters as words
+ *   dcc read           - read whatever words are pending and hexdump them
+ */
+static int cmd_dcc(int argc, const cmd_args *argv)
+{
+    static bool dcc_started = false;
+
+    if (argc < 2) {
+        printf("not enough args\n");
+        return -1;
+    }
+
+    if (!strcmp(argv[1].str, "start")) {
+        if (!dcc_started) {
+            printf("starting dcc\n");
+
+            status_t err = arm_dcc_enable(&dcc_rx_callback);
+            printf("arm_dcc_enable returns %d\n", err);
+            dcc_started = true;
+        }
+    } else if (!strcmp(argv[1].str, "write")) {
+        for (int i = 2; i < argc; i++) {
+            uint32_t buf[128];
+            size_t len = strlen(argv[i].str);
+            // clamp to the local buffer so an over-long argument can't
+            // overflow it (the copy below was previously unbounded)
+            if (len > sizeof(buf) / sizeof(buf[0]))
+                len = sizeof(buf) / sizeof(buf[0]);
+            for (uint j = 0; j < len; j++) {
+                buf[j] = argv[i].str[j];
+            }
+            arm_dcc_write(buf, len, 1000);
+        }
+    } else if (!strcmp(argv[1].str, "read")) {
+        uint32_t buf[128];
+
+        // arm_dcc_read takes a count of 32-bit words; passing sizeof(buf)
+        // (bytes) would let it write 4x past the end of buf
+        ssize_t len = arm_dcc_read(buf, sizeof(buf) / sizeof(buf[0]), 1000);
+        printf("arm_dcc_read returns %ld\n", len);
+        if (len > 0) {
+            hexdump(buf, len * sizeof(uint32_t)); // hexdump length is in bytes
+        }
+    } else {
+        printf("unknown args\n");
+    }
+
+    return 0;
+}
+
+STATIC_COMMAND_START
+#if LK_DEBUGLEVEL > 1
+STATIC_COMMAND("dcc", "dcc stuff", &cmd_dcc)
+#endif
+STATIC_COMMAND_END(dcc);
+
+#endif
+
diff --git a/src/bsp/lk/arch/arm/arm/exceptions.S b/src/bsp/lk/arch/arm/arm/exceptions.S
new file mode 100644
index 0000000..0f03f4e
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm/exceptions.S
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2008-2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+#include <arch/asm.h>
+#include <arch/arm/cores.h>
+
+/* exception handling glue.
+ * NOTE: only usable on armv6+ cores
+ */
+
+#define TIMESTAMP_IRQ 0
+
+/* macros to align and unalign the stack on 8 byte boundary for ABI compliance */
+/* Force sp onto an 8-byte boundary (AAPCS requirement before calling C),
+ * pushing the original sp so stack_restore can undo it exactly. */
+.macro stack_align, tempreg
+    /* make sure the stack is aligned */
+    mov     \tempreg, sp
+    tst     sp, #4
+    subeq   sp, #4                  /* note: subeq fires when sp is already 8-aligned, consuming 4 extra bytes; the push below then restores 8-alignment either way */
+    push    { \tempreg }
+
+    /* tempreg holds the original stack */
+.endm
+
+/* Undo stack_align: pop the saved original sp back into place. */
+.macro stack_restore, tempreg
+    /* restore the potentially unaligned stack */
+    pop     { \tempreg }
+    mov     sp, \tempreg
+.endm
+
+/* save and disable the vfp unit */
+.macro vfp_save, temp1
+    /* save old fpexc */
+    vmrs    \temp1, fpexc
+
+    push    { \temp1 }
+
+    /* hard disable the vfp unit */
+    bic     \temp1, #(1<<30)
+    vmsr    fpexc, \temp1
+.endm
+
+/* restore the vfp enable/disable state */
+.macro vfp_restore, temp1
+    /* restore fpexc */
+    pop     { \temp1 }
+
+    vmsr    fpexc, \temp1
+.endm
+
+/* Save callee trashed registers.
+ * At exit r0 contains a pointer to the register frame.
+ */
+/* Save callee trashed registers.
+ * At exit r0 contains a pointer to the register frame.
+ * Entered from an exception mode; builds the (partial) iframe on the svc
+ * stack: spsr+lr via srsdb, caller-saved r0-r3/r12/lr, then the usr-mode
+ * banked sp/lr. Mirrored exactly by the restore macro below.
+ */
+.macro save
+    /* save spsr and r14 onto the svc stack */
+    srsdb   #0x13!
+
+    /* switch to svc mode, interrupts disabled */
+    cpsid   i,#0x13
+
+    /* save callee trashed regs and lr */
+    push    { r0-r3, r12, lr }
+
+    /* save user space sp/lr */
+    sub     sp, #8
+    stmia   sp, { r13, r14 }^
+
+#if ARM_WITH_VFP
+    /* save and disable the vfp unit */
+    vfp_save    r0
+#endif
+
+    /* make sure the stack is 8 byte aligned */
+    stack_align r0
+
+    /* r0 now holds the pointer to the original iframe (before alignment) */
+.endm
+
+/* save, after first correcting lr by the exception-specific pc offset */
+.macro save_offset, offset
+    sub     lr, \offset
+    save
+.endm
+
+/* Inverse of the save macro: unwinds each step in reverse order and
+ * returns to the interrupted context via rfeia (restores pc and cpsr). */
+.macro restore
+    /* undo the stack alignment we did before */
+    stack_restore r0
+
+#if ARM_WITH_VFP
+    /* restore the old state of the vfp unit */
+    vfp_restore r0
+#endif
+
+    /* restore user space sp/lr */
+    ldmia   sp, { r13, r14 }^
+    add     sp, #8
+
+    pop     { r0-r3, r12, lr }
+
+    /* return to whence we came from */
+    rfeia   sp!
+.endm
+
+/* Save all registers.
+ * At exit r0 contains a pointer to the register frame.
+ */
+.macro saveall
+    /* save spsr and r14 onto the svc stack */
+    srsdb   #0x13!
+
+    /* switch to svc mode, interrupts disabled */
+    cpsid   i,#0x13
+
+    /* save all regs */
+    push    { r0-r12, lr }
+
+    /* save user space sp/lr */
+    sub     sp, #8
+    stmia   sp, { r13, r14 }^
+
+#if ARM_WITH_VFP
+    /* save and disable the vfp unit */
+    vfp_save    r0
+#endif
+
+    /* make sure the stack is 8 byte aligned */
+    stack_align r0
+
+    /* r0 now holds the pointer to the original iframe (before alignment) */
+.endm
+
+.macro saveall_offset, offset
+    sub     lr, \offset
+    saveall
+.endm
+
+/* Inverse of saveall: unwinds the full-register frame in reverse order
+ * and returns to the interrupted context via rfeia (restores pc+cpsr). */
+.macro restoreall
+    /* undo the stack alignment we did before */
+    stack_restore r0
+
+#if ARM_WITH_VFP
+    /* restore the old state of the vfp unit */
+    vfp_restore r0
+#endif
+
+    /* restore user space sp/lr */
+    ldmia   sp, { r13, r14 }^
+    add     sp, #8
+
+    pop     { r0-r12, r14 }
+
+    /* return to whence we came from */
+    rfeia   sp!
+.endm
+
+/* undefined-instruction vector: lr already points at the faulting
+ * instruction, so no offset correction is needed */
+FUNCTION(arm_undefined)
+    save
+    /* r0 now holds pointer to iframe */
+
+    bl      arm_undefined_handler
+
+    restore
+
+#ifndef WITH_LIB_SYSCALL
+/* svc/swi vector: uses saveall so the handler can read and write every
+ * caller register (syscall args/results live in r0-r12) */
+FUNCTION(arm_syscall)
+    saveall
+    /* r0 now holds pointer to iframe */
+
+    bl      arm_syscall_handler
+
+    restoreall
+#endif
+
+/* prefetch-abort vector: lr is faulting pc + 4, hence the #4 correction */
+FUNCTION(arm_prefetch_abort)
+    saveall_offset #4
+    /* r0 now holds pointer to iframe */
+
+    bl      arm_prefetch_abort_handler
+
+    restoreall
+
+/* data-abort vector: lr is faulting pc + 8, hence the #8 correction */
+FUNCTION(arm_data_abort)
+    saveall_offset #8
+    /* r0 now holds pointer to iframe */
+
+    bl      arm_data_abort_handler
+
+    restoreall
+
+/* reserved vector slot: should never be taken; spin forever if it is */
+FUNCTION(arm_reserved)
+    b   .
+
+/* irq vector: lr is interrupted pc + 4, hence save_offset #4.
+ * Sets __arm_in_handler around the platform_irq call so other code can
+ * tell it is running in irq context, and preempts the current thread if
+ * platform_irq returns nonzero. */
+FUNCTION(arm_irq)
+#if TIMESTAMP_IRQ
+    /* read the cycle count; sp is the only scratch register available
+     * this early in the banked irq mode, so use it and store pc-relative */
+    mrc     p15, 0, sp, c9, c13, 0
+    str     sp, [pc, #__irq_cycle_count - . - 8]
+#endif
+
+    save_offset    #4
+
+    /* r0 now holds pointer to iframe */
+
+    /* track that we're inside an irq handler */
+    LOADCONST(r2, __arm_in_handler)
+    mov     r1, #1
+    str     r1, [r2]
+
+    /* call into higher level code */
+    bl  platform_irq
+
+    /* clear the irq handler status */
+    LOADCONST(r1, __arm_in_handler)
+    mov     r2, #0
+    str     r2, [r1]
+
+    /* reschedule if the handler returns nonzero */
+    cmp     r0, #0
+    blne    thread_preempt
+
+    restore
+
+/* fiq vector: lr is interrupted pc + 4, hence save_offset #4 */
+FUNCTION(arm_fiq)
+    save_offset #4
+    /* r0 now holds pointer to iframe */
+
+    bl  platform_fiq
+
+    restore
+
+.ltorg
+
+#if TIMESTAMP_IRQ
+DATA(__irq_cycle_count)
+    .word   0
+#endif
+
+.data
+DATA(__arm_in_handler)
+    .word   0
diff --git a/src/bsp/lk/arch/arm/arm/faults.c b/src/bsp/lk/arch/arm/arm/faults.c
new file mode 100644
index 0000000..0dc9d17
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm/faults.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2008-2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <debug.h>
+#include <bits.h>
+#include <arch/arm.h>
+#include <kernel/thread.h>
+#include <platform.h>
+
+/* one entry of the data-abort fixup table: if a data abort occurs at .pc,
+ * arm_data_abort_handler redirects execution to .fault_handler */
+struct fault_handler_table_entry {
+    uint32_t pc;
+    uint32_t fault_handler;
+};
+
+/* table bounds; presumably laid down by the linker script -- confirm */
+extern struct fault_handler_table_entry __fault_handler_table_start[];
+extern struct fault_handler_table_entry __fault_handler_table_end[];
+
+/*
+ * Print r13/r14 for every processor mode, starring the mode the exception
+ * came from (per spsr), then hexdump the bottom of that mode's stack.
+ * svc r13/r14 are passed in by the caller because the values banked at the
+ * time of the fault live in the fault frame, not in the current svc bank.
+ *
+ * Fix: removed a leftover debug line that printed a literal 'a' as the mode
+ * marker and duplicated the svc row with the re-read banked values.
+ */
+static void dump_mode_regs(uint32_t spsr, uint32_t svc_r13, uint32_t svc_r14)
+{
+    struct arm_mode_regs regs;
+    arm_save_mode_regs(&regs);
+
+    dprintf(CRITICAL, "%c%s r13 0x%08x r14 0x%08x\n", ((spsr & CPSR_MODE_MASK) == CPSR_MODE_USR) ? '*' : ' ', "usr", regs.usr_r13, regs.usr_r14);
+    dprintf(CRITICAL, "%c%s r13 0x%08x r14 0x%08x\n", ((spsr & CPSR_MODE_MASK) == CPSR_MODE_FIQ) ? '*' : ' ', "fiq", regs.fiq_r13, regs.fiq_r14);
+    dprintf(CRITICAL, "%c%s r13 0x%08x r14 0x%08x\n", ((spsr & CPSR_MODE_MASK) == CPSR_MODE_IRQ) ? '*' : ' ', "irq", regs.irq_r13, regs.irq_r14);
+    dprintf(CRITICAL, "%c%s r13 0x%08x r14 0x%08x\n", ((spsr & CPSR_MODE_MASK) == CPSR_MODE_SVC) ? '*' : ' ', "svc", svc_r13, svc_r14);
+    dprintf(CRITICAL, "%c%s r13 0x%08x r14 0x%08x\n", ((spsr & CPSR_MODE_MASK) == CPSR_MODE_ABT) ? '*' : ' ', "abt", regs.abt_r13, regs.abt_r14);
+    dprintf(CRITICAL, "%c%s r13 0x%08x r14 0x%08x\n", ((spsr & CPSR_MODE_MASK) == CPSR_MODE_UND) ? '*' : ' ', "und", regs.und_r13, regs.und_r14);
+    dprintf(CRITICAL, "%c%s r13 0x%08x r14 0x%08x\n", ((spsr & CPSR_MODE_MASK) == CPSR_MODE_SYS) ? '*' : ' ', "sys", regs.sys_r13, regs.sys_r14);
+
+    // dump the bottom of the current stack
+    addr_t stack;
+    switch (spsr & CPSR_MODE_MASK) {
+        case CPSR_MODE_FIQ:
+            stack = regs.fiq_r13;
+            break;
+        case CPSR_MODE_IRQ:
+            stack = regs.irq_r13;
+            break;
+        case CPSR_MODE_SVC:
+            stack = svc_r13;
+            break;
+        case CPSR_MODE_UND:
+            stack = regs.und_r13;
+            break;
+        case CPSR_MODE_SYS:
+            stack = regs.sys_r13;
+            break;
+        default:
+            stack = 0;
+    }
+
+    if (stack != 0) {
+        dprintf(CRITICAL, "bottom of stack at 0x%08x:\n", (unsigned int)stack);
+        hexdump((void *)stack, 128);
+    }
+}
+
+/*
+ * Dump a full arm_fault_frame: all of r0-r12, user sp/lr, pc, spsr, plus the
+ * banked registers of every mode via dump_mode_regs. (frame + 1) is the svc
+ * stack address just past the frame -- the svc sp before the frame was pushed.
+ */
+static void dump_fault_frame(struct arm_fault_frame *frame)
+{
+    struct thread *current_thread = get_current_thread();
+
+    dprintf(CRITICAL, "current_thread %p, name %s\n",
+            current_thread, current_thread ? current_thread->name : "");
+
+    dprintf(CRITICAL, "r0  0x%08x r1  0x%08x r2  0x%08x r3  0x%08x\n", frame->r[0], frame->r[1], frame->r[2], frame->r[3]);
+    dprintf(CRITICAL, "r4  0x%08x r5  0x%08x r6  0x%08x r7  0x%08x\n", frame->r[4], frame->r[5], frame->r[6], frame->r[7]);
+    dprintf(CRITICAL, "r8  0x%08x r9  0x%08x r10 0x%08x r11 0x%08x\n", frame->r[8], frame->r[9], frame->r[10], frame->r[11]);
+    dprintf(CRITICAL, "r12 0x%08x usp 0x%08x ulr 0x%08x pc  0x%08x\n", frame->r[12], frame->usp, frame->ulr, frame->pc);
+    dprintf(CRITICAL, "spsr 0x%08x\n", frame->spsr);
+
+    dump_mode_regs(frame->spsr, (uintptr_t)(frame + 1), frame->lr);
+}
+
+/* dump the smaller arm_iframe (only r0-r3, r12, user sp/lr, pc, spsr are
+ * saved on this path), then the banked registers of every mode */
+static void dump_iframe(struct arm_iframe *frame)
+{
+    dprintf(CRITICAL, "r0  0x%08x r1  0x%08x r2  0x%08x r3  0x%08x\n", frame->r0, frame->r1, frame->r2, frame->r3);
+    dprintf(CRITICAL, "r12 0x%08x usp 0x%08x ulr 0x%08x pc  0x%08x\n", frame->r12, frame->usp, frame->ulr, frame->pc);
+    dprintf(CRITICAL, "spsr 0x%08x\n", frame->spsr);
+
+    dump_mode_regs(frame->spsr, (uintptr_t)(frame + 1), frame->lr);
+}
+
+/*
+ * Terminal error path for full-frame faults: print the message and the
+ * complete register dump, then halt the platform. Never returns.
+ */
+static void exception_die(struct arm_fault_frame *frame, const char *msg)
+{
+    dprintf(CRITICAL, "%s", msg);
+    dump_fault_frame(frame);
+
+    platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
+
+    /* platform_halt is expected not to return; spin if it does */
+    while (1)
+        ;
+}
+
+/*
+ * Terminal error path for iframe-based faults (undefined instruction):
+ * print the message and the partial register dump, then halt. Never returns.
+ */
+static void exception_die_iframe(struct arm_iframe *frame, const char *msg)
+{
+    dprintf(CRITICAL, "%s", msg);
+    dump_iframe(frame);
+
+    platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
+
+    /* platform_halt is expected not to return; spin if it does */
+    while (1)
+        ;
+}
+
+/* default syscall handler; __WEAK so a platform or app layer can override it */
+__WEAK void arm_syscall_handler(struct arm_fault_frame *frame)
+{
+    exception_die(frame, "unhandled syscall, halting\n");
+}
+
+/*
+ * Undefined-instruction exception. The saved pc points past the faulting
+ * instruction (2 bytes in thumb, 4 in arm), so rewind it first. With VFP
+ * support compiled in, a vfp/neon opcode means the unit is merely disabled:
+ * hand off to the lazy-fpu path so the instruction can be retried.
+ * Anything else is fatal.
+ */
+void arm_undefined_handler(struct arm_iframe *frame)
+{
+    /* look at the undefined instruction, figure out if it's something we can handle */
+    bool in_thumb = frame->spsr & (1<<5);
+    if (in_thumb) {
+        frame->pc -= 2;
+    } else {
+        frame->pc -= 4;
+    }
+
+    /* NOTE(review): always a 32-bit read; in thumb this may be a 2-byte-aligned
+     * (unaligned) access -- relies on unaligned-access support, confirm */
+    __UNUSED uint32_t opcode = *(uint32_t *)frame->pc;
+    //dprintf(CRITICAL, "undefined opcode 0x%x\n", opcode);
+
+#if ARM_WITH_VFP
+    if (in_thumb) {
+        /* look for a 32bit thumb instruction */
+        /* NOTE(review): 32-bit thumb encodings have hw1[15:11] in 0b11101..0b11111;
+         * this mask test accepts more than that -- confirm it is intentional */
+        if (opcode & 0x0000e800) {
+            /* swap the 16bit words */
+            opcode = (opcode >> 16) | (opcode << 16);
+        }
+
+        if (((opcode & 0xec000e00) == 0xec000a00) || // vfp
+                ((opcode & 0xef000000) == 0xef000000) || // advanced simd data processing
+                ((opcode & 0xff100000) == 0xf9000000)) { // VLD
+
+            //dprintf(CRITICAL, "vfp/neon thumb instruction 0x%08x at 0x%x\n", opcode, frame->pc);
+            goto fpu;
+        }
+    } else {
+        /* look for arm vfp/neon coprocessor instructions */
+        if (((opcode & 0x0c000e00) == 0x0c000a00) || // vfp
+                ((opcode & 0xfe000000) == 0xf2000000) || // advanced simd data processing
+                ((opcode & 0xff100000) == 0xf4000000)) { // VLD
+            //dprintf(CRITICAL, "vfp/neon arm instruction 0x%08x at 0x%x\n", opcode, frame->pc);
+            goto fpu;
+        }
+    }
+#endif
+
+    exception_die_iframe(frame, "undefined abort, halting\n");
+    return;
+
+#if ARM_WITH_VFP
+fpu:
+    /* enable the fpu for this thread and return to retry the instruction */
+    arm_fpu_undefined_instruction(frame);
+#endif
+}
+
+/*
+ * Data abort exception. Reads DFSR/DFAR, first gives the registered
+ * fault_handler_table a chance to redirect pc, then decodes the fault
+ * status into a diagnostic and dies.
+ */
+void arm_data_abort_handler(struct arm_fault_frame *frame)
+{
+    struct fault_handler_table_entry *fault_handler;
+    uint32_t fsr = arm_read_dfsr();
+    uint32_t far = arm_read_dfar();
+
+    /* short-descriptor FS field: FS[4] is DFSR bit 10, FS[3:0] are bits 3:0 */
+    uint32_t fault_status = (BIT(fsr, 10) ? (1<<4) : 0) |  BITS(fsr, 3, 0);
+
+    /* a registered handler for this exact pc absorbs the fault */
+    for (fault_handler = __fault_handler_table_start; fault_handler < __fault_handler_table_end; fault_handler++) {
+        if (fault_handler->pc == frame->pc) {
+            frame->pc = fault_handler->fault_handler;
+            return;
+        }
+    }
+
+    dprintf(CRITICAL, "\n\ncpu %u data abort, ", arch_curr_cpu_num());
+    bool write = !!BIT(fsr, 11);
+
+    /* decode the fault status (from table B3-23) */
+    switch (fault_status) {
+        case 0b00001: // alignment fault
+            dprintf(CRITICAL, "alignment fault on %s\n", write ? "write" : "read");
+            break;
+        case 0b00101:
+        case 0b00111: // translation fault
+            dprintf(CRITICAL, "translation fault on %s\n", write ? "write" : "read");
+            break;
+        case 0b00011:
+        case 0b00110: // access flag fault
+            dprintf(CRITICAL, "access flag fault on %s\n", write ? "write" : "read");
+            break;
+        case 0b01001:
+        case 0b01011: // domain fault
+            dprintf(CRITICAL, "domain fault, domain %lu\n", BITS_SHIFT(fsr, 7, 4));
+            break;
+        case 0b01101:
+        case 0b01111: // permission fault
+            dprintf(CRITICAL, "permission fault on %s\n", write ? "write" : "read");
+            break;
+        case 0b00010: // debug event
+            dprintf(CRITICAL, "debug event\n");
+            break;
+        case 0b01000: // synchronous external abort
+            dprintf(CRITICAL, "synchronous external abort on %s\n", write ? "write" : "read");
+            break;
+        case 0b10110: // asynchronous external abort
+            dprintf(CRITICAL, "asynchronous external abort on %s\n", write ? "write" : "read");
+            break;
+        case 0b10000: // TLB conflict event
+        case 0b11001: // synchronous parity error on memory access
+        case 0b00100: // fault on instruction cache maintenance
+        case 0b01100: // synchronous external abort on translation table walk
+        case 0b01110: //    "
+        case 0b11100: // synchronous parity error on translation table walk
+        case 0b11110: //    "
+        case 0b11000: // asynchronous parity error on memory access
+        default:
+            dprintf(CRITICAL, "unhandled fault\n");
+            ;
+    }
+
+    dprintf(CRITICAL, "DFAR 0x%x (fault address)\n", far);
+    dprintf(CRITICAL, "DFSR 0x%x (fault status register)\n", fsr);
+
+    exception_die(frame, "halting\n");
+}
+
+/*
+ * Prefetch abort exception. Reads IFSR/IFAR, decodes the fault status into
+ * a diagnostic and dies; unlike the data abort path there is no fixup table.
+ */
+void arm_prefetch_abort_handler(struct arm_fault_frame *frame)
+{
+    uint32_t fsr = arm_read_ifsr();
+    uint32_t far = arm_read_ifar();
+
+    /* short-descriptor FS field: FS[4] is IFSR bit 10, FS[3:0] are bits 3:0 */
+    uint32_t fault_status = (BIT(fsr, 10) ? (1<<4) : 0) |  BITS(fsr, 3, 0);
+
+    dprintf(CRITICAL, "\n\ncpu %u prefetch abort, ", arch_curr_cpu_num());
+
+    /* decode the fault status (from table B3-23) */
+    switch (fault_status) {
+        case 0b00001: // alignment fault
+            dprintf(CRITICAL, "alignment fault\n");
+            break;
+        case 0b00101:
+        case 0b00111: // translation fault
+            dprintf(CRITICAL, "translation fault\n");
+            break;
+        case 0b00011:
+        case 0b00110: // access flag fault
+            dprintf(CRITICAL, "access flag fault\n");
+            break;
+        case 0b01001:
+        case 0b01011: // domain fault
+            dprintf(CRITICAL, "domain fault, domain %lu\n", BITS_SHIFT(fsr, 7, 4));
+            break;
+        case 0b01101:
+        case 0b01111: // permission fault
+            dprintf(CRITICAL, "permission fault\n");
+            break;
+        case 0b00010: // debug event
+            dprintf(CRITICAL, "debug event\n");
+            break;
+        case 0b01000: // synchronous external abort
+            dprintf(CRITICAL, "synchronous external abort\n");
+            break;
+        case 0b10110: // asynchronous external abort
+            dprintf(CRITICAL, "asynchronous external abort\n");
+            break;
+        case 0b10000: // TLB conflict event
+        case 0b11001: // synchronous parity error on memory access
+        case 0b00100: // fault on instruction cache maintenance
+        case 0b01100: // synchronous external abort on translation table walk
+        case 0b01110: //    "
+        case 0b11100: // synchronous parity error on translation table walk
+        case 0b11110: //    "
+        case 0b11000: // asynchronous parity error on memory access
+        default:
+            dprintf(CRITICAL, "unhandled fault\n");
+            ;
+    }
+
+    dprintf(CRITICAL, "IFAR 0x%x (fault address)\n", far);
+    dprintf(CRITICAL, "IFSR 0x%x (fault status register)\n", fsr);
+
+    exception_die(frame, "halting\n");
+}
diff --git a/src/bsp/lk/arch/arm/arm/fpu.c b/src/bsp/lk/arch/arm/arm/fpu.c
new file mode 100644
index 0000000..94bd010
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm/fpu.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2013-2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <arch/arm.h>
+#include <assert.h>
+#include <trace.h>
+#include <stdbool.h>
+#include <string.h>
+#include <kernel/thread.h>
+
+#define LOCAL_TRACE 0
+
+/* true if the VFP implements only 16 double registers (d0-d15):
+ * the A_SIMD registers field, MVFR0 bits [3:0], reads 1 in that case */
+static inline bool is_16regs(void)
+{
+    uint32_t mvfr0;
+    __asm__ volatile("vmrs	%0, MVFR0" : "=r"(mvfr0));
+
+    return (mvfr0 & 0xf) == 1;
+}
+
+/*
+ * Read the FPEXC (floating point exception control) register, using the
+ * legacy cp10 encoding of "vmrs reg, fpexc" so no VFP assembler support is
+ * required. (The old comment said "vmsr", which is the write direction.)
+ * Fix: marked the asm volatile -- fpexc is mutable hardware state, so the
+ * compiler must not CSE or hoist the read.
+ */
+static inline uint32_t read_fpexc(void)
+{
+    uint32_t val;
+    __asm__ volatile("mrc  p10, 7, %0, c8, c0, 0" : "=r" (val));
+    return val;
+}
+
+/* write the FPEXC (floating point exception control) register */
+static inline void write_fpexc(uint32_t val)
+{
+    /* use legacy cp10 encoding of vmsr fpexc, reg */
+    __asm__ volatile("mcr  p10, 7, %0, c8, c0, 0" :: "r" (val));
+}
+
+/* turn the fpu/neon unit on or off by setting or clearing FPEXC.EN (bit 30);
+ * all other fpexc bits are written as zero */
+void arm_fpu_set_enable(bool enable)
+{
+    uint32_t fpexc = 0;
+
+    if (enable)
+        fpexc = (1U << 30); /* FPEXC.EN */
+
+    write_fpexc(fpexc);
+}
+
+#if ARM_WITH_VFP
+/*
+ * Lazy-fpu trap: reached from arm_undefined_handler when a vfp/neon
+ * instruction executes with the unit disabled. Marks the current thread as
+ * an fpu user, loads its fpu state, and sets FPEXC.EN in the frame so the
+ * exception return re-executes the instruction with the unit enabled.
+ */
+void arm_fpu_undefined_instruction(struct arm_iframe *frame)
+{
+    thread_t *t = get_current_thread();
+
+    /* fpu state is tracked per thread; it cannot be lazily enabled in irq context */
+    if (unlikely(arch_in_int_handler())) {
+        panic("floating point code in irq context. pc 0x%x\n", frame->pc);
+    }
+
+    LTRACEF("enabling fpu on thread %p\n", t);
+
+    t->arch.fpused = true;
+    arm_fpu_thread_swap(NULL, t);
+
+    /* make sure the irq glue leaves the floating point unit enabled on the way out */
+    frame->fpexc |= (1<<30);
+}
+
+/* reset a thread's saved fpu state: cleared registers, default fpscr,
+ * FPEXC.EN preset so a later restore enables the unit; not an fpu user yet */
+void arm_fpu_thread_initialize(struct thread *t)
+{
+    /* zero the fpu register state */
+    memset(t->arch.fpregs, 0, sizeof(t->arch.fpregs));
+
+    t->arch.fpexc = (1<<30);
+    t->arch.fpscr = 0;
+    t->arch.fpused = false;
+}
+
+/*
+ * Context-switch hook for fpu state. Saves the outgoing thread's
+ * d-registers/fpscr/fpexc if it ever used the fpu, then either restores the
+ * incoming thread's state, or leaves the unit disabled so that thread's
+ * first fpu instruction traps into arm_fpu_undefined_instruction.
+ * Either argument may be NULL to skip that half (see the lazy-fpu trap).
+ */
+void arm_fpu_thread_swap(struct thread *oldthread, struct thread *newthread)
+{
+    LTRACEF("old %p (%d), new %p (%d)\n",
+            oldthread, oldthread ? oldthread->arch.fpused : 0,
+            newthread, newthread ? newthread->arch.fpused : 0);
+
+    if (oldthread) {
+        if (oldthread->arch.fpused) {
+            /* save the old state */
+            uint32_t fpexc;
+            fpexc = read_fpexc();
+
+            oldthread->arch.fpexc = fpexc;
+
+            /* make sure that the fpu is enabled, so the next instructions won't fault */
+            arm_fpu_set_enable(true);
+
+            __asm__ volatile("vmrs  %0, fpscr" : "=r" (oldthread->arch.fpscr));
+            __asm__ volatile("vstm   %0, { d0-d15 }" :: "r" (&oldthread->arch.fpregs[0]));
+            if (!is_16regs()) {
+                /* upper bank only exists on 32-register implementations */
+                __asm__ volatile("vstm   %0, { d16-d31 }" :: "r" (&oldthread->arch.fpregs[16]));
+            }
+
+            arm_fpu_set_enable(false);
+        }
+    }
+
+    if (newthread) {
+        if (newthread->arch.fpused) {
+            // load the new state
+            arm_fpu_set_enable(true);
+            __asm__ volatile("vmsr  fpscr, %0" :: "r" (newthread->arch.fpscr));
+
+            __asm__ volatile("vldm   %0, { d0-d15 }" :: "r" (&newthread->arch.fpregs[0]));
+            if (!is_16regs()) {
+                __asm__ volatile("vldm   %0, { d16-d31 }" :: "r" (&newthread->arch.fpregs[16]));
+            }
+            /* restore the thread's own fpexc last (may leave the unit enabled) */
+            write_fpexc(newthread->arch.fpexc);
+        } else {
+            arm_fpu_set_enable(false);
+        }
+    }
+}
+#endif
diff --git a/src/bsp/lk/arch/arm/arm/include/arch/arch_thread.h b/src/bsp/lk/arch/arm/arm/include/arch/arch_thread.h
new file mode 100644
index 0000000..b7e6e4d
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm/include/arch/arch_thread.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2008-2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __ARM_ARCH_THREAD_H
+#define __ARM_ARCH_THREAD_H
+
+#include <sys/types.h>
+
+/* per-thread architectural state */
+struct arch_thread {
+    /* saved stack pointer -- presumably used by the context switch code; confirm */
+    vaddr_t sp;
+
+#if ARM_WITH_VFP
+    /* has this thread ever used the floating point state? */
+    bool fpused;
+
+    /* lazily saved vfp/neon state (managed by arch/arm/arm/fpu.c) */
+    uint32_t fpscr;
+    uint32_t fpexc;
+    double   fpregs[32];
+#endif
+};
+
+#endif
+
diff --git a/src/bsp/lk/arch/arm/arm/mmu.c b/src/bsp/lk/arch/arm/arm/mmu.c
new file mode 100644
index 0000000..1b8a5c6
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm/mmu.c
@@ -0,0 +1,635 @@
+/*
+ * Copyright (c) 2008-2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <debug.h>
+#include <trace.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <err.h>
+#include <string.h>
+#include <compiler.h>
+#include <arch.h>
+#include <arch/ops.h>
+#include <arch/mmu.h>
+#include <arch/arm.h>
+#include <arch/arm/mmu.h>
+#include <kernel/vm.h>
+
+#define LOCAL_TRACE 0
+
+#if ARM_WITH_MMU
+
+#define IS_SECTION_ALIGNED(x) IS_ALIGNED(x, SECTION_SIZE)
+#define IS_SUPERSECTION_ALIGNED(x) IS_ALIGNED(x, SUPERSECTION_SIZE)
+
+/* locals */
+static void arm_mmu_map_section(addr_t paddr, addr_t vaddr, uint flags);
+static void arm_mmu_unmap_section(addr_t vaddr);
+
+/* the main translation table: 4096 L1 entries of 1MB each (4GB of VA),
+ * 16KB aligned as the short-descriptor TTBR requires */
+uint32_t arm_kernel_translation_table[4096] __ALIGNED(16384) __SECTION(".translation_table");
+
+/* convert user level mmu flags to flags that go in L1 descriptors */
+static uint32_t mmu_flags_to_l1_arch_flags(uint flags)
+{
+    uint32_t arch_flags = 0;
+    switch (flags & ARCH_MMU_FLAG_CACHE_MASK) {
+        case ARCH_MMU_FLAG_CACHED:
+            arch_flags |= MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE;
+#if WITH_SMP
+            arch_flags |= MMU_MEMORY_L1_SECTION_SHAREABLE;
+#endif
+            break;
+        case ARCH_MMU_FLAG_UNCACHED:
+            arch_flags |= MMU_MEMORY_L1_TYPE_STRONGLY_ORDERED;
+            break;
+        case ARCH_MMU_FLAG_UNCACHED_DEVICE:
+            arch_flags |= MMU_MEMORY_L1_TYPE_DEVICE_SHARED;
+            break;
+        default:
+            /* invalid user-supplied flag */
+            DEBUG_ASSERT(1);
+            return ERR_INVALID_ARGS;
+    }
+
+    switch (flags & (ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO)) {
+        case 0:
+            arch_flags |= MMU_MEMORY_L1_AP_P_RW_U_NA;
+            break;
+        case ARCH_MMU_FLAG_PERM_RO:
+            arch_flags |= MMU_MEMORY_L1_AP_P_RO_U_NA;
+            break;
+        case ARCH_MMU_FLAG_PERM_USER:
+            arch_flags |= MMU_MEMORY_L1_AP_P_RW_U_RW;
+            break;
+        case ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO:
+            arch_flags |= MMU_MEMORY_L1_AP_P_RO_U_RO;
+            break;
+    }
+
+    if (flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE) {
+        arch_flags |= MMU_MEMORY_L1_SECTION_XN;
+    }
+
+    if (flags & ARCH_MMU_FLAG_NS) {
+        arch_flags |= MMU_MEMORY_L1_SECTION_NON_SECURE;
+    }
+
+    return arch_flags;
+}
+
+/* convert user level mmu flags to flags that go in L2 descriptors */
+/*
+ * Convert generic ARCH_MMU_FLAG_* flags to the attribute bits of an L2
+ * small-page descriptor. Note XN is encoded in the descriptor type bits
+ * for small pages, so the descriptor type is chosen here as well.
+ * Returns ERR_INVALID_ARGS (as a uint32_t) for an invalid cache setting.
+ */
+static uint32_t mmu_flags_to_l2_arch_flags_small_page(uint flags)
+{
+    uint32_t arch_flags = 0;
+    switch (flags & ARCH_MMU_FLAG_CACHE_MASK) {
+        case ARCH_MMU_FLAG_CACHED:
+            arch_flags |= MMU_MEMORY_L2_TYPE_NORMAL_WRITE_BACK_ALLOCATE;
+#if WITH_SMP
+            /* cacheable memory must be marked shareable for coherency on SMP.
+             * Fix: the flag was redundantly OR'd in twice under duplicated
+             * #if WITH_SMP blocks; collapsed to one. */
+            arch_flags |= MMU_MEMORY_L2_SHAREABLE;
+#endif
+            break;
+        case ARCH_MMU_FLAG_UNCACHED:
+            arch_flags |= MMU_MEMORY_L2_TYPE_STRONGLY_ORDERED;
+            break;
+        case ARCH_MMU_FLAG_UNCACHED_DEVICE:
+            arch_flags |= MMU_MEMORY_L2_TYPE_DEVICE_SHARED;
+            break;
+        default:
+            /* invalid user-supplied flag.
+             * Fix: assert on 0 so the debug assert actually fires;
+             * DEBUG_ASSERT(1) was a no-op. */
+            DEBUG_ASSERT(0);
+            return ERR_INVALID_ARGS;
+    }
+
+    switch (flags & (ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO)) {
+        case 0:
+            arch_flags |= MMU_MEMORY_L2_AP_P_RW_U_NA;
+            break;
+        case ARCH_MMU_FLAG_PERM_RO:
+            arch_flags |= MMU_MEMORY_L2_AP_P_RO_U_NA;
+            break;
+        case ARCH_MMU_FLAG_PERM_USER:
+            arch_flags |= MMU_MEMORY_L2_AP_P_RW_U_RW;
+            break;
+        case ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO:
+            arch_flags |= MMU_MEMORY_L2_AP_P_RO_U_RO;
+            break;
+    }
+
+    if (flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE) {
+        arch_flags |= MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE_XN;
+    } else {
+        arch_flags |= MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE;
+    }
+
+    return arch_flags;
+}
+
+/*
+ * Install a 1MB section mapping vaddr -> paddr in the kernel translation
+ * table. flags are raw L1 descriptor bits that must already include the
+ * SECTION descriptor type (asserted below). No TLB maintenance is done here.
+ */
+static void arm_mmu_map_section(addr_t paddr, addr_t vaddr, uint flags)
+{
+    int index;
+
+    LTRACEF("pa 0x%lx va 0x%lx flags 0x%x\n", paddr, vaddr, flags);
+
+    DEBUG_ASSERT(IS_SECTION_ALIGNED(paddr));
+    DEBUG_ASSERT(IS_SECTION_ALIGNED(vaddr));
+    DEBUG_ASSERT((flags & MMU_MEMORY_L1_DESCRIPTOR_MASK) == MMU_MEMORY_L1_DESCRIPTOR_SECTION);
+
+    /* Get the index into the translation table */
+    index = vaddr / SECTION_SIZE;
+
+    /* Set the entry value:
+     * (2<<0): Section entry
+     * (0<<5): Domain = 0
+     *  flags: TEX, CB and AP bit settings provided by the caller.
+     */
+    arm_kernel_translation_table[index] = (paddr & ~(MB-1)) | (MMU_MEMORY_DOMAIN_MEM << 5) | MMU_MEMORY_L1_DESCRIPTOR_SECTION | flags;
+}
+
+/* zero one L1 entry and invalidate the tlb for the VA it covered; the caller
+ * is responsible for the final barrier (arm_after_invalidate_tlb_barrier) */
+static void arm_mmu_unmap_l1_entry(uint32_t index)
+{
+    DEBUG_ASSERT(index < countof(arm_kernel_translation_table));
+
+    arm_kernel_translation_table[index] = 0;
+    DSB;
+    arm_invalidate_tlb_mva_no_barrier((vaddr_t)index * SECTION_SIZE);
+}
+
+/* remove the 1MB section mapping that covers vaddr */
+static void arm_mmu_unmap_section(addr_t vaddr)
+{
+    DEBUG_ASSERT(IS_SECTION_ALIGNED(vaddr));
+
+    uint32_t l1_index = vaddr / SECTION_SIZE;
+    arm_mmu_unmap_l1_entry(l1_index);
+}
+
+/* early mmu hook: nothing to do here; the initial translation table is
+ * presumably set up by the boot/start code -- confirm */
+void arm_mmu_early_init(void)
+{
+}
+
+/* tear down the boot-time mappings that were flagged as temporary,
+ * one 1MB section at a time, then issue the post-invalidate barrier */
+void arm_mmu_init(void)
+{
+    /* unmap the initial mappings that are marked temporary */
+    struct mmu_initial_mapping *map = mmu_initial_mappings;
+    while (map->size > 0) {
+        if (map->flags & MMU_INITIAL_MAPPING_TEMPORARY) {
+            vaddr_t va = map->virt;
+            size_t size = map->size;
+
+            DEBUG_ASSERT(IS_SECTION_ALIGNED(size));
+
+            while (size > 0) {
+                arm_mmu_unmap_section(va);
+                va += MB;
+                size -= MB;
+            }
+        }
+        map++;
+    }
+    arm_after_invalidate_tlb_barrier();
+}
+
+/* turn the mmu off by clearing SCTLR.M (bit 0) */
+void arch_disable_mmu(void)
+{
+    arm_write_sctlr(arm_read_sctlr() & ~(1<<0)); // mmu disabled
+}
+
+/*
+ * Walk the kernel translation table for vaddr. On success fills in the
+ * physical address (*paddr) and the decoded ARCH_MMU_FLAG_* attributes
+ * (*flags); either out pointer may be NULL. Returns ERR_NOT_FOUND for an
+ * unmapped address and panics on descriptor types this code does not
+ * implement (supersections, large pages).
+ */
+status_t arch_mmu_query(vaddr_t vaddr, paddr_t *paddr, uint *flags)
+{
+    //LTRACEF("vaddr 0x%lx\n", vaddr);
+
+    /* Get the index into the translation table */
+    uint index = vaddr / MB;
+
+    /* decode it */
+    uint32_t tt_entry = arm_kernel_translation_table[index];
+    switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
+        case MMU_MEMORY_L1_DESCRIPTOR_INVALID:
+            return ERR_NOT_FOUND;
+        case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
+            if (tt_entry & (1<<18)) {
+                /* supersection */
+                PANIC_UNIMPLEMENTED;
+            }
+
+            /* section */
+            if (paddr)
+                *paddr = MMU_MEMORY_L1_SECTION_ADDR(tt_entry) + (vaddr & (SECTION_SIZE - 1));
+
+            if (flags) {
+                /* translate the section descriptor bits back into generic flags */
+                *flags = 0;
+                if (tt_entry & MMU_MEMORY_L1_SECTION_NON_SECURE)
+                    *flags |= ARCH_MMU_FLAG_NS;
+                switch (tt_entry & MMU_MEMORY_L1_TYPE_MASK) {
+                    case MMU_MEMORY_L1_TYPE_STRONGLY_ORDERED:
+                        *flags |= ARCH_MMU_FLAG_UNCACHED;
+                        break;
+                    case MMU_MEMORY_L1_TYPE_DEVICE_SHARED:
+                    case MMU_MEMORY_L1_TYPE_DEVICE_NON_SHARED:
+                        *flags |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
+                        break;
+                }
+                switch (tt_entry & MMU_MEMORY_L1_AP_MASK) {
+                    case MMU_MEMORY_L1_AP_P_RO_U_NA:
+                        *flags |= ARCH_MMU_FLAG_PERM_RO;
+                        break;
+                    case MMU_MEMORY_L1_AP_P_RW_U_NA:
+                        break;
+                    case MMU_MEMORY_L1_AP_P_RO_U_RO:
+                        *flags |= ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO;
+                        break;
+                    case MMU_MEMORY_L1_AP_P_RW_U_RW:
+                        *flags |= ARCH_MMU_FLAG_PERM_USER;
+                        break;
+                }
+                if (tt_entry & MMU_MEMORY_L1_SECTION_XN) {
+                    *flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
+                }
+            }
+            break;
+        case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
+            /* second-level walk through the referenced L2 table */
+            uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
+            uint l2_index = (vaddr % SECTION_SIZE) / PAGE_SIZE;
+            uint32_t l2_entry = l2_table[l2_index];
+
+            //LTRACEF("l2_table at %p, index %u, entry 0x%x\n", l2_table, l2_index, l2_entry);
+
+            switch (l2_entry & MMU_MEMORY_L2_DESCRIPTOR_MASK) {
+                default:
+                case MMU_MEMORY_L2_DESCRIPTOR_INVALID:
+                    return ERR_NOT_FOUND;
+                case MMU_MEMORY_L2_DESCRIPTOR_LARGE_PAGE:
+                    PANIC_UNIMPLEMENTED;
+                    break;
+                case MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE:
+                case MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE_XN:
+                    if (paddr)
+                        *paddr = MMU_MEMORY_L2_SMALL_PAGE_ADDR(l2_entry) + (vaddr & (PAGE_SIZE - 1));
+
+                    if (flags) {
+                        *flags = 0;
+                        /* NS flag is only present on L1 entry */
+                        if (tt_entry & MMU_MEMORY_L1_PAGETABLE_NON_SECURE)
+                            *flags |= ARCH_MMU_FLAG_NS;
+                        switch (l2_entry & MMU_MEMORY_L2_TYPE_MASK) {
+                            case MMU_MEMORY_L2_TYPE_STRONGLY_ORDERED:
+                                *flags |= ARCH_MMU_FLAG_UNCACHED;
+                                break;
+                            case MMU_MEMORY_L2_TYPE_DEVICE_SHARED:
+                            case MMU_MEMORY_L2_TYPE_DEVICE_NON_SHARED:
+                                *flags |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
+                                break;
+                        }
+                        switch (l2_entry & MMU_MEMORY_L2_AP_MASK) {
+                            case MMU_MEMORY_L2_AP_P_RO_U_NA:
+                                *flags |= ARCH_MMU_FLAG_PERM_RO;
+                                break;
+                            case MMU_MEMORY_L2_AP_P_RW_U_NA:
+                                break;
+                            case MMU_MEMORY_L2_AP_P_RO_U_RO:
+                                *flags |= ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO;
+                                break;
+                            case MMU_MEMORY_L2_AP_P_RW_U_RW:
+                                *flags |= ARCH_MMU_FLAG_PERM_USER;
+                                break;
+                        }
+                        /* for small pages XN is part of the descriptor type */
+                        if ((l2_entry & MMU_MEMORY_L2_DESCRIPTOR_MASK) ==
+                                MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE_XN) {
+                            *flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
+                        }
+                    }
+                    break;
+            }
+
+            break;
+        }
+        default:
+            PANIC_UNIMPLEMENTED;
+    }
+
+    return NO_ERROR;
+}
+
+
+/*
+ *  We allow up to 4 adjacent L1 entries to point within the same memory page
+ *  allocated for L2 page tables.
+ *
+ *  L1:   | 0 | 1 | 2 | 3 | .... | N+0 | N+1 | N+2 | N+3 |
+ *  L2:   [       0       | .....[      (N/4)            |
+ */
+#define L1E_PER_PAGE 4
+
+/*
+ * Return (in *ppa) the physical address of the quarter-page L2 table slice
+ * backing l1_index. Reuses the page already referenced by any of the 4
+ * adjacent L1 entries that share it (see the L1E_PER_PAGE diagram above);
+ * otherwise allocates and zeroes a fresh page (zero entries = no access).
+ * Returns NO_ERROR or ERR_NO_MEMORY.
+ */
+static status_t get_l2_table(uint32_t l1_index, paddr_t *ppa)
+{
+    status_t ret;
+    paddr_t pa;
+    uint32_t tt_entry;
+
+    DEBUG_ASSERT(ppa);
+
+    /* lookup an existing l2 pagetable */
+    for (uint i = 0; i < L1E_PER_PAGE; i++) {
+        tt_entry = arm_kernel_translation_table[ROUNDDOWN(l1_index, L1E_PER_PAGE) + i];
+        if ((tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK)
+                == MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE) {
+            *ppa = (paddr_t)ROUNDDOWN(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry), PAGE_SIZE)
+                   + (PAGE_SIZE / L1E_PER_PAGE) * (l1_index & (L1E_PER_PAGE-1));
+            return NO_ERROR;
+        }
+    }
+
+    /* not found: allocate it */
+    uint32_t *l2_va = pmm_alloc_kpage();
+    if (!l2_va)
+        return ERR_NO_MEMORY;
+
+    /* wipe it clean to set no access */
+    memset(l2_va, 0, PAGE_SIZE);
+
+    /* get physical address */
+    ret = arm_vtop((vaddr_t)l2_va, &pa);
+    ASSERT(!ret);
+    ASSERT(paddr_to_kvaddr(pa));
+
+    DEBUG_ASSERT(IS_PAGE_ALIGNED((vaddr_t)l2_va));
+    DEBUG_ASSERT(IS_PAGE_ALIGNED(pa));
+
+    /* hand back the slice of the page that belongs to this l1 index */
+    *ppa = pa + (PAGE_SIZE / L1E_PER_PAGE) * (l1_index & (L1E_PER_PAGE-1));
+
+    LTRACEF("allocated pagetable at %p, pa 0x%lx, pa 0x%lx\n", l2_va, pa, *ppa);
+    return NO_ERROR;
+}
+
+
+vm_page_t *address_to_page(paddr_t addr); // move to common
+
+/*
+ * Drop a reference to the L2 table page containing 'l2_pa'.  Because up
+ * to L1E_PER_PAGE adjacent L1 entries may share the page, it is only
+ * freed once none of those entries is still a PAGE_TABLE descriptor.
+ * Caller must already have cleared its own L1 entry.
+ */
+static void put_l2_table(uint32_t l1_index, paddr_t l2_pa)
+{
+    /* check if any l1 entry points to this l2 table */
+    for (uint i = 0; i < L1E_PER_PAGE; i++) {
+        uint32_t tt_entry = arm_kernel_translation_table[ROUNDDOWN(l1_index, L1E_PER_PAGE) + i];
+        if ((tt_entry &  MMU_MEMORY_L1_DESCRIPTOR_MASK)
+                == MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE) {
+            /* still in use by a neighbor; keep the page */
+            return;
+        }
+    }
+
+    /* we can free this l2 table */
+    vm_page_t *page = address_to_page(l2_pa);
+    if (!page)
+        panic("bad page table paddr 0x%lx\n", l2_pa);
+
+    LTRACEF("freeing pagetable at 0x%lx\n", l2_pa);
+    pmm_free_page(page);
+}
+
+#if WITH_ARCH_MMU_PICK_SPOT
+
+static inline bool are_regions_compatible(uint new_region_flags,
+        uint adjacent_region_flags)
+{
+    /*
+     * Adjacent regions may share an L1 section only when their NS
+     * (non-secure) attribute matches; no other flag matters here.
+     */
+    const uint mask = ARCH_MMU_FLAG_NS;
+
+    return (new_region_flags & mask) == (adjacent_region_flags & mask);
+}
+
+
+/*
+ * Pick a virtual address in [base, end] for a new region with 'flags',
+ * given the flags of the regions adjacent to the gap.  Regions whose NS
+ * attribute differs must not share a 1MB section, so the spot is bumped
+ * to a section boundary next to an incompatible predecessor, and the
+ * tail is rejected if it would share a section with an incompatible
+ * successor.  Returns 'end' if nothing fits.
+ */
+vaddr_t arch_mmu_pick_spot(vaddr_t base, uint prev_region_flags,
+                           vaddr_t end,  uint next_region_flags,
+                           vaddr_t align, size_t size, uint flags)
+{
+    LTRACEF("base = 0x%lx, end=0x%lx, align=%ld, size=%zd, flags=0x%x\n",
+            base, end, align, size, flags);
+
+    vaddr_t spot;
+
+    /* section-or-larger alignment already guarantees separation */
+    if (align >= SECTION_SIZE ||
+            are_regions_compatible(flags, prev_region_flags)) {
+        spot = ALIGN(base, align);
+    } else {
+        spot = ALIGN(base, SECTION_SIZE);
+    }
+
+    vaddr_t spot_end = spot + size - 1;
+    if (spot_end < spot || spot_end > end)
+        return end; /* wrapped around or it does not fit */
+
+    /* would the last page share a section with the next region? */
+    if ((spot_end / SECTION_SIZE) == (end / SECTION_SIZE)) {
+        if (!are_regions_compatible(flags, next_region_flags))
+            return end;
+    }
+
+    return spot;
+}
+#endif  /* WITH_ARCH_MMU_PICK_SPOT */
+
+
+/*
+ * Map 'count' pages at vaddr -> paddr with the given ARCH_MMU_FLAG_* set.
+ * Uses 1MB sections where both addresses are section aligned and enough
+ * pages remain; otherwise fills 4K entries in an L2 table allocated on
+ * demand.  Returns the number of pages actually mapped (may be short if
+ * an L2 table allocation fails) or ERR_INVALID_ARGS on misalignment.
+ */
+int arch_mmu_map(vaddr_t vaddr, paddr_t paddr, uint count, uint flags)
+{
+    LTRACEF("vaddr 0x%lx paddr 0x%lx count %u flags 0x%x\n", vaddr, paddr, count, flags);
+
+#if !WITH_ARCH_MMU_PICK_SPOT
+    if (flags & ARCH_MMU_FLAG_NS) {
+        /* WITH_ARCH_MMU_PICK_SPOT is required to support NS memory */
+        panic("NS mem is not supported\n");
+    }
+#endif
+
+    /* paddr and vaddr must be aligned */
+    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
+    DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr));
+    if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr))
+        return ERR_INVALID_ARGS;
+
+    if (count == 0)
+        return NO_ERROR;
+
+    /* see what kind of mapping we can use */
+    int mapped = 0;
+    while (count > 0) {
+        if (IS_SECTION_ALIGNED(vaddr) && IS_SECTION_ALIGNED(paddr) && count >= SECTION_SIZE / PAGE_SIZE) {
+            /* we can use a section */
+
+            /* compute the arch flags for L1 sections */
+            uint arch_flags = mmu_flags_to_l1_arch_flags(flags) |
+                              MMU_MEMORY_L1_DESCRIPTOR_SECTION;
+
+            /* map it */
+            arm_mmu_map_section(paddr, vaddr, arch_flags);
+            count -= SECTION_SIZE / PAGE_SIZE;
+            mapped += SECTION_SIZE / PAGE_SIZE;
+            vaddr += SECTION_SIZE;
+            paddr += SECTION_SIZE;
+        } else {
+            /* will have to use a L2 mapping */
+            uint l1_index = vaddr / SECTION_SIZE;
+            uint32_t tt_entry = arm_kernel_translation_table[l1_index];
+
+            LTRACEF("tt_entry 0x%x\n", tt_entry);
+            switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
+                case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
+                    // XXX will have to break L1 mapping into a L2 page table
+                    PANIC_UNIMPLEMENTED;
+                    break;
+                case MMU_MEMORY_L1_DESCRIPTOR_INVALID: {
+                    paddr_t l2_pa = 0;
+                    if (get_l2_table(l1_index, &l2_pa) != NO_ERROR) {
+                        /* out of memory: return the partial page count */
+                        TRACEF("failed to allocate pagetable\n");
+                        goto done;
+                    }
+                    /* install the new L1 entry, then fall through so the
+                     * PAGE_TABLE case sees the freshly written tt_entry */
+                    tt_entry = l2_pa | MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE;
+                    if (flags & ARCH_MMU_FLAG_NS)
+                        tt_entry |= MMU_MEMORY_L1_PAGETABLE_NON_SECURE;
+
+                    arm_kernel_translation_table[l1_index] = tt_entry;
+                    /* fallthrough */
+                    __attribute__((fallthrough));
+                }
+                case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
+                    uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
+                    LTRACEF("l2_table at %p\n", l2_table);
+
+                    DEBUG_ASSERT(l2_table);
+
+                    // XXX handle 64K pages here
+
+                    /* compute the arch flags for L2 4K pages */
+                    uint arch_flags = mmu_flags_to_l2_arch_flags_small_page(flags);
+
+                    /* fill entries until this section's table is full
+                     * or we run out of pages to map */
+                    uint l2_index = (vaddr % SECTION_SIZE) / PAGE_SIZE;
+                    do {
+                        l2_table[l2_index++] = paddr | arch_flags;
+                        count--;
+                        mapped++;
+                        vaddr += PAGE_SIZE;
+                        paddr += PAGE_SIZE;
+                    } while (count && (l2_index != (SECTION_SIZE / PAGE_SIZE)));
+                    break;
+                }
+                default:
+                    PANIC_UNIMPLEMENTED;
+            }
+        }
+    }
+
+done:
+    DSB;
+    return mapped;
+}
+
+/*
+ * Unmap 'count' pages starting at vaddr.  Whole 1MB sections are torn
+ * down directly; partially unmapping a section mapping is unimplemented.
+ * L2 entries are cleared individually, the TLB invalidated per page, and
+ * an L2 table page is returned to the pmm once every entry in it is
+ * clear.  Returns the number of pages unmapped.
+ */
+int arch_mmu_unmap(vaddr_t vaddr, uint count)
+{
+    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
+    if (!IS_PAGE_ALIGNED(vaddr))
+        return ERR_INVALID_ARGS;
+
+    LTRACEF("vaddr 0x%lx count %u\n", vaddr, count);
+
+    int unmapped = 0;
+    while (count > 0) {
+        uint l1_index = vaddr / SECTION_SIZE;
+        uint32_t tt_entry = arm_kernel_translation_table[l1_index];
+
+        switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
+            case MMU_MEMORY_L1_DESCRIPTOR_INVALID: {
+                /* this top level page is not mapped, move on to the next one */
+                uint page_cnt = MIN((SECTION_SIZE - (vaddr % SECTION_SIZE)) / PAGE_SIZE, count);
+                vaddr += page_cnt * PAGE_SIZE;
+                count -= page_cnt;
+                break;
+            }
+            case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
+                if (IS_SECTION_ALIGNED(vaddr) && count >= SECTION_SIZE / PAGE_SIZE) {
+                    /* we're asked to remove at least all of this section, so just zero it out */
+                    // XXX test for supersection
+                    arm_mmu_unmap_section(vaddr);
+
+                    vaddr += SECTION_SIZE;
+                    count -= SECTION_SIZE / PAGE_SIZE;
+                    unmapped += SECTION_SIZE / PAGE_SIZE;
+                } else {
+                    // XXX handle unmapping just part of a section
+                    // will need to convert to a L2 table and then unmap the parts we are asked to
+                    PANIC_UNIMPLEMENTED;
+                }
+                break;
+            case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
+                uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
+                uint page_idx = (vaddr % SECTION_SIZE) / PAGE_SIZE;
+                uint page_cnt = MIN((SECTION_SIZE / PAGE_SIZE) - page_idx, count);
+
+                /* unmap page run */
+                for (uint i = 0; i < page_cnt; i++) {
+                    l2_table[page_idx++] = 0;
+                }
+                DSB;
+
+                /* invalidate tlb */
+                for (uint i = 0; i < page_cnt; i++) {
+                    arm_invalidate_tlb_mva_no_barrier(vaddr);
+                    vaddr += PAGE_SIZE;
+                }
+                count -= page_cnt;
+                unmapped += page_cnt;
+
+                /*
+                 * Check if all pages related to this l1 entry are deallocated.
+                 * We only need to check pages that we did not clear above starting
+                 * from page_idx and wrapped around SECTION.
+                 */
+                page_cnt = (SECTION_SIZE / PAGE_SIZE) - page_cnt;
+                while (page_cnt) {
+                    if (page_idx == (SECTION_SIZE / PAGE_SIZE))
+                        page_idx = 0;  /* wrap to the start of the table */
+                    if (l2_table[page_idx++])
+                        break;  /* still-live entry: keep the table */
+                    page_cnt--;
+                }
+                if (!page_cnt) {
+                    /* we can kill l1 entry */
+                    arm_mmu_unmap_l1_entry(l1_index);
+
+                    /* try to free l2 page itself */
+                    put_l2_table(l1_index, MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
+                }
+                break;
+            }
+
+            default:
+                // XXX not implemented supersections or L2 tables
+                PANIC_UNIMPLEMENTED;
+        }
+    }
+    arm_after_invalidate_tlb_barrier();
+    return unmapped;
+}
+
+
+#endif // ARM_WITH_MMU
diff --git a/src/bsp/lk/arch/arm/arm/mp.c b/src/bsp/lk/arch/arm/arm/mp.c
new file mode 100644
index 0000000..cb1ae8c
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm/mp.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <arch/mp.h>
+
+#include <assert.h>
+#include <compiler.h>
+#include <trace.h>
+#include <err.h>
+#include <platform/interrupts.h>
+#include <arch/ops.h>
+
+#if WITH_DEV_INTERRUPT_ARM_GIC
+#include <dev/interrupt/arm_gic.h>
+#elif PLATFORM_BCM28XX
+/* bcm28xx has a weird custom interrupt controller for MP */
+extern void bcm2835_send_ipi(uint irq, uint cpu_mask);
+#else
+#error need other implementation of interrupt controller that can ipi
+#endif
+
+#define LOCAL_TRACE 0
+
+#define GIC_IPI_BASE (14)
+
+/*
+ * Send inter-processor interrupt 'ipi' to the cpus in 'target' (bitmask).
+ * On GIC platforms each IPI maps to SGI (GIC_IPI_BASE + ipi); on bcm28xx
+ * the platform's custom mailbox is used.  Returns NO_ERROR.
+ */
+__WEAK status_t arch_mp_send_ipi(mp_cpu_mask_t target, mp_ipi_t ipi)
+{
+    LTRACEF("target 0x%x, ipi %u\n", target, ipi);
+
+#if WITH_DEV_INTERRUPT_ARM_GIC
+    uint gic_ipi_num = ipi + GIC_IPI_BASE;
+
+    /* filter out targets outside of the range of cpus we care about */
+    target &= ((1UL << SMP_MAX_CPUS) - 1);
+    if (target != 0) {
+        LTRACEF("target 0x%x, gic_ipi %u\n", target, gic_ipi_num);
+        u_int flags = 0;
+#if WITH_LIB_SM
+        /* alongside a secure monitor the SGI must be marked non-secure */
+        flags |= ARM_GIC_SGI_FLAG_NS;
+#endif
+        arm_gic_sgi(gic_ipi_num, flags, target);
+    }
+#elif PLATFORM_BCM28XX
+    /* must use the same macro that guards the bcm2835_send_ipi()
+     * declaration above; the previous PLATFORM_BCM2835 spelling never
+     * matched, so this branch compiled out and IPIs were dropped */
+    /* filter out targets outside of the range of cpus we care about */
+    target &= ((1UL << SMP_MAX_CPUS) - 1);
+    if (target != 0) {
+        bcm2835_send_ipi(ipi, target);
+    }
+#endif
+
+    return NO_ERROR;
+}
+
+/* handler for MP_IPI_GENERIC: currently just traces and returns */
+enum handler_return arm_ipi_generic_handler(void *arg)
+{
+    LTRACEF("cpu %u, arg %p\n", arch_curr_cpu_num(), arg);
+
+    return INT_NO_RESCHEDULE;
+}
+
+/* handler for MP_IPI_RESCHEDULE: hand off to the scheduler's mailbox */
+enum handler_return arm_ipi_reschedule_handler(void *arg)
+{
+    LTRACEF("cpu %u, arg %p\n", arch_curr_cpu_num(), arg);
+
+    return mp_mbx_reschedule_irq();
+}
+
+/* per-cpu MP init: register the two IPI vectors on this cpu's interrupt
+ * controller (GIC SGIs are banked per cpu) */
+__WEAK void arch_mp_init_percpu(void)
+{
+#if WITH_DEV_INTERRUPT_ARM_GIC
+    register_int_handler(MP_IPI_GENERIC + GIC_IPI_BASE, &arm_ipi_generic_handler, 0);
+    register_int_handler(MP_IPI_RESCHEDULE + GIC_IPI_BASE, &arm_ipi_reschedule_handler, 0);
+#endif
+}
+
diff --git a/src/bsp/lk/arch/arm/arm/ops.S b/src/bsp/lk/arch/arm/arm/ops.S
new file mode 100644
index 0000000..0eeb566
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm/ops.S
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2008 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+#include <arch/arm/cores.h>
+
+.text
+
+/* void _arch_enable_ints(void); */
+/* unmask IRQs by clearing the CPSR I bit */
+FUNCTION(_arch_enable_ints)
+    mrs     r0, cpsr
+    bic     r0, r0, #(1<<7)     /* clear the I bit */
+    msr     cpsr_c, r0
+    bx      lr
+
+/* void _arch_disable_ints(void); */
+/* mask IRQs by setting the CPSR I bit */
+FUNCTION(_arch_disable_ints)
+    mrs     r0, cpsr
+    orr     r0, r0, #(1<<7)     /* set the I bit */
+    msr     cpsr_c, r0
+    bx      lr
+
+/* int _atomic_swap(int *ptr, int val); */
+/* atomically store val into *ptr and return the previous value */
+FUNCTION(_atomic_swap)
+.L_loop_swap:
+    ldrex   r12, [r0]           /* r12 = old value */
+    strex   r2, r1, [r0]        /* r2 = 0 iff the exclusive store won */
+    cmp     r2, #0
+    bne     .L_loop_swap        /* lost the exclusive: retry */
+
+    /* save old value */
+    mov     r0, r12
+    bx      lr
+
+/* int _atomic_add(int *ptr, int val); */
+/* atomically *ptr += val, returning the previous value */
+FUNCTION(_atomic_add)
+    /* use load/store exclusive */
+.L_loop_add:
+    ldrex   r12, [r0]
+    add     r2, r12, r1
+    strex   r3, r2, [r0]
+    cmp     r3, #0
+    bne     .L_loop_add         /* lost the exclusive: retry */
+
+    /* save old value */
+    mov     r0, r12
+    bx      lr
+
+/* int _atomic_and(int *ptr, int val); */
+/* atomically *ptr &= val, returning the previous value */
+FUNCTION(_atomic_and)
+    /* use load/store exclusive */
+.L_loop_and:
+    ldrex   r12, [r0]
+    and     r2, r12, r1
+    strex   r3, r2, [r0]
+    cmp     r3, #0
+    bne     .L_loop_and         /* lost the exclusive: retry */
+
+    /* save old value */
+    mov     r0, r12
+    bx      lr
+
+/* int _atomic_or(int *ptr, int val); */
+/* atomically *ptr |= val, returning the previous value */
+FUNCTION(_atomic_or)
+    /* use load/store exclusive */
+.L_loop_or:
+    ldrex   r12, [r0]
+    orr     r2, r12, r1
+    strex   r3, r2, [r0]
+    cmp     r3, #0
+    bne     .L_loop_or          /* lost the exclusive: retry */
+
+    /* save old value */
+    mov     r0, r12
+    bx      lr
+
+/* single-attempt lock: returns 0 in r0 on success, non-zero otherwise
+ * (either the lock's held value or a failed strex status) */
+FUNCTION(arch_spin_trylock)
+    mov     r2, r0
+    mov     r1, #1
+    ldrex   r0, [r2]            /* r0 = current lock value */
+    cmp     r0, #0
+    strexeq r0, r1, [r2]        /* free: try to claim; r0 = strex status */
+    dmb                         /* acquire barrier */
+    bx      lr
+
+/* spin until the lock at [r0] is acquired, sleeping in wfe while held */
+FUNCTION(arch_spin_lock)
+    mov     r1, #1
+1:
+    ldrex   r2, [r0]
+    cmp     r2, #0
+    wfene                       /* held: wait for the unlocker's sev */
+    strexeq r2, r1, [r0]
+    cmpeq   r2, #0
+    bne     1b
+    dmb                         /* acquire barrier */
+    bx      lr
+
+/* release the lock at [r0] and wake cpus waiting in wfe */
+FUNCTION(arch_spin_unlock)
+    mov     r1, #0
+    dmb                         /* release barrier */
+    str     r1, [r0]
+    dsb
+    sev                         /* wake waiters */
+    bx      lr
+
+/* void arch_idle(); */
+/* idle the cpu until the next interrupt */
+FUNCTION(arch_idle)
+#if ARM_ARCH_LEVEL >= 7
+    wfi
+#elif ARM_ARCH_LEVEL == 6
+    mov     r0, #0
+    mcr     p15, 0, r0, c7, c0, #4  /* ARMv6 wait-for-interrupt */
+#else
+#error unknown cpu
+#endif
+    bx      lr
+
+/* void arm_invalidate_tlb(void) */
+/* invalidate the entire unified TLB via CP15 c8,c7 */
+FUNCTION(arm_invalidate_tlb)
+    mov     r0, #0
+    mcr     p15, 0, r0, c8, c7, 0
+    bx      lr
+
diff --git a/src/bsp/lk/arch/arm/arm/start.S b/src/bsp/lk/arch/arm/arm/start.S
new file mode 100644
index 0000000..7670c59
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm/start.S
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2008-2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+#include <arch/arm/cores.h>
+#include <arch/arm/mmu.h>
+#include <arch/arch_ops.h>
+#include <kernel/vm.h>
+
+.section ".text.boot"
+/* exception vector table; placed at the very start of the image */
+.globl _start
+_start:
+    b   platform_reset
+    b   arm_undefined
+    b   arm_syscall
+    b   arm_prefetch_abort
+    b   arm_data_abort
+    b   arm_reserved
+    b   arm_irq
+    b   arm_fiq
+#if WITH_SMP
+    b   arm_reset
+#endif
+
+/* weak default; a platform may provide its own strong platform_reset */
+.weak platform_reset
+platform_reset:
+    /* Fall through for the weak symbol */
+
+.globl arm_reset
+arm_reset:
+    /* do some early cpu setup */
+    mrc     p15, 0, r12, c1, c0, 0
+    /* i/d cache disable, mmu disabled */
+    bic     r12, #(1<<12)
+    bic     r12, #(1<<2 | 1<<0)
+#if WITH_KERNEL_VM
+    /* enable caches so atomics and spinlocks work */
+    orr     r12, r12, #(1<<12)
+    orr     r12, r12, #(1<<2)
+#endif // WITH_KERNEL_VM
+    mcr     p15, 0, r12, c1, c0, 0
+
+    /* calculate the physical offset from our eventual virtual location */
+    /* r11 = (physical pc) - (linked address); kept live through early boot */
+.Lphys_offset:
+    ldr     r4, =.Lphys_offset
+    adr     r11, .Lphys_offset
+    sub     r11, r11, r4
+
+#if WITH_SMP
+    /* figure out our cpu number */
+    mrc     p15, 0, r12, c0, c0, 5 /* read MPIDR */
+
+    /* mask off the bottom bits to test cluster number:cpu number */
+    ubfx    r12, r12, #0, #SMP_CPU_ID_BITS
+
+    /* if we're not cpu 0:0, fall into a trap and wait */
+    teq     r12, #0
+    movne   r0, r12
+    bne     arm_secondary_setup
+#endif // WITH_SMP
+
+#if WITH_CPU_EARLY_INIT
+    /* call platform/arch/etc specific init code */
+    bl      __cpu_early_init
+#endif // WITH_CPU_EARLY_INIT
+
+#if WITH_NO_PHYS_RELOCATION
+    /* assume that image is properly loaded in physical memory */
+#else
+    /* see if we need to relocate to our proper location in physical memory */
+    adr     r4, _start                           /* this emits sub r4, pc, #constant */
+    ldr     r5, =(MEMBASE + KERNEL_LOAD_OFFSET)  /* calculate the binary's physical load address */
+    subs    r12, r4, r5                          /* calculate the delta between where we're loaded and the proper spot */
+    beq     .Lrelocate_done
+
+    /* we need to relocate ourselves to the proper spot */
+    ldr     r6, =__data_end
+    ldr     r7, =(KERNEL_BASE - MEMBASE)
+    sub     r6, r7
+    add     r6, r12
+    /* r6 = physical end of the image at its current (wrong) location */
+
+.Lrelocate_loop:
+    ldr     r7, [r4], #4
+    str     r7, [r5], #4
+    cmp     r4, r6
+    bne     .Lrelocate_loop
+
+    /* we're relocated, jump to the right address */
+    sub     pc, r12
+    nop     /* skipped in the add to pc */
+
+    /* recalculate the physical offset */
+    sub     r11, r11, r12
+
+.Lrelocate_done:
+#endif // !WITH_NO_PHYS_RELOCATION
+
+#if ARM_WITH_MMU
+.Lsetup_mmu:
+
+    /* set up the mmu according to mmu_initial_mappings */
+
+    /* load the base of the translation table and clear the table */
+    ldr     r4, =arm_kernel_translation_table
+    add     r4, r4, r11
+        /* r4 = physical address of translation table */
+
+    mov     r5, #0
+    mov     r6, #0
+
+    /* walk through all the entries in the translation table, setting them up */
+0:
+    str     r5, [r4, r6, lsl #2]
+    add     r6, #1
+    cmp     r6, #4096
+    bne     0b
+
+    /* load the address of the mmu_initial_mappings table and start processing */
+    ldr     r5, =mmu_initial_mappings
+    add     r5, r5, r11
+        /* r5 = physical address of mmu initial mapping table */
+
+.Linitial_mapping_loop:
+    ldmia   r5!, { r6-r10 }
+        /* r6 = phys, r7 = virt, r8 = size, r9 = flags, r10 = name */
+
+    /* round size up to 1MB alignment */
+    ubfx        r10, r6, #0, #20
+    add     r8, r8, r10
+    add     r8, r8, #(1 << 20)
+    sub     r8, r8, #1
+
+    /* mask all the addresses and sizes to 1MB boundaries */
+    lsr     r6, #20  /* r6 = physical address / 1MB */
+    lsr     r7, #20  /* r7 = virtual address / 1MB */
+    lsr     r8, #20  /* r8 = size in 1MB chunks */
+
+    /* if size == 0, end of list */
+    cmp     r8, #0
+    beq     .Linitial_mapping_done
+
+    /* set up the flags */
+    ldr     r10, =MMU_KERNEL_L1_PTE_FLAGS
+    teq     r9, #MMU_INITIAL_MAPPING_FLAG_UNCACHED
+    ldreq   r10, =MMU_INITIAL_MAP_STRONGLY_ORDERED
+    beq     0f
+    teq     r9, #MMU_INITIAL_MAPPING_FLAG_DEVICE
+    ldreq   r10, =MMU_INITIAL_MAP_DEVICE
+        /* r10 = mmu entry flags */
+
+0:
+    orr     r12, r10, r6, lsl #20
+        /* r12 = phys addr | flags */
+
+    /* store into appropriate translation table entry */
+    str     r12, [r4, r7, lsl #2]
+
+    /* loop until we're done */
+    add     r6, #1
+    add     r7, #1
+    subs    r8, #1
+    bne     0b
+
+    b       .Linitial_mapping_loop
+
+.Linitial_mapping_done:
+
+#if MMU_WITH_TRAMPOLINE
+    /* move arm_kernel_translation_table address to r8 and
+     * set cacheable attributes on translation walk
+     */
+    orr     r8, r4, #MMU_TTBRx_FLAGS
+
+    /* Prepare tt_trampoline page table */
+    /* Calculate pagetable physical addresses */
+    ldr     r4, =tt_trampoline  /* r4 = tt_trampoline vaddr */
+    add     r4, r4, r11     /* r4 = tt_trampoline paddr */
+
+    /* Zero tt_trampoline translation tables */
+    mov     r6, #0
+    mov     r7, #0
+1:
+    str     r7, [r4, r6, lsl#2]
+    add     r6, #1
+    cmp     r6, #0x1000
+    blt     1b
+
+    /* Setup 1M section mapping at
+     * phys  -> phys   and
+     * virt  -> phys
+     */
+    lsr     r6, pc, #20     /* r6 = paddr index */
+    ldr     r7, =MMU_KERNEL_L1_PTE_FLAGS
+    add     r7, r7, r6, lsl #20 /* r7 = pt entry */
+
+    str     r7, [r4, r6, lsl #2]    /* tt_trampoline[paddr index] = pt entry */
+
+    rsb     r6, r11, r6, lsl #20    /* r6 = vaddr */
+    str     r7, [r4, r6, lsr #(20 - 2)] /* tt_trampoline[vaddr index] = pt entry */
+#endif // MMU_WITH_TRAMPOLINE
+
+    /* set up the mmu */
+    bl      .Lmmu_setup
+#endif // ARM_WITH_MMU
+
+    /* at this point we're running at our final location in virtual memory (if enabled) */
+.Lstack_setup:
+    /* set up the stack for irq, fiq, abort, undefined, system/user, and lastly supervisor mode */
+    /* exception-mode stacks are left at 0 here; cpu init sets them later */
+    mov     r12, #0
+
+    cpsid   i,#0x12       /* irq */
+    mov     sp, r12
+
+    cpsid   i,#0x11       /* fiq */
+    mov     sp, r12
+
+    cpsid   i,#0x17       /* abort */
+    mov     sp, r12
+
+    cpsid   i,#0x1b       /* undefined */
+    mov     sp, r12
+
+    cpsid   i,#0x1f       /* system */
+    mov     sp, r12
+
+    cpsid   i,#0x13       /* supervisor */
+    ldr     r12, =abort_stack
+    add     r12, #ARCH_DEFAULT_STACK_SIZE
+    mov     sp, r12
+
+    /* stay in supervisor mode from now on out */
+
+    /* copy the initialized data segment out of rom if necessary */
+    ldr     r4, =__data_start_rom
+    ldr     r5, =__data_start
+    ldr     r6, =__data_end
+
+    cmp     r4, r5
+    beq     .L__do_bss
+
+.L__copy_loop:
+    cmp     r5, r6
+    ldrlt   r7, [r4], #4
+    strlt   r7, [r5], #4
+    blt     .L__copy_loop
+
+.L__do_bss:
+    /* clear out the bss */
+    ldr     r4, =__bss_start
+    ldr     r5, =__bss_end
+    mov     r6, #0
+.L__bss_loop:
+    cmp     r4, r5
+    strlt   r6, [r4], #4
+    blt     .L__bss_loop
+
+    /* enter C; lk_main never returns, park the cpu if it does */
+    bl      lk_main
+    b       .
+
+#if WITH_KERNEL_VM
+    /* per cpu mmu setup, shared between primary and secondary cpus
+       args:
+       r4 == translation table physical
+       r8 == final translation table physical (if using trampoline)
+       clobbers r12; returns via lr adjusted from phys to virt
+    */
+.Lmmu_setup:
+    /* Invalidate TLB */
+    mov     r12, #0
+    mcr     p15, 0, r12, c8, c7, 0
+    isb
+
+    /* Write 0 to TTBCR */
+    mcr     p15, 0, r12, c2, c0, 2
+    isb
+
+    /* Set cacheable attributes on translation walk */
+    orr     r12, r4, #MMU_TTBRx_FLAGS
+
+    /* Write ttbr with phys addr of the translation table */
+    mcr     p15, 0, r12, c2, c0, 0
+    isb
+
+    /* Write DACR */
+    /* domain 0 = client: permission bits in the page tables are honored */
+    mov     r12, #0x1
+    mcr     p15, 0, r12, c3, c0, 0
+    isb
+
+    /* Read SCTLR into r12 */
+    mrc     p15, 0, r12, c1, c0, 0
+
+    /* Disable TRE/AFE */
+    bic     r12, #(1<<29 | 1<<28)
+
+    /* Turn on the MMU */
+    orr     r12, #0x1
+
+    /* Write back SCTLR */
+    mcr     p15, 0, r12, c1, c0, 0
+    isb
+
+    /* Jump to virtual code address */
+    ldr     pc, =1f
+1:
+
+#if MMU_WITH_TRAMPOLINE
+    /* Switch to main page table */
+    mcr     p15, 0, r8, c2, c0, 0
+    isb
+#endif
+
+    /* Invalidate TLB */
+    mov     r12, #0
+    mcr     p15, 0, r12, c8, c7, 0
+    isb
+
+    /* assume lr was in physical memory, adjust it before returning */
+    sub     lr, r11
+    bx      lr
+#endif
+
+#if WITH_SMP
+    /* secondary cpu entry point */
+    /* r0 holds cpu number */
+    /* r11 hold phys offset */
+FUNCTION(arm_secondary_setup)
+    /* all other cpus, trap and wait to be released */
+1:
+    wfe
+    ldr     r12, =arm_boot_cpu_lock
+    add     r12, r12, r11
+    ldr     r12, [r12]
+    cmp     r12, #0
+    bne     1b
+
+    /* fold cluster:cpu into a linear cpu number */
+    and     r1, r0, #0xff
+    cmp     r1, #(1 << SMP_CPU_CLUSTER_SHIFT)
+    bge     unsupported_cpu_trap
+    bic     r0, r0, #0xff
+    orr     r0, r1, r0, LSR #(8 - SMP_CPU_CLUSTER_SHIFT)
+    /* NOTE(review): cbz is a Thumb-only encoding; confirm this file is
+     * assembled in Thumb mode on all supported targets */
+    ldr     r1, =linear_cpuid_map
+    add     r1, r1, r11
+    ldr     r1, [r1]
+    cbz     r1, .Lno_cpuid_remap
+    add     r1, r1, r0
+    ldrb    r0, [r1]
+
+.Lno_cpuid_remap:
+    cmp     r0, #SMP_MAX_CPUS
+    bge     unsupported_cpu_trap
+    mov     r5, r0 /* save cpu num */
+
+    /* set up the stack for irq, fiq, abort, undefined, system/user, and lastly supervisor mode */
+    mov     r1, #0
+    cpsid   i,#0x12       /* irq */
+    mov     sp, r1
+
+    cpsid   i,#0x11       /* fiq */
+    mov     sp, r1
+
+    cpsid   i,#0x17       /* abort */
+    mov     sp, r1
+
+    cpsid   i,#0x1b       /* undefined */
+    mov     sp, r1
+
+    cpsid   i,#0x1f       /* system */
+    mov     sp, r1
+
+    cpsid   i,#0x13       /* supervisor */
+    /* sp = abort_stack + (cpu + 1) * ARCH_DEFAULT_STACK_SIZE */
+    ldr     r1, =abort_stack
+    mov     r2, #ARCH_DEFAULT_STACK_SIZE
+    add     r0, #1
+    mul     r2, r2, r0
+    add     r1, r2
+
+    mov     sp, r1
+
+#if WITH_KERNEL_VM
+    /* load the physical base of the translation table and clear the table */
+    ldr     r4, =arm_kernel_translation_table
+    add     r4, r4, r11
+
+#if MMU_WITH_TRAMPOLINE
+    /* move arm_kernel_translation_table address to r8 and
+     * set cacheable attributes on translation walk
+     */
+    orr     r8, r4, #MMU_TTBRx_FLAGS
+
+    /* Prepare tt_trampoline page table */
+    /* Calculate pagetable physical addresses */
+    ldr     r4, =tt_trampoline  /* r4 = tt_trampoline vaddr */
+    add     r4, r4, r11     /* r4 = tt_trampoline paddr */
+#endif
+
+    /* set up the mmu on this cpu and switch to virtual memory */
+    bl      .Lmmu_setup
+#endif
+
+    /* stay in supervisor and call into arm arch code to continue setup */
+    mov     r0, r5
+    bl      arm_secondary_entry
+
+    /* cpus above the number we claim to support get trapped here */
+unsupported_cpu_trap:
+    wfe
+    b       unsupported_cpu_trap
+#endif
+
+.ltorg
+
+#if WITH_KERNEL_VM && MMU_WITH_TRAMPOLINE
+.section ".bss.prebss.translation_table"
+.align 14
+/* 16KB identity-map L1 table used only while turning the MMU on */
+DATA(tt_trampoline)
+    .skip 16384
+#endif
+
+.data
+.align 2
diff --git a/src/bsp/lk/arch/arm/arm/thread.c b/src/bsp/lk/arch/arm/arm/thread.c
new file mode 100644
index 0000000..200de77
--- /dev/null
+++ b/src/bsp/lk/arch/arm/arm/thread.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2008-2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <debug.h>
+#include <trace.h>
+#include <kernel/thread.h>
+#include <arch/arm.h>
+
+/* callee-saved register frame built on a thread's stack at creation and
+ * saved/restored by arm_context_switch; field order must stay in sync
+ * with that assembly routine */
+struct context_switch_frame {
+    vaddr_t r4;
+    vaddr_t r5;
+    vaddr_t r6;
+    vaddr_t r7;
+    vaddr_t r8;
+    vaddr_t r9;
+    vaddr_t r10;
+    vaddr_t r11;
+    vaddr_t lr;   /* resume pc; initial_thread_func for new threads */
+};
+
+extern void arm_context_switch(addr_t *old_sp, addr_t new_sp);
+
+/* first code run by every new thread: drop the thread lock held across
+ * the context switch, enable interrupts, run the thread entry point,
+ * then exit with its return value; never returns */
+static void initial_thread_func(void) __NO_RETURN;
+static void initial_thread_func(void)
+{
+    int ret;
+
+//  dprintf("initial_thread_func: thread %p calling %p with arg %p\n", current_thread, current_thread->entry, current_thread->arg);
+//  dump_thread(current_thread);
+
+    /* release the thread lock that was implicitly held across the reschedule */
+    spin_unlock(&thread_lock);
+    arch_enable_ints();
+
+    thread_t *ct = get_current_thread();
+    ret = ct->entry(ct->arg);
+
+//  dprintf("initial_thread_func: thread %p exiting with %d\n", current_thread, ret);
+
+    thread_exit(ret);
+}
+
+/* prepare a newly created thread so the first context switch into it
+ * "returns" into initial_thread_func */
+void arch_thread_initialize(thread_t *t)
+{
+    // create a default stack frame on the stack
+    vaddr_t stack_top = (vaddr_t)t->stack + t->stack_size;
+
+    // make sure the top of the stack is 8 byte aligned for EABI compliance
+    stack_top = ROUNDDOWN(stack_top, 8);
+
+    struct context_switch_frame *frame = (struct context_switch_frame *)(stack_top);
+    frame--;
+
+    // fill it in
+    memset(frame, 0, sizeof(*frame));
+    frame->lr = (vaddr_t)&initial_thread_func;
+
+    // set the stack pointer
+    t->arch.sp = (vaddr_t)frame;
+
+#if ARM_WITH_VFP
+    arm_fpu_thread_initialize(t);
+#endif
+}
+
+/* switch cpu state from oldthread to newthread: swap FPU context if
+ * enabled, then swap the callee-saved registers/stack in assembly */
+void arch_context_switch(thread_t *oldthread, thread_t *newthread)
+{
+//  TRACEF("arch_context_switch: cpu %u old %p (%s), new %p (%s)\n", arch_curr_cpu_num(), oldthread, oldthread->name, newthread, newthread->name);
+#if ARM_WITH_VFP
+    arm_fpu_thread_swap(oldthread, newthread);
+#endif
+
+    arm_context_switch(&oldthread->arch.sp, newthread->arch.sp);
+
+}
+
+/* dump the arch-specific state of a thread; a running thread's sp is
+ * live in the cpu, so only non-running threads are shown */
+void arch_dump_thread(thread_t *t)
+{
+    if (t->state == THREAD_RUNNING)
+        return;
+
+    dprintf(INFO, "\tarch: ");
+    dprintf(INFO, "sp 0x%lx\n", t->arch.sp);
+}
+
diff --git a/src/bsp/lk/arch/arm/include/arch/arch_ops.h b/src/bsp/lk/arch/arm/include/arch/arch_ops.h
new file mode 100644
index 0000000..c513532
--- /dev/null
+++ b/src/bsp/lk/arch/arm/include/arch/arch_ops.h
@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2008-2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#ifndef ASSEMBLY
+
+#include <stdbool.h>
+#include <compiler.h>
+#include <reg.h>
+#include <arch/arm.h>
+
+__BEGIN_CDECLS;
+
+#if ARM_ISA_ARMV7 || (ARM_ISA_ARMV6 && !__thumb__)
+#define USE_GCC_ATOMICS 0
+#define ENABLE_CYCLE_COUNTER 1
+
+// override of some routines
+/* Unmask IRQs. CF (compiler fence, defined elsewhere — presumably in
+ * compiler.h) keeps the compiler from sinking memory accesses past the
+ * point where interrupts become deliverable. */
+static inline void arch_enable_ints(void)
+{
+    CF;
+    __asm__ volatile("cpsie i");
+}
+
+/* Mask IRQs; fence afterwards so following accesses stay inside the
+ * interrupts-disabled region. */
+static inline void arch_disable_ints(void)
+{
+    __asm__ volatile("cpsid i");
+    CF;
+}
+
+/* Return true if IRQs are currently masked: PRIMASK bit 0 on v7-M,
+ * CPSR.I (bit 7) on A/R-profile cores. */
+static inline bool arch_ints_disabled(void)
+{
+    unsigned int state;
+
+#if ARM_ISA_ARMV7M
+    __asm__ volatile("mrs %0, primask" : "=r"(state));
+    state &= 0x1;
+#else
+    __asm__ volatile("mrs %0, cpsr" : "=r"(state));
+    state &= (1<<7);
+#endif
+
+    return !!state;
+}
+
+/* Unmask FIQs (CPSR.F). */
+static inline void arch_enable_fiqs(void)
+{
+    CF;
+    __asm__ volatile("cpsie f");
+}
+
+/* Mask FIQs (CPSR.F). */
+static inline void arch_disable_fiqs(void)
+{
+    __asm__ volatile("cpsid f");
+    CF;
+}
+
+/* Return true if FIQs are currently masked (CPSR.F, bit 6). */
+static inline bool arch_fiqs_disabled(void)
+{
+    unsigned int state;
+
+    __asm__ volatile("mrs %0, cpsr" : "=r"(state));
+    state &= (1<<6);
+
+    return !!state;
+}
+
+/* Report whether the cpu is executing inside an interrupt handler. */
+static inline bool arch_in_int_handler(void)
+{
+    /* set by the interrupt glue to track that the cpu is inside a handler */
+    extern bool __arm_in_handler;
+
+    return __arm_in_handler;
+}
+
+/* Atomically add val to *ptr and return the PREVIOUS value.
+ * ldrex/strex retry loop: strex writes non-zero to [test] if the exclusive
+ * monitor was lost between the load and store, in which case we retry. */
+static inline int atomic_add(volatile int *ptr, int val)
+{
+#if USE_GCC_ATOMICS
+    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
+#else
+    int old;
+    int temp;
+    int test;
+
+    do {
+        __asm__ volatile(
+            "ldrex	%[old], [%[ptr]]\n"
+            "adds	%[temp], %[old], %[val]\n"
+            "strex	%[test], %[temp], [%[ptr]]\n"
+            : [old]"=&r" (old), [temp]"=&r" (temp), [test]"=&r" (test)
+            : [ptr]"r" (ptr), [val]"r" (val)
+            : "memory", "cc");
+
+    } while (test != 0);
+
+    return old;
+#endif
+}
+
+/* Atomically OR val into *ptr and return the PREVIOUS value
+ * (same ldrex/strex retry pattern as atomic_add). */
+static inline int atomic_or(volatile int *ptr, int val)
+{
+#if USE_GCC_ATOMICS
+    return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
+#else
+    int old;
+    int temp;
+    int test;
+
+    do {
+        __asm__ volatile(
+            "ldrex	%[old], [%[ptr]]\n"
+            "orrs	%[temp], %[old], %[val]\n"
+            "strex	%[test], %[temp], [%[ptr]]\n"
+            : [old]"=&r" (old), [temp]"=&r" (temp), [test]"=&r" (test)
+            : [ptr]"r" (ptr), [val]"r" (val)
+            : "memory", "cc");
+
+    } while (test != 0);
+
+    return old;
+#endif
+}
+
+/* Atomically AND val into *ptr and return the PREVIOUS value
+ * (same ldrex/strex retry pattern as atomic_add). */
+static inline int atomic_and(volatile int *ptr, int val)
+{
+#if USE_GCC_ATOMICS
+    return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
+#else
+    int old;
+    int temp;
+    int test;
+
+    do {
+        __asm__ volatile(
+            "ldrex	%[old], [%[ptr]]\n"
+            "ands	%[temp], %[old], %[val]\n"
+            "strex	%[test], %[temp], [%[ptr]]\n"
+            : [old]"=&r" (old), [temp]"=&r" (temp), [test]"=&r" (test)
+            : [ptr]"r" (ptr), [val]"r" (val)
+            : "memory", "cc");
+
+    } while (test != 0);
+
+    return old;
+#endif
+}
+
+/* Atomically store val to *ptr and return the PREVIOUS value.
+ * No "cc" clobber needed: unlike the arithmetic atomics, no flag-setting
+ * instruction is used here. */
+static inline int atomic_swap(volatile int *ptr, int val)
+{
+#if USE_GCC_ATOMICS
+    return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
+#else
+    int old;
+    int test;
+
+    do {
+        __asm__ volatile(
+            "ldrex	%[old], [%[ptr]]\n"
+            "strex	%[test], %[val], [%[ptr]]\n"
+            : [old]"=&r" (old), [test]"=&r" (test)
+            : [ptr]"r" (ptr), [val]"r" (val)
+            : "memory");
+
+    } while (test != 0);
+
+    return old;
+#endif
+}
+
+/* Atomic compare-and-exchange: store newval to *ptr only if *ptr == oldval.
+ * Returns the value observed at *ptr (== oldval on success).
+ * test is pre-cleared to 0, so on a compare mismatch no store is attempted
+ * and the loop exits immediately; the loop only retries when the strex of
+ * a matching compare loses the exclusive monitor.
+ * NOTE(review): unlike the other atomics this has no USE_GCC_ATOMICS
+ * variant — confirm that is intentional. */
+static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval)
+{
+    int old;
+    int test;
+
+    do {
+        __asm__ volatile(
+            "ldrex	%[old], [%[ptr]]\n"
+            "mov	%[test], #0\n"
+            "teq	%[old], %[oldval]\n"
+#if (ARM_ISA_ARMV7M || __thumb__)
+            /* thumb has no strexeq; branch around the store instead */
+            "bne	0f\n"
+            "strex	%[test], %[newval], [%[ptr]]\n"
+            "0:\n"
+#else
+            "strexeq %[test], %[newval], [%[ptr]]\n"
+#endif
+            : [old]"=&r" (old), [test]"=&r" (test)
+            : [ptr]"r" (ptr), [oldval]"Ir" (oldval), [newval]"r" (newval)
+            : "cc");
+
+    } while (test != 0);
+
+    return old;
+}
+
+/* Read a free-running cycle counter: the DWT CYCCNT register on v7-M,
+ * the PMU cycle counter (cp15 c9,c13,0) on v7-A/R, 0 elsewhere.
+ * NOTE(review): assumes the respective counter has been enabled by other
+ * init code — not done here; confirm. */
+static inline uint32_t arch_cycle_count(void)
+{
+#if ARM_ISA_ARMV7M
+#if ENABLE_CYCLE_COUNTER
+#define DWT_CYCCNT (0xE0001004)
+    return *REG32(DWT_CYCCNT);
+#else
+    return 0;
+#endif
+#elif ARM_ISA_ARMV7
+    uint32_t count;
+    __asm__ volatile("mrc		p15, 0, %0, c9, c13, 0"
+                     : "=r" (count)
+                    );
+    return count;
+#else
+//#warning no arch_cycle_count implementation
+    return 0;
+#endif
+}
+
+#if WITH_SMP && ARM_ISA_ARMV7
+/* optional table translating mpidr-derived ids to dense cpu numbers */
+extern const uint8_t *linear_cpuid_map;
+
+/* Current cpu number on SMP v7: packs the MPIDR cluster and cpu affinity
+ * fields into a single index, then optionally remaps it through
+ * linear_cpuid_map to a linear id.
+ * NOTE(review): the shift arithmetic assumes the cluster id lives in
+ * MPIDR bits [15:8] and SMP_CPU_ID_BITS covers both fields — confirm
+ * against the platform's SMP_CPU_* configuration. */
+static inline uint arch_curr_cpu_num(void)
+{
+    uint32_t mpidr = arm_read_mpidr();
+    mpidr = ((mpidr & ((1U << SMP_CPU_ID_BITS) - 1)) >> 8 << SMP_CPU_CLUSTER_SHIFT) | (mpidr & 0xff);
+    return linear_cpuid_map ? *(linear_cpuid_map + mpidr) : mpidr;
+}
+#else
+/* uniprocessor build: always cpu 0 */
+static inline uint arch_curr_cpu_num(void)
+{
+    return 0;
+}
+#endif
+
+/* defined in kernel/thread.h */
+
+#if !ARM_ISA_ARMV7M
+/* use the cpu local thread context pointer to store current_thread */
+/* TPIDRPRW is a per-cpu cp15 register, so each core tracks its own
+ * current thread without memory traffic */
+static inline struct thread *get_current_thread(void)
+{
+    return (struct thread *)arm_read_tpidrprw();
+}
+
+static inline void set_current_thread(struct thread *t)
+{
+    arm_write_tpidrprw((uint32_t)t);
+}
+#else // ARM_ISA_ARMV7M
+
+/* use a global pointer to store the current_thread */
+/* v7-M has no TPIDRPRW; fine on a single-core microcontroller */
+extern struct thread *_current_thread;
+
+static inline struct thread *get_current_thread(void)
+{
+    return _current_thread;
+}
+
+static inline void set_current_thread(struct thread *t)
+{
+    _current_thread = t;
+}
+
+#endif // !ARM_ISA_ARMV7M
+
+#elif ARM_ISA_ARMV6M // cortex-m0 cortex-m0+
+
+
+/* NOTE(review): ARMv6-M (cortex-m0/m0+) has no FIQ and no CPSR-form
+ * special register access; "cpsie f" / "cpsid f" and "mrs %0, cpsr" below
+ * look carried over from the A/R-profile code — confirm these assemble
+ * and behave as intended for this target. */
+static inline void arch_enable_fiqs(void)
+{
+    CF;
+    __asm__ volatile("cpsie f");
+}
+
+static inline void arch_disable_fiqs(void)
+{
+    __asm__ volatile("cpsid f");
+    CF;
+}
+
+static inline bool arch_fiqs_disabled(void)
+{
+    unsigned int state;
+
+    __asm__ volatile("mrs %0, cpsr" : "=r"(state));
+    state &= (1<<6);
+
+    return !!state;
+}
+
+
+
+/* Unmask interrupts (PRIMASK); CF is a compiler fence keeping accesses
+ * from moving out of the protected region. */
+static inline void arch_enable_ints(void)
+{
+    CF;
+    __asm__ volatile("cpsie i");
+}
+/* Mask interrupts via PRIMASK. */
+static inline void arch_disable_ints(void)
+{
+    __asm__ volatile("cpsid i");
+    CF;
+}
+
+/* True if PRIMASK bit 0 is set, i.e. interrupts are masked. */
+static inline bool arch_ints_disabled(void)
+{
+    unsigned int state;
+
+    __asm__ volatile("mrs %0, primask" : "=r"(state));
+    state &= 0x1;
+    return !!state;
+}
+
+/* v6-M has no ldrex/strex, so the atomics below create a critical section
+ * by masking interrupts around a plain read-modify-write. Sufficient only
+ * on a uniprocessor. Each op returns the PREVIOUS value of *ptr and
+ * restores the caller's interrupt-mask state (only re-enabling if
+ * interrupts were enabled on entry). */
+static inline int atomic_add(volatile int *ptr, int val)
+{
+    int temp;
+    bool state;
+
+    state = arch_ints_disabled();
+    arch_disable_ints();
+    temp = *ptr;
+    *ptr = temp + val;
+    if (!state)
+        arch_enable_ints();
+    return temp;
+}
+
+static inline  int atomic_and(volatile int *ptr, int val)
+{
+    int temp;
+    bool state;
+
+    state = arch_ints_disabled();
+    arch_disable_ints();
+    temp = *ptr;
+    *ptr = temp & val;
+    if (!state)
+        arch_enable_ints();
+    return temp;
+}
+
+static inline int atomic_or(volatile int *ptr, int val)
+{
+    int temp;
+    bool state;
+
+    state = arch_ints_disabled();
+    arch_disable_ints();
+    temp = *ptr;
+    *ptr = temp | val;
+    if (!state)
+        arch_enable_ints();
+    return temp;
+}
+
+static inline int atomic_swap(volatile int *ptr, int val)
+{
+    int temp;
+    bool state;
+
+    state = arch_ints_disabled();
+    arch_disable_ints();
+    temp = *ptr;
+    *ptr = val;
+    if (!state)
+        arch_enable_ints();
+    return temp;
+}
+
+/* Store newval only if *ptr == oldval; returns the observed value. */
+static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval)
+{
+    int temp;
+    bool state;
+
+    state = arch_ints_disabled();
+    arch_disable_ints();
+    temp = *ptr;
+    if (temp == oldval) {
+        *ptr = newval;
+    }
+    if (!state)
+        arch_enable_ints();
+    return temp;
+}
+
+/* no cycle counter available on this profile */
+static inline uint32_t arch_cycle_count(void)
+{
+    return 0;
+}
+
+/* uniprocessor: always cpu 0 */
+static inline uint arch_curr_cpu_num(void)
+{
+    return 0;
+}
+
+/* use a global pointer to store the current_thread */
+extern struct thread *_current_thread;
+
+static inline struct thread *get_current_thread(void)
+{
+    return _current_thread;
+}
+
+static inline void set_current_thread(struct thread *t)
+{
+    _current_thread = t;
+}
+
+#else // pre-armv6 || (armv6 & thumb)
+
+/* for pre-armv6 the bodies of these are too big to inline, call an assembly stub version */
+void _arch_enable_ints(void);
+void _arch_disable_ints(void);
+
+/* atomic primitives implemented as assembly stubs; each returns the
+ * previous value of *ptr (duplicate _atomic_add declaration removed) */
+int _atomic_add(volatile int *ptr, int val);
+int _atomic_and(volatile int *ptr, int val);
+int _atomic_or(volatile int *ptr, int val);
+int _atomic_swap(volatile int *ptr, int val);
+int _atomic_cmpxchg(volatile int *ptr, int oldval, int newval);
+
+uint32_t _arch_cycle_count(void);
+
+static inline int atomic_add(volatile int *ptr, int val) { return _atomic_add(ptr, val); }
+static inline int atomic_and(volatile int *ptr, int val) { return _atomic_and(ptr, val); }
+static inline int atomic_or(volatile int *ptr, int val) { return _atomic_or(ptr, val); }
+static inline int atomic_swap(volatile int *ptr, int val) { return _atomic_swap(ptr, val); }
+static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) { return _atomic_cmpxchg(ptr, oldval, newval); }
+
+static inline void arch_enable_ints(void) { _arch_enable_ints(); }
+static inline void arch_disable_ints(void) { _arch_disable_ints(); }
+
+static inline uint32_t arch_cycle_count(void) { return _arch_cycle_count(); }
+
+#endif
+
+/* full-system memory barriers */
+#define mb()        DSB
+#define wmb()       DSB
+#define rmb()       DSB
+
+/* smp barriers only need to order against other observers; on a
+ * uniprocessor build a compiler fence suffices. Use #if (not #ifdef) so
+ * WITH_SMP defined as 0 behaves like "not SMP", matching the
+ * "#if WITH_SMP && ARM_ISA_ARMV7" test earlier in this header. */
+#if WITH_SMP
+#define smp_mb()    DMB
+#define smp_wmb()   DMB
+#define smp_rmb()   DMB
+#else
+#define smp_mb()    CF
+#define smp_wmb()   CF
+#define smp_rmb()   CF
+#endif
+
+__END_CDECLS;
+
+#endif // ASSEMBLY
diff --git a/src/bsp/lk/arch/arm/include/arch/arm.h b/src/bsp/lk/arch/arm/include/arch/arm.h
new file mode 100644
index 0000000..3368e9b
--- /dev/null
+++ b/src/bsp/lk/arch/arm/include/arch/arm.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2008-2013 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __ARCH_ARM_H
+#define __ARCH_ARM_H
+
+#include <stdbool.h>
+#include <sys/types.h>
+#include <arch/arm/cores.h>
+#include <compiler.h>
+
+/* due to the cp15 accessors below, you're gonna have a bad time if you try
+ * to compile in thumb mode. Either compile in ARM only or get a thumb2 capable cpu.
+
+#if defined(__thumb__) && !defined(__thumb2__)
+#error this file unsupported in thumb1 mode
+#endif
+*/
+__BEGIN_CDECLS
+
+#if ARM_ISA_ARMV7
+/* v7 has dedicated barrier instructions */
+#define DSB __asm__ volatile("dsb" ::: "memory")
+#define DMB __asm__ volatile("dmb" ::: "memory")
+#define ISB __asm__ volatile("isb" ::: "memory")
+#elif ARM_ISA_ARMV6 || ARM_ISA_ARMV6M
+/* v6 equivalents via cp15: "drain write buffer" and "flush prefetch" */
+#define DSB __asm__ volatile("mcr p15, 0, %0, c7, c10, 4" :: "r" (0) : "memory")
+#define ISB __asm__ volatile("mcr p15, 0, %0, c7, c5, 4" :: "r" (0) : "memory")
+/* NOTE(review): this DMB is a bare nop with no "memory" clobber, so the
+ * compiler is free to reorder accesses across it — confirm intended */
+#define DMB __asm__ volatile("nop")
+#else
+#error unhandled arm isa
+#endif
+
+void arm_context_switch(vaddr_t *old_sp, vaddr_t new_sp);
+
+void arm_chain_load(paddr_t entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3) __NO_RETURN;
+
+/* Read the Current Program Status Register (mode, state and mask bits —
+ * see the CPSR_* definitions below). */
+static inline uint32_t read_cpsr(void)
+{
+    uint32_t cpsr;
+
+    __asm__ volatile("mrs   %0, cpsr" : "=r" (cpsr));
+    return cpsr;
+}
+
+#define CPSR_MODE_MASK 0x1f
+#define CPSR_MODE_USR 0x10
+#define CPSR_MODE_FIQ 0x11
+#define CPSR_MODE_IRQ 0x12
+#define CPSR_MODE_SVC 0x13
+#define CPSR_MODE_MON 0x16
+#define CPSR_MODE_ABT 0x17
+#define CPSR_MODE_UND 0x1b
+#define CPSR_MODE_SYS 0x1f
+#define CPSR_THUMB    (1<<5)
+#define CPSR_FIQ_MASK (1<<6)
+#define CPSR_IRQ_MASK (1<<7)
+#define CPSR_ABORT    (1<<8)
+#define CPSR_ENDIAN   (1<<9)
+
+/* register frame saved by the interrupt/exception glue: caller-saved
+ * registers plus the banked user sp/lr and the return pc/spsr */
+struct arm_iframe {
+#if ARM_WITH_VFP
+    uint32_t fpexc;
+#endif
+    uint32_t usp;
+    uint32_t ulr;
+    uint32_t r0;
+    uint32_t r1;
+    uint32_t r2;
+    uint32_t r3;
+    uint32_t r12;
+    uint32_t lr;
+    uint32_t pc;
+    uint32_t spsr;
+};
+
+/* full register frame captured on a fault: all of r0-r12 rather than
+ * just the caller-saved subset */
+struct arm_fault_frame {
+#if ARM_WITH_VFP
+    uint32_t fpexc;
+#endif
+    uint32_t usp;
+    uint32_t ulr;
+    uint32_t r[13];
+    uint32_t lr;
+    uint32_t pc;
+    uint32_t spsr;
+};
+
+/* banked sp (r13) and lr (r14) for each processor mode, as captured by
+ * arm_save_mode_regs() */
+struct arm_mode_regs {
+    uint32_t usr_r13, usr_r14;
+    uint32_t fiq_r13, fiq_r14;
+    uint32_t irq_r13, irq_r14;
+    uint32_t svc_r13, svc_r14;
+    uint32_t abt_r13, abt_r14;
+    uint32_t und_r13, und_r14;
+    uint32_t sys_r13, sys_r14;
+};
+
+void arm_save_mode_regs(struct arm_mode_regs *regs);
+
+/* Generate inline accessors for one coprocessor register:
+ *   arm_read_<reg>()            mrc, compiler-ordered (volatile)
+ *   arm_read_<reg>_relaxed()    mrc, may be reordered/CSE'd by the compiler
+ *   arm_write_<reg>(v)          mcr followed by an ISB so the side effects
+ *                               are visible to subsequent instructions
+ *   arm_write_<reg>_relaxed(v)  mcr without the trailing ISB */
+#define GEN_CP_REG_FUNCS(cp, reg, op1, c1, c2, op2) \
+static inline __ALWAYS_INLINE uint32_t arm_read_##reg(void) { \
+    uint32_t val; \
+    __asm__ volatile("mrc " #cp ", " #op1 ", %0, " #c1 ","  #c2 "," #op2 : "=r" (val)); \
+    return val; \
+} \
+\
+static inline __ALWAYS_INLINE uint32_t arm_read_##reg##_relaxed(void) { \
+    uint32_t val; \
+    __asm__("mrc " #cp ", " #op1 ", %0, " #c1 ","  #c2 "," #op2 : "=r" (val)); \
+    return val; \
+} \
+\
+static inline __ALWAYS_INLINE void arm_write_##reg(uint32_t val) { \
+    __asm__ volatile("mcr " #cp ", " #op1 ", %0, " #c1 ","  #c2 "," #op2 :: "r" (val)); \
+    ISB; \
+} \
+\
+static inline __ALWAYS_INLINE void arm_write_##reg##_relaxed(uint32_t val) { \
+    __asm__ volatile("mcr " #cp ", " #op1 ", %0, " #c1 ","  #c2 "," #op2 :: "r" (val)); \
+}
+
+/* convenience wrappers for the system-control (cp15) and debug (cp14)
+ * coprocessors */
+#define GEN_CP15_REG_FUNCS(reg, op1, c1, c2, op2) \
+    GEN_CP_REG_FUNCS(p15, reg, op1, c1, c2, op2)
+
+#define GEN_CP14_REG_FUNCS(reg, op1, c1, c2, op2) \
+    GEN_CP_REG_FUNCS(p14, reg, op1, c1, c2, op2)
+
+/* armv6+ control regs */
+GEN_CP15_REG_FUNCS(sctlr, 0, c1, c0, 0);
+GEN_CP15_REG_FUNCS(actlr, 0, c1, c0, 1);
+GEN_CP15_REG_FUNCS(cpacr, 0, c1, c0, 2);
+
+GEN_CP15_REG_FUNCS(ttbr, 0, c2, c0, 0);
+GEN_CP15_REG_FUNCS(ttbr0, 0, c2, c0, 0);
+GEN_CP15_REG_FUNCS(ttbr1, 0, c2, c0, 1);
+GEN_CP15_REG_FUNCS(ttbcr, 0, c2, c0, 2);
+GEN_CP15_REG_FUNCS(dacr, 0, c3, c0, 0);
+GEN_CP15_REG_FUNCS(dfsr, 0, c5, c0, 0);
+GEN_CP15_REG_FUNCS(ifsr, 0, c5, c0, 1);
+GEN_CP15_REG_FUNCS(dfar, 0, c6, c0, 0);
+GEN_CP15_REG_FUNCS(wfar, 0, c6, c0, 1);
+GEN_CP15_REG_FUNCS(ifar, 0, c6, c0, 2);
+
+GEN_CP15_REG_FUNCS(fcseidr, 0, c13, c0, 0);
+GEN_CP15_REG_FUNCS(contextidr, 0, c13, c0, 1);
+GEN_CP15_REG_FUNCS(tpidrurw, 0, c13, c0, 2);
+GEN_CP15_REG_FUNCS(tpidruro, 0, c13, c0, 3);
+GEN_CP15_REG_FUNCS(tpidrprw, 0, c13, c0, 4);
+
+/* armv7+ */
+GEN_CP15_REG_FUNCS(midr, 0, c0, c0, 0);
+GEN_CP15_REG_FUNCS(mpidr, 0, c0, c0, 5);
+GEN_CP15_REG_FUNCS(vbar, 0, c12, c0, 0);
+GEN_CP15_REG_FUNCS(cbar, 4, c15, c0, 0);
+
+GEN_CP15_REG_FUNCS(ats1cpr, 0, c7, c8, 0);
+GEN_CP15_REG_FUNCS(ats1cpw, 0, c7, c8, 1);
+GEN_CP15_REG_FUNCS(ats1cur, 0, c7, c8, 2);
+GEN_CP15_REG_FUNCS(ats1cuw, 0, c7, c8, 3);
+GEN_CP15_REG_FUNCS(ats12nsopr, 0, c7, c8, 4);
+GEN_CP15_REG_FUNCS(ats12nsopw, 0, c7, c8, 5);
+GEN_CP15_REG_FUNCS(ats12nsour, 0, c7, c8, 6);
+GEN_CP15_REG_FUNCS(ats12nsouw, 0, c7, c8, 7);
+GEN_CP15_REG_FUNCS(par, 0, c7, c4, 0);
+
+/* Branch predictor invalidate */
+GEN_CP15_REG_FUNCS(bpiall, 0, c7, c5, 6);
+GEN_CP15_REG_FUNCS(bpimva, 0, c7, c5, 7);
+GEN_CP15_REG_FUNCS(bpiallis, 0, c7, c1, 6);
+
+/* tlb registers */
+GEN_CP15_REG_FUNCS(tlbiallis, 0, c8, c3, 0);
+GEN_CP15_REG_FUNCS(tlbimvais, 0, c8, c3, 1);
+GEN_CP15_REG_FUNCS(tlbiasidis, 0, c8, c3, 2);
+GEN_CP15_REG_FUNCS(tlbimvaais, 0, c8, c3, 3);
+GEN_CP15_REG_FUNCS(itlbiall, 0, c8, c5, 0);
+GEN_CP15_REG_FUNCS(itlbimva, 0, c8, c5, 1);
+GEN_CP15_REG_FUNCS(itlbiasid, 0, c8, c5, 2);
+GEN_CP15_REG_FUNCS(dtlbiall, 0, c8, c6, 0);
+GEN_CP15_REG_FUNCS(dtlbimva, 0, c8, c6, 1);
+GEN_CP15_REG_FUNCS(dtlbiasid, 0, c8, c6, 2);
+GEN_CP15_REG_FUNCS(tlbiall, 0, c8, c7, 0);
+GEN_CP15_REG_FUNCS(tlbimva, 0, c8, c7, 1);
+GEN_CP15_REG_FUNCS(tlbiasid, 0, c8, c7, 2);
+GEN_CP15_REG_FUNCS(tlbimvaa, 0, c8, c7, 3);
+
+GEN_CP15_REG_FUNCS(l2ctlr, 1, c9, c0, 2);
+GEN_CP15_REG_FUNCS(l2ectlr, 1, c9, c0, 3);
+
+/* debug registers */
+GEN_CP14_REG_FUNCS(dbddidr, 0, c0, c0, 0);
+GEN_CP14_REG_FUNCS(dbgdrar, 0, c1, c0, 0);
+GEN_CP14_REG_FUNCS(dbgdsar, 0, c2, c0, 0);
+GEN_CP14_REG_FUNCS(dbgdscr, 0, c0, c1, 0);
+GEN_CP14_REG_FUNCS(dbgdtrtxint, 0, c0, c5, 0);
+GEN_CP14_REG_FUNCS(dbgdtrrxint, 0, c0, c5, 0); /* alias to previous */
+GEN_CP14_REG_FUNCS(dbgwfar, 0, c0, c6, 0);
+GEN_CP14_REG_FUNCS(dbgvcr, 0, c0, c7, 0);
+GEN_CP14_REG_FUNCS(dbgecr, 0, c0, c9, 0);
+GEN_CP14_REG_FUNCS(dbgdsccr, 0, c0, c10, 0);
+GEN_CP14_REG_FUNCS(dbgdsmcr, 0, c0, c11, 0);
+GEN_CP14_REG_FUNCS(dbgdtrrxext, 0, c0, c0, 2);
+GEN_CP14_REG_FUNCS(dbgdscrext, 0, c0, c2, 2);
+GEN_CP14_REG_FUNCS(dbgdtrtxext, 0, c0, c3, 2);
+GEN_CP14_REG_FUNCS(dbgdrcr, 0, c0, c4, 2);
+GEN_CP14_REG_FUNCS(dbgvr0, 0, c0, c0, 4);
+GEN_CP14_REG_FUNCS(dbgvr1, 0, c0, c1, 4);
+GEN_CP14_REG_FUNCS(dbgvr2, 0, c0, c2, 4);
+GEN_CP14_REG_FUNCS(dbgbcr0, 0, c0, c0, 5);
+GEN_CP14_REG_FUNCS(dbgbcr1, 0, c0, c1, 5);
+GEN_CP14_REG_FUNCS(dbgbcr2, 0, c0, c2, 5);
+GEN_CP14_REG_FUNCS(dbgwvr0, 0, c0, c0, 6);
+GEN_CP14_REG_FUNCS(dbgwvr1, 0, c0, c1, 6);
+GEN_CP14_REG_FUNCS(dbgwcr0, 0, c0, c0, 7);
+GEN_CP14_REG_FUNCS(dbgwcr1, 0, c0, c1, 7);
+GEN_CP14_REG_FUNCS(dbgoslar, 0, c1, c0, 4);
+GEN_CP14_REG_FUNCS(dbgoslsr, 0, c1, c1, 4);
+GEN_CP14_REG_FUNCS(dbgossrr, 0, c1, c2, 4);
+GEN_CP14_REG_FUNCS(dbgprcr, 0, c1, c4, 4);
+GEN_CP14_REG_FUNCS(dbgprsr, 0, c1, c5, 4);
+GEN_CP14_REG_FUNCS(dbgclaimset, 0, c7, c8, 6);
+GEN_CP14_REG_FUNCS(dbgclaimclr, 0, c7, c9, 6);
+GEN_CP14_REG_FUNCS(dbgauthstatus, 0, c7, c14, 6);
+GEN_CP14_REG_FUNCS(dbgdevid, 0, c7, c2, 7);
+
+/* fpu */
+void arm_fpu_set_enable(bool enable);
+#if ARM_WITH_VFP
+void arm_fpu_undefined_instruction(struct arm_iframe *frame);
+struct thread;
+void arm_fpu_thread_initialize(struct thread *t);
+void arm_fpu_thread_swap(struct thread *oldthread, struct thread *newthread);
+#endif
+
+__END_CDECLS
+
+#endif
diff --git a/src/bsp/lk/arch/arm/include/arch/arm/cores.h b/src/bsp/lk/arch/arm/include/arch/arm/cores.h
new file mode 100644
index 0000000..4d751da
--- /dev/null
+++ b/src/bsp/lk/arch/arm/include/arch/arm/cores.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2008-2012 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __ARM_CORES_H
+#define __ARM_CORES_H
+
+/*
+ * make the gcc built in define a little easier to deal with
+ * to decide what core it is generating code for
+ *
+ * ARM_ARCH_LEVEL gets assigned a numeric value of the general family
+ *
+ * ARM_ARCH_* gets defined for each feature recursively
+ */
+
+/* echo | gcc -E -dM - to dump builtin defines */
+
+#if defined(__ARM_ARCH_7EM__)
+#define ARM_ARCH_7EM 1
+#endif
+/* 7E-M implies the 7-M feature set */
+#if defined(__ARM_ARCH_7M__) || defined(ARM_ARCH_7EM)
+#define ARM_ARCH_7M 1
+#endif
+#if defined(__ARM_ARCH_7R__)
+#define ARM_ARCH_7R 1
+#endif
+/* NOTE(review): 7-R intentionally also sets ARM_ARCH_7A here, i.e. R-profile
+ * is treated as A-profile-compatible by the rest of the code — confirm */
+#if defined(__ARM_ARCH_7A__) || defined(ARM_ARCH_7R)
+#define ARM_ARCH_7A 1
+#endif
+#if defined(__ARM_ARCH_7__) || defined(ARM_ARCH_7A) || defined(ARM_ARCH_7M)
+#define ARM_ARCH_7 1
+#ifndef ARM_ARCH_LEVEL
+#define ARM_ARCH_LEVEL 7
+#endif
+#endif
+
+#if defined(__ARM_ARCH_6M__)
+#define ARM_ARCH_6M 1
+#endif
+#if defined(__ARM_ARCH_6T2__) || defined(ARM_ARCH_7)
+#define ARM_ARCH_6T2 1
+#endif
+#if defined(__ARM_ARCH_6ZK__)
+#define ARM_ARCH_6ZK 1
+#endif
+#if defined(__ARM_ARCH_6Z__) || defined(ARM_ARCH_6ZK)
+#define ARM_ARCH_6Z 1
+#endif
+#if defined(__ARM_ARCH_6K__) || defined(ARM_ARCH_6ZK) || defined(ARM_ARCH_7)
+#define ARM_ARCH_6K 1
+#endif
+#if defined(__ARM_ARCH_6J__)
+#define ARM_ARCH_6J 1
+#endif
+#if defined(__ARM_ARCH_6__) || defined(ARM_ARCH_6J) || defined(ARM_ARCH_6K) || defined(ARM_ARCH_6Z) || defined(ARM_ARCH_6T2) || defined(ARM_ARCH_6M)
+#define ARM_ARCH_6 1
+#ifndef ARM_ARCH_LEVEL
+#define ARM_ARCH_LEVEL 6
+#endif
+#endif
+
+#if defined(__ARM_ARCH_5TEJ__)
+#define ARM_ARCH_5TEJ 1
+#endif
+#if defined(__ARM_ARCH_5TE__) || defined(ARM_ARCH_5TEJ) || defined(ARM_ARCH_6)
+#define ARM_ARCH_5TE 1
+#endif
+#if defined(__ARM_ARCH_5E__) || defined(ARM_ARCH_5TE)
+#define ARM_ARCH_5E 1
+#endif
+#if defined(__ARM_ARCH_5T__) || defined(ARM_ARCH_5TE)
+#define ARM_ARCH_5T 1
+#endif
+#if defined(__ARM_ARCH_5__) || defined(ARM_ARCH_5E) || defined(ARM_ARCH_5T)
+#define ARM_ARCH_5 1
+#ifndef ARM_ARCH_LEVEL
+#define ARM_ARCH_LEVEL 5
+#endif
+#endif
+
+#if defined(__ARM_ARCH_4T__) || defined(ARM_ARCH_5T)
+#define ARM_ARCH_4T 1
+#endif
+#if defined(__ARM_ARCH_4__) || defined(ARM_ARCH_4T) || defined(ARM_ARCH_5)
+#define ARM_ARCH_4 1
+#ifndef ARM_ARCH_LEVEL
+#define ARM_ARCH_LEVEL 4
+#endif
+#endif
+
+#if 0
+/* test */
+#if ARM_ARCH_LEVEL >= 7
+#warning ARM_ARCH_LEVEL >= 7
+#endif
+#if ARM_ARCH_LEVEL >= 6
+#warning ARM_ARCH_LEVEL >= 6
+#endif
+#if ARM_ARCH_LEVEL >= 5
+#warning ARM_ARCH_LEVEL >= 5
+#endif
+#if ARM_ARCH_LEVEL >= 4
+#warning ARM_ARCH_LEVEL >= 4
+#endif
+#endif
+
+#endif
+
diff --git a/src/bsp/lk/arch/arm/include/arch/arm/dcc.h b/src/bsp/lk/arch/arm/include/arch/arm/dcc.h
new file mode 100644
index 0000000..1ac2605
--- /dev/null
+++ b/src/bsp/lk/arch/arm/include/arch/arm/dcc.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <sys/types.h>
+#include <stdint.h>
+
+/* dcc */
+/* callback invoked for each 32-bit word received over the debug comm channel */
+typedef void (*dcc_rx_callback_t)(uint32_t val);
+
+/* enable the DCC, registering rx_callback for incoming data */
+status_t arm_dcc_enable(dcc_rx_callback_t rx_callback);
+
+/* poll whether a received word is available */
+bool arm_dcc_read_available(void);
+/* read/write up to len 32-bit words, blocking at most `timeout`;
+ * return the count transferred or a negative error */
+ssize_t arm_dcc_read(uint32_t *buf, size_t len, lk_time_t timeout);
+ssize_t arm_dcc_write(const uint32_t *buf, size_t len, lk_time_t timeout);
+
diff --git a/src/bsp/lk/arch/arm/include/arch/arm/mmu.h b/src/bsp/lk/arch/arm/include/arch/arm/mmu.h
new file mode 100644
index 0000000..1f94462
--- /dev/null
+++ b/src/bsp/lk/arch/arm/include/arch/arm/mmu.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2008-2014 Travis Geiselbrecht
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __ARCH_ARM_MMU_H
+#define __ARCH_ARM_MMU_H
+
+#define MB                (1024U*1024U)
+#define SECTION_SIZE      MB
+#define SUPERSECTION_SIZE (16 * MB)
+
+#if defined(ARM_ISA_ARMV6) | defined(ARM_ISA_ARMV7)
+
+#define MMU_MEMORY_L1_DESCRIPTOR_INVALID                 (0x0 << 0)
+#define MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE              (0x1 << 0)
+#define MMU_MEMORY_L1_DESCRIPTOR_SECTION                 (0x2 << 0)
+#define MMU_MEMORY_L1_DESCRIPTOR_SUPERSECTION            ((0x2 << 0) | (0x1 << 18))
+#define MMU_MEMORY_L1_DESCRIPTOR_MASK                    (0x3 << 0)
+
+#define MMU_MEMORY_L2_DESCRIPTOR_INVALID                 (0x0 << 0)
+#define MMU_MEMORY_L2_DESCRIPTOR_LARGE_PAGE              (0x1 << 0)
+#define MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE              (0x2 << 0)
+#define MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE_XN           (0x3 << 0)
+#define MMU_MEMORY_L2_DESCRIPTOR_MASK                    (0x3 << 0)
+
+/* C, B and TEX[2:0] encodings without TEX remap (for first level descriptors) */
+/* TEX      |    CB    */
+#define MMU_MEMORY_L1_TYPE_STRONGLY_ORDERED              ((0x0 << 12) | (0x0 << 2))
+#define MMU_MEMORY_L1_TYPE_DEVICE_SHARED                 ((0x0 << 12) | (0x1 << 2))
+#define MMU_MEMORY_L1_TYPE_DEVICE_NON_SHARED             ((0x2 << 12) | (0x0 << 2))
+#define MMU_MEMORY_L1_TYPE_NORMAL                        ((0x1 << 12) | (0x0 << 2))
+#define MMU_MEMORY_L1_TYPE_NORMAL_WRITE_THROUGH          ((0x0 << 12) | (0x2 << 2))
+#define MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_NO_ALLOCATE ((0x0 << 12) | (0x3 << 2))
+#define MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE    ((0x1 << 12) | (0x3 << 2))
+#define MMU_MEMORY_L1_TYPE_MASK                          ((0x7 << 12) | (0x3 << 2))
+
+#define MMU_MEMORY_L1_TYPE_INNER_WRITE_BACK_ALLOCATE     ((0x4 << 12) | (0x1 << 2))
+
+/* C, B and TEX[2:0] encodings without TEX remap (for second level descriptors) */
+/* TEX     |    CB    */
+#define MMU_MEMORY_L2_TYPE_STRONGLY_ORDERED              ((0x0 << 6) | (0x0 << 2))
+#define MMU_MEMORY_L2_TYPE_DEVICE_SHARED                 ((0x0 << 6) | (0x1 << 2))
+#define MMU_MEMORY_L2_TYPE_DEVICE_NON_SHARED             ((0x2 << 6) | (0x0 << 2))
+#define MMU_MEMORY_L2_TYPE_NORMAL                        ((0x1 << 6) | (0x0 << 2))
+#define MMU_MEMORY_L2_TYPE_NORMAL_WRITE_THROUGH          ((0x0 << 6) | (0x2 << 2))
+#define MMU_MEMORY_L2_TYPE_NORMAL_WRITE_BACK_NO_ALLOCATE ((0x0 << 6) | (0x3 << 2))
+#define MMU_MEMORY_L2_TYPE_NORMAL_WRITE_BACK_ALLOCATE    ((0x1 << 6) | (0x3 << 2))
+#define MMU_MEMORY_L2_TYPE_MASK                          ((0x7 << 6) | (0x3 << 2))
+
+#define MMU_MEMORY_DOMAIN_MEM                            (0)
+
+/*
+ * AP (Access Permissions)
+ * +-------------------------+
+ * | AP        P         U   |
+ * +-------------------------+
+ * |                         |
+ * | 000      NA        NA   |
+ * |                         |
+ * | 001      RW        NA   |
+ * |                         |
+ * | 010      RW        R    |
+ * |                         |
+ * | 011      RW        RW   |
+ * |                         |
+ * | 101      R         NA   |
+ * |                         |
+ * | 111      R         R    |
+ * |                         |
+ * +-------------------------+
+ *
+ * NA = No Access
+ * RW = Read/Write
+ * R  = Read only
+ *
+ * P = Privileged modes
+ * U = ~P
+ *
+ */
+#define MMU_MEMORY_L1_AP_P_NA_U_NA          ((0x0 << 15) | (0x0 << 10))
+#define MMU_MEMORY_L1_AP_P_RW_U_RO          ((0x0 << 15) | (0x2 << 10)) /* Obsolete */
+#define MMU_MEMORY_L1_AP_P_RW_U_RW          ((0x0 << 15) | (0x3 << 10))
+#define MMU_MEMORY_L1_AP_P_RW_U_NA          ((0x0 << 15) | (0x1 << 10))
+#define MMU_MEMORY_L1_AP_P_RO_U_RO          ((0x1 << 15) | (0x3 << 10))
+#define MMU_MEMORY_L1_AP_P_RO_U_NA          ((0x1 << 15) | (0x1 << 10))
+#define MMU_MEMORY_L1_AP_MASK               ((0x1 << 15) | (0x3 << 10))
+
+#define MMU_MEMORY_L2_AP_P_NA_U_NA          ((0x0 << 9) | (0x0 << 4))
+#define MMU_MEMORY_L2_AP_P_RW_U_RO          ((0x0 << 9) | (0x2 << 4)) /* Obsolete */
+#define MMU_MEMORY_L2_AP_P_RW_U_RW          ((0x0 << 9) | (0x3 << 4))
+#define MMU_MEMORY_L2_AP_P_RW_U_NA          ((0x0 << 9) | (0x1 << 4))
+#define MMU_MEMORY_L2_AP_P_RO_U_RO          ((0x1 << 9) | (0x3 << 4))
+#define MMU_MEMORY_L2_AP_P_RO_U_NA          ((0x1 << 9) | (0x1 << 4))
+#define MMU_MEMORY_L2_AP_MASK               ((0x1 << 9) | (0x3 << 4))
+
+#define MMU_MEMORY_L1_PAGETABLE_NON_SECURE  (1 << 3)
+
+#define MMU_MEMORY_L1_SECTION_NON_SECURE    (1 << 19)
+#define MMU_MEMORY_L1_SECTION_SHAREABLE     (1 << 16)
+#define MMU_MEMORY_L1_SECTION_NON_GLOBAL    (1 << 17)
+#define MMU_MEMORY_L1_SECTION_XN            (1 << 4)
+
+#define MMU_MEMORY_L1_CB_SHIFT              2
+#define MMU_MEMORY_L1_TEX_SHIFT            12
+
+#define MMU_MEMORY_SET_L1_INNER(val)        (((val) & 0x3) << MMU_MEMORY_L1_CB_SHIFT)
+#define MMU_MEMORY_SET_L1_OUTER(val)        (((val) & 0x3) << MMU_MEMORY_L1_TEX_SHIFT)
+#define MMU_MEMORY_SET_L1_CACHEABLE_MEM     (0x4 << MMU_MEMORY_L1_TEX_SHIFT)
+
+#define MMU_MEMORY_L2_SHAREABLE             (1 << 10)
+#define MMU_MEMORY_L2_NON_GLOBAL            (1 << 11)
+
+#define MMU_MEMORY_L2_CB_SHIFT              2
+#define MMU_MEMORY_L2_TEX_SHIFT             6
+
+#define MMU_MEMORY_NON_CACHEABLE            0
+#define MMU_MEMORY_WRITE_BACK_ALLOCATE      1
+#define MMU_MEMORY_WRITE_THROUGH_NO_ALLOCATE 2
+#define MMU_MEMORY_WRITE_BACK_NO_ALLOCATE   3
+
+#define MMU_MEMORY_SET_L2_INNER(val)        (((val) & 0x3) << MMU_MEMORY_L2_CB_SHIFT)
+#define MMU_MEMORY_SET_L2_OUTER(val)        (((val) & 0x3) << MMU_MEMORY_L2_TEX_SHIFT)
+#define MMU_MEMORY_SET_L2_CACHEABLE_MEM     (0x4 << MMU_MEMORY_L2_TEX_SHIFT)
+
+#define MMU_MEMORY_L1_SECTION_ADDR(x)       ((x) & ~((1<<20)-1))
+#define MMU_MEMORY_L1_PAGE_TABLE_ADDR(x)    ((x) & ~((1<<10)-1))
+
+#define MMU_MEMORY_L2_SMALL_PAGE_ADDR(x)    ((x) & ~((1<<12)-1))
+#define MMU_MEMORY_L2_LARGE_PAGE_ADDR(x)    ((x) & ~((1<<16)-1))
+
+#define MMU_MEMORY_TTBR_RGN(x)              (((x) & 0x3) << 3)
+/* IRGN[1:0] is encoded as: IRGN[0] in TTBRx[6], and IRGN[1] in TTBRx[0] */
+#define MMU_MEMORY_TTBR_IRGN(x)             ((((x) & 0x1) << 6) | \
+                                            ((((x) >> 1) & 0x1) << 0))
+#define MMU_MEMORY_TTBR_S                   (1 << 1)
+#define MMU_MEMORY_TTBR_NOS                 (1 << 5)
+
+/* Default configuration for main kernel page table:
+ *    - section mappings for memory
+ *    - do cached translation walks
+ */
+
+/* Enable cached page table walks:
+ * inner/outer (IRGN/RGN): write-back + write-allocate
+ * (select inner sharable on smp)
+ */
+#if WITH_SMP
+#define MMU_TTBRx_SHARABLE_FLAGS (MMU_MEMORY_TTBR_S | MMU_MEMORY_TTBR_NOS)
+#else
+#define MMU_TTBRx_SHARABLE_FLAGS (0)
+#endif
+#define MMU_TTBRx_FLAGS \
+    (MMU_MEMORY_TTBR_RGN(MMU_MEMORY_WRITE_BACK_ALLOCATE) |\
+     MMU_MEMORY_TTBR_IRGN(MMU_MEMORY_WRITE_BACK_ALLOCATE) | \
+     MMU_TTBRx_SHARABLE_FLAGS)
+
+/* Section mapping, TEX[2:0]=001, CB=11, S=1, AP[2:0]=001 */
+#if WITH_SMP
+#define MMU_KERNEL_L1_PTE_FLAGS \
+    (MMU_MEMORY_L1_DESCRIPTOR_SECTION | \
+     MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE | \
+     MMU_MEMORY_L1_AP_P_RW_U_NA | \
+     MMU_MEMORY_L1_SECTION_SHAREABLE)
+#else
+#define MMU_KERNEL_L1_PTE_FLAGS \
+    (MMU_MEMORY_L1_DESCRIPTOR_SECTION | \
+     MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE | \
+     MMU_MEMORY_L1_AP_P_RW_U_NA)
+#endif
+
+#define MMU_INITIAL_MAP_STRONGLY_ORDERED \
+    (MMU_MEMORY_L1_DESCRIPTOR_SECTION | \
+    MMU_MEMORY_L1_TYPE_STRONGLY_ORDERED | \
+    MMU_MEMORY_L1_AP_P_RW_U_NA)
+
+#define MMU_INITIAL_MAP_DEVICE \
+    (MMU_MEMORY_L1_DESCRIPTOR_SECTION | \
+    MMU_MEMORY_L1_TYPE_DEVICE_SHARED | \
+    MMU_MEMORY_L1_AP_P_RW_U_NA)
+
+#endif // armv6 | armv7
+
+#ifndef ASSEMBLY
+
+#include <sys/types.h>
+#include <assert.h>
+#include <compiler.h>
+#include <arch/arm.h>
+
+__BEGIN_CDECLS
+
+void arm_mmu_early_init(void);
+void arm_mmu_init(void);
+status_t arm_vtop(addr_t va, addr_t *pa);
+
+/* tlb routines */
+
+static inline void arm_after_invalidate_tlb_barrier(void)
+{
+#if WITH_SMP
+    arm_write_bpiallis(0);
+#else
+    arm_write_bpiall(0);
+#endif
+    DSB;
+    ISB;
+}
+
+static inline void arm_invalidate_tlb_global_no_barrier(void)
+{
+#if WITH_SMP
+    arm_write_tlbiallis(0);
+#else
+    arm_write_tlbiall(0);
+#endif
+}
+
+static inline void arm_invalidate_tlb_global(void)
+{
+    DSB;
+    arm_invalidate_tlb_global_no_barrier();
+    arm_after_invalidate_tlb_barrier();
+}
+
+static inline void arm_invalidate_tlb_mva_no_barrier(vaddr_t va)
+{
+#if WITH_SMP
+    arm_write_tlbimvaais(va & 0xfffff000);
+#else
+    arm_write_tlbimvaa(va & 0xfffff000);
+#endif
+}
+
+static inline void arm_invalidate_tlb_mva(vaddr_t va)
+{
+    DSB;
+    arm_invalidate_tlb_mva_no_barrier(va);
+    arm_after_invalidate_tlb_barrier();
+}
+
+
+static inline void arm_invalidate_tlb_asid_no_barrier(uint8_t asid)
+{
+#if WITH_SMP
+    arm_write_tlbiasidis(asid);
+#else
+    arm_write_tlbiasid(asid);
+#endif
+}
+
+static inline void arm_invalidate_tlb_asid(uint8_t asid)
+{
+    DSB;
+    arm_invalidate_tlb_asid_no_barrier(asid);
+    arm_after_invalidate_tlb_barrier();
+}
+
+static inline void arm_invalidate_tlb_mva_asid_no_barrier(vaddr_t va, uint8_t asid)
+{
+#if WITH_SMP
+    arm_write_tlbimvais((va & 0xfffff000) | asid);
+#else
+    arm_write_tlbimva((va & 0xfffff000) | asid);
+#endif
+}
+
+static inline void arm_invalidate_tlb_mva_asid(vaddr_t va, uint8_t asid)
+{
+    DSB;
+    arm_invalidate_tlb_mva_asid_no_barrier(va, asid);
+    arm_after_invalidate_tlb_barrier();
+}
+
+__END_CDECLS
+
+#endif /* ASSEMBLY */
+
+#endif
diff --git a/src/bsp/lk/arch/arm/include/arch/asm.h b/src/bsp/lk/arch/arm/include/arch/asm.h
new file mode 100644
index 0000000..7c8d00e
--- /dev/null
+++ b/src/bsp/lk/arch/arm/include/arch/asm.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <arch/arm/cores.h>
+
+#if ARM_ARCH_LEVEL >= 7
+#define LOADCONST(reg, c) \
+    movw reg, #:lower16: c; \
+    movt reg, #:upper16: c
+#else
+#define LOADCONST(reg, c) ldr   reg, =##c
+#endif
+
diff --git a/src/bsp/lk/arch/arm/include/arch/asm_macros.h b/src/bsp/lk/arch/arm/include/arch/asm_macros.h
new file mode 100644
index 0000000..7dd7bf7
--- /dev/null
+++ b/src/bsp/lk/arch/arm/include/arch/asm_macros.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2015, Google Inc. All rights reserved
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+/* Set fault handler for next instruction */
+.macro set_fault_handler, handler
+.Lfault_location\@:
+.pushsection .rodata.fault_handler_table
+.long    .Lfault_location\@
+.long    \handler
+.popsection
+.endm
diff --git a/src/bsp/lk/arch/arm/include/arch/defines.h b/src/bsp/lk/arch/arm/include/arch/defines.h
new file mode 100644
index 0000000..c756986
--- /dev/null
+++ b/src/bsp/lk/arch/arm/include/arch/defines.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2008 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __ARCH_CPU_H
+#define __ARCH_CPU_H
+
+/* arm specific stuff */
+#define PAGE_SIZE 4096
+#define PAGE_SIZE_SHIFT 12
+
+#if ARM_CPU_ARM7
+/* irrelevant, no consistent cache */
+#define CACHE_LINE 32
+#elif ARM_CPU_ARM926
+#define CACHE_LINE 32
+#elif ARM_CPU_ARM1136
+#define CACHE_LINE 32
+#elif ARM_CPU_ARMEMU
+#define CACHE_LINE 32
+#elif ARM_CPU_CORTEX_A7
+#define CACHE_LINE 64 /* XXX L1 icache is 32 bytes */
+#elif ARM_CPU_CORTEX_A8
+#define CACHE_LINE 64
+#elif ARM_CPU_CORTEX_A9
+#define CACHE_LINE 32
+#elif ARM_CPU_CORTEX_M0 || ARM_CPU_CORTEX_M0_PLUS || ARM_CPU_CORTEX_M3 || ARM_CPU_CORTEX_M4
+#define CACHE_LINE 32 /* doesn't actually matter */
+#elif ARM_CPU_CORTEX_M7
+#define CACHE_LINE 32
+#elif ARM_CPU_CORTEX_A15
+#define CACHE_LINE 64
+#else
+#error unknown cpu
+#endif
+
+#endif
+
diff --git a/src/bsp/lk/arch/arm/include/arch/spinlock.h b/src/bsp/lk/arch/arm/include/arch/spinlock.h
new file mode 100644
index 0000000..d2fb527
--- /dev/null
+++ b/src/bsp/lk/arch/arm/include/arch/spinlock.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <compiler.h>
+#include <arch/ops.h>
+#include <stdbool.h>
+
+__BEGIN_CDECLS;
+
+#define SPIN_LOCK_INITIAL_VALUE (0)
+
+typedef unsigned long spin_lock_t;
+
+typedef unsigned long spin_lock_saved_state_t;
+typedef unsigned long spin_lock_save_flags_t;
+
+static inline void arch_spin_lock_init(spin_lock_t *lock)
+{
+    *lock = SPIN_LOCK_INITIAL_VALUE;
+}
+
+static inline bool arch_spin_lock_held(spin_lock_t *lock)
+{
+    return *lock != 0;
+}
+
+#if WITH_SMP
+
+void arch_spin_lock(spin_lock_t *lock);
+int arch_spin_trylock(spin_lock_t *lock);
+void arch_spin_unlock(spin_lock_t *lock);
+
+#else
+
+static inline void arch_spin_lock(spin_lock_t *lock)
+{
+    *lock = 1;
+}
+
+static inline int arch_spin_trylock(spin_lock_t *lock)
+{
+    return 0;
+}
+
+static inline void arch_spin_unlock(spin_lock_t *lock)
+{
+    *lock = 0;
+}
+
+#endif
+
+/* ARM specific flags */
+#define SPIN_LOCK_FLAG_IRQ                      0x40000000
+#define SPIN_LOCK_FLAG_FIQ                      0x80000000 /* Do not use unless IRQs are already disabled */
+#define SPIN_LOCK_FLAG_IRQ_FIQ                  (SPIN_LOCK_FLAG_IRQ | SPIN_LOCK_FLAG_FIQ)
+
+/* default arm flag is to just disable plain irqs */
+#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS  SPIN_LOCK_FLAG_IRQ
+
+enum {
+    /* private */
+    SPIN_LOCK_STATE_RESTORE_IRQ = 1,
+    SPIN_LOCK_STATE_RESTORE_FIQ = 2,
+};
+
+static inline void
+arch_interrupt_save(spin_lock_saved_state_t *statep, spin_lock_save_flags_t flags)
+{
+    spin_lock_saved_state_t state = 0;
+    if ((flags & SPIN_LOCK_FLAG_IRQ) && !arch_ints_disabled()) {
+        state |= SPIN_LOCK_STATE_RESTORE_IRQ;
+        arch_disable_ints();
+    }
+    if ((flags & SPIN_LOCK_FLAG_FIQ) && !arch_fiqs_disabled()) {
+        state |= SPIN_LOCK_STATE_RESTORE_FIQ;
+        arch_disable_fiqs();
+    }
+    *statep = state;
+}
+
+static inline void
+arch_interrupt_restore(spin_lock_saved_state_t old_state, spin_lock_save_flags_t flags)
+{
+    if ((flags & SPIN_LOCK_FLAG_FIQ) && (old_state & SPIN_LOCK_STATE_RESTORE_FIQ))
+        arch_enable_fiqs();
+    if ((flags & SPIN_LOCK_FLAG_IRQ) && (old_state & SPIN_LOCK_STATE_RESTORE_IRQ))
+        arch_enable_ints();
+}
+
+__END_CDECLS;
diff --git a/src/bsp/lk/arch/arm/rules.mk b/src/bsp/lk/arch/arm/rules.mk
new file mode 100644
index 0000000..716e383
--- /dev/null
+++ b/src/bsp/lk/arch/arm/rules.mk
@@ -0,0 +1,343 @@
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE := $(LOCAL_DIR)
+
+# can override this in local.mk
+ENABLE_THUMB?=true
+
+# default to the regular arm subarch
+SUBARCH := arm
+
+GLOBAL_DEFINES += \
+	ARM_CPU_$(ARM_CPU)=1
+
+# do set some options based on the cpu core
+HANDLED_CORE := false
+ifeq ($(ARM_CPU),cortex-m0)
+GLOBAL_DEFINES += \
+	ARM_CPU_CORTEX_M0=1 \
+	ARM_ISA_ARMV6M=1 \
+	ARM_WITH_THUMB=1
+HANDLED_CORE := true
+ENABLE_THUMB := true
+SUBARCH := arm-m
+endif
+ifeq ($(ARM_CPU),cortex-m0plus)
+GLOBAL_DEFINES += \
+	ARM_CPU_CORTEX_M0_PLUS=1 \
+	ARM_ISA_ARMV6M=1 \
+	ARM_WITH_THUMB=1
+HANDLED_CORE := true
+ENABLE_THUMB := true
+SUBARCH := arm-m
+endif
+ifeq ($(ARM_CPU),cortex-m3)
+GLOBAL_DEFINES += \
+	ARM_CPU_CORTEX_M3=1 \
+	ARM_ISA_ARMv7=1 \
+	ARM_ISA_ARMv7M=1 \
+	ARM_WITH_THUMB=1 \
+	ARM_WITH_THUMB2=1
+HANDLED_CORE := true
+ENABLE_THUMB := true
+SUBARCH := arm-m
+endif
+ifeq ($(ARM_CPU),cortex-m4)
+GLOBAL_DEFINES += \
+	ARM_CPU_CORTEX_M4=1 \
+	ARM_ISA_ARMv7=1 \
+	ARM_ISA_ARMv7M=1 \
+	ARM_WITH_THUMB=1 \
+	ARM_WITH_THUMB2=1
+HANDLED_CORE := true
+ENABLE_THUMB := true
+SUBARCH := arm-m
+endif
+ifeq ($(ARM_CPU),cortex-m4f)
+GLOBAL_DEFINES += \
+	ARM_CPU_CORTEX_M4=1 \
+	ARM_CPU_CORTEX_M4F=1 \
+	ARM_ISA_ARMv7=1 \
+	ARM_ISA_ARMv7M=1 \
+	ARM_WITH_THUMB=1 \
+	ARM_WITH_THUMB2=1 \
+	ARM_WITH_VFP=1 \
+	__FPU_PRESENT=1
+HANDLED_CORE := true
+ENABLE_THUMB := true
+SUBARCH := arm-m
+endif
+ifeq ($(ARM_CPU),cortex-m7)
+GLOBAL_DEFINES += \
+	ARM_CPU_CORTEX_M7=1 \
+	ARM_ISA_ARMv7=1 \
+	ARM_ISA_ARMv7M=1 \
+	ARM_WITH_THUMB=1 \
+	ARM_WITH_THUMB2=1 \
+	ARM_WITH_CACHE=1
+HANDLED_CORE := true
+ENABLE_THUMB := true
+SUBARCH := arm-m
+endif
+ifeq ($(ARM_CPU),cortex-a7)
+GLOBAL_DEFINES += \
+	ARM_WITH_CP15=1 \
+	ARM_WITH_MMU=1 \
+	ARM_ISA_ARMv7=1 \
+	ARM_ISA_ARMv7A=1 \
+	ARM_WITH_VFP=1 \
+	ARM_WITH_NEON=1 \
+	ARM_WITH_THUMB=1 \
+	ARM_WITH_THUMB2=1 \
+	ARM_WITH_CACHE=1
+HANDLED_CORE := true
+endif
+ifeq ($(ARM_CPU),cortex-a15)
+GLOBAL_DEFINES += \
+	ARM_WITH_CP15=1 \
+	ARM_WITH_MMU=1 \
+	ARM_ISA_ARMv7=1 \
+	ARM_ISA_ARMv7A=1 \
+	ARM_WITH_THUMB=1 \
+	ARM_WITH_THUMB2=1 \
+	ARM_WITH_CACHE=1 \
+	ARM_WITH_L2=1
+ifneq ($(ARM_WITHOUT_VFP_NEON),true)
+GLOBAL_DEFINES += \
+	ARM_WITH_VFP=1 \
+	ARM_WITH_NEON=1
+endif
+HANDLED_CORE := true
+endif
+ifeq ($(ARM_CPU),cortex-a8)
+GLOBAL_DEFINES += \
+	ARM_WITH_CP15=1 \
+	ARM_WITH_MMU=1 \
+	ARM_ISA_ARMv7=1 \
+	ARM_ISA_ARMv7A=1 \
+	ARM_WITH_VFP=1 \
+	ARM_WITH_NEON=1 \
+	ARM_WITH_THUMB=1 \
+	ARM_WITH_THUMB2=1 \
+	ARM_WITH_CACHE=1 \
+	ARM_WITH_L2=1
+HANDLED_CORE := true
+endif
+ifeq ($(ARM_CPU),cortex-a9)
+GLOBAL_DEFINES += \
+	ARM_WITH_CP15=1 \
+	ARM_WITH_MMU=1 \
+	ARM_ISA_ARMv7=1 \
+	ARM_ISA_ARMv7A=1 \
+	ARM_WITH_THUMB=1 \
+	ARM_WITH_THUMB2=1 \
+	ARM_WITH_CACHE=1
+HANDLED_CORE := true
+endif
+ifeq ($(ARM_CPU),cortex-a9-neon)
+GLOBAL_DEFINES += \
+	ARM_CPU_CORTEX_A9=1 \
+	ARM_WITH_CP15=1 \
+	ARM_WITH_MMU=1 \
+	ARM_ISA_ARMv7=1 \
+	ARM_ISA_ARMv7A=1 \
+	ARM_WITH_VFP=1 \
+	ARM_WITH_NEON=1 \
+	ARM_WITH_THUMB=1 \
+	ARM_WITH_THUMB2=1 \
+	ARM_WITH_CACHE=1
+HANDLED_CORE := true
+endif
+ifeq ($(ARM_CPU),arm1136j-s)
+GLOBAL_DEFINES += \
+	ARM_WITH_CP15=1 \
+	ARM_WITH_MMU=1 \
+	ARM_ISA_ARMv6=1 \
+	ARM_WITH_THUMB=1 \
+	ARM_WITH_CACHE=1 \
+	ARM_CPU_ARM1136=1
+HANDLED_CORE := true
+endif
+ifeq ($(ARM_CPU),arm1176jzf-s)
+GLOBAL_DEFINES += \
+	ARM_WITH_CP15=1 \
+	ARM_WITH_MMU=1 \
+	ARM_ISA_ARMv6=1 \
+	ARM_WITH_VFP=1 \
+	ARM_WITH_THUMB=1 \
+	ARM_WITH_CACHE=1 \
+	ARM_CPU_ARM1136=1
+HANDLED_CORE := true
+endif
+ifeq ($(ARM_CPU),armemu)
+# flavor of emulated cpu by the armemu project
+GLOBAL_DEFINES += \
+	ARM_WITH_CP15=1 \
+	ARM_ISA_ARMv7=1 \
+	ARM_ISA_ARMv7A=1 \
+	ARM_WITH_CACHE=1
+HANDLED_CORE := true
+ENABLE_THUMB := false # armemu doesn't currently support thumb properly
+endif
+
+ifneq ($(HANDLED_CORE),true)
+$(error $(LOCAL_DIR)/rules.mk doesnt have logic for arm core $(ARM_CPU))
+endif
+
+THUMBCFLAGS :=
+THUMBINTERWORK :=
+ifeq ($(ENABLE_THUMB),true)
+THUMBCFLAGS := -mthumb -D__thumb__
+endif
+
+GLOBAL_INCLUDES += \
+	$(LOCAL_DIR)/$(SUBARCH)/include
+
+ifeq ($(SUBARCH),arm)
+MODULE_SRCS += \
+	$(LOCAL_DIR)/arm/start.S \
+	$(LOCAL_DIR)/arm/asm.S \
+	$(LOCAL_DIR)/arm/cache-ops.S \
+	$(LOCAL_DIR)/arm/cache.c \
+	$(LOCAL_DIR)/arm/debug.c \
+	$(LOCAL_DIR)/arm/ops.S \
+	$(LOCAL_DIR)/arm/exceptions.S \
+	$(LOCAL_DIR)/arm/faults.c \
+	$(LOCAL_DIR)/arm/fpu.c \
+	$(LOCAL_DIR)/arm/mmu.c \
+	$(LOCAL_DIR)/arm/thread.c
+
+MODULE_ARM_OVERRIDE_SRCS := \
+	$(LOCAL_DIR)/arm/arch.c
+
+GLOBAL_DEFINES += \
+	ARCH_DEFAULT_STACK_SIZE=4096
+
+ARCH_OPTFLAGS := -O2
+WITH_LINKER_GC ?= 1
+
+# we have a mmu and want the vmm/pmm
+WITH_KERNEL_VM ?= 1
+
+# for arm, have the kernel occupy the entire top 3GB of virtual space,
+# but put the kernel itself at 0x80000000.
+# this leaves 0x40000000 - 0x80000000 open for kernel space to use.
+GLOBAL_DEFINES += \
+    KERNEL_ASPACE_BASE=0x40000000 \
+    KERNEL_ASPACE_SIZE=0xc0000000
+
+KERNEL_BASE ?= 0x80000000
+KERNEL_LOAD_OFFSET ?= 0
+
+GLOBAL_DEFINES += \
+    KERNEL_BASE=$(KERNEL_BASE) \
+    KERNEL_LOAD_OFFSET=$(KERNEL_LOAD_OFFSET)
+
+# if its requested we build with SMP, arm generically supports 4 cpus
+ifeq ($(WITH_SMP),1)
+SMP_MAX_CPUS ?= 4
+SMP_CPU_CLUSTER_SHIFT ?= 8
+SMP_CPU_ID_BITS ?= 24
+
+GLOBAL_DEFINES += \
+    WITH_SMP=1 \
+    SMP_MAX_CPUS=$(SMP_MAX_CPUS) \
+    SMP_CPU_CLUSTER_SHIFT=$(SMP_CPU_CLUSTER_SHIFT) \
+    SMP_CPU_ID_BITS=$(SMP_CPU_ID_BITS)
+
+MODULE_SRCS += \
+	$(LOCAL_DIR)/arm/mp.c
+else
+GLOBAL_DEFINES += \
+    SMP_MAX_CPUS=1
+endif
+
+ifeq (true,$(call TOBOOL,$(WITH_NS_MAPPING)))
+GLOBAL_DEFINES += \
+    WITH_ARCH_MMU_PICK_SPOT=1
+endif
+
+endif
+ifeq ($(SUBARCH),arm-m)
+MODULE_SRCS += \
+	$(LOCAL_DIR)/arm-m/arch.c \
+	$(LOCAL_DIR)/arm-m/cache.c \
+	$(LOCAL_DIR)/arm-m/exceptions.c \
+	$(LOCAL_DIR)/arm-m/start.c \
+	$(LOCAL_DIR)/arm-m/spin_cycles.c \
+	$(LOCAL_DIR)/arm-m/thread.c \
+	$(LOCAL_DIR)/arm-m/vectab.c
+
+GLOBAL_INCLUDES += \
+	$(LOCAL_DIR)/arm-m/CMSIS/Include
+
+# we're building for small binaries
+GLOBAL_DEFINES += \
+	ARM_ONLY_THUMB=1 \
+	ARCH_DEFAULT_STACK_SIZE=1024 \
+	SMP_MAX_CPUS=1
+
+ARCH_OPTFLAGS := -Os
+WITH_LINKER_GC ?= 1
+endif
+
+# try to find toolchain
+include $(LOCAL_DIR)/toolchain.mk
+TOOLCHAIN_PREFIX := $(ARCH_$(ARCH)_TOOLCHAIN_PREFIX)
+$(info TOOLCHAIN_PREFIX = $(TOOLCHAIN_PREFIX))
+
+ARCH_COMPILEFLAGS += $(ARCH_$(ARCH)_COMPILEFLAGS)
+
+
+# set the max page size to something more reasonable (defaults to 64K or above)
+GLOBAL_LDFLAGS += -z max-page-size=4096
+
+$(info GLOBAL_COMPILEFLAGS = $(GLOBAL_COMPILEFLAGS) $(ARCH_COMPILEFLAGS) $(THUMBCFLAGS))
+
+# make sure some bits were set up
+MEMVARS_SET := 0
+ifneq ($(MEMBASE),)
+MEMVARS_SET := 1
+endif
+ifneq ($(MEMSIZE),)
+MEMVARS_SET := 1
+endif
+ifeq ($(MEMVARS_SET),0)
+$(error missing MEMBASE or MEMSIZE variable, please set in target rules.mk)
+endif
+
+GLOBAL_DEFINES += \
+	MEMBASE=$(MEMBASE) \
+	MEMSIZE=$(MEMSIZE)
+
+# potentially generated files that should be cleaned out with clean make rule
+GENERATED += \
+	$(BUILDDIR)/system-onesegment.ld \
+	$(BUILDDIR)/system-twosegment.ld
+
+# rules for generating the linker scripts
+$(BUILDDIR)/system-onesegment.ld: $(LOCAL_DIR)/system-onesegment.ld $(wildcard arch/*.ld) linkerscript.phony
+	@echo generating $@
+	@$(MKDIR)
+	$(NOECHO)sed "s/%MEMBASE%/$(MEMBASE)/;s/%MEMSIZE%/$(MEMSIZE)/;s/%KERNEL_BASE%/$(KERNEL_BASE)/;s/%KERNEL_LOAD_OFFSET%/$(KERNEL_LOAD_OFFSET)/" < $< > $@.tmp
+	@$(call TESTANDREPLACEFILE,$@.tmp,$@)
+
+$(BUILDDIR)/system-twosegment.ld: $(LOCAL_DIR)/system-twosegment.ld $(wildcard arch/*.ld) linkerscript.phony
+	@echo generating $@
+	@$(MKDIR)
+	$(NOECHO)sed "s/%ROMBASE%/$(ROMBASE)/;s/%MEMBASE%/$(MEMBASE)/;s/%MEMSIZE%/$(MEMSIZE)/" < $< > $@.tmp
+	@$(call TESTANDREPLACEFILE,$@.tmp,$@)
+
+linkerscript.phony:
+.PHONY: linkerscript.phony
+
+# arm specific script to try to guess stack usage
+$(OUTELF).stack: LOCAL_DIR:=$(LOCAL_DIR)
+$(OUTELF).stack: $(OUTELF)
+	$(NOECHO)echo generating stack usage $@
+	$(NOECHO)$(OBJDUMP) -Mreg-names-raw -d $< | $(LOCAL_DIR)/stackusage | $(CPPFILT) | sort -n -k 1 -r > $@
+
+EXTRA_BUILDDEPS += $(OUTELF).stack
+GENERATED += $(OUTELF).stack
+
+include make/module.mk
diff --git a/src/bsp/lk/arch/arm/stackusage b/src/bsp/lk/arch/arm/stackusage
new file mode 100755
index 0000000..51afdf3
--- /dev/null
+++ b/src/bsp/lk/arch/arm/stackusage
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+
+import sys
+import re
+
+hexrule = re.compile("([0-9a-fA-F]+)")
+hex2byterule = re.compile("([0-9a-fA-F]{4})")
+hexcolonrule = re.compile("([0-9a-fA-F]+)\:")
+symbolrule = re.compile("<([\._a-zA-Z]+[\._0-9a-zA-Z]*)>:")
+insrule = re.compile("([a-zA-Z][\.a-zA-Z]*)")
+
+currsymbol = ""
+curraddress = 0
+count = 0
+
+for line in sys.stdin:
+    t = line.split()
+
+    if len(t) == 0:
+        continue
+
+    try:
+
+        # match the address
+        match = hexcolonrule.match(t[0])
+        if match:
+            #print "%s %s" % (match, match.group(1))
+            curraddress = int(match.group(1), 16)
+            #print "curraddress 0x%x" % curraddress
+
+        # see if this is a symbol declaration
+        match = symbolrule.match(t[1])
+        if match:
+            # print the previous count
+            if count > 0:
+                print "%d %s" % (count, currsymbol)
+            count = 0
+
+            #print "%s %s" % (match, match.group(1))
+            currsymbol = str(match.group(1))
+            #print "current symbol is now '%s'" % currsymbol
+            continue
+
+        # see if it's a one or two byte opcode
+        iindex = 2
+        match = hex2byterule.match(t[1])
+        if not match:
+            continue
+        match = hex2byterule.match(t[2])
+        if match:
+            #print "match %s, %s" % (match, match.group(0))
+            iindex = 3
+
+        #print "instruction starts at index %d: '%s'" % (iindex, t[iindex])
+
+        # match the instruction string
+        insmatch = insrule.match(t[iindex])
+        if not insmatch:
+            continue
+        ins = insmatch.group(1)
+        #print "instruction '%s'" % ins
+
+        # look for a few special instructions
+        if ins == "push":
+            c = (len(t) - 1 - iindex) * 4
+            #print "%d bytes pushed" % c
+            count += c
+
+        # look for a few special instructions
+        if ins == "stmdb":
+            c = (len(t) - 2 - iindex) * 4
+            #print "%d bytes stmed" % c
+            count += c
+
+        if ins == "sub":
+            reg = t[iindex+1]
+            if reg == "sp,":
+                conststr = t[iindex+2]
+                c = int(conststr[1:])
+                #print "subtracting from sp, val %d" % c
+                count += c
+
+    except IndexError:
+        continue
+    except Exception as e:
+        print "Exception %s" % e
+        continue
+
+# print the last count
+if count > 0:
+    print "%d %s" % (count, currsymbol)
+
diff --git a/src/bsp/lk/arch/arm/system-onesegment.ld b/src/bsp/lk/arch/arm/system-onesegment.ld
new file mode 100644
index 0000000..a3ba8b9
--- /dev/null
+++ b/src/bsp/lk/arch/arm/system-onesegment.ld
@@ -0,0 +1,122 @@
+/* One-segment LK linker script for ARM: text, rodata, data and bss are all
+ * linked into one contiguous segment starting at %KERNEL_BASE% +
+ * %KERNEL_LOAD_OFFSET%. %...% tokens are substituted by the build system
+ * before the script reaches the linker. */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
+OUTPUT_ARCH(arm)
+
+ENTRY(_start)
+SECTIONS
+{
+    . = %KERNEL_BASE% + %KERNEL_LOAD_OFFSET%;
+
+    _start = .;
+
+    /* text/read-only data */
+    /* set the load address to physical MEMBASE */
+    .text : AT(%MEMBASE% + %KERNEL_LOAD_OFFSET%) {
+        KEEP(*(.text.boot.vectab1))
+        KEEP(*(.text.boot.vectab2))
+        KEEP(*(.text.boot))
+        *(.text* .sram.text.glue_7* .gnu.linkonce.t.*)
+    }
+
+    .interp : { *(.interp) }
+    .hash : { *(.hash) }
+    .dynsym : { *(.dynsym) }
+    .dynstr : { *(.dynstr) }
+    .rel.text : { *(.rel.text) *(.rel.gnu.linkonce.t*) }
+    .rela.text : { *(.rela.text) *(.rela.gnu.linkonce.t*) }
+    .rel.data : { *(.rel.data) *(.rel.gnu.linkonce.d*) }
+    .rela.data : { *(.rela.data) *(.rela.gnu.linkonce.d*) }
+    .rel.rodata : { *(.rel.rodata) *(.rel.gnu.linkonce.r*) }
+    .rela.rodata : { *(.rela.rodata) *(.rela.gnu.linkonce.r*) }
+    .rel.got : { *(.rel.got) }
+    .rela.got : { *(.rela.got) }
+    .rel.ctors : { *(.rel.ctors) }
+    .rela.ctors : { *(.rela.ctors) }
+    .rel.dtors : { *(.rel.dtors) }
+    .rela.dtors : { *(.rela.dtors) }
+    .rel.init : { *(.rel.init) }
+    .rela.init : { *(.rela.init) }
+    .rel.fini : { *(.rel.fini) }
+    .rela.fini : { *(.rela.fini) }
+    .rel.bss : { *(.rel.bss) }
+    .rela.bss : { *(.rela.bss) }
+    .rel.plt : { *(.rel.plt) }
+    .rela.plt : { *(.rela.plt) }
+    .init : { *(.init) } =0x9090
+    .plt : { *(.plt) }
+
+    /* .ARM.exidx is sorted, so has to go in its own output section.  */
+    __exidx_start = .;
+    .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+    __exidx_end = .;
+
+    .rodata : ALIGN(4) {
+        __rodata_start = .;
+        __fault_handler_table_start = .;
+        KEEP(*(.rodata.fault_handler_table))
+        __fault_handler_table_end = .;
+        *(.rodata .rodata.* .gnu.linkonce.r.*)
+    }
+
+    /*
+     * extra linker scripts tend to insert sections just after .rodata,
+     * so we want to make sure this symbol comes after anything inserted above,
+     * but not aligned to the next section necessarily.
+     */
+    .dummy_post_rodata : {
+        __rodata_end = .;
+    }
+
+    .data : ALIGN(4) {
+        /* writable data  */
+        __data_start_rom = .;
+        /* in one segment binaries, the rom data address is on top of the ram data address */
+        __data_start = .;
+        *(.data .data.* .gnu.linkonce.d.*)
+    }
+
+    .ctors : ALIGN(4) {
+        __ctor_list = .;
+        KEEP(*(.ctors .init_array))
+        __ctor_end = .;
+    }
+    .dtors : ALIGN(4) {
+        __dtor_list = .;
+        KEEP(*(.dtors .fini_array))
+        __dtor_end = .;
+    }
+    .got : { *(.got.plt) *(.got) }
+    .dynamic : { *(.dynamic) }
+
+    /*
+     * extra linker scripts tend to insert sections just after .data,
+     * so we want to make sure this symbol comes after anything inserted above,
+     * but not aligned to the next section necessarily.
+     */
+    .dummy_post_data : {
+        __data_end = .;
+    }
+
+    /* uninitialized data (in same segment as writable data) */
+    .bss : ALIGN(4) {
+        KEEP(*(.bss.prebss.*))
+        . = ALIGN(4);
+        __bss_start = .;
+        *(.bss .bss.*)
+        *(.gnu.linkonce.b.*)
+        *(COMMON)
+        . = ALIGN(4);
+        __bss_end = .;
+    }
+
+    .translation_table (NOLOAD) : {
+        KEEP(*(.translation_table))
+    }
+
+    _end = .;
+
+    . = %KERNEL_BASE% + %MEMSIZE%;
+    _end_of_ram = .;
+
+    /* Strip unnecessary stuff */
+    /DISCARD/ : { *(.comment .note .eh_frame) }
+}
diff --git a/src/bsp/lk/arch/arm/system-twosegment.ld b/src/bsp/lk/arch/arm/system-twosegment.ld
new file mode 100644
index 0000000..3667f41
--- /dev/null
+++ b/src/bsp/lk/arch/arm/system-twosegment.ld
@@ -0,0 +1,123 @@
+/* Two-segment LK linker script for ARM: read-only sections are linked at
+ * %ROMBASE% while .data and later sections are linked at %MEMBASE% (RAM),
+ * with .data's load address anchored to the end of the ROM image so boot
+ * code can copy it out. %...% tokens are substituted by the build system. */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
+OUTPUT_ARCH(arm)
+
+ENTRY(_start)
+SECTIONS
+{
+    . = %ROMBASE%;
+
+    /* text/read-only data */
+    .text : {
+        KEEP(*(.text.boot.vectab1))
+        KEEP(*(.text.boot.vectab2))
+        KEEP(*(.text.boot))
+        *(.text* .sram.text.glue_7* .gnu.linkonce.t.*)
+    }
+
+    .interp : { *(.interp) }
+    .hash : { *(.hash) }
+    .dynsym : { *(.dynsym) }
+    .dynstr : { *(.dynstr) }
+    .rel.text : { *(.rel.text) *(.rel.gnu.linkonce.t*) }
+    .rela.text : { *(.rela.text) *(.rela.gnu.linkonce.t*) }
+    .rel.data : { *(.rel.data) *(.rel.gnu.linkonce.d*) }
+    .rela.data : { *(.rela.data) *(.rela.gnu.linkonce.d*) }
+    .rel.rodata : { *(.rel.rodata) *(.rel.gnu.linkonce.r*) }
+    .rela.rodata : { *(.rela.rodata) *(.rela.gnu.linkonce.r*) }
+    .rel.got : { *(.rel.got) }
+    .rela.got : { *(.rela.got) }
+    .rel.ctors : { *(.rel.ctors) }
+    .rela.ctors : { *(.rela.ctors) }
+    .rel.dtors : { *(.rel.dtors) }
+    .rela.dtors : { *(.rela.dtors) }
+    .rel.init : { *(.rel.init) }
+    .rela.init : { *(.rela.init) }
+    .rel.fini : { *(.rel.fini) }
+    .rela.fini : { *(.rela.fini) }
+    .rel.bss : { *(.rel.bss) }
+    .rela.bss : { *(.rela.bss) }
+    .rel.plt : { *(.rel.plt) }
+    .rela.plt : { *(.rela.plt) }
+    .init : { *(.init) } =0x9090
+    .plt : { *(.plt) }
+
+    /* .ARM.exidx is sorted, so has to go in its own output section.  */
+    __exidx_start = .;
+    .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+    __exidx_end = .;
+
+    .rodata : ALIGN(4) {
+        __rodata_start = .;
+        __fault_handler_table_start = .;
+        KEEP(*(.rodata.fault_handler_table))
+        __fault_handler_table_end = .;
+        *(.rodata .rodata.* .gnu.linkonce.r.*)
+    }
+
+    /* fake section for .data to anchor off of
+     * needed because extra linker scripts tend to insert sections
+     * just after .rodata
+     */
+    .dummy_post_rodata : {
+        /* end of rodata, start of data area */
+        __rodata_end = . ;
+        __data_start_rom = .;
+    }
+
+    /* in two segment binaries, the data starts at the bottom of ram (MEMBASE)
+     * bump us forward to the start of ram
+     */
+    . = %MEMBASE%;
+
+    /* start .data segment, force the physical address to be AT() __data_start_rom */
+    .data : AT ( ADDR (.dummy_post_rodata) + SIZEOF (.dummy_post_rodata) ) ALIGN(4) {
+        __data_start = .;
+        *(.data .data.* .gnu.linkonce.d.*)
+    }
+
+    /* code that is located in ram */
+    .sram.text : ALIGN(4) {
+        KEEP (*(.sram.text*))
+    }
+    .ctors : ALIGN(4) {
+        __ctor_list = .;
+        KEEP(*(.ctors .init_array))
+        __ctor_end = .;
+    }
+    .dtors : ALIGN(4) {
+        __dtor_list = .;
+        KEEP(*(.dtors .fini_array))
+        __dtor_end = .;
+    }
+    .got : { *(.got.plt) *(.got) }
+    .dynamic : { *(.dynamic) }
+
+    /*
+     * extra linker scripts tend to insert sections just after .data,
+     * so we want to make sure this symbol comes after anything inserted above,
+     * but not aligned to the next section necessarily.
+     */
+    .dummy_post_data : {
+        __data_end = .;
+    }
+
+    /* uninitialized data (in same segment as writable data) */
+    .bss : ALIGN(4) {
+        KEEP(*(.bss.prebss.*))
+        . = ALIGN(4);
+        __bss_start = .;
+        *(.bss .bss.*)
+        *(.gnu.linkonce.b.*)
+        *(COMMON)
+        . = ALIGN(4);
+        __bss_end = .;
+    }
+
+    _end = .;
+
+    . = %MEMBASE% + %MEMSIZE%;
+    _end_of_ram = .;
+
+    /* Strip unnecessary stuff */
+    /DISCARD/ : { *(.comment .note .eh_frame) }
+}
diff --git a/src/bsp/lk/arch/arm/toolchain.mk b/src/bsp/lk/arch/arm/toolchain.mk
new file mode 100644
index 0000000..7cd56b2
--- /dev/null
+++ b/src/bsp/lk/arch/arm/toolchain.mk
@@ -0,0 +1,123 @@
+# arch/arm toolchain setup: locates a cross gcc by probing common prefixes
+# (unless ARCH_arm_TOOLCHAIN_PREFIX is preset), then appends per-CPU
+# compile flags to ARCH_arm_COMPILEFLAGS. Guarded so it is include-once.
+ifndef ARCH_arm_TOOLCHAIN_INCLUDED
+ARCH_arm_TOOLCHAIN_INCLUDED := 1
+
+# try to find the toolchain
+ifndef ARCH_arm_TOOLCHAIN_PREFIX
+
+# if TOOLCHAIN_PREFIX is not empty, try to use it first
+ifneq ($(TOOLCHAIN_PREFIX),)
+ARCH_arm_TOOLCHAIN_PREFIX := $(TOOLCHAIN_PREFIX)
+FOUNDTOOL=$(shell which $(ARCH_arm_TOOLCHAIN_PREFIX)gcc)
+endif
+
+# try a series of common arm toolchain prefixes in the path
+ifeq ($(FOUNDTOOL),)
+ARCH_arm_TOOLCHAIN_PREFIX := arm-eabi-
+FOUNDTOOL=$(shell which $(ARCH_arm_TOOLCHAIN_PREFIX)gcc)
+endif
+ifeq ($(FOUNDTOOL),)
+ARCH_arm_TOOLCHAIN_PREFIX := arm-elf-
+FOUNDTOOL=$(shell which $(ARCH_arm_TOOLCHAIN_PREFIX)gcc)
+endif
+ifeq ($(FOUNDTOOL),)
+ARCH_arm_TOOLCHAIN_PREFIX := arm-none-eabi-
+FOUNDTOOL=$(shell which $(ARCH_arm_TOOLCHAIN_PREFIX)gcc)
+endif
+ifeq ($(FOUNDTOOL),)
+ARCH_arm_TOOLCHAIN_PREFIX := arm-linux-gnueabi-
+FOUNDTOOL=$(shell which $(ARCH_arm_TOOLCHAIN_PREFIX)gcc)
+
+# Set no stack protection if we found our gnueabi toolchain. We don't
+# need it.
+#
+# Stack protection is default in this toolchain and we get such errors
+# final linking stage:
+#
+# undefined reference to `__stack_chk_guard'
+# undefined reference to `__stack_chk_fail'
+# undefined reference to `__stack_chk_guard'
+#
+ifneq (,$(findstring arm-linux-gnueabi-,$(FOUNDTOOL)))
+        ARCH_arm_COMPILEFLAGS += -fno-stack-protector
+endif
+endif # arm-linux-gnueabi-
+
+else
+FOUNDTOOL=$(shell which $(ARCH_arm_TOOLCHAIN_PREFIX)gcc)
+endif # ARCH_arm_TOOLCHAIN_PREFIX
+
+ifeq ($(FOUNDTOOL),)
+$(error cannot find toolchain, please set ARCH_arm_TOOLCHAIN_PREFIX or add it to your path)
+endif
+
+# per-CPU compile flags
+ifeq ($(ARM_CPU),cortex-m0)
+ARCH_arm_COMPILEFLAGS += -mcpu=$(ARM_CPU)
+ARCH_arm_COMPILEFLAGS += -mthumb -mfloat-abi=soft
+endif
+ifeq ($(ARM_CPU),cortex-m0plus)
+ARCH_arm_COMPILEFLAGS += -mcpu=$(ARM_CPU)
+ARCH_arm_COMPILEFLAGS += -mthumb -mfloat-abi=soft
+endif
+ifeq ($(ARM_CPU),cortex-m3)
+ARCH_arm_COMPILEFLAGS += -mcpu=$(ARM_CPU)
+endif
+ifeq ($(ARM_CPU),cortex-m4)
+ARCH_arm_COMPILEFLAGS += -mcpu=$(ARM_CPU)
+endif
+# NOTE(review): cortex-m7 is deliberately compiled as cortex-m4 — presumably
+# the supported toolchains lack -mcpu=cortex-m7; confirm before changing.
+ifeq ($(ARM_CPU),cortex-m7)
+ARCH_arm_COMPILEFLAGS += -mcpu=cortex-m4
+endif
+ifeq ($(ARM_CPU),cortex-m4f)
+ARCH_arm_COMPILEFLAGS += -mcpu=cortex-m4 -mfloat-abi=softfp
+endif
+ifeq ($(ARM_CPU),cortex-a7)
+ARCH_arm_COMPILEFLAGS += -mcpu=$(ARM_CPU)
+ARCH_arm_COMPILEFLAGS += -mfpu=neon -mfloat-abi=hard
+endif
+ifeq ($(ARM_CPU),cortex-a8)
+ARCH_arm_COMPILEFLAGS += -mcpu=$(ARM_CPU)
+ARCH_arm_COMPILEFLAGS += -mfpu=neon -mfloat-abi=softfp
+endif
+ifeq ($(ARM_CPU),cortex-a9)
+ARCH_arm_COMPILEFLAGS += -mcpu=$(ARM_CPU)
+endif
+ifeq ($(ARM_CPU),cortex-a9-neon)
+ARCH_arm_COMPILEFLAGS += -mcpu=cortex-a9
+# XXX cannot enable neon right now because compiler generates
+# neon code for 64bit integer ops
+ARCH_arm_COMPILEFLAGS += -mfpu=vfpv3 -mfloat-abi=softfp
+endif
+ifeq ($(ARM_CPU),cortex-a15)
+ARCH_arm_COMPILEFLAGS += -mcpu=$(ARM_CPU)
+ifneq ($(ARM_WITHOUT_VFP_NEON),true)
+ARCH_arm_COMPILEFLAGS += -mfpu=vfpv3 -mfloat-abi=softfp
+endif
+endif
+ifeq ($(ARM_CPU),arm1136j-s)
+ARCH_arm_COMPILEFLAGS += -mcpu=$(ARM_CPU)
+endif
+ifeq ($(ARM_CPU),arm1176jzf-s)
+ARCH_arm_COMPILEFLAGS += -mcpu=$(ARM_CPU)
+endif
+ifeq ($(ARM_CPU),armemu)
+ARCH_arm_COMPILEFLAGS += -march=armv7-a
+endif
+
+# clang builds: derive -target and the external assembler/gcc-toolchain dir
+# from the gcc toolchain prefix found above
+ifeq ($(call TOBOOL,$(CLANGBUILD)),true)
+
+CLANG_ARM_TARGET_SYS ?= linux
+CLANG_ARM_TARGET_ABI ?= gnu
+
+CLANG_ARM_AS_DIR := $(shell dirname $(shell dirname $(ARCH_arm_TOOLCHAIN_PREFIX)))
+
+AS_PATH := $(wildcard $(CLANG_ARM_AS_DIR)/*/bin/as)
+ifeq ($(AS_PATH),)
+$(error Could not find $(CLANG_ARM_AS_DIR)/*/bin/as, did the directory structure change?)
+endif
+
+ARCH_arm_COMPILEFLAGS += -target arm-$(CLANG_ARM_TARGET_SYS)-$(CLANG_ARM_TARGET_ABI) \
+			   --gcc-toolchain=$(CLANG_ARM_AS_DIR)/
+
+endif
+
+endif
diff --git a/src/bsp/lk/arch/arm64/arch.c b/src/bsp/lk/arch/arm64/arch.c
new file mode 100644
index 0000000..c21c4ea
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/arch.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <compiler.h>
+#include <debug.h>
+#include <arch.h>
+#include <arch/ops.h>
+#include <arch/arm64.h>
+#include <arch/arm64/mmu.h>
+#include <arch/mp.h>
+#include <kernel/thread.h>
+#if WITH_KERNEL_VM
+#include <kernel/vm.h>
+#endif
+#include <lk/init.h>
+#include <lk/main.h>
+#include <platform.h>
+#include <target.h>
+#include <trace.h>
+
+#define LOCAL_TRACE 0
+
+#if WITH_SMP
+/* smp boot lock */
+static spin_lock_t arm_boot_cpu_lock = 1;
+static volatile int secondaries_to_init = 0;
+__WEAK const uint8_t *linear_cpuid_map = NULL;
+#endif
+
+/* Per-cpu early initialization: installs the EL1 exception vector table,
+ * drops from a higher exception level to EL1 if necessary, then unmasks
+ * FIQs. Called on the boot cpu from arch_early_init() and on secondaries
+ * from arm64_secondary_entry(). */
+static void arm64_cpu_early_init(void)
+{
+    /* set the vector base */
+    ARM64_WRITE_SYSREG(VBAR_EL1, (uint64_t)&arm64_exception_base);
+
+    /* switch to EL1 */
+    unsigned int current_el = ARM64_READ_SYSREG(CURRENTEL) >> 2;
+    if (current_el > 1) {
+        arm64_elX_to_el1();
+    }
+
+    arch_enable_fiqs();
+}
+
+/* Early arch bringup on the boot cpu: cpu-level init (vectors, EL1, FIQs)
+ * must complete before the platform installs its MMU mappings. */
+void arch_early_init(void)
+{
+    arm64_cpu_early_init();
+    platform_init_mmu_mappings();
+}
+
+/* Later arch init on the boot cpu. With SMP enabled it tells the kernel how
+ * many secondary cpus to expect, then releases them by dropping
+ * arm_boot_cpu_lock (statically initialized held, see top of file). */
+void arch_init(void)
+{
+#if WITH_SMP
+    arch_mp_init_percpu();
+
+    LTRACEF("midr_el1 0x%llx\n", ARM64_READ_SYSREG(midr_el1));
+
+    secondaries_to_init = SMP_MAX_CPUS - 1; /* TODO: get count from somewhere else, or add cpus as they boot */
+
+    lk_init_secondary_cpus(secondaries_to_init);
+
+    LTRACEF("releasing %d secondary cpus\n", secondaries_to_init);
+
+    /* release the secondary cpus */
+    spin_unlock(&arm_boot_cpu_lock);
+
+    /* flush the release of the lock, since the secondary cpus are running without cache on */
+    arch_clean_cache_range((addr_t)&arm_boot_cpu_lock, sizeof(arm_boot_cpu_lock));
+#endif
+}
+
+/* Return the cpu to a near-default state before chain loading.
+ * Intentionally empty on arm64 here; kept so arch_chain_load() has a
+ * uniform hook across architectures. */
+void arch_quiesce(void)
+{
+}
+
+/* Idle the cpu until the next interrupt/event via WFI. */
+void arch_idle(void)
+{
+    __asm__ volatile("wfi");
+}
+
+/* Transfer control to another image at 'entry' with four arguments.
+ * Disables interrupts, quiesces target/platform hardware, translates the
+ * entry point and the asm trampoline (arm64_chain_load) to physical
+ * addresses when the VM is on, disables caches, and jumps to the physical
+ * trampoline. Does not return. */
+void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3)
+{
+    LTRACEF("entry %p, args 0x%lx 0x%lx 0x%lx 0x%lx\n", entry, arg0, arg1, arg2, arg3);
+
+    arch_disable_ints();
+
+    /* give target and platform a chance to put hardware into a suitable
+     * state for chain loading.
+     */
+    target_quiesce();
+    platform_quiesce();
+
+    paddr_t entry_pa;
+    paddr_t loader_pa;
+
+#if WITH_KERNEL_VM
+    /* vaddr->paddr translation can fail for addresses outside the kernel
+     * mappings; treat that as fatal since we are about to turn the MMU off. */
+    entry_pa = kvaddr_to_paddr(entry);
+    if (entry_pa == (paddr_t)NULL) {
+        panic("error translating entry physical address\n");
+    }
+
+    LTRACEF("entry pa 0x%lx\n", entry_pa);
+
+    loader_pa = kvaddr_to_paddr((void *)&arm64_chain_load);
+    if (loader_pa == (paddr_t)NULL) {
+        panic("error translating loader physical address\n");
+    }
+
+    LTRACEF("loader pa 0x%lx\n", loader_pa);
+
+    /* TTBR0_EL1 already contains the physical address mapping */
+    ARM64_WRITE_SYSREG(tcr_el1, (uint64_t)MMU_TCR_FLAGS_IDENT);
+#else
+    entry_pa = (paddr_t)entry;
+    loader_pa = (paddr_t)&arm64_chain_load;
+#endif
+
+    LTRACEF("disabling instruction/data cache\n");
+    arch_disable_cache(UCACHE);
+
+    /* put the booting cpu back into close to a default state */
+    arch_quiesce();
+
+    LTRACEF("branching to physical address of loader\n");
+
+    /* branch to the physical address version of the chain loader routine */
+    void (*loader)(paddr_t entry, ulong, ulong, ulong, ulong) __NO_RETURN = (void *)loader_pa;
+    loader(entry_pa, arg0, arg1, arg2, arg3);
+}
+
+#if WITH_SMP
+/* C entry point for secondary cpus, called from the asm boot path with the
+ * cpu number the asm code computed. Waits on arm_boot_cpu_lock until the
+ * boot cpu releases it in arch_init(), runs pre-threading init levels,
+ * decrements secondaries_to_init (with a SEV to wake waiters), and enters
+ * the scheduler. Silently returns if the kernel's cpu numbering disagrees
+ * with the asm-provided number. */
+void arm64_secondary_entry(ulong asm_cpu_num)
+{
+    uint cpu = arch_curr_cpu_num();
+    if (cpu != asm_cpu_num)
+        return;
+
+    arm64_cpu_early_init();
+
+    /* take/release the boot lock purely to block until arch_init() opens it */
+    spin_lock(&arm_boot_cpu_lock);
+    spin_unlock(&arm_boot_cpu_lock);
+
+    /* run early secondary cpu init routines up to the threading level */
+    lk_init_level(LK_INIT_FLAG_SECONDARY_CPUS, LK_INIT_LEVEL_EARLIEST, LK_INIT_LEVEL_THREADING - 1);
+
+    arch_mp_init_percpu();
+
+    LTRACEF("cpu num %d\n", cpu);
+
+    /* we're done, tell the main cpu we're up */
+    atomic_add(&secondaries_to_init, -1);
+    __asm__ volatile("sev");
+
+    lk_secondary_cpu_entry();
+}
+#endif
+
diff --git a/src/bsp/lk/arch/arm64/asm.S b/src/bsp/lk/arch/arm64/asm.S
new file mode 100644
index 0000000..3adc0ee
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/asm.S
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+#include <arch/asm_macros.h>
+
+/* use x9 ~ x15 as scratch registers */
+tmp     .req x9
+
+/* void arm64_context_switch(vaddr_t *old_sp, vaddr_t new_sp);
+ * Saves the callee-saved registers (x18-x29) and lr on the current stack,
+ * stores the resulting sp through old_sp, switches to new_sp and restores
+ * the same frame layout from it. Caller-saved registers are the compiler's
+ * problem per the AAPCS64 calling convention. */
+FUNCTION(arm64_context_switch)
+    /* save old frame */
+    push x28, x29
+    push x26, x27
+    push x24, x25
+    push x22, x23
+    push x20, x21
+    push x18, x19
+    str  x30, [sp,#-16]!
+
+    /* save old sp */
+    mov  x15, sp
+    str  x15, [x0]
+
+    /* load new sp */
+    mov  sp, x1
+
+    /* restore new frame */
+    ldr  x30, [sp], #16
+    pop  x18, x19
+    pop  x20, x21
+    pop  x22, x23
+    pop  x24, x25
+    pop  x26, x27
+    pop  x28, x29
+
+    ret
+
+/* Physical-address trampoline used by arch_chain_load():
+ * x0 = entry point, x1-x4 = args. Rotates the registers so the target's
+ * entry receives arg0..arg3 in x0-x3, turns the MMU off (VM builds),
+ * invalidates the EL1 TLBs and branches to the entry point. */
+FUNCTION(arm64_chain_load)
+    /* shuffle the args around: entry -> x4, args down into x0-x3 */
+    mov x5, x0
+    mov x0, x1
+    mov x1, x2
+    mov x2, x3
+    mov x3, x4
+    mov x4, x5
+
+#if WITH_KERNEL_VM
+    /* disable MMU: clear SCTLR_EL1.M */
+    mrs x5, sctlr_el1
+    bic x5, x5, #0x1
+    msr sctlr_el1, x5
+    isb
+#endif
+
+    tlbi vmalle1
+    br  x4
+
+/* Drop the cpu from EL3 or EL2 down to EL1h, returning to the caller at
+ * EL1. No-op if already at EL1. Configures the lower level for AArch64
+ * execution, disables EL2 traps, and carries the current sp over to
+ * sp_el1 before the eret. Clobbers the 'tmp' (x9) scratch register. */
+FUNCTION(arm64_elX_to_el1)
+    mrs tmp, CurrentEL
+
+    /* CurrentEL holds the EL in bits [3:2] */
+    cmp tmp, #(0b01 << 2)
+    bne .notEL1
+    /* Already in EL1 */
+    ret
+
+.notEL1:
+    cmp tmp, #(0b10 << 2)
+    beq .inEL2
+
+
+    /* running at EL3: set EL2 to 64bit (SCR_EL3.RW) */
+    mrs tmp, scr_el3
+    orr tmp, tmp, #(1<<10)
+    msr scr_el3, tmp
+
+
+    /* eret from EL3 will land on .Ltarget */
+    adr tmp, .Ltarget
+    msr elr_el3, tmp
+
+    /* DAIF masked, EL1h stack pointer selected */
+    mov tmp, #((0b1111 << 6) | (0b0101)) /* EL1h runlevel */
+    msr spsr_el3, tmp
+    b   .confEL1
+
+.inEL2:
+    adr tmp, .Ltarget
+    msr elr_el2, tmp
+    mov tmp, #((0b1111 << 6) | (0b0101)) /* EL1h runlevel */
+    msr spsr_el2, tmp
+
+
+
+.confEL1:
+    /* disable EL2 coprocessor traps */
+    mov tmp, #0x33ff
+    msr cptr_el2, tmp
+
+    /* set EL1 to 64bit (HCR_EL2.RW) */
+    mov tmp, #(1<<31)
+    msr hcr_el2, tmp
+
+    /* disable EL1 FPU traps (CPACR_EL1.FPEN = 0b11) */
+    mov tmp, #(0b11<<20)
+    msr cpacr_el1, tmp
+
+    /* set up the EL1 bounce interrupt: reuse the current sp at EL1 */
+    mov tmp, sp
+    msr sp_el1, tmp
+
+    isb
+    eret
+
+
+.Ltarget:
+    ret
diff --git a/src/bsp/lk/arch/arm64/cache-ops.S b/src/bsp/lk/arch/arm64/cache-ops.S
new file mode 100644
index 0000000..04ffef8
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/cache-ops.S
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2014, Google Inc. All rights reserved
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <asm.h>
+#include <arch/ops.h>
+#include <arch/defines.h>
+
+#define LOC_SHIFT           24
+#define CLIDR_FIELD_WIDTH   3
+#define LEVEL_SHIFT         1
+#define DCISW               0x0
+#define DCCISW              0x1
+
+.text
+
+/* Apply a cache maintenance instruction (\cache \op) by MVA over
+ * [x0, x0+x1), walking in CACHE_LINE steps from the cache-line-aligned
+ * start address, then dsb. Clobbers x2, x3. */
+.macro cache_range_op, cache op
+    add     x2, x0, x1                  // calculate the end address
+    bic     x3, x0, #(CACHE_LINE-1)     // align the start with a cache line
+.Lcache_range_op_loop\@:
+    \cache  \op, x3
+    add     x3, x3, #CACHE_LINE
+    cmp     x3, x2
+    blo     .Lcache_range_op_loop\@
+    dsb     sy
+.endm
+
+    /* void arch_clean_cache_range(addr_t start, size_t len); */
+FUNCTION(arch_clean_cache_range)
+    cache_range_op dc cvac         // clean cache to PoC by MVA
+    ret
+
+    /* void arch_clean_invalidate_cache_range(addr_t start, size_t len); */
+FUNCTION(arch_clean_invalidate_cache_range)
+    cache_range_op dc civac        // clean & invalidate dcache to PoC by MVA
+    ret
+
+    /* void arch_invalidate_cache_range(addr_t start, size_t len); */
+FUNCTION(arch_invalidate_cache_range)
+    cache_range_op dc ivac         // invalidate dcache to PoC by MVA
+    ret
+
+    /* void arch_sync_cache_range(addr_t start, size_t len); */
+FUNCTION(arch_sync_cache_range)
+    cache_range_op dc cvau         // clean dcache to PoU by MVA
+    cache_range_op ic ivau         // invalidate icache to PoU by MVA
+    ret
+
+/* Perform a dcache maintenance operation by set/way over all cache levels.
+ * Inputs (set up by the dcsw_op macro below): x0 = op index into
+ * dcsw_loop_table (each entry is 8 instructions / 32 bytes, hence lsl #5),
+ * x3 = level of coherence << LEVEL_SHIFT, x9 = clidr_el1, x10 = 0.
+ * will trash x0-x2, x4-x9, x11, x14, x16-x17 */
+LOCAL_FUNCTION(do_dcsw_op)
+        cbz     x3, exit
+        adr     x14, dcsw_loop_table    // compute inner loop address
+        add     x14, x14, x0, lsl #5    // inner loop is 8x32-bit instructions
+        mov     x0, x9
+        mov     w8, #1
+loop1:
+        add     x2, x10, x10, lsr #1    // work out 3x current cache level
+        lsr     x1, x0, x2              // extract cache type bits from clidr
+        and     x1, x1, #7              // mask the bits for current cache only
+        cmp     x1, #2                  // see what cache we have at this level
+        b.lt    level_done              // nothing to do if no cache or icache
+
+        msr     csselr_el1, x10         // select current cache level in csselr
+        isb                             // isb to sych the new cssr&csidr
+        mrs     x1, ccsidr_el1          // read the new ccsidr
+        and     x2, x1, #7              // extract the length of the cache lines
+        add     x2, x2, #4              // add 4 (line length offset)
+        ubfx    x4, x1, #3, #10         // maximum way number
+        clz     w5, w4                  // bit position of way size increment
+        lsl     w9, w4, w5              // w9 = aligned max way number
+        lsl     w16, w8, w5             // w16 = way number loop decrement
+        orr     w9, w10, w9             // w9 = combine way and cache number
+        ubfx    w6, w1, #13, #15        // w6 = max set number
+        lsl     w17, w8, w2             // w17 = set number loop decrement
+        dsb     sy                      // barrier before we start this level
+        br      x14                     // jump to DC operation specific loop
+
+        /* Emit the per-op set/way double loop; ops are laid out in
+         * dcsw_loop_table in x0-index order. */
+        .macro  dcsw_loop _op
+loop2_\_op:
+        lsl     w7, w6, w2              // w7 = aligned max set number
+
+loop3_\_op:
+        orr     w11, w9, w7             // combine cache, way and set number
+        dc      \_op, x11
+        subs    w7, w7, w17             // decrement set number
+        b.ge    loop3_\_op
+
+        subs    x9, x9, x16             // decrement way number
+        b.ge    loop2_\_op
+
+        b       level_done
+.endm
+
+level_done:
+        add     x10, x10, #2            // increment cache number
+        cmp     x3, x10
+        b.gt    loop1
+        msr     csselr_el1, xzr         // select cache level 0 in csselr
+        dsb     sy                      // barrier to complete final cache operation
+        isb
+exit:
+        ret
+
+/* table indexed by x0: DCISW (0) -> isw, DCCISW (1) -> cisw */
+dcsw_loop_table:
+        dcsw_loop isw
+        dcsw_loop cisw
+        dcsw_loop csw
+
+/* Set up and call do_dcsw_op: extract a CLIDR_EL1 field at \shift of width
+ * \fw (the level of coherence) shifted left by \ls into x3.
+ * will trash x3, x9, x10 */
+.macro  dcsw_op shift, fw, ls
+        mrs     x9, clidr_el1
+        ubfx    x3, x9, \shift, \fw
+        lsl     x3, x3, \ls
+        mov     x10, xzr
+        bl      do_dcsw_op
+.endm
+
+/* void arch_enable_cache(uint flags);
+ * Enable the dcache and/or icache (DCACHE/ICACHE bits in flags) at EL1,
+ * invalidating each cache first. Skips a cache that is already enabled
+ * per SCTLR_EL1 (bit 2 = C, bit 12 = I).
+ * For EL1  only.
+ */
+FUNCTION(arch_enable_cache)
+    stp     x29, x30, [sp, #-32]!
+    stp     x24, x25, [sp, #16]
+
+    mov     x25, x0
+    /* check DCACHE flag */
+    tst     x25, #DCACHE
+    b.eq    .L__enable_icache
+    mrs     x24, sctlr_el1
+    tst     x24, #(1<<2)
+    b.ne    .L__enable_icache
+
+    /* invalidate dcache by set/way before enabling */
+    mov     x0, #DCISW
+    dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+
+    /* enable dcache enable bit */
+    orr     x24, x24, #(1<<2)
+    msr     sctlr_el1, x24
+
+.L__enable_icache:
+    /* check ICACHE flag */
+    tst     x25, #ICACHE
+    b.eq    .L__done_enable
+    mrs     x24, sctlr_el1
+    tst     x24, #(1<<12)
+    b.ne    .L__done_enable
+
+    /* invalidate icache */
+    dsb     sy
+    ic      iallu
+    dsb     sy
+    isb
+
+    /* enable icache enable bit */
+    mrs     x24, sctlr_el1
+    orr     x24, x24, #(1<<12)
+    msr     sctlr_el1, x24
+
+.L__done_enable:
+    ldp     x24, x25, [sp, #16]
+    ldp     x29, x30, [sp], #32
+    ret
+
+/* void arch_disable_cache(uint flags)
+ * Disable the dcache and/or icache (DCACHE/ICACHE bits in flags) at EL1.
+ * If the dcache was enabled it is disabled first and then cleaned &
+ * invalidated by set/way; if it was already off it is only invalidated.
+ * The icache is disabled then invalidated to PoU. */
+/* only for el1 here */
+FUNCTION(arch_disable_cache)
+    stp     x29, x30, [sp, #-32]!
+    str     x25, [sp, #16]
+
+    mov     x25, x0
+    /* check DCACHE flag */
+    tst     x25, #DCACHE
+    b.eq    .L__disable_icache
+    mrs     x1, sctlr_el1
+    tst     x1, #(1<<2)
+    b.eq    .L__dcache_already_disabled
+
+    /* disable dcache enable bit */
+    bic     x1, x1, #(1<<2)
+    msr     sctlr_el1, x1
+
+    /* clean & invalidate dcache */
+    mov     x0, #DCCISW
+    b       .L__flush_dcache
+
+.L__dcache_already_disabled:
+    /* invalidate dcache */
+    mov     x0, #DCISW
+.L__flush_dcache:
+    dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+
+.L__disable_icache:
+    /* check ICACHE flag */
+    tst     x25, #ICACHE
+    b.eq    .L__done_disable
+    /* disable icache enable bit */
+    mrs     x1, sctlr_el1
+    bic     x1, x1, #(1<<12)
+    msr     sctlr_el1, x1
+
+    /* invalidate icache for PE to PoU */
+    dsb     sy
+    ic      iallu
+    dsb     sy
+    isb
+
+.L__done_disable:
+    ldr     x25, [sp, #16]
+    ldp     x29, x30, [sp], #32
+    ret
diff --git a/src/bsp/lk/arch/arm64/exceptions.S b/src/bsp/lk/arch/arm64/exceptions.S
new file mode 100644
index 0000000..331de7f
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/exceptions.S
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+#include <arch/asm_macros.h>
+
+.section .text.boot.vectab
+.align 12
+
+#define lr x30
+#define regsave_long_offset 0xf0
+#define regsave_short_offset 0x90
+
+/* Save the full GP register file for exceptions that may context switch:
+ * x0-x29 (15 pushes of 16 bytes = 0xf0 = regsave_long_offset), then lr,
+ * the pre-exception sp, elr_el1 and spsr_el1 in the 32 bytes reserved at
+ * the top of the frame. Layout must match struct arm64_iframe_long. */
+.macro regsave_long
+sub  sp, sp, #32
+push x28, x29
+push x26, x27
+push x24, x25
+push x22, x23
+push x20, x21
+push x18, x19
+push x16, x17
+push x14, x15
+push x12, x13
+push x10, x11
+push x8, x9
+push x6, x7
+push x4, x5
+push x2, x3
+push x0, x1
+add  x0, sp, #regsave_long_offset
+mrs  x1, elr_el1
+mrs  x2, spsr_el1
+stp  lr, x0, [sp, #regsave_long_offset]
+stp  x1, x2, [sp, #regsave_long_offset + 16]
+.endm
+
+/* Lighter-weight save for irq/fiq paths: only the caller-saved registers
+ * x0-x17 (9 pushes of 16 bytes = 0x90 = regsave_short_offset) plus lr,
+ * sp, elr_el1 and spsr_el1. */
+.macro regsave_short
+sub  sp, sp, #32
+push x16, x17
+push x14, x15
+push x12, x13
+push x10, x11
+push x8, x9
+push x6, x7
+push x4, x5
+push x2, x3
+push x0, x1
+add  x0, sp, #regsave_short_offset
+mrs  x1, elr_el1
+mrs  x2, spsr_el1
+stp  lr, x0, [sp, #regsave_short_offset]
+stp  x1, x2, [sp, #regsave_short_offset + 16]
+.endm
+
+/* Inverse of regsave_long: restore elr/spsr then all GP registers. */
+.macro regrestore_long
+ldr  lr, [sp, #regsave_long_offset]
+ldp  x1, x2, [sp, #regsave_long_offset + 16]
+msr  elr_el1, x1
+msr  spsr_el1, x2
+pop x0, x1
+pop x2, x3
+pop x4, x5
+pop x6, x7
+pop x8, x9
+pop x10, x11
+pop x12, x13
+pop x14, x15
+pop x16, x17
+pop x18, x19
+pop x20, x21
+pop x22, x23
+pop x24, x25
+pop x26, x27
+pop x28, x29
+add sp, sp, #32
+.endm
+
+/* Inverse of regsave_short. */
+.macro regrestore_short
+ldr  lr, [sp, #regsave_short_offset]
+ldp  x1, x2, [sp, #regsave_short_offset + 16]
+msr  elr_el1, x1
+msr  spsr_el1, x2
+pop x0, x1
+pop x2, x3
+pop x4, x5
+pop x6, x7
+pop x8, x9
+pop x10, x11
+pop x12, x13
+pop x14, x15
+pop x16, x17
+add sp, sp, #32
+.endm
+
+/* Save everything and hand off to the C handler for vectors that should
+ * never fire; arm64_invalid_exception does not return (b . backstop). */
+.macro invalid_exception, which
+    regsave_long
+    mov x1, #\which
+    mov x0, sp
+    bl  arm64_invalid_exception
+    b   .
+.endm
+
+/* Common irq path: short save, call platform_irq with the iframe, and
+ * preempt if it returns nonzero. FIQs are re-enabled only while elr/spsr
+ * are safely saved, and masked again around the restore. */
+.macro irq_exception
+    regsave_short
+    msr daifclr, #1 /* reenable fiqs once elr and spsr have been saved */
+    mov x0, sp
+    bl  platform_irq
+    cbz x0, .Lirq_exception_no_preempt\@
+    bl  thread_preempt
+.Lirq_exception_no_preempt\@:
+    msr daifset, #1 /* disable fiqs to protect elr and spsr restore */
+    b   arm64_exc_shared_restore_short
+.endm
+
+/* EL1 exception vector table, installed into VBAR_EL1 by arch.c. Each of
+ * the 16 architectural vectors is 0x80 bytes apart (.org spacing). Vectors
+ * this kernel never expects (SP0, aarch64 lower EL, serrors) funnel into
+ * arm64_invalid_exception with a distinguishing code. */
+FUNCTION(arm64_exception_base)
+
+/* exceptions from current EL, using SP0 */
+LOCAL_FUNCTION(arm64_sync_exc_current_el_SP0)
+    invalid_exception 0
+
+.org 0x080
+LOCAL_FUNCTION(arm64_irq_current_el_SP0)
+    invalid_exception 1
+
+.org 0x100
+LOCAL_FUNCTION(arm64_fiq_current_el_SP0)
+    invalid_exception 2
+
+.org 0x180
+LOCAL_FUNCTION(arm64_err_exc_current_el_SP0)
+    invalid_exception 3
+
+/* exceptions from current EL, using SPx */
+.org 0x200
+LOCAL_FUNCTION(arm64_sync_exc_current_el_SPx)
+    regsave_long
+    mov x0, sp
+    bl  arm64_sync_exception
+    b  arm64_exc_shared_restore_long
+
+.org 0x280
+LOCAL_FUNCTION(arm64_irq_current_el_SPx)
+    irq_exception
+
+.org 0x300
+LOCAL_FUNCTION(arm64_fiq_current_el_SPx)
+    regsave_short
+    mov x0, sp
+    bl  platform_fiq
+    b  arm64_exc_shared_restore_short
+
+.org 0x380
+LOCAL_FUNCTION(arm64_err_exc_current_el_SPx)
+    invalid_exception 0x13
+
+/* exceptions from lower EL, running arm64 */
+.org 0x400
+LOCAL_FUNCTION(arm64_sync_exc_lower_el_64)
+    invalid_exception 0x20
+
+.org 0x480
+LOCAL_FUNCTION(arm64_irq_lower_el_64)
+    invalid_exception 0x21
+
+.org 0x500
+LOCAL_FUNCTION(arm64_fiq_lower_el_64)
+    invalid_exception 0x22
+
+.org 0x580
+LOCAL_FUNCTION(arm64_err_exc_lower_el_64)
+    invalid_exception 0x23
+
+/* exceptions from lower EL, running arm32 */
+.org 0x600
+LOCAL_FUNCTION(arm64_sync_exc_lower_el_32)
+    regsave_long
+    mov x0, sp
+    bl  arm64_sync_exception
+    b  arm64_exc_shared_restore_long
+
+.org 0x680
+LOCAL_FUNCTION(arm64_irq_lower_el_32)
+    irq_exception
+
+.org 0x700
+LOCAL_FUNCTION(arm64_fiq_lower_el_32)
+    regsave_short
+    mov x0, sp
+    bl  platform_fiq
+    b  arm64_exc_shared_restore_short
+
+.org 0x780
+LOCAL_FUNCTION(arm64_err_exc_lower_el_32)
+    invalid_exception 0x33
+
+/* shared exception exit paths: restore the frame and eret */
+LOCAL_FUNCTION(arm64_exc_shared_restore_long)
+    regrestore_long
+    eret
+
+LOCAL_FUNCTION(arm64_exc_shared_restore_short)
+       regrestore_short
+       eret
diff --git a/src/bsp/lk/arch/arm64/exceptions_c.c b/src/bsp/lk/arch/arm64/exceptions_c.c
new file mode 100644
index 0000000..3ff211d
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/exceptions_c.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <debug.h>
+#include <arch/arch_ops.h>
+#include <arch/arm64.h>
+
+#define SHUTDOWN_ON_FATAL 1
+
+/* Fixup table entry: when a synchronous fault's ELR equals .pc, the
+ * handler rewrites ELR to .fault_handler and resumes execution there
+ * (see arm64_sync_exception below). */
+struct fault_handler_table_entry {
+    uint64_t pc;
+    uint64_t fault_handler;
+};
+
+/* table bounds — presumably emitted by the linker script; confirm there */
+extern struct fault_handler_table_entry __fault_handler_table_start[];
+extern struct fault_handler_table_entry __fault_handler_table_end[];
+
+/* Dump every saved register from a long iframe to the console for
+ * fault diagnosis; the last two r[] slots are printed as lr and sp,
+ * matching the regsave_long layout. */
+static void dump_iframe(const struct arm64_iframe_long *iframe)
+{
+    printf("iframe %p:\n", iframe);
+    printf("x0  0x%16llx x1  0x%16llx x2  0x%16llx x3  0x%16llx\n", iframe->r[0], iframe->r[1], iframe->r[2], iframe->r[3]);
+    printf("x4  0x%16llx x5  0x%16llx x6  0x%16llx x7  0x%16llx\n", iframe->r[4], iframe->r[5], iframe->r[6], iframe->r[7]);
+    printf("x8  0x%16llx x9  0x%16llx x10 0x%16llx x11 0x%16llx\n", iframe->r[8], iframe->r[9], iframe->r[10], iframe->r[11]);
+    printf("x12 0x%16llx x13 0x%16llx x14 0x%16llx x15 0x%16llx\n", iframe->r[12], iframe->r[13], iframe->r[14], iframe->r[15]);
+    printf("x16 0x%16llx x17 0x%16llx x18 0x%16llx x19 0x%16llx\n", iframe->r[16], iframe->r[17], iframe->r[18], iframe->r[19]);
+    printf("x20 0x%16llx x21 0x%16llx x22 0x%16llx x23 0x%16llx\n", iframe->r[20], iframe->r[21], iframe->r[22], iframe->r[23]);
+    printf("x24 0x%16llx x25 0x%16llx x26 0x%16llx x27 0x%16llx\n", iframe->r[24], iframe->r[25], iframe->r[26], iframe->r[27]);
+    printf("x28 0x%16llx x29 0x%16llx lr  0x%16llx sp  0x%16llx\n", iframe->r[28], iframe->r[29], iframe->r[30], iframe->r[31]);
+    printf("elr 0x%16llx\n", iframe->elr);
+    printf("spsr 0x%16llx\n", iframe->spsr);
+}
+
+/*
+ * C-level handler for synchronous exceptions saved with a long iframe.
+ * Decodes ESR_EL1, dispatches syscalls and FPU-access traps, applies
+ * fault-fixup table entries, and panics on anything unhandled.
+ */
+void arm64_sync_exception(struct arm64_iframe_long *iframe)
+{
+    struct fault_handler_table_entry *fault_handler;
+    uint32_t esr = ARM64_READ_SYSREG(esr_el1);
+    uint32_t ec = esr >> 26;                /* exception class: ESR[31:26] */
+    uint32_t il = (esr >> 25) & 0x1;        /* instruction length: ESR[25] */
+    /* ISS is ESR[24:0] — 25 bits; the previous (1<<24)-1 mask dropped bit 24 */
+    uint32_t iss = esr & ((1u << 25) - 1);
+
+#ifdef WITH_LIB_SYSCALL
+    if (ec == 0x15 || ec == 0x11) { // syscall 64/32
+        void arm64_syscall(struct arm64_iframe_long *iframe);
+        /* run the syscall with FIQs enabled; restore the mask on return */
+        arch_enable_fiqs();
+        arm64_syscall(iframe);
+        arch_disable_fiqs();
+        return;
+    }
+#endif
+
+    /* floating point access trap: lazily re-enable and reload FPU state */
+    if (ec == 0x07) {
+        arm64_fpu_exception(iframe);
+        return;
+    }
+
+    /* if the faulting PC is registered in the fixup table, resume at its handler */
+    for (fault_handler = __fault_handler_table_start; fault_handler < __fault_handler_table_end; fault_handler++) {
+        if (fault_handler->pc == iframe->elr) {
+            iframe->elr = fault_handler->fault_handler;
+            return;
+        }
+    }
+
+    printf("sync_exception\n");
+    dump_iframe(iframe);
+
+    printf("ESR 0x%x: ec 0x%x, il 0x%x, iss 0x%x\n", esr, ec, il, iss);
+
+    if (ec == 0x15) { // syscall
+        printf("syscall\n");
+        return;
+    }
+
+    panic("die\n");
+}
+
+/* Terminal handler for vector slots that should never fire; 'which' is
+ * the numeric tag passed by the invalid_exception vector stubs.  Dumps
+ * the frame and panics (panic is expected not to return). */
+void arm64_invalid_exception(struct arm64_iframe_long *iframe, unsigned int which)
+{
+    printf("invalid exception, which 0x%x\n", which);
+    dump_iframe(iframe);
+
+    panic("die\n");
+}
+
+
+
diff --git a/src/bsp/lk/arch/arm64/exceptions_el2_el3.S b/src/bsp/lk/arch/arm64/exceptions_el2_el3.S
new file mode 100644
index 0000000..8208a2b
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/exceptions_el2_el3.S
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+#include <arch/arm64/mmu.h>
+#include <arch/asm_macros.h>
+
+/* use x9 ~ x15 as scratch registers */
+tmp     .req x9
+tmp2    .req x10
+
+#define ESR_EC_SHIFT    26
+#define ESR_EC_LENGTH   6
+#define EC_AARCH64_HVC  0x16
+#define EC_AARCH64_SMC  0x17
+
+.weak mtk_sip
+
+FUNCTION(setup_el2_or_el3_exception_base)
+    /* install el2 or el3 exception table */
+    ldr     tmp, =.Lel2_or_el3_exception_base
+#if WITH_KERNEL_VM
+    /* NOTE(review): masks the address down to the low
+       MMU_KERNEL_SIZE_SHIFT bits — presumably converting the kernel VA
+       of the table to its physical alias; confirm. */
+    and     tmp, tmp, #~(~0 << MMU_KERNEL_SIZE_SHIFT)
+#endif
+    mrs     tmp2, CurrentEL
+    cmp     tmp2, #(0b11 << 2)  /* in EL3? */
+    b.eq    .Lin_el3
+    cmp     tmp2, #(0b10 << 2)  /* in EL2? */
+    b.eq    .Lin_el2
+    /* NOTE(review): if entered at EL1 this falls through into the EL3
+       path and writes vbar_el3, which would trap — callers must be at
+       EL2 or EL3; confirm. */
+.Lin_el3:
+    msr     vbar_el3, tmp
+    b       .Lexit
+.Lin_el2:
+    msr     vbar_el2, tmp
+.Lexit:
+    ret
+
+.section .text.boot.vectab
+.align 12   /* 2^12 = 4KB alignment for the vector table base */
+
+/*
+ * The next boot stage after lk can be ATF (lk as bl2 bootloader), linux
+ * kernel or hypervisor (lk as bl33 bootloader). Different entry execution
+ * level is required for each next boot stage,
+ *      - ATF: from EL3
+ *      - linux kernel: from EL2 or EL1
+ *      - hypervisor: from EL2
+ * It's necessary for lk to return to its beginning entry level before jumping
+ * to next boot stage.
+ *
+ * SMC or HVC will be used for this purpose, thus we install only the exception
+ * vector to handle sync exception from lower exception level.
+ *
+ * [TODO] add rest exception vectors to catch unhandled exceptions.
+ */
+.Lel2_or_el3_exception_base:
+FUNCTION(arm64_el2_or_el3_exception_base)
+/* exceptions from lower EL, running arm64 */
+.org 0x400
+LOCAL_FUNCTION(arm64_sync_exc_lower_el_64)
+#if WITH_KERNEL_VM
+    /* NOTE(review): rebases SP to its low-bits alias — presumably the
+       physical mapping of the stack; confirm against the MMU layout. */
+    mov     tmp, sp
+    and     sp, tmp, #~(~0 << MMU_KERNEL_SIZE_SHIFT)
+#endif
+    /* pick the ESR of whichever EL we are actually running at */
+    mrs     tmp, CurrentEL
+    cmp     tmp, #(0b11 << 2)   /* in EL3? */
+    b.ne    .LnotEL3
+    mrs     tmp, esr_el3
+    b       .Lcheck_ec
+
+.LnotEL3:
+    cmp     tmp, #(0b10 << 2)   /* in EL2? */
+    b.ne    .Lunhandled_sync_exc
+    mrs     tmp, esr_el2
+
+.Lcheck_ec:
+    /* extract the exception class and accept only SMC/HVC from aarch64 */
+    ubfx    tmp, tmp, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+    cmp     tmp, #EC_AARCH64_SMC
+    b.eq    .Lsip_handler
+    cmp     tmp, #EC_AARCH64_HVC
+    b.ne    .Lunhandled_sync_exc
+
+.Lsip_handler:
+    /* tail-call the (weak) SiP handler; x0-x7 still hold the SMC/HVC
+       arguments since only x9/x10 are used as scratch above */
+    b       mtk_sip
+
+.Lunhandled_sync_exc:
+    /* anything else from a lower EL: spin */
+    b       .
+
diff --git a/src/bsp/lk/arch/arm64/fpu.c b/src/bsp/lk/arch/arm64/fpu.c
new file mode 100644
index 0000000..162b5c1
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/fpu.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2015 Google Inc. All rights reserved
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <arch/arm64.h>
+#include <kernel/thread.h>
+#include <trace.h>
+
+#define LOCAL_TRACE 0
+
+static struct fpstate *current_fpstate[SMP_MAX_CPUS];
+
+/*
+ * Make thread t's FPU/SIMD state live on the calling CPU: restore
+ * q0-q31 and fpcr/fpsr from t->arch.fpstate, unless this CPU already
+ * holds exactly that state.
+ */
+static void arm64_fpu_load_state(struct thread *t)
+{
+    uint cpu = arch_curr_cpu_num();
+    struct fpstate *fpstate = &t->arch.fpstate;
+
+    /* the state is current only if it was the last one loaded on this
+     * CPU AND this CPU was the last one to load it */
+    if (fpstate == current_fpstate[cpu] && fpstate->current_cpu == cpu) {
+        LTRACEF("cpu %d, thread %s, fpstate already valid\n", cpu, t->name);
+        return;
+    }
+    LTRACEF("cpu %d, thread %s, load fpstate %p, last cpu %d, last fpstate %p\n",
+            cpu, t->name, fpstate, fpstate->current_cpu, current_fpstate[cpu]);
+    fpstate->current_cpu = cpu;
+    current_fpstate[cpu] = fpstate;
+
+
+    /* regs[] must hold exactly 32 q registers of 16 bytes each */
+    STATIC_ASSERT(sizeof(fpstate->regs) == 16 * 32);
+    __asm__ volatile("ldp     q0, q1, [%0, #(0 * 32)]\n"
+                     "ldp     q2, q3, [%0, #(1 * 32)]\n"
+                     "ldp     q4, q5, [%0, #(2 * 32)]\n"
+                     "ldp     q6, q7, [%0, #(3 * 32)]\n"
+                     "ldp     q8, q9, [%0, #(4 * 32)]\n"
+                     "ldp     q10, q11, [%0, #(5 * 32)]\n"
+                     "ldp     q12, q13, [%0, #(6 * 32)]\n"
+                     "ldp     q14, q15, [%0, #(7 * 32)]\n"
+                     "ldp     q16, q17, [%0, #(8 * 32)]\n"
+                     "ldp     q18, q19, [%0, #(9 * 32)]\n"
+                     "ldp     q20, q21, [%0, #(10 * 32)]\n"
+                     "ldp     q22, q23, [%0, #(11 * 32)]\n"
+                     "ldp     q24, q25, [%0, #(12 * 32)]\n"
+                     "ldp     q26, q27, [%0, #(13 * 32)]\n"
+                     "ldp     q28, q29, [%0, #(14 * 32)]\n"
+                     "ldp     q30, q31, [%0, #(15 * 32)]\n"
+                     "msr     fpcr, %1\n"
+                     "msr     fpsr, %2\n"
+                     :: "r"(fpstate),
+                     "r"((uint64_t)fpstate->fpcr),
+                     "r"((uint64_t)fpstate->fpsr));
+}
+
+/*
+ * Spill the CPU's live FPU/SIMD state (q0-q31, fpcr, fpsr) into
+ * t->arch.fpstate.  Called from arm64_fpu_pre_context_switch before
+ * FP access is disabled for the outgoing thread.
+ */
+void arm64_fpu_save_state(struct thread *t)
+{
+    uint64_t fpcr, fpsr;
+    struct fpstate *fpstate = &t->arch.fpstate;
+    __asm__ volatile("stp     q0, q1, [%2, #(0 * 32)]\n"
+                     "stp     q2, q3, [%2, #(1 * 32)]\n"
+                     "stp     q4, q5, [%2, #(2 * 32)]\n"
+                     "stp     q6, q7, [%2, #(3 * 32)]\n"
+                     "stp     q8, q9, [%2, #(4 * 32)]\n"
+                     "stp     q10, q11, [%2, #(5 * 32)]\n"
+                     "stp     q12, q13, [%2, #(6 * 32)]\n"
+                     "stp     q14, q15, [%2, #(7 * 32)]\n"
+                     "stp     q16, q17, [%2, #(8 * 32)]\n"
+                     "stp     q18, q19, [%2, #(9 * 32)]\n"
+                     "stp     q20, q21, [%2, #(10 * 32)]\n"
+                     "stp     q22, q23, [%2, #(11 * 32)]\n"
+                     "stp     q24, q25, [%2, #(12 * 32)]\n"
+                     "stp     q26, q27, [%2, #(13 * 32)]\n"
+                     "stp     q28, q29, [%2, #(14 * 32)]\n"
+                     "stp     q30, q31, [%2, #(15 * 32)]\n"
+                     "mrs     %0, fpcr\n"
+                     "mrs     %1, fpsr\n"
+                     : "=r"(fpcr), "=r"(fpsr)
+                     : "r"(fpstate));
+
+    /* fpcr/fpsr are 64-bit reads narrowed into the 32-bit saved fields */
+    fpstate->fpcr = fpcr;
+    fpstate->fpsr = fpsr;
+
+    LTRACEF("thread %s, fpcr %x, fpsr %x\n", t->name, fpstate->fpcr, fpstate->fpsr);
+}
+
+/*
+ * Lazy-FPU trap handler: on the first FP/SIMD access after access was
+ * disabled, re-enable FP in CPACR_EL1 (bits [21:20]) and reload the
+ * current thread's saved state.  No-op if access is already enabled.
+ */
+void arm64_fpu_exception(struct arm64_iframe_long *iframe)
+{
+    uint64_t cpacr = ARM64_READ_SYSREG(cpacr_el1);
+
+    /* access already fully enabled: nothing to do */
+    if (((cpacr >> 20) & 3) == 3)
+        return;
+
+    ARM64_WRITE_SYSREG(cpacr_el1, cpacr | (3 << 20));
+
+    thread_t *t = get_current_thread();
+    if (likely(t))
+        arm64_fpu_load_state(t);
+}
diff --git a/src/bsp/lk/arch/arm64/include/arch/arch_ops.h b/src/bsp/lk/arch/arm64/include/arch/arch_ops.h
new file mode 100644
index 0000000..6c76c2b
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/include/arch/arch_ops.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2008-2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#ifndef ASSEMBLY
+
+#include <stdbool.h>
+#include <compiler.h>
+#include <reg.h>
+#include <arch/arm64.h>
+
+#define USE_GCC_ATOMICS 1
+#define ENABLE_CYCLE_COUNTER 1
+
+// override of some routines
+/* Unmask IRQs (clear DAIF.I).  CF is presumably a compiler fence from
+ * compiler.h — confirm. */
+static inline void arch_enable_ints(void)
+{
+    CF;
+    __asm__ volatile("msr daifclr, #2" ::: "memory");
+}
+
+/* Mask IRQs (set DAIF.I). */
+static inline void arch_disable_ints(void)
+{
+    __asm__ volatile("msr daifset, #2" ::: "memory");
+    CF;
+}
+
+/* True when IRQs are masked (DAIF bit 7 set). */
+static inline bool arch_ints_disabled(void)
+{
+    unsigned long state;
+
+    __asm__ volatile("mrs %0, daif" : "=r"(state));
+    state &= (1<<7);
+
+    return !!state;
+}
+
+/* Unmask FIQs (clear DAIF.F). */
+static inline void arch_enable_fiqs(void)
+{
+    CF;
+    __asm__ volatile("msr daifclr, #1" ::: "memory");
+}
+
+/* Mask FIQs (set DAIF.F). */
+static inline void arch_disable_fiqs(void)
+{
+    __asm__ volatile("msr daifset, #1" ::: "memory");
+    CF;
+}
+
+// XXX
+/* True when FIQs are masked (DAIF bit 6 set). */
+static inline bool arch_fiqs_disabled(void)
+{
+    unsigned long state;
+
+    __asm__ volatile("mrs %0, daif" : "=r"(state));
+    state &= (1<<6);
+
+    return !!state;
+}
+
+#define mb()        __asm__ volatile("dsb sy" : : : "memory")
+#define rmb()       __asm__ volatile("dsb ld" : : : "memory")
+#define wmb()       __asm__ volatile("dsb st" : : : "memory")
+
+#ifdef WITH_SMP
+#define smp_mb()    __asm__ volatile("dmb ish" : : : "memory")
+#define smp_rmb()   __asm__ volatile("dmb ishld" : : : "memory")
+#define smp_wmb()   __asm__ volatile("dmb ishst" : : : "memory")
+#else
+#define smp_mb()    CF
+#define smp_wmb()   CF
+#define smp_rmb()   CF
+#endif
+
+/* Atomically add val to *ptr; returns the previous value (relaxed
+ * ordering).
+ * NOTE(review): the non-USE_GCC_ATOMICS fallbacks in this group are
+ * 32-bit ARM ldrex/strex assembly and would not assemble for arm64;
+ * they are dead code since USE_GCC_ATOMICS is defined to 1 above. */
+static inline int atomic_add(volatile int *ptr, int val)
+{
+#if USE_GCC_ATOMICS
+    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
+#else
+    int old;
+    int temp;
+    int test;
+
+    do {
+        __asm__ volatile(
+            "ldrex  %[old], [%[ptr]]\n"
+            "adds   %[temp], %[old], %[val]\n"
+            "strex  %[test], %[temp], [%[ptr]]\n"
+            : [old]"=&r" (old), [temp]"=&r" (temp), [test]"=&r" (test)
+            : [ptr]"r" (ptr), [val]"r" (val)
+            : "memory", "cc");
+
+    } while (test != 0);
+
+    return old;
+#endif
+}
+
+/* Atomically OR val into *ptr; returns the previous value. */
+static inline int atomic_or(volatile int *ptr, int val)
+{
+#if USE_GCC_ATOMICS
+    return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
+#else
+    int old;
+    int temp;
+    int test;
+
+    do {
+        __asm__ volatile(
+            "ldrex  %[old], [%[ptr]]\n"
+            "orrs   %[temp], %[old], %[val]\n"
+            "strex  %[test], %[temp], [%[ptr]]\n"
+            : [old]"=&r" (old), [temp]"=&r" (temp), [test]"=&r" (test)
+            : [ptr]"r" (ptr), [val]"r" (val)
+            : "memory", "cc");
+
+    } while (test != 0);
+
+    return old;
+#endif
+}
+
+/* Atomically AND val into *ptr; returns the previous value. */
+static inline int atomic_and(volatile int *ptr, int val)
+{
+#if USE_GCC_ATOMICS
+    return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
+#else
+    int old;
+    int temp;
+    int test;
+
+    do {
+        __asm__ volatile(
+            "ldrex  %[old], [%[ptr]]\n"
+            "ands   %[temp], %[old], %[val]\n"
+            "strex  %[test], %[temp], [%[ptr]]\n"
+            : [old]"=&r" (old), [temp]"=&r" (temp), [test]"=&r" (test)
+            : [ptr]"r" (ptr), [val]"r" (val)
+            : "memory", "cc");
+
+    } while (test != 0);
+
+    return old;
+#endif
+}
+
+/* Atomically store val to *ptr; returns the previous value. */
+static inline int atomic_swap(volatile int *ptr, int val)
+{
+#if USE_GCC_ATOMICS
+    return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
+#else
+    int old;
+    int test;
+
+    do {
+        __asm__ volatile(
+            "ldrex  %[old], [%[ptr]]\n"
+            "strex  %[test], %[val], [%[ptr]]\n"
+            : [old]"=&r" (old), [test]"=&r" (test)
+            : [ptr]"r" (ptr), [val]"r" (val)
+            : "memory");
+
+    } while (test != 0);
+
+    return old;
+#endif
+}
+
+/* Compare-and-swap: if *ptr == oldval, store newval.  Returns the value
+ * observed in *ptr before the operation (equals oldval on success).
+ * The builtin updates its local copy of oldval to the observed value. */
+static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval)
+{
+#if USE_GCC_ATOMICS
+    __atomic_compare_exchange_n(ptr, &oldval, newval, false,
+                                __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+    return oldval;
+#else
+    int old;
+    int test;
+
+    do {
+        __asm__ volatile(
+            "ldrex  %[old], [%[ptr]]\n"
+            "mov    %[test], #0\n"
+            "teq    %[old], %[oldval]\n"
+#if ARM_ISA_ARMV7M
+            "bne    0f\n"
+            "strex  %[test], %[newval], [%[ptr]]\n"
+            "0:\n"
+#else
+            "strexeq %[test], %[newval], [%[ptr]]\n"
+#endif
+            : [old]"=&r" (old), [test]"=&r" (test)
+            : [ptr]"r" (ptr), [oldval]"Ir" (oldval), [newval]"r" (newval)
+            : "cc");
+
+    } while (test != 0);
+
+    return old;
+#endif
+}
+
+/* Return a cycle-counter reading.
+ * NOTE(review): both predicates below (ARM_ISA_ARM7M / ARM_ISA_ARMV7)
+ * are 32-bit ARM ones, so in this arm64 header the function falls to
+ * the final branch and always returns 0 — confirm that is intended. */
+static inline uint32_t arch_cycle_count(void)
+{
+#if ARM_ISA_ARM7M
+#if ENABLE_CYCLE_COUNTER
+#define DWT_CYCCNT (0xE0001004)
+    return *REG32(DWT_CYCCNT);
+#else
+    return 0;
+#endif
+#elif ARM_ISA_ARMV7
+    uint32_t count;
+    __asm__ volatile("mrc       p15, 0, %0, c9, c13, 0"
+        : "=r" (count)
+        );
+    return count;
+#else
+//#warning no arch_cycle_count implementation
+    return 0;
+#endif
+}
+
+/* The current thread pointer is cached per-cpu in TPIDR_EL1 so lookup
+ * is a single system-register read. */
+static inline struct thread *get_current_thread(void)
+{
+    return (struct thread *)ARM64_READ_SYSREG(tpidr_el1);
+}
+
+/* Publish t as this cpu's current thread. */
+static inline void set_current_thread(struct thread *t)
+{
+    ARM64_WRITE_SYSREG(tpidr_el1, (uint64_t)t);
+}
+
+#if WITH_SMP
+/* Optional table mapping the compressed MPIDR value to a dense cpu id;
+ * when NULL the compressed value is used directly. */
+extern const uint8_t *linear_cpuid_map;
+
+/* Current cpu number: fold MPIDR_EL1's Aff1 field (bits [15:8]) into
+ * position SMP_CPU_CLUSTER_SHIFT above the Aff0 core id (low byte),
+ * then optionally remap through linear_cpuid_map. */
+static inline uint arch_curr_cpu_num(void)
+{
+    uint64_t mpidr =  ARM64_READ_SYSREG(mpidr_el1);
+    mpidr = ((mpidr & ((1U << SMP_CPU_ID_BITS) - 1)) >> 8 << SMP_CPU_CLUSTER_SHIFT) | (mpidr & 0xff);
+    return linear_cpuid_map ? *(linear_cpuid_map + mpidr) : mpidr;
+}
+#else
+/* Uniprocessor build: there is only cpu 0. */
+static inline uint arch_curr_cpu_num(void)
+{
+    return 0;
+}
+#endif
+
+#endif // ASSEMBLY
+
diff --git a/src/bsp/lk/arch/arm64/include/arch/arch_thread.h b/src/bsp/lk/arch/arm64/include/arch/arch_thread.h
new file mode 100644
index 0000000..922ff14
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/include/arch/arch_thread.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <sys/types.h>
+
+/* Saved FPU/SIMD context: q0-q31 (32 regs x 128 bits stored as 64
+ * uint64_t), plus control/status registers — see arm64_fpu_save_state. */
+struct fpstate {
+    uint64_t    regs[64];   /* q0-q31 contents */
+    uint32_t    fpcr;       /* saved FPCR */
+    uint32_t    fpsr;       /* saved FPSR */
+    uint        current_cpu; /* cpu this state was last loaded on */
+};
+
+/* Per-thread architectural state. */
+struct arch_thread {
+    vaddr_t sp;             /* saved stack pointer (see arm64_context_switch) */
+    /* 16-byte aligned so the q-register ldp/stp save/restore works */
+    struct fpstate fpstate __attribute__((aligned(0x10)));
+};
+
diff --git a/src/bsp/lk/arch/arm64/include/arch/arm64.h b/src/bsp/lk/arch/arm64/include/arch/arm64.h
new file mode 100644
index 0000000..173b03b
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/include/arch/arm64.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <stdbool.h>
+#include <sys/types.h>
+#include <compiler.h>
+
+__BEGIN_CDECLS
+
+/* full-system data barrier / instruction barrier */
+#define DSB __asm__ volatile("dsb sy" ::: "memory")
+#define ISB __asm__ volatile("isb" ::: "memory")
+
+#define STRINGIFY(x) #x
+#define TOSTRING(x) STRINGIFY(x)
+
+/* Read a system register by name, e.g. ARM64_READ_SYSREG(esr_el1). */
+#define ARM64_READ_SYSREG(reg) \
+({ \
+    uint64_t _val; \
+    __asm__ volatile("mrs %0," TOSTRING(reg) : "=r" (_val)); \
+    _val; \
+})
+
+/* Write a system register; the trailing ISB makes the change visible
+ * to subsequent instructions. */
+#define ARM64_WRITE_SYSREG(reg, val) \
+({ \
+    __asm__ volatile("msr " TOSTRING(reg) ", %0" :: "r" (val)); \
+    ISB; \
+})
+
+/* Low-level context switch; saves through *old_sp and resumes new_sp —
+ * presumably implemented in assembly, confirm in the arch asm sources. */
+void arm64_context_switch(vaddr_t *old_sp, vaddr_t new_sp);
+
+/* exception handling */
+/* Frame saved by the long save path: r[0..30] = x0-x30, r[31] = sp
+ * (as printed by dump_iframe), plus ELR and SPSR. */
+struct arm64_iframe_long {
+    uint64_t r[32];
+    uint64_t elr;
+    uint64_t spsr;
+};
+
+/* Smaller frame for the short (IRQ/FIQ) save path; exact register set
+ * is defined by the regsave_short macro — confirm there. */
+struct arm64_iframe_short {
+    uint64_t r[20];
+    uint64_t elr;
+    uint64_t spsr;
+};
+
+struct thread;
+extern void arm64_exception_base(void);
+extern void arm64_el2_or_el3_exception_base(void);
+void arm64_elX_to_el1(void);
+void arm64_fpu_exception(struct arm64_iframe_long *iframe);
+void arm64_fpu_save_state(struct thread *thread);
+void arm64_chain_load(paddr_t entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3) __NO_RETURN;
+
+/* If FP access is currently enabled (CPACR_EL1 bits [21:20] nonzero),
+ * save the outgoing thread's FPU state and disable FP access so the
+ * next FP use traps into arm64_fpu_exception and lazily reloads. */
+static inline void arm64_fpu_pre_context_switch(struct thread *thread)
+{
+    uint64_t cpacr = ARM64_READ_SYSREG(cpacr_el1);
+    if ((cpacr >> 20) & 3) {
+        arm64_fpu_save_state(thread);
+        cpacr &= ~(3 << 20);
+        ARM64_WRITE_SYSREG(cpacr_el1, cpacr);
+    }
+}
+
+__END_CDECLS
+
diff --git a/src/bsp/lk/arch/arm64/include/arch/arm64/mmu.h b/src/bsp/lk/arch/arm64/include/arch/arm64/mmu.h
new file mode 100644
index 0000000..bd6aaf2
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/include/arch/arm64/mmu.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2014 Google Inc. All rights reserved
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __ARCH_ARM64_MMU_H
+#define __ARCH_ARM64_MMU_H
+
+#include <arch/defines.h>
+
+/* IFTE(c,t,e): branchless (c ? t : e) usable in preprocessor arithmetic. */
+#define IFTE(c,t,e) (!!(c) * (t) | !(c) * (e))
+/* NBITSxx(n): number of significant bits in n, computed by binary search
+ * over successively wider halves of the value. */
+#define NBITS01(n)      IFTE(n, 1, 0)
+#define NBITS02(n)      IFTE((n) >>  1,  1 + NBITS01((n) >>  1), NBITS01(n))
+#define NBITS04(n)      IFTE((n) >>  2,  2 + NBITS02((n) >>  2), NBITS02(n))
+#define NBITS08(n)      IFTE((n) >>  4,  4 + NBITS04((n) >>  4), NBITS04(n))
+#define NBITS16(n)      IFTE((n) >>  8,  8 + NBITS08((n) >>  8), NBITS08(n))
+#define NBITS32(n)      IFTE((n) >> 16, 16 + NBITS16((n) >> 16), NBITS16(n))
+#define NBITS(n)        IFTE((n) >> 32, 32 + NBITS32((n) >> 32), NBITS32(n))
+
+#ifndef MMU_KERNEL_SIZE_SHIFT
+/* Size the kernel address space from how far KERNEL_ASPACE_BASE (and
+ * KERNEL_BASE) sit below the top of the 64-bit address space. */
+#define KERNEL_ASPACE_BITS (NBITS(0xffffffffffffffff-KERNEL_ASPACE_BASE))
+#define KERNEL_BASE_BITS (NBITS(0xffffffffffffffff-KERNEL_BASE))
+#if KERNEL_BASE_BITS > KERNEL_ASPACE_BITS
+/* NOTE(review): redefines KERNEL_ASPACE_BITS without an #undef — may provoke
+ * a macro-redefinition warning; confirm the build flags tolerate this. */
+#define KERNEL_ASPACE_BITS KERNEL_BASE_BITS /* KERNEL_BASE should not be below KERNEL_ASPACE_BASE */
+#endif
+
+/* Enforce the 25-bit minimum (matches the MMU_KERNEL_SIZE_SHIFT >= 25
+ * static assert in mmu.c). */
+#if KERNEL_ASPACE_BITS < 25
+#define MMU_KERNEL_SIZE_SHIFT (25)
+#else
+#define MMU_KERNEL_SIZE_SHIFT (KERNEL_ASPACE_BITS)
+#endif
+#endif
+
+#ifndef MMU_USER_SIZE_SHIFT
+#define MMU_USER_SIZE_SHIFT 48
+#endif
+
+#ifndef MMU_IDENT_SIZE_SHIFT
+#define MMU_IDENT_SIZE_SHIFT 42 /* Max size supported by block mappings */
+#endif
+
+#define MMU_KERNEL_PAGE_SIZE_SHIFT      (PAGE_SIZE_SHIFT)
+#define MMU_USER_PAGE_SIZE_SHIFT        (USER_PAGE_SIZE_SHIFT)
+
+#if MMU_IDENT_SIZE_SHIFT < 25
+#error MMU_IDENT_SIZE_SHIFT too small
+#elif MMU_IDENT_SIZE_SHIFT <= 29 /* Use 2MB block mappings (4K page size) */
+#define MMU_IDENT_PAGE_SIZE_SHIFT       (SHIFT_4K)
+#elif MMU_IDENT_SIZE_SHIFT <= 30 /* Use 512MB block mappings (64K page size) */
+#define MMU_IDENT_PAGE_SIZE_SHIFT       (SHIFT_64K)
+#elif MMU_IDENT_SIZE_SHIFT <= 39 /* Use 1GB block mappings (4K page size) */
+#define MMU_IDENT_PAGE_SIZE_SHIFT       (SHIFT_4K)
+#elif MMU_IDENT_SIZE_SHIFT <= 42 /* Use 512MB block mappings (64K page size) */
+#define MMU_IDENT_PAGE_SIZE_SHIFT       (SHIFT_64K)
+#else
+#error MMU_IDENT_SIZE_SHIFT too large
+#endif
+
+/*
+ * TCR TGx values
+ *
+ * Page size:   4K      16K     64K
+ * TG0:         0       2       1
+ * TG1:         2       1       3
+ */
+
+#define MMU_TG0(page_size_shift) ((((page_size_shift == 14) & 1) << 1) | \
+                                  ((page_size_shift == 16) & 1))
+
+#define MMU_TG1(page_size_shift) ((((page_size_shift == 12) & 1) << 1) | \
+                                  ((page_size_shift == 14) & 1) | \
+                                  ((page_size_shift == 16) & 1) | \
+                                  (((page_size_shift == 16) & 1) << 1))
+
+#define MMU_LX_X(page_shift, level) ((4 - (level)) * ((page_shift) - 3) + 3)
+
+#if MMU_USER_SIZE_SHIFT > MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 0)
+#define MMU_USER_TOP_SHIFT MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 0)
+#elif MMU_USER_SIZE_SHIFT > MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 1)
+#define MMU_USER_TOP_SHIFT MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 1)
+#elif MMU_USER_SIZE_SHIFT > MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 2)
+#define MMU_USER_TOP_SHIFT MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 2)
+#elif MMU_USER_SIZE_SHIFT > MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 3)
+#define MMU_USER_TOP_SHIFT MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 3)
+#else
+#error User address space size must be larger than page size
+#endif
+#define MMU_USER_PAGE_TABLE_ENTRIES_TOP (0x1 << (MMU_USER_SIZE_SHIFT - MMU_USER_TOP_SHIFT))
+
+#if MMU_KERNEL_SIZE_SHIFT > MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 0)
+#define MMU_KERNEL_TOP_SHIFT MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 0)
+#elif MMU_KERNEL_SIZE_SHIFT > MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 1)
+#define MMU_KERNEL_TOP_SHIFT MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 1)
+#elif MMU_KERNEL_SIZE_SHIFT > MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 2)
+#define MMU_KERNEL_TOP_SHIFT MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 2)
+#elif MMU_KERNEL_SIZE_SHIFT > MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 3)
+#define MMU_KERNEL_TOP_SHIFT MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 3)
+#else
+#error Kernel address space size must be larger than page size
+#endif
+#define MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP (0x1 << (MMU_KERNEL_SIZE_SHIFT - MMU_KERNEL_TOP_SHIFT))
+
+#if MMU_IDENT_SIZE_SHIFT > MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 0)
+#define MMU_IDENT_TOP_SHIFT MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 0)
+#elif MMU_IDENT_SIZE_SHIFT > MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 1)
+#define MMU_IDENT_TOP_SHIFT MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 1)
+#elif MMU_IDENT_SIZE_SHIFT > MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 2)
+#define MMU_IDENT_TOP_SHIFT MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 2)
+#elif MMU_IDENT_SIZE_SHIFT > MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 3)
+#define MMU_IDENT_TOP_SHIFT MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 3)
+#else
+#error Ident address space size must be larger than page size
+#endif
+#define MMU_PAGE_TABLE_ENTRIES_IDENT_SHIFT (MMU_IDENT_SIZE_SHIFT - MMU_IDENT_TOP_SHIFT)
+#define MMU_PAGE_TABLE_ENTRIES_IDENT (0x1 << MMU_PAGE_TABLE_ENTRIES_IDENT_SHIFT)
+
+#define MMU_PTE_DESCRIPTOR_BLOCK_MAX_SHIFT      (30)
+
+#ifndef ASSEMBLY
+#define BM(base, count, val) (((val) & ((1UL << (count)) - 1)) << (base))
+#else
+#define BM(base, count, val) (((val) & ((0x1 << (count)) - 1)) << (base))
+#endif
+
+#define MMU_SH_NON_SHAREABLE                    (0)
+#define MMU_SH_OUTER_SHAREABLE                  (2)
+#define MMU_SH_INNER_SHAREABLE                  (3)
+
+#define MMU_RGN_NON_CACHEABLE                   (0)
+#define MMU_RGN_WRITE_BACK_ALLOCATE             (1)
+#define MMU_RGN_WRITE_THROUGH_NO_ALLOCATE       (2)
+#define MMU_RGN_WRITE_BACK_NO_ALLOCATE          (3)
+
+#define MMU_TCR_TBI1                            BM(38, 1, 1)
+#define MMU_TCR_TBI0                            BM(37, 1, 1)
+#define MMU_TCR_AS                              BM(36, 1, 1)
+#define MMU_TCR_IPS(size)                       BM(32, 3, (size))
+#define MMU_TCR_TG1(granule_size)               BM(30, 2, (granule_size))
+#define MMU_TCR_SH1(shareability_flags)         BM(28, 2, (shareability_flags))
+#define MMU_TCR_ORGN1(cache_flags)              BM(26, 2, (cache_flags))
+#define MMU_TCR_IRGN1(cache_flags)              BM(24, 2, (cache_flags))
+#define MMU_TCR_EPD1                            BM(23, 1, 1)
+#define MMU_TCR_A1                              BM(22, 1, 1)
+#define MMU_TCR_T1SZ(size)                      BM(16, 6, (size))
+#define MMU_TCR_TG0(granule_size)               BM(14, 2, (granule_size))
+#define MMU_TCR_SH0(shareability_flags)         BM(12, 2, (shareability_flags))
+#define MMU_TCR_ORGN0(cache_flags)              BM(10, 2, (cache_flags))
+#define MMU_TCR_IRGN0(cache_flags)              BM( 8, 2, (cache_flags))
+#define MMU_TCR_EPD0                            BM( 7, 1, 1)
+#define MMU_TCR_T0SZ(size)                      BM( 0, 6, (size))
+
+#define MMU_MAIR_ATTR(index, attr)              BM(index * 8, 8, (attr))
+
+
+/* L0/L1/L2/L3 descriptor types */
+#define MMU_PTE_DESCRIPTOR_INVALID              BM(0, 2, 0)
+#define MMU_PTE_DESCRIPTOR_MASK                 BM(0, 2, 3)
+
+/* L0/L1/L2 descriptor types */
+#define MMU_PTE_L012_DESCRIPTOR_BLOCK           BM(0, 2, 1)
+#define MMU_PTE_L012_DESCRIPTOR_TABLE           BM(0, 2, 3)
+
+/* L3 descriptor types */
+#define MMU_PTE_L3_DESCRIPTOR_PAGE              BM(0, 2, 3)
+
+/* Output address mask */
+#define MMU_PTE_OUTPUT_ADDR_MASK                BM(12, 36, 0xfffffffff)
+
+/* Table attrs */
+#define MMU_PTE_ATTR_NS_TABLE                   BM(63, 1, 1)
+#define MMU_PTE_ATTR_AP_TABLE_NO_WRITE          BM(62, 1, 1)
+#define MMU_PTE_ATTR_AP_TABLE_NO_EL0            BM(61, 1, 1)
+#define MMU_PTE_ATTR_UXN_TABLE                  BM(60, 1, 1)
+#define MMU_PTE_ATTR_PXN_TABLE                  BM(59, 1, 1)
+
+/* Block/Page attrs */
+#define MMU_PTE_ATTR_RES_SOFTWARE               BM(55, 4, 0xf)
+#define MMU_PTE_ATTR_UXN                        BM(54, 1, 1)
+#define MMU_PTE_ATTR_PXN                        BM(53, 1, 1)
+#define MMU_PTE_ATTR_CONTIGUOUS                 BM(52, 1, 1)
+
+#define MMU_PTE_ATTR_NON_GLOBAL                 BM(11, 1, 1)
+#define MMU_PTE_ATTR_AF                         BM(10, 1, 1)
+
+#define MMU_PTE_ATTR_SH_NON_SHAREABLE           BM(8, 2, 0)
+#define MMU_PTE_ATTR_SH_OUTER_SHAREABLE         BM(8, 2, 2)
+#define MMU_PTE_ATTR_SH_INNER_SHAREABLE         BM(8, 2, 3)
+
+#define MMU_PTE_ATTR_AP_P_RW_U_NA               BM(6, 2, 0)
+#define MMU_PTE_ATTR_AP_P_RW_U_RW               BM(6, 2, 1)
+#define MMU_PTE_ATTR_AP_P_RO_U_NA               BM(6, 2, 2)
+#define MMU_PTE_ATTR_AP_P_RO_U_RO               BM(6, 2, 3)
+#define MMU_PTE_ATTR_AP_MASK                    BM(6, 2, 3)
+
+#define MMU_PTE_ATTR_NON_SECURE                 BM(5, 1, 1)
+
+#define MMU_PTE_ATTR_ATTR_INDEX(attrindex)      BM(2, 3, attrindex)
+#define MMU_PTE_ATTR_ATTR_INDEX_MASK            MMU_PTE_ATTR_ATTR_INDEX(7)
+
+/* Default configuration for main kernel page table:
+ *    - do cached translation walks
+ */
+
+/* Device-nGnRnE memory */
+#define MMU_MAIR_ATTR0                  MMU_MAIR_ATTR(0, 0x00)
+#define MMU_PTE_ATTR_STRONGLY_ORDERED   MMU_PTE_ATTR_ATTR_INDEX(0)
+
+/* Device-nGnRE memory */
+#define MMU_MAIR_ATTR1                  MMU_MAIR_ATTR(1, 0x04)
+#define MMU_PTE_ATTR_DEVICE             MMU_PTE_ATTR_ATTR_INDEX(1)
+
+/* Normal Memory, Outer Write-back non-transient Read/Write allocate,
+ * Inner Write-back non-transient Read/Write allocate
+ */
+#define MMU_MAIR_ATTR2                  MMU_MAIR_ATTR(2, 0xff)
+#define MMU_PTE_ATTR_NORMAL_MEMORY      MMU_PTE_ATTR_ATTR_INDEX(2)
+
+#define MMU_MAIR_ATTR3                  (0)
+#define MMU_MAIR_ATTR4                  (0)
+#define MMU_MAIR_ATTR5                  (0)
+#define MMU_MAIR_ATTR6                  (0)
+#define MMU_MAIR_ATTR7                  (0)
+
+#define MMU_MAIR_VAL                    (MMU_MAIR_ATTR0 | MMU_MAIR_ATTR1 | \
+                                         MMU_MAIR_ATTR2 | MMU_MAIR_ATTR3 | \
+                                         MMU_MAIR_ATTR4 | MMU_MAIR_ATTR5 | \
+                                         MMU_MAIR_ATTR6 | MMU_MAIR_ATTR7 )
+
+#define MMU_TCR_IPS_DEFAULT MMU_TCR_IPS(2) /* TODO: read at runtime, or configure per platform */
+
+/* Enable cached page table walks:
+ * inner/outer (IRGN/ORGN): write-back + write-allocate
+ */
+#define MMU_TCR_FLAGS1 (MMU_TCR_TG1(MMU_TG1(MMU_KERNEL_PAGE_SIZE_SHIFT)) | \
+                        MMU_TCR_SH1(MMU_SH_INNER_SHAREABLE) | \
+                        MMU_TCR_ORGN1(MMU_RGN_WRITE_BACK_ALLOCATE) | \
+                        MMU_TCR_IRGN1(MMU_RGN_WRITE_BACK_ALLOCATE) | \
+                        MMU_TCR_T1SZ(64 - MMU_KERNEL_SIZE_SHIFT))
+#define MMU_TCR_FLAGS0 (MMU_TCR_TG0(MMU_TG0(MMU_USER_PAGE_SIZE_SHIFT)) | \
+                        MMU_TCR_SH0(MMU_SH_INNER_SHAREABLE) | \
+                        MMU_TCR_ORGN0(MMU_RGN_WRITE_BACK_ALLOCATE) | \
+                        MMU_TCR_IRGN0(MMU_RGN_WRITE_BACK_ALLOCATE) | \
+                        MMU_TCR_T0SZ(64 - MMU_USER_SIZE_SHIFT))
+#define MMU_TCR_FLAGS0_IDENT \
+                       (MMU_TCR_TG0(MMU_TG0(MMU_IDENT_PAGE_SIZE_SHIFT)) | \
+                        MMU_TCR_SH0(MMU_SH_INNER_SHAREABLE) | \
+                        MMU_TCR_ORGN0(MMU_RGN_WRITE_BACK_ALLOCATE) | \
+                        MMU_TCR_IRGN0(MMU_RGN_WRITE_BACK_ALLOCATE) | \
+                        MMU_TCR_T0SZ(64 - MMU_IDENT_SIZE_SHIFT))
+#define MMU_TCR_FLAGS_IDENT (MMU_TCR_IPS_DEFAULT | MMU_TCR_FLAGS1 | MMU_TCR_FLAGS0_IDENT)
+#define MMU_TCR_FLAGS_KERNEL (MMU_TCR_IPS_DEFAULT | MMU_TCR_FLAGS1 | MMU_TCR_FLAGS0 | MMU_TCR_EPD0)
+#define MMU_TCR_FLAGS_USER (MMU_TCR_IPS_DEFAULT | MMU_TCR_FLAGS1 | MMU_TCR_FLAGS0)
+
+
+#if MMU_IDENT_SIZE_SHIFT > MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 2)
+#define MMU_PTE_IDENT_DESCRIPTOR MMU_PTE_L012_DESCRIPTOR_BLOCK
+#else
+#define MMU_PTE_IDENT_DESCRIPTOR MMU_PTE_L3_DESCRIPTOR_PAGE
+#endif
+#define MMU_PTE_IDENT_FLAGS \
+    (MMU_PTE_IDENT_DESCRIPTOR | \
+     MMU_PTE_ATTR_AF | \
+     MMU_PTE_ATTR_SH_INNER_SHAREABLE | \
+     MMU_PTE_ATTR_NORMAL_MEMORY | \
+     MMU_PTE_ATTR_AP_P_RW_U_NA)
+
+#define MMU_PTE_KERNEL_RO_FLAGS \
+    (MMU_PTE_ATTR_UXN | \
+     MMU_PTE_ATTR_AF | \
+     MMU_PTE_ATTR_SH_INNER_SHAREABLE | \
+     MMU_PTE_ATTR_NORMAL_MEMORY | \
+     MMU_PTE_ATTR_AP_P_RO_U_NA)
+
+#define MMU_PTE_KERNEL_DATA_FLAGS \
+    (MMU_PTE_ATTR_UXN | \
+     MMU_PTE_ATTR_PXN | \
+     MMU_PTE_ATTR_AF | \
+     MMU_PTE_ATTR_SH_INNER_SHAREABLE | \
+     MMU_PTE_ATTR_NORMAL_MEMORY | \
+     MMU_PTE_ATTR_AP_P_RW_U_NA)
+
+#define MMU_INITIAL_MAP_STRONGLY_ORDERED \
+    (MMU_PTE_ATTR_UXN | \
+     MMU_PTE_ATTR_PXN | \
+     MMU_PTE_ATTR_AF | \
+     MMU_PTE_ATTR_STRONGLY_ORDERED | \
+     MMU_PTE_ATTR_AP_P_RW_U_NA)
+
+#define MMU_INITIAL_MAP_DEVICE \
+    (MMU_PTE_ATTR_UXN | \
+     MMU_PTE_ATTR_PXN | \
+     MMU_PTE_ATTR_AF | \
+     MMU_PTE_ATTR_DEVICE | \
+     MMU_PTE_ATTR_AP_P_RW_U_NA)
+
+#ifndef ASSEMBLY
+
+#include <sys/types.h>
+#include <assert.h>
+#include <compiler.h>
+#include <arch/arm64.h>
+
+typedef uint64_t pte_t;
+
+__BEGIN_CDECLS
+
+#define ARM64_TLBI_NOADDR(op) \
+({ \
+    __asm__ volatile("tlbi " #op::); \
+    ISB; \
+})
+
+#define ARM64_TLBI(op, val) \
+({ \
+    __asm__ volatile("tlbi " #op ", %0" :: "r" (val)); \
+    ISB; \
+})
+
+#define MMU_ARM64_GLOBAL_ASID (~0U)
+int arm64_mmu_map(vaddr_t vaddr, paddr_t paddr, size_t size, pte_t attrs,
+                  vaddr_t vaddr_base, uint top_size_shift,
+                  uint top_index_shift, uint page_size_shift,
+                  pte_t *top_page_table, uint asid);
+int arm64_mmu_unmap(vaddr_t vaddr, size_t size,
+                    vaddr_t vaddr_base, uint top_size_shift,
+                    uint top_index_shift, uint page_size_shift,
+                    pte_t *top_page_table, uint asid);
+
+__END_CDECLS
+#endif /* ASSEMBLY */
+
+#endif
diff --git a/src/bsp/lk/arch/arm64/include/arch/asm_macros.h b/src/bsp/lk/arch/arm64/include/arch/asm_macros.h
new file mode 100644
index 0000000..05543f0
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/include/arch/asm_macros.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+/* Push/pop a pair of registers as one 16-byte stack slot (keeps SP 16-byte
+ * aligned, as AArch64 requires at memory accesses). */
+.macro push ra, rb
+stp \ra, \rb, [sp,#-16]!
+.endm
+
+.macro pop ra, rb
+ldp \ra, \rb, [sp], #16
+.endm
+
+/* tbzmask: tbz taking a single-bit mask instead of a bit number. Recurses,
+ * incrementing \shift until \mask == 1 << \shift; errors out if \mask is
+ * not a single bit within 64 bits. */
+.macro tbzmask, reg, mask, label, shift=0
+.if \shift >= 64
+    .error "tbzmask: unsupported mask, \mask"
+.elseif \mask == 1 << \shift
+    tbz     \reg, #\shift, \label
+.else
+    tbzmask \reg, \mask, \label, "(\shift + 1)"
+.endif
+.endm
+
+/* tbnzmask: tbnz (branch if bit set) variant of tbzmask above. */
+.macro tbnzmask, reg, mask, label, shift=0
+.if \shift >= 64
+    .error "tbnzmask: unsupported mask, \mask"
+.elseif \mask == 1 << \shift
+    tbnz     \reg, #\shift, \label
+.else
+    tbnzmask \reg, \mask, \label, "(\shift + 1)"
+.endif
+.endm
+
+/* Allocate one zeroed, naturally-aligned 2^\size_shift block from the
+ * boot-time bump allocator (boot_alloc_end), returning its start in
+ * \new_ptr and its end in \new_ptr_end. \phys_offset is subtracted from
+ * both results to translate the stored address for the caller. */
+.macro calloc_bootmem_aligned, new_ptr, new_ptr_end, tmp, size_shift, phys_offset=0
+.if \size_shift < 4
+    .error "calloc_bootmem_aligned: Unsupported size_shift, \size_shift"
+.endif
+
+    /* load boot_alloc_end */
+    adrp    \tmp, boot_alloc_end
+    ldr     \new_ptr, [\tmp, #:lo12:boot_alloc_end]
+
+    /* round up to 2^size_shift alignment; for size_shift > 12 the combined
+     * immediate (1 << size_shift) - 1 will not encode, so split it in two */
+.if \size_shift > 12
+    add     \new_ptr, \new_ptr, #(1 << \size_shift)
+    sub     \new_ptr, \new_ptr, #1
+.else
+    add     \new_ptr, \new_ptr, #(1 << \size_shift) - 1
+.endif
+    and     \new_ptr, \new_ptr, #~((1 << \size_shift) - 1)
+
+    /* reserve one 2^size_shift block and store the new boot_alloc_end */
+    add     \new_ptr_end, \new_ptr, #(1 << \size_shift)
+    str     \new_ptr_end, [\tmp, #:lo12:boot_alloc_end]
+
+    /* translate address */
+    sub     \new_ptr, \new_ptr, \phys_offset
+    sub     \new_ptr_end, \new_ptr_end, \phys_offset
+
+    /* zero the block, 16 bytes per iteration (size is a multiple of 16
+     * because size_shift >= 4) */
+    mov     \tmp, \new_ptr
+.Lcalloc_bootmem_aligned_clear_loop\@:
+    stp     xzr, xzr, [\tmp], #16
+    cmp     \tmp, \new_ptr_end
+    b.lo    .Lcalloc_bootmem_aligned_clear_loop\@
+.endm
+
+/* Set fault handler for next instruction: emits a (faulting PC, handler)
+ * pair into the .rodata.fault_handler_table section — presumably scanned by
+ * the fault path to resume at \handler; confirm against the exception code. */
+.macro set_fault_handler, handler
+.Lfault_location\@:
+.pushsection .rodata.fault_handler_table
+    .quad    .Lfault_location\@
+    .quad    \handler
+.popsection
+.endm
diff --git a/src/bsp/lk/arch/arm64/include/arch/defines.h b/src/bsp/lk/arch/arm64/include/arch/defines.h
new file mode 100755
index 0000000..2907825
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/include/arch/defines.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2008 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#define SHIFT_4K        (12)
+#define SHIFT_16K       (14)
+#define SHIFT_64K       (16)
+
+/* arm specific stuff */
+#ifdef ARM64_LARGE_PAGESIZE_64K
+#define PAGE_SIZE_SHIFT (SHIFT_64K)
+#elif ARM64_LARGE_PAGESIZE_16K
+#define PAGE_SIZE_SHIFT (SHIFT_16K)
+#else
+#define PAGE_SIZE_SHIFT (SHIFT_4K)
+#endif
+#define USER_PAGE_SIZE_SHIFT SHIFT_4K
+
+#define PAGE_SIZE (1UL << PAGE_SIZE_SHIFT)
+#define USER_PAGE_SIZE (1UL << USER_PAGE_SIZE_SHIFT)
+
+#define CACHE_LINE 64
+
diff --git a/src/bsp/lk/arch/arm64/include/arch/spinlock.h b/src/bsp/lk/arch/arm64/include/arch/spinlock.h
new file mode 100644
index 0000000..f063cee
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/include/arch/spinlock.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <arch/ops.h>
+#include <stdbool.h>
+
+#define SPIN_LOCK_INITIAL_VALUE (0)
+
+typedef unsigned long spin_lock_t;
+
+typedef unsigned int spin_lock_saved_state_t;
+typedef unsigned int spin_lock_save_flags_t;
+
+#if WITH_SMP
+void arch_spin_lock(spin_lock_t *lock);
+int arch_spin_trylock(spin_lock_t *lock);
+void arch_spin_unlock(spin_lock_t *lock);
+#else
+/* Uniprocessor build: no other CPU can contend, so locking just records
+ * ownership for arch_spin_lock_held(); interrupt masking is handled
+ * separately by arch_interrupt_save()/arch_interrupt_restore(). */
+static inline void arch_spin_lock(spin_lock_t *lock)
+{
+    *lock = 1;
+}
+
+/* Always "succeeds" on UP; 0 presumably means acquired (matching the SMP
+ * variant's contract — confirm in callers). NOTE(review): unlike
+ * arch_spin_lock() it does not set *lock, so arch_spin_lock_held() stays
+ * false after a successful trylock — confirm this is intended. */
+static inline int arch_spin_trylock(spin_lock_t *lock)
+{
+    return 0;
+}
+
+static inline void arch_spin_unlock(spin_lock_t *lock)
+{
+    *lock = 0;
+}
+#endif
+
+/* Reset a lock to the unlocked state. */
+static inline void arch_spin_lock_init(spin_lock_t *lock)
+{
+    *lock = SPIN_LOCK_INITIAL_VALUE;
+}
+
+/* Best-effort check: true if the lock value is nonzero (taken). */
+static inline bool arch_spin_lock_held(spin_lock_t *lock)
+{
+    return *lock != 0;
+}
+
+enum {
+    /* Possible future flags:
+     * SPIN_LOCK_FLAG_PMR_MASK         = 0x000000ff,
+     * SPIN_LOCK_FLAG_PREEMPTION       = 0x10000000,
+     * SPIN_LOCK_FLAG_SET_PMR          = 0x20000000,
+     */
+
+    /* ARM specific flags */
+    SPIN_LOCK_FLAG_IRQ              = 0x40000000,
+    SPIN_LOCK_FLAG_FIQ              = 0x80000000, /* Do not use unless IRQs are already disabled */
+    SPIN_LOCK_FLAG_IRQ_FIQ          = SPIN_LOCK_FLAG_IRQ | SPIN_LOCK_FLAG_FIQ,
+
+    /* Generic flags */
+    SPIN_LOCK_FLAG_INTERRUPTS       = SPIN_LOCK_FLAG_IRQ,
+};
+
+    /* default arm flag is to just disable plain irqs */
+#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS  SPIN_LOCK_FLAG_INTERRUPTS
+
+enum {
+    /* private */
+    SPIN_LOCK_STATE_RESTORE_IRQ = 1,
+    SPIN_LOCK_STATE_RESTORE_FIQ = 2,
+};
+
+/* Disable the interrupt classes requested in flags (IRQ and/or FIQ),
+ * recording in *statep which of them were actually enabled beforehand so
+ * that arch_interrupt_restore() re-enables only those. */
+static inline void
+arch_interrupt_save(spin_lock_saved_state_t *statep, spin_lock_save_flags_t flags)
+{
+    spin_lock_saved_state_t state = 0;
+    if ((flags & SPIN_LOCK_FLAG_IRQ) && !arch_ints_disabled()) {
+        state |= SPIN_LOCK_STATE_RESTORE_IRQ;
+        arch_disable_ints();
+    }
+    if ((flags & SPIN_LOCK_FLAG_FIQ) && !arch_fiqs_disabled()) {
+        state |= SPIN_LOCK_STATE_RESTORE_FIQ;
+        arch_disable_fiqs();
+    }
+    *statep = state;
+}
+
+/* Re-enable only the interrupt classes that arch_interrupt_save() recorded
+ * as previously enabled; FIQs are restored before IRQs. */
+static inline void
+arch_interrupt_restore(spin_lock_saved_state_t old_state, spin_lock_save_flags_t flags)
+{
+    if ((flags & SPIN_LOCK_FLAG_FIQ) && (old_state & SPIN_LOCK_STATE_RESTORE_FIQ))
+        arch_enable_fiqs();
+    if ((flags & SPIN_LOCK_FLAG_IRQ) && (old_state & SPIN_LOCK_STATE_RESTORE_IRQ))
+        arch_enable_ints();
+}
+
+
+
diff --git a/src/bsp/lk/arch/arm64/mmu.c b/src/bsp/lk/arch/arm64/mmu.c
new file mode 100644
index 0000000..c723bf8
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/mmu.c
@@ -0,0 +1,491 @@
+/*
+ * Copyright (c) 2014 Google Inc. All rights reserved
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <arch/arm64/mmu.h>
+#include <assert.h>
+#include <debug.h>
+#include <err.h>
+#include <kernel/vm.h>
+#include <lib/heap.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <trace.h>
+
+#define LOCAL_TRACE 0
+
+STATIC_ASSERT(((long)KERNEL_BASE >> MMU_KERNEL_SIZE_SHIFT) == -1);
+STATIC_ASSERT(((long)KERNEL_ASPACE_BASE >> MMU_KERNEL_SIZE_SHIFT) == -1);
+STATIC_ASSERT(MMU_KERNEL_SIZE_SHIFT <= 48);
+STATIC_ASSERT(MMU_KERNEL_SIZE_SHIFT >= 25);
+
+/* the main translation table */
+pte_t arm64_kernel_translation_table[MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP] __ALIGNED(MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP * 8) __SECTION(".bss.prebss.translation_table");
+
+/* convert user level mmu flags to flags that go in L1 descriptors */
+/*
+ * Translate ARCH_MMU_FLAG_* bits into an ARM64 PTE attribute field.
+ *
+ * flags: cache type (ARCH_MMU_FLAG_CACHE_MASK bits), PERM_USER/PERM_RO,
+ *        PERM_NO_EXECUTE and NS.
+ * Returns the attribute bits (AF always set), or ERR_INVALID_ARGS cast to
+ * pte_t when the cache-type bits are unrecognized.
+ * NOTE(review): arch_mmu_map() passes the result straight into the page
+ * tables without checking for the error value — confirm flags are validated
+ * at a higher level.
+ */
+static pte_t mmu_flags_to_pte_attr(uint flags)
+{
+    pte_t attr = MMU_PTE_ATTR_AF;
+
+    switch (flags & ARCH_MMU_FLAG_CACHE_MASK) {
+        case ARCH_MMU_FLAG_CACHED:
+            attr |= MMU_PTE_ATTR_NORMAL_MEMORY | MMU_PTE_ATTR_SH_INNER_SHAREABLE;
+            break;
+        case ARCH_MMU_FLAG_UNCACHED:
+            attr |= MMU_PTE_ATTR_STRONGLY_ORDERED;
+            break;
+        case ARCH_MMU_FLAG_UNCACHED_DEVICE:
+            attr |= MMU_PTE_ATTR_DEVICE;
+            break;
+        default:
+            /* invalid user-supplied flag; assert false so debug builds trap
+             * (was DEBUG_ASSERT(1), which could never fire) */
+            DEBUG_ASSERT(0);
+            return ERR_INVALID_ARGS;
+    }
+
+    switch (flags & (ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO)) {
+        case 0:
+            attr |= MMU_PTE_ATTR_AP_P_RW_U_NA;
+            break;
+        case ARCH_MMU_FLAG_PERM_RO:
+            attr |= MMU_PTE_ATTR_AP_P_RO_U_NA;
+            break;
+        case ARCH_MMU_FLAG_PERM_USER:
+            attr |= MMU_PTE_ATTR_AP_P_RW_U_RW;
+            break;
+        case ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO:
+            attr |= MMU_PTE_ATTR_AP_P_RO_U_RO;
+            break;
+    }
+
+    if (flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE) {
+        attr |= MMU_PTE_ATTR_UXN | MMU_PTE_ATTR_PXN;
+    }
+
+    if (flags & ARCH_MMU_FLAG_NS) {
+        attr |= MMU_PTE_ATTR_NON_SECURE;
+    }
+
+    return attr;
+}
+
+/*
+ * Walk the kernel translation table for 'vaddr' and report the backing
+ * physical address (*paddr) and ARCH_MMU_FLAG_* attributes (*flags).
+ * Either output pointer may be NULL to skip that result.
+ *
+ * Returns ERR_INVALID_ARGS for addresses below the kernel aspace base,
+ * ERR_NOT_FOUND for unmapped addresses, 0 on success.
+ */
+status_t arch_mmu_query(vaddr_t vaddr, paddr_t *paddr, uint *flags)
+{
+    uint index;
+    uint index_shift;
+    pte_t pte;
+    pte_t pte_addr;
+    uint descriptor_type;
+    pte_t *page_table;
+    vaddr_t kernel_base = ~0UL << MMU_KERNEL_SIZE_SHIFT;
+    vaddr_t vaddr_rem;
+
+    if (vaddr < kernel_base) {
+        TRACEF("vaddr 0x%lx < base 0x%lx\n", vaddr, kernel_base);
+        return ERR_INVALID_ARGS;
+    }
+
+    index_shift = MMU_KERNEL_TOP_SHIFT;
+    page_table = arm64_kernel_translation_table;
+
+    vaddr_rem = vaddr - kernel_base;
+    index = vaddr_rem >> index_shift;
+    ASSERT(index < MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP);
+
+    /* descend one table level per iteration until a block/page entry is hit */
+    while (true) {
+        index = vaddr_rem >> index_shift;
+        vaddr_rem -= (vaddr_t)index << index_shift;
+        pte = page_table[index];
+        descriptor_type = pte & MMU_PTE_DESCRIPTOR_MASK;
+        pte_addr = pte & MMU_PTE_OUTPUT_ADDR_MASK;
+
+        LTRACEF("va 0x%lx, index %d, index_shift %d, rem 0x%lx, pte 0x%llx\n",
+                vaddr, index, index_shift, vaddr_rem, pte);
+
+        if (descriptor_type == MMU_PTE_DESCRIPTOR_INVALID)
+            return ERR_NOT_FOUND;
+
+        /* terminal entry: a block at levels 0-2, or a page at level 3 */
+        if (descriptor_type == ((index_shift > MMU_KERNEL_PAGE_SIZE_SHIFT) ?
+                                 MMU_PTE_L012_DESCRIPTOR_BLOCK :
+                                 MMU_PTE_L3_DESCRIPTOR_PAGE)) {
+            break;
+        }
+
+        if (index_shift <= MMU_KERNEL_PAGE_SIZE_SHIFT ||
+            descriptor_type != MMU_PTE_L012_DESCRIPTOR_TABLE) {
+            PANIC_UNIMPLEMENTED;
+        }
+
+        page_table = paddr_to_kvaddr(pte_addr);
+        /* each level resolves (page_size_shift - 3) bits of the address */
+        index_shift -= MMU_KERNEL_PAGE_SIZE_SHIFT - 3;
+    }
+
+    if (paddr)
+        *paddr = pte_addr + vaddr_rem;
+    if (flags) {
+        /* translate descriptor attribute bits back into generic mmu flags */
+        *flags = 0;
+        if (pte & MMU_PTE_ATTR_NON_SECURE)
+            *flags |= ARCH_MMU_FLAG_NS;
+        switch (pte & MMU_PTE_ATTR_ATTR_INDEX_MASK) {
+            case MMU_PTE_ATTR_STRONGLY_ORDERED:
+                *flags |= ARCH_MMU_FLAG_UNCACHED;
+                break;
+            case MMU_PTE_ATTR_DEVICE:
+                *flags |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
+                break;
+            case MMU_PTE_ATTR_NORMAL_MEMORY:
+                break;
+            default:
+                PANIC_UNIMPLEMENTED;
+        }
+        switch (pte & MMU_PTE_ATTR_AP_MASK) {
+            case MMU_PTE_ATTR_AP_P_RW_U_NA:
+                break;
+            case MMU_PTE_ATTR_AP_P_RW_U_RW:
+                *flags |= ARCH_MMU_FLAG_PERM_USER;
+                break;
+            case MMU_PTE_ATTR_AP_P_RO_U_NA:
+                *flags |= ARCH_MMU_FLAG_PERM_RO;
+                break;
+            case MMU_PTE_ATTR_AP_P_RO_U_RO:
+                *flags |= ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO;
+                break;
+        }
+        /* only report no-execute when both user and privileged XN are set */
+        if ((pte & MMU_PTE_ATTR_UXN) && (pte & MMU_PTE_ATTR_PXN)) {
+            *flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
+        }
+    }
+    LTRACEF("va 0x%lx, paddr 0x%lx, flags 0x%x\n",
+            vaddr, paddr ? *paddr : ~0UL, flags ? *flags : ~0U);
+    return 0;
+}
+
+/*
+ * Allocate a naturally-aligned chunk of memory for a page table of size
+ * (1 << page_size_shift) bytes and return its physical address in *paddrp.
+ *
+ * Tables of a whole page or more come from the pmm as contiguous pages;
+ * smaller tables come from the heap (memalign) and are resolved to a
+ * physical address via arch_mmu_query.
+ *
+ * Returns 0 on success, ERR_NO_MEMORY (or the arch_mmu_query error) on
+ * failure. Fix vs. original: the pmm page count (size_t) and the signed
+ * status_t from arch_mmu_query no longer share one size_t variable, which
+ * funneled a negative status through an unsigned type before returning it.
+ */
+static int alloc_page_table(paddr_t *paddrp, uint page_size_shift)
+{
+    size_t size = 1U << page_size_shift;
+
+    if (size >= PAGE_SIZE) {
+        size_t count = size / PAGE_SIZE;
+        size_t ret = pmm_alloc_contiguous(count, page_size_shift, paddrp, NULL);
+        if (ret != count)
+            return ERR_NO_MEMORY;
+    } else {
+        void *vaddr = memalign(size, size);
+        if (!vaddr)
+            return ERR_NO_MEMORY;
+        status_t err = arch_mmu_query((vaddr_t)vaddr, paddrp, NULL);
+        if (err) {
+            free(vaddr);
+            return err;
+        }
+    }
+    return 0;
+}
+
+/*
+ * Release a page table previously obtained from alloc_page_table().
+ * Page-or-larger tables go back to the pmm (looked up by physical address);
+ * sub-page tables were heap allocations and are simply freed.
+ * Panics if the pmm does not know the given physical address.
+ */
+static void free_page_table(void *vaddr, paddr_t paddr, uint page_size_shift)
+{
+    vm_page_t *address_to_page(paddr_t addr); /* TODO: remove */
+
+    size_t size = 1U << page_size_shift;
+    vm_page_t *page;
+
+    if (size >= PAGE_SIZE) {
+        page = address_to_page(paddr);
+        if (!page)
+            panic("bad page table paddr 0x%lx\n", paddr);
+        pmm_free_page(page);
+    } else {
+        free(vaddr);
+    }
+}
+
+/*
+ * Return the next-level page table referenced by page_table[index],
+ * allocating and installing a new (zeroed) one if the slot is invalid.
+ *
+ * Returns NULL if allocation fails or if the slot holds a block mapping
+ * (cannot descend through a block); panics on any other descriptor type.
+ */
+static pte_t *arm64_mmu_get_page_table(vaddr_t index, uint page_size_shift, pte_t *page_table)
+{
+    pte_t pte;
+    paddr_t paddr;
+    void *vaddr;
+    int ret;
+
+    pte = page_table[index];
+    switch (pte & MMU_PTE_DESCRIPTOR_MASK) {
+    case MMU_PTE_DESCRIPTOR_INVALID:
+        ret = alloc_page_table(&paddr, page_size_shift);
+        if (ret) {
+            TRACEF("failed to allocate page table\n");
+            return NULL;
+        }
+        vaddr = paddr_to_kvaddr(paddr);
+        LTRACEF("allocated page table, vaddr %p, paddr 0x%lx\n", vaddr, paddr);
+        memset(vaddr, MMU_PTE_DESCRIPTOR_INVALID, 1U << page_size_shift);
+        /* ensure the zeroed table is visible before publishing the entry */
+        __asm__ volatile("dmb ishst" ::: "memory");
+        pte = paddr | MMU_PTE_L012_DESCRIPTOR_TABLE;
+        page_table[index] = pte;
+        LTRACEF("pte %p[0x%lx] = 0x%llx\n", page_table, index, pte);
+        return vaddr;
+
+    case MMU_PTE_L012_DESCRIPTOR_TABLE:
+        paddr = pte & MMU_PTE_OUTPUT_ADDR_MASK;
+        LTRACEF("found page table 0x%lx\n", paddr);
+        return paddr_to_kvaddr(paddr);
+
+    case MMU_PTE_L012_DESCRIPTOR_BLOCK:
+        return NULL;
+
+    default:
+        PANIC_UNIMPLEMENTED;
+    }
+}
+
+/*
+ * Scan a page table of (1 << page_size_shift) bytes and report whether
+ * every entry is invalid, i.e. the table can be freed.
+ */
+static bool page_table_is_clear(pte_t *page_table, uint page_size_shift)
+{
+    int entry_count = 1U << (page_size_shift - 3);
+
+    for (int idx = 0; idx < entry_count; idx++) {
+        pte_t entry = page_table[idx];
+        if (entry == MMU_PTE_DESCRIPTOR_INVALID)
+            continue;
+        LTRACEF("page_table at %p still in use, index %d is 0x%llx\n",
+                page_table, idx, entry);
+        return false;
+    }
+
+    LTRACEF("page table at %p is clear\n", page_table);
+    return true;
+}
+
+/*
+ * Recursively clear mappings covering [vaddr_rel, vaddr_rel + size) at the
+ * table level identified by index_shift, invalidating TLB entries as leaf
+ * entries are removed and freeing next-level tables that become empty.
+ * 'vaddr' tracks the absolute virtual address for TLBI operands.
+ */
+static void arm64_mmu_unmap_pt(vaddr_t vaddr, vaddr_t vaddr_rel,
+                               size_t size,
+                               uint index_shift, uint page_size_shift,
+                               pte_t *page_table, uint asid)
+{
+    pte_t *next_page_table;
+    vaddr_t index;
+    size_t chunk_size;
+    vaddr_t vaddr_rem;
+    vaddr_t block_size;
+    vaddr_t block_mask;
+    pte_t pte;
+    paddr_t page_table_paddr;
+
+    LTRACEF("vaddr 0x%lx, vaddr_rel 0x%lx, size 0x%lx, index shift %d, page_size_shift %d, page_table %p\n",
+            vaddr, vaddr_rel, size, index_shift, page_size_shift, page_table);
+
+    while (size) {
+        /* clamp this iteration to the remainder of the current block */
+        block_size = 1UL << index_shift;
+        block_mask = block_size - 1;
+        vaddr_rem = vaddr_rel & block_mask;
+        chunk_size = MIN(size, block_size - vaddr_rem);
+        index = vaddr_rel >> index_shift;
+
+        pte = page_table[index];
+
+        if (index_shift > page_size_shift &&
+            (pte & MMU_PTE_DESCRIPTOR_MASK) == MMU_PTE_L012_DESCRIPTOR_TABLE) {
+            /* descend into the next-level table, then free it if empty */
+            page_table_paddr = pte & MMU_PTE_OUTPUT_ADDR_MASK;
+            next_page_table = paddr_to_kvaddr(page_table_paddr);
+            arm64_mmu_unmap_pt(vaddr, vaddr_rem, chunk_size,
+                               index_shift - (page_size_shift - 3),
+                               page_size_shift,
+                               next_page_table, asid);
+            if (chunk_size == block_size ||
+                page_table_is_clear(next_page_table, page_size_shift)) {
+                LTRACEF("pte %p[0x%lx] = 0 (was page table)\n", page_table, index);
+                page_table[index] = MMU_PTE_DESCRIPTOR_INVALID;
+                /* make the entry removal visible before freeing the table */
+                __asm__ volatile("dmb ishst" ::: "memory");
+                free_page_table(next_page_table, page_table_paddr, page_size_shift);
+            }
+        } else if (pte) {
+            /* leaf (block/page) entry: clear it and invalidate the TLB.
+             * NOTE(review): CF / ARM64_TLBI are project macros — presumably a
+             * barrier and "tlbi <op>, <operand>" respectively; confirm in
+             * arch headers. Global-ASID mappings use the by-VA-all-ASID form. */
+            LTRACEF("pte %p[0x%lx] = 0\n", page_table, index);
+            page_table[index] = MMU_PTE_DESCRIPTOR_INVALID;
+            CF;
+            if (asid == MMU_ARM64_GLOBAL_ASID)
+                ARM64_TLBI(vaae1is, vaddr >> 12);
+            else
+                ARM64_TLBI(vae1is, vaddr >> 12 | (vaddr_t)asid << 48);
+        } else {
+            LTRACEF("pte %p[0x%lx] already clear\n", page_table, index);
+        }
+        vaddr += chunk_size;
+        vaddr_rel += chunk_size;
+        size -= chunk_size;
+    }
+}
+
+/*
+ * Recursively install mappings for [vaddr_rel_in, vaddr_rel_in + size_in)
+ * -> paddr_in with attribute bits 'attrs', at the table level identified by
+ * index_shift. Uses block descriptors where address/size alignment permits,
+ * otherwise descends into (possibly freshly allocated) next-level tables.
+ *
+ * Returns 0 on success; on failure unmaps everything mapped so far and
+ * returns ERR_GENERIC (ERR_INVALID_ARGS for unaligned input).
+ */
+static int arm64_mmu_map_pt(vaddr_t vaddr_in, vaddr_t vaddr_rel_in,
+                            paddr_t paddr_in,
+                            size_t size_in, pte_t attrs,
+                            uint index_shift, uint page_size_shift,
+                            pte_t *page_table, uint asid)
+{
+    int ret;
+    pte_t *next_page_table;
+    vaddr_t index;
+    vaddr_t vaddr = vaddr_in;
+    vaddr_t vaddr_rel = vaddr_rel_in;
+    paddr_t paddr = paddr_in;
+    size_t size = size_in;
+    size_t chunk_size;
+    vaddr_t vaddr_rem;
+    vaddr_t block_size;
+    vaddr_t block_mask;
+    pte_t pte;
+
+    LTRACEF("vaddr 0x%lx, vaddr_rel 0x%lx, paddr 0x%lx, size 0x%lx, attrs 0x%llx, index shift %d, page_size_shift %d, page_table %p\n",
+            vaddr, vaddr_rel, paddr, size, attrs,
+            index_shift, page_size_shift, page_table);
+
+    if ((vaddr_rel | paddr | size) & ((1UL << page_size_shift) - 1)) {
+        TRACEF("not page aligned\n");
+        return ERR_INVALID_ARGS;
+    }
+
+    while (size) {
+        /* clamp this iteration to the remainder of the current block */
+        block_size = 1UL << index_shift;
+        block_mask = block_size - 1;
+        vaddr_rem = vaddr_rel & block_mask;
+        chunk_size = MIN(size, block_size - vaddr_rem);
+        index = vaddr_rel >> index_shift;
+
+        /* a block mapping needs block-aligned va/pa, a full block of size,
+         * and a level that permits blocks; otherwise recurse one level down */
+        if (((vaddr_rel | paddr) & block_mask) ||
+            (chunk_size != block_size) ||
+            (index_shift > MMU_PTE_DESCRIPTOR_BLOCK_MAX_SHIFT)) {
+            next_page_table = arm64_mmu_get_page_table(index, page_size_shift,
+                                                       page_table);
+            if (!next_page_table)
+                goto err;
+
+            ret = arm64_mmu_map_pt(vaddr, vaddr_rem, paddr, chunk_size, attrs,
+                                   index_shift - (page_size_shift - 3),
+                                   page_size_shift, next_page_table, asid);
+            if (ret)
+                goto err;
+        } else {
+            pte = page_table[index];
+            if (pte) {
+                TRACEF("page table entry already in use, index 0x%lx, 0x%llx\n",
+                       index, pte);
+                goto err;
+            }
+
+            pte = paddr | attrs;
+            if (index_shift > page_size_shift)
+                pte |= MMU_PTE_L012_DESCRIPTOR_BLOCK;
+            else
+                pte |= MMU_PTE_L3_DESCRIPTOR_PAGE;
+
+            LTRACEF("pte %p[0x%lx] = 0x%llx\n", page_table, index, pte);
+            page_table[index] = pte;
+        }
+        vaddr += chunk_size;
+        vaddr_rel += chunk_size;
+        paddr += chunk_size;
+        size -= chunk_size;
+    }
+
+    return 0;
+
+err:
+    /* roll back the portion already mapped (size_in - size bytes) */
+    arm64_mmu_unmap_pt(vaddr_in, vaddr_rel_in, size_in - size,
+                       index_shift, page_size_shift, page_table, asid);
+    DSB;
+    return ERR_GENERIC;
+}
+
+/*
+ * Map [vaddr, vaddr + size) -> paddr with attribute bits 'attrs' into the
+ * given top-level page table. vaddr must lie within the address space
+ * starting at vaddr_base spanning (1 << top_size_shift) bytes.
+ * Returns 0 on success or a negative error code.
+ */
+int arm64_mmu_map(vaddr_t vaddr, paddr_t paddr, size_t size, pte_t attrs,
+                  vaddr_t vaddr_base, uint top_size_shift,
+                  uint top_index_shift, uint page_size_shift,
+                  pte_t *top_page_table, uint asid)
+{
+    int ret;
+    vaddr_t vaddr_rel = vaddr - vaddr_base;
+    vaddr_t vaddr_rel_max = 1UL << top_size_shift;
+
+    LTRACEF("vaddr 0x%lx, paddr 0x%lx, size 0x%lx, attrs 0x%llx, asid 0x%x\n",
+            vaddr, paddr, size, attrs, asid);
+
+    /* overflow-safe range check: rejects vaddr outside the aspace as well as
+     * ranges that would run past its end */
+    if (vaddr_rel > vaddr_rel_max - size || size > vaddr_rel_max) {
+        TRACEF("vaddr 0x%lx, size 0x%lx out of range vaddr 0x%lx, size 0x%lx\n",
+               vaddr, size, vaddr_base, vaddr_rel_max);
+        return ERR_INVALID_ARGS;
+    }
+
+    if (!top_page_table) {
+        TRACEF("page table is NULL\n");
+        return ERR_INVALID_ARGS;
+    }
+
+    ret = arm64_mmu_map_pt(vaddr, vaddr_rel, paddr, size, attrs,
+                           top_index_shift, page_size_shift, top_page_table, asid);
+    /* ensure all table updates complete before returning */
+    DSB;
+    return ret;
+}
+
+/*
+ * Remove mappings covering [vaddr, vaddr + size) from the given top-level
+ * page table; counterpart to arm64_mmu_map(). Same range validation as the
+ * map path. Returns 0 on success or ERR_INVALID_ARGS.
+ */
+int arm64_mmu_unmap(vaddr_t vaddr, size_t size,
+                    vaddr_t vaddr_base, uint top_size_shift,
+                    uint top_index_shift, uint page_size_shift,
+                    pte_t *top_page_table, uint asid)
+{
+    vaddr_t vaddr_rel = vaddr - vaddr_base;
+    vaddr_t vaddr_rel_max = 1UL << top_size_shift;
+
+    LTRACEF("vaddr 0x%lx, size 0x%lx, asid 0x%x\n", vaddr, size, asid);
+
+    if (vaddr_rel > vaddr_rel_max - size || size > vaddr_rel_max) {
+        TRACEF("vaddr 0x%lx, size 0x%lx out of range vaddr 0x%lx, size 0x%lx\n",
+               vaddr, size, vaddr_base, vaddr_rel_max);
+        return ERR_INVALID_ARGS;
+    }
+
+    if (!top_page_table) {
+        TRACEF("page table is NULL\n");
+        return ERR_INVALID_ARGS;
+    }
+
+    arm64_mmu_unmap_pt(vaddr, vaddr_rel, size,
+                       top_index_shift, page_size_shift, top_page_table, asid);
+    /* ensure TLB invalidations and table updates complete before returning */
+    DSB;
+    return 0;
+}
+
+/*
+ * Generic-arch entry point: map 'count' pages at vaddr -> paddr in the
+ * kernel address space with the given ARCH_MMU_FLAG_* flags.
+ * NOTE(review): mmu_flags_to_pte_attr()'s error return (ERR_INVALID_ARGS as
+ * a pte_t) is passed on as attribute bits unchecked — confirm callers only
+ * supply valid flags.
+ */
+int arch_mmu_map(vaddr_t vaddr, paddr_t paddr, uint count, uint flags)
+{
+    return arm64_mmu_map(vaddr, paddr, count * PAGE_SIZE,
+                         mmu_flags_to_pte_attr(flags),
+                         ~0UL << MMU_KERNEL_SIZE_SHIFT, MMU_KERNEL_SIZE_SHIFT,
+                         MMU_KERNEL_TOP_SHIFT, MMU_KERNEL_PAGE_SIZE_SHIFT,
+                         arm64_kernel_translation_table, MMU_ARM64_GLOBAL_ASID);
+}
+
+/*
+ * Generic-arch entry point: unmap 'count' pages starting at vaddr from the
+ * kernel address space (global ASID).
+ */
+int arch_mmu_unmap(vaddr_t vaddr, uint count)
+{
+    return arm64_mmu_unmap(vaddr, count * PAGE_SIZE,
+                           ~0UL << MMU_KERNEL_SIZE_SHIFT, MMU_KERNEL_SIZE_SHIFT,
+                           MMU_KERNEL_TOP_SHIFT, MMU_KERNEL_PAGE_SIZE_SHIFT,
+                           arm64_kernel_translation_table,
+                           MMU_ARM64_GLOBAL_ASID);
+}
diff --git a/src/bsp/lk/arch/arm64/mp.c b/src/bsp/lk/arch/arm64/mp.c
new file mode 100644
index 0000000..c3cfcea
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/mp.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <arch/mp.h>
+
+#include <assert.h>
+#include <compiler.h>
+#include <trace.h>
+#include <err.h>
+#include <platform/interrupts.h>
+#include <arch/ops.h>
+
+#if WITH_DEV_INTERRUPT_ARM_GIC
+#include <dev/interrupt/arm_gic.h>
+//#else
+//#error need other implementation of interrupt controller that can ipi
+#endif
+
+#define LOCAL_TRACE 0
+
+#define GIC_IPI_BASE (14)
+
+/*
+ * Send an inter-processor interrupt 'ipi' to the cpus in 'target' (bitmask).
+ * With the ARM GIC driver the IPI number is offset by GIC_IPI_BASE and sent
+ * as a non-secure SGI; without a GIC driver this is a no-op.
+ * Always returns NO_ERROR.
+ */
+__WEAK status_t arch_mp_send_ipi(mp_cpu_mask_t target, mp_ipi_t ipi)
+{
+    LTRACEF("target 0x%x, ipi %u\n", target, ipi);
+
+#if WITH_DEV_INTERRUPT_ARM_GIC
+    uint gic_ipi_num = ipi + GIC_IPI_BASE;
+
+    /* filter out targets outside of the range of cpus we care about */
+    target &= ((1UL << SMP_MAX_CPUS) - 1);
+    if (target != 0) {
+        LTRACEF("target 0x%x, gic_ipi %u\n", target, gic_ipi_num);
+        arm_gic_sgi(gic_ipi_num, ARM_GIC_SGI_FLAG_NS, target);
+    }
+#endif
+
+    return NO_ERROR;
+}
+
+/* Interrupt handler for MP_IPI_GENERIC: currently trace-only, no reschedule. */
+enum handler_return arm_ipi_generic_handler(void *arg)
+{
+    LTRACEF("cpu %u, arg %p\n", arch_curr_cpu_num(), arg);
+
+    return INT_NO_RESCHEDULE;
+}
+
+/* Interrupt handler for MP_IPI_RESCHEDULE: defers to the mp mailbox layer,
+ * which decides whether this cpu must reschedule. */
+enum handler_return arm_ipi_reschedule_handler(void *arg)
+{
+    LTRACEF("cpu %u, arg %p\n", arch_curr_cpu_num(), arg);
+
+    return mp_mbx_reschedule_irq();
+}
+
+/*
+ * Per-cpu MP init: register the IPI handlers at their GIC interrupt numbers
+ * (IPI id + GIC_IPI_BASE). The unmask calls are left commented out —
+ * presumably SGIs are enabled elsewhere in the GIC init path; confirm.
+ */
+__WEAK void arch_mp_init_percpu(void)
+{
+    register_int_handler(MP_IPI_GENERIC + GIC_IPI_BASE, &arm_ipi_generic_handler, 0);
+    register_int_handler(MP_IPI_RESCHEDULE + GIC_IPI_BASE, &arm_ipi_reschedule_handler, 0);
+
+    //unmask_interrupt(MP_IPI_GENERIC);
+    //unmask_interrupt(MP_IPI_RESCHEDULE);
+}
+
diff --git a/src/bsp/lk/arch/arm64/rules.mk b/src/bsp/lk/arch/arm64/rules.mk
new file mode 100644
index 0000000..ab6ac75
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/rules.mk
@@ -0,0 +1,125 @@
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE := $(LOCAL_DIR)
+
+GLOBAL_DEFINES += \
+	ARM64_CPU_$(ARM_CPU)=1 \
+	ARM_ISA_ARMV8=1 \
+	IS_64BIT=1
+
+MODULE_SRCS += \
+	$(LOCAL_DIR)/arch.c \
+	$(LOCAL_DIR)/asm.S \
+	$(LOCAL_DIR)/exceptions.S \
+	$(LOCAL_DIR)/exceptions_el2_el3.S \
+	$(LOCAL_DIR)/exceptions_c.c \
+	$(LOCAL_DIR)/fpu.c \
+	$(LOCAL_DIR)/thread.c \
+	$(LOCAL_DIR)/spinlock.S \
+	$(LOCAL_DIR)/start.S \
+	$(LOCAL_DIR)/cache-ops.S \
+
+#	$(LOCAL_DIR)/arm/start.S \
+	$(LOCAL_DIR)/arm/cache.c \
+	$(LOCAL_DIR)/arm/ops.S \
+	$(LOCAL_DIR)/arm/faults.c \
+	$(LOCAL_DIR)/arm/dcc.S
+
+GLOBAL_DEFINES += \
+	ARCH_DEFAULT_STACK_SIZE=8192
+
+# if its requested we build with SMP, arm generically supports 4 cpus
+ifeq ($(WITH_SMP),1)
+SMP_MAX_CPUS ?= 4
+SMP_CPU_CLUSTER_SHIFT ?= 8
+SMP_CPU_ID_BITS ?= 24 # Ignore aff3 bits for now since they are not next to aff2
+
+GLOBAL_DEFINES += \
+    WITH_SMP=1 \
+    SMP_MAX_CPUS=$(SMP_MAX_CPUS) \
+    SMP_CPU_CLUSTER_SHIFT=$(SMP_CPU_CLUSTER_SHIFT) \
+    SMP_CPU_ID_BITS=$(SMP_CPU_ID_BITS)
+
+MODULE_SRCS += \
+    $(LOCAL_DIR)/mp.c
+else
+GLOBAL_DEFINES += \
+    SMP_MAX_CPUS=1
+endif
+
+ARCH_OPTFLAGS := -O2
+
+# we have a mmu and want the vmm/pmm
+WITH_KERNEL_VM ?= 1
+
+ifeq ($(WITH_KERNEL_VM),1)
+
+MODULE_SRCS += \
+	$(LOCAL_DIR)/mmu.c
+
+KERNEL_ASPACE_BASE ?= 0xffff000000000000
+KERNEL_ASPACE_SIZE ?= 0x0001000000000000
+USER_ASPACE_BASE   ?= 0x0000000001000000
+USER_ASPACE_SIZE   ?= 0x0000fffffe000000
+
+GLOBAL_DEFINES += \
+    KERNEL_ASPACE_BASE=$(KERNEL_ASPACE_BASE) \
+    KERNEL_ASPACE_SIZE=$(KERNEL_ASPACE_SIZE) \
+    USER_ASPACE_BASE=$(USER_ASPACE_BASE) \
+    USER_ASPACE_SIZE=$(USER_ASPACE_SIZE)
+
+KERNEL_BASE ?= $(KERNEL_ASPACE_BASE)
+KERNEL_LOAD_OFFSET ?= 0
+
+GLOBAL_DEFINES += \
+    KERNEL_BASE=$(KERNEL_BASE) \
+    KERNEL_LOAD_OFFSET=$(KERNEL_LOAD_OFFSET)
+
+else
+
+KERNEL_BASE ?= $(MEMBASE)
+KERNEL_LOAD_OFFSET ?= 0
+
+endif
+
+GLOBAL_DEFINES += \
+	MEMBASE=$(MEMBASE) \
+	MEMSIZE=$(MEMSIZE)
+
+# try to find the toolchain
+include $(LOCAL_DIR)/toolchain.mk
+TOOLCHAIN_PREFIX := $(ARCH_$(ARCH)_TOOLCHAIN_PREFIX)
+$(info TOOLCHAIN_PREFIX = $(TOOLCHAIN_PREFIX))
+
+ARCH_COMPILEFLAGS += $(ARCH_$(ARCH)_COMPILEFLAGS)
+
+GLOBAL_LDFLAGS += -z max-page-size=4096
+
+
+# make sure some bits were set up
+MEMVARS_SET := 0
+ifneq ($(MEMBASE),)
+MEMVARS_SET := 1
+endif
+ifneq ($(MEMSIZE),)
+MEMVARS_SET := 1
+endif
+ifeq ($(MEMVARS_SET),0)
+$(error missing MEMBASE or MEMSIZE variable, please set in target rules.mk)
+endif
+
+# potentially generated files that should be cleaned out with clean make rule
+GENERATED += \
+	$(BUILDDIR)/system-onesegment.ld
+
+# rules for generating the linker script
+$(BUILDDIR)/system-onesegment.ld: $(LOCAL_DIR)/system-onesegment.ld $(wildcard arch/*.ld) linkerscript.phony
+	@echo generating $@
+	@$(MKDIR)
+	$(NOECHO)sed "s/%MEMBASE%/$(MEMBASE)/;s/%MEMSIZE%/$(MEMSIZE)/;s/%KERNEL_BASE%/$(KERNEL_BASE)/;s/%KERNEL_LOAD_OFFSET%/$(KERNEL_LOAD_OFFSET)/" < $< > $@.tmp
+	@$(call TESTANDREPLACEFILE,$@.tmp,$@)
+
+linkerscript.phony:
+.PHONY: linkerscript.phony
+
+include make/module.mk
diff --git a/src/bsp/lk/arch/arm64/spinlock.S b/src/bsp/lk/arch/arm64/spinlock.S
new file mode 100644
index 0000000..ef5b3d1
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/spinlock.S
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2014 Google Inc. All rights reserved
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+
+.text
+
+/* Try to take the spinlock at [x0] once, without spinning.
+ * Returns 0 in x0 on success; non-zero (the observed lock value, or the
+ * stxr failure code) when the lock was not acquired.
+ * ldaxr gives acquire semantics on the successful path. */
+FUNCTION(arch_spin_trylock)
+	mov	x2, x0
+	mov	x1, #1
+	ldaxr	x0, [x2]
+	cbnz	x0, 1f
+	stxr	w0, x1, [x2]
+1:
+	ret
+
+/* Spin until the lock at [x0] is acquired (store 1 with the lock free).
+ * sevl primes the first wfe so the initial ldaxr runs immediately; waiters
+ * are woken by the event generated when the holder's exclusive monitor is
+ * cleared. Retries on both a held lock and a failed stxr. */
+FUNCTION(arch_spin_lock)
+	mov	x1, #1
+	sevl
+1:
+	wfe
+	ldaxr	x2, [x0]
+	cbnz	x2, 1b
+	stxr	w2, x1, [x0]
+	cbnz	w2, 1b
+	ret
+
+/* Release the spinlock at [x0]: store-release of zero orders all prior
+ * critical-section accesses before the lock becomes visible as free. */
+FUNCTION(arch_spin_unlock)
+	stlr	xzr, [x0]
+	ret
diff --git a/src/bsp/lk/arch/arm64/start.S b/src/bsp/lk/arch/arm64/start.S
new file mode 100644
index 0000000..f36f5bf
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/start.S
@@ -0,0 +1,440 @@
+#include <asm.h>
+#include <arch/arm64/mmu.h>
+#include <arch/asm_macros.h>
+#include <arch/arch_ops.h>
+#include <kernel/vm.h>
+
+/*
+ * Register use:
+ *  x0-x3   Arguments
+ *  x9-x15  Scratch
+ *  x19-x28 Globals
+ */
+tmp                     .req x9
+tmp2                    .req x10
+wtmp2                   .req w10
+idx                     .req x11
+idx_shift               .req x12
+page_table              .req x13
+new_page_table          .req x14
+phys_offset             .req x15
+
+cpuid                   .req x19
+page_table0             .req x20
+page_table1             .req x21
+mmu_initial_mapping     .req x22
+vaddr                   .req x23
+paddr                   .req x24
+mapping_size            .req x25
+size                    .req x26
+attr                    .req x27
+
+/*
+ * Kernel entry point.  Runs at physical addresses with the MMU off,
+ * drops from EL3/EL2 to EL1, builds the kernel page tables from the
+ * mmu_initial_mappings table, enables the MMU via an identity-mapped
+ * trampoline, then jumps to virtual addresses and calls lk_main (or
+ * arm64_secondary_entry on secondary CPUs).
+ */
+.section .text.boot
+FUNCTION(_start)
+.globl arm_reset
+arm_reset:
+
+    bl      setup_el2_or_el3_exception_base
+
+    mrs     tmp, CurrentEL
+    cmp     tmp, #(0b11 << 2)
+    b.ne    .Lsetup_el2_or_el3_stack
+
+    /* el3 set secure timer */
+    ldr     tmp2, =13000000    /* 13 MHz */
+    msr     cntfrq_el0, tmp2
+
+    /* el3 enable smp bit */
+    mrs     tmp2, s3_1_c15_c2_1
+    orr     tmp2, tmp2, #(1<<6)
+    msr     s3_1_c15_c2_1, tmp2
+
+.Lsetup_el2_or_el3_stack:
+    /* set el2 or el3 stack pointer */
+    ldr     tmp2, = __stack_end
+    mov     sp, tmp2
+
+    /* initialization required in EL3. weak symbol at asm.S */
+    cmp     tmp, #(0b11 << 2)
+    b.ne    .LelX_to_el1
+    bl      platform_el3_init
+
+.LelX_to_el1:
+    /* change to el1 */
+    bl      arm64_elX_to_el1
+
+#if WITH_KERNEL_VM
+    /* enable caches so atomics and spinlocks work */
+    mrs     tmp, sctlr_el1
+    orr     tmp, tmp, #(1<<12) /* Enable icache */
+    orr     tmp, tmp, #(1<<2)  /* Enable dcache/ucache */
+    bic     tmp, tmp, #(1<<3)  /* Disable Stack Alignment Check */ /* TODO: don't use unaligned stacks */
+    msr     sctlr_el1, tmp
+
+    /* set up the mmu according to mmu_initial_mappings */
+
+    /* load the base of the translation table and clear the table */
+    adrp    page_table1, arm64_kernel_translation_table
+    add     page_table1, page_table1, #:lo12:arm64_kernel_translation_table
+
+    /* Prepare tt_trampoline page table */
+    /* Calculate pagetable physical addresses */
+    adrp    page_table0, tt_trampoline
+    add     page_table0, page_table0, #:lo12:tt_trampoline
+
+#if WITH_SMP
+    mrs     cpuid, mpidr_el1
+    ubfx    cpuid, cpuid, #0, #SMP_CPU_ID_BITS
+    cbnz    cpuid, .Lmmu_enable_secondary
+#endif
+
+    mov     tmp, #0
+
+    /* walk through all the entries in the translation table, setting them up */
+.Lclear_top_page_table_loop:
+    str     xzr, [page_table1, tmp, lsl #3]
+    add     tmp, tmp, #1
+    cmp     tmp, #MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP
+    bne     .Lclear_top_page_table_loop
+
+    /* load the address of the mmu_initial_mappings table and start processing */
+    adrp    mmu_initial_mapping, mmu_initial_mappings
+    add     mmu_initial_mapping, mmu_initial_mapping, #:lo12:mmu_initial_mappings
+
+.Linitial_mapping_loop:
+/* Read entry of mmu_initial_mappings (likely defined in platform.c) */
+    ldp     paddr, vaddr, [mmu_initial_mapping, #__MMU_INITIAL_MAPPING_PHYS_OFFSET]
+    ldp     size, tmp, [mmu_initial_mapping, #__MMU_INITIAL_MAPPING_SIZE_OFFSET]
+
+    tbzmask tmp, MMU_INITIAL_MAPPING_FLAG_DYNAMIC, .Lnot_dynamic
+    adr     paddr, _start
+    /* NOTE(review): x0 is assumed to still hold the RAM size passed in by the
+     * loader for DYNAMIC entries — confirm the boot protocol */
+    mov     size, x0
+    str     paddr, [mmu_initial_mapping, #__MMU_INITIAL_MAPPING_PHYS_OFFSET]
+    str     size, [mmu_initial_mapping, #__MMU_INITIAL_MAPPING_SIZE_OFFSET]
+
+.Lnot_dynamic:
+    /* if size == 0, end of list, done with initial mapping */
+    cbz     size, .Linitial_mapping_done
+    mov     mapping_size, size
+
+    /* set up the flags */
+    tbzmask tmp, MMU_INITIAL_MAPPING_FLAG_UNCACHED, .Lnot_uncached
+    ldr     attr, =MMU_INITIAL_MAP_STRONGLY_ORDERED
+    b       .Lmem_type_done
+
+.Lnot_uncached:
+    /* is this memory mapped to device/peripherals? */
+    tbzmask tmp, MMU_INITIAL_MAPPING_FLAG_DEVICE, .Lnot_device
+    ldr     attr, =MMU_INITIAL_MAP_DEVICE
+    b       .Lmem_type_done
+.Lnot_device:
+
+/* Determine the segment in which the memory resides and set appropriate
+ *  attributes.  In order to handle offset kernels, the following rules are
+ *  implemented below:
+ *      KERNEL_BASE    to __code_start             -read/write (see note below)
+ *      __code_start   to __rodata_start (.text)   -read only
+ *      __rodata_start to __data_start   (.rodata) -read only, execute never
+ *      __data_start   to .....          (.data)   -read/write
+ *
+ *  The space below __code_start is presently left as read/write (same as .data)
+ *   mainly as a workaround for the raspberry pi boot process.  Boot vectors for
+ *   secondary CPUs are in this area and need to be updated by cpu0 once the system
+ *   is ready to boot the secondary processors.
+ *   TODO: handle this via mmu_initial_mapping entries, which may need to be
+ *         extended with additional flag types
+ */
+.Lmapping_size_loop:
+    ldr     attr, =MMU_PTE_KERNEL_DATA_FLAGS
+    ldr     tmp, =__code_start
+    subs    size, tmp, vaddr
+    /* If page is below  the entry point (_start) mark as kernel data */
+    b.hi    .Lmem_type_done
+
+    ldr     attr, =MMU_PTE_KERNEL_RO_FLAGS
+    ldr     tmp, =__rodata_start
+    subs    size, tmp, vaddr
+    b.hi    .Lmem_type_done
+    orr     attr, attr, #MMU_PTE_ATTR_PXN
+    ldr     tmp, =__data_start
+    subs    size, tmp, vaddr
+    b.hi    .Lmem_type_done
+    ldr     attr, =MMU_PTE_KERNEL_DATA_FLAGS
+    ldr     tmp, =_end
+    subs    size, tmp, vaddr
+    b.lo    . /* Error: _end < vaddr */
+    cmp     mapping_size, size
+    b.lo    . /* Error: mapping_size < size => RAM size too small for data/bss */
+    mov     size, mapping_size
+
+.Lmem_type_done:
+    subs    mapping_size, mapping_size, size
+    b.lo    . /* Error: mapping_size < size (RAM size too small for code/rodata?) */
+
+    /* Check that paddr, vaddr and size are page aligned */
+    orr     tmp, vaddr, paddr
+    orr     tmp, tmp, size
+    tst     tmp, #(1 << MMU_KERNEL_PAGE_SIZE_SHIFT) - 1
+    bne     . /* Error: not page aligned */
+
+    /* Clear top bits of virtual address (should be all set) */
+    eor     vaddr, vaddr, #(~0 << MMU_KERNEL_SIZE_SHIFT)
+
+    /* Check that top bits were all set */
+    tst     vaddr, #(~0 << MMU_KERNEL_SIZE_SHIFT)
+    bne     . /* Error: vaddr out of range */
+
+.Lmap_range_top_loop:
+    /* Select top level page table */
+    mov     page_table, page_table1
+    mov     idx_shift, #MMU_KERNEL_TOP_SHIFT
+
+    lsr     idx, vaddr, idx_shift
+
+
+/* determine the type of page table entry to use given alignment and size
+ *  of the chunk of memory we are mapping
+ */
+.Lmap_range_one_table_loop:
+    /* Check if current level allow block descriptors */
+    cmp     idx_shift, #MMU_PTE_DESCRIPTOR_BLOCK_MAX_SHIFT
+    b.hi    .Lmap_range_need_page_table
+
+    /* Check if paddr and vaddr alignment allows a block descriptor */
+    orr     tmp2, vaddr, paddr
+    lsr     tmp, tmp2, idx_shift
+    lsl     tmp, tmp, idx_shift
+    cmp     tmp, tmp2
+    b.ne    .Lmap_range_need_page_table
+
+    /* Check if size is large enough for a block mapping */
+    lsr     tmp, size, idx_shift
+    cbz     tmp, .Lmap_range_need_page_table
+
+    /* Select descriptor type, page for level 3, block for level 0-2 */
+    orr     tmp, attr, #MMU_PTE_L3_DESCRIPTOR_PAGE
+    cmp     idx_shift, MMU_KERNEL_PAGE_SIZE_SHIFT
+    beq     .Lmap_range_l3
+    orr     tmp, attr, #MMU_PTE_L012_DESCRIPTOR_BLOCK
+.Lmap_range_l3:
+
+    /* Write page table entry */
+    orr     tmp, tmp, paddr
+    str     tmp, [page_table, idx, lsl #3]
+
+    /* Move to next page table entry */
+    mov     tmp, #1
+    lsl     tmp, tmp, idx_shift
+    add     vaddr, vaddr, tmp
+    add     paddr, paddr, tmp
+    subs    size, size, tmp
+    /* TODO: add local loop if next entry is in the same page table */
+    b.ne    .Lmap_range_top_loop /* size != 0 */
+
+    /* Restore top bits of virtual address (should be all set) */
+    eor     vaddr, vaddr, #(~0 << MMU_KERNEL_SIZE_SHIFT)
+    /* Move to next subtype of ram mmu_initial_mappings entry */
+    cbnz     mapping_size, .Lmapping_size_loop
+
+    /* Move to next mmu_initial_mappings entry */
+    add     mmu_initial_mapping, mmu_initial_mapping, __MMU_INITIAL_MAPPING_SIZE
+    b       .Linitial_mapping_loop
+
+.Lmap_range_need_page_table:
+    /* Check if page table entry is unused */
+    ldr     new_page_table, [page_table, idx, lsl #3]
+    cbnz    new_page_table, .Lmap_range_has_page_table
+
+    /* Calculate phys offset (needed for memory allocation) */
+.Lphys_offset:
+    adr     phys_offset, .Lphys_offset /* phys */
+    ldr     tmp, =.Lphys_offset /* virt */
+    sub     phys_offset, tmp, phys_offset
+
+    /* Allocate new page table */
+    calloc_bootmem_aligned new_page_table, tmp, tmp2, MMU_KERNEL_PAGE_SIZE_SHIFT, phys_offset
+
+    /* Write page table entry (with allocated page table) */
+    orr     new_page_table, new_page_table, #MMU_PTE_L012_DESCRIPTOR_TABLE
+    str     new_page_table, [page_table, idx, lsl #3]
+
+.Lmap_range_has_page_table:
+    /* Check descriptor type */
+    and     tmp, new_page_table, #MMU_PTE_DESCRIPTOR_MASK
+    cmp     tmp, #MMU_PTE_L012_DESCRIPTOR_TABLE
+    b.ne    . /* Error: entry already in use (as a block entry) */
+
+    /* switch to next page table level */
+    bic     page_table, new_page_table, #MMU_PTE_DESCRIPTOR_MASK
+    mov     tmp, #~0
+    lsl     tmp, tmp, idx_shift
+    bic     tmp, vaddr, tmp
+    sub     idx_shift, idx_shift, #(MMU_KERNEL_PAGE_SIZE_SHIFT - 3)
+    lsr     idx, tmp, idx_shift
+
+    b       .Lmap_range_one_table_loop
+
+.Linitial_mapping_done:
+
+    /* Prepare tt_trampoline page table */
+
+    /* Zero tt_trampoline translation tables */
+    mov     tmp, #0
+.Lclear_tt_trampoline:
+    str     xzr, [page_table0, tmp, lsl#3]
+    add     tmp, tmp, #1
+    cmp     tmp, #MMU_PAGE_TABLE_ENTRIES_IDENT
+    blt     .Lclear_tt_trampoline
+
+    /* Setup mapping at phys -> phys */
+    adr     tmp, .Lmmu_on_pc
+    lsr     tmp, tmp, #MMU_IDENT_TOP_SHIFT    /* tmp = paddr index */
+    ldr     tmp2, =MMU_PTE_IDENT_FLAGS
+    add     tmp2, tmp2, tmp, lsl #MMU_IDENT_TOP_SHIFT  /* tmp2 = pt entry */
+
+    str     tmp2, [page_table0, tmp, lsl #3]     /* tt_trampoline[paddr index] = pt entry */
+
+#if WITH_SMP
+    adrp    tmp, page_tables_not_ready
+    add     tmp, tmp, #:lo12:page_tables_not_ready
+    str     wzr, [tmp]
+    b       .Lpage_tables_ready
+
+.Lmmu_enable_secondary:
+    adrp    tmp, page_tables_not_ready
+    add     tmp, tmp, #:lo12:page_tables_not_ready
+.Lpage_tables_not_ready:
+    ldr     wtmp2, [tmp]
+    cbnz    wtmp2, .Lpage_tables_not_ready
+.Lpage_tables_ready:
+#endif
+
+    /* set up the mmu */
+
+    /* Invalidate TLB */
+    tlbi    vmalle1is
+    isb
+    dsb     sy
+
+    /* Initialize Memory Attribute Indirection Register */
+    ldr     tmp, =MMU_MAIR_VAL
+    msr     mair_el1, tmp
+
+    /* Initialize TCR_EL1 */
+    /* set cacheable attributes on translation walk */
+    /* (SMP extensions) non-shareable, inner write-back write-allocate */
+    ldr     tmp, =MMU_TCR_FLAGS_IDENT
+    msr     tcr_el1, tmp
+
+    isb
+
+    /* Write ttbr with phys addr of the translation table */
+    msr     ttbr0_el1, page_table0
+    msr     ttbr1_el1, page_table1
+    isb
+
+    /* Read SCTLR */
+    mrs     tmp, sctlr_el1
+
+    /* Turn on the MMU */
+    orr     tmp, tmp, #0x1
+
+    /* Write back SCTLR */
+    msr     sctlr_el1, tmp
+.Lmmu_on_pc:
+    isb
+
+    /* Jump to virtual code address */
+    ldr     tmp, =.Lmmu_on_vaddr
+    br      tmp
+
+.Lmmu_on_vaddr:
+
+    /* Disable trampoline page-table in ttbr0 */
+    ldr     tmp, =MMU_TCR_FLAGS_KERNEL
+    msr     tcr_el1, tmp
+    isb
+
+
+    /* Invalidate TLB */
+    tlbi    vmalle1
+    isb
+
+#if WITH_SMP
+    cbnz    cpuid, .Lsecondary_boot
+#endif
+#endif /* WITH_KERNEL_VM */
+
+    ldr tmp, =__stack_end
+    mov sp, tmp
+
+    /* clear bss */
+.L__do_bss:
+    /* clear out the bss excluding the stack and kernel translation table  */
+    /* NOTE: relies on __post_prebss_bss_start and __bss_end being 8 byte aligned */
+    ldr     tmp, =__post_prebss_bss_start
+    ldr     tmp2, =__bss_end
+    sub     tmp2, tmp2, tmp
+    cbz     tmp2, .L__bss_loop_done
+.L__bss_loop:
+    sub     tmp2, tmp2, #8
+    str     xzr, [tmp], #8
+    cbnz    tmp2, .L__bss_loop
+.L__bss_loop_done:
+
+    bl  lk_main
+    b   .
+
+#if WITH_SMP
+.Lsecondary_boot:
+    /* collapse the MPIDR cluster/cpu affinity fields into a linear cpu index */
+    and     tmp, cpuid, #0xff
+    cmp     tmp, #(1 << SMP_CPU_CLUSTER_SHIFT)
+    bge     .Lunsupported_cpu_trap
+    bic     cpuid, cpuid, #0xff
+    orr     cpuid, tmp, cpuid, LSR #(8 - SMP_CPU_CLUSTER_SHIFT)
+    adrp    tmp, linear_cpuid_map
+    add     tmp, tmp, #:lo12:linear_cpuid_map
+    ldr     tmp, [tmp]
+    cbz     tmp, .Lno_cpuid_remap
+    add     tmp, tmp, cpuid
+    ldrb    wtmp2, [tmp]
+    /* NOTE(review): extracts bits [30:0] of tmp2, which holds the single byte
+     * loaded above; width 31 for a byte value looks odd — confirm against
+     * upstream lk before changing */
+    ubfx    cpuid, tmp2, #0, #31
+
+.Lno_cpuid_remap:
+    cmp     cpuid, #SMP_MAX_CPUS
+    bge     .Lunsupported_cpu_trap
+
+    /* Set up the stack */
+    ldr     tmp, =__stack_end
+    mov     tmp2, #ARCH_DEFAULT_STACK_SIZE
+    mul     tmp2, tmp2, cpuid
+    sub     sp, tmp, tmp2
+
+    mov     x0, cpuid
+    bl      arm64_secondary_entry
+
+.Lunsupported_cpu_trap:
+    wfe
+    b       .Lunsupported_cpu_trap
+#endif
+
+.ltorg
+
+#if WITH_SMP
+.data
+/* boot-synchronization flag: secondaries spin on this until the boot cpu
+ * stores zero after the kernel page tables are built */
+DATA(page_tables_not_ready)
+    .long       1
+#endif
+
+.section .bss.prebss.stack
+    .align 4
+DATA(__stack)
+    .skip ARCH_DEFAULT_STACK_SIZE * SMP_MAX_CPUS
+DATA(__stack_end)
+
+#if WITH_KERNEL_VM
+.section ".bss.prebss.translation_table"
+.align 3 + MMU_PAGE_TABLE_ENTRIES_IDENT_SHIFT
+DATA(tt_trampoline)
+    .skip 8 * MMU_PAGE_TABLE_ENTRIES_IDENT
+#endif
diff --git a/src/bsp/lk/arch/arm64/system-onesegment.ld b/src/bsp/lk/arch/arm64/system-onesegment.ld
new file mode 100644
index 0000000..b750b10
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/system-onesegment.ld
@@ -0,0 +1,123 @@
+/*
+ * One-segment arm64 kernel image layout.  The %MEMBASE%/%MEMSIZE%/
+ * %KERNEL_BASE%/%KERNEL_LOAD_OFFSET% placeholders are substituted by the
+ * build's sed rule before this script is handed to the linker.
+ */
+OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64")
+OUTPUT_ARCH(aarch64)
+
+ENTRY(_start)
+SECTIONS
+{
+    . = %KERNEL_BASE% + %KERNEL_LOAD_OFFSET%;
+
+    /* text/read-only data */
+    /* set the load address to physical MEMBASE */
+    .text : AT(%MEMBASE% + %KERNEL_LOAD_OFFSET%) {
+        __code_start = .;
+        KEEP(*(.text.boot))
+        KEEP(*(.text.boot.vectab))
+        *(.text* .sram.text.glue_7* .gnu.linkonce.t.*)
+    }
+
+    .interp : { *(.interp) }
+    .hash : { *(.hash) }
+    .dynsym : { *(.dynsym) }
+    .dynstr : { *(.dynstr) }
+    .rel.text : { *(.rel.text) *(.rel.gnu.linkonce.t*) }
+    .rela.text : { *(.rela.text) *(.rela.gnu.linkonce.t*) }
+    .rel.data : { *(.rel.data) *(.rel.gnu.linkonce.d*) }
+    .rela.data : { *(.rela.data) *(.rela.gnu.linkonce.d*) }
+    .rel.rodata : { *(.rel.rodata) *(.rel.gnu.linkonce.r*) }
+    .rela.rodata : { *(.rela.rodata) *(.rela.gnu.linkonce.r*) }
+    .rel.got : { *(.rel.got) }
+    .rela.got : { *(.rela.got) }
+    .rel.ctors : { *(.rel.ctors) }
+    .rela.ctors : { *(.rela.ctors) }
+    .rel.dtors : { *(.rel.dtors) }
+    .rela.dtors : { *(.rela.dtors) }
+    .rel.init : { *(.rel.init) }
+    .rela.init : { *(.rela.init) }
+    .rel.fini : { *(.rel.fini) }
+    .rela.fini : { *(.rela.fini) }
+    .rel.bss : { *(.rel.bss) }
+    .rela.bss : { *(.rela.bss) }
+    .rel.plt : { *(.rel.plt) }
+    .rela.plt : { *(.rela.plt) }
+    .init : { *(.init) } =0x9090
+    .plt : { *(.plt) }
+
+    /* .ARM.exidx is sorted, so has to go in its own output section.  */
+    __exidx_start = .;
+    .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+    __exidx_end = .;
+
+    .dummy_post_text : {
+	    __code_end = .;
+    }
+
+    /* page-aligned so start.S can apply RO/XN page permissions per segment */
+    .rodata : ALIGN(4096) {
+        __rodata_start = .;
+        __fault_handler_table_start = .;
+        KEEP(*(.rodata.fault_handler_table))
+        __fault_handler_table_end = .;
+        *(.rodata .rodata.* .gnu.linkonce.r.*)
+    }
+
+    /*
+     * extra linker scripts tend to insert sections just after .rodata,
+     * so we want to make sure this symbol comes after anything inserted above,
+     * but not aligned to the next section necessarily.
+     */
+    .dummy_post_rodata : {
+        __rodata_end = .;
+    }
+
+    .data : ALIGN(4096) {
+        /* writable data  */
+        __data_start_rom = .;
+        /* in one segment binaries, the rom data address is on top of the ram data address */
+        __data_start = .;
+        *(.data .data.* .gnu.linkonce.d.*)
+    }
+
+    .ctors : ALIGN(8) {
+        __ctor_list = .;
+        KEEP(*(.ctors .init_array))
+        __ctor_end = .;
+    }
+    .dtors : ALIGN(8) {
+        __dtor_list = .;
+        KEEP(*(.dtors .fini_array))
+        __dtor_end = .;
+    }
+    .got : { *(.got.plt) *(.got) }
+    .dynamic : { *(.dynamic) }
+
+    /*
+     * extra linker scripts tend to insert sections just after .data,
+     * so we want to make sure this symbol comes after anything inserted above,
+     * but not aligned to the next section necessarily.
+     */
+    .dummy_post_data : {
+        __data_end = .;
+    }
+
+    /* unintialized data (in same segment as writable data) */
+    /* .bss.prebss.* (boot stack, trampoline tables) comes first and is NOT
+     * cleared by the start.S bss loop, which starts at __post_prebss_bss_start */
+    .bss : ALIGN(4096) {
+        __bss_start = .;
+        KEEP(*(.bss.prebss.*))
+        . = ALIGN(8);
+	__post_prebss_bss_start = .;
+        *(.bss .bss.*)
+        *(.gnu.linkonce.b.*)
+        *(COMMON)
+        . = ALIGN(8);
+        __bss_end = .;
+    }
+
+    /* Align the end to ensure anything after the kernel ends up on its own pages */
+    . = ALIGN(4096);
+    _end = .;
+
+    . = %KERNEL_BASE% + %MEMSIZE%;
+    _end_of_ram = .;
+
+    /* Strip unnecessary stuff */
+    /DISCARD/ : { *(.comment .note .eh_frame) }
+}
diff --git a/src/bsp/lk/arch/arm64/thread.c b/src/bsp/lk/arch/arm64/thread.c
new file mode 100644
index 0000000..56b0bf9
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/thread.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2008 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <debug.h>
+#include <trace.h>
+#include <kernel/thread.h>
+#include <arch/arm64.h>
+
+#define LOCAL_TRACE 0
+
+/*
+ * Callee-saved register frame pushed on a thread's stack.  Built by
+ * arch_thread_initialize for new threads and consumed/produced by the
+ * assembly routine arm64_context_switch.
+ * NOTE(review): the field order must match the save/restore sequence in
+ * arm64_context_switch (defined in assembly, not visible here) — confirm
+ * before reordering.  `padding` keeps sizeof(frame) a multiple of 16 so
+ * the stack pointer stays 16-byte aligned.
+ */
+struct context_switch_frame {
+    vaddr_t lr;      /* link register: where the thread resumes */
+    vaddr_t r18;
+    vaddr_t r19;
+    vaddr_t r20;
+    vaddr_t r21;
+    vaddr_t r22;
+    vaddr_t r23;
+    vaddr_t r24;
+    vaddr_t r25;
+    vaddr_t r26;
+    vaddr_t r27;
+    vaddr_t r28;
+    vaddr_t r29;     /* frame pointer */
+    vaddr_t padding; /* keep the frame 16-byte aligned */
+};
+
+extern void arm64_context_switch(addr_t *old_sp, addr_t new_sp);
+
+/*
+ * Trampoline that every new thread "returns" into on its first context
+ * switch (arch_thread_initialize points the frame's lr here).  Releases
+ * the thread lock held across the reschedule, enables interrupts, runs
+ * the thread's entry routine and passes its return value to thread_exit.
+ * Never returns.
+ */
+static void initial_thread_func(void) __NO_RETURN;
+static void initial_thread_func(void)
+{
+    int ret;
+
+    thread_t *current_thread = get_current_thread();
+
+    LTRACEF("initial_thread_func: thread %p calling %p with arg %p\n", current_thread, current_thread->entry, current_thread->arg);
+
+    /* release the thread lock that was implicitly held across the reschedule */
+    spin_unlock(&thread_lock);
+    arch_enable_ints();
+
+    ret = current_thread->entry(current_thread->arg);
+
+    LTRACEF("initial_thread_func: thread %p exiting with %d\n", current_thread, ret);
+
+    thread_exit(ret);
+}
+
+/*
+ * Prepare a freshly-created (not yet run) thread: build a zeroed context
+ * switch frame at the top of its stack whose saved lr points at
+ * initial_thread_func, so the first arm64_context_switch into this thread
+ * lands there.  Stores the resulting stack pointer in t->arch.sp.
+ */
+void arch_thread_initialize(thread_t *t)
+{
+    // create a default stack frame on the stack
+    vaddr_t stack_top = (vaddr_t)t->stack + t->stack_size;
+
+    // make sure the top of the stack is 16 byte aligned for EABI compliance
+    stack_top = ROUNDDOWN(stack_top, 16);
+
+    struct context_switch_frame *frame = (struct context_switch_frame *)(stack_top);
+    frame--;
+
+    // fill it in
+    memset(frame, 0, sizeof(*frame));
+    frame->lr = (vaddr_t)&initial_thread_func;
+
+    // set the stack pointer
+    t->arch.sp = (vaddr_t)frame;
+}
+
+/*
+ * Switch from oldthread to newthread: give the FPU code a chance to save
+ * lazy state for the outgoing thread, then swap stacks/registers in the
+ * assembly routine arm64_context_switch (saves into oldthread->arch.sp,
+ * restores from newthread->arch.sp).
+ */
+void arch_context_switch(thread_t *oldthread, thread_t *newthread)
+{
+    LTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
+    arm64_fpu_pre_context_switch(oldthread);
+    arm64_context_switch(&oldthread->arch.sp, newthread->arch.sp);
+}
+
+/*
+ * Debug helper: print the saved stack pointer of a thread.  Skipped for
+ * the running thread, whose sp lives in the cpu rather than arch.sp.
+ */
+void arch_dump_thread(thread_t *t)
+{
+    if (t->state != THREAD_RUNNING) {
+        dprintf(INFO, "\tarch: ");
+        dprintf(INFO, "sp 0x%lx\n", t->arch.sp);
+    }
+}
diff --git a/src/bsp/lk/arch/arm64/toolchain.mk b/src/bsp/lk/arch/arm64/toolchain.mk
new file mode 100644
index 0000000..88a17a1
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/toolchain.mk
@@ -0,0 +1,34 @@
+# Locate an aarch64 cross toolchain: try the bare-metal aarch64-elf- prefix
+# first, fall back to aarch64-linux-android-.  Callers may pre-set
+# ARCH_arm64_TOOLCHAIN_PREFIX to skip the search.
+ifndef ARCH_arm64_TOOLCHAIN_PREFIX
+ARCH_arm64_TOOLCHAIN_PREFIX := aarch64-elf-
+FOUNDTOOL=$(shell which $(ARCH_arm64_TOOLCHAIN_PREFIX)gcc)
+ifeq ($(FOUNDTOOL),)
+ARCH_arm64_TOOLCHAIN_PREFIX := aarch64-linux-android-
+FOUNDTOOL=$(shell which $(ARCH_arm64_TOOLCHAIN_PREFIX)gcc)
+ifeq ($(FOUNDTOOL),)
+$(error cannot find toolchain, please set ARCH_arm64_TOOLCHAIN_PREFIX or add it to your path)
+endif
+endif
+endif
+
+# Unless FP use is explicitly allowed, forbid FP/SIMD registers in kernel
+# code (-mgeneral-regs-only) and advertise that via WITH_NO_FP.
+ifeq (false,$(call TOBOOL,$(ALLOW_FP_USE)))
+ARCH_arm64_COMPILEFLAGS := -mgeneral-regs-only -DWITH_NO_FP=1
+else
+ARCH_arm64_COMPILEFLAGS :=
+endif
+
+# When building with clang, point it at the gcc toolchain directory for the
+# assembler/linker and set an explicit target triple.
+ifeq ($(call TOBOOL,$(CLANGBUILD)),true)
+
+CLANG_ARM64_TARGET_SYS ?= linux
+CLANG_ARM64_TARGET_ABI ?= gnu
+
+CLANG_ARM64_AS_DIR ?= $(shell dirname $(shell dirname $(ARCH_arm64_TOOLCHAIN_PREFIX)))
+
+ARM64_AS_PATH ?= $(wildcard $(CLANG_ARM64_AS_DIR)/*/bin/as)
+ifeq ($(ARM64_AS_PATH),)
+$(error Could not find $(CLANG_ARM64_AS_DIR)/*/bin/as, did the directory structure change?)
+endif
+
+ARCH_arm64_COMPILEFLAGS += -target aarch64-$(CLANG_ARM64_TARGET_SYS)-$(CLANG_ARM64_TARGET_ABI) \
+			   --gcc-toolchain=$(CLANG_ARM64_AS_DIR)/
+
+endif
diff --git a/src/bsp/lk/arch/microblaze/arch.c b/src/bsp/lk/arch/microblaze/arch.c
new file mode 100644
index 0000000..3dad646
--- /dev/null
+++ b/src/bsp/lk/arch/microblaze/arch.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <trace.h>
+#include <debug.h>
+#include <stdint.h>
+#include <arch/microblaze.h>
+
+#define LOCAL_TRACE 0
+
+/*
+ * Early architecture bring-up: turn on the instruction and data caches by
+ * setting the cache-enable bits in the MSR.  MicroBlaze documents MSR bits
+ * MSB-first, so bit n maps to shift (31 - n); presumably bits 26/24 are
+ * ICE/DCE — confirm against the MicroBlaze reference guide.
+ */
+void arch_early_init(void)
+{
+    LTRACE;
+
+    /* enable i/d cache */
+    uint32_t val = mb_read_msr();
+    val |= (1 << (31 - 26)) | (1 << (31 - 24));
+    mb_write_msr(val);
+}
+
+/* Later-stage architecture init; nothing needed on this port beyond tracing. */
+void arch_init(void)
+{
+    LTRACE;
+}
+
+/* Idle the cpu until the next interrupt using the MicroBlaze sleep instruction. */
+void arch_idle(void)
+{
+    asm volatile("sleep");
+}
+
+/* Chain-loading another image is not implemented on microblaze; panics. */
+void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3)
+{
+    PANIC_UNIMPLEMENTED;
+}
+
+/* unimplemented cache operations: all cache-maintenance entry points panic
+ * on this port; callers must not rely on them until implemented */
+void arch_disable_cache(uint flags) { PANIC_UNIMPLEMENTED; }
+void arch_enable_cache(uint flags) { PANIC_UNIMPLEMENTED; }
+
+void arch_clean_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
+void arch_clean_invalidate_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
+void arch_invalidate_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
+void arch_sync_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
diff --git a/src/bsp/lk/arch/microblaze/asm.S b/src/bsp/lk/arch/microblaze/asm.S
new file mode 100644
index 0000000..24933fd
--- /dev/null
+++ b/src/bsp/lk/arch/microblaze/asm.S
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+
+/* void microblaze_context_switch(
+    struct microblaze_context_switch_frame *oldcs,
+    struct microblaze_context_switch_frame *newcs);
+
+   Saves the current callee-context into *oldcs and restores *newcs.
+   r5/r6 are the first two argument registers of the MicroBlaze ABI; the
+   word offsets (0x0..0x50) must match struct microblaze_context_switch_frame
+   in arch_thread.h.  Returns into the restored thread via r15 (link reg). */
+FUNCTION(microblaze_context_switch)
+    # r5 = oldcs
+    # r6 = newcs
+    # save the outgoing context (r1 = sp, r2 = small-data base, r13..r31)
+    swi     r1, r5, 0x0
+    swi     r2, r5, 0x4
+    swi     r13, r5, 0x8
+    swi     r14, r5, 0xc
+    swi     r15, r5, 0x10
+    swi     r16, r5, 0x14
+    swi     r17, r5, 0x18
+    swi     r18, r5, 0x1c
+    swi     r19, r5, 0x20
+    swi     r20, r5, 0x24
+    swi     r21, r5, 0x28
+    swi     r22, r5, 0x2c
+    swi     r23, r5, 0x30
+    swi     r24, r5, 0x34
+    swi     r25, r5, 0x38
+    swi     r26, r5, 0x3c
+    swi     r27, r5, 0x40
+    swi     r28, r5, 0x44
+    swi     r29, r5, 0x48
+    swi     r30, r5, 0x4c
+    swi     r31, r5, 0x50
+
+    # restore the new context
+    lwi     r31, r6, 0x50
+    lwi     r30, r6, 0x4c
+    lwi     r29, r6, 0x48
+    lwi     r28, r6, 0x44
+    lwi     r27, r6, 0x40
+    lwi     r26, r6, 0x3c
+    lwi     r25, r6, 0x38
+    lwi     r24, r6, 0x34
+    lwi     r23, r6, 0x30
+    lwi     r22, r6, 0x2c
+    lwi     r21, r6, 0x28
+    lwi     r20, r6, 0x24
+    lwi     r19, r6, 0x20
+    lwi     r18, r6, 0x1c
+    lwi     r17, r6, 0x18
+    lwi     r16, r6, 0x14
+    lwi     r15, r6, 0x10
+    lwi     r14, r6, 0xc
+    lwi     r13, r6, 0x8
+    lwi     r2, r6, 0x4
+    lwi     r1, r6, 0x0
+
+    # return through the restored link register (delay slot follows rtsd)
+    rtsd    r15, 8
+    nop
+
+
diff --git a/src/bsp/lk/arch/microblaze/exceptions.c b/src/bsp/lk/arch/microblaze/exceptions.c
new file mode 100644
index 0000000..857662e
--- /dev/null
+++ b/src/bsp/lk/arch/microblaze/exceptions.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <compiler.h>
+#include <trace.h>
+#include <arch/microblaze.h>
+#include <kernel/thread.h>
+
+/* Top-level hardware interrupt entry point.  The gcc interrupt_handler
+ * attribute makes the compiler emit the interrupt prologue/epilogue for us. */
+void microblaze_irq(void) __attribute__((interrupt_handler));
+
+enum handler_return platform_irq_handler(void);
+
+/* Dispatch to the platform's irq handler; if it asks for a reschedule,
+ * preempt the current thread before returning from the interrupt. */
+void microblaze_irq(void)
+{
+    if (platform_irq_handler() == INT_RESCHEDULE)
+        thread_preempt();
+}
+
diff --git a/src/bsp/lk/arch/microblaze/include/arch/arch_ops.h b/src/bsp/lk/arch/microblaze/include/arch/arch_ops.h
new file mode 100644
index 0000000..8646f34
--- /dev/null
+++ b/src/bsp/lk/arch/microblaze/include/arch/arch_ops.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <compiler.h>
+
+/* Use the single-instruction MSRSET/MSRCLR forms (optional MicroBlaze
+ * feature) instead of the three-instruction mfs/ori|andni/mts sequence. */
+#define USE_MSRSET 1
+
+/*
+ * Enable interrupts by setting MSR bit 1 (interrupt enable).
+ * CF is presumably a compiler-fence macro from compiler.h — confirm; it
+ * keeps the compiler from moving memory accesses out of the protected
+ * region before interrupts are re-enabled.
+ */
+static inline void arch_enable_ints(void)
+{
+    CF;
+    uint32_t temp;
+    __asm__ volatile(
+#if USE_MSRSET
+        "msrset %0, (1<<1)"
+#else
+        "mfs    %0, rmsr;"
+        "ori    %0, %0, (1<<1);"
+        "mts    rmsr, %0"
+#endif
+        : "=r" (temp));
+}
+
+/*
+ * Disable interrupts by clearing MSR bit 1 (interrupt enable).  The
+ * trailing CF (presumably a compiler fence — confirm) keeps subsequent
+ * memory accesses from being hoisted above the disable.
+ */
+static inline void arch_disable_ints(void)
+{
+    uint32_t temp;
+    __asm__ volatile(
+#if USE_MSRSET
+        "msrclr %0, (1<<1)"
+#else
+        "mfs    %0, rmsr;"
+        "andni  %0, %0, (1<<1);"
+        "mts    rmsr, %0"
+#endif
+        : "=r" (temp));
+    CF;
+}
+
+/* Return true when MSR bit 1 (interrupt enable) is clear, i.e. interrupts
+ * are currently masked. */
+static inline bool arch_ints_disabled(void)
+{
+    uint32_t state;
+
+    __asm__ volatile(
+        "mfs    %0, rmsr;"
+        : "=r" (state));
+
+    return !(state & (1<<1));
+}
+
+/* Fetch-and-op primitives built on the gcc __atomic builtins.  All return
+ * the PREVIOUS value.  Relaxed memory ordering is used; note this port
+ * reports a single cpu (arch_curr_cpu_num returns 0), so no cross-cpu
+ * ordering is needed. */
+static inline int atomic_add(volatile int *ptr, int val)
+{
+    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
+}
+
+static inline int atomic_or(volatile int *ptr, int val)
+{
+    return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
+}
+
+static inline int atomic_and(volatile int *ptr, int val)
+{
+    return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
+}
+
+/* Atomically store val and return the previous value. */
+static inline int atomic_swap(volatile int *ptr, int val)
+{
+    return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
+}
+
+/* use a global pointer to store the current_thread (single cpu, so a plain
+ * global is sufficient; it is only mutated with interrupts disabled by the
+ * scheduler — NOTE(review): confirm against kernel/thread.c) */
+extern struct thread *_current_thread;
+
+static inline struct thread *get_current_thread(void)
+{
+    return _current_thread;
+}
+
+static inline void set_current_thread(struct thread *t)
+{
+    _current_thread = t;
+}
+
+/* No cycle counter wired up on this port; always 0. */
+static inline uint32_t arch_cycle_count(void) { return 0; }
+
+/* Uniprocessor port: the current cpu is always cpu 0. */
+static inline uint arch_curr_cpu_num(void)
+{
+    return 0;
+}
+
diff --git a/src/bsp/lk/arch/microblaze/include/arch/arch_thread.h b/src/bsp/lk/arch/microblaze/include/arch/arch_thread.h
new file mode 100644
index 0000000..baa650c
--- /dev/null
+++ b/src/bsp/lk/arch/microblaze/include/arch/arch_thread.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <sys/types.h>
+
+struct microblaze_context_switch_frame {
+    uint32_t r1; // stack pointer
+    uint32_t r2; // read-only small data base pointer
+
+    uint32_t r13; // read-write small data base pointer
+    uint32_t r14;
+    uint32_t r15; // link register
+    uint32_t r16;
+    uint32_t r17;
+    uint32_t r18;
+
+    /* callee saved */
+    uint32_t r19;
+    uint32_t r20;
+    uint32_t r21;
+    uint32_t r22;
+    uint32_t r23;
+    uint32_t r24;
+    uint32_t r25;
+    uint32_t r26;
+    uint32_t r27;
+    uint32_t r28;
+    uint32_t r29;
+    uint32_t r30;
+    uint32_t r31;
+};
+
+struct arch_thread {
+    struct microblaze_context_switch_frame cs_frame;
+};
+
+void microblaze_context_switch(struct microblaze_context_switch_frame *oldcs,
+    struct microblaze_context_switch_frame *newcs);
+
diff --git a/src/bsp/lk/arch/microblaze/include/arch/defines.h b/src/bsp/lk/arch/microblaze/include/arch/defines.h
new file mode 100644
index 0000000..2d3d8e7
--- /dev/null
+++ b/src/bsp/lk/arch/microblaze/include/arch/defines.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#define PAGE_SIZE 4096
+#define PAGE_SIZE_SHIFT 12
+
+// XXX is this right?
+#define CACHE_LINE 32
+
+#define ARCH_DEFAULT_STACK_SIZE 4096
diff --git a/src/bsp/lk/arch/microblaze/include/arch/microblaze.h b/src/bsp/lk/arch/microblaze/include/arch/microblaze.h
new file mode 100644
index 0000000..3f14333
--- /dev/null
+++ b/src/bsp/lk/arch/microblaze/include/arch/microblaze.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+static inline uint32_t mb_read_msr(void)
+{
+    uint32_t temp;
+    __asm__ volatile(
+        "mfs    %0, rmsr;" : "=r" (temp));
+
+    return temp;
+}
+
+static inline void mb_write_msr(uint32_t val)
+{
+    __asm__ volatile(
+        "mts    rmsr, %0" :: "r" (val));
+}
+
+
diff --git a/src/bsp/lk/arch/microblaze/include/arch/spinlock.h b/src/bsp/lk/arch/microblaze/include/arch/spinlock.h
new file mode 100644
index 0000000..5c50c5b
--- /dev/null
+++ b/src/bsp/lk/arch/microblaze/include/arch/spinlock.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <arch/ops.h>
+#include <stdbool.h>
+
+#if WITH_SMP
+#error microblaze does not support SMP
+#endif
+
+#define SPIN_LOCK_INITIAL_VALUE (0)
+
+typedef unsigned int spin_lock_t;
+
+typedef unsigned int spin_lock_saved_state_t;
+typedef unsigned int spin_lock_save_flags_t;
+
+static inline void arch_spin_lock(spin_lock_t *lock)
+{
+    *lock = 1;
+}
+
+static inline int arch_spin_trylock(spin_lock_t *lock)
+{
+    return 0;
+}
+
+static inline void arch_spin_unlock(spin_lock_t *lock)
+{
+    *lock = 0;
+}
+
+static inline void arch_spin_lock_init(spin_lock_t *lock)
+{
+    *lock = SPIN_LOCK_INITIAL_VALUE;
+}
+
+static inline bool arch_spin_lock_held(spin_lock_t *lock)
+{
+    return *lock != 0;
+}
+
+    /* default flag is to just disable plain irqs */
+#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS  0
+
+enum {
+    /* private */
+    SPIN_LOCK_STATE_RESTORE_IRQ = 1,
+};
+
+static inline void
+arch_interrupt_save(spin_lock_saved_state_t *statep, spin_lock_save_flags_t flags)
+{
+    spin_lock_saved_state_t state = 0;
+    if (!arch_ints_disabled()) {
+        state |= SPIN_LOCK_STATE_RESTORE_IRQ;
+        arch_disable_ints();
+    }
+    *statep = state;
+}
+
+static inline void
+arch_interrupt_restore(spin_lock_saved_state_t old_state, spin_lock_save_flags_t flags)
+{
+    if (old_state & SPIN_LOCK_STATE_RESTORE_IRQ)
+        arch_enable_ints();
+}
+
+
+
+
diff --git a/src/bsp/lk/arch/microblaze/linker.ld b/src/bsp/lk/arch/microblaze/linker.ld
new file mode 100644
index 0000000..5c549e7
--- /dev/null
+++ b/src/bsp/lk/arch/microblaze/linker.ld
@@ -0,0 +1,129 @@
+OUTPUT_FORMAT("elf32-microblaze", "elf32-microblaze", "elf32-microblaze")
+OUTPUT_ARCH(microblaze)
+
+ENTRY(_start)
+SECTIONS
+{
+    . = %KERNEL_BASE% + %KERNEL_LOAD_OFFSET%;
+
+    _start = .;
+
+    /* vector table goes at 0, for qemu target, at least */
+    .vectors : AT(%VECTOR_BASE_PHYS%) {
+        KEEP(*(.vectors))
+    }
+
+    /* text/read-only data */
+    /* set the load address to physical MEMBASE */
+    .text : AT(%MEMBASE% + %KERNEL_LOAD_OFFSET% + SIZEOF(.vectors)) {
+        KEEP(*(.text.boot))
+        *(.text* .gnu.linkonce.t.*)
+    }
+
+    .interp : { *(.interp) }
+    .hash : { *(.hash) }
+    .dynsym : { *(.dynsym) }
+    .dynstr : { *(.dynstr) }
+    .rel.text : { *(.rel.text) *(.rel.gnu.linkonce.t*) }
+    .rela.text : { *(.rela.text) *(.rela.gnu.linkonce.t*) }
+    .rel.data : { *(.rel.data) *(.rel.gnu.linkonce.d*) }
+    .rela.data : { *(.rela.data) *(.rela.gnu.linkonce.d*) }
+    .rel.rodata : { *(.rel.rodata) *(.rel.gnu.linkonce.r*) }
+    .rela.rodata : { *(.rela.rodata) *(.rela.gnu.linkonce.r*) }
+    .rel.got : { *(.rel.got) }
+    .rela.got : { *(.rela.got) }
+    .rel.ctors : { *(.rel.ctors) }
+    .rela.ctors : { *(.rela.ctors) }
+    .rel.dtors : { *(.rel.dtors) }
+    .rela.dtors : { *(.rela.dtors) }
+    .rel.init : { *(.rel.init) }
+    .rela.init : { *(.rela.init) }
+    .rel.fini : { *(.rel.fini) }
+    .rela.fini : { *(.rela.fini) }
+    .rel.bss : { *(.rel.bss) }
+    .rela.bss : { *(.rela.bss) }
+    .rel.plt : { *(.rel.plt) }
+    .rela.plt : { *(.rela.plt) }
+    .init : { *(.init) }
+    .plt : { *(.plt) }
+
+    .rodata : ALIGN(4) {
+        __rodata_start = .;
+        *(.rodata .rodata.* .gnu.linkonce.r.*)
+        . = ALIGN(4);
+        _SDATA2_START__ = .;
+        *(.sdata2)
+        _SDATA2_END__ = .;
+    }
+
+    /*
+     * extra linker scripts tend to insert sections just after .rodata,
+     * so we want to make sure this symbol comes after anything inserted above,
+     * but not aligned to the next section necessarily.
+     */
+    .dummy_post_rodata : {
+        __rodata_end = .;
+    }
+
+    .data : ALIGN(4) {
+        /* writable data  */
+        __data_start_rom = .;
+        /* in one segment binaries, the rom data address is on top of the ram data address */
+        __data_start = .;
+        *(.data .data.* .gnu.linkonce.d.*)
+        __ctor_list = .;
+        KEEP(*(.ctors .init_array))
+        __ctor_end = .;
+        __dtor_list = .;
+        KEEP(*(.dtors .fini_array))
+        __dtor_end = .;
+        *(.got*)
+        *(.dynamic)
+
+        /* read-write small data with initial value */
+        _SDATA_START__ = .;
+        *(.sdata)
+        _SDATA_END__ = .;
+    }
+
+    /*
+     * extra linker scripts tend to insert sections just after .data,
+     * so we want to make sure this symbol comes after anything inserted above,
+     * but not aligned to the next section necessarily.
+     */
+    .dummy_post_data : {
+        __data_end = .;
+    }
+
+    /* uninitialized data (in same segment as writable data) */
+    .bss : ALIGN(4) {
+        __bss_start = .;
+
+        /* read only small variables without initial value */
+        _SBSS2_START__ = .;
+        *(.sbss2*)
+        _SBSS2_END__ = .;
+
+        /* read-write small variables without initial value */
+        _sbss_start__ = .;
+        *(.sbss*)
+        _sbss_end__ = .;
+
+        /* regular bss */
+        *(.bss .bss.*)
+        *(.gnu.linkonce.b.*)
+        *(COMMON)
+
+        . = ALIGN(4);
+        __bss_end = .;
+    }
+
+    _end = .;
+
+    . = %KERNEL_BASE% + %MEMSIZE%;
+    _end_of_ram = .;
+
+    /* Strip unnecessary stuff */
+    /DISCARD/ : { *(.comment .note .eh_frame) }
+}
+
diff --git a/src/bsp/lk/arch/microblaze/rules.mk b/src/bsp/lk/arch/microblaze/rules.mk
new file mode 100644
index 0000000..86f4a11
--- /dev/null
+++ b/src/bsp/lk/arch/microblaze/rules.mk
@@ -0,0 +1,70 @@
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE := $(LOCAL_DIR)
+
+MODULE_SRCS += \
+	$(LOCAL_DIR)/start.S \
+	$(LOCAL_DIR)/arch.c \
+	$(LOCAL_DIR)/asm.S \
+	$(LOCAL_DIR)/exceptions.c \
+	$(LOCAL_DIR)/thread.c \
+
+#	$(LOCAL_DIR)/cache.c \
+	$(LOCAL_DIR)/cache-ops.S \
+	$(LOCAL_DIR)/ops.S \
+	$(LOCAL_DIR)/mmu.c \
+	$(LOCAL_DIR)/faults.c \
+	$(LOCAL_DIR)/descriptor.c
+
+GLOBAL_DEFINES += \
+	SMP_MAX_CPUS=1
+
+# set the default toolchain to microblaze elf and set a #define
+ifndef TOOLCHAIN_PREFIX
+TOOLCHAIN_PREFIX := microblaze-elf-
+endif
+
+WITH_LINKER_GC ?= 0
+
+LITTLE_ENDIAN ?= 0
+
+ifneq ($(LITTLE_ENDIAN),0)
+GLOBAL_COMPILEFLAGS += -mlittle-endian
+GLOBAL_LDFLAGS += -EL
+GLOBAL_MODULE_LDFLAGS += -EL
+endif
+
+
+cc-option = $(shell if test -z "`$(1) $(2) -S -o /dev/null -xc /dev/null 2>&1`"; \
+	then echo "$(2)"; else echo "$(3)"; fi ;)
+
+ARCH_COMPILEFLAGS :=
+ARCH_OPTFLAGS := -O2
+
+GLOBAL_LDFLAGS += -relax
+
+KERNEL_BASE ?= $(MEMBASE)
+KERNEL_LOAD_OFFSET ?= 0
+VECTOR_BASE_PHYS ?= 0
+
+GLOBAL_DEFINES += \
+    MEMBASE=$(MEMBASE) \
+    MEMSIZE=$(MEMSIZE)
+
+# generated files that should be removed by the clean make rule
+GENERATED += \
+	$(BUILDDIR)/linker.ld
+
+# rules for generating the linker
+$(BUILDDIR)/linker.ld: $(LOCAL_DIR)/linker.ld $(wildcard arch/*.ld) linkerscript.phony
+	@echo generating $@
+	@$(MKDIR)
+	$(NOECHO)sed "s/%MEMBASE%/$(MEMBASE)/;s/%MEMSIZE%/$(MEMSIZE)/;s/%KERNEL_BASE%/$(KERNEL_BASE)/;s/%KERNEL_LOAD_OFFSET%/$(KERNEL_LOAD_OFFSET)/;s/%VECTOR_BASE_PHYS%/$(VECTOR_BASE_PHYS)/" < $< > $@.tmp
+	@$(call TESTANDREPLACEFILE,$@.tmp,$@)
+
+linkerscript.phony:
+.PHONY: linkerscript.phony
+
+LINKER_SCRIPT += $(BUILDDIR)/linker.ld
+
+include make/module.mk
diff --git a/src/bsp/lk/arch/microblaze/start.S b/src/bsp/lk/arch/microblaze/start.S
new file mode 100644
index 0000000..744f0d6
--- /dev/null
+++ b/src/bsp/lk/arch/microblaze/start.S
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+
+.section ".vectors", "ax"
+.globl _vectab
+_vectab:
+/* vector table here */
+    # start vector
+    brai start
+    # user exception
+    brai unhandled_exception
+    # interrupt
+    brai microblaze_irq
+    # break
+    brai unhandled_exception
+    # hardware exception
+    brai unhandled_exception
+
+    # reserved for future
+.fill (0x50 - 0x28)
+
+.section ".text.boot"
+FUNCTION(start)
+    # set the default stack
+    addik   r1, r0, default_stack_top
+
+    # set up small data pointers
+    addik   r2, r0, _SDATA2_START__
+    addik   r13, r0, _SDATA_START__
+
+    # set the processor mode to default
+    mts     rmsr, r0
+
+    # zero out bss sections
+    addik   r5, r0, __bss_start
+    addik   r6, r0, 0
+    rsubik  r7, r5, __bss_end
+    brlid   r15, memset
+    nop
+
+    # arguments to main
+    addik   r5, r0, 1
+    addik   r6, r0, 2
+    addik   r7, r0, 3
+    brlid   r15, lk_main
+    addik   r8, r0, 4
+
+    # shouldn't be here
+    bri     .
+
+FUNCTION(unhandled_exception)
+    bri     .
+
+.bss
+.align 3
+LOCAL_DATA(default_stack)
+    .skip 4096
+LOCAL_DATA(default_stack_top)
+
+/* vim: set ts=4 sw=4 expandtab: */
+
diff --git a/src/bsp/lk/arch/microblaze/thread.c b/src/bsp/lk/arch/microblaze/thread.c
new file mode 100644
index 0000000..41410bd
--- /dev/null
+++ b/src/bsp/lk/arch/microblaze/thread.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <debug.h>
+#include <trace.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <kernel/thread.h>
+#include <arch/microblaze.h>
+
+#define LOCAL_TRACE 0
+
+struct thread *_current_thread;
+
+static void initial_thread_func(void) __NO_RETURN;
+static void initial_thread_func(void)
+{
+    thread_t *ct = get_current_thread();
+
+#if LOCAL_TRACE
+    LTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
+    dump_thread(ct);
+#endif
+
+    /* release the thread lock that was implicitly held across the reschedule */
+    spin_unlock(&thread_lock);
+    arch_enable_ints();
+
+    int ret = ct->entry(ct->arg);
+
+    LTRACEF("thread %p exiting with %d\n", ct, ret);
+
+    thread_exit(ret);
+}
+
+void arch_thread_initialize(thread_t *t)
+{
+    LTRACEF("t %p (%s)\n", t, t->name);
+
+    /* some registers we want to clone for the new thread */
+    register uint32_t r2 asm("r2");
+    register uint32_t r13 asm("r13");
+
+    /* zero out the thread context */
+    memset(&t->arch.cs_frame, 0, sizeof(t->arch.cs_frame));
+
+    t->arch.cs_frame.r1 = (vaddr_t)t->stack + t->stack_size;
+    t->arch.cs_frame.r2 = r2;
+    t->arch.cs_frame.r13 = r13;
+    t->arch.cs_frame.r15 = (vaddr_t)&initial_thread_func;
+    // NOTE: appears to be bug in binutils 2.25 that forces us to -8 from the offset
+    // using this method if gc-sections is enabled.
+    *(volatile uint32_t *)&t->arch.cs_frame.r15 -= 8;
+}
+
+void arch_context_switch(thread_t *oldthread, thread_t *newthread)
+{
+    LTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
+
+    microblaze_context_switch(&oldthread->arch.cs_frame, &newthread->arch.cs_frame);
+}
+
+void arch_dump_thread(thread_t *t)
+{
+    if (t->state != THREAD_RUNNING) {
+        dprintf(INFO, "\tarch: ");
+        dprintf(INFO, "sp 0x%x\n", t->arch.cs_frame.r1);
+    }
+}
+
diff --git a/src/bsp/lk/arch/mips/arch.c b/src/bsp/lk/arch/mips/arch.c
new file mode 100644
index 0000000..e12982f
--- /dev/null
+++ b/src/bsp/lk/arch/mips/arch.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <trace.h>
+#include <debug.h>
+#include <stdint.h>
+#include <bits.h>
+#include <arch/mips.h>
+#include <platform.h>
+
+#define LOCAL_TRACE 0
+
+void arch_early_init(void)
+{
+    LTRACE;
+
+    /* configure the vector table */
+    uint32_t temp = mips_read_c0_status();
+    temp &= ~(1<<22); /* unset BEV, which moves vectors to 0x80000000 */
+    temp &= ~(1<<2);  /* clear ERL */
+
+    /* mask all of the irq handlers */
+    temp &= ~(1<<8); // IM0
+    temp &= ~(1<<9); // IM1
+    temp &= ~(1<<10); // IM2
+    temp &= ~(1<<11); // IM3
+    temp &= ~(1<<12); // IM4
+    temp &= ~(1<<13); // IM5
+    temp &= ~(1<<14); // IM6
+    temp &= ~(1<<15); // IM7
+    temp &= ~(1<<16); // IM8
+    temp &= ~(1<<18); // IM9 (note the bit gap)
+
+    mips_write_c0_status(temp);
+
+    /* set ebase */
+    mips_write_c0_ebase(MEMBASE);
+
+    /* make sure we take exceptions in 32bit mips mode */
+    mips_write_c0_config3(mips_read_c0_config3() & ~(1<<16));
+
+    /* set vectored mode */
+    temp = mips_read_c0_intctl();
+    temp &= ~(0b1111 << 5);
+    temp |= 1 << 5; /* 32 byte spacing */
+    STATIC_ASSERT(VECTORED_OFFSET_SHIFT == 32);
+
+    mips_write_c0_intctl(temp);
+
+    temp = mips_read_c0_cause();
+    temp |= (1<<23); /* IV vectored mode */
+    mips_write_c0_cause(temp);
+}
+
+void arch_init(void)
+{
+    LTRACE;
+
+    printf("MIPS registers:\n");
+    printf("\tPRId 0x%x\n", mips_read_c0_prid());
+    printf("\tconfig  0x%x\n", mips_read_c0_config());
+    printf("\tconfig1 0x%x\n", mips_read_c0_config1());
+    printf("\tconfig2 0x%x\n", mips_read_c0_config2());
+    printf("\tconfig3 0x%x\n", mips_read_c0_config3());
+    printf("\tconfig4 0x%x\n", mips_read_c0_config4());
+    printf("\tconfig5 0x%x\n", mips_read_c0_config5());
+    printf("\tconfig6 0x%x\n", mips_read_c0_config6());
+    printf("\tconfig7 0x%x\n", mips_read_c0_config7());
+    printf("\tstatus  0x%x\n", mips_read_c0_status());
+    uint32_t intctl = mips_read_c0_intctl();
+    printf("\tintctl  0x%x\n", intctl);
+    printf("\t\tIPTI  0x%lx\n", BITS_SHIFT(intctl, 31, 29));
+    printf("\t\tIPPCI 0x%lx\n", BITS_SHIFT(intctl, 28, 26));
+    printf("\t\tIPFDC 0x%lx\n", BITS_SHIFT(intctl, 25, 23));
+    printf("\tsrsctl  0x%x\n", mips_read_c0_srsctl());
+    printf("\tebase   0x%x\n", mips_read_c0_ebase());
+    printf("\tcount   0x%x\n", mips_read_c0_count());
+    printf("\tcompare 0x%x\n", mips_read_c0_compare());
+
+    __asm__ volatile("syscall");
+
+    LTRACE_EXIT;
+}
+
+void arch_idle(void)
+{
+    asm volatile("wait");
+}
+
+void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3)
+{
+    PANIC_UNIMPLEMENTED;
+}
+
+void mips_enable_irq(uint num)
+{
+    uint32_t temp = mips_read_c0_status();
+    if (num < 9) {
+        temp |= (1 << (num + 8));
+    } else if (num == 9) {
+        temp |= (1 << 18);
+    }
+    mips_write_c0_status(temp);
+}
+
+void mips_disable_irq(uint num)
+{
+    uint32_t temp = mips_read_c0_status();
+    if (num < 9) {
+        temp &= ~(1 << (num + 8));
+    } else if (num == 9) {
+        temp &= ~(1 << 18);
+    }
+    mips_write_c0_status(temp);
+}
+
+/* unimplemented cache operations */
+void arch_disable_cache(uint flags) { PANIC_UNIMPLEMENTED; }
+void arch_enable_cache(uint flags) { PANIC_UNIMPLEMENTED; }
+
+void arch_clean_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
+void arch_clean_invalidate_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
+void arch_invalidate_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
+void arch_sync_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
diff --git a/src/bsp/lk/arch/mips/asm.S b/src/bsp/lk/arch/mips/asm.S
new file mode 100644
index 0000000..7700039
--- /dev/null
+++ b/src/bsp/lk/arch/mips/asm.S
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+
+/* void mips_context_switch(
+    struct mips_context_switch_frame *oldcs,
+    struct mips_context_switch_frame *newcs); */
+FUNCTION(mips_context_switch)
+    # a0 = oldcs
+    # a1 = newcs
+
+    # save old state
+    sw      $s0, 0($a0)
+    sw      $s1, 4($a0)
+    sw      $s2, 8($a0)
+    sw      $s3, 12($a0)
+    sw      $s4, 16($a0)
+    sw      $s5, 20($a0)
+    sw      $s6, 24($a0)
+    sw      $s7, 28($a0)
+    sw      $s8, 32($a0)
+    sw      $ra, 36($a0)
+    sw      $sp, 40($a0)
+
+    # load new state
+    lw      $s0, 0($a1)
+    lw      $s1, 4($a1)
+    lw      $s2, 8($a1)
+    lw      $s3, 12($a1)
+    lw      $s4, 16($a1)
+    lw      $s5, 20($a1)
+    lw      $s6, 24($a1)
+    lw      $s7, 28($a1)
+    lw      $s8, 32($a1)
+    lw      $ra, 36($a1)
+    lw      $sp, 40($a1)
+
+    jr      $ra
+    nop
+
diff --git a/src/bsp/lk/arch/mips/exceptions.c b/src/bsp/lk/arch/mips/exceptions.c
new file mode 100644
index 0000000..f2b1035
--- /dev/null
+++ b/src/bsp/lk/arch/mips/exceptions.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <trace.h>
+#include <debug.h>
+#include <assert.h>
+#include <stdint.h>
+#include <bits.h>
+#include <kernel/thread.h>
+#include <kernel/debug.h>
+#include <arch/mips.h>
+
+#define LOCAL_TRACE 0
+
+extern enum handler_return platform_irq(struct mips_iframe *iframe, uint num);
+
+void mips_gen_exception(struct mips_iframe *iframe)
+{
+    uint32_t excode = BITS_SHIFT(iframe->cause, 6, 2);
+    if (excode == 0x8) {
+        LTRACEF("SYSCALL, EPC 0x%x\n", iframe->epc);
+        iframe->epc += 4;
+    } else {
+        LTRACEF("status 0x%x\n", iframe->status);
+        LTRACEF("cause 0x%x\n", iframe->cause);
+        LTRACEF("\texcode 0x%x\n", excode);
+        LTRACEF("epc 0x%x\n", iframe->epc);
+        for (;;);
+    }
+}
+
+void mips_irq(struct mips_iframe *iframe, uint num)
+{
+    // unset IE and clear EXL
+    mips_write_c0_status(mips_read_c0_status() & ~(3<<0));
+
+    THREAD_STATS_INC(interrupts);
+    KEVLOG_IRQ_ENTER(num);
+
+    LTRACEF("IRQ %u, EPC 0x%x, old status 0x%x, status 0x%x\n",
+            num, iframe->epc, iframe->status, mips_read_c0_status());
+
+    enum handler_return ret = INT_NO_RESCHEDULE;
+
+    // figure out which interrupt the timer is set to
+    uint32_t ipti = BITS_SHIFT(mips_read_c0_intctl(), 31, 29);
+    if (ipti >= 2 && ipti == num) {
+        // builtin timer
+        ret = mips_timer_irq();
+#if PLATFORM_QEMU_MIPS
+    } else if (num == 2) {
+        ret = platform_irq(iframe, num);
+#endif
+    } else {
+        panic("mips: unhandled irq\n");
+    }
+
+    KEVLOG_IRQ_EXIT(num);
+
+    if (ret != INT_NO_RESCHEDULE)
+        thread_preempt();
+}
+
diff --git a/src/bsp/lk/arch/mips/include/arch/arch_ops.h b/src/bsp/lk/arch/mips/include/arch/arch_ops.h
new file mode 100644
index 0000000..42a178b
--- /dev/null
+++ b/src/bsp/lk/arch/mips/include/arch/arch_ops.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <compiler.h>
+#include <arch/mips.h>
+
+static inline void arch_enable_ints(void)
+{
+    CF;
+#if 0
+    uint32_t status = mips_read_c0_status();
+    status |= 0x1;
+    mips_write_c0_status(status);
+#else
+    __asm__ volatile("ei");
+#endif
+}
+
+static inline void arch_disable_ints(void)
+{
+#if 0
+    uint32_t status = mips_read_c0_status();
+    status &= ~0x1;
+    mips_write_c0_status(status);
+#else
+    __asm__ volatile("di");
+#endif
+    CF;
+}
+
+static inline bool arch_ints_disabled(void)
+{
+    uint32_t state;
+
+    state = mips_read_c0_status();
+
+    return (state & (1<<1)) || !(state & (1<<0)); // check if EXL or IE is set
+}
+
+static inline int atomic_add(volatile int *ptr, int val)
+{
+    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
+}
+
+static inline int atomic_or(volatile int *ptr, int val)
+{
+    return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
+}
+
+static inline int atomic_and(volatile int *ptr, int val)
+{
+    return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
+}
+
+static inline int atomic_swap(volatile int *ptr, int val)
+{
+    return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
+}
+
+/* use a global pointer to store the current_thread */
+extern struct thread *_current_thread;
+
+static inline struct thread *get_current_thread(void)
+{
+    return _current_thread;
+}
+
+static inline void set_current_thread(struct thread *t)
+{
+    _current_thread = t;
+}
+
+static inline uint32_t arch_cycle_count(void) { return 0; }
+
+static inline uint arch_curr_cpu_num(void)
+{
+    return 0;
+}
+
diff --git a/src/bsp/lk/arch/mips/include/arch/arch_thread.h b/src/bsp/lk/arch/mips/include/arch/arch_thread.h
new file mode 100644
index 0000000..9dd8480
--- /dev/null
+++ b/src/bsp/lk/arch/mips/include/arch/arch_thread.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <sys/types.h>
+
+struct mips_context_switch_frame {
+    /* callee saved */
+    uint32_t s0;
+    uint32_t s1;
+    uint32_t s2;
+    uint32_t s3;
+    uint32_t s4;
+    uint32_t s5;
+    uint32_t s6;
+    uint32_t s7;
+    uint32_t s8;
+    uint32_t ra;
+    uint32_t sp;
+};
+
+struct arch_thread {
+    struct mips_context_switch_frame cs_frame;
+};
+
+void mips_context_switch(struct mips_context_switch_frame *oldcs, struct mips_context_switch_frame *newcs);
+
diff --git a/src/bsp/lk/arch/mips/include/arch/defines.h b/src/bsp/lk/arch/mips/include/arch/defines.h
new file mode 100644
index 0000000..2d3d8e7
--- /dev/null
+++ b/src/bsp/lk/arch/mips/include/arch/defines.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#define PAGE_SIZE 4096
+#define PAGE_SIZE_SHIFT 12
+
+// XXX is this right?
+#define CACHE_LINE 32
+
+#define ARCH_DEFAULT_STACK_SIZE 4096
diff --git a/src/bsp/lk/arch/mips/include/arch/mips.h b/src/bsp/lk/arch/mips/include/arch/mips.h
new file mode 100644
index 0000000..a73eb66
--- /dev/null
+++ b/src/bsp/lk/arch/mips/include/arch/mips.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#ifndef ASSEMBLY
+#include <compiler.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#define GEN_CP_REG_FUNCS(regname, regnum, sel) /* generates mfc0/mtc0 accessors for CP0 register $regnum, select sel */ \
+static inline __ALWAYS_INLINE uint32_t mips_read_##regname(void) { \
+    uint32_t val; \
+    __asm__ volatile("mfc0 %0, $" #regnum ", " #sel : "=r" (val)); \
+    return val; \
+} \
+\
+static inline __ALWAYS_INLINE uint32_t mips_read_##regname##_relaxed(void) { /* non-volatile asm: compiler may CSE/reorder this read */ \
+    uint32_t val; \
+    __asm__("mfc0 %0, $" #regnum ", " #sel : "=r" (val)); \
+    return val; \
+} \
+\
+static inline __ALWAYS_INLINE void mips_write_##regname(uint32_t val) { \
+    __asm__ volatile("mtc0 %0, $" #regnum ", " #sel :: "r" (val)); \
+} \
+\
+static inline __ALWAYS_INLINE void mips_write_##regname##_relaxed(uint32_t val) { /* NOTE(review): currently identical to the non-relaxed write */ \
+    __asm__ volatile("mtc0 %0, $" #regnum ", " #sel :: "r" (val)); \
+}
+
+GEN_CP_REG_FUNCS(c0_count, 9, 0)
+GEN_CP_REG_FUNCS(c0_compare, 11, 0)
+GEN_CP_REG_FUNCS(c0_status, 12, 0)
+GEN_CP_REG_FUNCS(c0_intctl, 12, 1)
+GEN_CP_REG_FUNCS(c0_srsctl, 12, 2)
+GEN_CP_REG_FUNCS(c0_srsmap1, 12, 3)
+GEN_CP_REG_FUNCS(c0_view_ipl, 12, 4)
+GEN_CP_REG_FUNCS(c0_srsmap2, 12, 5)
+GEN_CP_REG_FUNCS(c0_cause, 13, 0)
+GEN_CP_REG_FUNCS(c0_epc, 14, 0)
+GEN_CP_REG_FUNCS(c0_prid, 15, 0)
+GEN_CP_REG_FUNCS(c0_ebase, 15, 1)
+GEN_CP_REG_FUNCS(c0_cdmmbase, 15, 2)
+GEN_CP_REG_FUNCS(c0_config, 16, 0)
+GEN_CP_REG_FUNCS(c0_config1, 16, 1)
+GEN_CP_REG_FUNCS(c0_config2, 16, 2)
+GEN_CP_REG_FUNCS(c0_config3, 16, 3)
+GEN_CP_REG_FUNCS(c0_config4, 16, 4)
+GEN_CP_REG_FUNCS(c0_config5, 16, 5)
+GEN_CP_REG_FUNCS(c0_config6, 16, 6)
+GEN_CP_REG_FUNCS(c0_config7, 16, 7)
+GEN_CP_REG_FUNCS(c0_config8, 16, 8)
+
+struct mips_iframe { // trap frame saved on exception entry — NOTE(review): layout presumably must match the asm exception path, confirm against vectors.S
+    uint32_t at;
+    uint32_t v0;
+    uint32_t v1;
+    uint32_t a0;
+    uint32_t a1;
+    uint32_t a2;
+    uint32_t a3;
+    uint32_t t0;
+    uint32_t t1;
+    uint32_t t2;
+    uint32_t t3;
+    uint32_t t4;
+    uint32_t t5;
+    uint32_t t6;
+    uint32_t t7;
+    uint32_t t8;
+    uint32_t t9;
+    uint32_t gp;
+    uint32_t ra;
+    uint32_t status;
+    uint32_t cause;
+    uint32_t epc;
+};
+STATIC_ASSERT(sizeof(struct mips_iframe) == 88); // 22 fields x 4 bytes, no padding
+
+void mips_init_timer(uint32_t freq);
+enum handler_return mips_timer_irq(void);
+
+void mips_enable_irq(uint num);
+void mips_disable_irq(uint num);
+
+#endif // !ASSEMBLY
+
+#define VECTORED_OFFSET_SHIFT 32
+
diff --git a/src/bsp/lk/arch/mips/include/arch/spinlock.h b/src/bsp/lk/arch/mips/include/arch/spinlock.h
new file mode 100644
index 0000000..5c50c5b
--- /dev/null
+++ b/src/bsp/lk/arch/mips/include/arch/spinlock.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <arch/ops.h>
+#include <stdbool.h>
+
+#if WITH_SMP
+#error microblaze does not support SMP
+#endif
+
+#define SPIN_LOCK_INITIAL_VALUE (0)
+
+typedef unsigned int spin_lock_t;
+
+typedef unsigned int spin_lock_saved_state_t;
+typedef unsigned int spin_lock_save_flags_t;
+
+static inline void arch_spin_lock(spin_lock_t *lock) // UP stub: no atomics needed, just mark held
+{
+    *lock = 1;
+}
+
+static inline int arch_spin_trylock(spin_lock_t *lock) // UP stub: always returns 0 — NOTE(review): presumably 0 == acquired, confirm caller convention
+{
+    return 0;
+}
+
+static inline void arch_spin_unlock(spin_lock_t *lock) // UP stub: mark released
+{
+    *lock = 0;
+}
+
+static inline void arch_spin_lock_init(spin_lock_t *lock) // initialize to the unlocked state
+{
+    *lock = SPIN_LOCK_INITIAL_VALUE;
+}
+
+static inline bool arch_spin_lock_held(spin_lock_t *lock) // true if currently marked held
+{
+    return *lock != 0;
+}
+
+    /* default mips flag is to just disable plain irqs */
+#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS  0
+
+enum {
+    /* private */
+    SPIN_LOCK_STATE_RESTORE_IRQ = 1,
+};
+
+static inline void
+arch_interrupt_save(spin_lock_saved_state_t *statep, spin_lock_save_flags_t flags) // disable irqs, recording whether they were enabled
+{
+    spin_lock_saved_state_t state = 0;
+    if (!arch_ints_disabled()) {
+        state |= SPIN_LOCK_STATE_RESTORE_IRQ; // remember to re-enable on restore
+        arch_disable_ints();
+    }
+    *statep = state; // 'flags' is unused on this port
+}
+
+static inline void
+arch_interrupt_restore(spin_lock_saved_state_t old_state, spin_lock_save_flags_t flags) // re-enable irqs only if the paired save disabled them
+{
+    if (old_state & SPIN_LOCK_STATE_RESTORE_IRQ)
+        arch_enable_ints();
+}
+
+
+
+
diff --git a/src/bsp/lk/arch/mips/linker.ld b/src/bsp/lk/arch/mips/linker.ld
new file mode 100644
index 0000000..c237648
--- /dev/null
+++ b/src/bsp/lk/arch/mips/linker.ld
@@ -0,0 +1,135 @@
+OUTPUT_FORMAT("elf32-bigmips", "elf32-bigmips", "elf32-littlemips")
+OUTPUT_ARCH(mips)
+
+ENTRY(_start)
+SECTIONS
+{
+    . = %KERNEL_BASE% + %KERNEL_LOAD_OFFSET%;
+
+    /* text/read-only data */
+    .text : {
+        KEEP(*(.text.vectab))
+        KEEP(*(.text.boot))
+        *(.text* .gnu.linkonce.t.*)
+    }
+
+    .interp : { *(.interp) }
+    .hash : { *(.hash) }
+    .dynsym : { *(.dynsym) }
+    .dynstr : { *(.dynstr) }
+    .rel.text : { *(.rel.text) *(.rel.gnu.linkonce.t*) }
+    .rela.text : { *(.rela.text) *(.rela.gnu.linkonce.t*) }
+    .rel.data : { *(.rel.data) *(.rel.gnu.linkonce.d*) }
+    .rela.data : { *(.rela.data) *(.rela.gnu.linkonce.d*) }
+    .rel.rodata : { *(.rel.rodata) *(.rel.gnu.linkonce.r*) }
+    .rela.rodata : { *(.rela.rodata) *(.rela.gnu.linkonce.r*) }
+    .rel.got : { *(.rel.got) }
+    .rela.got : { *(.rela.got) }
+    .rel.ctors : { *(.rel.ctors) }
+    .rela.ctors : { *(.rela.ctors) }
+    .rel.dtors : { *(.rel.dtors) }
+    .rela.dtors : { *(.rela.dtors) }
+    .rel.init : { *(.rel.init) }
+    .rela.init : { *(.rela.init) }
+    .rel.fini : { *(.rel.fini) }
+    .rela.fini : { *(.rela.fini) }
+    .rel.bss : { *(.rel.bss) }
+    .rela.bss : { *(.rela.bss) }
+    .rel.plt : { *(.rel.plt) }
+    .rela.plt : { *(.rela.plt) }
+    .init : { *(.init) }
+    .plt : { *(.plt) }
+
+    .rodata : ALIGN(4) {
+        __rodata_start = .;
+        *(.rodata .rodata.* .gnu.linkonce.r.*)
+    }
+
+    .sdata2 : ALIGN(4) {
+        _SDATA2_START__ = .;
+        *(.sdata2 .sdata2.* .gnu.linkonce.s2.*)
+        _SDATA2_END__ = .;
+    }
+
+    .sbss2 : ALIGN(4) {
+        /* read only small variables without initial value */
+        _SBSS2_START__ = .;
+        *(.sbss2*)
+        _SBSS2_END__ = .;
+    }
+
+    /*
+     * extra linker scripts tend to insert sections just after .rodata,
+     * so we want to make sure this symbol comes after anything inserted above,
+     * but not aligned to the next section necessarily.
+     */
+    .dummy_post_rodata : {
+        __rodata_end = .;
+    }
+
+    .data : ALIGN(4) {
+        /* writable data  */
+        __data_start_rom = .;
+        /* in one segment binaries, the rom data address is on top of the ram data address */
+        __data_start = .;
+        *(.data .data.* .gnu.linkonce.d.*)
+        __ctor_list = .;
+        KEEP(*(.ctors .init_array))
+        __ctor_end = .;
+        __dtor_list = .;
+        KEEP(*(.dtors .fini_array))
+        __dtor_end = .;
+        *(.got*)
+        *(.dynamic)
+
+    }
+
+    .sdata : {
+        /* read-write small data with initial value */
+        _SDATA_START__ = .;
+        *(.sdata .sdata.* .gnu.linkonce.s.*)
+        _SDATA_END__ = .;
+    }
+
+    /*
+     * extra linker scripts tend to insert sections just after .data,
+     * so we want to make sure this symbol comes after anything inserted above,
+     * but not aligned to the next section necessarily.
+     */
+    .dummy_post_data : {
+        __data_end = .;
+    }
+
+    . = ALIGN(4);
+    __bss_start = .;
+
+    .sbss : {
+        /* read-write small variables without initial value */
+        _sbss_start__ = .;
+        *(.dynsbss)
+        *(.sbss .sbss.* .gnu.linkonce.sb.*)
+        *(.scommon)
+        _sbss_end__ = .;
+    }
+
+    /* uninitialized data (in same segment as writable data) */
+    .bss : {
+        /* regular bss */
+        *(.dynbss)
+        *(.bss .bss.*)
+        *(.gnu.linkonce.b.*)
+        *(COMMON)
+    }
+
+    . = ALIGN(4);
+    __bss_end = .;
+
+    _end = .;
+
+    . = %KERNEL_BASE% + %MEMSIZE%;
+    _end_of_ram = .;
+
+    /* Strip unnecessary stuff */
+    /DISCARD/ : { *(.comment .note .eh_frame) }
+}
+
diff --git a/src/bsp/lk/arch/mips/mips.ld b/src/bsp/lk/arch/mips/mips.ld
new file mode 100644
index 0000000..9963f06
--- /dev/null
+++ b/src/bsp/lk/arch/mips/mips.ld
@@ -0,0 +1,227 @@
+/* Default linker script, for normal executables */
+OUTPUT_FORMAT("elf32-bigmips", "elf32-bigmips", "elf32-littlemips")
+OUTPUT_ARCH(mips)
+ENTRY(__start)
+SECTIONS
+{
+
+  . = 0;
+
+  /* Read-only sections, merged into text segment: */
+  .interp         : { *(.interp) }
+  .reginfo        : { *(.reginfo) }
+  .dynamic        : { *(.dynamic) }
+  .hash           : { *(.hash) }
+  .dynsym         : { *(.dynsym) }
+  .dynstr         : { *(.dynstr) }
+  .gnu.version    : { *(.gnu.version) }
+  .gnu.version_d  : { *(.gnu.version_d) }
+  .gnu.version_r  : { *(.gnu.version_r) }
+  .rel.init       : { *(.rel.init) }
+  .rela.init      : { *(.rela.init) }
+  .rel.text       : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+  .rela.text      : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+  .rel.fini       : { *(.rel.fini) }
+  .rela.fini      : { *(.rela.fini) }
+  .rel.rodata     : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+  .rela.rodata    : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+  .rel.data.rel.ro   : { *(.rel.data.rel.ro* .rel.gnu.linkonce.d.rel.ro.*) }
+  .rela.data.rel.ro   : { *(.rela.data.rel.ro* .rela.gnu.linkonce.d.rel.ro.*) }
+  .rel.data       : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+  .rela.data      : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+  .rel.tdata     : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+  .rela.tdata    : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+  .rel.tbss      : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+  .rela.tbss     : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+  .rel.ctors      : { *(.rel.ctors) }
+  .rela.ctors     : { *(.rela.ctors) }
+  .rel.dtors      : { *(.rel.dtors) }
+  .rela.dtors     : { *(.rela.dtors) }
+  .rel.got        : { *(.rel.got) }
+  .rela.got       : { *(.rela.got) }
+  .rel.sdata      : { *(.rel.sdata .rel.sdata.* .rel.gnu.linkonce.s.*) }
+  .rela.sdata     : { *(.rela.sdata .rela.sdata.* .rela.gnu.linkonce.s.*) }
+  .rel.sbss       : { *(.rel.sbss .rel.sbss.* .rel.gnu.linkonce.sb.*) }
+  .rela.sbss      : { *(.rela.sbss .rela.sbss.* .rela.gnu.linkonce.sb.*) }
+  .rel.sdata2     : { *(.rel.sdata2 .rel.sdata2.* .rel.gnu.linkonce.s2.*) }
+  .rela.sdata2    : { *(.rela.sdata2 .rela.sdata2.* .rela.gnu.linkonce.s2.*) }
+  .rel.sbss2      : { *(.rel.sbss2 .rel.sbss2.* .rel.gnu.linkonce.sb2.*) }
+  .rela.sbss2     : { *(.rela.sbss2 .rela.sbss2.* .rela.gnu.linkonce.sb2.*) }
+  .rel.bss        : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+  .rela.bss       : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+  .rel.plt        : { *(.rel.plt) }
+  .rela.plt       : { *(.rela.plt) }
+  .init           :
+  {
+    KEEP (*(.init))
+  } =0x47ff041f
+  .plt            : { *(.plt) }
+  .text           :
+  {
+    _ftext = . ;
+    *(.text .stub .text.* .gnu.linkonce.t.*)
+    KEEP (*(.text.*personality*))
+    /* .gnu.warning sections are handled specially by elf32.em.  */
+    *(.gnu.warning)
+    *(.mips16.fn.*) *(.mips16.call.*)
+  } =0x47ff041f
+  .fini           :
+  {
+    KEEP (*(.fini))
+  } =0x47ff041f
+  PROVIDE (__etext = .);
+  PROVIDE (_etext = .);
+  PROVIDE (etext = .);
+  .rodata         : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+  .rodata1        : { *(.rodata1) }
+  .sdata2         :
+  {
+    *(.sdata2 .sdata2.* .gnu.linkonce.s2.*)
+  }
+  .sbss2          : { *(.sbss2 .sbss2.* .gnu.linkonce.sb2.*) }
+  .eh_frame_hdr : { *(.eh_frame_hdr) }
+  .eh_frame       : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+  .gcc_except_table   : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) }
+  /* Adjust the address for the data segment.  We want to adjust up to
+     the same address within the page on the next page up.  */
+  . = ALIGN (0x40000) - ((0x40000 - .) & (0x40000 - 1)); . = DATA_SEGMENT_ALIGN (0x40000, 0x1000);
+  /* Exception handling  */
+  .eh_frame       : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+  .gcc_except_table   : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+  /* Thread Local Storage sections  */
+  .tdata         : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+  .tbss                  : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+  .preinit_array     :
+  {
+    PROVIDE_HIDDEN (__preinit_array_start = .);
+    KEEP (*(.preinit_array))
+    PROVIDE_HIDDEN (__preinit_array_end = .);
+  }
+  .init_array     :
+  {
+     PROVIDE_HIDDEN (__init_array_start = .);
+     KEEP (*(SORT(.init_array.*)))
+     KEEP (*(.init_array))
+     PROVIDE_HIDDEN (__init_array_end = .);
+  }
+  .fini_array     :
+  {
+    PROVIDE_HIDDEN (__fini_array_start = .);
+    KEEP (*(.fini_array))
+    KEEP (*(SORT(.fini_array.*)))
+    PROVIDE_HIDDEN (__fini_array_end = .);
+  }
+  .ctors          :
+  {
+        __ctor_list = .;
+    /* gcc uses crtbegin.o to find the start of
+       the constructors, so we make sure it is
+       first.  Because this is a wildcard, it
+       doesn't matter if the user does not
+       actually link against crtbegin.o; the
+       linker won't look for a file to match a
+       wildcard.  The wildcard also means that it
+       doesn't matter which directory crtbegin.o
+       is in.  */
+    KEEP (*crtbegin*.o(.ctors))
+    /* We don't want to include the .ctor section from
+       the crtend.o file until after the sorted ctors.
+       The .ctor section from the crtend file contains the
+       end of ctors marker and it must be last */
+    KEEP (*(EXCLUDE_FILE (*crtend*.o ) .ctors))
+    KEEP (*(SORT(.ctors.*)))
+    KEEP (*(.ctors))
+        __ctor_end = .;
+  }
+  .dtors          :
+  {
+    KEEP (*crtbegin*.o(.dtors))
+    KEEP (*(EXCLUDE_FILE (*crtend*.o ) .dtors))
+    KEEP (*(SORT(.dtors.*)))
+    KEEP (*(.dtors))
+  }
+  .jcr            : { KEEP (*(.jcr)) }
+  .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro* .gnu.linkonce.d.rel.ro.*) }
+  . = DATA_SEGMENT_RELRO_END (0, .);
+  .data           :
+  {
+    _fdata = . ;
+    *(.data .data.* .gnu.linkonce.d.*)
+    KEEP (*(.gnu.linkonce.d.*personality*))
+    SORT(CONSTRUCTORS)
+  }
+  .data1          : { *(.data1) }
+  . = .;
+  _gp = ALIGN(16) + 0x7ff0;
+  .got            : { *(.got.plt) *(.got) }
+  /* We want the small data sections together, so single-instruction offsets
+     can access them all, and initialized data all before uninitialized, so
+     we can shorten the on-disk segment size.  */
+  .sdata          :
+  {
+    *(.sdata .sdata.* .gnu.linkonce.s.*)
+  }
+  .lit8           : { *(.lit8) }
+  .lit4           : { *(.lit4) }
+  _edata = .; PROVIDE (edata = .);
+  __bss_start = .;
+  _fbss = .;
+  .sbss           :
+  {
+    *(.dynsbss)
+    *(.sbss .sbss.* .gnu.linkonce.sb.*)
+    *(.scommon)
+  }
+  .bss            :
+  {
+   *(.dynbss)
+   *(.bss .bss.* .gnu.linkonce.b.*)
+   *(COMMON)
+   /* Align here to ensure that the .bss section occupies space up to
+      _end.  Align after .bss to ensure correct alignment even if the
+      .bss section disappears because there are no input sections.
+      FIXME: Why do we need it? When there is no .bss section, we don't
+      pad the .data section.  */
+   . = ALIGN(. != 0 ? 32 / 8 : 1);
+  }
+  . = ALIGN(32 / 8);
+  . = ALIGN(32 / 8);
+  _end = .; PROVIDE (end = .);
+  . = DATA_SEGMENT_END (.);
+  /* Stabs debugging sections.  */
+  .stab          0 : { *(.stab) }
+  .stabstr       0 : { *(.stabstr) }
+  .stab.excl     0 : { *(.stab.excl) }
+  .stab.exclstr  0 : { *(.stab.exclstr) }
+  .stab.index    0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment       0 : { *(.comment) }
+  /* DWARF debug sections.
+     Symbols in the DWARF debugging sections are relative to the beginning
+     of the section so we begin them at 0.  */
+  /* DWARF 1 */
+  .debug          0 : { *(.debug) }
+  .line           0 : { *(.line) }
+  /* GNU DWARF 1 extensions */
+  .debug_srcinfo  0 : { *(.debug_srcinfo) }
+  .debug_sfnames  0 : { *(.debug_sfnames) }
+  /* DWARF 1.1 and DWARF 2 */
+  .debug_aranges  0 : { *(.debug_aranges) }
+  .debug_pubnames 0 : { *(.debug_pubnames) }
+  /* DWARF 2 */
+  .debug_info     0 : { *(.debug_info .gnu.linkonce.wi.*) }
+  .debug_abbrev   0 : { *(.debug_abbrev) }
+  .debug_line     0 : { *(.debug_line) }
+  .debug_frame    0 : { *(.debug_frame) }
+  .debug_str      0 : { *(.debug_str) }
+  .debug_loc      0 : { *(.debug_loc) }
+  .debug_macinfo  0 : { *(.debug_macinfo) }
+  /* SGI/MIPS DWARF 2 extensions */
+  .debug_weaknames 0 : { *(.debug_weaknames) }
+  .debug_funcnames 0 : { *(.debug_funcnames) }
+  .debug_typenames 0 : { *(.debug_typenames) }
+  .debug_varnames  0 : { *(.debug_varnames) }
+  .gptab.sdata : { *(.gptab.data) *(.gptab.sdata) }
+  .gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) }
+  /DISCARD/ : { *(.note.GNU-stack) }
+}
diff --git a/src/bsp/lk/arch/mips/rules.mk b/src/bsp/lk/arch/mips/rules.mk
new file mode 100644
index 0000000..715818e
--- /dev/null
+++ b/src/bsp/lk/arch/mips/rules.mk
@@ -0,0 +1,79 @@
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE := $(LOCAL_DIR)
+
+MODULE_SRCS += \
+	$(LOCAL_DIR)/arch.c \
+	$(LOCAL_DIR)/asm.S \
+	$(LOCAL_DIR)/exceptions.c \
+	$(LOCAL_DIR)/start.S \
+	$(LOCAL_DIR)/thread.c \
+	$(LOCAL_DIR)/timer.c \
+	$(LOCAL_DIR)/vectors.S \
+
+#	$(LOCAL_DIR)/cache.c \
+	$(LOCAL_DIR)/cache-ops.S \
+	$(LOCAL_DIR)/ops.S \
+	$(LOCAL_DIR)/mmu.c \
+	$(LOCAL_DIR)/faults.c \
+	$(LOCAL_DIR)/descriptor.c
+
+GLOBAL_DEFINES += \
+	SMP_MAX_CPUS=1
+
+# set the default toolchain to mips elf and set a #define
+ifndef TOOLCHAIN_PREFIX
+TOOLCHAIN_PREFIX := mips-elf-
+endif
+
+WITH_LINKER_GC ?= 0
+LITTLE_ENDIAN ?= 0
+
+ifneq ($(LITTLE_ENDIAN),0)
+GLOBAL_COMPILEFLAGS += -EL
+GLOBAL_ASFLAGS += -EL
+GLOBAL_LDFLAGS += -EL
+GLOBAL_MODULE_LDFLAGS += -EL
+endif
+
+ARCH_COMPILEFLAGS := -mno-gpopt
+ARCH_OPTFLAGS := -O2
+
+ifeq ($(MIPS_CPU),m14k)
+ARCH_COMPILEFLAGS += -march=m14k
+endif
+ifeq ($(MIPS_CPU),microaptiv-uc)
+ARCH_COMPILEFLAGS += -march=m14k
+endif
+
+
+cc-option = $(shell if test -z "`$(1) $(2) -S -o /dev/null -xc /dev/null 2>&1`"; \
+	then echo "$(2)"; else echo "$(3)"; fi ;)
+
+KERNEL_BASE ?= $(MEMBASE)
+KERNEL_LOAD_OFFSET ?= 0
+VECTOR_BASE_PHYS ?= 0
+
+GLOBAL_DEFINES += \
+    MEMBASE=$(MEMBASE) \
+    MEMSIZE=$(MEMSIZE) \
+    KERNEL_BASE=$(KERNEL_BASE) \
+    KERNEL_LOAD_OFFSET=$(KERNEL_LOAD_OFFSET)
+
+# potentially generated files that should be cleaned out with clean make rule
+GENERATED += \
+	$(BUILDDIR)/linker.ld
+
+# rules for generating the linker
+$(BUILDDIR)/linker.ld: $(LOCAL_DIR)/linker.ld $(wildcard arch/*.ld) linkerscript.phony
+	@echo generating $@
+	@$(MKDIR)
+	$(NOECHO)sed "s/%MEMBASE%/$(MEMBASE)/;s/%MEMSIZE%/$(MEMSIZE)/;s/%KERNEL_BASE%/$(KERNEL_BASE)/;s/%KERNEL_LOAD_OFFSET%/$(KERNEL_LOAD_OFFSET)/;s/%VECTOR_BASE_PHYS%/$(VECTOR_BASE_PHYS)/" < $< > $@.tmp
+	@$(call TESTANDREPLACEFILE,$@.tmp,$@)
+
+linkerscript.phony:
+.PHONY: linkerscript.phony
+
+LINKER_SCRIPT += $(BUILDDIR)/linker.ld
+
+include make/module.mk
diff --git a/src/bsp/lk/arch/mips/start.S b/src/bsp/lk/arch/mips/start.S
new file mode 100644
index 0000000..4097648
--- /dev/null
+++ b/src/bsp/lk/arch/mips/start.S
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+
+.section ".text.boot"
+FUNCTION(_start)
+    # set the default stack
+    la      $sp, default_stack_top
+
+    # zero out the bss section
+    la      $t0, __bss_start
+    la     $t1, __bss_end
+0:
+    sw      $zero, ($t0)    # NOTE(review): stores before testing — assumes .bss is non-empty, confirm
+    addi    $t0, 4
+    bne     $t0, $t1, 0b    # branch delay slot assumed filled by assembler reorder mode — confirm
+
+    # args to main and call it
+    li      $a0, 1
+    li      $a1, 2
+    li      $a2, 3
+    li      $a3, 4
+    jal     lk_main
+
+    # should never return here
+    b       .
+
+.bss
+.align 3
+LOCAL_DATA(default_stack)
+    .skip 4096
+LOCAL_DATA(default_stack_top)
+
diff --git a/src/bsp/lk/arch/mips/thread.c b/src/bsp/lk/arch/mips/thread.c
new file mode 100644
index 0000000..eaef3d9
--- /dev/null
+++ b/src/bsp/lk/arch/mips/thread.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <debug.h>
+#include <trace.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <kernel/thread.h>
+#include <arch/mips.h>
+
+#define LOCAL_TRACE 0
+
+struct thread *_current_thread;
+
+static void initial_thread_func(void) __NO_RETURN;
+static void initial_thread_func(void) // first code executed in a new thread (reached via cs_frame.ra on first switch)
+{
+    thread_t *ct = get_current_thread();
+
+#if LOCAL_TRACE
+    LTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
+    dump_thread(ct);
+#endif
+
+    /* release the thread lock that was implicitly held across the reschedule */
+    spin_unlock(&thread_lock);
+    arch_enable_ints();
+
+    int ret = ct->entry(ct->arg); // run the thread's entry point
+
+    LTRACEF("thread %p exiting with %d\n", ct, ret);
+
+    thread_exit(ret); // never returns
+}
+
+void arch_thread_initialize(thread_t *t) // set up a never-run thread so the first context switch "returns" into initial_thread_func
+{
+    LTRACEF("t %p (%s)\n", t, t->name);
+
+    /* zero out the thread context */
+    memset(&t->arch.cs_frame, 0, sizeof(t->arch.cs_frame));
+
+    t->arch.cs_frame.ra = (vaddr_t)&initial_thread_func; // restored return address: entry trampoline
+    t->arch.cs_frame.sp = (vaddr_t)t->stack + t->stack_size; // stack grows down: start at the top
+}
+
+void arch_context_switch(thread_t *oldthread, thread_t *newthread) // save oldthread's callee-saved state, resume newthread's
+{
+    LTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
+
+    mips_context_switch(&oldthread->arch.cs_frame, &newthread->arch.cs_frame); // asm routine, returns on newthread's stack
+}
+
+void arch_dump_thread(thread_t *t) // debug dump of the arch-specific part of a thread
+{
+    if (t->state != THREAD_RUNNING) { // cs_frame.sp is only meaningful for a switched-out thread
+        dprintf(INFO, "\tarch: ");
+        dprintf(INFO, "sp 0x%x\n", t->arch.cs_frame.sp);
+    }
+}
+
diff --git a/src/bsp/lk/arch/mips/timer.c b/src/bsp/lk/arch/mips/timer.c
new file mode 100644
index 0000000..2b665cd
--- /dev/null
+++ b/src/bsp/lk/arch/mips/timer.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <arch/mips.h>
+#include <err.h>
+#include <trace.h>
+#include <debug.h>
+#include <assert.h>
+#include <stdint.h>
+#include <bits.h>
+#include <arch/ops.h>
+#include <platform.h>
+#include <platform/timer.h>
+
+#define LOCAL_TRACE 0
+
/* whole ticks elapsed since the periodic timer started; bumped in mips_timer_irq */
static volatile uint64_t ticks;
/* c0 compare value programmed for the next tick (== last tick + tick_interval) */
static volatile uint32_t last_compare_set;

static uint32_t tick_rate;      /* c0 count frequency in Hz */
static uint32_t tick_rate_mhz;  /* c0 count frequency in MHz (truncated) */

static lk_time_t tick_interval_ms;    /* tick period in msec */
static lk_bigtime_t tick_interval_us; /* tick period in usec */
static uint32_t tick_interval;        /* tick period in counter cycles */

static platform_timer_callback cb;    /* registered periodic callback */
static void *cb_args;                 /* opaque argument handed to cb */
+
/*
 * c0 count/compare timer interrupt handler.
 *
 * Accounts one tick, programs compare for the next interval and invokes
 * the registered periodic callback.  Returns the callback's verdict so
 * the exception path can decide whether to reschedule.
 */
enum handler_return mips_timer_irq(void)
{
    LTRACEF("count   0x%x\n", mips_read_c0_count());
    LTRACEF("compare 0x%x\n", mips_read_c0_compare());

    /* reset it for the next interval */
retry:
    ticks++;
    last_compare_set += tick_interval;
    uint32_t count = mips_read_c0_count();
    if (unlikely(TIME_GT(count, last_compare_set))) {
        /* if it took us too long to get to this irq, make sure it fires immediately:
         * the counter already passed the new target, so account another whole
         * tick and retry instead of programming a compare value in the past
         * (which would only fire after the 32-bit counter wraps) */
        //printf("took too long to service timer irq! %u %u\n", count, last_compare_set);
        goto retry;
        //mips_write_c0_compare(mips_read_c0_count() + tick_rate_mhz);
    } else {
        mips_write_c0_compare(last_compare_set);
    }

    enum handler_return ret = INT_NO_RESCHEDULE;
    if (cb) {
        lk_time_t now = current_time();
        ret = cb(cb_args, now);
    }

    return ret;
}
+
/*
 * Arm the periodic tick: call callback(arg, now) every interval msec.
 *
 * Converts the interval to counter cycles, programs the first compare
 * value relative to the current count and un-gates the counter.
 * Requires mips_init_timer() to have recorded the tick rate first.
 * NOTE(review): interval * (tick_rate / 1000) is 32-bit arithmetic and
 * can overflow for long intervals on fast clocks — confirm expected range.
 */
status_t platform_set_periodic_timer(platform_timer_callback callback, void *arg, lk_time_t interval)
{
    TRACEF("callback %p, arg %p, interval %u\n", callback, arg, interval);

    DEBUG_ASSERT(interval > 0);
    DEBUG_ASSERT(tick_rate != 0 && tick_rate_mhz != 0);

    cb = callback;
    cb_args = arg;

    tick_interval_ms = interval;
    tick_interval_us = interval * 1000;
    tick_interval = interval * (tick_rate / 1000);

    uint32_t now = mips_read_c0_count();
    last_compare_set = now + tick_interval;
    mips_write_c0_compare(last_compare_set);

    // enable the counter (clear CAUSE.DC, bit 27, set in mips_init_timer)
    mips_write_c0_cause(mips_read_c0_cause() & ~(1<<27));

    return NO_ERROR;
}
+
/*
 * Time since the periodic timer started, in msec: whole ticks plus the
 * counter cycles elapsed since the last tick.
 */
lk_time_t current_time(void)
{
    uint64_t t;
    uint32_t last_compare;
    uint32_t delta;

    /* sample the tick counter, the last compare register set, and the current count atomically */
    do {
        t = ticks;
        last_compare = last_compare_set;
        delta = mips_read_c0_count();
    } while (ticks != t || last_compare_set != last_compare);

    /* convert ticks to msec */
    /* NOTE(review): this parses as (delta - last_compare) - tick_interval.
     * Since last_compare holds the *next* compare target, cycles since the
     * last tick would be delta - (last_compare - tick_interval); confirm
     * the intended grouping (unsigned wraparound may make both "work"). */
    delta = (delta - last_compare - tick_interval) / (tick_rate_mhz * 1000);
    lk_time_t res = (t * tick_interval_ms) + delta;

    return res;
}
+
+lk_bigtime_t current_time_hires(void)
+{
+    uint64_t t;
+    uint32_t last_compare;
+    uint32_t delta;
+
+    /* sample the tick counter, the last compare register set, and the current count atomically */
+    do {
+        t = ticks;
+        last_compare = last_compare_set;
+        delta = mips_read_c0_count();
+    } while (ticks != t);
+
+    /* convert ticks to usec */
+    delta = (delta - last_compare - tick_interval) / tick_rate_mhz;
+    lk_bigtime_t res = (t * tick_interval_us) + delta;
+
+    return res;
+}
+
/*
 * One-time timer setup.  freq is the c0 count rate in Hz.
 * Records the rate, gates the counter off until a periodic timer is set,
 * and unmasks the irq line the timer is routed to (from IntCtl.IPTI).
 */
void mips_init_timer(uint32_t freq)
{
    tick_rate = freq;
    tick_rate_mhz = freq / 1000000;

    // disable the counter (set CAUSE.DC, bit 27; cleared again in
    // platform_set_periodic_timer)
    mips_write_c0_cause(mips_read_c0_cause() | (1<<27));

    // figure out which interrupt the timer is set to
    uint32_t ipti = BITS_SHIFT(mips_read_c0_intctl(), 31, 29);
    if (ipti >= 2) {
        /* timer is routed to a vectored interrupt: unmask it */
        mips_enable_irq(ipti);
    }
}
+
diff --git a/src/bsp/lk/arch/mips/vectors.S b/src/bsp/lk/arch/mips/vectors.S
new file mode 100644
index 0000000..aacfde4
--- /dev/null
+++ b/src/bsp/lk/arch/mips/vectors.S
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+#include <arch/mips.h>
+
/* Exception vector table.  Placed in the ".text.vectab" section so the
 * linker script can pin it at the cpu's exception base; .org positions
 * each handler at its architected offset within the table. */
.section ".text.vectab"
FUNCTION(vectab)
.org 0
_tlb_refill:
    /* TLB refill is not implemented: spin here so the hang is observable */
    b       .

/* Push an 88-byte exception frame on the current stack: the caller-saved
 * GPRs (at, v0-v1, a0-a3, t0-t9, gp, ra) plus the STATUS/CAUSE/EPC cp0
 * registers.  NOTE(review): offsets presumably mirror the iframe struct
 * consumed by the C handlers — confirm against arch/mips.h. */
.macro iframe_save
    .set    push
    .set    noat
    addiu   $sp, -88

    /* save the caller-saved registers */
    sw      $at, 0($sp)
    sw      $v0, 4($sp)
    sw      $v1, 8($sp)
    sw      $a0, 12($sp)
    sw      $a1, 16($sp)
    sw      $a2, 20($sp)
    sw      $a3, 24($sp)
    sw      $t0, 28($sp)
    sw      $t1, 32($sp)
    sw      $t2, 36($sp)
    sw      $t3, 40($sp)
    sw      $t4, 44($sp)
    sw      $t5, 48($sp)
    sw      $t6, 52($sp)
    sw      $t7, 56($sp)
    sw      $t8, 60($sp)
    sw      $t9, 64($sp)
    sw      $gp, 68($sp)
    sw      $ra, 72($sp)

    /* save the control registers */
    mfc0    $at, $12 /* status */
    sw      $at, 76($sp)
    mfc0    $at, $13 /* cause */
    sw      $at, 80($sp)
    mfc0    $at, $14 /* epc */
    sw      $at, 84($sp)

    .set    pop
.endm

/* Undo iframe_save: restore cp0 state and the saved GPRs, then pop the
 * 88-byte frame.  Uses k0 (kernel scratch) for the cp0 writes. */
.macro iframe_restore
    .set    push
    .set    noat

    /* restore the temporary registers */
    lw      $at, 0($sp)
    lw      $v0, 4($sp)
    lw      $v1, 8($sp)
    lw      $a0, 12($sp)
    lw      $a1, 16($sp)
    lw      $a2, 20($sp)
    lw      $a3, 24($sp)
    lw      $t0, 28($sp)
    lw      $t1, 32($sp)
    lw      $t2, 36($sp)
    lw      $t3, 40($sp)
    lw      $t4, 44($sp)
    lw      $t5, 48($sp)
    lw      $t6, 52($sp)
    lw      $t7, 56($sp)
    lw      $t8, 60($sp)
    lw      $t9, 64($sp)
    lw      $gp, 68($sp)
    lw      $ra, 72($sp)

    /* restore the control registers */
    lw      $k0, 76($sp)
    mtc0    $k0, $12 /* status */
    lw      $k0, 80($sp)
    mtc0    $k0, $13 /* cause */
    lw      $k0, 84($sp)
    mtc0    $k0, $14 /* epc */

    addiu   $sp, 88
    .set    pop
.endm

/* compatibility mode irq/syscall/general exception (offset 0x180) */
.org 0x180
_irq:
    la      $k0, mips_gen_exception
    li      $k1, 0
    b       shared_irq_save_return

/* vectored base: one stub per vector, spaced by VECTORED_OFFSET_SHIFT.
 * Each stub loads the common handler and its vector number, then joins
 * the shared save/dispatch path; the trailing `b .` is unreachable padding. */
.macro vectored_irq, num
.org 0x200 + VECTORED_OFFSET_SHIFT * \num
_vectored_irq\num:
    la      $k0, mips_irq
    li      $k1, \num
    b       shared_irq_save_return
    b       .
.endm

vectored_irq 0
vectored_irq 1
vectored_irq 2
vectored_irq 3
vectored_irq 4
vectored_irq 5
vectored_irq 6
vectored_irq 7
vectored_irq 8
vectored_irq 9

/* branched to from above, k0 holds address to call, k1 holds arg to function */
shared_irq_save_return:
    iframe_save

    /* call handler(iframe, vector_num) */
    move    $a0, $sp
    move    $a1, $k1
    /* NOTE(review): `jal` with a register operand relies on gas expanding
     * it to jalr — confirm the toolchain accepts this form */
    jal     $k0

    iframe_restore

    eret
+
diff --git a/src/bsp/lk/arch/or1k/arch.c b/src/bsp/lk/arch/or1k/arch.c
new file mode 100644
index 0000000..b2c7890
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/arch.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <trace.h>
+#include <err.h>
+#include <debug.h>
+#include <arch/or1k.h>
+#include <arch/ops.h>
+#include <arch/mmu.h>
+
/* Early, pre-threading arch setup hook; nothing required on or1k. */
void arch_early_init(void)
{
}

/* Main arch init hook, run once the kernel is up; trace-only for now. */
void arch_init(void)
{
    TRACE;
}

/* Idle-thread hook; no low-power/wait instruction is used yet. */
void arch_idle(void)
{
}

/* Transfer control to another loaded image; unimplemented on or1k. */
void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3)
{
    PANIC_UNIMPLEMENTED;
}
diff --git a/src/bsp/lk/arch/or1k/asm.S b/src/bsp/lk/arch/or1k/asm.S
new file mode 100644
index 0000000..b1643a0
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/asm.S
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015 Stefan Kristiansson
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+
/* void or1k_context_switch(
    struct or1k_context_switch_frame *oldcs,
    struct or1k_context_switch_frame *newcs);

   Saves the registers the or1k ABI requires a callee to preserve
   (sp/fp, link register r9, and the even callee-saved registers
   r10-r30) into *oldcs (r3 = first argument), then reloads the same
   set from *newcs (r4 = second argument) and returns through the new
   context's r9.  Offsets must stay in sync with
   struct or1k_context_switch_frame. */
FUNCTION(or1k_context_switch)
    /* save old context */
    l.sw    0(r3), r1
    l.sw    4(r3), r2
    l.sw    8(r3), r9
    l.sw    12(r3), r10
    l.sw    16(r3), r14
    l.sw    20(r3), r16
    l.sw    24(r3), r18
    l.sw    28(r3), r20
    l.sw    32(r3), r22
    l.sw    36(r3), r24
    l.sw    40(r3), r26
    l.sw    44(r3), r28
    l.sw    48(r3), r30

    /* restore new context */
    l.lwz   r30, 48(r4)
    l.lwz   r28, 44(r4)
    l.lwz   r26, 40(r4)
    l.lwz   r24, 36(r4)
    l.lwz   r22, 32(r4)
    l.lwz   r20, 28(r4)
    l.lwz   r18, 24(r4)
    l.lwz   r16, 20(r4)
    l.lwz   r14, 16(r4)
    l.lwz   r10, 12(r4)
    l.lwz   r9, 8(r4)
    l.lwz   r2, 4(r4)
    /* jump through the new link register; the new stack pointer is
       loaded in the jump's delay slot */
    l.jr    r9
     l.lwz  r1, 0(r4)
diff --git a/src/bsp/lk/arch/or1k/cache-ops.c b/src/bsp/lk/arch/or1k/cache-ops.c
new file mode 100644
index 0000000..bd1d842
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/cache-ops.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2015 Stefan Kristiansson
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <arch/ops.h>
+#include <arch/or1k.h>
+
+static inline uint32_t dc_block_size(void)
+{
+    uint32_t dccfgr = mfspr(OR1K_SPR_SYS_DCCFGR_ADDR);
+
+    return dccfgr & OR1K_SPR_SYS_DCCFGR_CBS_MASK ? 32 : 16;
+}
+
+static inline uint32_t dc_sets(void)
+{
+    uint32_t dccfgr = mfspr(OR1K_SPR_SYS_DCCFGR_ADDR);
+
+    return 1 << OR1K_SPR_SYS_DCCFGR_NCS_GET(dccfgr);
+}
+
+static inline uint32_t ic_block_size(void)
+{
+    uint32_t iccfgr = mfspr(OR1K_SPR_SYS_ICCFGR_ADDR);
+
+    return iccfgr & OR1K_SPR_SYS_ICCFGR_CBS_MASK ? 32 : 16;
+}
+
+static inline uint32_t ic_sets(void)
+{
+    uint32_t iccfgr = mfspr(OR1K_SPR_SYS_ICCFGR_ADDR);
+
+    return 1 << OR1K_SPR_SYS_ICCFGR_NCS_GET(iccfgr);
+}
+
+void arch_invalidate_cache_all(void)
+{
+    uint32_t i;
+    uint32_t cache_size;
+    uint32_t block_size;
+
+    block_size = ic_block_size();
+    cache_size = block_size * ic_sets();
+    for (i = 0; i < cache_size; i += block_size)
+        mtspr(OR1K_SPR_ICACHE_ICBIR_ADDR, i);
+
+    block_size = dc_block_size();
+    cache_size = block_size * dc_sets();
+    for (i = 0; i < cache_size; i += block_size)
+        mtspr(OR1K_SPR_DCACHE_DCBIR_ADDR, i);
+}
+
+void arch_disable_cache(uint flags)
+{
+    uint32_t sr = mfspr(OR1K_SPR_SYS_SR_ADDR);
+
+    if (flags & ICACHE)
+        sr &= ~OR1K_SPR_SYS_SR_ICE_MASK;
+    if (flags & DCACHE)
+        sr &= ~OR1K_SPR_SYS_SR_DCE_MASK;
+
+    mtspr(OR1K_SPR_SYS_SR_ADDR, sr);
+}
+
+void arch_enable_cache(uint flags)
+{
+    uint32_t sr = mfspr(OR1K_SPR_SYS_SR_ADDR);
+
+    if (flags & ICACHE)
+        sr |= OR1K_SPR_SYS_SR_ICE_MASK;
+    if (flags & DCACHE)
+        sr |= OR1K_SPR_SYS_SR_DCE_MASK;
+
+    mtspr(OR1K_SPR_SYS_SR_ADDR, sr);
+}
+
+/* flush dcache */
+void arch_clean_cache_range(addr_t start, size_t len)
+{
+    addr_t addr;
+    uint32_t block_size = dc_block_size();
+
+    for (addr = start; addr < start + len; addr += block_size)
+        mtspr(OR1K_SPR_DCACHE_DCBFR_ADDR, addr);
+}
+
+/* invalidate dcache */
+void arch_invalidate_cache_range(addr_t start, size_t len)
+{
+    addr_t addr;
+    uint32_t block_size = dc_block_size();
+
+    for (addr = start; addr < start + len; addr += block_size)
+        mtspr(OR1K_SPR_DCACHE_DCBIR_ADDR, addr);
+}
+
/* flush + invalidate dcache for [start, start + len) */
void arch_clean_invalidate_cache_range(addr_t start, size_t len)
{
    /* invalidate is implied by flush on or1k */
    arch_clean_cache_range(start, len);
}
+
+/* flush dcache + invalidate icache */
+void arch_sync_cache_range(addr_t start, size_t len)
+{
+    addr_t addr;
+    uint32_t block_size = ic_block_size();
+
+    arch_clean_cache_range(start, len);
+    for (addr = start; addr < start + len; addr += block_size)
+        mtspr(OR1K_SPR_ICACHE_ICBIR_ADDR, addr);
+}
diff --git a/src/bsp/lk/arch/or1k/exceptions.c b/src/bsp/lk/arch/or1k/exceptions.c
new file mode 100644
index 0000000..aca23f2
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/exceptions.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2015 Stefan Kristiansson
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <kernel/thread.h>
+
+enum handler_return platform_irq(void);
+enum handler_return platform_tick(void);
+
+void or1k_irq(void)
+{
+    if (platform_irq() == INT_RESCHEDULE)
+        thread_preempt();
+}
+
+void or1k_tick(void)
+{
+    if (platform_tick() == INT_RESCHEDULE)
+        thread_preempt();
+}
diff --git a/src/bsp/lk/arch/or1k/faults.c b/src/bsp/lk/arch/or1k/faults.c
new file mode 100644
index 0000000..646af00
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/faults.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2015 Stefan Kristiansson
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <debug.h>
+#include <arch/or1k.h>
+#include <kernel/thread.h>
+#include <platform.h>
+
/*
 * Print the full register state captured in the exception frame plus a
 * hexdump of the bottom of the faulting stack.  r0 is architecturally
 * zero and r1 (sp) is reconstructed rather than stored in the frame.
 *
 * NOTE(review): `stack` is frame + 128 + sizeof(frame), where
 * sizeof(frame) is the size of the *pointer* (4 bytes), not the iframe —
 * presumably reconstructing the pre-exception sp from the entry asm's
 * frame layout; confirm against the exception vectors (or whether
 * sizeof(*frame) was intended).
 */
static void dump_fault_frame(struct or1k_iframe *frame)
{
    addr_t stack = (addr_t)((char *)frame + 128 + sizeof(frame));

    dprintf(CRITICAL, "r0:  0x%08x r1:  0x%08x: r2:  0x%08x r3:  0x%08x\n",
            0, (uint32_t)stack, frame->r2, frame->r3);
    dprintf(CRITICAL, "r4:  0x%08x r5:  0x%08x: r6:  0x%08x r7:  0x%08x\n",
            frame->r4, frame->r5, frame->r6, frame->r7);
    dprintf(CRITICAL, "r8:  0x%08x r9:  0x%08x: r10: 0x%08x r11: 0x%08x\n",
            frame->r8, frame->r9, frame->r10, frame->r11);
    dprintf(CRITICAL, "r12: 0x%08x r13: 0x%08x: r14: 0x%08x r15: 0x%08x\n",
            frame->r12, frame->r13, frame->r14, frame->r15);
    dprintf(CRITICAL, "r16: 0x%08x r17: 0x%08x: r18: 0x%08x r19: 0x%08x\n",
            frame->r16, frame->r17, frame->r18, frame->r19);
    dprintf(CRITICAL, "r20: 0x%08x r21: 0x%08x: r22: 0x%08x r23: 0x%08x\n",
            frame->r20, frame->r21, frame->r22, frame->r23);
    dprintf(CRITICAL, "r24: 0x%08x r25: 0x%08x: r26: 0x%08x r27: 0x%08x\n",
            frame->r24, frame->r25, frame->r26, frame->r27);
    dprintf(CRITICAL, "r28: 0x%08x r29: 0x%08x: r30: 0x%08x r31: 0x%08x\n",
            frame->r28, frame->r29, frame->r30, frame->r31);
    dprintf(CRITICAL, "PC:  0x%08x SR:  0x%08x\n",
            frame->pc, frame->sr);

    dprintf(CRITICAL, "bottom of stack at 0x%08x:\n", (unsigned int)stack);
    hexdump((void *)stack, 128);

}
+
+static void exception_die(struct or1k_iframe *frame, const char *msg)
+{
+    dprintf(CRITICAL, msg);
+    dump_fault_frame(frame);
+
+    platform_halt(HALT_ACTION_HALT, HALT_REASON_SW_PANIC);
+    for (;;);
+}
+
/* Bus error exception: no recovery implemented — report EEAR and die. */
void or1k_busfault_handler(struct or1k_iframe *frame, uint32_t addr)
{
    dprintf(CRITICAL, "unhandled busfault (EEAR: 0x%08x)", addr);
    exception_die(frame, ", halting\n");
}

/* DTLB/data page fault: no demand paging — report the faulting address and die. */
void or1k_data_pagefault_handler(struct or1k_iframe *frame, uint32_t addr)
{
    dprintf(CRITICAL, "unhandled data pagefault (EEAR: 0x%08x)", addr);
    exception_die(frame, ", halting\n");
}

/* ITLB/instruction page fault: report the faulting address and die. */
void or1k_instruction_pagefault_handler(struct or1k_iframe *frame, uint32_t addr)
{
    dprintf(CRITICAL, "unhandled instruction pagefault (EEAR: 0x%08x)", addr);
    exception_die(frame, ", halting\n");
}

/* Unaligned access exception: report the offending address and die. */
void or1k_alignment_handler(struct or1k_iframe *frame, uint32_t addr)
{
    dprintf(CRITICAL, "unhandled unaligned access (EEAR: 0x%08x)", addr);
    exception_die(frame, ", halting\n");
}

/* Illegal instruction exception: report EEAR and die. */
void or1k_illegal_instruction_handler(struct or1k_iframe *frame, uint32_t addr)
{
    dprintf(CRITICAL, "unhandled illegal instruction (EEAR: 0x%08x)", addr);
    exception_die(frame, ", halting\n");
}

/* l.sys trap: no syscall layer exists — die. */
void or1k_syscall_handler(struct or1k_iframe *frame)
{
    exception_die(frame, "unhandled syscall, halting\n");
}

/* Catch-all for any vector without a dedicated handler. */
void or1k_unhandled_exception(struct or1k_iframe *frame, uint32_t vector)
{
    dprintf(CRITICAL, "unhandled exception (vector: 0x%08x)", vector);
    exception_die(frame, ", halting\n");
}
diff --git a/src/bsp/lk/arch/or1k/include/arch/arch_ops.h b/src/bsp/lk/arch/or1k/include/arch/arch_ops.h
new file mode 100644
index 0000000..ef7438c
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/include/arch/arch_ops.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015 Stefan Kristiansson
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <compiler.h>
+#include <arch/or1k.h>
+
+#ifndef ASSEMBLY
+static inline void arch_enable_ints(void)
+{
+    uint32_t sr = mfspr(OR1K_SPR_SYS_SR_ADDR);
+
+    sr |= OR1K_SPR_SYS_SR_IEE_MASK | OR1K_SPR_SYS_SR_TEE_MASK;
+    mtspr(OR1K_SPR_SYS_SR_ADDR, sr);
+}
+
+static inline void arch_disable_ints(void)
+{
+    uint32_t sr = mfspr(OR1K_SPR_SYS_SR_ADDR);
+
+    sr &= ~(OR1K_SPR_SYS_SR_IEE_MASK | OR1K_SPR_SYS_SR_TEE_MASK);
+    mtspr(OR1K_SPR_SYS_SR_ADDR, sr);
+}
+
+static inline bool arch_ints_disabled(void)
+{
+    uint32_t sr = mfspr(OR1K_SPR_SYS_SR_ADDR);
+
+    return !(sr & (OR1K_SPR_SYS_SR_IEE_MASK | OR1K_SPR_SYS_SR_TEE_MASK));
+}
+
/* Atomically add val to *ptr; returns the value *ptr held beforehand. */
static inline int atomic_add(volatile int *ptr, int val)
{
    int previous = __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
    return previous;
}
+
/* Atomically OR val into *ptr; returns the value *ptr held beforehand. */
static inline int atomic_or(volatile int *ptr, int val)
{
    int previous = __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
    return previous;
}
+
/* Atomically AND val into *ptr; returns the value *ptr held beforehand. */
static inline int atomic_and(volatile int *ptr, int val)
{
    int previous = __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
    return previous;
}
+
/* Atomically replace *ptr with val; returns the value *ptr held beforehand. */
static inline int atomic_swap(volatile int *ptr, int val)
{
    int previous = __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
    return previous;
}
+
/*
 * Compare-and-swap: if *ptr == oldval, store newval; returns the value
 * loaded from *ptr (== oldval on success).  Built on the or1k
 * l.lwa/l.swa load-link/store-conditional pair, retrying if the
 * reservation is lost between load and store.
 */
static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval)
{
    __asm__ __volatile__(
        "1: l.lwa %0, 0(%1) \n"   /* load-linked *ptr into the result reg */
        "   l.sfeq %0, %2   \n"   /* flag = (loaded value == expected) */
        "   l.bnf 1f        \n"   /* mismatch: skip the store, return loaded */
        "    l.nop          \n"   /* branch delay slot */
        "   l.swa 0(%1), %3 \n"   /* store-conditional newval; flag = success */
        "   l.bnf 1b        \n"   /* reservation lost: retry from the load */
        "1:  l.nop          \n"
        : "=&r"(oldval)
        : "r"(ptr), "r"(oldval), "r"(newval)
        : "cc", "memory");

    return oldval;
}
+
/* use a global pointer to store the current_thread (uniprocessor port,
 * so a single global suffices; defined in arch/mips or or1k thread.c) */
extern struct thread *_current_thread;

/* Return the thread currently executing on this cpu. */
static inline struct thread *get_current_thread(void)
{
    return _current_thread;
}

/* Record t as the currently executing thread; called by the scheduler
 * during a context switch. */
static inline void set_current_thread(struct thread *t)
{
    _current_thread = t;
}
+
/* No cycle counter is wired up on this port; report 0 so callers that
 * compute deltas see a harmless constant. */
static inline uint32_t arch_cycle_count(void)
{
    return 0;
}
+
+static inline uint arch_curr_cpu_num(void)
+{
+    return 0;
+}
+#endif // !ASSEMBLY
diff --git a/src/bsp/lk/arch/or1k/include/arch/arch_thread.h b/src/bsp/lk/arch/or1k/include/arch/arch_thread.h
new file mode 100644
index 0000000..8abcfff
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/include/arch/arch_thread.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015 Stefan Kristiansson
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <sys/types.h>
+
/* Register state preserved across or1k_context_switch(); field order and
 * offsets must stay in sync with the save/restore sequence in asm.S. */
struct or1k_context_switch_frame {
    uint32_t r1; // stack pointer
    uint32_t r2; // frame pointer

    uint32_t r9; // link register

    /* callee saved */
    uint32_t r10;
    uint32_t r14;
    uint32_t r16;
    uint32_t r18;
    uint32_t r20;
    uint32_t r22;
    uint32_t r24;
    uint32_t r26;
    uint32_t r28;
    uint32_t r30;
};

/* per-thread arch-specific state embedded in struct thread */
struct arch_thread {
    struct or1k_context_switch_frame cs_frame;
};

/* Save the current callee-saved state into *oldcs and resume execution
 * from *newcs (implemented in asm.S). */
void or1k_context_switch(struct or1k_context_switch_frame *oldcs,
                         struct or1k_context_switch_frame *newcs);
diff --git a/src/bsp/lk/arch/or1k/include/arch/defines.h b/src/bsp/lk/arch/or1k/include/arch/defines.h
new file mode 100644
index 0000000..3b0eaf5
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/include/arch/defines.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015 Stefan Kristiansson
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
/* or1k MMU page size: 8 KiB */
#define PAGE_SIZE 8192
#define PAGE_SIZE_SHIFT 13 /* log2(PAGE_SIZE) */

/* Cache line can be configured, but this is max */
#define CACHE_LINE 32

/* default kernel thread stack size (one page) */
#define ARCH_DEFAULT_STACK_SIZE 8192
diff --git a/src/bsp/lk/arch/or1k/include/arch/or1k-sprs.h b/src/bsp/lk/arch/or1k/include/arch/or1k-sprs.h
new file mode 100644
index 0000000..033004d
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/include/arch/or1k-sprs.h
@@ -0,0 +1,2417 @@
+/* or1k-sprs.h -- OR1K SPR definitions
+   Copyright (c) 2014 OpenRISC Project Maintainers
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following condition
+   is met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+   FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+   COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+   INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+   (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+   SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+   HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+   STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+   OF THE POSSIBILITY OF SUCH DAMAGE.
+   */
+
+/*
+ * Generated from revision 4cab27375dd07cd890d5493e11446cc06ca8265e
+ *  on Fri Feb 13 03:15:06 2015
+ */
+
+#ifndef _OR1K_SPRS_H_
+#define _OR1K_SPRS_H_
+
+#define OR1K_SPR_GROUP_BITS   5
+#define OR1K_SPR_GROUP_LSB   11
+#define OR1K_SPR_GROUP_MSB   15
+#define OR1K_SPR_INDEX_BITS  11
+#define OR1K_SPR_INDEX_LSB    0
+#define OR1K_SPR_INDEX_MSB   10
+
+#ifdef __ASSEMBLER__
+#define OR1K_UNSIGNED(x) x
+#else
+#define OR1K_UNSIGNED(x) x##U
+#endif
+
+
+/****************/
+/* System Group */
+/****************/
+#define OR1K_SPR_SYS_GROUP 0x00
+
+/* Version Register */
+#define OR1K_SPR_SYS_VR_INDEX OR1K_UNSIGNED(0x000)
+#define OR1K_SPR_SYS_VR_ADDR  OR1K_UNSIGNED(0x0000)
+
+/* Revision */
+#define OR1K_SPR_SYS_VR_REV_LSB    0
+#define OR1K_SPR_SYS_VR_REV_MSB    5
+#define OR1K_SPR_SYS_VR_REV_BITS   6
+#define OR1K_SPR_SYS_VR_REV_MASK   OR1K_UNSIGNED(0x0000003f)
+#define OR1K_SPR_SYS_VR_REV_GET(X) (((X) >> 0) & OR1K_UNSIGNED(0x0000003f))
+#define OR1K_SPR_SYS_VR_REV_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffc0)) | ((Y) << 0))
+
+/* Updated Version Registers Present */
+#define OR1K_SPR_SYS_VR_UVRP_OFFSET 6
+#define OR1K_SPR_SYS_VR_UVRP_MASK   0x00000040
+#define OR1K_SPR_SYS_VR_UVRP_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_SYS_VR_UVRP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+/* Configuration Template */
+#define OR1K_SPR_SYS_VR_CFG_LSB    16
+#define OR1K_SPR_SYS_VR_CFG_MSB    23
+#define OR1K_SPR_SYS_VR_CFG_BITS   8
+#define OR1K_SPR_SYS_VR_CFG_MASK   OR1K_UNSIGNED(0x00ff0000)
+#define OR1K_SPR_SYS_VR_CFG_GET(X) (((X) >> 16) & OR1K_UNSIGNED(0x000000ff))
+#define OR1K_SPR_SYS_VR_CFG_SET(X, Y) (((X) & OR1K_UNSIGNED(0xff00ffff)) | ((Y) << 16))
+
+/* Version */
+#define OR1K_SPR_SYS_VR_VER_LSB    24
+#define OR1K_SPR_SYS_VR_VER_MSB    31
+#define OR1K_SPR_SYS_VR_VER_BITS   8
+#define OR1K_SPR_SYS_VR_VER_MASK   OR1K_UNSIGNED(0xff000000)
+#define OR1K_SPR_SYS_VR_VER_GET(X) (((X) >> 24) & OR1K_UNSIGNED(0x000000ff))
+#define OR1K_SPR_SYS_VR_VER_SET(X, Y) (((X) & OR1K_UNSIGNED(0x00ffffff)) | ((Y) << 24))
+
+
+/* Unit Present Register */
+#define OR1K_SPR_SYS_UPR_INDEX OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_SYS_UPR_ADDR  OR1K_UNSIGNED(0x0001)
+
+/* UPR Present */
+#define OR1K_SPR_SYS_UPR_UP_OFFSET 0
+#define OR1K_SPR_SYS_UPR_UP_MASK   0x00000001
+#define OR1K_SPR_SYS_UPR_UP_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_SYS_UPR_UP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Data Cache Present */
+#define OR1K_SPR_SYS_UPR_DCP_OFFSET 1
+#define OR1K_SPR_SYS_UPR_DCP_MASK   0x00000002
+#define OR1K_SPR_SYS_UPR_DCP_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_SYS_UPR_DCP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* Instruction Cache Present */
+#define OR1K_SPR_SYS_UPR_ICP_OFFSET 2
+#define OR1K_SPR_SYS_UPR_ICP_MASK   0x00000004
+#define OR1K_SPR_SYS_UPR_ICP_GET(X) (((X) >> 2) & 0x1)
+#define OR1K_SPR_SYS_UPR_ICP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffb)) | ((!!(Y)) << 2))
+
+/* Data MMU Present */
+#define OR1K_SPR_SYS_UPR_DMP_OFFSET 3
+#define OR1K_SPR_SYS_UPR_DMP_MASK   0x00000008
+#define OR1K_SPR_SYS_UPR_DMP_GET(X) (((X) >> 3) & 0x1)
+#define OR1K_SPR_SYS_UPR_DMP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff7)) | ((!!(Y)) << 3))
+
+/* Instruction MMU Present */
+#define OR1K_SPR_SYS_UPR_IMP_OFFSET 4
+#define OR1K_SPR_SYS_UPR_IMP_MASK   0x00000010
+#define OR1K_SPR_SYS_UPR_IMP_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_SYS_UPR_IMP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* MAC Present */
+#define OR1K_SPR_SYS_UPR_MP_OFFSET 5
+#define OR1K_SPR_SYS_UPR_MP_MASK   0x00000020
+#define OR1K_SPR_SYS_UPR_MP_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_SYS_UPR_MP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* Debug Unit Present */
+#define OR1K_SPR_SYS_UPR_DUP_OFFSET 6
+#define OR1K_SPR_SYS_UPR_DUP_MASK   0x00000040
+#define OR1K_SPR_SYS_UPR_DUP_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_SYS_UPR_DUP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+/* Performance Counters Unit Present */
+#define OR1K_SPR_SYS_UPR_PCUP_OFFSET 7
+#define OR1K_SPR_SYS_UPR_PCUP_MASK   0x00000080
+#define OR1K_SPR_SYS_UPR_PCUP_GET(X) (((X) >> 7) & 0x1)
+#define OR1K_SPR_SYS_UPR_PCUP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff7f)) | ((!!(Y)) << 7))
+
+/* Programmable Interrupt Controller Present */
+#define OR1K_SPR_SYS_UPR_PICP_OFFSET 8
+#define OR1K_SPR_SYS_UPR_PICP_MASK   0x00000100
+#define OR1K_SPR_SYS_UPR_PICP_GET(X) (((X) >> 8) & 0x1)
+#define OR1K_SPR_SYS_UPR_PICP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffeff)) | ((!!(Y)) << 8))
+
+/* Power Management Present */
+#define OR1K_SPR_SYS_UPR_PMP_OFFSET 9
+#define OR1K_SPR_SYS_UPR_PMP_MASK   0x00000200
+#define OR1K_SPR_SYS_UPR_PMP_GET(X) (((X) >> 9) & 0x1)
+#define OR1K_SPR_SYS_UPR_PMP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((!!(Y)) << 9))
+
+/* Tick Timer Present */
+#define OR1K_SPR_SYS_UPR_TTP_OFFSET 10
+#define OR1K_SPR_SYS_UPR_TTP_MASK   0x00000400
+#define OR1K_SPR_SYS_UPR_TTP_GET(X) (((X) >> 10) & 0x1)
+#define OR1K_SPR_SYS_UPR_TTP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffbff)) | ((!!(Y)) << 10))
+
+/* Custom Units Present */
+#define OR1K_SPR_SYS_UPR_CUP_LSB    24
+#define OR1K_SPR_SYS_UPR_CUP_MSB    31
+#define OR1K_SPR_SYS_UPR_CUP_BITS   8
+#define OR1K_SPR_SYS_UPR_CUP_MASK   OR1K_UNSIGNED(0xff000000)
+#define OR1K_SPR_SYS_UPR_CUP_GET(X) (((X) >> 24) & OR1K_UNSIGNED(0x000000ff))
+#define OR1K_SPR_SYS_UPR_CUP_SET(X, Y) (((X) & OR1K_UNSIGNED(0x00ffffff)) | ((Y) << 24))
+
+
+/* CPU Configuration Register */
+#define OR1K_SPR_SYS_CPUCFGR_INDEX OR1K_UNSIGNED(0x002)
+#define OR1K_SPR_SYS_CPUCFGR_ADDR  OR1K_UNSIGNED(0x0002)
+
+/* Number of Shadow GPR Files */
+#define OR1K_SPR_SYS_CPUCFGR_NSGF_LSB    0
+#define OR1K_SPR_SYS_CPUCFGR_NSGF_MSB    3
+#define OR1K_SPR_SYS_CPUCFGR_NSGF_BITS   4
+#define OR1K_SPR_SYS_CPUCFGR_NSGF_MASK   OR1K_UNSIGNED(0x0000000f)
+#define OR1K_SPR_SYS_CPUCFGR_NSGF_GET(X) (((X) >> 0) & OR1K_UNSIGNED(0x0000000f))
+#define OR1K_SPR_SYS_CPUCFGR_NSGF_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff0)) | ((Y) << 0))
+
+/* Custom GPR File */
+#define OR1K_SPR_SYS_CPUCFGR_CGF_OFFSET 4
+#define OR1K_SPR_SYS_CPUCFGR_CGF_MASK   0x00000010
+#define OR1K_SPR_SYS_CPUCFGR_CGF_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_SYS_CPUCFGR_CGF_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* ORBIS32 Supported */
+#define OR1K_SPR_SYS_CPUCFGR_OB32S_OFFSET 5
+#define OR1K_SPR_SYS_CPUCFGR_OB32S_MASK   0x00000020
+#define OR1K_SPR_SYS_CPUCFGR_OB32S_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_SYS_CPUCFGR_OB32S_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* ORBIS64 Supported */
+#define OR1K_SPR_SYS_CPUCFGR_OB64S_OFFSET 6
+#define OR1K_SPR_SYS_CPUCFGR_OB64S_MASK   0x00000040
+#define OR1K_SPR_SYS_CPUCFGR_OB64S_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_SYS_CPUCFGR_OB64S_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+/* ORFPX32 Supported */
+#define OR1K_SPR_SYS_CPUCFGR_OF32S_OFFSET 7
+#define OR1K_SPR_SYS_CPUCFGR_OF32S_MASK   0x00000080
+#define OR1K_SPR_SYS_CPUCFGR_OF32S_GET(X) (((X) >> 7) & 0x1)
+#define OR1K_SPR_SYS_CPUCFGR_OF32S_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff7f)) | ((!!(Y)) << 7))
+
+/* ORFPX64 Supported */
+#define OR1K_SPR_SYS_CPUCFGR_OF64S_OFFSET 8
+#define OR1K_SPR_SYS_CPUCFGR_OF64S_MASK   0x00000100
+#define OR1K_SPR_SYS_CPUCFGR_OF64S_GET(X) (((X) >> 8) & 0x1)
+#define OR1K_SPR_SYS_CPUCFGR_OF64S_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffeff)) | ((!!(Y)) << 8))
+
+/* ORVDX64 Supported */
+#define OR1K_SPR_SYS_CPUCFGR_OV64S_OFFSET 9
+#define OR1K_SPR_SYS_CPUCFGR_OV64S_MASK   0x00000200
+#define OR1K_SPR_SYS_CPUCFGR_OV64S_GET(X) (((X) >> 9) & 0x1)
+#define OR1K_SPR_SYS_CPUCFGR_OV64S_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((!!(Y)) << 9))
+
+/* No Delay-Slot */
+#define OR1K_SPR_SYS_CPUCFGR_ND_OFFSET 10
+#define OR1K_SPR_SYS_CPUCFGR_ND_MASK   0x00000400
+#define OR1K_SPR_SYS_CPUCFGR_ND_GET(X) (((X) >> 10) & 0x1)
+#define OR1K_SPR_SYS_CPUCFGR_ND_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffbff)) | ((!!(Y)) << 10))
+
+/* Architecture Version Register Present */
+#define OR1K_SPR_SYS_CPUCFGR_AVRP_OFFSET 11
+#define OR1K_SPR_SYS_CPUCFGR_AVRP_MASK   0x00000800
+#define OR1K_SPR_SYS_CPUCFGR_AVRP_GET(X) (((X) >> 11) & 0x1)
+#define OR1K_SPR_SYS_CPUCFGR_AVRP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffff7ff)) | ((!!(Y)) << 11))
+
+/* Exception Vector Base Address Register Present */
+#define OR1K_SPR_SYS_CPUCFGR_EVBARP_OFFSET 12
+#define OR1K_SPR_SYS_CPUCFGR_EVBARP_MASK   0x00001000
+#define OR1K_SPR_SYS_CPUCFGR_EVBARP_GET(X) (((X) >> 12) & 0x1)
+#define OR1K_SPR_SYS_CPUCFGR_EVBARP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffefff)) | ((!!(Y)) << 12))
+
+/* Implementation-Specific Registers (ISR0-7) Present */
+#define OR1K_SPR_SYS_CPUCFGR_ISRP_OFFSET 13
+#define OR1K_SPR_SYS_CPUCFGR_ISRP_MASK   0x00002000
+#define OR1K_SPR_SYS_CPUCFGR_ISRP_GET(X) (((X) >> 13) & 0x1)
+#define OR1K_SPR_SYS_CPUCFGR_ISRP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffdfff)) | ((!!(Y)) << 13))
+
+/* Arithmetic Exception Control/Status Registers Present */
+#define OR1K_SPR_SYS_CPUCFGR_AECSRP_OFFSET 14
+#define OR1K_SPR_SYS_CPUCFGR_AECSRP_MASK   0x00004000
+#define OR1K_SPR_SYS_CPUCFGR_AECSRP_GET(X) (((X) >> 14) & 0x1)
+#define OR1K_SPR_SYS_CPUCFGR_AECSRP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffbfff)) | ((!!(Y)) << 14))
+
+
+/* Data MMU Configuration Register */
+#define OR1K_SPR_SYS_DMMUCFGR_INDEX OR1K_UNSIGNED(0x003)
+#define OR1K_SPR_SYS_DMMUCFGR_ADDR  OR1K_UNSIGNED(0x0003)
+
+/* Number of TLB Ways */
+#define OR1K_SPR_SYS_DMMUCFGR_NTW_LSB    0
+#define OR1K_SPR_SYS_DMMUCFGR_NTW_MSB    1
+#define OR1K_SPR_SYS_DMMUCFGR_NTW_BITS   2
+#define OR1K_SPR_SYS_DMMUCFGR_NTW_MASK   OR1K_UNSIGNED(0x00000003)
+#define OR1K_SPR_SYS_DMMUCFGR_NTW_GET(X) (((X) >> 0) & OR1K_UNSIGNED(0x00000003))
+#define OR1K_SPR_SYS_DMMUCFGR_NTW_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffc)) | ((Y) << 0))
+
+/* Number of TLB Sets */
+#define OR1K_SPR_SYS_DMMUCFGR_NTS_LSB    2
+#define OR1K_SPR_SYS_DMMUCFGR_NTS_MSB    4
+#define OR1K_SPR_SYS_DMMUCFGR_NTS_BITS   3
+#define OR1K_SPR_SYS_DMMUCFGR_NTS_MASK   OR1K_UNSIGNED(0x0000001c)
+#define OR1K_SPR_SYS_DMMUCFGR_NTS_GET(X) (((X) >> 2) & OR1K_UNSIGNED(0x00000007))
+#define OR1K_SPR_SYS_DMMUCFGR_NTS_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffe3)) | ((Y) << 2))
+
+/* Number of ATB Entries */
+#define OR1K_SPR_SYS_DMMUCFGR_NAE_LSB    5
+#define OR1K_SPR_SYS_DMMUCFGR_NAE_MSB    7
+#define OR1K_SPR_SYS_DMMUCFGR_NAE_BITS   3
+#define OR1K_SPR_SYS_DMMUCFGR_NAE_MASK   OR1K_UNSIGNED(0x000000e0)
+#define OR1K_SPR_SYS_DMMUCFGR_NAE_GET(X) (((X) >> 5) & OR1K_UNSIGNED(0x00000007))
+#define OR1K_SPR_SYS_DMMUCFGR_NAE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff1f)) | ((Y) << 5))
+
+/* Control Register Implemented */
+#define OR1K_SPR_SYS_DMMUCFGR_CRI_OFFSET 8
+#define OR1K_SPR_SYS_DMMUCFGR_CRI_MASK   0x00000100
+#define OR1K_SPR_SYS_DMMUCFGR_CRI_GET(X) (((X) >> 8) & 0x1)
+#define OR1K_SPR_SYS_DMMUCFGR_CRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffeff)) | ((!!(Y)) << 8))
+
+/* Protection Register Implemented */
+#define OR1K_SPR_SYS_DMMUCFGR_PRI_OFFSET 9
+#define OR1K_SPR_SYS_DMMUCFGR_PRI_MASK   0x00000200
+#define OR1K_SPR_SYS_DMMUCFGR_PRI_GET(X) (((X) >> 9) & 0x1)
+#define OR1K_SPR_SYS_DMMUCFGR_PRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((!!(Y)) << 9))
+
+/* TLB Entry Invalidate Register Implemented */
+#define OR1K_SPR_SYS_DMMUCFGR_TEIRI_OFFSET 10
+#define OR1K_SPR_SYS_DMMUCFGR_TEIRI_MASK   0x00000400
+#define OR1K_SPR_SYS_DMMUCFGR_TEIRI_GET(X) (((X) >> 10) & 0x1)
+#define OR1K_SPR_SYS_DMMUCFGR_TEIRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffbff)) | ((!!(Y)) << 10))
+
+/* Hardware TLB Reload */
+#define OR1K_SPR_SYS_DMMUCFGR_HTR_OFFSET 11
+#define OR1K_SPR_SYS_DMMUCFGR_HTR_MASK   0x00000800
+#define OR1K_SPR_SYS_DMMUCFGR_HTR_GET(X) (((X) >> 11) & 0x1)
+#define OR1K_SPR_SYS_DMMUCFGR_HTR_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffff7ff)) | ((!!(Y)) << 11))
+
+/* DTLB reloaded in software */
+#define OR1K_SPR_SYS_DMMUCFGR_HTR_SW 0
+/* DTLB reloaded in hardware */
+#define OR1K_SPR_SYS_DMMUCFGR_HTR_HW 1
+
+/* Instruction MMU Configuration Register */
+#define OR1K_SPR_SYS_IMMUCFGR_INDEX OR1K_UNSIGNED(0x004)
+#define OR1K_SPR_SYS_IMMUCFGR_ADDR  OR1K_UNSIGNED(0x0004)
+
+/* Number of TLB Ways */
+#define OR1K_SPR_SYS_IMMUCFGR_NTW_LSB    0
+#define OR1K_SPR_SYS_IMMUCFGR_NTW_MSB    1
+#define OR1K_SPR_SYS_IMMUCFGR_NTW_BITS   2
+#define OR1K_SPR_SYS_IMMUCFGR_NTW_MASK   OR1K_UNSIGNED(0x00000003)
+#define OR1K_SPR_SYS_IMMUCFGR_NTW_GET(X) (((X) >> 0) & OR1K_UNSIGNED(0x00000003))
+#define OR1K_SPR_SYS_IMMUCFGR_NTW_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffc)) | ((Y) << 0))
+
+/* Number of TLB Sets */
+#define OR1K_SPR_SYS_IMMUCFGR_NTS_LSB    2
+#define OR1K_SPR_SYS_IMMUCFGR_NTS_MSB    4
+#define OR1K_SPR_SYS_IMMUCFGR_NTS_BITS   3
+#define OR1K_SPR_SYS_IMMUCFGR_NTS_MASK   OR1K_UNSIGNED(0x0000001c)
+#define OR1K_SPR_SYS_IMMUCFGR_NTS_GET(X) (((X) >> 2) & OR1K_UNSIGNED(0x00000007))
+#define OR1K_SPR_SYS_IMMUCFGR_NTS_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffe3)) | ((Y) << 2))
+
+/* Number of ATB Entries */
+#define OR1K_SPR_SYS_IMMUCFGR_NAE_LSB    5
+#define OR1K_SPR_SYS_IMMUCFGR_NAE_MSB    7
+#define OR1K_SPR_SYS_IMMUCFGR_NAE_BITS   3
+#define OR1K_SPR_SYS_IMMUCFGR_NAE_MASK   OR1K_UNSIGNED(0x000000e0)
+#define OR1K_SPR_SYS_IMMUCFGR_NAE_GET(X) (((X) >> 5) & OR1K_UNSIGNED(0x00000007))
+#define OR1K_SPR_SYS_IMMUCFGR_NAE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff1f)) | ((Y) << 5))
+
+/* Control Register Implemented */
+#define OR1K_SPR_SYS_IMMUCFGR_CRI_OFFSET 8
+#define OR1K_SPR_SYS_IMMUCFGR_CRI_MASK   0x00000100
+#define OR1K_SPR_SYS_IMMUCFGR_CRI_GET(X) (((X) >> 8) & 0x1)
+#define OR1K_SPR_SYS_IMMUCFGR_CRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffeff)) | ((!!(Y)) << 8))
+
+/* Protection Register Implemented */
+#define OR1K_SPR_SYS_IMMUCFGR_PRI_OFFSET 9
+#define OR1K_SPR_SYS_IMMUCFGR_PRI_MASK   0x00000200
+#define OR1K_SPR_SYS_IMMUCFGR_PRI_GET(X) (((X) >> 9) & 0x1)
+#define OR1K_SPR_SYS_IMMUCFGR_PRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((!!(Y)) << 9))
+
+/* TLB Entry Invalidate Register Implemented */
+#define OR1K_SPR_SYS_IMMUCFGR_TEIRI_OFFSET 10
+#define OR1K_SPR_SYS_IMMUCFGR_TEIRI_MASK   0x00000400
+#define OR1K_SPR_SYS_IMMUCFGR_TEIRI_GET(X) (((X) >> 10) & 0x1)
+#define OR1K_SPR_SYS_IMMUCFGR_TEIRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffbff)) | ((!!(Y)) << 10))
+
+/* Hardware TLB Reload */
+#define OR1K_SPR_SYS_IMMUCFGR_HTR_OFFSET 11
+#define OR1K_SPR_SYS_IMMUCFGR_HTR_MASK   0x00000800
+#define OR1K_SPR_SYS_IMMUCFGR_HTR_GET(X) (((X) >> 11) & 0x1)
+#define OR1K_SPR_SYS_IMMUCFGR_HTR_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffff7ff)) | ((!!(Y)) << 11))
+
+/* ITLB reloaded in software */
+#define OR1K_SPR_SYS_IMMUCFGR_HTR_SW 0
+/* ITLB reloaded in hardware */
+#define OR1K_SPR_SYS_IMMUCFGR_HTR_HW 1
+
+/* Data Cache Configuration Register */
+#define OR1K_SPR_SYS_DCCFGR_INDEX OR1K_UNSIGNED(0x005)
+#define OR1K_SPR_SYS_DCCFGR_ADDR  OR1K_UNSIGNED(0x0005)
+
+/* Number of Cache Ways */
+#define OR1K_SPR_SYS_DCCFGR_NCW_LSB    0
+#define OR1K_SPR_SYS_DCCFGR_NCW_MSB    2
+#define OR1K_SPR_SYS_DCCFGR_NCW_BITS   3
+#define OR1K_SPR_SYS_DCCFGR_NCW_MASK   OR1K_UNSIGNED(0x00000007)
+#define OR1K_SPR_SYS_DCCFGR_NCW_GET(X) (((X) >> 0) & OR1K_UNSIGNED(0x00000007))
+#define OR1K_SPR_SYS_DCCFGR_NCW_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff8)) | ((Y) << 0))
+
+/* Number of Cache Sets */
+#define OR1K_SPR_SYS_DCCFGR_NCS_LSB    3
+#define OR1K_SPR_SYS_DCCFGR_NCS_MSB    6
+#define OR1K_SPR_SYS_DCCFGR_NCS_BITS   4
+#define OR1K_SPR_SYS_DCCFGR_NCS_MASK   OR1K_UNSIGNED(0x00000078)
+#define OR1K_SPR_SYS_DCCFGR_NCS_GET(X) (((X) >> 3) & OR1K_UNSIGNED(0x0000000f))
+#define OR1K_SPR_SYS_DCCFGR_NCS_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff87)) | ((Y) << 3))
+
+/* Cache Block Size */
+#define OR1K_SPR_SYS_DCCFGR_CBS_OFFSET 7
+#define OR1K_SPR_SYS_DCCFGR_CBS_MASK   0x00000080
+#define OR1K_SPR_SYS_DCCFGR_CBS_GET(X) (((X) >> 7) & 0x1)
+#define OR1K_SPR_SYS_DCCFGR_CBS_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff7f)) | ((!!(Y)) << 7))
+
+/* 16 Bytes */
+#define OR1K_SPR_SYS_DCCFGR_CBS_16 0
+/* 32 Bytes */
+#define OR1K_SPR_SYS_DCCFGR_CBS_32 1
+/* Cache Write Strategy */
+#define OR1K_SPR_SYS_DCCFGR_CWS_OFFSET 8
+#define OR1K_SPR_SYS_DCCFGR_CWS_MASK   0x00000100
+#define OR1K_SPR_SYS_DCCFGR_CWS_GET(X) (((X) >> 8) & 0x1)
+#define OR1K_SPR_SYS_DCCFGR_CWS_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffeff)) | ((!!(Y)) << 8))
+
+/* Write Through */
+#define OR1K_SPR_SYS_DCCFGR_CWS_WT 0
+/* Write Back */
+#define OR1K_SPR_SYS_DCCFGR_CWS_WB 1
+/* Cache Control Register Implemented */
+#define OR1K_SPR_SYS_DCCFGR_CCRI_OFFSET 9
+#define OR1K_SPR_SYS_DCCFGR_CCRI_MASK   0x00000200
+#define OR1K_SPR_SYS_DCCFGR_CCRI_GET(X) (((X) >> 9) & 0x1)
+#define OR1K_SPR_SYS_DCCFGR_CCRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((!!(Y)) << 9))
+
+/* Cache Block Invalidate Register Implemented */
+#define OR1K_SPR_SYS_DCCFGR_CBIRI_OFFSET 10
+#define OR1K_SPR_SYS_DCCFGR_CBIRI_MASK   0x00000400
+#define OR1K_SPR_SYS_DCCFGR_CBIRI_GET(X) (((X) >> 10) & 0x1)
+#define OR1K_SPR_SYS_DCCFGR_CBIRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffbff)) | ((!!(Y)) << 10))
+
+/* Cache Block Prefetch Register Implemented */
+#define OR1K_SPR_SYS_DCCFGR_CBPRI_OFFSET 11
+#define OR1K_SPR_SYS_DCCFGR_CBPRI_MASK   0x00000800
+#define OR1K_SPR_SYS_DCCFGR_CBPRI_GET(X) (((X) >> 11) & 0x1)
+#define OR1K_SPR_SYS_DCCFGR_CBPRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffff7ff)) | ((!!(Y)) << 11))
+
+/* Cache Block Lock Register Implemented */
+#define OR1K_SPR_SYS_DCCFGR_CBLRI_OFFSET 12
+#define OR1K_SPR_SYS_DCCFGR_CBLRI_MASK   0x00001000
+#define OR1K_SPR_SYS_DCCFGR_CBLRI_GET(X) (((X) >> 12) & 0x1)
+#define OR1K_SPR_SYS_DCCFGR_CBLRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffefff)) | ((!!(Y)) << 12))
+
+/* Cache Block Flush Register Implemented */
+#define OR1K_SPR_SYS_DCCFGR_CBFRI_OFFSET 13
+#define OR1K_SPR_SYS_DCCFGR_CBFRI_MASK   0x00002000
+#define OR1K_SPR_SYS_DCCFGR_CBFRI_GET(X) (((X) >> 13) & 0x1)
+#define OR1K_SPR_SYS_DCCFGR_CBFRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffdfff)) | ((!!(Y)) << 13))
+
+/* Cache Block Write-back Register Implemented */
+#define OR1K_SPR_SYS_DCCFGR_CBWBRI_OFFSET 14
+#define OR1K_SPR_SYS_DCCFGR_CBWBRI_MASK   0x00004000
+#define OR1K_SPR_SYS_DCCFGR_CBWBRI_GET(X) (((X) >> 14) & 0x1)
+#define OR1K_SPR_SYS_DCCFGR_CBWBRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffbfff)) | ((!!(Y)) << 14))
+
+
+/* Instruction Cache Configuration Register */
+#define OR1K_SPR_SYS_ICCFGR_INDEX OR1K_UNSIGNED(0x006)
+#define OR1K_SPR_SYS_ICCFGR_ADDR  OR1K_UNSIGNED(0x0006)
+
+/* Number of Cache Ways */
+#define OR1K_SPR_SYS_ICCFGR_NCW_LSB    0
+#define OR1K_SPR_SYS_ICCFGR_NCW_MSB    2
+#define OR1K_SPR_SYS_ICCFGR_NCW_BITS   3
+#define OR1K_SPR_SYS_ICCFGR_NCW_MASK   OR1K_UNSIGNED(0x00000007)
+#define OR1K_SPR_SYS_ICCFGR_NCW_GET(X) (((X) >> 0) & OR1K_UNSIGNED(0x00000007))
+#define OR1K_SPR_SYS_ICCFGR_NCW_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff8)) | ((Y) << 0))
+
+/* Number of Cache Sets */
+#define OR1K_SPR_SYS_ICCFGR_NCS_LSB    3
+#define OR1K_SPR_SYS_ICCFGR_NCS_MSB    6
+#define OR1K_SPR_SYS_ICCFGR_NCS_BITS   4
+#define OR1K_SPR_SYS_ICCFGR_NCS_MASK   OR1K_UNSIGNED(0x00000078)
+#define OR1K_SPR_SYS_ICCFGR_NCS_GET(X) (((X) >> 3) & OR1K_UNSIGNED(0x0000000f))
+#define OR1K_SPR_SYS_ICCFGR_NCS_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff87)) | ((Y) << 3))
+
+/* Cache Block Size */
+#define OR1K_SPR_SYS_ICCFGR_CBS_OFFSET 7
+#define OR1K_SPR_SYS_ICCFGR_CBS_MASK   0x00000080
+#define OR1K_SPR_SYS_ICCFGR_CBS_GET(X) (((X) >> 7) & 0x1)
+#define OR1K_SPR_SYS_ICCFGR_CBS_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff7f)) | ((!!(Y)) << 7))
+
+/* 16 Bytes */
+#define OR1K_SPR_SYS_ICCFGR_CBS_16 0
+/* 32 Bytes */
+#define OR1K_SPR_SYS_ICCFGR_CBS_32 1
+/* Cache Control Register Implemented */
+#define OR1K_SPR_SYS_ICCFGR_CCRI_OFFSET 9
+#define OR1K_SPR_SYS_ICCFGR_CCRI_MASK   0x00000200
+#define OR1K_SPR_SYS_ICCFGR_CCRI_GET(X) (((X) >> 9) & 0x1)
+#define OR1K_SPR_SYS_ICCFGR_CCRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((!!(Y)) << 9))
+
+/* Cache Block Invalidate Register Implemented */
+#define OR1K_SPR_SYS_ICCFGR_CBIRI_OFFSET 10
+#define OR1K_SPR_SYS_ICCFGR_CBIRI_MASK   0x00000400
+#define OR1K_SPR_SYS_ICCFGR_CBIRI_GET(X) (((X) >> 10) & 0x1)
+#define OR1K_SPR_SYS_ICCFGR_CBIRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffbff)) | ((!!(Y)) << 10))
+
+/* Cache Block Prefetch Register Implemented */
+#define OR1K_SPR_SYS_ICCFGR_CBPRI_OFFSET 11
+#define OR1K_SPR_SYS_ICCFGR_CBPRI_MASK   0x00000800
+#define OR1K_SPR_SYS_ICCFGR_CBPRI_GET(X) (((X) >> 11) & 0x1)
+#define OR1K_SPR_SYS_ICCFGR_CBPRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffff7ff)) | ((!!(Y)) << 11))
+
+/* Cache Block Lock Register Implemented */
+#define OR1K_SPR_SYS_ICCFGR_CBLRI_OFFSET 12
+#define OR1K_SPR_SYS_ICCFGR_CBLRI_MASK   0x00001000
+#define OR1K_SPR_SYS_ICCFGR_CBLRI_GET(X) (((X) >> 12) & 0x1)
+#define OR1K_SPR_SYS_ICCFGR_CBLRI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffefff)) | ((!!(Y)) << 12))
+
+
+/* Debug Configuration Register */
+#define OR1K_SPR_SYS_DCFGR_INDEX OR1K_UNSIGNED(0x007)
+#define OR1K_SPR_SYS_DCFGR_ADDR  OR1K_UNSIGNED(0x0007)
+
+/* Number of Debug Pairs */
+#define OR1K_SPR_SYS_DCFGR_NDP_LSB    0
+#define OR1K_SPR_SYS_DCFGR_NDP_MSB    2
+#define OR1K_SPR_SYS_DCFGR_NDP_BITS   3
+#define OR1K_SPR_SYS_DCFGR_NDP_MASK   OR1K_UNSIGNED(0x00000007)
+#define OR1K_SPR_SYS_DCFGR_NDP_GET(X) (((X) >> 0) & OR1K_UNSIGNED(0x00000007))
+#define OR1K_SPR_SYS_DCFGR_NDP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff8)) | ((Y) << 0))
+
+/* Watchpoint Counters Implemented */
+#define OR1K_SPR_SYS_DCFGR_WPCI_OFFSET 3
+#define OR1K_SPR_SYS_DCFGR_WPCI_MASK   0x00000008
+#define OR1K_SPR_SYS_DCFGR_WPCI_GET(X) (((X) >> 3) & 0x1)
+#define OR1K_SPR_SYS_DCFGR_WPCI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff7)) | ((!!(Y)) << 3))
+
+
+/* Performance Counters Configuration */
+#define OR1K_SPR_SYS_PCCFGR_INDEX OR1K_UNSIGNED(0x008)
+#define OR1K_SPR_SYS_PCCFGR_ADDR  OR1K_UNSIGNED(0x0008)
+
+/* Number of Performance Counters */
+#define OR1K_SPR_SYS_PCCFGR_NPC_LSB    0
+#define OR1K_SPR_SYS_PCCFGR_NPC_MSB    2
+#define OR1K_SPR_SYS_PCCFGR_NPC_BITS   3
+#define OR1K_SPR_SYS_PCCFGR_NPC_MASK   OR1K_UNSIGNED(0x00000007)
+#define OR1K_SPR_SYS_PCCFGR_NPC_GET(X) (((X) >> 0) & OR1K_UNSIGNED(0x00000007))
+#define OR1K_SPR_SYS_PCCFGR_NPC_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff8)) | ((Y) << 0))
+
+
+/* Version Register 2 */
+#define OR1K_SPR_SYS_VR2_INDEX OR1K_UNSIGNED(0x009)
+#define OR1K_SPR_SYS_VR2_ADDR  OR1K_UNSIGNED(0x0009)
+
+/* Version */
+#define OR1K_SPR_SYS_VR2_VER_LSB    0
+#define OR1K_SPR_SYS_VR2_VER_MSB    23
+#define OR1K_SPR_SYS_VR2_VER_BITS   24
+#define OR1K_SPR_SYS_VR2_VER_MASK   OR1K_UNSIGNED(0x00ffffff)
+#define OR1K_SPR_SYS_VR2_VER_GET(X) (((X) >> 0) & OR1K_UNSIGNED(0x00ffffff))
+#define OR1K_SPR_SYS_VR2_VER_SET(X, Y) (((X) & OR1K_UNSIGNED(0xff000000)) | ((Y) << 0))
+
+/* CPU Identification Number */
+#define OR1K_SPR_SYS_VR2_CPUID_LSB    24
+#define OR1K_SPR_SYS_VR2_CPUID_MSB    31
+#define OR1K_SPR_SYS_VR2_CPUID_BITS   8
+#define OR1K_SPR_SYS_VR2_CPUID_MASK   OR1K_UNSIGNED(0xff000000)
+#define OR1K_SPR_SYS_VR2_CPUID_GET(X) (((X) >> 24) & OR1K_UNSIGNED(0x000000ff))
+#define OR1K_SPR_SYS_VR2_CPUID_SET(X, Y) (((X) & OR1K_UNSIGNED(0x00ffffff)) | ((Y) << 24))
+
+
+/* Architecture Version Register */
+#define OR1K_SPR_SYS_AVR_INDEX OR1K_UNSIGNED(0x00a)
+#define OR1K_SPR_SYS_AVR_ADDR  OR1K_UNSIGNED(0x000a)
+
+/* Architecture Revision Number */
+#define OR1K_SPR_SYS_AVR_REV_LSB    8
+#define OR1K_SPR_SYS_AVR_REV_MSB    15
+#define OR1K_SPR_SYS_AVR_REV_BITS   8
+#define OR1K_SPR_SYS_AVR_REV_MASK   OR1K_UNSIGNED(0x0000ff00)
+#define OR1K_SPR_SYS_AVR_REV_GET(X) (((X) >> 8) & OR1K_UNSIGNED(0x000000ff))
+#define OR1K_SPR_SYS_AVR_REV_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffff00ff)) | ((Y) << 8))
+
+/* Minor Architecture Version Number */
+#define OR1K_SPR_SYS_AVR_MIN_LSB    16
+#define OR1K_SPR_SYS_AVR_MIN_MSB    23
+#define OR1K_SPR_SYS_AVR_MIN_BITS   8
+#define OR1K_SPR_SYS_AVR_MIN_MASK   OR1K_UNSIGNED(0x00ff0000)
+#define OR1K_SPR_SYS_AVR_MIN_GET(X) (((X) >> 16) & OR1K_UNSIGNED(0x000000ff))
+#define OR1K_SPR_SYS_AVR_MIN_SET(X, Y) (((X) & OR1K_UNSIGNED(0xff00ffff)) | ((Y) << 16))
+
+/* Major Architecture Version Number */
+#define OR1K_SPR_SYS_AVR_MAJ_LSB    24
+#define OR1K_SPR_SYS_AVR_MAJ_MSB    31
+#define OR1K_SPR_SYS_AVR_MAJ_BITS   8
+#define OR1K_SPR_SYS_AVR_MAJ_MASK   OR1K_UNSIGNED(0xff000000)
+#define OR1K_SPR_SYS_AVR_MAJ_GET(X) (((X) >> 24) & OR1K_UNSIGNED(0x000000ff))
+#define OR1K_SPR_SYS_AVR_MAJ_SET(X, Y) (((X) & OR1K_UNSIGNED(0x00ffffff)) | ((Y) << 24))
+
+
+/* Exception Vector Base Address Register */
+#define OR1K_SPR_SYS_EVBAR_INDEX OR1K_UNSIGNED(0x00b)
+#define OR1K_SPR_SYS_EVBAR_ADDR  OR1K_UNSIGNED(0x000b)
+
+/* Exception Vector Base Address */
+#define OR1K_SPR_SYS_EVBAR_EVBA_LSB    13
+#define OR1K_SPR_SYS_EVBAR_EVBA_MSB    31
+#define OR1K_SPR_SYS_EVBAR_EVBA_BITS   19
+#define OR1K_SPR_SYS_EVBAR_EVBA_MASK   OR1K_UNSIGNED(0xffffe000)
+#define OR1K_SPR_SYS_EVBAR_EVBA_GET(X) (((X) >> 13) & OR1K_UNSIGNED(0x0007ffff))
+#define OR1K_SPR_SYS_EVBAR_EVBA_SET(X, Y) (((X) & OR1K_UNSIGNED(0x00001fff)) | ((Y) << 13))
+
+
+/* Arithmetic Exception Control Register */
+#define OR1K_SPR_SYS_AECR_INDEX OR1K_UNSIGNED(0x00c)
+#define OR1K_SPR_SYS_AECR_ADDR  OR1K_UNSIGNED(0x000c)
+
+/* Carry on Add Exception Enabled */
+#define OR1K_SPR_SYS_AECR_CYADDE_OFFSET 0
+#define OR1K_SPR_SYS_AECR_CYADDE_MASK   0x00000001
+#define OR1K_SPR_SYS_AECR_CYADDE_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_SYS_AECR_CYADDE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Overflow on Add Exception Enabled */
+#define OR1K_SPR_SYS_AECR_OVADDE_OFFSET 1
+#define OR1K_SPR_SYS_AECR_OVADDE_MASK   0x00000002
+#define OR1K_SPR_SYS_AECR_OVADDE_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_SYS_AECR_OVADDE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* Carry on Multiply Exception Enabled */
+#define OR1K_SPR_SYS_AECR_CYMULE_OFFSET 2
+#define OR1K_SPR_SYS_AECR_CYMULE_MASK   0x00000004
+#define OR1K_SPR_SYS_AECR_CYMULE_GET(X) (((X) >> 2) & 0x1)
+#define OR1K_SPR_SYS_AECR_CYMULE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffb)) | ((!!(Y)) << 2))
+
+/* Overflow on Multiply Exception Enabled */
+#define OR1K_SPR_SYS_AECR_OVMULE_OFFSET 3
+#define OR1K_SPR_SYS_AECR_OVMULE_MASK   0x00000008
+#define OR1K_SPR_SYS_AECR_OVMULE_GET(X) (((X) >> 3) & 0x1)
+#define OR1K_SPR_SYS_AECR_OVMULE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff7)) | ((!!(Y)) << 3))
+
+/* Divide by Zero Exception Enabled */
+#define OR1K_SPR_SYS_AECR_DBZE_OFFSET 4
+#define OR1K_SPR_SYS_AECR_DBZE_MASK   0x00000010
+#define OR1K_SPR_SYS_AECR_DBZE_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_SYS_AECR_DBZE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* Carry on MAC Addition Exception Enabled */
+#define OR1K_SPR_SYS_AECR_CYMACADDE_OFFSET 5
+#define OR1K_SPR_SYS_AECR_CYMACADDE_MASK   0x00000020
+#define OR1K_SPR_SYS_AECR_CYMACADDE_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_SYS_AECR_CYMACADDE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* Overflow on MAC Addition Exception Enabled */
+#define OR1K_SPR_SYS_AECR_OVMACADDE_OFFSET 6
+#define OR1K_SPR_SYS_AECR_OVMACADDE_MASK   0x00000040
+#define OR1K_SPR_SYS_AECR_OVMACADDE_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_SYS_AECR_OVMACADDE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+
+/* Arithmetic Exception Status Register */
+#define OR1K_SPR_SYS_AESR_INDEX OR1K_UNSIGNED(0x00d)
+#define OR1K_SPR_SYS_AESR_ADDR  OR1K_UNSIGNED(0x000d)
+
+/* Carry on Add Exception */
+#define OR1K_SPR_SYS_AESR_CYADDE_OFFSET 0
+#define OR1K_SPR_SYS_AESR_CYADDE_MASK   0x00000001
+#define OR1K_SPR_SYS_AESR_CYADDE_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_SYS_AESR_CYADDE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Overflow on Add Exception */
+#define OR1K_SPR_SYS_AESR_OVADDE_OFFSET 1
+#define OR1K_SPR_SYS_AESR_OVADDE_MASK   0x00000002
+#define OR1K_SPR_SYS_AESR_OVADDE_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_SYS_AESR_OVADDE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* Carry on Multiply Exception */
+#define OR1K_SPR_SYS_AESR_CYMULE_OFFSET 2
+#define OR1K_SPR_SYS_AESR_CYMULE_MASK   0x00000004
+#define OR1K_SPR_SYS_AESR_CYMULE_GET(X) (((X) >> 2) & 0x1)
+#define OR1K_SPR_SYS_AESR_CYMULE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffb)) | ((!!(Y)) << 2))
+
+/* Overflow on Multiply Exception */
+#define OR1K_SPR_SYS_AESR_OVMULE_OFFSET 3
+#define OR1K_SPR_SYS_AESR_OVMULE_MASK   0x00000008
+#define OR1K_SPR_SYS_AESR_OVMULE_GET(X) (((X) >> 3) & 0x1)
+#define OR1K_SPR_SYS_AESR_OVMULE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff7)) | ((!!(Y)) << 3))
+
+/* Divide by Zero Exception */
+#define OR1K_SPR_SYS_AESR_DBZE_OFFSET 4
+#define OR1K_SPR_SYS_AESR_DBZE_MASK   0x00000010
+#define OR1K_SPR_SYS_AESR_DBZE_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_SYS_AESR_DBZE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* Carry on MAC Addition Exception */
+#define OR1K_SPR_SYS_AESR_CYMACADDE_OFFSET 5
+#define OR1K_SPR_SYS_AESR_CYMACADDE_MASK   0x00000020
+#define OR1K_SPR_SYS_AESR_CYMACADDE_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_SYS_AESR_CYMACADDE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* Overflow on MAC Addition Exception */
+#define OR1K_SPR_SYS_AESR_OVMACADDE_OFFSET 6
+#define OR1K_SPR_SYS_AESR_OVMACADDE_MASK   0x00000040
+#define OR1K_SPR_SYS_AESR_OVMACADDE_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_SYS_AESR_OVMACADDE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+
+/* Next Program Counter */
+#define OR1K_SPR_SYS_NPC_INDEX OR1K_UNSIGNED(0x010)
+#define OR1K_SPR_SYS_NPC_ADDR  OR1K_UNSIGNED(0x0010)
+
+
+/* Supervision Register */
+#define OR1K_SPR_SYS_SR_INDEX OR1K_UNSIGNED(0x011)
+#define OR1K_SPR_SYS_SR_ADDR  OR1K_UNSIGNED(0x0011)
+
+/* Supervisor Mode */
+#define OR1K_SPR_SYS_SR_SM_OFFSET 0
+#define OR1K_SPR_SYS_SR_SM_MASK   0x00000001
+#define OR1K_SPR_SYS_SR_SM_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_SYS_SR_SM_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Tick Timer Exception Enabled */
+#define OR1K_SPR_SYS_SR_TEE_OFFSET 1
+#define OR1K_SPR_SYS_SR_TEE_MASK   0x00000002
+#define OR1K_SPR_SYS_SR_TEE_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_SYS_SR_TEE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* Interrupt Exception Enabled */
+#define OR1K_SPR_SYS_SR_IEE_OFFSET 2
+#define OR1K_SPR_SYS_SR_IEE_MASK   0x00000004
+#define OR1K_SPR_SYS_SR_IEE_GET(X) (((X) >> 2) & 0x1)
+#define OR1K_SPR_SYS_SR_IEE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffb)) | ((!!(Y)) << 2))
+
+/* Data Cache Enabled */
+#define OR1K_SPR_SYS_SR_DCE_OFFSET 3
+#define OR1K_SPR_SYS_SR_DCE_MASK   0x00000008
+#define OR1K_SPR_SYS_SR_DCE_GET(X) (((X) >> 3) & 0x1)
+#define OR1K_SPR_SYS_SR_DCE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff7)) | ((!!(Y)) << 3))
+
+/* Instruction Cache Enabled */
+#define OR1K_SPR_SYS_SR_ICE_OFFSET 4
+#define OR1K_SPR_SYS_SR_ICE_MASK   0x00000010
+#define OR1K_SPR_SYS_SR_ICE_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_SYS_SR_ICE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* Data MMU Enabled */
+#define OR1K_SPR_SYS_SR_DME_OFFSET 5
+#define OR1K_SPR_SYS_SR_DME_MASK   0x00000020
+#define OR1K_SPR_SYS_SR_DME_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_SYS_SR_DME_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* Instruction MMU Enabled */
+#define OR1K_SPR_SYS_SR_IME_OFFSET 6
+#define OR1K_SPR_SYS_SR_IME_MASK   0x00000040
+#define OR1K_SPR_SYS_SR_IME_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_SYS_SR_IME_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+/* Little Endian Enabled */
+#define OR1K_SPR_SYS_SR_LEE_OFFSET 7
+#define OR1K_SPR_SYS_SR_LEE_MASK   0x00000080
+#define OR1K_SPR_SYS_SR_LEE_GET(X) (((X) >> 7) & 0x1)
+#define OR1K_SPR_SYS_SR_LEE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff7f)) | ((!!(Y)) << 7))
+
+/* CID Enable */
+#define OR1K_SPR_SYS_SR_CE_OFFSET 8
+#define OR1K_SPR_SYS_SR_CE_MASK   0x00000100
+#define OR1K_SPR_SYS_SR_CE_GET(X) (((X) >> 8) & 0x1)
+#define OR1K_SPR_SYS_SR_CE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffeff)) | ((!!(Y)) << 8))
+
+/* Flag */
+#define OR1K_SPR_SYS_SR_F_OFFSET 9
+#define OR1K_SPR_SYS_SR_F_MASK   0x00000200
+#define OR1K_SPR_SYS_SR_F_GET(X) (((X) >> 9) & 0x1)
+#define OR1K_SPR_SYS_SR_F_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((!!(Y)) << 9))
+
+/* Carry */
+#define OR1K_SPR_SYS_SR_CY_OFFSET 10
+#define OR1K_SPR_SYS_SR_CY_MASK   0x00000400
+#define OR1K_SPR_SYS_SR_CY_GET(X) (((X) >> 10) & 0x1)
+#define OR1K_SPR_SYS_SR_CY_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffbff)) | ((!!(Y)) << 10))
+
+/* Overflow */
+#define OR1K_SPR_SYS_SR_OV_OFFSET 11
+#define OR1K_SPR_SYS_SR_OV_MASK   0x00000800
+#define OR1K_SPR_SYS_SR_OV_GET(X) (((X) >> 11) & 0x1)
+#define OR1K_SPR_SYS_SR_OV_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffff7ff)) | ((!!(Y)) << 11))
+
+/* Overflow Exception Enabled */
+#define OR1K_SPR_SYS_SR_OVE_OFFSET 12
+#define OR1K_SPR_SYS_SR_OVE_MASK   0x00001000
+#define OR1K_SPR_SYS_SR_OVE_GET(X) (((X) >> 12) & 0x1)
+#define OR1K_SPR_SYS_SR_OVE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffefff)) | ((!!(Y)) << 12))
+
+/* Delay-slot Exception */
+#define OR1K_SPR_SYS_SR_DSX_OFFSET 13
+#define OR1K_SPR_SYS_SR_DSX_MASK   0x00002000
+#define OR1K_SPR_SYS_SR_DSX_GET(X) (((X) >> 13) & 0x1)
+#define OR1K_SPR_SYS_SR_DSX_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffdfff)) | ((!!(Y)) << 13))
+
+/* Exception Prefix High */
+#define OR1K_SPR_SYS_SR_EPH_OFFSET 14
+#define OR1K_SPR_SYS_SR_EPH_MASK   0x00004000
+#define OR1K_SPR_SYS_SR_EPH_GET(X) (((X) >> 14) & 0x1)
+#define OR1K_SPR_SYS_SR_EPH_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffbfff)) | ((!!(Y)) << 14))
+
+/* Fixed One */
+#define OR1K_SPR_SYS_SR_FO_OFFSET 15
+#define OR1K_SPR_SYS_SR_FO_MASK   0x00008000
+#define OR1K_SPR_SYS_SR_FO_GET(X) (((X) >> 15) & 0x1)
+#define OR1K_SPR_SYS_SR_FO_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffff7fff)) | ((!!(Y)) << 15))
+
+/* SPR User Mode Read Access */
+#define OR1K_SPR_SYS_SR_SUMRA_OFFSET 16
+#define OR1K_SPR_SYS_SR_SUMRA_MASK   0x00010000
+#define OR1K_SPR_SYS_SR_SUMRA_GET(X) (((X) >> 16) & 0x1)
+#define OR1K_SPR_SYS_SR_SUMRA_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffeffff)) | ((!!(Y)) << 16))
+
+/* Context ID */
+#define OR1K_SPR_SYS_SR_CID_LSB    28
+#define OR1K_SPR_SYS_SR_CID_MSB    31
+#define OR1K_SPR_SYS_SR_CID_BITS   4
+#define OR1K_SPR_SYS_SR_CID_MASK   OR1K_UNSIGNED(0xf0000000)
+#define OR1K_SPR_SYS_SR_CID_GET(X) (((X) >> 28) & OR1K_UNSIGNED(0x0000000f))
+#define OR1K_SPR_SYS_SR_CID_SET(X, Y) (((X) & OR1K_UNSIGNED(0x0fffffff)) | ((Y) << 28))
+
+
+/* Previous Program Counter */
+#define OR1K_SPR_SYS_PPC_INDEX OR1K_UNSIGNED(0x012)
+#define OR1K_SPR_SYS_PPC_ADDR  OR1K_UNSIGNED(0x0012)
+
+
+/* Floating Point Control Status Register */
+#define OR1K_SPR_SYS_FPCSR_INDEX OR1K_UNSIGNED(0x014)
+#define OR1K_SPR_SYS_FPCSR_ADDR  OR1K_UNSIGNED(0x0014)
+
+/* Floating Point Exception Enabled */
+#define OR1K_SPR_SYS_FPCSR_FPEE_OFFSET 0
+#define OR1K_SPR_SYS_FPCSR_FPEE_MASK   0x00000001
+#define OR1K_SPR_SYS_FPCSR_FPEE_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_SYS_FPCSR_FPEE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Rounding Mode */
+#define OR1K_SPR_SYS_FPCSR_RM_LSB    1
+#define OR1K_SPR_SYS_FPCSR_RM_MSB    2
+#define OR1K_SPR_SYS_FPCSR_RM_BITS   2
+#define OR1K_SPR_SYS_FPCSR_RM_MASK   OR1K_UNSIGNED(0x00000006)
+#define OR1K_SPR_SYS_FPCSR_RM_GET(X) (((X) >> 1) & OR1K_UNSIGNED(0x00000003))
+#define OR1K_SPR_SYS_FPCSR_RM_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff9)) | ((Y) << 1))
+
+/* Round to nearest */
+#define OR1K_SPR_SYS_FPCSR_RM_NEAREST 0
+/* Round to zero */
+#define OR1K_SPR_SYS_FPCSR_RM_ZERO 1
+/* Round to infinity+ */
+#define OR1K_SPR_SYS_FPCSR_RM_INFPLUS 2
+/* Round to infinity- */
+#define OR1K_SPR_SYS_FPCSR_RM_INFMINUS 3
+/* Overflow Flag */
+#define OR1K_SPR_SYS_FPCSR_OVF_OFFSET 3
+#define OR1K_SPR_SYS_FPCSR_OVF_MASK   0x00000008
+#define OR1K_SPR_SYS_FPCSR_OVF_GET(X) (((X) >> 3) & 0x1)
+#define OR1K_SPR_SYS_FPCSR_OVF_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff7)) | ((!!(Y)) << 3))
+
+/* Underflow Flag */
+#define OR1K_SPR_SYS_FPCSR_UNF_OFFSET 4
+#define OR1K_SPR_SYS_FPCSR_UNF_MASK   0x00000010
+#define OR1K_SPR_SYS_FPCSR_UNF_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_SYS_FPCSR_UNF_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* SNAN Flag */
+#define OR1K_SPR_SYS_FPCSR_SNF_OFFSET 5
+#define OR1K_SPR_SYS_FPCSR_SNF_MASK   0x00000020
+#define OR1K_SPR_SYS_FPCSR_SNF_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_SYS_FPCSR_SNF_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* QNAN Flag */
+#define OR1K_SPR_SYS_FPCSR_QNF_OFFSET 6
+#define OR1K_SPR_SYS_FPCSR_QNF_MASK   0x00000040
+#define OR1K_SPR_SYS_FPCSR_QNF_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_SYS_FPCSR_QNF_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+/* Zero Flag */
+#define OR1K_SPR_SYS_FPCSR_ZF_OFFSET 7
+#define OR1K_SPR_SYS_FPCSR_ZF_MASK   0x00000080
+#define OR1K_SPR_SYS_FPCSR_ZF_GET(X) (((X) >> 7) & 0x1)
+#define OR1K_SPR_SYS_FPCSR_ZF_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff7f)) | ((!!(Y)) << 7))
+
+/* Inexact Flag */
+#define OR1K_SPR_SYS_FPCSR_IXF_OFFSET 8
+#define OR1K_SPR_SYS_FPCSR_IXF_MASK   0x00000100
+#define OR1K_SPR_SYS_FPCSR_IXF_GET(X) (((X) >> 8) & 0x1)
+#define OR1K_SPR_SYS_FPCSR_IXF_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffeff)) | ((!!(Y)) << 8))
+
+/* Invalid Flag */
+#define OR1K_SPR_SYS_FPCSR_IVF_OFFSET 9
+#define OR1K_SPR_SYS_FPCSR_IVF_MASK   0x00000200
+#define OR1K_SPR_SYS_FPCSR_IVF_GET(X) (((X) >> 9) & 0x1)
+#define OR1K_SPR_SYS_FPCSR_IVF_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((!!(Y)) << 9))
+
+/* Infinity Flag */
+#define OR1K_SPR_SYS_FPCSR_INF_OFFSET 10
+#define OR1K_SPR_SYS_FPCSR_INF_MASK   0x00000400
+#define OR1K_SPR_SYS_FPCSR_INF_GET(X) (((X) >> 10) & 0x1)
+#define OR1K_SPR_SYS_FPCSR_INF_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffbff)) | ((!!(Y)) << 10))
+
+/* Divide by Zero Flag */
+#define OR1K_SPR_SYS_FPCSR_DZF_OFFSET 11
+#define OR1K_SPR_SYS_FPCSR_DZF_MASK   0x00000800
+#define OR1K_SPR_SYS_FPCSR_DZF_GET(X) (((X) >> 11) & 0x1)
+#define OR1K_SPR_SYS_FPCSR_DZF_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffff7ff)) | ((!!(Y)) << 11))
+
+
+/* Implementation-specific Registers */
+#define OR1K_SPR_SYS_ISR_BASE     OR1K_UNSIGNED(0x015)
+#define OR1K_SPR_SYS_ISR_COUNT    OR1K_UNSIGNED(0x008)
+#define OR1K_SPR_SYS_ISR_STEP     OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_SYS_ISR_INDEX(N) (OR1K_SPR_SYS_ISR_BASE + ((N) * OR1K_SPR_SYS_ISR_STEP))
+#define OR1K_SPR_SYS_ISR_ADDR(N)  ((OR1K_SPR_SYS_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_SYS_ISR_INDEX(N))
+
+
+/* Exception PC Registers */
+#define OR1K_SPR_SYS_EPCR_BASE     OR1K_UNSIGNED(0x020)
+#define OR1K_SPR_SYS_EPCR_COUNT    OR1K_UNSIGNED(0x010)
+#define OR1K_SPR_SYS_EPCR_STEP     OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_SYS_EPCR_INDEX(N) (OR1K_SPR_SYS_EPCR_BASE + ((N) * OR1K_SPR_SYS_EPCR_STEP))
+#define OR1K_SPR_SYS_EPCR_ADDR(N)  ((OR1K_SPR_SYS_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_SYS_EPCR_INDEX(N))
+
+
+/* Exception Effective Address Registers */
+#define OR1K_SPR_SYS_EEAR_BASE     OR1K_UNSIGNED(0x030)
+#define OR1K_SPR_SYS_EEAR_COUNT    OR1K_UNSIGNED(0x010)
+#define OR1K_SPR_SYS_EEAR_STEP     OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_SYS_EEAR_INDEX(N) (OR1K_SPR_SYS_EEAR_BASE + ((N) * OR1K_SPR_SYS_EEAR_STEP))
+#define OR1K_SPR_SYS_EEAR_ADDR(N)  ((OR1K_SPR_SYS_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_SYS_EEAR_INDEX(N))
+
+
+/* Exception Supervision Registers */
+#define OR1K_SPR_SYS_ESR_BASE     OR1K_UNSIGNED(0x040)
+#define OR1K_SPR_SYS_ESR_COUNT    OR1K_UNSIGNED(0x010)
+#define OR1K_SPR_SYS_ESR_STEP     OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_SYS_ESR_INDEX(N) (OR1K_SPR_SYS_ESR_BASE + ((N) * OR1K_SPR_SYS_ESR_STEP))
+#define OR1K_SPR_SYS_ESR_ADDR(N)  ((OR1K_SPR_SYS_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_SYS_ESR_INDEX(N))
+
+/* Supervisor Mode */
+#define OR1K_SPR_SYS_ESR_SM_OFFSET 0
+#define OR1K_SPR_SYS_ESR_SM_MASK   0x00000001
+#define OR1K_SPR_SYS_ESR_SM_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_SYS_ESR_SM_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Tick Timer Exception Enabled */
+#define OR1K_SPR_SYS_ESR_TEE_OFFSET 1
+#define OR1K_SPR_SYS_ESR_TEE_MASK   0x00000002
+#define OR1K_SPR_SYS_ESR_TEE_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_SYS_ESR_TEE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* Interrupt Exception Enabled */
+#define OR1K_SPR_SYS_ESR_IEE_OFFSET 2
+#define OR1K_SPR_SYS_ESR_IEE_MASK   0x00000004
+#define OR1K_SPR_SYS_ESR_IEE_GET(X) (((X) >> 2) & 0x1)
+#define OR1K_SPR_SYS_ESR_IEE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffb)) | ((!!(Y)) << 2))
+
+/* Data Cache Enabled */
+#define OR1K_SPR_SYS_ESR_DCE_OFFSET 3
+#define OR1K_SPR_SYS_ESR_DCE_MASK   0x00000008
+#define OR1K_SPR_SYS_ESR_DCE_GET(X) (((X) >> 3) & 0x1)
+#define OR1K_SPR_SYS_ESR_DCE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff7)) | ((!!(Y)) << 3))
+
+/* Instruction Cache Enabled */
+#define OR1K_SPR_SYS_ESR_ICE_OFFSET 4
+#define OR1K_SPR_SYS_ESR_ICE_MASK   0x00000010
+#define OR1K_SPR_SYS_ESR_ICE_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_SYS_ESR_ICE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* Data MMU Enabled */
+#define OR1K_SPR_SYS_ESR_DME_OFFSET 5
+#define OR1K_SPR_SYS_ESR_DME_MASK   0x00000020
+#define OR1K_SPR_SYS_ESR_DME_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_SYS_ESR_DME_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* Instruction MMU Enabled */
+#define OR1K_SPR_SYS_ESR_IME_OFFSET 6
+#define OR1K_SPR_SYS_ESR_IME_MASK   0x00000040
+#define OR1K_SPR_SYS_ESR_IME_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_SYS_ESR_IME_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+/* Little Endian Enabled */
+#define OR1K_SPR_SYS_ESR_LEE_OFFSET 7
+#define OR1K_SPR_SYS_ESR_LEE_MASK   0x00000080
+#define OR1K_SPR_SYS_ESR_LEE_GET(X) (((X) >> 7) & 0x1)
+#define OR1K_SPR_SYS_ESR_LEE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff7f)) | ((!!(Y)) << 7))
+
+/* CID Enable */
+#define OR1K_SPR_SYS_ESR_CE_OFFSET 8
+#define OR1K_SPR_SYS_ESR_CE_MASK   0x00000100
+#define OR1K_SPR_SYS_ESR_CE_GET(X) (((X) >> 8) & 0x1)
+#define OR1K_SPR_SYS_ESR_CE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffeff)) | ((!!(Y)) << 8))
+
+/* Flag */
+#define OR1K_SPR_SYS_ESR_F_OFFSET 9
+#define OR1K_SPR_SYS_ESR_F_MASK   0x00000200
+#define OR1K_SPR_SYS_ESR_F_GET(X) (((X) >> 9) & 0x1)
+#define OR1K_SPR_SYS_ESR_F_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((!!(Y)) << 9))
+
+/* Carry */
+#define OR1K_SPR_SYS_ESR_CY_OFFSET 10
+#define OR1K_SPR_SYS_ESR_CY_MASK   0x00000400
+#define OR1K_SPR_SYS_ESR_CY_GET(X) (((X) >> 10) & 0x1)
+#define OR1K_SPR_SYS_ESR_CY_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffbff)) | ((!!(Y)) << 10))
+
+/* Overflow */
+#define OR1K_SPR_SYS_ESR_OV_OFFSET 11
+#define OR1K_SPR_SYS_ESR_OV_MASK   0x00000800
+#define OR1K_SPR_SYS_ESR_OV_GET(X) (((X) >> 11) & 0x1)
+#define OR1K_SPR_SYS_ESR_OV_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffff7ff)) | ((!!(Y)) << 11))
+
+/* Overflow Exception Enabled */
+#define OR1K_SPR_SYS_ESR_OVE_OFFSET 12
+#define OR1K_SPR_SYS_ESR_OVE_MASK   0x00001000
+#define OR1K_SPR_SYS_ESR_OVE_GET(X) (((X) >> 12) & 0x1)
+#define OR1K_SPR_SYS_ESR_OVE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffefff)) | ((!!(Y)) << 12))
+
+/* Delay-slot Exception */
+#define OR1K_SPR_SYS_ESR_DSX_OFFSET 13
+#define OR1K_SPR_SYS_ESR_DSX_MASK   0x00002000
+#define OR1K_SPR_SYS_ESR_DSX_GET(X) (((X) >> 13) & 0x1)
+#define OR1K_SPR_SYS_ESR_DSX_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffdfff)) | ((!!(Y)) << 13))
+
+/* Exception Prefix High */
+#define OR1K_SPR_SYS_ESR_EPH_OFFSET 14
+#define OR1K_SPR_SYS_ESR_EPH_MASK   0x00004000
+#define OR1K_SPR_SYS_ESR_EPH_GET(X) (((X) >> 14) & 0x1)
+#define OR1K_SPR_SYS_ESR_EPH_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffbfff)) | ((!!(Y)) << 14))
+
+/* Fixed One */
+#define OR1K_SPR_SYS_ESR_FO_OFFSET 15
+#define OR1K_SPR_SYS_ESR_FO_MASK   0x00008000
+#define OR1K_SPR_SYS_ESR_FO_GET(X) (((X) >> 15) & 0x1)
+#define OR1K_SPR_SYS_ESR_FO_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffff7fff)) | ((!!(Y)) << 15))
+
+/* SPR User Mode Read Access */
+#define OR1K_SPR_SYS_ESR_SUMRA_OFFSET 16
+#define OR1K_SPR_SYS_ESR_SUMRA_MASK   0x00010000
+#define OR1K_SPR_SYS_ESR_SUMRA_GET(X) (((X) >> 16) & 0x1)
+#define OR1K_SPR_SYS_ESR_SUMRA_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffeffff)) | ((!!(Y)) << 16))
+
+/* Context ID */
+#define OR1K_SPR_SYS_ESR_CID_LSB    28
+#define OR1K_SPR_SYS_ESR_CID_MSB    31
+#define OR1K_SPR_SYS_ESR_CID_BITS   4
+#define OR1K_SPR_SYS_ESR_CID_MASK   OR1K_UNSIGNED(0xf0000000)
+#define OR1K_SPR_SYS_ESR_CID_GET(X) (((X) >> 28) & OR1K_UNSIGNED(0x0000000f))
+#define OR1K_SPR_SYS_ESR_CID_SET(X, Y) (((X) & OR1K_UNSIGNED(0x0fffffff)) | ((Y) << 28))
+
+
+/* Core identifier (multicore) */
+#define OR1K_SPR_SYS_COREID_INDEX OR1K_UNSIGNED(0x080)
+#define OR1K_SPR_SYS_COREID_ADDR  OR1K_UNSIGNED(0x0080)
+
+
+/* Number of cores (multicore) */
+#define OR1K_SPR_SYS_NUMCORES_INDEX OR1K_UNSIGNED(0x081)
+#define OR1K_SPR_SYS_NUMCORES_ADDR  OR1K_UNSIGNED(0x0081)
+
+
+/* General Purpose Registers */
+#define OR1K_SPR_SYS_GPR_BASE     OR1K_UNSIGNED(0x400)
+#define OR1K_SPR_SYS_GPR_COUNT    OR1K_UNSIGNED(0x100)
+#define OR1K_SPR_SYS_GPR_STEP     OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_SYS_GPR_INDEX(N) (OR1K_SPR_SYS_GPR_BASE + ((N) * OR1K_SPR_SYS_GPR_STEP))
+#define OR1K_SPR_SYS_GPR_ADDR(N)  ((OR1K_SPR_SYS_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_SYS_GPR_INDEX(N))
+
+
+/******************/
+/* Data MMU Group */
+/******************/
+#define OR1K_SPR_DMMU_GROUP 0x01
+
+/* Data MMU Control Register */
+#define OR1K_SPR_DMMU_DMMUCR_INDEX OR1K_UNSIGNED(0x000)
+#define OR1K_SPR_DMMU_DMMUCR_ADDR  OR1K_UNSIGNED(0x0800)
+
+/* DTLB Flush */
+#define OR1K_SPR_DMMU_DMMUCR_DTF_OFFSET 0
+#define OR1K_SPR_DMMU_DMMUCR_DTF_MASK   0x00000001
+#define OR1K_SPR_DMMU_DMMUCR_DTF_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_DMMU_DMMUCR_DTF_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Page Table Base Pointer */
+#define OR1K_SPR_DMMU_DMMUCR_PTBP_LSB    10
+#define OR1K_SPR_DMMU_DMMUCR_PTBP_MSB    31
+#define OR1K_SPR_DMMU_DMMUCR_PTBP_BITS   22
+#define OR1K_SPR_DMMU_DMMUCR_PTBP_MASK   OR1K_UNSIGNED(0xfffffc00)
+#define OR1K_SPR_DMMU_DMMUCR_PTBP_GET(X) (((X) >> 10) & OR1K_UNSIGNED(0x003fffff))
+#define OR1K_SPR_DMMU_DMMUCR_PTBP_SET(X, Y) (((X) & OR1K_UNSIGNED(0x000003ff)) | ((Y) << 10))
+
+
+/* Data MMU Protection Register */
+#define OR1K_SPR_DMMU_DMMUPR_INDEX OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_DMMU_DMMUPR_ADDR  OR1K_UNSIGNED(0x0801)
+
+/* Supervisor Read Enable 1 */
+#define OR1K_SPR_DMMU_DMMUPR_SRE1_OFFSET 0
+#define OR1K_SPR_DMMU_DMMUPR_SRE1_MASK   0x00000001
+#define OR1K_SPR_DMMU_DMMUPR_SRE1_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_SRE1_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Supervisor Write Enable 1 */
+#define OR1K_SPR_DMMU_DMMUPR_SWE1_OFFSET 1
+#define OR1K_SPR_DMMU_DMMUPR_SWE1_MASK   0x00000002
+#define OR1K_SPR_DMMU_DMMUPR_SWE1_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_SWE1_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* User Read Enable 1 */
+#define OR1K_SPR_DMMU_DMMUPR_URE1_OFFSET 2
+#define OR1K_SPR_DMMU_DMMUPR_URE1_MASK   0x00000004
+#define OR1K_SPR_DMMU_DMMUPR_URE1_GET(X) (((X) >> 2) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_URE1_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffb)) | ((!!(Y)) << 2))
+
+/* User Write Enable 1 */
+#define OR1K_SPR_DMMU_DMMUPR_UWE1_OFFSET 3
+#define OR1K_SPR_DMMU_DMMUPR_UWE1_MASK   0x00000008
+#define OR1K_SPR_DMMU_DMMUPR_UWE1_GET(X) (((X) >> 3) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_UWE1_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff7)) | ((!!(Y)) << 3))
+
+/* Supervisor Read Enable 2 */
+#define OR1K_SPR_DMMU_DMMUPR_SRE2_OFFSET 4
+#define OR1K_SPR_DMMU_DMMUPR_SRE2_MASK   0x00000010
+#define OR1K_SPR_DMMU_DMMUPR_SRE2_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_SRE2_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* Supervisor Write Enable 2 */
+#define OR1K_SPR_DMMU_DMMUPR_SWE2_OFFSET 5
+#define OR1K_SPR_DMMU_DMMUPR_SWE2_MASK   0x00000020
+#define OR1K_SPR_DMMU_DMMUPR_SWE2_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_SWE2_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* User Read Enable 2 */
+#define OR1K_SPR_DMMU_DMMUPR_URE2_OFFSET 6
+#define OR1K_SPR_DMMU_DMMUPR_URE2_MASK   0x00000040
+#define OR1K_SPR_DMMU_DMMUPR_URE2_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_URE2_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+/* User Write Enable 2 */
+#define OR1K_SPR_DMMU_DMMUPR_UWE2_OFFSET 7
+#define OR1K_SPR_DMMU_DMMUPR_UWE2_MASK   0x00000080
+#define OR1K_SPR_DMMU_DMMUPR_UWE2_GET(X) (((X) >> 7) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_UWE2_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff7f)) | ((!!(Y)) << 7))
+
+/* Supervisor Read Enable 3 */
+#define OR1K_SPR_DMMU_DMMUPR_SRE3_OFFSET 8
+#define OR1K_SPR_DMMU_DMMUPR_SRE3_MASK   0x00000100
+#define OR1K_SPR_DMMU_DMMUPR_SRE3_GET(X) (((X) >> 8) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_SRE3_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffeff)) | ((!!(Y)) << 8))
+
+/* Supervisor Write Enable 3 */
+#define OR1K_SPR_DMMU_DMMUPR_SWE3_OFFSET 9
+#define OR1K_SPR_DMMU_DMMUPR_SWE3_MASK   0x00000200
+#define OR1K_SPR_DMMU_DMMUPR_SWE3_GET(X) (((X) >> 9) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_SWE3_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((!!(Y)) << 9))
+
+/* User Read Enable 3 */
+#define OR1K_SPR_DMMU_DMMUPR_URE3_OFFSET 10
+#define OR1K_SPR_DMMU_DMMUPR_URE3_MASK   0x00000400
+#define OR1K_SPR_DMMU_DMMUPR_URE3_GET(X) (((X) >> 10) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_URE3_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffbff)) | ((!!(Y)) << 10))
+
+/* User Write Enable 3 */
+#define OR1K_SPR_DMMU_DMMUPR_UWE3_OFFSET 11
+#define OR1K_SPR_DMMU_DMMUPR_UWE3_MASK   0x00000800
+#define OR1K_SPR_DMMU_DMMUPR_UWE3_GET(X) (((X) >> 11) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_UWE3_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffff7ff)) | ((!!(Y)) << 11))
+
+/* Supervisor Read Enable 4 */
+#define OR1K_SPR_DMMU_DMMUPR_SRE4_OFFSET 12
+#define OR1K_SPR_DMMU_DMMUPR_SRE4_MASK   0x00001000
+#define OR1K_SPR_DMMU_DMMUPR_SRE4_GET(X) (((X) >> 12) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_SRE4_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffefff)) | ((!!(Y)) << 12))
+
+/* Supervisor Write Enable 4 */
+#define OR1K_SPR_DMMU_DMMUPR_SWE4_OFFSET 13
+#define OR1K_SPR_DMMU_DMMUPR_SWE4_MASK   0x00002000
+#define OR1K_SPR_DMMU_DMMUPR_SWE4_GET(X) (((X) >> 13) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_SWE4_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffdfff)) | ((!!(Y)) << 13))
+
+/* User Read Enable 4 */
+#define OR1K_SPR_DMMU_DMMUPR_URE4_OFFSET 14
+#define OR1K_SPR_DMMU_DMMUPR_URE4_MASK   0x00004000
+#define OR1K_SPR_DMMU_DMMUPR_URE4_GET(X) (((X) >> 14) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_URE4_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffbfff)) | ((!!(Y)) << 14))
+
+/* User Write Enable 4 */
+#define OR1K_SPR_DMMU_DMMUPR_UWE4_OFFSET 15
+#define OR1K_SPR_DMMU_DMMUPR_UWE4_MASK   0x00008000
+#define OR1K_SPR_DMMU_DMMUPR_UWE4_GET(X) (((X) >> 15) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_UWE4_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffff7fff)) | ((!!(Y)) << 15))
+
+/* Supervisor Read Enable 5 */
+#define OR1K_SPR_DMMU_DMMUPR_SRE5_OFFSET 16
+#define OR1K_SPR_DMMU_DMMUPR_SRE5_MASK   0x00010000
+#define OR1K_SPR_DMMU_DMMUPR_SRE5_GET(X) (((X) >> 16) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_SRE5_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffeffff)) | ((!!(Y)) << 16))
+
+/* Supervisor Write Enable 5 */
+#define OR1K_SPR_DMMU_DMMUPR_SWE5_OFFSET 17
+#define OR1K_SPR_DMMU_DMMUPR_SWE5_MASK   0x00020000
+#define OR1K_SPR_DMMU_DMMUPR_SWE5_GET(X) (((X) >> 17) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_SWE5_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffdffff)) | ((!!(Y)) << 17))
+
+/* User Read Enable 5 */
+#define OR1K_SPR_DMMU_DMMUPR_URE5_OFFSET 18
+#define OR1K_SPR_DMMU_DMMUPR_URE5_MASK   0x00040000
+#define OR1K_SPR_DMMU_DMMUPR_URE5_GET(X) (((X) >> 18) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_URE5_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffbffff)) | ((!!(Y)) << 18))
+
+/* User Write Enable 5 */
+#define OR1K_SPR_DMMU_DMMUPR_UWE5_OFFSET 19
+#define OR1K_SPR_DMMU_DMMUPR_UWE5_MASK   0x00080000
+#define OR1K_SPR_DMMU_DMMUPR_UWE5_GET(X) (((X) >> 19) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_UWE5_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfff7ffff)) | ((!!(Y)) << 19))
+
+/* Supervisor Read Enable 6 */
+#define OR1K_SPR_DMMU_DMMUPR_SRE6_OFFSET 20
+#define OR1K_SPR_DMMU_DMMUPR_SRE6_MASK   0x00100000
+#define OR1K_SPR_DMMU_DMMUPR_SRE6_GET(X) (((X) >> 20) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_SRE6_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffefffff)) | ((!!(Y)) << 20))
+
+/* Supervisor Write Enable 6 */
+#define OR1K_SPR_DMMU_DMMUPR_SWE6_OFFSET 21
+#define OR1K_SPR_DMMU_DMMUPR_SWE6_MASK   0x00200000
+#define OR1K_SPR_DMMU_DMMUPR_SWE6_GET(X) (((X) >> 21) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_SWE6_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffdfffff)) | ((!!(Y)) << 21))
+
+/* User Read Enable 6 */
+#define OR1K_SPR_DMMU_DMMUPR_URE6_OFFSET 22
+#define OR1K_SPR_DMMU_DMMUPR_URE6_MASK   0x00400000
+#define OR1K_SPR_DMMU_DMMUPR_URE6_GET(X) (((X) >> 22) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_URE6_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffbfffff)) | ((!!(Y)) << 22))
+
+/* User Write Enable 6 */
+#define OR1K_SPR_DMMU_DMMUPR_UWE6_OFFSET 23
+#define OR1K_SPR_DMMU_DMMUPR_UWE6_MASK   0x00800000
+#define OR1K_SPR_DMMU_DMMUPR_UWE6_GET(X) (((X) >> 23) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_UWE6_SET(X, Y) (((X) & OR1K_UNSIGNED(0xff7fffff)) | ((!!(Y)) << 23))
+
+/* Supervisor Read Enable 7 */
+#define OR1K_SPR_DMMU_DMMUPR_SRE7_OFFSET 24
+#define OR1K_SPR_DMMU_DMMUPR_SRE7_MASK   0x01000000
+#define OR1K_SPR_DMMU_DMMUPR_SRE7_GET(X) (((X) >> 24) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_SRE7_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfeffffff)) | ((!!(Y)) << 24))
+
+/* Supervisor Write Enable 7 */
+#define OR1K_SPR_DMMU_DMMUPR_SWE7_OFFSET 25
+#define OR1K_SPR_DMMU_DMMUPR_SWE7_MASK   0x02000000
+#define OR1K_SPR_DMMU_DMMUPR_SWE7_GET(X) (((X) >> 25) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_SWE7_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfdffffff)) | ((!!(Y)) << 25))
+
+/* User Read Enable 7 */
+#define OR1K_SPR_DMMU_DMMUPR_URE7_OFFSET 26
+#define OR1K_SPR_DMMU_DMMUPR_URE7_MASK   0x04000000
+#define OR1K_SPR_DMMU_DMMUPR_URE7_GET(X) (((X) >> 26) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_URE7_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfbffffff)) | ((!!(Y)) << 26))
+
+/* User Write Enable 7 */
+#define OR1K_SPR_DMMU_DMMUPR_UWE7_OFFSET 27
+#define OR1K_SPR_DMMU_DMMUPR_UWE7_MASK   0x08000000
+#define OR1K_SPR_DMMU_DMMUPR_UWE7_GET(X) (((X) >> 27) & 0x1)
+#define OR1K_SPR_DMMU_DMMUPR_UWE7_SET(X, Y) (((X) & OR1K_UNSIGNED(0xf7ffffff)) | ((!!(Y)) << 27))
+
+
+/* Data TLB Entry Invalidate Register */
+#define OR1K_SPR_DMMU_DTLBEIR_INDEX OR1K_UNSIGNED(0x002)
+#define OR1K_SPR_DMMU_DTLBEIR_ADDR  OR1K_UNSIGNED(0x0802)
+
+
+/* Data ATB Match Registers */
+#define OR1K_SPR_DMMU_DATBMR_BASE     OR1K_UNSIGNED(0x004)
+#define OR1K_SPR_DMMU_DATBMR_COUNT    OR1K_UNSIGNED(0x004)
+#define OR1K_SPR_DMMU_DATBMR_STEP     OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_DMMU_DATBMR_INDEX(N) (OR1K_SPR_DMMU_DATBMR_BASE + ((N) * OR1K_SPR_DMMU_DATBMR_STEP))
+#define OR1K_SPR_DMMU_DATBMR_ADDR(N)  ((OR1K_SPR_DMMU_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_DMMU_DATBMR_INDEX(N))
+
+/* Valid */
+#define OR1K_SPR_DMMU_DATBMR_V_OFFSET 0
+#define OR1K_SPR_DMMU_DATBMR_V_MASK   0x00000001
+#define OR1K_SPR_DMMU_DATBMR_V_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_DMMU_DATBMR_V_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Context ID */
+#define OR1K_SPR_DMMU_DATBMR_CID_LSB    1
+#define OR1K_SPR_DMMU_DATBMR_CID_MSB    4
+#define OR1K_SPR_DMMU_DATBMR_CID_BITS   4
+#define OR1K_SPR_DMMU_DATBMR_CID_MASK   OR1K_UNSIGNED(0x0000001e)
+#define OR1K_SPR_DMMU_DATBMR_CID_GET(X) (((X) >> 1) & OR1K_UNSIGNED(0x0000000f))
+#define OR1K_SPR_DMMU_DATBMR_CID_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffe1)) | ((Y) << 1))
+
+/* Page Size */
+#define OR1K_SPR_DMMU_DATBMR_PS_OFFSET 5
+#define OR1K_SPR_DMMU_DATBMR_PS_MASK   0x00000020
+#define OR1K_SPR_DMMU_DATBMR_PS_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_DMMU_DATBMR_PS_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* Virtual Page Number */
+#define OR1K_SPR_DMMU_DATBMR_VPN_LSB    10
+#define OR1K_SPR_DMMU_DATBMR_VPN_MSB    31
+#define OR1K_SPR_DMMU_DATBMR_VPN_BITS   22
+#define OR1K_SPR_DMMU_DATBMR_VPN_MASK   OR1K_UNSIGNED(0xfffffc00)
+#define OR1K_SPR_DMMU_DATBMR_VPN_GET(X) (((X) >> 10) & OR1K_UNSIGNED(0x003fffff))
+#define OR1K_SPR_DMMU_DATBMR_VPN_SET(X, Y) (((X) & OR1K_UNSIGNED(0x000003ff)) | ((Y) << 10))
+
+
+/* Data ATB Translate Registers */
+#define OR1K_SPR_DMMU_DATBTR_BASE     OR1K_UNSIGNED(0x008)
+#define OR1K_SPR_DMMU_DATBTR_COUNT    OR1K_UNSIGNED(0x004)
+#define OR1K_SPR_DMMU_DATBTR_STEP     OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_DMMU_DATBTR_INDEX(N) (OR1K_SPR_DMMU_DATBTR_BASE + ((N) * OR1K_SPR_DMMU_DATBTR_STEP))
+#define OR1K_SPR_DMMU_DATBTR_ADDR(N)  ((OR1K_SPR_DMMU_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_DMMU_DATBTR_INDEX(N))
+
+/* Cache Coherency */
+#define OR1K_SPR_DMMU_DATBTR_CC_OFFSET 0
+#define OR1K_SPR_DMMU_DATBTR_CC_MASK   0x00000001
+#define OR1K_SPR_DMMU_DATBTR_CC_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_DMMU_DATBTR_CC_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Cache Inhibit */
+#define OR1K_SPR_DMMU_DATBTR_CI_OFFSET 1
+#define OR1K_SPR_DMMU_DATBTR_CI_MASK   0x00000002
+#define OR1K_SPR_DMMU_DATBTR_CI_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_DMMU_DATBTR_CI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* Write-back Cache */
+#define OR1K_SPR_DMMU_DATBTR_WBC_OFFSET 2
+#define OR1K_SPR_DMMU_DATBTR_WBC_MASK   0x00000004
+#define OR1K_SPR_DMMU_DATBTR_WBC_GET(X) (((X) >> 2) & 0x1)
+#define OR1K_SPR_DMMU_DATBTR_WBC_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffb)) | ((!!(Y)) << 2))
+
+/* Weakly-ordered Memory */
+#define OR1K_SPR_DMMU_DATBTR_WOM_OFFSET 3
+#define OR1K_SPR_DMMU_DATBTR_WOM_MASK   0x00000008
+#define OR1K_SPR_DMMU_DATBTR_WOM_GET(X) (((X) >> 3) & 0x1)
+#define OR1K_SPR_DMMU_DATBTR_WOM_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff7)) | ((!!(Y)) << 3))
+
+/* Accessed */
+#define OR1K_SPR_DMMU_DATBTR_A_OFFSET 4
+#define OR1K_SPR_DMMU_DATBTR_A_MASK   0x00000010
+#define OR1K_SPR_DMMU_DATBTR_A_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_DMMU_DATBTR_A_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* Dirty */
+#define OR1K_SPR_DMMU_DATBTR_D_OFFSET 5
+#define OR1K_SPR_DMMU_DATBTR_D_MASK   0x00000020
+#define OR1K_SPR_DMMU_DATBTR_D_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_DMMU_DATBTR_D_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* Supervisor Read Enable */
+#define OR1K_SPR_DMMU_DATBTR_SRE_OFFSET 6
+#define OR1K_SPR_DMMU_DATBTR_SRE_MASK   0x00000040
+#define OR1K_SPR_DMMU_DATBTR_SRE_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_DMMU_DATBTR_SRE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+/* Supervisor Write Enable */
+#define OR1K_SPR_DMMU_DATBTR_SWE_OFFSET 7
+#define OR1K_SPR_DMMU_DATBTR_SWE_MASK   0x00000080
+#define OR1K_SPR_DMMU_DATBTR_SWE_GET(X) (((X) >> 7) & 0x1)
+#define OR1K_SPR_DMMU_DATBTR_SWE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff7f)) | ((!!(Y)) << 7))
+
+/* User Read Enable */
+#define OR1K_SPR_DMMU_DATBTR_URE_OFFSET 8
+#define OR1K_SPR_DMMU_DATBTR_URE_MASK   0x00000100
+#define OR1K_SPR_DMMU_DATBTR_URE_GET(X) (((X) >> 8) & 0x1)
+#define OR1K_SPR_DMMU_DATBTR_URE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffeff)) | ((!!(Y)) << 8))
+
+/* User Write Enable */
+#define OR1K_SPR_DMMU_DATBTR_UWE_OFFSET 9
+#define OR1K_SPR_DMMU_DATBTR_UWE_MASK   0x00000200
+#define OR1K_SPR_DMMU_DATBTR_UWE_GET(X) (((X) >> 9) & 0x1)
+#define OR1K_SPR_DMMU_DATBTR_UWE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((!!(Y)) << 9))
+
+/* Physical Page Number */
+#define OR1K_SPR_DMMU_DATBTR_PPN_LSB    10
+#define OR1K_SPR_DMMU_DATBTR_PPN_MSB    31
+#define OR1K_SPR_DMMU_DATBTR_PPN_BITS   22
+#define OR1K_SPR_DMMU_DATBTR_PPN_MASK   OR1K_UNSIGNED(0xfffffc00)
+#define OR1K_SPR_DMMU_DATBTR_PPN_GET(X) (((X) >> 10) & OR1K_UNSIGNED(0x003fffff))
+#define OR1K_SPR_DMMU_DATBTR_PPN_SET(X, Y) (((X) & OR1K_UNSIGNED(0x000003ff)) | ((Y) << 10))
+
+
+/* Data TLB */
+#define OR1K_SPR_DMMU_DTLBW_BASE  OR1K_UNSIGNED(0x200)
+#define OR1K_SPR_DMMU_DTLBW_COUNT OR1K_UNSIGNED(0x004)
+#define OR1K_SPR_DMMU_DTLBW_STEP  OR1K_UNSIGNED(0x100)
+#define OR1K_SPR_DMMU_DTLBW_SUBBASE(N0) (OR1K_SPR_DMMU_DTLBW_BASE + ((N0)*OR1K_SPR_DMMU_DTLBW_STEP))
+
+/* Data TLB Match Registers */
+#define OR1K_SPR_DMMU_DTLBW_MR_BASE  OR1K_UNSIGNED(0x000)
+#define OR1K_SPR_DMMU_DTLBW_MR_COUNT OR1K_UNSIGNED(0x080)
+#define OR1K_SPR_DMMU_DTLBW_MR_STEP  OR1K_UNSIGNED(0x001)
+
+/* Data MMU: DTLB way Match (MR) and Translate (TR) register accessors.
+ * Auto-generated OR1K SPR definitions: N0 selects the TLB way, N1 the
+ * entry within the way (see the *_COUNT/_STEP constants).  The _GET/_SET
+ * helpers extract/insert a field; single-bit _SET normalises Y with !!. */
+#define OR1K_SPR_DMMU_DTLBW_MR_INDEX(N0, N1) (OR1K_SPR_DMMU_DTLBW_SUBBASE(N0) + OR1K_SPR_DMMU_DTLBW_MR_BASE + ((N1) * OR1K_SPR_DMMU_DTLBW_MR_STEP))
+#define OR1K_SPR_DMMU_DTLBW_MR_ADDR(N0, N1)  ((OR1K_SPR_DMMU_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_DMMU_DTLBW_MR_INDEX(N0, N1))
+
+/* Valid */
+#define OR1K_SPR_DMMU_DTLBW_MR_V_OFFSET 0
+#define OR1K_SPR_DMMU_DTLBW_MR_V_MASK   0x00000001
+#define OR1K_SPR_DMMU_DTLBW_MR_V_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_DMMU_DTLBW_MR_V_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Page Level 1 */
+#define OR1K_SPR_DMMU_DTLBW_MR_PL1_OFFSET 1
+#define OR1K_SPR_DMMU_DTLBW_MR_PL1_MASK   0x00000002
+#define OR1K_SPR_DMMU_DTLBW_MR_PL1_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_DMMU_DTLBW_MR_PL1_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* Context ID */
+#define OR1K_SPR_DMMU_DTLBW_MR_CID_LSB    2
+#define OR1K_SPR_DMMU_DTLBW_MR_CID_MSB    5
+#define OR1K_SPR_DMMU_DTLBW_MR_CID_BITS   4
+#define OR1K_SPR_DMMU_DTLBW_MR_CID_MASK   OR1K_UNSIGNED(0x0000003c)
+#define OR1K_SPR_DMMU_DTLBW_MR_CID_GET(X) (((X) >> 2) & OR1K_UNSIGNED(0x0000000f))
+#define OR1K_SPR_DMMU_DTLBW_MR_CID_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffc3)) | ((Y) << 2))
+
+/* Least Recently Used */
+#define OR1K_SPR_DMMU_DTLBW_MR_LRU_LSB    6
+#define OR1K_SPR_DMMU_DTLBW_MR_LRU_MSB    7
+#define OR1K_SPR_DMMU_DTLBW_MR_LRU_BITS   2
+#define OR1K_SPR_DMMU_DTLBW_MR_LRU_MASK   OR1K_UNSIGNED(0x000000c0)
+#define OR1K_SPR_DMMU_DTLBW_MR_LRU_GET(X) (((X) >> 6) & OR1K_UNSIGNED(0x00000003))
+#define OR1K_SPR_DMMU_DTLBW_MR_LRU_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff3f)) | ((Y) << 6))
+
+/* Virtual Page Number */
+#define OR1K_SPR_DMMU_DTLBW_MR_VPN_LSB    13
+#define OR1K_SPR_DMMU_DTLBW_MR_VPN_MSB    31
+#define OR1K_SPR_DMMU_DTLBW_MR_VPN_BITS   19
+#define OR1K_SPR_DMMU_DTLBW_MR_VPN_MASK   OR1K_UNSIGNED(0xffffe000)
+#define OR1K_SPR_DMMU_DTLBW_MR_VPN_GET(X) (((X) >> 13) & OR1K_UNSIGNED(0x0007ffff))
+#define OR1K_SPR_DMMU_DTLBW_MR_VPN_SET(X, Y) (((X) & OR1K_UNSIGNED(0x00001fff)) | ((Y) << 13))
+
+/* Data TLB Translate Registers */
+#define OR1K_SPR_DMMU_DTLBW_TR_BASE  OR1K_UNSIGNED(0x080)
+#define OR1K_SPR_DMMU_DTLBW_TR_COUNT OR1K_UNSIGNED(0x080)
+#define OR1K_SPR_DMMU_DTLBW_TR_STEP  OR1K_UNSIGNED(0x001)
+
+#define OR1K_SPR_DMMU_DTLBW_TR_INDEX(N0, N1) (OR1K_SPR_DMMU_DTLBW_SUBBASE(N0) + OR1K_SPR_DMMU_DTLBW_TR_BASE + ((N1) * OR1K_SPR_DMMU_DTLBW_TR_STEP))
+#define OR1K_SPR_DMMU_DTLBW_TR_ADDR(N0, N1)  ((OR1K_SPR_DMMU_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_DMMU_DTLBW_TR_INDEX(N0, N1))
+
+/* Cache Coherency */
+#define OR1K_SPR_DMMU_DTLBW_TR_CC_OFFSET 0
+#define OR1K_SPR_DMMU_DTLBW_TR_CC_MASK   0x00000001
+#define OR1K_SPR_DMMU_DTLBW_TR_CC_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_DMMU_DTLBW_TR_CC_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Cache Inhibit */
+#define OR1K_SPR_DMMU_DTLBW_TR_CI_OFFSET 1
+#define OR1K_SPR_DMMU_DTLBW_TR_CI_MASK   0x00000002
+#define OR1K_SPR_DMMU_DTLBW_TR_CI_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_DMMU_DTLBW_TR_CI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* Write-back Cache */
+#define OR1K_SPR_DMMU_DTLBW_TR_WBC_OFFSET 2
+#define OR1K_SPR_DMMU_DTLBW_TR_WBC_MASK   0x00000004
+#define OR1K_SPR_DMMU_DTLBW_TR_WBC_GET(X) (((X) >> 2) & 0x1)
+#define OR1K_SPR_DMMU_DTLBW_TR_WBC_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffb)) | ((!!(Y)) << 2))
+
+/* Weakly-ordered Memory */
+#define OR1K_SPR_DMMU_DTLBW_TR_WOM_OFFSET 3
+#define OR1K_SPR_DMMU_DTLBW_TR_WOM_MASK   0x00000008
+#define OR1K_SPR_DMMU_DTLBW_TR_WOM_GET(X) (((X) >> 3) & 0x1)
+#define OR1K_SPR_DMMU_DTLBW_TR_WOM_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff7)) | ((!!(Y)) << 3))
+
+/* Accessed */
+#define OR1K_SPR_DMMU_DTLBW_TR_A_OFFSET 4
+#define OR1K_SPR_DMMU_DTLBW_TR_A_MASK   0x00000010
+#define OR1K_SPR_DMMU_DTLBW_TR_A_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_DMMU_DTLBW_TR_A_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* Dirty */
+#define OR1K_SPR_DMMU_DTLBW_TR_D_OFFSET 5
+#define OR1K_SPR_DMMU_DTLBW_TR_D_MASK   0x00000020
+#define OR1K_SPR_DMMU_DTLBW_TR_D_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_DMMU_DTLBW_TR_D_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* User Read Enable */
+#define OR1K_SPR_DMMU_DTLBW_TR_URE_OFFSET 6
+#define OR1K_SPR_DMMU_DTLBW_TR_URE_MASK   0x00000040
+#define OR1K_SPR_DMMU_DTLBW_TR_URE_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_DMMU_DTLBW_TR_URE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+/* User Write Enable */
+#define OR1K_SPR_DMMU_DTLBW_TR_UWE_OFFSET 7
+#define OR1K_SPR_DMMU_DTLBW_TR_UWE_MASK   0x00000080
+#define OR1K_SPR_DMMU_DTLBW_TR_UWE_GET(X) (((X) >> 7) & 0x1)
+#define OR1K_SPR_DMMU_DTLBW_TR_UWE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff7f)) | ((!!(Y)) << 7))
+
+/* Supervisor Read Enable */
+#define OR1K_SPR_DMMU_DTLBW_TR_SRE_OFFSET 8
+#define OR1K_SPR_DMMU_DTLBW_TR_SRE_MASK   0x00000100
+#define OR1K_SPR_DMMU_DTLBW_TR_SRE_GET(X) (((X) >> 8) & 0x1)
+#define OR1K_SPR_DMMU_DTLBW_TR_SRE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffeff)) | ((!!(Y)) << 8))
+
+/* Supervisor Write Enable */
+#define OR1K_SPR_DMMU_DTLBW_TR_SWE_OFFSET 9
+#define OR1K_SPR_DMMU_DTLBW_TR_SWE_MASK   0x00000200
+#define OR1K_SPR_DMMU_DTLBW_TR_SWE_GET(X) (((X) >> 9) & 0x1)
+#define OR1K_SPR_DMMU_DTLBW_TR_SWE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((!!(Y)) << 9))
+
+/* Physical Page Number */
+#define OR1K_SPR_DMMU_DTLBW_TR_PPN_LSB    13
+#define OR1K_SPR_DMMU_DTLBW_TR_PPN_MSB    31
+#define OR1K_SPR_DMMU_DTLBW_TR_PPN_BITS   19
+#define OR1K_SPR_DMMU_DTLBW_TR_PPN_MASK   OR1K_UNSIGNED(0xffffe000)
+#define OR1K_SPR_DMMU_DTLBW_TR_PPN_GET(X) (((X) >> 13) & OR1K_UNSIGNED(0x0007ffff))
+#define OR1K_SPR_DMMU_DTLBW_TR_PPN_SET(X, Y) (((X) & OR1K_UNSIGNED(0x00001fff)) | ((Y) << 13))
+
+
+/*************************/
+/* Instruction MMU Group */
+/*************************/
+/* SPR group 2: control/protection registers, ATB (match + translate) and
+ * ITLB (match + translate) per way.  _ADDR values combine the group number
+ * with the in-group index; _GET/_SET extract/insert the documented field. */
+#define OR1K_SPR_IMMU_GROUP 0x02
+
+/* Instruction MMU Control Register */
+#define OR1K_SPR_IMMU_IMMUCR_INDEX OR1K_UNSIGNED(0x000)
+#define OR1K_SPR_IMMU_IMMUCR_ADDR  OR1K_UNSIGNED(0x1000)
+
+/* ITLB Flush */
+#define OR1K_SPR_IMMU_IMMUCR_ITF_OFFSET 0
+#define OR1K_SPR_IMMU_IMMUCR_ITF_MASK   0x00000001
+#define OR1K_SPR_IMMU_IMMUCR_ITF_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_IMMU_IMMUCR_ITF_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Page Table Base Pointer */
+#define OR1K_SPR_IMMU_IMMUCR_PTBP_LSB    10
+#define OR1K_SPR_IMMU_IMMUCR_PTBP_MSB    31
+#define OR1K_SPR_IMMU_IMMUCR_PTBP_BITS   22
+#define OR1K_SPR_IMMU_IMMUCR_PTBP_MASK   OR1K_UNSIGNED(0xfffffc00)
+#define OR1K_SPR_IMMU_IMMUCR_PTBP_GET(X) (((X) >> 10) & OR1K_UNSIGNED(0x003fffff))
+#define OR1K_SPR_IMMU_IMMUCR_PTBP_SET(X, Y) (((X) & OR1K_UNSIGNED(0x000003ff)) | ((Y) << 10))
+
+
+/* Instruction MMU Protection Register */
+/* Eight SXEn/UXEn pairs: supervisor/user execute permission per
+ * protection-group n (bits 0..13). */
+#define OR1K_SPR_IMMU_IMMUPR_INDEX OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_IMMU_IMMUPR_ADDR  OR1K_UNSIGNED(0x1001)
+
+/* Supervisor Execute Enable 1 */
+#define OR1K_SPR_IMMU_IMMUPR_SXE1_OFFSET 0
+#define OR1K_SPR_IMMU_IMMUPR_SXE1_MASK   0x00000001
+#define OR1K_SPR_IMMU_IMMUPR_SXE1_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_IMMU_IMMUPR_SXE1_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* User Execute Enable 1 */
+#define OR1K_SPR_IMMU_IMMUPR_UXE1_OFFSET 1
+#define OR1K_SPR_IMMU_IMMUPR_UXE1_MASK   0x00000002
+#define OR1K_SPR_IMMU_IMMUPR_UXE1_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_IMMU_IMMUPR_UXE1_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* Supervisor Execute Enable 2 */
+#define OR1K_SPR_IMMU_IMMUPR_SXE2_OFFSET 2
+#define OR1K_SPR_IMMU_IMMUPR_SXE2_MASK   0x00000004
+#define OR1K_SPR_IMMU_IMMUPR_SXE2_GET(X) (((X) >> 2) & 0x1)
+#define OR1K_SPR_IMMU_IMMUPR_SXE2_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffb)) | ((!!(Y)) << 2))
+
+/* User Execute Enable 2 */
+#define OR1K_SPR_IMMU_IMMUPR_UXE2_OFFSET 3
+#define OR1K_SPR_IMMU_IMMUPR_UXE2_MASK   0x00000008
+#define OR1K_SPR_IMMU_IMMUPR_UXE2_GET(X) (((X) >> 3) & 0x1)
+#define OR1K_SPR_IMMU_IMMUPR_UXE2_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff7)) | ((!!(Y)) << 3))
+
+/* Supervisor Execute Enable 3 */
+#define OR1K_SPR_IMMU_IMMUPR_SXE3_OFFSET 4
+#define OR1K_SPR_IMMU_IMMUPR_SXE3_MASK   0x00000010
+#define OR1K_SPR_IMMU_IMMUPR_SXE3_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_IMMU_IMMUPR_SXE3_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* User Execute Enable 3 */
+#define OR1K_SPR_IMMU_IMMUPR_UXE3_OFFSET 5
+#define OR1K_SPR_IMMU_IMMUPR_UXE3_MASK   0x00000020
+#define OR1K_SPR_IMMU_IMMUPR_UXE3_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_IMMU_IMMUPR_UXE3_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* Supervisor Execute Enable 4 */
+#define OR1K_SPR_IMMU_IMMUPR_SXE4_OFFSET 6
+#define OR1K_SPR_IMMU_IMMUPR_SXE4_MASK   0x00000040
+#define OR1K_SPR_IMMU_IMMUPR_SXE4_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_IMMU_IMMUPR_SXE4_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+/* User Execute Enable 4 */
+#define OR1K_SPR_IMMU_IMMUPR_UXE4_OFFSET 7
+#define OR1K_SPR_IMMU_IMMUPR_UXE4_MASK   0x00000080
+#define OR1K_SPR_IMMU_IMMUPR_UXE4_GET(X) (((X) >> 7) & 0x1)
+#define OR1K_SPR_IMMU_IMMUPR_UXE4_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff7f)) | ((!!(Y)) << 7))
+
+/* Supervisor Execute Enable 5 */
+#define OR1K_SPR_IMMU_IMMUPR_SXE5_OFFSET 8
+#define OR1K_SPR_IMMU_IMMUPR_SXE5_MASK   0x00000100
+#define OR1K_SPR_IMMU_IMMUPR_SXE5_GET(X) (((X) >> 8) & 0x1)
+#define OR1K_SPR_IMMU_IMMUPR_SXE5_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffeff)) | ((!!(Y)) << 8))
+
+/* User Execute Enable 5 */
+#define OR1K_SPR_IMMU_IMMUPR_UXE5_OFFSET 9
+#define OR1K_SPR_IMMU_IMMUPR_UXE5_MASK   0x00000200
+#define OR1K_SPR_IMMU_IMMUPR_UXE5_GET(X) (((X) >> 9) & 0x1)
+#define OR1K_SPR_IMMU_IMMUPR_UXE5_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((!!(Y)) << 9))
+
+/* Supervisor Execute Enable 6 */
+#define OR1K_SPR_IMMU_IMMUPR_SXE6_OFFSET 10
+#define OR1K_SPR_IMMU_IMMUPR_SXE6_MASK   0x00000400
+#define OR1K_SPR_IMMU_IMMUPR_SXE6_GET(X) (((X) >> 10) & 0x1)
+#define OR1K_SPR_IMMU_IMMUPR_SXE6_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffbff)) | ((!!(Y)) << 10))
+
+/* User Execute Enable 6 */
+#define OR1K_SPR_IMMU_IMMUPR_UXE6_OFFSET 11
+#define OR1K_SPR_IMMU_IMMUPR_UXE6_MASK   0x00000800
+#define OR1K_SPR_IMMU_IMMUPR_UXE6_GET(X) (((X) >> 11) & 0x1)
+#define OR1K_SPR_IMMU_IMMUPR_UXE6_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffff7ff)) | ((!!(Y)) << 11))
+
+/* Supervisor Execute Enable 7 */
+#define OR1K_SPR_IMMU_IMMUPR_SXE7_OFFSET 12
+#define OR1K_SPR_IMMU_IMMUPR_SXE7_MASK   0x00001000
+#define OR1K_SPR_IMMU_IMMUPR_SXE7_GET(X) (((X) >> 12) & 0x1)
+#define OR1K_SPR_IMMU_IMMUPR_SXE7_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffefff)) | ((!!(Y)) << 12))
+
+/* User Execute Enable 7 */
+#define OR1K_SPR_IMMU_IMMUPR_UXE7_OFFSET 13
+#define OR1K_SPR_IMMU_IMMUPR_UXE7_MASK   0x00002000
+#define OR1K_SPR_IMMU_IMMUPR_UXE7_GET(X) (((X) >> 13) & 0x1)
+#define OR1K_SPR_IMMU_IMMUPR_UXE7_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffdfff)) | ((!!(Y)) << 13))
+
+
+/* Instruction TLB Entry Invalidate Register */
+#define OR1K_SPR_IMMU_ITLBEIR_INDEX OR1K_UNSIGNED(0x002)
+#define OR1K_SPR_IMMU_ITLBEIR_ADDR  OR1K_UNSIGNED(0x1002)
+
+
+/* Instruction ATB Match Registers */
+#define OR1K_SPR_IMMU_IATBMR_BASE     OR1K_UNSIGNED(0x004)
+#define OR1K_SPR_IMMU_IATBMR_COUNT    OR1K_UNSIGNED(0x004)
+#define OR1K_SPR_IMMU_IATBMR_STEP     OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_IMMU_IATBMR_INDEX(N) (OR1K_SPR_IMMU_IATBMR_BASE + ((N) * OR1K_SPR_IMMU_IATBMR_STEP))
+#define OR1K_SPR_IMMU_IATBMR_ADDR(N)  ((OR1K_SPR_IMMU_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_IMMU_IATBMR_INDEX(N))
+
+/* Valid */
+#define OR1K_SPR_IMMU_IATBMR_V_OFFSET 0
+#define OR1K_SPR_IMMU_IATBMR_V_MASK   0x00000001
+#define OR1K_SPR_IMMU_IATBMR_V_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_IMMU_IATBMR_V_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Context ID */
+#define OR1K_SPR_IMMU_IATBMR_CID_LSB    1
+#define OR1K_SPR_IMMU_IATBMR_CID_MSB    4
+#define OR1K_SPR_IMMU_IATBMR_CID_BITS   4
+#define OR1K_SPR_IMMU_IATBMR_CID_MASK   OR1K_UNSIGNED(0x0000001e)
+#define OR1K_SPR_IMMU_IATBMR_CID_GET(X) (((X) >> 1) & OR1K_UNSIGNED(0x0000000f))
+#define OR1K_SPR_IMMU_IATBMR_CID_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffe1)) | ((Y) << 1))
+
+/* Page Size */
+#define OR1K_SPR_IMMU_IATBMR_PS_OFFSET 5
+#define OR1K_SPR_IMMU_IATBMR_PS_MASK   0x00000020
+#define OR1K_SPR_IMMU_IATBMR_PS_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_IMMU_IATBMR_PS_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* Virtual Page Number */
+#define OR1K_SPR_IMMU_IATBMR_VPN_LSB    10
+#define OR1K_SPR_IMMU_IATBMR_VPN_MSB    31
+#define OR1K_SPR_IMMU_IATBMR_VPN_BITS   22
+#define OR1K_SPR_IMMU_IATBMR_VPN_MASK   OR1K_UNSIGNED(0xfffffc00)
+#define OR1K_SPR_IMMU_IATBMR_VPN_GET(X) (((X) >> 10) & OR1K_UNSIGNED(0x003fffff))
+#define OR1K_SPR_IMMU_IATBMR_VPN_SET(X, Y) (((X) & OR1K_UNSIGNED(0x000003ff)) | ((Y) << 10))
+
+
+/* Instruction ATB Translate Registers */
+#define OR1K_SPR_IMMU_IATBTR_BASE     OR1K_UNSIGNED(0x008)
+#define OR1K_SPR_IMMU_IATBTR_COUNT    OR1K_UNSIGNED(0x004)
+#define OR1K_SPR_IMMU_IATBTR_STEP     OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_IMMU_IATBTR_INDEX(N) (OR1K_SPR_IMMU_IATBTR_BASE + ((N) * OR1K_SPR_IMMU_IATBTR_STEP))
+#define OR1K_SPR_IMMU_IATBTR_ADDR(N)  ((OR1K_SPR_IMMU_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_IMMU_IATBTR_INDEX(N))
+
+/* Cache Coherency */
+#define OR1K_SPR_IMMU_IATBTR_CC_OFFSET 0
+#define OR1K_SPR_IMMU_IATBTR_CC_MASK   0x00000001
+#define OR1K_SPR_IMMU_IATBTR_CC_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_IMMU_IATBTR_CC_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Cache Inhibit */
+#define OR1K_SPR_IMMU_IATBTR_CI_OFFSET 1
+#define OR1K_SPR_IMMU_IATBTR_CI_MASK   0x00000002
+#define OR1K_SPR_IMMU_IATBTR_CI_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_IMMU_IATBTR_CI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* Write-back Cache */
+#define OR1K_SPR_IMMU_IATBTR_WBC_OFFSET 2
+#define OR1K_SPR_IMMU_IATBTR_WBC_MASK   0x00000004
+#define OR1K_SPR_IMMU_IATBTR_WBC_GET(X) (((X) >> 2) & 0x1)
+#define OR1K_SPR_IMMU_IATBTR_WBC_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffb)) | ((!!(Y)) << 2))
+
+/* Weakly-ordered Memory */
+#define OR1K_SPR_IMMU_IATBTR_WOM_OFFSET 3
+#define OR1K_SPR_IMMU_IATBTR_WOM_MASK   0x00000008
+#define OR1K_SPR_IMMU_IATBTR_WOM_GET(X) (((X) >> 3) & 0x1)
+#define OR1K_SPR_IMMU_IATBTR_WOM_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff7)) | ((!!(Y)) << 3))
+
+/* Accessed */
+#define OR1K_SPR_IMMU_IATBTR_A_OFFSET 4
+#define OR1K_SPR_IMMU_IATBTR_A_MASK   0x00000010
+#define OR1K_SPR_IMMU_IATBTR_A_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_IMMU_IATBTR_A_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* Dirty */
+#define OR1K_SPR_IMMU_IATBTR_D_OFFSET 5
+#define OR1K_SPR_IMMU_IATBTR_D_MASK   0x00000020
+#define OR1K_SPR_IMMU_IATBTR_D_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_IMMU_IATBTR_D_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* Supervisor Execute Enable */
+/* NOTE(review): description says "Execute" but the macro name suffix is
+ * SRE (read enable); inconsistency comes from the generator input — confirm
+ * against the OR1K architecture manual before renaming either side. */
+#define OR1K_SPR_IMMU_IATBTR_SRE_OFFSET 6
+#define OR1K_SPR_IMMU_IATBTR_SRE_MASK   0x00000040
+#define OR1K_SPR_IMMU_IATBTR_SRE_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_IMMU_IATBTR_SRE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+/* User Execute Enable */
+/* NOTE(review): same name/description mismatch as SRE above (URE = read
+ * enable) — verify against the architecture manual. */
+#define OR1K_SPR_IMMU_IATBTR_URE_OFFSET 7
+#define OR1K_SPR_IMMU_IATBTR_URE_MASK   0x00000080
+#define OR1K_SPR_IMMU_IATBTR_URE_GET(X) (((X) >> 7) & 0x1)
+#define OR1K_SPR_IMMU_IATBTR_URE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff7f)) | ((!!(Y)) << 7))
+
+/* Physical Page Number */
+#define OR1K_SPR_IMMU_IATBTR_PPN_LSB    10
+#define OR1K_SPR_IMMU_IATBTR_PPN_MSB    31
+#define OR1K_SPR_IMMU_IATBTR_PPN_BITS   22
+#define OR1K_SPR_IMMU_IATBTR_PPN_MASK   OR1K_UNSIGNED(0xfffffc00)
+#define OR1K_SPR_IMMU_IATBTR_PPN_GET(X) (((X) >> 10) & OR1K_UNSIGNED(0x003fffff))
+#define OR1K_SPR_IMMU_IATBTR_PPN_SET(X, Y) (((X) & OR1K_UNSIGNED(0x000003ff)) | ((Y) << 10))
+
+
+/* Instruction TLB */
+/* N0 selects the way (4 ways, 0x100 apart), N1 the entry within the way. */
+#define OR1K_SPR_IMMU_ITLBW_BASE  OR1K_UNSIGNED(0x200)
+#define OR1K_SPR_IMMU_ITLBW_COUNT OR1K_UNSIGNED(0x004)
+#define OR1K_SPR_IMMU_ITLBW_STEP  OR1K_UNSIGNED(0x100)
+#define OR1K_SPR_IMMU_ITLBW_SUBBASE(N0) (OR1K_SPR_IMMU_ITLBW_BASE + ((N0)*OR1K_SPR_IMMU_ITLBW_STEP))
+
+/* Instruction TLB Match Registers */
+#define OR1K_SPR_IMMU_ITLBW_MR_BASE  OR1K_UNSIGNED(0x000)
+#define OR1K_SPR_IMMU_ITLBW_MR_COUNT OR1K_UNSIGNED(0x080)
+#define OR1K_SPR_IMMU_ITLBW_MR_STEP  OR1K_UNSIGNED(0x001)
+
+#define OR1K_SPR_IMMU_ITLBW_MR_INDEX(N0, N1) (OR1K_SPR_IMMU_ITLBW_SUBBASE(N0) + OR1K_SPR_IMMU_ITLBW_MR_BASE + ((N1) * OR1K_SPR_IMMU_ITLBW_MR_STEP))
+#define OR1K_SPR_IMMU_ITLBW_MR_ADDR(N0, N1)  ((OR1K_SPR_IMMU_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_IMMU_ITLBW_MR_INDEX(N0, N1))
+
+/* Valid */
+#define OR1K_SPR_IMMU_ITLBW_MR_V_OFFSET 0
+#define OR1K_SPR_IMMU_ITLBW_MR_V_MASK   0x00000001
+#define OR1K_SPR_IMMU_ITLBW_MR_V_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_IMMU_ITLBW_MR_V_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Page Level 1 */
+#define OR1K_SPR_IMMU_ITLBW_MR_PL1_OFFSET 1
+#define OR1K_SPR_IMMU_ITLBW_MR_PL1_MASK   0x00000002
+#define OR1K_SPR_IMMU_ITLBW_MR_PL1_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_IMMU_ITLBW_MR_PL1_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* Context ID */
+#define OR1K_SPR_IMMU_ITLBW_MR_CID_LSB    2
+#define OR1K_SPR_IMMU_ITLBW_MR_CID_MSB    5
+#define OR1K_SPR_IMMU_ITLBW_MR_CID_BITS   4
+#define OR1K_SPR_IMMU_ITLBW_MR_CID_MASK   OR1K_UNSIGNED(0x0000003c)
+#define OR1K_SPR_IMMU_ITLBW_MR_CID_GET(X) (((X) >> 2) & OR1K_UNSIGNED(0x0000000f))
+#define OR1K_SPR_IMMU_ITLBW_MR_CID_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffc3)) | ((Y) << 2))
+
+/* Least Recently Used */
+#define OR1K_SPR_IMMU_ITLBW_MR_LRU_LSB    6
+#define OR1K_SPR_IMMU_ITLBW_MR_LRU_MSB    7
+#define OR1K_SPR_IMMU_ITLBW_MR_LRU_BITS   2
+#define OR1K_SPR_IMMU_ITLBW_MR_LRU_MASK   OR1K_UNSIGNED(0x000000c0)
+#define OR1K_SPR_IMMU_ITLBW_MR_LRU_GET(X) (((X) >> 6) & OR1K_UNSIGNED(0x00000003))
+#define OR1K_SPR_IMMU_ITLBW_MR_LRU_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff3f)) | ((Y) << 6))
+
+/* Virtual Page Number */
+#define OR1K_SPR_IMMU_ITLBW_MR_VPN_LSB    13
+#define OR1K_SPR_IMMU_ITLBW_MR_VPN_MSB    31
+#define OR1K_SPR_IMMU_ITLBW_MR_VPN_BITS   19
+#define OR1K_SPR_IMMU_ITLBW_MR_VPN_MASK   OR1K_UNSIGNED(0xffffe000)
+#define OR1K_SPR_IMMU_ITLBW_MR_VPN_GET(X) (((X) >> 13) & OR1K_UNSIGNED(0x0007ffff))
+#define OR1K_SPR_IMMU_ITLBW_MR_VPN_SET(X, Y) (((X) & OR1K_UNSIGNED(0x00001fff)) | ((Y) << 13))
+
+/* Instruction TLB Translate Registers */
+#define OR1K_SPR_IMMU_ITLBW_TR_BASE  OR1K_UNSIGNED(0x080)
+#define OR1K_SPR_IMMU_ITLBW_TR_COUNT OR1K_UNSIGNED(0x080)
+#define OR1K_SPR_IMMU_ITLBW_TR_STEP  OR1K_UNSIGNED(0x001)
+
+#define OR1K_SPR_IMMU_ITLBW_TR_INDEX(N0, N1) (OR1K_SPR_IMMU_ITLBW_SUBBASE(N0) + OR1K_SPR_IMMU_ITLBW_TR_BASE + ((N1) * OR1K_SPR_IMMU_ITLBW_TR_STEP))
+#define OR1K_SPR_IMMU_ITLBW_TR_ADDR(N0, N1)  ((OR1K_SPR_IMMU_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_IMMU_ITLBW_TR_INDEX(N0, N1))
+
+/* Cache Coherency */
+#define OR1K_SPR_IMMU_ITLBW_TR_CC_OFFSET 0
+#define OR1K_SPR_IMMU_ITLBW_TR_CC_MASK   0x00000001
+#define OR1K_SPR_IMMU_ITLBW_TR_CC_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_IMMU_ITLBW_TR_CC_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Cache Inhibit */
+#define OR1K_SPR_IMMU_ITLBW_TR_CI_OFFSET 1
+#define OR1K_SPR_IMMU_ITLBW_TR_CI_MASK   0x00000002
+#define OR1K_SPR_IMMU_ITLBW_TR_CI_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_IMMU_ITLBW_TR_CI_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* Write-back Cache */
+#define OR1K_SPR_IMMU_ITLBW_TR_WBC_OFFSET 2
+#define OR1K_SPR_IMMU_ITLBW_TR_WBC_MASK   0x00000004
+#define OR1K_SPR_IMMU_ITLBW_TR_WBC_GET(X) (((X) >> 2) & 0x1)
+#define OR1K_SPR_IMMU_ITLBW_TR_WBC_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffb)) | ((!!(Y)) << 2))
+
+/* Weakly-ordered Memory */
+#define OR1K_SPR_IMMU_ITLBW_TR_WOM_OFFSET 3
+#define OR1K_SPR_IMMU_ITLBW_TR_WOM_MASK   0x00000008
+#define OR1K_SPR_IMMU_ITLBW_TR_WOM_GET(X) (((X) >> 3) & 0x1)
+#define OR1K_SPR_IMMU_ITLBW_TR_WOM_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff7)) | ((!!(Y)) << 3))
+
+/* Accessed */
+#define OR1K_SPR_IMMU_ITLBW_TR_A_OFFSET 4
+#define OR1K_SPR_IMMU_ITLBW_TR_A_MASK   0x00000010
+#define OR1K_SPR_IMMU_ITLBW_TR_A_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_IMMU_ITLBW_TR_A_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* Dirty */
+#define OR1K_SPR_IMMU_ITLBW_TR_D_OFFSET 5
+#define OR1K_SPR_IMMU_ITLBW_TR_D_MASK   0x00000020
+#define OR1K_SPR_IMMU_ITLBW_TR_D_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_IMMU_ITLBW_TR_D_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* User Execute Enable */
+/* NOTE(review): UXE here is bit 6 and SXE bit 7; Linux
+ * arch/openrisc spr_defs.h defines the reverse order (SPR_ITLBTR_SXE
+ * 0x40, SPR_ITLBTR_UXE 0x80).  Verify against the OR1K architecture
+ * manual / generator source before relying on these two offsets. */
+#define OR1K_SPR_IMMU_ITLBW_TR_UXE_OFFSET 6
+#define OR1K_SPR_IMMU_ITLBW_TR_UXE_MASK   0x00000040
+#define OR1K_SPR_IMMU_ITLBW_TR_UXE_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_IMMU_ITLBW_TR_UXE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+/* Supervisor Execute Enable */
+#define OR1K_SPR_IMMU_ITLBW_TR_SXE_OFFSET 7
+#define OR1K_SPR_IMMU_ITLBW_TR_SXE_MASK   0x00000080
+#define OR1K_SPR_IMMU_ITLBW_TR_SXE_GET(X) (((X) >> 7) & 0x1)
+#define OR1K_SPR_IMMU_ITLBW_TR_SXE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff7f)) | ((!!(Y)) << 7))
+
+/* Physical Page Number */
+#define OR1K_SPR_IMMU_ITLBW_TR_PPN_LSB    13
+#define OR1K_SPR_IMMU_ITLBW_TR_PPN_MSB    31
+#define OR1K_SPR_IMMU_ITLBW_TR_PPN_BITS   19
+#define OR1K_SPR_IMMU_ITLBW_TR_PPN_MASK   OR1K_UNSIGNED(0xffffe000)
+#define OR1K_SPR_IMMU_ITLBW_TR_PPN_GET(X) (((X) >> 13) & OR1K_UNSIGNED(0x0007ffff))
+#define OR1K_SPR_IMMU_ITLBW_TR_PPN_SET(X, Y) (((X) & OR1K_UNSIGNED(0x00001fff)) | ((Y) << 13))
+
+
+/********************/
+/* Data Cache Group */
+/********************/
+/* SPR group 3: control register plus per-block maintenance registers
+ * (the DCB*R registers take an address when written). */
+#define OR1K_SPR_DCACHE_GROUP 0x03
+
+/* Data Cache Control Register */
+#define OR1K_SPR_DCACHE_DCCR_INDEX OR1K_UNSIGNED(0x000)
+#define OR1K_SPR_DCACHE_DCCR_ADDR  OR1K_UNSIGNED(0x1800)
+
+/* Enable Ways */
+#define OR1K_SPR_DCACHE_DCCR_EW_LSB    0
+#define OR1K_SPR_DCACHE_DCCR_EW_MSB    7
+#define OR1K_SPR_DCACHE_DCCR_EW_BITS   8
+#define OR1K_SPR_DCACHE_DCCR_EW_MASK   OR1K_UNSIGNED(0x000000ff)
+#define OR1K_SPR_DCACHE_DCCR_EW_GET(X) (((X) >> 0) & OR1K_UNSIGNED(0x000000ff))
+#define OR1K_SPR_DCACHE_DCCR_EW_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff00)) | ((Y) << 0))
+
+
+/* Data Cache Block Prefetch Register */
+#define OR1K_SPR_DCACHE_DCBPR_INDEX OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_DCACHE_DCBPR_ADDR  OR1K_UNSIGNED(0x1801)
+
+
+/* Data Cache Block Flush Register */
+#define OR1K_SPR_DCACHE_DCBFR_INDEX OR1K_UNSIGNED(0x002)
+#define OR1K_SPR_DCACHE_DCBFR_ADDR  OR1K_UNSIGNED(0x1802)
+
+
+/* Data Cache Block Invalidate Register */
+#define OR1K_SPR_DCACHE_DCBIR_INDEX OR1K_UNSIGNED(0x003)
+#define OR1K_SPR_DCACHE_DCBIR_ADDR  OR1K_UNSIGNED(0x1803)
+
+
+/* Data Cache Block Write-back Register */
+#define OR1K_SPR_DCACHE_DCBWR_INDEX OR1K_UNSIGNED(0x004)
+#define OR1K_SPR_DCACHE_DCBWR_ADDR  OR1K_UNSIGNED(0x1804)
+
+
+/* Data Cache Block Lock Register */
+#define OR1K_SPR_DCACHE_DCBLR_INDEX OR1K_UNSIGNED(0x005)
+#define OR1K_SPR_DCACHE_DCBLR_ADDR  OR1K_UNSIGNED(0x1805)
+
+
+/***************************/
+/* Instruction Cache Group */
+/***************************/
+/* SPR group 4: mirrors the data-cache group, minus flush/write-back
+ * (an instruction cache is read-only, so only invalidate applies). */
+#define OR1K_SPR_ICACHE_GROUP 0x04
+
+/* Instruction Cache Control Register */
+#define OR1K_SPR_ICACHE_ICCR_INDEX OR1K_UNSIGNED(0x000)
+#define OR1K_SPR_ICACHE_ICCR_ADDR  OR1K_UNSIGNED(0x2000)
+
+/* Enable Ways */
+#define OR1K_SPR_ICACHE_ICCR_EW_LSB    0
+#define OR1K_SPR_ICACHE_ICCR_EW_MSB    7
+#define OR1K_SPR_ICACHE_ICCR_EW_BITS   8
+#define OR1K_SPR_ICACHE_ICCR_EW_MASK   OR1K_UNSIGNED(0x000000ff)
+#define OR1K_SPR_ICACHE_ICCR_EW_GET(X) (((X) >> 0) & OR1K_UNSIGNED(0x000000ff))
+#define OR1K_SPR_ICACHE_ICCR_EW_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff00)) | ((Y) << 0))
+
+
+/* Instruction Cache Block Prefetch Register */
+#define OR1K_SPR_ICACHE_ICBPR_INDEX OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_ICACHE_ICBPR_ADDR  OR1K_UNSIGNED(0x2001)
+
+
+/* Instruction Cache Block Invalidate Register */
+#define OR1K_SPR_ICACHE_ICBIR_INDEX OR1K_UNSIGNED(0x002)
+#define OR1K_SPR_ICACHE_ICBIR_ADDR  OR1K_UNSIGNED(0x2002)
+
+
+/* Instruction Cache Block Lock Register */
+#define OR1K_SPR_ICACHE_ICBLR_INDEX OR1K_UNSIGNED(0x003)
+#define OR1K_SPR_ICACHE_ICBLR_ADDR  OR1K_UNSIGNED(0x2003)
+
+
+/*********************************/
+/* Multiply and Accumulate Group */
+/*********************************/
+/* SPR group 5: 64-bit MAC accumulator exposed as a LO/HI register pair.
+ * Note the first in-group index used is 1, not 0. */
+#define OR1K_SPR_MAC_GROUP 0x05
+
+/* MAC Result Low Word */
+#define OR1K_SPR_MAC_MACLO_INDEX OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_MAC_MACLO_ADDR  OR1K_UNSIGNED(0x2801)
+
+
+/* MAC Result High Word */
+#define OR1K_SPR_MAC_MACHI_INDEX OR1K_UNSIGNED(0x002)
+#define OR1K_SPR_MAC_MACHI_ADDR  OR1K_UNSIGNED(0x2802)
+
+
+/***************/
+/* Debug Group */
+/***************/
+/* SPR group 6: 8 watchpoint value/control register pairs (DVR/DCR),
+ * mode registers (DMR1/DMR2), watchpoint counters (DCWR) and the stop
+ * register (DSR) selecting which exceptions stall into debug mode. */
+#define OR1K_SPR_DEBUG_GROUP 0x06
+
+/* Debug Value Registers */
+#define OR1K_SPR_DEBUG_DVR_BASE     OR1K_UNSIGNED(0x000)
+#define OR1K_SPR_DEBUG_DVR_COUNT    OR1K_UNSIGNED(0x008)
+#define OR1K_SPR_DEBUG_DVR_STEP     OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_DEBUG_DVR_INDEX(N) (OR1K_SPR_DEBUG_DVR_BASE + ((N) * OR1K_SPR_DEBUG_DVR_STEP))
+#define OR1K_SPR_DEBUG_DVR_ADDR(N)  ((OR1K_SPR_DEBUG_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_DEBUG_DVR_INDEX(N))
+
+
+/* Debug Control Registers */
+#define OR1K_SPR_DEBUG_DCR_BASE     OR1K_UNSIGNED(0x008)
+#define OR1K_SPR_DEBUG_DCR_COUNT    OR1K_UNSIGNED(0x008)
+#define OR1K_SPR_DEBUG_DCR_STEP     OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_DEBUG_DCR_INDEX(N) (OR1K_SPR_DEBUG_DCR_BASE + ((N) * OR1K_SPR_DEBUG_DCR_STEP))
+#define OR1K_SPR_DEBUG_DCR_ADDR(N)  ((OR1K_SPR_DEBUG_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_DEBUG_DCR_INDEX(N))
+
+/* DVR/DCR Present */
+#define OR1K_SPR_DEBUG_DCR_DP_OFFSET 0
+#define OR1K_SPR_DEBUG_DCR_DP_MASK   0x00000001
+#define OR1K_SPR_DEBUG_DCR_DP_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_DEBUG_DCR_DP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Compare Condition */
+#define OR1K_SPR_DEBUG_DCR_CC_LSB    1
+#define OR1K_SPR_DEBUG_DCR_CC_MSB    3
+#define OR1K_SPR_DEBUG_DCR_CC_BITS   3
+#define OR1K_SPR_DEBUG_DCR_CC_MASK   OR1K_UNSIGNED(0x0000000e)
+#define OR1K_SPR_DEBUG_DCR_CC_GET(X) (((X) >> 1) & OR1K_UNSIGNED(0x00000007))
+#define OR1K_SPR_DEBUG_DCR_CC_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff1)) | ((Y) << 1))
+
+/* Enumerated values for the CC field above. */
+/* Masked */
+#define OR1K_SPR_DEBUG_DCR_CC_MASKED 0
+/* Equal */
+#define OR1K_SPR_DEBUG_DCR_CC_EQ 1
+/* Less than */
+#define OR1K_SPR_DEBUG_DCR_CC_LT 2
+/* Less than or equal */
+#define OR1K_SPR_DEBUG_DCR_CC_LTE 3
+/* Greater than */
+#define OR1K_SPR_DEBUG_DCR_CC_GT 4
+/* Greater than or equal */
+#define OR1K_SPR_DEBUG_DCR_CC_GTE 5
+/* Not equal */
+#define OR1K_SPR_DEBUG_DCR_CC_NEQ 6
+/* Signed Comparison */
+#define OR1K_SPR_DEBUG_DCR_SC_OFFSET 4
+#define OR1K_SPR_DEBUG_DCR_SC_MASK   0x00000010
+#define OR1K_SPR_DEBUG_DCR_SC_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_DEBUG_DCR_SC_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* Compare To */
+#define OR1K_SPR_DEBUG_DCR_CT_LSB    5
+#define OR1K_SPR_DEBUG_DCR_CT_MSB    7
+#define OR1K_SPR_DEBUG_DCR_CT_BITS   3
+#define OR1K_SPR_DEBUG_DCR_CT_MASK   OR1K_UNSIGNED(0x000000e0)
+#define OR1K_SPR_DEBUG_DCR_CT_GET(X) (((X) >> 5) & OR1K_UNSIGNED(0x00000007))
+#define OR1K_SPR_DEBUG_DCR_CT_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff1f)) | ((Y) << 5))
+
+/* Enumerated values for the CT field above (EA = effective address). */
+/* Comparison disabled */
+#define OR1K_SPR_DEBUG_DCR_CT_DISABLED 0
+/* Instruction fetch EA */
+#define OR1K_SPR_DEBUG_DCR_CT_FEA 1
+/* Load EA */
+#define OR1K_SPR_DEBUG_DCR_CT_LEA 2
+/* Store EA */
+#define OR1K_SPR_DEBUG_DCR_CT_SEA 3
+/* Load data */
+#define OR1K_SPR_DEBUG_DCR_CT_LD 4
+/* Store data */
+#define OR1K_SPR_DEBUG_DCR_CT_SD 5
+/* Load/store EA */
+#define OR1K_SPR_DEBUG_DCR_CT_LSEA 6
+/* Load/store data */
+#define OR1K_SPR_DEBUG_DCR_CT_LSD 7
+
+/* Debug Mode Register 1 */
+#define OR1K_SPR_DEBUG_DMR1_INDEX OR1K_UNSIGNED(0x010)
+#define OR1K_SPR_DEBUG_DMR1_ADDR  OR1K_UNSIGNED(0x3010)
+
+/* Chain Watchpoint 0 */
+#define OR1K_SPR_DEBUG_DMR1_CW0_LSB    0
+#define OR1K_SPR_DEBUG_DMR1_CW0_MSB    1
+#define OR1K_SPR_DEBUG_DMR1_CW0_BITS   2
+#define OR1K_SPR_DEBUG_DMR1_CW0_MASK   OR1K_UNSIGNED(0x00000003)
+#define OR1K_SPR_DEBUG_DMR1_CW0_GET(X) (((X) >> 0) & OR1K_UNSIGNED(0x00000003))
+#define OR1K_SPR_DEBUG_DMR1_CW0_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffc)) | ((Y) << 0))
+
+/* Chain Watchpoint 1 */
+#define OR1K_SPR_DEBUG_DMR1_CW1_LSB    2
+#define OR1K_SPR_DEBUG_DMR1_CW1_MSB    3
+#define OR1K_SPR_DEBUG_DMR1_CW1_BITS   2
+#define OR1K_SPR_DEBUG_DMR1_CW1_MASK   OR1K_UNSIGNED(0x0000000c)
+#define OR1K_SPR_DEBUG_DMR1_CW1_GET(X) (((X) >> 2) & OR1K_UNSIGNED(0x00000003))
+#define OR1K_SPR_DEBUG_DMR1_CW1_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff3)) | ((Y) << 2))
+
+/* Chain Watchpoint 2 */
+#define OR1K_SPR_DEBUG_DMR1_CW2_LSB    4
+#define OR1K_SPR_DEBUG_DMR1_CW2_MSB    5
+#define OR1K_SPR_DEBUG_DMR1_CW2_BITS   2
+#define OR1K_SPR_DEBUG_DMR1_CW2_MASK   OR1K_UNSIGNED(0x00000030)
+#define OR1K_SPR_DEBUG_DMR1_CW2_GET(X) (((X) >> 4) & OR1K_UNSIGNED(0x00000003))
+#define OR1K_SPR_DEBUG_DMR1_CW2_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffcf)) | ((Y) << 4))
+
+/* Chain Watchpoint 3 */
+#define OR1K_SPR_DEBUG_DMR1_CW3_LSB    6
+#define OR1K_SPR_DEBUG_DMR1_CW3_MSB    7
+#define OR1K_SPR_DEBUG_DMR1_CW3_BITS   2
+#define OR1K_SPR_DEBUG_DMR1_CW3_MASK   OR1K_UNSIGNED(0x000000c0)
+#define OR1K_SPR_DEBUG_DMR1_CW3_GET(X) (((X) >> 6) & OR1K_UNSIGNED(0x00000003))
+#define OR1K_SPR_DEBUG_DMR1_CW3_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff3f)) | ((Y) << 6))
+
+/* Chain Watchpoint 4 */
+/* NOTE(review): unlike every other CWx (2 bits), CW4 is declared as a
+ * single bit at position 9, leaving bit 8 unused; CW5 then resumes at
+ * bit 10 as if CW4 occupied 8-9.  Looks like a generator-input quirk —
+ * confirm against the OR1K architecture manual before using CW4. */
+#define OR1K_SPR_DEBUG_DMR1_CW4_LSB    9
+#define OR1K_SPR_DEBUG_DMR1_CW4_MSB    9
+#define OR1K_SPR_DEBUG_DMR1_CW4_BITS   1
+#define OR1K_SPR_DEBUG_DMR1_CW4_MASK   OR1K_UNSIGNED(0x00000200)
+#define OR1K_SPR_DEBUG_DMR1_CW4_GET(X) (((X) >> 9) & OR1K_UNSIGNED(0x00000001))
+#define OR1K_SPR_DEBUG_DMR1_CW4_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((Y) << 9))
+
+/* Chain Watchpoint 5 */
+#define OR1K_SPR_DEBUG_DMR1_CW5_LSB    10
+#define OR1K_SPR_DEBUG_DMR1_CW5_MSB    11
+#define OR1K_SPR_DEBUG_DMR1_CW5_BITS   2
+#define OR1K_SPR_DEBUG_DMR1_CW5_MASK   OR1K_UNSIGNED(0x00000c00)
+#define OR1K_SPR_DEBUG_DMR1_CW5_GET(X) (((X) >> 10) & OR1K_UNSIGNED(0x00000003))
+#define OR1K_SPR_DEBUG_DMR1_CW5_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffff3ff)) | ((Y) << 10))
+
+/* Chain Watchpoint 6 */
+#define OR1K_SPR_DEBUG_DMR1_CW6_LSB    12
+#define OR1K_SPR_DEBUG_DMR1_CW6_MSB    13
+#define OR1K_SPR_DEBUG_DMR1_CW6_BITS   2
+#define OR1K_SPR_DEBUG_DMR1_CW6_MASK   OR1K_UNSIGNED(0x00003000)
+#define OR1K_SPR_DEBUG_DMR1_CW6_GET(X) (((X) >> 12) & OR1K_UNSIGNED(0x00000003))
+#define OR1K_SPR_DEBUG_DMR1_CW6_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffcfff)) | ((Y) << 12))
+
+/* Chain Watchpoint 7 */
+#define OR1K_SPR_DEBUG_DMR1_CW7_LSB    14
+#define OR1K_SPR_DEBUG_DMR1_CW7_MSB    15
+#define OR1K_SPR_DEBUG_DMR1_CW7_BITS   2
+#define OR1K_SPR_DEBUG_DMR1_CW7_MASK   OR1K_UNSIGNED(0x0000c000)
+#define OR1K_SPR_DEBUG_DMR1_CW7_GET(X) (((X) >> 14) & OR1K_UNSIGNED(0x00000003))
+#define OR1K_SPR_DEBUG_DMR1_CW7_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffff3fff)) | ((Y) << 14))
+
+/* Chain Watchpoint 8 */
+#define OR1K_SPR_DEBUG_DMR1_CW8_LSB    16
+#define OR1K_SPR_DEBUG_DMR1_CW8_MSB    17
+#define OR1K_SPR_DEBUG_DMR1_CW8_BITS   2
+#define OR1K_SPR_DEBUG_DMR1_CW8_MASK   OR1K_UNSIGNED(0x00030000)
+#define OR1K_SPR_DEBUG_DMR1_CW8_GET(X) (((X) >> 16) & OR1K_UNSIGNED(0x00000003))
+#define OR1K_SPR_DEBUG_DMR1_CW8_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffcffff)) | ((Y) << 16))
+
+/* Chain Watchpoint 9 */
+#define OR1K_SPR_DEBUG_DMR1_CW9_LSB    18
+#define OR1K_SPR_DEBUG_DMR1_CW9_MSB    19
+#define OR1K_SPR_DEBUG_DMR1_CW9_BITS   2
+#define OR1K_SPR_DEBUG_DMR1_CW9_MASK   OR1K_UNSIGNED(0x000c0000)
+#define OR1K_SPR_DEBUG_DMR1_CW9_GET(X) (((X) >> 18) & OR1K_UNSIGNED(0x00000003))
+#define OR1K_SPR_DEBUG_DMR1_CW9_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfff3ffff)) | ((Y) << 18))
+
+/* Single-step Trace */
+#define OR1K_SPR_DEBUG_DMR1_ST_OFFSET 22
+#define OR1K_SPR_DEBUG_DMR1_ST_MASK   0x00400000
+#define OR1K_SPR_DEBUG_DMR1_ST_GET(X) (((X) >> 22) & 0x1)
+#define OR1K_SPR_DEBUG_DMR1_ST_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffbfffff)) | ((!!(Y)) << 22))
+
+/* Branch Trace */
+#define OR1K_SPR_DEBUG_DMR1_BT_OFFSET 23
+#define OR1K_SPR_DEBUG_DMR1_BT_MASK   0x00800000
+#define OR1K_SPR_DEBUG_DMR1_BT_GET(X) (((X) >> 23) & 0x1)
+#define OR1K_SPR_DEBUG_DMR1_BT_SET(X, Y) (((X) & OR1K_UNSIGNED(0xff7fffff)) | ((!!(Y)) << 23))
+
+
+/* Debug Mode Register 2 */
+#define OR1K_SPR_DEBUG_DMR2_INDEX OR1K_UNSIGNED(0x011)
+#define OR1K_SPR_DEBUG_DMR2_ADDR  OR1K_UNSIGNED(0x3011)
+
+/* Watchpoint Counter Enable 0 */
+#define OR1K_SPR_DEBUG_DMR2_WCE0_OFFSET 0
+#define OR1K_SPR_DEBUG_DMR2_WCE0_MASK   0x00000001
+#define OR1K_SPR_DEBUG_DMR2_WCE0_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_DEBUG_DMR2_WCE0_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Watchpoint Counter Enable 1 */
+#define OR1K_SPR_DEBUG_DMR2_WCE1_OFFSET 1
+#define OR1K_SPR_DEBUG_DMR2_WCE1_MASK   0x00000002
+#define OR1K_SPR_DEBUG_DMR2_WCE1_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_DEBUG_DMR2_WCE1_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* Assign Watchpoints to Counter */
+#define OR1K_SPR_DEBUG_DMR2_AWTC_LSB    2
+#define OR1K_SPR_DEBUG_DMR2_AWTC_MSB    11
+#define OR1K_SPR_DEBUG_DMR2_AWTC_BITS   10
+#define OR1K_SPR_DEBUG_DMR2_AWTC_MASK   OR1K_UNSIGNED(0x00000ffc)
+#define OR1K_SPR_DEBUG_DMR2_AWTC_GET(X) (((X) >> 2) & OR1K_UNSIGNED(0x000003ff))
+#define OR1K_SPR_DEBUG_DMR2_AWTC_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffff003)) | ((Y) << 2))
+
+/* Watchpoints Generating Breakpoint */
+#define OR1K_SPR_DEBUG_DMR2_WGB_LSB    12
+#define OR1K_SPR_DEBUG_DMR2_WGB_MSB    21
+#define OR1K_SPR_DEBUG_DMR2_WGB_BITS   10
+#define OR1K_SPR_DEBUG_DMR2_WGB_MASK   OR1K_UNSIGNED(0x003ff000)
+#define OR1K_SPR_DEBUG_DMR2_WGB_GET(X) (((X) >> 12) & OR1K_UNSIGNED(0x000003ff))
+#define OR1K_SPR_DEBUG_DMR2_WGB_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffc00fff)) | ((Y) << 12))
+
+/* Watchpoints Breakpoint Status */
+#define OR1K_SPR_DEBUG_DMR2_WBS_LSB    22
+#define OR1K_SPR_DEBUG_DMR2_WBS_MSB    31
+#define OR1K_SPR_DEBUG_DMR2_WBS_BITS   10
+#define OR1K_SPR_DEBUG_DMR2_WBS_MASK   OR1K_UNSIGNED(0xffc00000)
+#define OR1K_SPR_DEBUG_DMR2_WBS_GET(X) (((X) >> 22) & OR1K_UNSIGNED(0x000003ff))
+#define OR1K_SPR_DEBUG_DMR2_WBS_SET(X, Y) (((X) & OR1K_UNSIGNED(0x003fffff)) | ((Y) << 22))
+
+
+/* Debug Watchpoint Counter Registers */
+#define OR1K_SPR_DEBUG_DCWR_BASE     OR1K_UNSIGNED(0x012)
+#define OR1K_SPR_DEBUG_DCWR_COUNT    OR1K_UNSIGNED(0x002)
+#define OR1K_SPR_DEBUG_DCWR_STEP     OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_DEBUG_DCWR_INDEX(N) (OR1K_SPR_DEBUG_DCWR_BASE + ((N) * OR1K_SPR_DEBUG_DCWR_STEP))
+#define OR1K_SPR_DEBUG_DCWR_ADDR(N)  ((OR1K_SPR_DEBUG_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_DEBUG_DCWR_INDEX(N))
+
+
+/* Debug Stop Register */
+#define OR1K_SPR_DEBUG_DSR_INDEX OR1K_UNSIGNED(0x014)
+#define OR1K_SPR_DEBUG_DSR_ADDR  OR1K_UNSIGNED(0x3014)
+
+/* Reset Exception */
+#define OR1K_SPR_DEBUG_DSR_RSTE_OFFSET 0
+#define OR1K_SPR_DEBUG_DSR_RSTE_MASK   0x00000001
+#define OR1K_SPR_DEBUG_DSR_RSTE_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_DEBUG_DSR_RSTE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Bus Error Exception */
+#define OR1K_SPR_DEBUG_DSR_BUSEE_OFFSET 1
+#define OR1K_SPR_DEBUG_DSR_BUSEE_MASK   0x00000002
+#define OR1K_SPR_DEBUG_DSR_BUSEE_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_DEBUG_DSR_BUSEE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* Data Page Fault Exception */
+#define OR1K_SPR_DEBUG_DSR_DPFE_OFFSET 2
+#define OR1K_SPR_DEBUG_DSR_DPFE_MASK   0x00000004
+#define OR1K_SPR_DEBUG_DSR_DPFE_GET(X) (((X) >> 2) & 0x1)
+#define OR1K_SPR_DEBUG_DSR_DPFE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffb)) | ((!!(Y)) << 2))
+
+/* Instruction Page Fault Exception */
+#define OR1K_SPR_DEBUG_DSR_IPFE_OFFSET 3
+#define OR1K_SPR_DEBUG_DSR_IPFE_MASK   0x00000008
+#define OR1K_SPR_DEBUG_DSR_IPFE_GET(X) (((X) >> 3) & 0x1)
+#define OR1K_SPR_DEBUG_DSR_IPFE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff7)) | ((!!(Y)) << 3))
+
+/* Tick Timer Exception */
+#define OR1K_SPR_DEBUG_DSR_TTE_OFFSET 4
+#define OR1K_SPR_DEBUG_DSR_TTE_MASK   0x00000010
+#define OR1K_SPR_DEBUG_DSR_TTE_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_DEBUG_DSR_TTE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* Alignment Exception */
+#define OR1K_SPR_DEBUG_DSR_AE_OFFSET 5
+#define OR1K_SPR_DEBUG_DSR_AE_MASK   0x00000020
+#define OR1K_SPR_DEBUG_DSR_AE_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_DEBUG_DSR_AE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* Illegal Instruction Exception */
+#define OR1K_SPR_DEBUG_DSR_IIE_OFFSET 6
+#define OR1K_SPR_DEBUG_DSR_IIE_MASK   0x00000040
+#define OR1K_SPR_DEBUG_DSR_IIE_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_DEBUG_DSR_IIE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+/* Interrupt Exception */
+#define OR1K_SPR_DEBUG_DSR_INTE_OFFSET 7
+#define OR1K_SPR_DEBUG_DSR_INTE_MASK   0x00000080
+#define OR1K_SPR_DEBUG_DSR_INTE_GET(X) (((X) >> 7) & 0x1)
+#define OR1K_SPR_DEBUG_DSR_INTE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff7f)) | ((!!(Y)) << 7))
+
+/* DTLB Miss Exception */
+#define OR1K_SPR_DEBUG_DSR_DME_OFFSET 8
+#define OR1K_SPR_DEBUG_DSR_DME_MASK   0x00000100
+#define OR1K_SPR_DEBUG_DSR_DME_GET(X) (((X) >> 8) & 0x1)
+#define OR1K_SPR_DEBUG_DSR_DME_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffeff)) | ((!!(Y)) << 8))
+
+/* ITLB Miss Exception */
+#define OR1K_SPR_DEBUG_DSR_IME_OFFSET 9
+#define OR1K_SPR_DEBUG_DSR_IME_MASK   0x00000200
+#define OR1K_SPR_DEBUG_DSR_IME_GET(X) (((X) >> 9) & 0x1)
+#define OR1K_SPR_DEBUG_DSR_IME_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((!!(Y)) << 9))
+
+/* Range Exception */
+#define OR1K_SPR_DEBUG_DSR_RE_OFFSET 10
+#define OR1K_SPR_DEBUG_DSR_RE_MASK   0x00000400
+#define OR1K_SPR_DEBUG_DSR_RE_GET(X) (((X) >> 10) & 0x1)
+#define OR1K_SPR_DEBUG_DSR_RE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffbff)) | ((!!(Y)) << 10))
+
+/* System Call Exception */
+#define OR1K_SPR_DEBUG_DSR_SCE_OFFSET 11
+#define OR1K_SPR_DEBUG_DSR_SCE_MASK   0x00000800
+#define OR1K_SPR_DEBUG_DSR_SCE_GET(X) (((X) >> 11) & 0x1)
+#define OR1K_SPR_DEBUG_DSR_SCE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffff7ff)) | ((!!(Y)) << 11))
+
+/* Floating Point Exception */
+#define OR1K_SPR_DEBUG_DSR_FPE_OFFSET 12
+#define OR1K_SPR_DEBUG_DSR_FPE_MASK   0x00001000
+#define OR1K_SPR_DEBUG_DSR_FPE_GET(X) (((X) >> 12) & 0x1)
+#define OR1K_SPR_DEBUG_DSR_FPE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffefff)) | ((!!(Y)) << 12))
+
+/* Trap Exception */
+#define OR1K_SPR_DEBUG_DSR_TE_OFFSET 13
+#define OR1K_SPR_DEBUG_DSR_TE_MASK   0x00002000
+#define OR1K_SPR_DEBUG_DSR_TE_GET(X) (((X) >> 13) & 0x1)
+#define OR1K_SPR_DEBUG_DSR_TE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffdfff)) | ((!!(Y)) << 13))
+
+
+/* Debug Reason Register */
+#define OR1K_SPR_DEBUG_DRR_INDEX OR1K_UNSIGNED(0x015)
+#define OR1K_SPR_DEBUG_DRR_ADDR  OR1K_UNSIGNED(0x3015)
+
+/* Reset Exception */
+#define OR1K_SPR_DEBUG_DRR_RSTE_OFFSET 0
+#define OR1K_SPR_DEBUG_DRR_RSTE_MASK   0x00000001
+#define OR1K_SPR_DEBUG_DRR_RSTE_GET(X) (((X) >> 0) & 0x1)
+#define OR1K_SPR_DEBUG_DRR_RSTE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffe)) | ((!!(Y)) << 0))
+
+/* Bus Error Exception */
+#define OR1K_SPR_DEBUG_DRR_BUSEE_OFFSET 1
+#define OR1K_SPR_DEBUG_DRR_BUSEE_MASK   0x00000002
+#define OR1K_SPR_DEBUG_DRR_BUSEE_GET(X) (((X) >> 1) & 0x1)
+#define OR1K_SPR_DEBUG_DRR_BUSEE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffd)) | ((!!(Y)) << 1))
+
+/* Data Page Fault Exception */
+#define OR1K_SPR_DEBUG_DRR_DPFE_OFFSET 2
+#define OR1K_SPR_DEBUG_DRR_DPFE_MASK   0x00000004
+#define OR1K_SPR_DEBUG_DRR_DPFE_GET(X) (((X) >> 2) & 0x1)
+#define OR1K_SPR_DEBUG_DRR_DPFE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffffb)) | ((!!(Y)) << 2))
+
+/* Instruction Page Fault Exception */
+#define OR1K_SPR_DEBUG_DRR_IPFE_OFFSET 3
+#define OR1K_SPR_DEBUG_DRR_IPFE_MASK   0x00000008
+#define OR1K_SPR_DEBUG_DRR_IPFE_GET(X) (((X) >> 3) & 0x1)
+#define OR1K_SPR_DEBUG_DRR_IPFE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffff7)) | ((!!(Y)) << 3))
+
+/* Tick Timer Exception */
+#define OR1K_SPR_DEBUG_DRR_TTE_OFFSET 4
+#define OR1K_SPR_DEBUG_DRR_TTE_MASK   0x00000010
+#define OR1K_SPR_DEBUG_DRR_TTE_GET(X) (((X) >> 4) & 0x1)
+#define OR1K_SPR_DEBUG_DRR_TTE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffef)) | ((!!(Y)) << 4))
+
+/* Alignment Exception */
+#define OR1K_SPR_DEBUG_DRR_AE_OFFSET 5
+#define OR1K_SPR_DEBUG_DRR_AE_MASK   0x00000020
+#define OR1K_SPR_DEBUG_DRR_AE_GET(X) (((X) >> 5) & 0x1)
+#define OR1K_SPR_DEBUG_DRR_AE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffdf)) | ((!!(Y)) << 5))
+
+/* Illegal Instruction Exception */
+#define OR1K_SPR_DEBUG_DRR_IIE_OFFSET 6
+#define OR1K_SPR_DEBUG_DRR_IIE_MASK   0x00000040
+#define OR1K_SPR_DEBUG_DRR_IIE_GET(X) (((X) >> 6) & 0x1)
+#define OR1K_SPR_DEBUG_DRR_IIE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffffbf)) | ((!!(Y)) << 6))
+
+/* Interrupt Exception */
+#define OR1K_SPR_DEBUG_DRR_INTE_OFFSET 7
+#define OR1K_SPR_DEBUG_DRR_INTE_MASK   0x00000080
+#define OR1K_SPR_DEBUG_DRR_INTE_GET(X) (((X) >> 7) & 0x1)
+#define OR1K_SPR_DEBUG_DRR_INTE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffff7f)) | ((!!(Y)) << 7))
+
+/* DTLB Miss Exception */
+#define OR1K_SPR_DEBUG_DRR_DME_OFFSET 8
+#define OR1K_SPR_DEBUG_DRR_DME_MASK   0x00000100
+#define OR1K_SPR_DEBUG_DRR_DME_GET(X) (((X) >> 8) & 0x1)
+#define OR1K_SPR_DEBUG_DRR_DME_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffeff)) | ((!!(Y)) << 8))
+
+/* ITLB Miss Exception */
+#define OR1K_SPR_DEBUG_DRR_IME_OFFSET 9
+#define OR1K_SPR_DEBUG_DRR_IME_MASK   0x00000200
+#define OR1K_SPR_DEBUG_DRR_IME_GET(X) (((X) >> 9) & 0x1)
+#define OR1K_SPR_DEBUG_DRR_IME_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffdff)) | ((!!(Y)) << 9))
+
+/* Range Exception */
+#define OR1K_SPR_DEBUG_DRR_RE_OFFSET 10
+#define OR1K_SPR_DEBUG_DRR_RE_MASK   0x00000400
+#define OR1K_SPR_DEBUG_DRR_RE_GET(X) (((X) >> 10) & 0x1)
+#define OR1K_SPR_DEBUG_DRR_RE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffffbff)) | ((!!(Y)) << 10))
+
+/* System Call Exception */
+#define OR1K_SPR_DEBUG_DRR_SCE_OFFSET 11
+#define OR1K_SPR_DEBUG_DRR_SCE_MASK   0x00000800
+#define OR1K_SPR_DEBUG_DRR_SCE_GET(X) (((X) >> 11) & 0x1)
+#define OR1K_SPR_DEBUG_DRR_SCE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xfffff7ff)) | ((!!(Y)) << 11))
+
+/* Floating Point Exception */
+#define OR1K_SPR_DEBUG_DRR_FPE_OFFSET 12
+#define OR1K_SPR_DEBUG_DRR_FPE_MASK   0x00001000
+#define OR1K_SPR_DEBUG_DRR_FPE_GET(X) (((X) >> 12) & 0x1)
+#define OR1K_SPR_DEBUG_DRR_FPE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffefff)) | ((!!(Y)) << 12))
+
+/* Trap Exception */
+#define OR1K_SPR_DEBUG_DRR_TE_OFFSET 13
+#define OR1K_SPR_DEBUG_DRR_TE_MASK   0x00002000
+#define OR1K_SPR_DEBUG_DRR_TE_GET(X) (((X) >> 13) & 0x1)
+#define OR1K_SPR_DEBUG_DRR_TE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xffffdfff)) | ((!!(Y)) << 13))
+
+
+/******************************/
+/* Performance Counters Group */
+/******************************/
+#define OR1K_SPR_PERF_GROUP 0x07
+
+/* Performance Counters Count Registers */
+#define OR1K_SPR_PERF_PCCR_BASE     OR1K_UNSIGNED(0x000)
+#define OR1K_SPR_PERF_PCCR_COUNT    OR1K_UNSIGNED(0x008)
+#define OR1K_SPR_PERF_PCCR_STEP     OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_PERF_PCCR_INDEX(N) (OR1K_SPR_PERF_PCCR_BASE + ((N) * OR1K_SPR_PERF_PCCR_STEP))
+#define OR1K_SPR_PERF_PCCR_ADDR(N)  ((OR1K_SPR_PERF_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_PERF_PCCR_INDEX(N))
+
+
+/* Performance Counters Mode Registers */
+#define OR1K_SPR_PERF_PCMR_BASE     OR1K_UNSIGNED(0x008)
+#define OR1K_SPR_PERF_PCMR_COUNT    OR1K_UNSIGNED(0x008)
+#define OR1K_SPR_PERF_PCMR_STEP     OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_PERF_PCMR_INDEX(N) (OR1K_SPR_PERF_PCMR_BASE + ((N) * OR1K_SPR_PERF_PCMR_STEP))
+#define OR1K_SPR_PERF_PCMR_ADDR(N)  ((OR1K_SPR_PERF_GROUP << OR1K_SPR_GROUP_LSB) | OR1K_SPR_PERF_PCMR_INDEX(N))
+
+
+/**************************/
+/* Power Management Group */
+/**************************/
+#define OR1K_SPR_POWER_GROUP 0x08
+
+/* Power Management Register */
+#define OR1K_SPR_POWER_PMR_INDEX OR1K_UNSIGNED(0x000)
+#define OR1K_SPR_POWER_PMR_ADDR  OR1K_UNSIGNED(0x4000)
+
+
+/*******************************************/
+/* Programmable Interrupt Controller Group */
+/*******************************************/
+#define OR1K_SPR_PIC_GROUP 0x09
+
+/* PIC Mask Register */
+#define OR1K_SPR_PIC_PICMR_INDEX OR1K_UNSIGNED(0x000)
+#define OR1K_SPR_PIC_PICMR_ADDR  OR1K_UNSIGNED(0x4800)
+
+
+/* PIC Status Register */
+#define OR1K_SPR_PIC_PICSR_INDEX OR1K_UNSIGNED(0x002)
+#define OR1K_SPR_PIC_PICSR_ADDR  OR1K_UNSIGNED(0x4802)
+
+
+/********************/
+/* Tick Timer Group */
+/********************/
+#define OR1K_SPR_TICK_GROUP 0x0a
+
+/* Tick Timer Mode Register */
+#define OR1K_SPR_TICK_TTMR_INDEX OR1K_UNSIGNED(0x000)
+#define OR1K_SPR_TICK_TTMR_ADDR  OR1K_UNSIGNED(0x5000)
+
+/* Time Period */
+#define OR1K_SPR_TICK_TTMR_TP_LSB    0
+#define OR1K_SPR_TICK_TTMR_TP_MSB    27
+#define OR1K_SPR_TICK_TTMR_TP_BITS   28
+#define OR1K_SPR_TICK_TTMR_TP_MASK   OR1K_UNSIGNED(0x0fffffff)
+#define OR1K_SPR_TICK_TTMR_TP_GET(X) (((X) >> 0) & OR1K_UNSIGNED(0x0fffffff))
+#define OR1K_SPR_TICK_TTMR_TP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xf0000000)) | ((Y) << 0))
+
+/* Interrupt Pending */
+#define OR1K_SPR_TICK_TTMR_IP_OFFSET 28
+#define OR1K_SPR_TICK_TTMR_IP_MASK   0x10000000
+#define OR1K_SPR_TICK_TTMR_IP_GET(X) (((X) >> 28) & 0x1)
+#define OR1K_SPR_TICK_TTMR_IP_SET(X, Y) (((X) & OR1K_UNSIGNED(0xefffffff)) | ((!!(Y)) << 28))
+
+/* Interrupt Enable */
+#define OR1K_SPR_TICK_TTMR_IE_OFFSET 29
+#define OR1K_SPR_TICK_TTMR_IE_MASK   0x20000000
+#define OR1K_SPR_TICK_TTMR_IE_GET(X) (((X) >> 29) & 0x1)
+#define OR1K_SPR_TICK_TTMR_IE_SET(X, Y) (((X) & OR1K_UNSIGNED(0xdfffffff)) | ((!!(Y)) << 29))
+
+/* Mode */
+#define OR1K_SPR_TICK_TTMR_MODE_LSB    30
+#define OR1K_SPR_TICK_TTMR_MODE_MSB    31
+#define OR1K_SPR_TICK_TTMR_MODE_BITS   2
+#define OR1K_SPR_TICK_TTMR_MODE_MASK   OR1K_UNSIGNED(0xc0000000)
+#define OR1K_SPR_TICK_TTMR_MODE_GET(X) (((X) >> 30) & OR1K_UNSIGNED(0x00000003))
+#define OR1K_SPR_TICK_TTMR_MODE_SET(X, Y) (((X) & OR1K_UNSIGNED(0x3fffffff)) | ((Y) << 30))
+
+/* Disabled */
+#define OR1K_SPR_TICK_TTMR_MODE_DISABLE 0
+/* Restart counting when TTMR[TP]==TTCR */
+#define OR1K_SPR_TICK_TTMR_MODE_RESTART 1
+/* Stop counting when TTMR[TP]==TTCR */
+#define OR1K_SPR_TICK_TTMR_MODE_STOP 2
+/* Continue counting when TTMR[TP]==TTCR */
+#define OR1K_SPR_TICK_TTMR_MODE_CONTINUE 3
+
+/* Tick Timer Count Register */
+#define OR1K_SPR_TICK_TTCR_INDEX OR1K_UNSIGNED(0x001)
+#define OR1K_SPR_TICK_TTCR_ADDR  OR1K_UNSIGNED(0x5001)
+
+
+/*****************************/
+/* Floating Point Unit Group */
+/*****************************/
+#define OR1K_SPR_FPU_GROUP 0x0b
+
+#endif
diff --git a/src/bsp/lk/arch/or1k/include/arch/or1k.h b/src/bsp/lk/arch/or1k/include/arch/or1k.h
new file mode 100644
index 0000000..50636b9
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/include/arch/or1k.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2015 Stefan Kristiansson
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <arch/or1k-sprs.h>
+
+/* Write _val to SPR number _spr; _spr must be a compile-time constant
+ * (16-bit "K" immediate constraint). */
+#define mtspr(_spr, _val)                       \
+    __asm__ __volatile__(                       \
+        "l.mtspr r0, %1, %0"                    \
+        :                                       \
+        : "K" (_spr), "r" (_val)                \
+    )
+
+/* Write _val to SPR _spr + _off, where _off is a runtime value held in
+ * a register (used e.g. for per-way/per-set TLB registers). */
+#define mtspr_off(_spr, _off, _val)             \
+    __asm__ __volatile__ (                      \
+        "l.mtspr %0, %1, %2"                    \
+        :                                       \
+        : "r" (_off), "r" (_val), "K" (_spr)    \
+    )
+
+/* Read and return (as uint32_t) the value of SPR _spr; _spr must be a
+ * compile-time constant.  Uses a GCC statement expression. */
+#define mfspr(_spr)                             \
+({                                              \
+    uint32_t _val;                              \
+    __asm__ __volatile__(                       \
+        "l.mfspr %0, r0, %1"                    \
+        : "=r"(_val)                            \
+        : "K" (_spr)                            \
+        );                                      \
+    _val;                                       \
+})
+
+/* Read and return the value of SPR _spr + _off (runtime offset). */
+#define mfspr_off(_spr, _off)                   \
+({                                              \
+    uint32_t _val;                              \
+    __asm__ __volatile__ (                      \
+        "l.mfspr %0, %1, %2"                    \
+        : "=r" (_val)                           \
+        : "r" (_off), "K" (_spr)                \
+        );                                      \
+    _val;                                       \
+})
+
+#ifndef ASSEMBLY
+/*
+ * Exception/interrupt frame: GPRs r2-r31 in ascending order, followed
+ * by the saved program counter and status register.
+ * NOTE(review): r0 (hardwired zero) and r1 (stack pointer) are not
+ * stored in the frame -- confirm against the exception entry code in
+ * start.S that builds this layout.
+ */
+struct or1k_iframe {
+    uint32_t r2;
+    uint32_t r3;
+    uint32_t r4;
+    uint32_t r5;
+    uint32_t r6;
+    uint32_t r7;
+    uint32_t r8;
+    uint32_t r9;
+    uint32_t r10;
+    uint32_t r11;
+    uint32_t r12;
+    uint32_t r13;
+    uint32_t r14;
+    uint32_t r15;
+    uint32_t r16;
+    uint32_t r17;
+    uint32_t r18;
+    uint32_t r19;
+    uint32_t r20;
+    uint32_t r21;
+    uint32_t r22;
+    uint32_t r23;
+    uint32_t r24;
+    uint32_t r25;
+    uint32_t r26;
+    uint32_t r27;
+    uint32_t r28;
+    uint32_t r29;
+    uint32_t r30;
+    uint32_t r31;
+    uint32_t pc;  /* saved program counter */
+    uint32_t sr;  /* saved supervision/status register */
+};
+#endif // !ASSEMBLY
diff --git a/src/bsp/lk/arch/or1k/include/arch/or1k/mmu.h b/src/bsp/lk/arch/or1k/include/arch/or1k/mmu.h
new file mode 100644
index 0000000..fc448ba
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/include/arch/or1k/mmu.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2015 Stefan Kristiansson
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+/* Size covered by a single l1 section mapping: 16MB. */
+#define SECTION_SIZE            (16U*1024U*1024U)
+
+/* OR1K page-table-entry flag bits (low 11 bits of a PTE; see the
+ * OpenRISC 1000 architecture manual, MMU chapter). */
+#define OR1K_MMU_PG_FLAGS_MASK  0x7ffU
+#define OR1K_MMU_PG_PRESENT     0x400  /* entry is valid */
+#define OR1K_MMU_PG_L           0x200  /* leaf ("last") entry: section or page */
+#define OR1K_MMU_PG_X           0x100  /* executable */
+#define OR1K_MMU_PG_W           0x080  /* writable */
+#define OR1K_MMU_PG_U           0x040  /* user accessible */
+#define OR1K_MMU_PG_D           0x020  /* dirty */
+#define OR1K_MMU_PG_A           0x010  /* accessed */
+#define OR1K_MMU_PG_WOM         0x008  /* weakly-ordered memory */
+#define OR1K_MMU_PG_WBC         0x004  /* write-back cacheable */
+#define OR1K_MMU_PG_CI          0x002  /* cache inhibit */
+#define OR1K_MMU_PG_CC          0x001  /* cache coherent */
diff --git a/src/bsp/lk/arch/or1k/include/arch/spinlock.h b/src/bsp/lk/arch/or1k/include/arch/spinlock.h
new file mode 100644
index 0000000..5c50c5b
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/include/arch/spinlock.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <arch/ops.h>
+#include <stdbool.h>
+
+#if WITH_SMP
+#error microblaze does not support SMP
+#endif
+
+#define SPIN_LOCK_INITIAL_VALUE (0)
+
+typedef unsigned int spin_lock_t;
+
+typedef unsigned int spin_lock_saved_state_t;
+typedef unsigned int spin_lock_save_flags_t;
+
+/* UP no-op "acquire": just mark the lock taken so held() reports true. */
+static inline void arch_spin_lock(spin_lock_t *lock)
+{
+    *lock = 1;
+}
+
+/* Always succeeds on UP; returns 0 (success).
+ * NOTE(review): unlike arch_spin_lock() this does not set *lock, so
+ * arch_spin_lock_held() stays false after a trylock -- confirm callers
+ * do not rely on that. */
+static inline int arch_spin_trylock(spin_lock_t *lock)
+{
+    return 0;
+}
+
+/* Release: clear the flag set by arch_spin_lock(). */
+static inline void arch_spin_unlock(spin_lock_t *lock)
+{
+    *lock = 0;
+}
+
+/* Reset the lock to its unlocked initial value. */
+static inline void arch_spin_lock_init(spin_lock_t *lock)
+{
+    *lock = SPIN_LOCK_INITIAL_VALUE;
+}
+
+/* Best-effort debug check: true if the lock flag is currently set. */
+static inline bool arch_spin_lock_held(spin_lock_t *lock)
+{
+    return *lock != 0;
+}
+
+    /* default flag is to just disable plain irqs */
+#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS  0
+
+enum {
+    /* private */
+    SPIN_LOCK_STATE_RESTORE_IRQ = 1,
+};
+
+/* Disable IRQs if they are currently enabled, recording in *statep
+ * whether arch_interrupt_restore() must re-enable them.  `flags` is
+ * unused on this UP implementation. */
+static inline void
+arch_interrupt_save(spin_lock_saved_state_t *statep, spin_lock_save_flags_t flags)
+{
+    spin_lock_saved_state_t state = 0;
+    if (!arch_ints_disabled()) {
+        state |= SPIN_LOCK_STATE_RESTORE_IRQ;
+        arch_disable_ints();
+    }
+    *statep = state;
+}
+
+/* Re-enable IRQs iff the matching arch_interrupt_save() disabled them. */
+static inline void
+arch_interrupt_restore(spin_lock_saved_state_t old_state, spin_lock_save_flags_t flags)
+{
+    if (old_state & SPIN_LOCK_STATE_RESTORE_IRQ)
+        arch_enable_ints();
+}
+
+
+
+
diff --git a/src/bsp/lk/arch/or1k/linker.ld b/src/bsp/lk/arch/or1k/linker.ld
new file mode 100644
index 0000000..4fe442d
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/linker.ld
@@ -0,0 +1,96 @@
+OUTPUT_FORMAT("elf32-or1k", "elf32-or1k", "elf32-or1k")
+OUTPUT_ARCH(or1k)
+
+ENTRY(_start)
+SECTIONS
+{
+    . = %KERNEL_BASE% + %KERNEL_LOAD_OFFSET%;
+
+    _start = .;
+
+    /* vector table goes at 0 */
+    .vectors : AT(0) {
+        KEEP(*(.vectors))
+    }
+
+    /* text/read-only data */
+    /* set the load address to physical MEMBASE */
+    .text : AT(%MEMBASE% + %KERNEL_LOAD_OFFSET% + SIZEOF(.vectors)) {
+        KEEP(*(.text.boot))
+        *(.text* .gnu.linkonce.t.*)
+    }
+
+    .interp : { *(.interp) }
+    .hash : { *(.hash) }
+    .dynsym : { *(.dynsym) }
+    .dynstr : { *(.dynstr) }
+    .rel.text : { *(.rel.text) *(.rel.gnu.linkonce.t*) }
+    .rela.text : { *(.rela.text) *(.rela.gnu.linkonce.t*) }
+    .rel.data : { *(.rel.data) *(.rel.gnu.linkonce.d*) }
+    .rela.data : { *(.rela.data) *(.rela.gnu.linkonce.d*) }
+    .rel.rodata : { *(.rel.rodata) *(.rel.gnu.linkonce.r*) }
+    .rela.rodata : { *(.rela.rodata) *(.rela.gnu.linkonce.r*) }
+    .rel.got : { *(.rel.got) }
+    .rela.got : { *(.rela.got) }
+    .rel.ctors : { *(.rel.ctors) }
+    .rela.ctors : { *(.rela.ctors) }
+    .rel.dtors : { *(.rel.dtors) }
+    .rela.dtors : { *(.rela.dtors) }
+    .rel.init : { *(.rel.init) }
+    .rela.init : { *(.rela.init) }
+    .rel.fini : { *(.rel.fini) }
+    .rela.fini : { *(.rela.fini) }
+    .rel.bss : { *(.rel.bss) }
+    .rela.bss : { *(.rela.bss) }
+    .rel.plt : { *(.rel.plt) }
+    .rela.plt : { *(.rela.plt) }
+    .init : { *(.init) }
+    .plt : { *(.plt) }
+
+    .rodata : ALIGN(4) {
+        __rodata_start = .;
+        *(.rodata .rodata.* .gnu.linkonce.r.*)
+        . = ALIGN(4);
+        __rodata_end = .;
+    }
+
+
+    .data : ALIGN(4) {
+        /* writable data  */
+        __data_start_rom = .;
+        /* in one segment binaries, the rom data address is on top of the ram data address */
+        __data_start = .;
+        *(.data .data.* .gnu.linkonce.d.*)
+        __ctor_list = .;
+        KEEP(*(.ctors .init_array))
+        __ctor_end = .;
+        __dtor_list = .;
+        KEEP(*(.dtors .fini_array))
+        __dtor_end = .;
+        *(.got*)
+        *(.dynamic)
+
+        __data_end = .;
+    }
+
+
+    /* uninitialized data (in same segment as writable data) */
+    .bss : ALIGN(4) {
+        KEEP(*(.bss.prebss.*))
+        . = ALIGN(4);
+        __bss_start = .;
+        *(.bss .bss.*)
+        *(.gnu.linkonce.b.*)
+        *(COMMON)
+        . = ALIGN(4);
+        __bss_end = .;
+    }
+
+    _end = .;
+
+    . = %KERNEL_BASE% + %MEMSIZE%;
+    _end_of_ram = .;
+
+    /* Strip unnecessary stuff */
+    /DISCARD/ : { *(.comment .note .eh_frame) }
+}
diff --git a/src/bsp/lk/arch/or1k/mmu.c b/src/bsp/lk/arch/or1k/mmu.c
new file mode 100644
index 0000000..f55f866
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/mmu.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2015 Stefan Kristiansson
+ * Based on arch/arm/arm/mmu.c
+ * Copyright (c) 2008-2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <trace.h>
+#include <debug.h>
+#include <err.h>
+#include <string.h>
+#include <arch/mmu.h>
+#include <arch/or1k.h>
+#include <arch/or1k/mmu.h>
+#include <kernel/vm.h>
+
+#define LOCAL_TRACE 0
+
+#if WITH_KERNEL_VM
+
+uint32_t or1k_kernel_translation_table[256] __ALIGNED(8192) __SECTION(".bss.prebss.translation_table");
+
+/* Pessimistic TLB invalidation: for each of `count` pages starting at
+ * vaddr, clear the match register of every way of the set that maps the
+ * page, in both the DTLB and the ITLB.  This may invalidate more
+ * entries than strictly necessary.
+ * TODO: make it more precise. */
+void or1k_invalidate_tlb(vaddr_t vaddr, uint count)
+{
+    uint32_t dmmucfgr = mfspr(OR1K_SPR_SYS_DMMUCFGR_ADDR);
+    uint32_t immucfgr = mfspr(OR1K_SPR_SYS_IMMUCFGR_ADDR);
+    /* NTW encodes number-of-ways - 1; NTS encodes log2(number of sets) */
+    uint32_t num_dtlb_ways = OR1K_SPR_SYS_DMMUCFGR_NTW_GET(dmmucfgr) + 1;
+    uint32_t num_dtlb_sets = 1 << OR1K_SPR_SYS_DMMUCFGR_NTS_GET(dmmucfgr);
+    uint32_t num_itlb_ways = OR1K_SPR_SYS_IMMUCFGR_NTW_GET(immucfgr) + 1;
+    uint32_t num_itlb_sets = 1 << OR1K_SPR_SYS_IMMUCFGR_NTS_GET(immucfgr);
+    uint32_t offs;
+
+    for (; count; count--) {
+        offs = (vaddr >> PAGE_SIZE_SHIFT) & (num_dtlb_sets-1);
+        /* intentional fallthrough below: clear ways N-1 .. 0 */
+        switch (num_dtlb_ways) {
+            case 4:
+                mtspr_off(0, OR1K_SPR_DMMU_DTLBW_MR_ADDR(3, offs), 0);
+                /* fallthrough */
+            case 3:
+                mtspr_off(0, OR1K_SPR_DMMU_DTLBW_MR_ADDR(2, offs), 0);
+                /* fallthrough */
+            case 2:
+                mtspr_off(0, OR1K_SPR_DMMU_DTLBW_MR_ADDR(1, offs), 0);
+                /* fallthrough */
+            case 1:
+                mtspr_off(0, OR1K_SPR_DMMU_DTLBW_MR_ADDR(0, offs), 0);
+        }
+
+        offs = (vaddr >> PAGE_SIZE_SHIFT) & (num_itlb_sets-1);
+        switch (num_itlb_ways) {
+            case 4:
+                mtspr_off(0, OR1K_SPR_IMMU_ITLBW_MR_ADDR(3, offs), 0);
+                /* fallthrough */
+            case 3:
+                mtspr_off(0, OR1K_SPR_IMMU_ITLBW_MR_ADDR(2, offs), 0);
+                /* fallthrough */
+            case 2:
+                mtspr_off(0, OR1K_SPR_IMMU_ITLBW_MR_ADDR(1, offs), 0);
+                /* fallthrough */
+            case 1:
+                mtspr_off(0, OR1K_SPR_IMMU_ITLBW_MR_ADDR(0, offs), 0);
+        }
+        vaddr += PAGE_SIZE;
+    }
+}
+
+/* Translate vaddr through the kernel translation table.
+ * Fills *paddr and/or *flags when non-NULL (either may be NULL).
+ * Returns NO_ERROR on a valid mapping, ERR_NOT_FOUND otherwise. */
+status_t arch_mmu_query(vaddr_t vaddr, paddr_t *paddr, uint *flags)
+{
+    uint index = vaddr / SECTION_SIZE;
+    uint32_t pte = or1k_kernel_translation_table[index];
+    uint32_t vmask = SECTION_SIZE-1;
+
+    if (!(pte & OR1K_MMU_PG_PRESENT))
+        return ERR_NOT_FOUND;
+
+    /* not a l1 entry; descend into the l2 page table */
+    if (!(pte & OR1K_MMU_PG_L)) {
+        uint32_t *l2_table = paddr_to_kvaddr(pte & ~OR1K_MMU_PG_FLAGS_MASK);
+        index = (vaddr % SECTION_SIZE) / PAGE_SIZE;
+        pte = l2_table[index];
+        vmask = PAGE_SIZE-1;
+
+        /* fix: an unmapped page inside a partially mapped section must
+         * not be reported as a valid mapping */
+        if (!(pte & OR1K_MMU_PG_PRESENT))
+            return ERR_NOT_FOUND;
+    }
+
+    if (paddr)
+        *paddr = (pte & ~OR1K_MMU_PG_FLAGS_MASK) | (vaddr & vmask);
+
+    if (flags) {
+        /* map PTE permission/cache bits back to generic arch_mmu flags */
+        *flags = 0;
+        if (pte & OR1K_MMU_PG_U)
+            *flags |= ARCH_MMU_FLAG_PERM_USER;
+        if (!(pte & OR1K_MMU_PG_X))
+            *flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
+        if (!(pte & OR1K_MMU_PG_W))
+            *flags |= ARCH_MMU_FLAG_PERM_RO;
+        if (pte & OR1K_MMU_PG_CI)
+            *flags |= ARCH_MMU_FLAG_UNCACHED;
+    }
+
+    return NO_ERROR;
+}
+
+/* Unmap `count` pages starting at page-aligned vaddr.
+ * Only whole 16MB section (l1) mappings can be removed: an l2
+ * page-granularity mapping, a vaddr not section-aligned, or a range
+ * smaller than a section panics (PANIC_UNIMPLEMENTED).  Not-present
+ * entries are skipped.  Returns the number of pages unmapped, or
+ * ERR_INVALID_ARGS for an unaligned vaddr. */
+int arch_mmu_unmap(vaddr_t vaddr, uint count)
+{
+    LTRACEF("vaddr = 0x%x, count = %d\n", vaddr, count);
+
+    if (!IS_PAGE_ALIGNED(vaddr))
+        return ERR_INVALID_ARGS;
+
+    uint unmapped = 0;
+    while (count) {
+        uint index = vaddr / SECTION_SIZE;
+        uint32_t pte = or1k_kernel_translation_table[index];
+        /* nothing mapped here; advance one page */
+        if (!(pte & OR1K_MMU_PG_PRESENT)) {
+            vaddr += PAGE_SIZE;
+            count--;
+            continue;
+        }
+        /* Unmapping of l2 tables is not implemented (yet) */
+        if (!(pte & OR1K_MMU_PG_L) || !IS_ALIGNED(vaddr, SECTION_SIZE) || count < SECTION_SIZE / PAGE_SIZE)
+            PANIC_UNIMPLEMENTED;
+
+        /* drop the whole section and flush its TLB entries */
+        or1k_kernel_translation_table[index] = 0;
+        or1k_invalidate_tlb(vaddr, SECTION_SIZE / PAGE_SIZE);
+        vaddr += SECTION_SIZE;
+        count -= SECTION_SIZE / PAGE_SIZE;
+        unmapped += SECTION_SIZE / PAGE_SIZE;
+    }
+
+    return unmapped;
+}
+
+/* Map `count` pages from page-aligned paddr at page-aligned vaddr with
+ * the given generic arch_mmu flags.  Uses a 16MB l1 section mapping
+ * whenever both addresses are section-aligned and the remaining count
+ * covers a full section, otherwise allocates/uses an l2 page table.
+ * Returns the number of pages actually mapped (may be short if page
+ * table allocation fails) or ERR_INVALID_ARGS. */
+int arch_mmu_map(vaddr_t vaddr, paddr_t paddr, uint count, uint flags)
+{
+    uint l1_index;
+    uint32_t pte;
+    uint32_t arch_flags = 0;
+
+    LTRACEF("vaddr = 0x%x, paddr = 0x%x, count = %d, flags = 0x%x\n", vaddr, paddr, count, flags);
+
+    if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr))
+        return ERR_INVALID_ARGS;
+
+    /* translate generic flags to OR1K PTE bits */
+    if (flags & ARCH_MMU_FLAG_PERM_USER)
+        arch_flags |= OR1K_MMU_PG_U;
+    if (!(flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE))
+        arch_flags |= OR1K_MMU_PG_X;
+    /* NOTE(review): every non-default cache mode is collapsed to
+     * cache-inhibited (CI) -- confirm this is intended for all values
+     * under ARCH_MMU_FLAG_CACHE_MASK */
+    if (flags & ARCH_MMU_FLAG_CACHE_MASK)
+        arch_flags |= OR1K_MMU_PG_CI;
+    if (!(flags & ARCH_MMU_FLAG_PERM_RO))
+        arch_flags |= OR1K_MMU_PG_W;
+
+    uint mapped = 0;
+    while (count) {
+        l1_index = vaddr / SECTION_SIZE;
+        /* fast path: map a whole 16MB section with a single l1 entry */
+        if (IS_ALIGNED(vaddr, SECTION_SIZE) && IS_ALIGNED(paddr, SECTION_SIZE) && count >= SECTION_SIZE / PAGE_SIZE) {
+            or1k_kernel_translation_table[l1_index] = (paddr & ~(SECTION_SIZE-1)) | arch_flags | OR1K_MMU_PG_PRESENT | OR1K_MMU_PG_L;
+            count -= SECTION_SIZE / PAGE_SIZE;
+            mapped += SECTION_SIZE / PAGE_SIZE;
+            vaddr += SECTION_SIZE;
+            paddr += SECTION_SIZE;
+            continue;
+        }
+
+        uint32_t *l2_table;
+
+        pte = or1k_kernel_translation_table[l1_index];
+
+        /* FIXME: l1 already mapped as a section */
+        if (pte & OR1K_MMU_PG_PRESENT && pte & OR1K_MMU_PG_L)
+            PANIC_UNIMPLEMENTED;
+
+        if (pte & OR1K_MMU_PG_PRESENT) {
+            /* existing l2 table; pte holds its physical address */
+            l2_table = paddr_to_kvaddr(pte & ~OR1K_MMU_PG_FLAGS_MASK);
+            LTRACEF("l2_table at %p\n", l2_table);
+        } else {
+            /* no l2 table yet; allocate and install one */
+            l2_table = pmm_alloc_kpage();
+            if (!l2_table) {
+                TRACEF("failed to allocate pagetable\n");
+                return mapped;
+            }
+
+            memset(l2_table, 0, PAGE_SIZE);
+            paddr_t l2_pa = kvaddr_to_paddr(l2_table);
+            LTRACEF("allocated pagetable at %p, pa 0x%lx\n", l2_table, l2_pa);
+            /* NOTE(review): the leaf arch_flags are also stored on this
+             * non-leaf l1 pointer entry -- confirm the hardware/query
+             * path ignores permission bits on non-L entries */
+            or1k_kernel_translation_table[l1_index] = l2_pa | arch_flags | OR1K_MMU_PG_PRESENT;
+        }
+
+        uint l2_index = (vaddr % SECTION_SIZE) / PAGE_SIZE;
+
+        LTRACEF("l2_index = 0x%x, vaddr = 0x%x, paddr = 0x%x\n", l2_index, vaddr, paddr);
+        l2_table[l2_index] = paddr | arch_flags | OR1K_MMU_PG_PRESENT | OR1K_MMU_PG_L;
+
+        count--;
+        mapped++;
+        vaddr += PAGE_SIZE;
+        paddr += PAGE_SIZE;
+    }
+
+    return mapped;
+}
+
+#endif /* WITH_KERNEL_VM */
diff --git a/src/bsp/lk/arch/or1k/rules.mk b/src/bsp/lk/arch/or1k/rules.mk
new file mode 100644
index 0000000..af711ad
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/rules.mk
@@ -0,0 +1,54 @@
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE := $(LOCAL_DIR)
+
+# core or1k architecture support sources
+MODULE_SRCS += \
+	$(LOCAL_DIR)/start.S \
+	$(LOCAL_DIR)/arch.c \
+	$(LOCAL_DIR)/asm.S \
+	$(LOCAL_DIR)/exceptions.c \
+	$(LOCAL_DIR)/thread.c \
+	$(LOCAL_DIR)/cache-ops.c \
+	$(LOCAL_DIR)/mmu.c \
+	$(LOCAL_DIR)/faults.c
+
+# or1k port is uniprocessor only
+GLOBAL_DEFINES += \
+	SMP_MAX_CPUS=1
+
+# set the default toolchain to or1k elf and set a #define
+ifndef TOOLCHAIN_PREFIX
+TOOLCHAIN_PREFIX := or1k-elf-
+endif
+
+
+# cc-option: return $(2) if the compiler accepts it, otherwise $(3)
+cc-option = $(shell if test -z "`$(1) $(2) -S -o /dev/null -xc /dev/null 2>&1`"; \
+	then echo "$(2)"; else echo "$(3)"; fi ;)
+
+ARCH_OPTFLAGS := -O2
+
+# allow the linker to relax or1k branch/load sequences
+GLOBAL_LDFLAGS += -relax
+
+# by default the kernel runs at the base of memory with no load offset
+KERNEL_BASE ?= $(MEMBASE)
+KERNEL_LOAD_OFFSET ?= 0
+
+GLOBAL_DEFINES += \
+	KERNEL_BASE=$(KERNEL_BASE) \
+	KERNEL_LOAD_OFFSET=$(KERNEL_LOAD_OFFSET)
+
+GLOBAL_DEFINES += \
+    MEMBASE=$(MEMBASE) \
+    MEMSIZE=$(MEMSIZE)
+
+# potentially generated files that should be cleaned out with clean make rule
+GENERATED += \
+	$(BUILDDIR)/linker.ld
+
+# rules for generating the linker
+# the linker script template has %MEMBASE% etc placeholders substituted by sed
+$(BUILDDIR)/linker.ld: $(LOCAL_DIR)/linker.ld $(wildcard arch/*.ld)
+	@echo generating $@
+	@$(MKDIR)
+	$(NOECHO)sed "s/%MEMBASE%/$(MEMBASE)/;s/%MEMSIZE%/$(MEMSIZE)/;s/%KERNEL_BASE%/$(KERNEL_BASE)/;s/%KERNEL_LOAD_OFFSET%/$(KERNEL_LOAD_OFFSET)/" < $< > $@
+
+LINKER_SCRIPT += $(BUILDDIR)/linker.ld
+
+include make/module.mk
diff --git a/src/bsp/lk/arch/or1k/start.S b/src/bsp/lk/arch/or1k/start.S
new file mode 100644
index 0000000..5af80fa
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/start.S
@@ -0,0 +1,525 @@
+/*
+ * Copyright (c) 2015 Stefan Kristiansson
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+#include <arch/ops.h>
+#include <arch/or1k/mmu.h>
+#include <kernel/vm.h>
+
+#define RED_ZONE            128
+#define EXCEPTION_FRAME     (128 + RED_ZONE)
+
+/* clobbers r9 and rd, result will be in rd */
+#define get_va_to_pa_offs(rd) \
+    l.movhi rd, hi(.+12)    ;\
+    l.jal   .+8             ;\
+     l.ori  rd, rd, lo(.+4) ;\
+    l.sub   rd, rd, r9
+
+/* clobbers r9 and rd, result will be in rd */
+#define to_phys(sym, rd) \
+    get_va_to_pa_offs(rd)   ;\
+    l.movhi r9, hi(sym)     ;\
+    l.ori   r9, r9, lo(sym) ;\
+    l.sub   rd, r9, rd
+
+/*
+ * exception_entry: common prologue for the exception vectors below.
+ * Builds an EXCEPTION_FRAME on the stack holding r2-r31 plus the saved
+ * EPCR/ESR, skipping a RED_ZONE below the interrupted stack pointer.
+ * Instructions indented by one extra space sit in a branch delay slot.
+ */
+.macro exception_entry
+#if WITH_KERNEL_VM
+    /* exceptions enter with the MMUs disabled: stash r31/r9 in fixed
+     * physical scratch words at address 0 and convert sp to physical */
+    l.sw    0(r0), r31
+    l.sw    4(r0), r9
+    get_va_to_pa_offs(r31)
+    l.sub   r1, r1, r31
+    l.lwz   r9, 4(r0)
+#endif
+    l.addi  r1, r1, -EXCEPTION_FRAME
+    l.sw    0(r1), r2
+    l.sw    4(r1), r3
+    l.sw    8(r1), r4
+    l.sw    12(r1), r5
+    l.sw    16(r1), r6
+    l.sw    20(r1), r7
+    l.sw    24(r1), r8
+    l.sw    28(r1), r9
+    l.sw    32(r1), r10
+    l.sw    36(r1), r11
+    l.sw    40(r1), r12
+    l.sw    44(r1), r13
+    l.sw    48(r1), r14
+    l.sw    52(r1), r15
+    l.sw    56(r1), r16
+    l.sw    60(r1), r17
+    l.sw    64(r1), r18
+    l.sw    68(r1), r19
+    l.sw    72(r1), r20
+    l.sw    76(r1), r21
+    l.sw    80(r1), r22
+    l.sw    84(r1), r23
+    l.sw    88(r1), r24
+    l.sw    92(r1), r25
+    l.sw    96(r1), r26
+    l.sw    100(r1), r27
+    l.sw    104(r1), r28
+    l.sw    108(r1), r29
+    l.sw    112(r1), r30
+    /* save the interrupted pc and status register into the frame */
+    l.mfspr r3, r0, OR1K_SPR_SYS_EPCR_ADDR(0)
+    l.sw    120(r1), r3
+    l.mfspr r3, r0, OR1K_SPR_SYS_ESR_ADDR(0)
+    l.sw    124(r1), r3
+#if WITH_KERNEL_VM
+    /* convert sp back to virtual and restore the scratched r31 */
+    l.add   r1, r1, r31
+    l.lwz   r31, 0(r0)
+
+    /* enable dmmu and immu */
+    l.mfspr r9, r0, OR1K_SPR_SYS_SR_ADDR
+    l.ori   r9, r9, OR1K_SPR_SYS_SR_DME_MASK | OR1K_SPR_SYS_SR_IME_MASK
+    l.mtspr r0, r9, OR1K_SPR_SYS_ESR_ADDR(0)
+
+    /* point EPCR just past the l.rfe so execution resumes with MMUs on
+     * (EPCR/ESR were already saved into the frame above, so clobbering
+     * them here is safe) */
+    l.movhi r9, hi(.+16)
+    l.ori   r9, r9, lo(.+12)
+    l.mtspr r0, r9, OR1K_SPR_SYS_EPCR_ADDR(0)
+    l.rfe
+#endif
+    l.sw    116(r1), r31
+.endm
+
+/*
+ * Exception vector table. The OR1K core jumps to fixed offsets in this
+ * section; each stub saves state via exception_entry, calls the C handler
+ * with r3 = frame pointer (and r4 = EEAR where relevant), then returns
+ * through return_from_exception. The instruction after each jump/branch
+ * (indented one extra space) executes in the delay slot.
+ */
+.section ".vectors", "ax"
+.org 0x100
+.global _reset
+_reset:
+    l.jal   start
+     l.nop
+
+/* vector 0x200: bus error */
+.org 0x200
+bus_error_exception:
+    exception_entry
+    l.mfspr r4, r0, OR1K_SPR_SYS_EEAR_ADDR(0)
+    l.jal   or1k_busfault_handler
+     l.ori  r3, r1, 0
+    l.j return_from_exception
+     l.nop
+
+/* vector 0x300: data page fault (protection violation / invalid entry) */
+.org 0x300
+data_pagefault_exception:
+    exception_entry
+    l.mfspr r4, r0, OR1K_SPR_SYS_EEAR_ADDR(0)
+    l.jal   or1k_data_pagefault_handler
+     l.ori  r3, r1, 0
+    l.j return_from_exception
+     l.nop
+
+/* vector 0x400: instruction page fault */
+.org 0x400
+instruction_pagefault_exception:
+    exception_entry
+    l.mfspr r4, r0, OR1K_SPR_SYS_EEAR_ADDR(0)
+    l.jal   or1k_instruction_pagefault_handler
+     l.ori  r3, r1, 0
+    l.j return_from_exception
+     l.nop
+
+/* vector 0x500: tick timer interrupt */
+.org 0x500
+tick_timer_exception:
+    exception_entry
+    l.jal   or1k_tick
+     l.nop
+    l.j return_from_exception
+     l.nop
+
+/* vector 0x600: unaligned access */
+.org 0x600
+alignment_exception:
+    exception_entry
+    l.mfspr r4, r0, OR1K_SPR_SYS_EEAR_ADDR(0)
+    l.jal   or1k_alignment_handler
+     l.ori  r3, r1, 0
+    l.j return_from_exception
+     l.nop
+
+/* vector 0x700: illegal instruction */
+.org 0x700
+illegal_instruction_exception:
+    exception_entry
+    l.mfspr r4, r0, OR1K_SPR_SYS_EEAR_ADDR(0)
+    l.jal   or1k_illegal_instruction_handler
+     l.ori  r3, r1, 0
+    l.j return_from_exception
+     l.nop
+
+/* vector 0x800: external (PIC) interrupt */
+.org 0x800
+external_interrupt_exception:
+    exception_entry
+    l.jal   or1k_irq
+     l.nop
+    l.j return_from_exception
+     l.nop
+
+/*
+ * vector 0x900: DTLB miss. Fast-path software TLB refill: walk the
+ * two-level kernel translation table (16MB L1 sections, 8KB pages),
+ * synthesize a DTLB translate-register entry with protection bits
+ * derived from the page-table PP index, and l.rfe to retry the access.
+ * Only r3/r4/r9 are used; they are stashed at physical scratch words.
+ */
+.org 0x900
+dtlb_miss_exception:
+#if WITH_KERNEL_VM
+    l.sw    0(r0), r3
+    l.sw    4(r0), r4
+    l.sw    8(r0), r9
+
+    to_phys(or1k_kernel_translation_table, r3)
+    l.mfspr r4, r0, OR1K_SPR_SYS_EEAR_ADDR(0)
+    /* l1 index */
+    l.srli  r9, r4, 24
+    l.slli  r9, r9, 2
+
+    l.add   r3, r3, r9
+
+    l.lwz   r3, 0(r3) /* l1 entry */
+    l.andi  r9, r3, OR1K_MMU_PG_PRESENT
+    l.sfnei r9, OR1K_MMU_PG_PRESENT
+    l.bf    dtlb_miss_fault
+     l.andi r9, r3, OR1K_MMU_PG_L
+
+    /* large (section) mapping? then no l2 walk is needed */
+    l.sfeqi r9, OR1K_MMU_PG_L
+    /* l2_index */
+    l.srli  r4, r4, 13
+    l.bf    1f
+     l.andi r4, r4, 0x7ff
+    l.slli  r4, r4, 2
+    l.addi  r9, r0, 0xffffe000 /* ~(PAGE_SIZE-1): mask off flag bits */
+    l.and   r9, r3, r9
+    l.add   r9, r9, r4
+    l.j 2f
+     l.lwz  r9, 0(r9) /* l2 entry */
+
+/* use bits [23:13] from EEAR */
+1:  l.slli  r4, r4, 13
+    l.or    r9, r3, r4
+
+2:  l.ori   r3,r0,0xf351    /* sw emulation of dmmupr */
+    l.srli  r4,r9,4         /* get PP Index * 4 */
+    l.andi  r4,r4,0xc       /* mask everything but PPI (without X) (& 0b01100)*/
+    l.srl   r3,r3,r4        /* get protection bits from "dmmupr" */
+    /*
+    * The protection bits are inconveniently the "wrong" way in DMMUPR
+    * compared to DTLBR (UWE|URE|SWE|SRE vs SWE|SRE|UWE|URE), so we have
+    * to swap their places...
+    */
+    l.andi  r4,r3,0x3       /* SWE|SRE */
+    l.slli  r4,r4,8         /* 1:0 -> 9:8 */
+    l.andi  r3,r3,0xc       /* UWE|URE */
+    l.slli  r3,r3,4         /* 3:2 -> 7:6 */
+    l.or    r3,r3,r4
+
+    l.addi  r4,r0,0xffffe03f /* protection bit mask */
+    l.and   r4,r9,r4        /* apply the mask */
+    l.or    r9,r4,r3        /* apply protection bits */
+
+    /* compute the TLB set index: (EEAR >> 13) & (number_of_sets - 1),
+     * with the set count read from DMMUCFGR.NTS */
+    l.mfspr r3, r0, OR1K_SPR_SYS_DMMUCFGR_ADDR
+    l.slli  r3, r3, 31-OR1K_SPR_SYS_DMMUCFGR_NTS_MSB
+    l.srli  r3, r3, 31-OR1K_SPR_SYS_DMMUCFGR_NTS_LSB
+    l.ori   r4, r0, 1
+    l.sll   r3, r4, r3
+    l.addi  r3, r3, -1
+    l.mfspr r4, r0, OR1K_SPR_SYS_EEAR_ADDR(0)
+    l.srli  r4, r4, 13
+    l.and   r3, r4, r3
+    /* write translate and match registers, marking the entry valid */
+    l.mtspr r3, r9, OR1K_SPR_DMMU_DTLBW_TR_ADDR(0,0)
+    l.slli  r4, r4, 13
+    l.ori   r4, r4, OR1K_SPR_DMMU_DTLBW_MR_V_MASK
+    l.mtspr r3, r4, OR1K_SPR_DMMU_DTLBW_MR_ADDR(0,0)
+
+    l.lwz   r3, 0(r0)
+    l.lwz   r4, 4(r0)
+    l.lwz   r9, 8(r0)
+    l.rfe
+#endif /* WITH_KERNEL_VM */
+
+/* not present: restore scratch registers and take the slow fault path */
+dtlb_miss_fault:
+    l.lwz   r3, 0(r0)
+    l.lwz   r4, 4(r0)
+    l.j     data_pagefault_exception
+     l.lwz   r9, 8(r0)
+
+/*
+ * vector 0xa00: ITLB miss. Mirror of the DTLB refill above, but derives
+ * execute permissions (SXE/UXE) from an "immupr" software emulation and
+ * writes the IMMU translate/match registers instead.
+ */
+.org 0xa00
+itlb_miss_exception:
+#if WITH_KERNEL_VM
+    l.sw    0(r0), r3
+    l.sw    4(r0), r4
+    l.sw    8(r0), r9
+
+    to_phys(or1k_kernel_translation_table, r3)
+    l.mfspr r4, r0, OR1K_SPR_SYS_EEAR_ADDR(0)
+    /* l1 index */
+    l.srli  r9, r4, 24
+    l.slli  r9, r9, 2
+
+    l.add   r3, r3, r9
+    l.lwz   r3, 0(r3) /* l1 entry */
+
+    l.andi  r9, r3, OR1K_MMU_PG_PRESENT
+    l.sfnei r9, OR1K_MMU_PG_PRESENT
+    l.bf    itlb_miss_fault
+     l.andi r9, r3, OR1K_MMU_PG_L
+    /* large (section) mapping? then no l2 walk is needed */
+    l.sfeqi r9, OR1K_MMU_PG_L
+    /* l2 index */
+    l.srli  r4, r4, 13
+    l.bf    1f
+     l.andi r4, r4, 0x7ff
+
+    l.slli  r4, r4, 2
+    l.addi  r9, r0, 0xffffe000 /* ~(PAGE_SIZE-1): mask off flag bits */
+    l.and   r9, r3, r9
+    l.add   r9, r9, r4
+    l.j 2f
+     l.lwz  r9, 0(r9) /* l2 entry */
+
+    /* use bits [23:13] from EEAR */
+1:  l.slli  r4, r4, 13
+    l.or    r9, r3, r4
+
+2:  l.ori   r3, r0, 0xd00   /* sw emulation of immupr */
+    l.srli  r4, r9, 5       /* get PP Index * 2 */
+    l.andi  r4, r4, 0xa     /* mask everything but PPI (without W) (& 0b1010)*/
+    l.srl   r3, r3, r4      /* get protection bits from "immupr" */
+    l.andi  r3, r3, 0x3     /* mask everything else out */
+    l.slli  r3, r3, 6       /* and put them in their spot */
+    l.addi  r4, r0, 0xffffe03f /* protection bit mask */
+    l.and   r4, r9, r4      /* apply the mask */
+    l.or    r9, r4, r3      /* apply protection bits */
+
+    /* set index = (EEAR >> 13) & (number_of_sets - 1), from IMMUCFGR.NTS */
+    l.mfspr r3, r0, OR1K_SPR_SYS_IMMUCFGR_ADDR
+    l.slli  r3, r3, 31-OR1K_SPR_SYS_IMMUCFGR_NTS_MSB
+    l.srli  r3, r3, 31-OR1K_SPR_SYS_IMMUCFGR_NTS_LSB
+    l.ori   r4, r0, 1
+    l.sll   r3, r4, r3
+    l.addi  r3, r3, -1
+    l.mfspr r4, r0, OR1K_SPR_SYS_EEAR_ADDR(0)
+    l.srli  r4, r4, 13
+    l.and   r3, r4, r3
+    l.mtspr r3, r9, OR1K_SPR_IMMU_ITLBW_TR_ADDR(0,0)
+
+    l.slli  r4, r4, 13
+    l.ori   r4, r4, OR1K_SPR_IMMU_ITLBW_MR_V_MASK
+    l.mtspr r3, r4, OR1K_SPR_IMMU_ITLBW_MR_ADDR(0,0)
+
+    l.lwz   r3, 0(r0)
+    l.lwz   r4, 4(r0)
+    l.lwz   r9, 8(r0)
+    l.rfe
+#endif /* WITH_KERNEL_VM */
+
+/* not present: restore scratch registers and take the slow fault path */
+itlb_miss_fault:
+    l.lwz   r3, 0(r0)
+    l.lwz   r4, 4(r0)
+    l.j     instruction_pagefault_exception
+     l.lwz   r9, 8(r0)
+
+/* vector 0xb00: range exception -- routed to the generic handler with
+ * the vector offset in r4 */
+.org 0xb00
+range_exception:
+    exception_entry
+    l.ori   r4, r0, 0xb00
+    l.jal   or1k_unhandled_exception
+     l.ori  r3, r1, 0
+    l.j return_from_exception
+     l.nop
+
+/* vector 0xc00: l.sys system call */
+.org 0xc00
+syscall_exception:
+    exception_entry
+    l.jal   or1k_syscall_handler
+     l.ori  r3, r1, 0
+    l.j return_from_exception
+     l.nop
+
+/* vector 0xd00: floating point exception -- unhandled */
+.org 0xd00
+fpu_exception:
+    exception_entry
+    l.ori   r4, r0, 0xd00
+    l.jal   or1k_unhandled_exception
+     l.ori  r3, r1, 0
+    l.j return_from_exception
+     l.nop
+
+/* vector 0xe00: l.trap -- unhandled */
+.org 0xe00
+trap_exception:
+    exception_entry
+    l.ori   r4, r0, 0xe00
+    l.jal   or1k_unhandled_exception
+     l.ori  r3, r1, 0
+    l.j return_from_exception
+     l.nop
+
+/*
+ * start: entry point, jumped to from the 0x100 reset vector.
+ * Sets up a boot stack, (optionally) invalidates the TLBs, builds the
+ * initial section mappings from mmu_initial_mappings and turns on the
+ * MMUs, enables caches, clears .bss and calls lk_main.
+ */
+.section ".text.boot"
+FUNCTION(start)
+    /* set stack pointer to point at top of default stack */
+    l.movhi r1, hi(default_stack_top)
+    l.ori   r1, r1, lo(default_stack_top)
+
+#if WITH_KERNEL_VM
+    /* invalidate tlbs */
+    l.ori   r3, r0, OR1K_SPR_DMMU_DTLBW_MR_ADDR(0, 0)
+    l.ori   r4, r0, OR1K_SPR_IMMU_ITLBW_MR_ADDR(0, 0)
+    l.addi  r6, r0, 3 /* Maximum number of ways - 1 */
+
+    /* outer loop over ways, inner loop writes 0 to every set's match reg */
+1:  l.addi  r5, r0, 127 /* Maximum number of sets - 1 */
+2:  l.mtspr r3, r0, 0x0
+    l.mtspr r4, r0, 0x0
+
+    l.addi  r3, r3, 1
+    l.addi  r4, r4, 1
+    l.sfeq  r5, r0
+    l.bnf   2b
+     l.addi r5, r5, -1
+
+    /* advance the SPR address to the next way's register bank */
+    l.addi  r3, r3, 128
+    l.addi  r4, r4, 128
+
+    l.sfeq  r6, r0
+    l.bnf   1b
+     l.addi r6, r6, -1
+
+    /* setup initial mappings */
+    get_va_to_pa_offs(r3)
+    l.movhi r4, hi(or1k_kernel_translation_table)
+    l.ori   r4, r4, lo(or1k_kernel_translation_table)
+    l.sub   r4, r4, r3 /* to phys */
+    l.movhi r5, hi(mmu_initial_mappings)
+    l.ori   r5, r5, lo(mmu_initial_mappings)
+    l.sub   r5, r5, r3 /* to phys */
+
+    /* clear the translation table: 256 l1 entries, walked backwards */
+    l.addi  r3, r4, 255*4
+0:  l.sw    0(r3), r0
+    l.sfeq  r3, r4
+    l.bnf   0b
+     l.addi r3, r3, -4
+
+    /* iterate the mmu_initial_mappings array (20-byte records) until an
+     * entry with size 0 terminates the list */
+1:  l.lwz   r6, 0(r5) /* phys */
+    l.lwz   r7, 4(r5) /* virt */
+    l.lwz   r8, 8(r5) /* size */
+    l.lwz   r9, 12(r5) /* flags */
+    l.lwz   r10, 16(r5) /* name */
+    l.addi  r5, r5, 20
+
+    /* divide with 16MB */
+    l.srli  r6, r6, 24
+    l.srli  r7, r7, 24
+    l.srli  r8, r8, 24
+
+    l.sfeqi r8, 0
+    l.bf    .Linitial_mapping_done
+     l.nop
+
+    /* write one 16MB section descriptor per iteration; uncached/device
+     * mappings additionally get the cache-inhibit bit */
+2:  l.slli  r3, r7, 2
+    l.add   r3, r4, r3
+    l.slli  r10, r6, 24
+    l.ori   r10, r10, OR1K_MMU_PG_PRESENT | OR1K_MMU_PG_X | OR1K_MMU_PG_W | OR1K_MMU_PG_L
+    l.sfeqi r9, MMU_INITIAL_MAPPING_FLAG_UNCACHED
+    l.bf    3f
+     l.sfeqi r9, MMU_INITIAL_MAPPING_FLAG_DEVICE
+    l.bnf   4f
+     l.nop
+3:  l.ori   r10, r10, OR1K_MMU_PG_CI
+4:  l.sw    0(r3), r10
+    l.addi  r6, r6, 1
+    l.addi  r8, r8, -1
+    l.sfeqi r8, 0
+    l.bnf   2b
+     l.addi r7, r7, 1
+
+    l.j 1b
+     l.nop
+
+.Linitial_mapping_done:
+    /* enable mmu */
+    l.mfspr r3, r0, OR1K_SPR_SYS_SR_ADDR
+    l.ori   r3, r3, OR1K_SPR_SYS_SR_DME_MASK | OR1K_SPR_SYS_SR_IME_MASK
+    l.mtspr r0, r3, OR1K_SPR_SYS_ESR_ADDR(0)
+    /* setup pc to use virtual addresses */
+    l.movhi r3, hi(.+16)
+    l.ori   r3, r3, lo(.+12)
+    l.mtspr r0, r3, OR1K_SPR_SYS_EPCR_ADDR(0)
+    l.rfe
+#endif
+
+    /* invalidate and enable caches */
+    l.jal   arch_invalidate_cache_all
+    l.nop
+    l.jal   arch_enable_cache
+    l.ori  r3, r0, UCACHE
+
+    /* clear bss
+     * NOTE(review): the store executes before the bound check, so one word
+     * is written even if __bss_start == __bss_end -- confirm intent */
+    l.movhi r3, hi(__bss_start)
+    l.ori   r3, r3, lo(__bss_start)
+    l.movhi r4, hi(__bss_end)
+    l.ori   r4, r4, lo(__bss_end)
+1:  l.sw    0(r3), r0
+    l.sfltu r3, r4
+    l.bf    1b
+     l.addi r3, r3, 4
+
+    /* arguments to main */
+    l.ori   r3, r0, 1
+    l.ori   r4, r0, 2
+    l.ori   r5, r0, 3
+    l.jal   lk_main
+     l.ori  r6, r0, 4
+
+    /* shouldn't happen, but loop if it does
+     * NOTE(review): 'l.j 0' presumably encodes a jump-to-self (offset 0);
+     * 'l.j .' would state that intent unambiguously -- confirm */
+    l.j 0
+     l.nop
+
+/*
+ * return_from_exception: unwind the EXCEPTION_FRAME built by
+ * exception_entry. Restores EPCR/ESR first (so l.rfe resumes the
+ * interrupted context with its original status), then r2-r31, then
+ * pops the frame and returns via l.rfe.
+ */
+FUNCTION(return_from_exception)
+    l.lwz   r3, 120(r1)
+    l.mtspr r0, r3, OR1K_SPR_SYS_EPCR_BASE
+    l.lwz   r3, 124(r1)
+    l.mtspr r0, r3, OR1K_SPR_SYS_ESR_BASE
+    l.lwz   r2, 0(r1)
+    l.lwz   r3, 4(r1)
+    l.lwz   r4, 8(r1)
+    l.lwz   r5, 12(r1)
+    l.lwz   r6, 16(r1)
+    l.lwz   r7, 20(r1)
+    l.lwz   r8, 24(r1)
+    l.lwz   r9, 28(r1)
+    l.lwz   r10, 32(r1)
+    l.lwz   r11, 36(r1)
+    l.lwz   r12, 40(r1)
+    l.lwz   r13, 44(r1)
+    l.lwz   r14, 48(r1)
+    l.lwz   r15, 52(r1)
+    l.lwz   r16, 56(r1)
+    l.lwz   r17, 60(r1)
+    l.lwz   r18, 64(r1)
+    l.lwz   r19, 68(r1)
+    l.lwz   r20, 72(r1)
+    l.lwz   r21, 76(r1)
+    l.lwz   r22, 80(r1)
+    l.lwz   r23, 84(r1)
+    l.lwz   r24, 88(r1)
+    l.lwz   r25, 92(r1)
+    l.lwz   r26, 96(r1)
+    l.lwz   r27, 100(r1)
+    l.lwz   r28, 104(r1)
+    l.lwz   r29, 108(r1)
+    l.lwz   r30, 112(r1)
+    l.lwz   r31, 116(r1)
+    l.addi  r1, r1, EXCEPTION_FRAME
+    l.rfe
+
+/* 8KB boot stack used until the threading system takes over */
+.section ".bss"
+.align 8
+LOCAL_DATA(default_stack)
+.skip 8192
+LOCAL_DATA(default_stack_top)
diff --git a/src/bsp/lk/arch/or1k/thread.c b/src/bsp/lk/arch/or1k/thread.c
new file mode 100644
index 0000000..df776db
--- /dev/null
+++ b/src/bsp/lk/arch/or1k/thread.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2015 Stefan Kristiansson
+ * Based on arch/microblaze/thread.c
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <debug.h>
+#include <trace.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <kernel/thread.h>
+
+#define LOCAL_TRACE 0
+
+/* currently executing thread; read/written by the scheduler glue */
+struct thread *_current_thread;
+
+static void initial_thread_func(void) __NO_RETURN;
+/*
+ * Trampoline every new thread starts in (installed as the saved link
+ * register by arch_thread_initialize). Drops the scheduler lock held
+ * across the context switch, enables interrupts, runs the thread's
+ * entry routine and exits with its return code. Never returns.
+ */
+static void initial_thread_func(void)
+{
+    thread_t *ct = get_current_thread();
+
+#if LOCAL_TRACE
+    LTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
+    dump_thread(ct);
+#endif
+
+    /* exit the implicit critical section we're within */
+    spin_unlock(&thread_lock);
+    arch_enable_ints();
+
+    int ret = ct->entry(ct->arg);
+
+    LTRACEF("thread %p exiting with %d\n", ct, ret);
+
+    thread_exit(ret);
+}
+
+/*
+ * Prepare the arch-specific context-switch frame of a new thread so the
+ * first or1k_context_switch() into it "returns" into initial_thread_func
+ * on the thread's own stack.
+ */
+void arch_thread_initialize(thread_t *t)
+{
+    LTRACEF("t %p (%s)\n", t, t->name);
+
+    /* some registers we want to clone for the new thread */
+    register uint32_t r2 asm("r2");
+
+    /* zero out the thread context */
+    memset(&t->arch.cs_frame, 0, sizeof(t->arch.cs_frame));
+
+    /* r1 = stack pointer at the top of the thread's stack */
+    t->arch.cs_frame.r1 = (vaddr_t)t->stack + t->stack_size;
+    /* r2 copied from the current thread -- presumably the ABI frame
+     * pointer / static base; TODO confirm against the or1k ABI in use */
+    t->arch.cs_frame.r2 = r2;
+    /* r9 is the link register: first switch resumes here */
+    t->arch.cs_frame.r9 = (vaddr_t)initial_thread_func;
+}
+
+/*
+ * Switch from oldthread to newthread by saving/restoring the
+ * callee-saved register frames via the or1k_context_switch asm helper.
+ */
+void arch_context_switch(thread_t *oldthread, thread_t *newthread)
+{
+    LTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
+
+    or1k_context_switch(&oldthread->arch.cs_frame, &newthread->arch.cs_frame);
+}
+
+/*
+ * Debug dump of a thread's arch state. Only the saved stack pointer (r1)
+ * is meaningful, and only for threads that are not currently running
+ * (a running thread's frame is stale).
+ */
+void arch_dump_thread(thread_t *t)
+{
+    if (t->state != THREAD_RUNNING) {
+        dprintf(INFO, "\tarch: ");
+        dprintf(INFO, "sp 0x%x\n", t->arch.cs_frame.r1);
+    }
+}
diff --git a/src/bsp/lk/arch/x86-64/arch.c b/src/bsp/lk/arch/x86-64/arch.c
new file mode 100755
index 0000000..4bfdafe
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/arch.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <debug.h>
+#include <arch.h>
+#include <arch/ops.h>
+#include <arch/x86.h>
+#include <arch/x86/mmu.h>
+#include <arch/x86/descriptor.h>
+#include <arch/fpu.h>
+#include <platform.h>
+#include <sys/types.h>
+#include <string.h>
+
+static tss_t system_tss;
+
+/*
+ * Early x86-64 arch bringup: enable caching and install the system TSS.
+ */
+void arch_early_init(void)
+{
+    /* enable caches here for now: clear CR0.NW and CR0.CD */
+    clear_in_cr0(X86_CR0_NW | X86_CR0_CD);
+
+    memset(&system_tss, 0, sizeof(tss_t));
+
+    /* install a TSS descriptor in the GDT and load the task register */
+    set_global_desc(TSS_SELECTOR, &system_tss, sizeof(tss_t), 1, 0, 0, SEG_TYPE_TSS, 0, 0);
+    x86_ltr(TSS_SELECTOR);
+}
+
+/*
+ * Chain-loading another image is not supported on this port; panics.
+ */
+void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3)
+{
+    PANIC_UNIMPLEMENTED;
+}
+
+/*
+ * Later-stage arch init: bring up the FPU when the build enables it.
+ */
+void arch_init(void)
+{
+#if X86_WITH_FPU
+    fpu_init();
+#endif
+}
diff --git a/src/bsp/lk/arch/x86-64/asm.S b/src/bsp/lk/arch/x86-64/asm.S
new file mode 100644
index 0000000..2443023
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/asm.S
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+
+/* void x86_64_context_switch(uint64_t *oldsp, uint64_t newsp) */
+/*
+ * Save the callee-saved registers (rflags, rbx, rbp, r12-r15) on the
+ * current stack, store the resulting rsp through oldsp (rdi), switch
+ * rsp to newsp (rsi), then restore the new thread's registers and
+ * return on its stack. rdi/rsi carry the arguments per the SysV
+ * AMD64 calling convention.
+ */
+FUNCTION(x86_64_context_switch)
+    /* save the old context and restore the new */
+    pushf
+    pushq %rbx
+    pushq %rbp
+    pushq %r12
+    pushq %r13
+    pushq %r14
+    pushq %r15
+
+    movq %rsp,(%rdi)
+    movq %rsi,%rsp
+
+    popq %r15
+    popq %r14
+    popq %r13
+    popq %r12
+    popq %rbp
+    popq %rbx
+    popf
+
+    retq
+
+
diff --git a/src/bsp/lk/arch/x86-64/cache-ops.S b/src/bsp/lk/arch/x86-64/cache-ops.S
new file mode 100644
index 0000000..aa62593
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/cache-ops.S
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+#include <arch/ops.h>
+#include <arch/defines.h>
+
+.text
+
+/* stubs */
+
+/* x86 caches are hardware-coherent; these arch cache hooks are no-ops */
+FUNCTION(arch_disable_cache)
+    ret
+
+FUNCTION(arch_enable_cache)
+    ret
+
+FUNCTION(arch_clean_cache_range)
+    ret
+
+FUNCTION(arch_clean_invalidate_cache_range)
+    ret
+
+
diff --git a/src/bsp/lk/arch/x86-64/cache.c b/src/bsp/lk/arch/x86-64/cache.c
new file mode 100644
index 0000000..c1a876a
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/cache.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <arch/ops.h>
+
+/* nothing to do to sync I & D cache on x86-64 */
+/*
+ * No-op: x86-64 keeps instruction and data caches coherent in hardware,
+ * so code written by the CPU is visible to instruction fetch without
+ * explicit maintenance.
+ */
+void arch_sync_cache_range(addr_t start, size_t len)
+{
+}
diff --git a/src/bsp/lk/arch/x86-64/crt0.S b/src/bsp/lk/arch/x86-64/crt0.S
new file mode 100644
index 0000000..789dbfd
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/crt0.S
@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* The magic number for the Multiboot header. */
+#define MULTIBOOT_HEADER_MAGIC 0x1BADB002
+
+/* The flags for the Multiboot header. */
+#if defined(__ELF__) && 0
+#define MULTIBOOT_HEADER_FLAGS 0x00000002
+#else
+#define MULTIBOOT_HEADER_FLAGS 0x00010002
+#endif
+
+/* The magic number passed by a Multiboot-compliant boot loader. */
+#define MULTIBOOT_BOOTLOADER_MAGIC 0x2BADB002
+
+#define NUM_INT 0x31
+#define NUM_EXC 0x14
+
+#define MSR_EFER 0xc0000080
+#define EFER_LME 0x00000100
+
+.section ".text.boot"
+.code32
+.global _start
+_start:
+    jmp real_start
+
+.align 8
+
+.type multiboot_header,STT_OBJECT
+multiboot_header:
+    /* magic */
+    .int MULTIBOOT_HEADER_MAGIC
+    /* flags */
+    .int MULTIBOOT_HEADER_FLAGS
+    /* checksum */
+    .int -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
+
+#if !defined(__ELF__) || 1
+    /* header_addr */
+    .int multiboot_header
+    /* load_addr */
+    .int _start
+    /* load_end_addr */
+    .int __data_end
+    /* bss_end_addr */
+    .int __bss_end
+    /* entry_addr */
+    .int real_start
+#endif
+
+real_start:
+    cmpl $MULTIBOOT_BOOTLOADER_MAGIC, %eax
+    jne 0f
+    movl %ebx, (_multiboot_info)
+0:
+    /* setup isr stub descriptors in the idt */
+    movl $_isr, %esi
+    movl $_idt, %edi
+    movl $NUM_INT, %ecx
+
+.Lloop:
+    movl %esi, %ebx
+    movw %bx, (%edi)        /* low word in IDT(n).low */
+    shrl $16, %ebx
+    movw %bx, 6(%edi)       /* high word in IDT(n).high */
+    shrl $16, %ebx
+    movl $0, %ebx
+    movl %ebx, 8(%edi)
+    shrl $16, %ebx
+    movl %ebx, 12(%edi)
+    shrl $16, %ebx
+
+    addl $isr_stub_len, %esi    /* index the next ISR stub */
+    addl $16, %edi          /* index the next IDT entry */
+
+
+    loop .Lloop
+
+
+    lidt _idtr
+    xorl %eax, %eax
+    mov %eax, %cr3
+
+    lgdt _gdtr
+
+
+    movw $datasel, %ax
+    movw %ax, %ds
+    movw %ax, %es
+    movw %ax, %fs
+    movw %ax, %ss
+    movw %ax, %gs
+    movw %ax, %ss
+
+    movl $_kstack, %esp
+
+    /* We need to jump to our sane 32-bit CS by
+     executing a far jump using retf */
+    movl $codesel_32, %ecx
+        pushl %ecx
+        movl $farjump, %ecx
+        pushl %ecx
+        xorl %ecx, %ecx
+        retf
+
+farjump:
+    movl $tsssel, %eax
+    ltr %ax
+    /* zero the bss section */
+    movl $__bss_start, %edi /* starting address of the bss */
+    movl $__bss_end, %ecx   /* find the length of the bss in bytes */
+    subl %edi, %ecx
+    shrl $2, %ecx           /* convert to 32 bit words, since the bss is aligned anyway */
+2:
+    movl $0, (%edi)
+    addl $4, %edi
+    loop 2b
+
+
+    /* Preparing 64 bit paging, we will use 2MB pages covering 1GB
+    for initial bootstrap, this page table will be 1 to 1  */
+
+    /* PAE bit must be enabled  for 64 bit paging*/
+    mov %cr4, %eax
+    btsl $(5), %eax
+    mov %eax, %cr4  /*Enabling PAE*/
+
+    movl $pml4, %eax
+    mov %eax, %cr3
+
+    /* Long Mode Enabled at this point*/
+    movl $MSR_EFER ,%ecx
+    rdmsr
+    orl $EFER_LME,%eax
+    wrmsr
+
+    /* Setting the First PML4E with a PDP table reference*/
+    xorl %eax,  %eax
+    movl $pdp,   %eax
+    orl  $0x7, %eax
+    movl %eax, (pml4)
+
+    /* Setting the First PDPTE with a Page table reference*/
+    xorl %eax, %eax
+    movl $pte, %eax
+    orl  $0x7, %eax
+    movl %eax, (pdp)
+
+    movl $pte, %esi
+    movl $0x200, %ecx          /* 512 iterations, not 511: fill every PDE */
+
+    /* We need 512 entries of 2MB each to cover 1GB */
+fill_pte:
+    movl $0x200, %eax
+    subl %ecx, %eax            /* eax = entry index (0..511) */
+    shll $21,%eax              /* index * 2MB = physical base of this entry */
+    orl  $0x87, %eax           /* present + writable + user + 2MB page size */
+    movl %eax, (%esi)
+    addl $8,%esi
+    loop fill_pte
+
+    /* Enabling Paging and from this point we are in
+    32 bit compatibility mode*/
+    mov %cr0,  %eax
+    btsl $(31), %eax
+    mov %eax,  %cr0
+
+    /* Flushing TLB's */
+    mov %cr3,%eax
+    mov %eax,%cr3
+
+    /* Using another long jump to be on 64 bit mode
+    after this we will be on real 64 bit mode */
+    movl $codesel_64, %ecx     /*Need to put it in a the right CS*/
+    pushl %ecx
+    movl $farjump64, %ecx
+    pushl %ecx
+    retf
+
+.align 8
+.code64
+farjump64:
+    lidt _idtr
+    /* call the main module */
+    call lk_main
+
+0:                          /* just sit around waiting for interrupts */
+    hlt                     /* interrupts will unhalt the processor */
+    pause
+    jmp 0b                  /* so jump back to halt to conserve power */
+
+/* interrupt service routine stubs */
+_isr:
+.set i, 0
+.rept NUM_INT
+
+.set isr_stub_start, .
+
+.if i == 8 || (i >= 10 && i <= 14) || i == 17
+        nop                                     /* error code pushed by exception */
+        nop                                     /* 2 nops are the same length as push byte */
+        pushq $i                                /* interrupt number */
+        jmp interrupt_common
+.else
+        pushq $0                                /* fill in error code in iframe */
+        pushq $i                                /* interrupt number */
+        jmp interrupt_common
+.endif
+
+/* figure out the length of a single isr stub (usually 6 or 9 bytes) */
+.set isr_stub_len, . - isr_stub_start
+
+.set i, i + 1
+.endr
+
+/* annoying, but force AS to use the same (longer) encoding of jmp for all of the stubs */
+.fill 256
+
+interrupt_common:
+
+    /* save general purpose registers */
+    pushq %r15
+    pushq %r14
+    pushq %r13
+    pushq %r12
+    pushq %r11
+    pushq %r10
+    pushq %r9
+    pushq %r8
+    pushq %rax
+    pushq %rcx
+    pushq %rdx
+    pushq %rbx
+    pushq %rbp
+    pushq %rsi
+    pushq %rdi
+
+    /* store stack switch pivot.
+           push rsp has errata on some cpus, so use mov/push */
+    movq %rsp, %rax
+    pushq %rax
+    movq %rsp, %rdi     /* pass the  iframe using rdi */
+
+    call platform_irq
+
+    cmpq $0,%rax
+    je 0f
+    call thread_preempt
+
+0:
+    /* restore task_rsp, stack switch can occur here
+           if task_rsp is modified */
+    popq %rax
+    movq %rax, %rsp
+
+    /* restore general purpose registers */
+    popq %rdi
+    popq %rsi
+    popq %rbp
+    popq %rbx
+    popq %rdx
+    popq %rcx
+    popq %rax
+    popq %r8
+    popq %r9
+    popq %r10
+    popq %r11
+    popq %r12
+    popq %r13
+    popq %r14
+    popq %r15
+
+    /* drop vector number and error code*/
+    addq $16, %rsp
+    iretq
+
+.data
+.align 8
+
+/* define the heap end as read-write data containing the default end of the
+ * heap. dynamic memory length discovery can update this value during init.
+ * other archs can define this statically based on the memory layout of the
+ * platform.
+ */
+#.global _heap_end
+#_heap_end:
+#   .int 4096*1024  /* default to 4MB total */
+
+.global _multiboot_info
+_multiboot_info:
+    .int 0
+    .int 0
+.align 8
+.global _gdtr
+_gdtr:
+    .short _gdt_end - _gdt - 1
+    .int _gdt
+.align 8
+.global _gdt
+_gdt:
+    .int 0              /* NULL Descriptor_L*/
+    .int 0              /* NULL Descriptor_H */
+/* ring 0 descriptors */
+.set codesel_32, . - _gdt
+_code_32_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b10011010       /* P(1) DPL(00) S(1) 1 C(0) R(1) A(0) */
+    .byte  0b11001111       /* G(1) D(1) 0 0 limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+.set datasel, . - _gdt
+_data_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b10010010       /* P(1) DPL(00) S(1) 0 E(0) W(1) A(0) */
+    .byte  0b11001111       /* G(1) B(1) 0 0 limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+.set videosel, . - _gdt
+_video_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x8000           /* base 15:00 */
+    .byte  0x0b         /* base 23:16 */
+    .byte  0b10010010       /* P(1) DPL(00) S(1) 0 E(0) W(1) A(0) */
+    .byte  0b11001111       /* G(1) B(1) 0 0 limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+.set user_codesel_32, . - _gdt
+_user_code_32_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b11111010       /* P(1) DPL(11) S(1) 1 C(0) R(1) A(0) */
+    .byte  0b11001111       /* G(1) D(1) 0 0 limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+.set user_datasel, . - _gdt
+_user_data_32_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b11110010       /* P(1) DPL(11) S(1) 0 E(0) W(1) A(0) */
+    .byte  0b11001111       /* G(1) B(1) 0 0 limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+.set codesel_64, . - _gdt
+_code_64_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b10011010       /* P(1) DPL(00) S(1) 1 C(0) R(1) A(0) */
+    .byte  0b10101111       /* G(1) D(0) L(1) AVL(0) limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+.set datasel_64, . - _gdt
+_data_64_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b10010010       /* P(1) DPL(00) S(1) 1 C(0) R(1) A(0) */
+    .byte  0b11001111       /* G(1) B(1) 0 AVL(0) limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+    .quad  0x0000000000000000
+    .quad  0x0000000000000000
+
+.set user_codesel_64, . - _gdt
+_user_code_64_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b11111010       /* P(1) DPL(11) S(1) 1 C(0) R(1) A(0) */
+    .byte  0b10101111       /* G(1) D(1) L(0) AVL(0) limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+.set user_datasel_64, . - _gdt
+_user_data_64_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b11110010       /* P(1) DPL(11) S(1) 0 E(0) W(1) A(0) */
+    .byte  0b11001111       /* G(1) B(1) 0 0 limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+/* TSS descriptor */
+.set tsssel, . - _gdt
+_tss_gde:
+    .short 0                /* limit 15:00 */
+    .short 0                /* base 15:00 */
+    .byte  0                /* base 23:16 */
+    .byte  0x89             /* P(1) DPL(11) 0 10 B(0) 1 */
+    .byte  0x80             /* G(0) 0 0 AVL(0) limit 19:16 */
+    .byte  0                /* base 31:24 */
+    .quad 0x0000000000000000
+.global _gdt_end
+_gdt_end:
+
+.align 8
+.global _idtr
+_idtr:
+    .short _idt_end - _idt - 1  /* IDT limit */
+    .long _idt
+.fill 8
+.align 8
+/* interrupt descriptor table (IDT) */
+.global _idt
+_idt:
+
+.set i, 0
+.rept NUM_INT   /* one 16-byte gate per vector; NUM_INT-1 left the last vector past _idt_end and outside the lidt limit, while real_start still writes NUM_INT entries */
+    .short 0        /* low 16 bits of ISR offset (_isr#i & 0FFFFh) */
+    .short codesel_64   /* selector */
+    .byte  0
+    .byte  0x8e     /* present, ring 0, 64-bit interrupt gate */
+    .short  0       /* high 16 bits of ISR offset (_isr#i / 65536) */
+    .short  0       /* ISR offset */
+    .short  0       /* ISR offset */
+    .short  0       /* 32bits Reserved */
+    .short  0       /* 32bits Reserved */
+
+
+.set i, i + 1
+.endr
+
+.global _idt_end
+_idt_end:
+
+/* Memory for the initial page table, we will use 3 pages for a
+   1 to 1 mapping that covers 1GB of physical memory */
+.align 4096
+pml4:
+.fill 4096
+pdp:
+.fill 4096
+pte:
+.fill 4096
+
+.bss
+.align 4096
+
+.global _kstack
+.fill 4096
+_kstack:
diff --git a/src/bsp/lk/arch/x86-64/descriptor.c b/src/bsp/lk/arch/x86-64/descriptor.c
new file mode 100644
index 0000000..d249986
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/descriptor.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <compiler.h>
+#include <arch/x86/descriptor.h>
+
+/* not the best way to do this, but easy for now */
+typedef struct {
+    uint16_t limit_15_0;
+    uint16_t base_15_0;
+    uint8_t base_23_16;
+
+    uint8_t type : 4;
+    uint8_t s : 1;
+    uint8_t dpl : 2;
+    uint8_t p : 1;
+
+    uint8_t limit_19_16 : 4;
+    uint8_t avl : 1;
+    uint8_t reserved_0 : 1;
+    uint8_t d_b : 1;
+    uint8_t g : 1;
+
+    uint8_t base_31_24;
+} __PACKED seg_desc_t;
+
+extern seg_desc_t _gdt[];
+
+void set_global_desc(seg_sel_t sel, void *base, uint32_t limit,
+                     uint8_t present, uint8_t ring, uint8_t sys, uint8_t type, uint8_t gran, uint8_t bits)
+{
+    // convert selector into index (selector = index << 3; low 3 bits are RPL/TI)
+    uint16_t index = sel >> 3;
+
+    _gdt[index].limit_15_0 = limit & 0x0000ffff;
+    _gdt[index].limit_19_16 = (limit & 0x000f0000) >> 16;
+
+    _gdt[index].base_15_0 = ((uintptr_t) base) & 0x0000ffff;
+    _gdt[index].base_23_16 = (((uintptr_t) base) & 0x00ff0000) >> 16;
+    _gdt[index].base_31_24 = (((uintptr_t) base) & 0xff000000) >> 24;
+
+    _gdt[index].type = type & 0x0f; // segment type
+    _gdt[index].p = present != 0;   // present
+    _gdt[index].dpl = ring & 0x03;  // descriptor privilege level
+    _gdt[index].g = gran != 0;      // granularity
+    _gdt[index].s = sys != 0;       // system / non-system
+    _gdt[index].d_b = bits != 0;    // 16 / 32 bit
+}
diff --git a/src/bsp/lk/arch/x86-64/faults.c b/src/bsp/lk/arch/x86-64/faults.c
new file mode 100644
index 0000000..12af284
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/faults.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <debug.h>
+#include <arch/x86.h>
+#include <kernel/thread.h>
+#include <arch/arch_ops.h>
+
+static void dump_fault_frame(struct x86_iframe *frame)
+{
+
+    dprintf(CRITICAL, " CS:     %04llx EIP: %08llx EFL: %08llx CR2: %08llx\n",
+            frame->cs, frame->rip, frame->rflags, x86_get_cr2());
+    /*  dprintf(CRITICAL, "EAX: %08x ECX: %08x EDX: %08x EBX: %08x\n",
+                frame->rax, frame->rcx, frame->rdx, frame->rbx);
+        dprintf(CRITICAL, "ESP: %08x EBP: %08x ESI: %08x EDI: %08x\n",
+                frame->rsp, frame->rbp, frame->rsi, frame->rdi);
+        dprintf(CRITICAL, " DS:     %04x  ES:     %04x  FS:     %04x  GS:     %04x\n",
+                frame->ds, frame->es, frame->fs, frame->gs);
+    */
+
+    // dump the bottom of the current stack
+    addr_t stack = (addr_t) frame; //(addr_t) (((uint32_t *) frame) + (sizeof(struct x86_iframe) / sizeof(uint32_t) - 1));
+
+    if (stack != 0) {
+        dprintf(CRITICAL, "bottom of stack at 0x%08x:\n", (unsigned int)stack);
+        hexdump((void *)stack, 192);
+    }
+}
+
+static void exception_die(struct x86_iframe *frame, const char *msg)
+{
+    dprintf(CRITICAL, "%s", msg);   /* "%s" guards against format specifiers in msg (non-literal format string) */
+    dump_fault_frame(frame);
+
+    for (;;) {
+        x86_cli();
+        x86_hlt();
+    }
+}
+
+void x86_syscall_handler(struct x86_iframe *frame)
+{
+    exception_die(frame, "unhandled syscall, halting\n");
+}
+
+void x86_gpf_handler(struct x86_iframe *frame)
+{
+    exception_die(frame, "unhandled gpf, halting\n");
+}
+
+void x86_invop_handler(struct x86_iframe *frame)
+{
+    exception_die(frame, "unhandled invalid op, halting\n");
+}
+
+void x86_unhandled_exception(struct x86_iframe *frame)
+{
+    exception_die(frame, "unhandled exception, halting\n");
+}
+
+/*
+ *  Page fault handler for x86-64
+ */
+void x86_pfe_handler(struct x86_iframe *frame)
+{
+    /* Handle a page fault exception */
+    uint32_t error_code;
+    thread_t *current_thread;
+    error_code = frame->err_code;
+
+#ifdef PAGE_FAULT_DEBUG_INFO
+    uint64_t v_addr, ssp, esp, ip, rip;
+
+    v_addr = x86_get_cr2();
+    ssp = frame->user_ss & X86_8BYTE_MASK;
+    esp = frame->user_rsp;
+    ip  = frame->cs & X86_8BYTE_MASK;
+    rip = frame->rip;
+
+    dprintf(SPEW, "<PAGE FAULT> Instruction Pointer   = 0x%x:0x%x\n",
+            (unsigned int)ip,
+            (unsigned int)rip);
+    dprintf(SPEW, "<PAGE FAULT> Stack Pointer         = 0x%x:0x%x\n",
+            (unsigned int)ssp,
+            (unsigned int)esp);
+    dprintf(SPEW, "<PAGE FAULT> Fault Linear Address = 0x%x\n",
+            (unsigned int)v_addr);
+    dprintf(SPEW, "<PAGE FAULT> Error Code Value      = 0x%x\n",
+            error_code);
+    dprintf(SPEW, "<PAGE FAULT> Error Code Type = %s %s %s%s, %s\n",
+            error_code & PFEX_U ? "user" : "supervisor",
+            error_code & PFEX_W ? "write" : "read",
+            error_code & PFEX_I ? "instruction" : "data",
+            error_code & PFEX_RSV ? " rsv" : "",
+            error_code & PFEX_P ? "protection violation" : "page not present");
+#endif
+
+    current_thread = get_current_thread();
+    dump_thread(current_thread);
+
+    if (error_code & PFEX_U) {
+        // User mode page fault
+        switch (error_code) {
+            case 4:
+            case 5:
+            case 6:
+            case 7:
+#ifdef PAGE_FAULT_DEBUG_INFO
+                thread_detach(current_thread);
+#else
+                thread_exit(current_thread->retcode);
+#endif
+                break;
+        }
+    } else {
+        // Supervisor mode page fault
+        switch (error_code) {
+            case 0:
+            case 1:
+            case 2:
+            case 3:
+                exception_die(frame, "Page Fault exception, halting\n");
+                break;
+        }
+    }
+}
diff --git a/src/bsp/lk/arch/x86-64/fpu.c b/src/bsp/lk/arch/x86-64/fpu.c
new file mode 100755
index 0000000..b2b7b84
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/fpu.c
@@ -0,0 +1,160 @@
+/*
+* Copyright (c) 2015 Intel Corporation
+*
+* Permission is hereby granted, free of charge, to any person obtaining
+* a copy of this software and associated documentation files
+* (the "Software"), to deal in the Software without restriction,
+* including without limitation the rights to use, copy, modify, merge,
+* publish, distribute, sublicense, and/or sell copies of the Software,
+* and to permit persons to whom the Software is furnished to do so,
+* subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be
+* included in all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <arch/x86.h>
+#include <arch/fpu.h>
+#include <kernel/thread.h>
+
+#if X86_WITH_FPU
+
+/* CPUID EAX = 1 return values */
+
+#define ECX_SSE3    (0x00000001 << 0)
+#define ECX_SSSE3   (0x00000001 << 9)
+#define ECX_SSE4_1  (0x00000001 << 19)
+#define ECX_SSE4_2  (0x00000001 << 20)
+#define EDX_FXSR    (0x00000001 << 24)
+#define EDX_SSE     (0x00000001 << 25)
+#define EDX_SSE2    (0x00000001 << 26)
+#define EDX_FPU     (0x00000001 << 0)
+
+#define FPU_CAP(ecx, edx) ((edx & EDX_FPU) != 0)
+
+#define SSE_CAP(ecx, edx) ( \
+    ((ecx & (ECX_SSE3 | ECX_SSSE3 | ECX_SSE4_1 | ECX_SSE4_2)) != 0) || \
+    ((edx & (EDX_SSE | EDX_SSE2)) != 0) \
+    )
+
+#define FXSAVE_CAP(ecx, edx) ((edx & EDX_FXSR) != 0)
+
+static int fp_supported;
+static thread_t *fp_owner;
+
+/* FXSAVE area comprises 512 bytes starting with 16-byte aligned */
+static uint8_t __ALIGNED(16) fpu_init_states[512]={0};
+
+static void get_cpu_cap(uint32_t *ecx, uint32_t *edx)
+{
+    uint32_t eax = 1;
+
+    __asm__ __volatile__
+    ("cpuid" : "=c" (*ecx), "=d" (*edx) : "a" (eax));
+}
+
+void fpu_init(void)
+{
+    uint32_t ecx = 0, edx = 0;
+    uint16_t fcw;
+    uint32_t mxcsr;
+
+#ifdef ARCH_X86_64
+    uint64_t x;
+#else
+    uint32_t x;
+#endif
+
+    fp_supported = 0;
+    fp_owner = NULL;
+
+    get_cpu_cap(&ecx, &edx);
+
+    if (!FPU_CAP(ecx, edx) || !SSE_CAP(ecx, edx) || !FXSAVE_CAP(ecx, edx))
+        return;
+
+    fp_supported = 1;
+
+    /* No x87 emul, monitor co-processor */
+
+    x = x86_get_cr0();
+    x &= ~X86_CR0_EM;
+    x |= X86_CR0_NE;
+    x |= X86_CR0_MP;
+    x86_set_cr0(x);
+
+    /* Init x87 and unmask all exceptions */
+
+    __asm__ __volatile__ ("finit");
+    __asm__ __volatile__("fstcw %0" : "=m" (fcw));
+    fcw &= 0xffc0;
+    __asm__ __volatile__("fldcw %0" : : "m" (fcw));
+
+    /* Init SSE and unmask all exceptions */
+
+    x = x86_get_cr4();
+    x |= X86_CR4_OSXMMEXPT;
+    x |= X86_CR4_OSFXSR;
+    x &= ~X86_CR4_OSXSAVE;
+    x86_set_cr4(x);
+
+    __asm__ __volatile__("stmxcsr %0" : "=m" (mxcsr));
+    mxcsr &= 0x0000003f;
+    __asm__ __volatile__("ldmxcsr %0" : : "m" (mxcsr));
+
+    /* save fpu initial states, and used when new thread creates */
+    __asm__ __volatile__("fxsave %0" : "=m" (fpu_init_states));
+
+    x86_set_cr0(x86_get_cr0() | X86_CR0_TS);
+    return;
+}
+
+void fpu_init_thread_states(thread_t *t)
+{
+    t->arch.fpu_states = (vaddr_t *)ROUNDUP(((vaddr_t)t->arch.fpu_buffer), 16);
+    /* copy exactly the 512-byte FXSAVE image; sizeof(fpu_buffer) (512+16) would overread fpu_init_states and can overrun the aligned destination */
+    memcpy(t->arch.fpu_states, fpu_init_states, sizeof(fpu_init_states));
+}
+
+void fpu_context_switch(thread_t *old_thread, thread_t *new_thread)
+{
+    if (fp_supported == 0)
+        return;
+
+    if (new_thread != fp_owner)
+        x86_set_cr0(x86_get_cr0() | X86_CR0_TS);    /* arm the #NM trap: lazily save/restore on first FPU use */
+    else
+        x86_set_cr0(x86_get_cr0() & ~X86_CR0_TS);   /* FPU registers already belong to new_thread */
+
+    return;
+}
+
+void fpu_dev_na_handler(void)   /* #NM (device-not-available) handler: migrate FPU state to the current thread */
+{
+    thread_t *self;
+
+    x86_set_cr0(x86_get_cr0() & ~X86_CR0_TS);   /* clear TS so FPU instructions stop faulting */
+
+    if (fp_supported == 0)
+        return;
+
+    self = get_current_thread();
+
+    if ((fp_owner != NULL) && (fp_owner != self)) {
+        __asm__ __volatile__("fxsave %0" : "=m" (*fp_owner->arch.fpu_states));  /* spill previous owner's context */
+        __asm__ __volatile__("fxrstor %0" : : "m" (*self->arch.fpu_states));    /* load ours */
+    }
+
+    fp_owner = self;    /* NOTE(review): never cleared on thread exit — stale pointer if the owner thread is freed; verify */
+    return;
+}
+#endif
+
+/* End of file */
diff --git a/src/bsp/lk/arch/x86-64/include/arch/arch_ops.h b/src/bsp/lk/arch/x86-64/include/arch/arch_ops.h
new file mode 100644
index 0000000..4dcd30a
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/include/arch/arch_ops.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __ARCH_X86_64_OPS_H
+#define __ARCH_X86_64_OPS_H
+
+#include <compiler.h>
+
+#ifndef ASSEMBLY
+
+#include <arch/x86.h>
+
+/* override of some routines */
+static inline void arch_enable_ints(void)
+{
+    CF;     /* CF: compiler fence — presumably from compiler.h; ordered before sti. TODO confirm */
+    __asm__ volatile("sti");
+}
+
+static inline void arch_disable_ints(void)  /* duplicated 'inline' keyword removed */
+{
+    __asm__ volatile("cli");
+    CF;
+}
+
+static inline bool arch_ints_disabled(void) /* duplicated 'inline' keyword removed */
+{
+    uint64_t state;
+
+    __asm__ volatile(
+        "pushfq;"
+        "popq %%rax"
+        : "=a" (state)
+        :: "memory");
+
+    return !(state & (1<<9));   /* bit 9 of RFLAGS is IF */
+}
+
+int _atomic_and(volatile int *ptr, int val);
+int _atomic_or(volatile int *ptr, int val);
+int _atomic_cmpxchg(volatile int *ptr, int oldval, int newval);
+
+static inline int atomic_add(volatile int *ptr, int val)
+{
+    __asm__ volatile(
+        "lock xaddl %[val], %[ptr];"
+        : [val]"+a" (val), [ptr]"+m" (*ptr)  /* xadd reads AND writes both operands: must be "+" constraints, not "=a"/"m" */
+        :
+        : "memory"
+    );
+
+    return val;
+}
+
+static inline int atomic_swap(volatile int *ptr, int val)
+{
+    __asm__ volatile(
+        "xchgl %[val], %[ptr];"
+        : [val]"+a" (val), [ptr]"+m" (*ptr)  /* xchg with a memory operand is implicitly locked; both operands are read-write */
+        :
+        : "memory"
+    );
+
+    return val;
+}
+
+
+static inline int atomic_and(volatile int *ptr, int val) { return _atomic_and(ptr, val); }
+static inline int atomic_or(volatile int *ptr, int val) { return _atomic_or(ptr, val); }
+static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) { return _atomic_cmpxchg(ptr, oldval, newval); }
+
+static inline uint32_t arch_cycle_count(void)
+{
+    uint32_t timestamp;
+    rdtscl(timestamp);
+
+    return timestamp;
+}
+
+/* use a global pointer to store the current_thread */
+extern struct thread *_current_thread;
+
+static inline struct thread *get_current_thread(void)
+{
+    return _current_thread;
+}
+
+static inline void set_current_thread(struct thread *t)
+{
+    _current_thread = t;
+}
+
+static inline uint arch_curr_cpu_num(void)
+{
+    return 0;
+}
+
+#endif // !ASSEMBLY
+
+#endif
+
diff --git a/src/bsp/lk/arch/x86-64/include/arch/arch_thread.h b/src/bsp/lk/arch/x86-64/include/arch/arch_thread.h
new file mode 100755
index 0000000..79c5c0b
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/include/arch/arch_thread.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __X86_ARCH_THREAD_H
+#define __X86_ARCH_THREAD_H
+
+#include <sys/types.h>
+
+struct arch_thread {
+    vaddr_t rsp;
+#if X86_WITH_FPU
+    vaddr_t *fpu_states;
+    uint8_t fpu_buffer[512 + 16];
+#endif
+};
+
+#endif
+
diff --git a/src/bsp/lk/arch/x86-64/include/arch/defines.h b/src/bsp/lk/arch/x86-64/include/arch/defines.h
new file mode 100644
index 0000000..21ba91f
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/include/arch/defines.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __ARCH_CPU_H
+#define __ARCH_CPU_H
+
+#define PAGE_SIZE 4096
+#define PAGE_SIZE_SHIFT 12
+
+#define CACHE_LINE 32   /* NOTE(review): x86-64 cache lines are typically 64 bytes — confirm 32 is intentional */
+#define ARCH_DEFAULT_STACK_SIZE 8192
+
+#endif
+
diff --git a/src/bsp/lk/arch/x86-64/include/arch/fpu.h b/src/bsp/lk/arch/x86-64/include/arch/fpu.h
new file mode 100755
index 0000000..144db5a
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/include/arch/fpu.h
@@ -0,0 +1,33 @@
+/*
+* Copyright (c) 2015 Intel Corporation
+*
+* Permission is hereby granted, free of charge, to any person obtaining
+* a copy of this software and associated documentation files
+* (the "Software"), to deal in the Software without restriction,
+* including without limitation the rights to use, copy, modify, merge,
+* publish, distribute, sublicense, and/or sell copies of the Software,
+* and to permit persons to whom the Software is furnished to do so,
+* subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be
+* included in all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+#pragma once  /* header previously had no multiple-inclusion guard */
+
+#include <kernel/thread.h>
+#include <stdlib.h>
+#include <string.h>
+
+void fpu_init(void);
+void fpu_init_thread_states(thread_t *t);
+void fpu_context_switch(thread_t *old_thread, thread_t *new_thread);
+void fpu_dev_na_handler(void);
+/* End of file */
diff --git a/src/bsp/lk/arch/x86-64/include/arch/spinlock.h b/src/bsp/lk/arch/x86-64/include/arch/spinlock.h
new file mode 100644
index 0000000..b001b62
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/include/arch/spinlock.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <arch/ops.h>
+#include <arch/x86.h>
+#include <stdbool.h>
+
+#define SPIN_LOCK_INITIAL_VALUE (0)
+
+typedef unsigned long spin_lock_t;
+
+typedef uint64_t spin_lock_saved_state_t;
+typedef uint spin_lock_save_flags_t;
+
+/* simple implementation of spinlocks for no smp support */
+static inline void arch_spin_lock_init(spin_lock_t *lock)
+{
+    *lock = SPIN_LOCK_INITIAL_VALUE;
+}
+
+static inline bool arch_spin_lock_held(spin_lock_t *lock)
+{
+    return *lock != 0;   /* nonzero means held */
+}
+
+static inline void arch_spin_lock(spin_lock_t *lock)
+{
+    *lock = 1;   /* uniprocessor: no atomic exchange needed, just mark held */
+}
+
+static inline int arch_spin_trylock(spin_lock_t *lock)
+{
+    return 0;   /* presumably 0 = acquired; always succeeds on UP - confirm convention */
+}
+
+static inline void arch_spin_unlock(spin_lock_t *lock)
+{
+    *lock = 0;   /* mark released */
+}
+
+/* flags are unused on x86 */
+#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS  0
+
+static inline void
+arch_interrupt_save(spin_lock_saved_state_t *statep, spin_lock_save_flags_t flags)
+{
+    *statep = x86_save_rflags();
+    arch_disable_ints();
+}
+
+static inline void
+arch_interrupt_restore(spin_lock_saved_state_t old_state, spin_lock_save_flags_t flags)
+{
+    x86_restore_rflags(old_state);
+}
+
+
diff --git a/src/bsp/lk/arch/x86-64/include/arch/x86.h b/src/bsp/lk/arch/x86-64/include/arch/x86.h
new file mode 100644
index 0000000..147fc4e
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/include/arch/x86.h
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __ARCH_X86_H
+#define __ARCH_X86_H
+
+#include <compiler.h>
+#include <sys/types.h>
+
+__BEGIN_CDECLS
+
+#define PFEX_P 0x01
+#define PFEX_W 0x02
+#define PFEX_U 0x04
+#define PFEX_RSV 0x08
+#define PFEX_I 0x10
+#define X86_8BYTE_MASK 0xFFFFFFFF /* NOTE(review): masks 4 bytes (32 bits) despite the name - confirm */
+#define X86_CPUID_ADDR_WIDTH 0x80000008
+
+void arch_mmu_init(void);
+
+struct x86_context_switch_frame {
+    uint64_t r15, r14, r13, r12;
+    uint64_t rbp;
+    uint64_t rbx;
+    uint64_t rflags;
+    uint64_t rip;
+};
+
+void x86_64_context_switch(vaddr_t *oldsp, vaddr_t newsp);
+
+struct x86_iframe {
+    uint64_t pivot;                                     // stack switch pivot
+    uint64_t rdi, rsi, rbp, rbx, rdx, rcx, rax;         // pushed by common handler
+    uint64_t r8, r9, r10, r11, r12, r13, r14, r15;      // pushed by common handler
+    uint64_t vector;                                    // pushed by stub
+    uint64_t err_code;                                  // pushed by interrupt or stub
+    uint64_t rip, cs, rflags;                           // pushed by interrupt
+    uint64_t user_rsp, user_ss;                         // pushed by interrupt if priv change occurs
+};
+/*
+ * x86 TSS structure
+ */
+typedef struct __PACKED tss_64 {
+    uint32_t rsvd0;
+    uint64_t rsp0;
+    uint64_t rsp1;
+    uint64_t rsp2;
+    uint32_t rsvd1;
+    uint32_t rsvd2;
+    uint64_t ist1;
+    uint64_t ist2;
+    uint64_t ist3;
+    uint64_t ist4;
+    uint64_t ist5;
+    uint64_t ist6;
+    uint64_t ist7;
+    uint32_t rsvd3;
+    uint32_t rsvd4;
+    uint16_t rsvd5;
+    uint16_t iomap_base;
+} __PACKED tss_t;
+
+#define X86_CR0_PE 0x00000001 /* protected mode enable */
+#define X86_CR0_MP 0x00000002 /* monitor coprocessor */
+#define X86_CR0_EM 0x00000004 /* emulation */
+#define X86_CR0_NE 0x00000020 /* enable x87 exception */
+#define X86_CR0_TS 0x00000008 /* task switched */
+#define X86_CR0_WP 0x00010000 /* supervisor write protect */
+#define X86_CR0_NW 0x20000000 /* not write-through */
+#define X86_CR0_CD 0x40000000 /* cache disable */
+#define X86_CR0_PG 0x80000000 /* enable paging */
+#define X86_CR4_OSFXSR 0x00000200 /* os supports fxsave */
+#define X86_CR4_OSXMMEXPT 0x00000400 /* os supports xmm exception */
+#define X86_CR4_OSXSAVE 0x00040000 /* os supports xsave */
+#define X86_CR4_SMEP 0x00100000 /* SMEP protection enabling */
+#define X86_CR4_SMAP 0x00200000 /* SMAP protection enabling */
+#define x86_EFER_NXE 0x00000800 /* to enable execute disable bit */
+#define x86_MSR_EFER 0xc0000080 /* EFER Model Specific Register id */
+
+static inline void set_in_cr0(uint32_t mask)
+{
+    /* CR moves are always 64-bit in long mode: movl %%cr0,%%eax does not assemble */
+    __asm__ __volatile__ (
+        "movq %%cr0, %%rax	\n\t"
+        "orq %0, %%rax		\n\t"
+        "movq %%rax, %%cr0	\n\t"
+        : : "r" ((uint64_t)mask)
+        : "rax");
+}
+
+static inline void clear_in_cr0(uint32_t mask)
+{
+    __asm__ __volatile__ (
+        "movq %%cr0, %%rax	\n\t"
+        "andq %0, %%rax		\n\t"   /* widen BEFORE inverting so bits 63:32 stay set */
+        "movq %%rax, %%cr0	\n\t"
+        : : "r" (~(uint64_t)mask)
+        : "rax");
+}
+
+static inline void x86_clts(void) {__asm__ __volatile__ ("clts"); }
+static inline void x86_hlt(void) {__asm__ __volatile__ ("hlt"); }
+static inline void x86_sti(void) {__asm__ __volatile__ ("sti"); }
+static inline void x86_cli(void) {__asm__ __volatile__ ("cli"); }
+static inline void x86_ltr(uint16_t sel)
+{
+    __asm__ __volatile__ ("ltr %%ax" :: "a" (sel));
+}
+
+static inline uint64_t x86_get_cr2(void)
+{
+    uint64_t rv;
+
+    __asm__ __volatile__ (
+        "movq %%cr2, %0"
+        : "=r" (rv)
+    );
+
+    return rv;
+}
+
+static inline uint64_t x86_save_rflags(void)
+{
+    uint64_t state;
+
+    __asm__ volatile(
+        "pushfq;"
+        "popq %0"
+        : "=rm" (state)
+        :: "memory");
+
+    return state;
+}
+
+static inline void x86_restore_rflags(uint64_t rflags)
+{
+    __asm__ volatile(
+        "pushq %0;"
+        "popfq"
+        :: "g" (rflags)
+        : "memory", "cc");
+}
+
+#define rdtsc(low,high) \
+     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+#define rdtscl(low) \
+     __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
+
+#define rdtscll(val) \
+     __asm__ __volatile__("rdtsc" : "=A" (val))
+
+static inline uint8_t inp(uint16_t _port)
+{
+    uint8_t rv;
+    __asm__ __volatile__ ("inb %1, %0"
+                          : "=a" (rv)
+                          : "d" (_port));
+    return (rv);
+}
+
+static inline uint16_t inpw (uint16_t _port)
+{
+    uint16_t rv;
+    __asm__ __volatile__ ("inw %1, %0"
+                          : "=a" (rv)
+                          : "d" (_port));
+    return (rv);
+}
+
+static inline uint32_t inpd(uint16_t _port)
+{
+    uint32_t rv;
+    __asm__ __volatile__ ("inl %1, %0"
+                          : "=a" (rv)
+                          : "d" (_port));
+    return (rv);
+}
+
+static inline void outp(uint16_t _port, uint8_t _data)
+{
+    __asm__ __volatile__ ("outb %1, %0"
+                          :
+                          : "d" (_port),
+                          "a" (_data));
+}
+
+static inline void outpw(uint16_t _port, uint16_t _data)
+{
+    __asm__ __volatile__ ("outw %1, %0"
+                          :
+                          : "d" (_port),
+                          "a" (_data));
+}
+
+static inline void outpd(uint16_t _port, uint32_t _data)
+{
+    __asm__ __volatile__ ("outl %1, %0"
+                          :
+                          : "d" (_port),
+                          "a" (_data));
+}
+
+static inline void inprep(uint16_t _port, uint8_t *_buffer, uint32_t _reads)
+{
+    __asm__ __volatile__ ("pushfq \n\t"
+                          "cli \n\t"
+                          "cld \n\t"
+                          "rep insb \n\t"
+                          "popfq \n\t"
+                          :
+                          : "d" (_port),
+                          "D" (_buffer),
+                          "c" (_reads));
+}
+
+static inline void outprep(uint16_t _port, uint8_t *_buffer, uint32_t _writes)
+{
+    __asm__ __volatile__ ("pushfq \n\t"
+                          "cli \n\t"
+                          "cld \n\t"
+                          "rep outsb \n\t"
+                          "popfq \n\t"
+                          :
+                          : "d" (_port),
+                          "S" (_buffer),
+                          "c" (_writes));
+}
+
+static inline void inpwrep(uint16_t _port, uint16_t *_buffer, uint32_t _reads)
+{
+    __asm__ __volatile__ ("pushfq \n\t"
+                          "cli \n\t"
+                          "cld \n\t"
+                          "rep insw \n\t"
+                          "popfq \n\t"
+                          :
+                          : "d" (_port),
+                          "D" (_buffer),
+                          "c" (_reads));
+}
+
+static inline void outpwrep(uint16_t _port, uint16_t *_buffer,
+                            uint32_t _writes)
+{
+    __asm__ __volatile__ ("pushfq \n\t"
+                          "cli \n\t"
+                          "cld \n\t"
+                          "rep outsw \n\t"
+                          "popfq \n\t"
+                          :
+                          : "d" (_port),
+                          "S" (_buffer),
+                          "c" (_writes));
+}
+
+static inline void inpdrep(uint16_t _port, uint32_t *_buffer,
+                           uint32_t _reads)
+{
+    __asm__ __volatile__ ("pushfq \n\t"
+                          "cli \n\t"
+                          "cld \n\t"
+                          "rep insl \n\t"
+                          "popfq \n\t"
+                          :
+                          : "d" (_port),
+                          "D" (_buffer),
+                          "c" (_reads));
+}
+
+static inline void outpdrep(uint16_t _port, uint32_t *_buffer,
+                            uint32_t _writes)
+{
+    __asm__ __volatile__ ("pushfq \n\t"
+                          "cli \n\t"
+                          "cld \n\t"
+                          "rep outsl \n\t"
+                          "popfq \n\t"
+                          :
+                          : "d" (_port),
+                          "S" (_buffer),
+                          "c" (_writes));
+}
+
+static inline uint64_t read_msr (uint32_t msr_id)
+{
+    uint64_t msr_read_val = 0;
+    uint32_t low_val = 0;
+    uint32_t high_val = 0;
+
+    __asm__ __volatile__ (
+        "rdmsr \n\t"
+        : "=a" (low_val), "=d"(high_val)
+        : "c" (msr_id));
+
+    msr_read_val = high_val;
+    msr_read_val = (msr_read_val << 32) | low_val;
+
+    return msr_read_val;
+}
+
+static inline void write_msr (uint32_t msr_id, uint64_t msr_write_val)
+{
+    uint32_t low_val = (uint32_t)msr_write_val;
+    uint32_t high_val = (uint32_t)(msr_write_val >> 32);
+
+    __asm__ __volatile__ (
+        "wrmsr \n\t"
+        : : "c" (msr_id), "a" (low_val), "d"(high_val));
+}
+
+static inline uint64_t x86_get_cr3(void)
+{
+    uint64_t rv;
+
+    __asm__ __volatile__ (
+        "movq %%cr3, %0"
+        : "=r" (rv));
+    return rv;
+}
+
+static inline void x86_set_cr3(uint64_t in_val)
+{
+    __asm__ __volatile__ (
+        "movq %0,%%cr3 \n\t"
+        :
+        :"r" (in_val));
+}
+
+static inline uint64_t x86_get_cr4(void)
+{
+    uint64_t rv;
+
+    __asm__ __volatile__ (
+        "movq %%cr4, %0 \n\t"
+        : "=r" (rv));
+    return rv;
+}
+
+static inline void x86_set_cr4(uint64_t in_val)
+{
+    __asm__ __volatile__ (
+        "movq %0,%%cr4 \n\t"
+        :
+        :"r" (in_val));
+}
+
+static inline uint64_t x86_get_cr0(void)
+{
+    uint64_t rv;
+
+    __asm__ __volatile__ (
+        "movq %%cr0, %0 \n\t"
+        : "=r" (rv));
+    return rv;
+}
+
+static inline void x86_set_cr0(uint64_t in_val)
+{
+    __asm__ __volatile__ (
+        "movq %0,%%cr0 \n\t"
+        :
+        :"r" (in_val));
+}
+
+static inline uint32_t x86_get_address_width(void)
+{
+    uint32_t rv;
+
+    __asm__ __volatile__ (
+        "cpuid \n\t"
+        :"=a" (rv)
+        :"a" (X86_CPUID_ADDR_WIDTH) :"rbx", "rcx", "rdx");  /* cpuid also writes ebx/ecx/edx */
+
+    /*
+     Extracting bit 15:0 from eax register
+     Bits 07-00: #Physical Address Bits
+     Bits 15-08: #Linear Address Bits
+    */
+    return (rv & 0x0000ffff);
+}
+
+static inline uint64_t check_smep_avail(void)
+{
+    uint64_t reg_a = 0x07;
+    uint64_t reg_b = 0x0;
+    uint64_t reg_c = 0x0;
+    __asm__ __volatile__ (
+        "cpuid \n\t"
+        :"=b" (reg_b), "+a" (reg_a), "+c" (reg_c)
+        :: "rdx");  /* cpuid rewrites eax/ecx/edx too; declare them */
+    return ((reg_b >> 7) & 0x1); /* CPUID.(7,0):EBX bit 7 = SMEP (was wrongly bit 6) */
+}
+
+static inline uint64_t check_smap_avail(void)
+{
+    uint64_t reg_a = 0x07;
+    uint64_t reg_b = 0x0;
+    uint64_t reg_c = 0x0;
+    __asm__ __volatile__ (
+        "cpuid \n\t"
+        :"=b" (reg_b), "+a" (reg_a), "+c" (reg_c)
+        :: "rdx");  /* cpuid rewrites eax/ecx/edx too; declare them */
+    return ((reg_b >> 20) & 0x1); /* CPUID.(7,0):EBX bit 20 = SMAP (was wrongly bit 19) */
+}
+
+__END_CDECLS
+
+#endif
diff --git a/src/bsp/lk/arch/x86-64/include/arch/x86/descriptor.h b/src/bsp/lk/arch/x86-64/include/arch/x86/descriptor.h
new file mode 100644
index 0000000..cd989ef
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/include/arch/x86/descriptor.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __ARCH_DESCRIPTOR_H
+#define __ARCH_DESCRIPTOR_H
+
+#include <sys/types.h>
+
+/*
+ * System Selectors
+ */
+#define CODE_SELECTOR   0x10
+#define DATA_SELECTOR   0x18
+#define VIDEO_SELECTOR  0x20
+#define TSS_SELECTOR    0x40
+
+#define USER_CODE_SELECTOR 0x23
+#define USER_DATA_SELECTOR 0x2b
+
+/*
+ * Descriptor Types
+ */
+#define SEG_TYPE_TSS        0x9
+#define SEG_TYPE_TSS_BUSY   0xb
+#define SEG_TYPE_TASK_GATE  0x5
+#define SEG_TYPE_INT_GATE   0xe     // 32 bit
+#define SEG_TYPE_DATA_RW    0x2
+#define SEG_TYPE_CODE_RW    0xa
+
+typedef uint16_t seg_sel_t;
+
+void set_global_desc(seg_sel_t sel, void *base, uint32_t limit,
+                     uint8_t present, uint8_t ring, uint8_t sys, uint8_t type, uint8_t gran, uint8_t bits);
+
+#endif
diff --git a/src/bsp/lk/arch/x86-64/include/arch/x86/mmu.h b/src/bsp/lk/arch/x86-64/include/arch/x86/mmu.h
new file mode 100644
index 0000000..e822862
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/include/arch/x86/mmu.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2008 Travis Geiselbrecht
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/types.h>
+#include <compiler.h>
+
+__BEGIN_CDECLS
+
+void x86_mmu_init(void);
+
+#define X86_MMU_PG_P        0x001       /* P    Valid           */
+#define X86_MMU_PG_RW       0x002       /* R/W  Read/Write      */
+#define X86_MMU_PG_U        0x004       /* U/S  User/Supervisor     */
+#define X86_MMU_PG_PS       0x080       /* PS   Page size (0=4k,1=2M)   */
+#define X86_MMU_PG_PTE_PAT  0x080       /* PAT  PAT index       */
+#define X86_MMU_PG_G        0x100       /* G    Global          */
+#define X86_MMU_PG_NX       (1ul << 63) /* NX   No Execute      */
+#define X86_MMU_CACHE_DISABLE   0x010       /* C Cache disable */
+#define X86_MMU_CLEAR       0x0
+#define X86_DIRTY_ACCESS_MASK   0xf9f
+#define X86_PG_FRAME        (0x000ffffffffff000ul)
+#define X86_PHY_ADDR_MASK   (0x000ffffffffffffful)
+#define X86_FLAGS_MASK      (0x8000000000000ffful)
+#define X86_PTE_NOT_PRESENT (0xFFFFFFFFFFFFFFFEul)
+#define X86_2MB_PAGE_FRAME  (0x000fffffffe00000ul)
+#define PAGE_OFFSET_MASK_4KB    (0x0000000000000ffful)
+#define PAGE_OFFSET_MASK_2MB    (0x00000000001ffffful)
+
+#define PAGE_SIZE       4096
+#define PAGING_LEVELS       4
+#define PAGE_DIV_SHIFT      12
+#define PML4_SHIFT      39
+#define PDP_SHIFT       30
+#define PD_SHIFT        21
+#define PT_SHIFT        12
+#define ADDR_OFFSET     9
+
+#define X86_PHYS_TO_VIRT(x) (x)
+#define X86_VIRT_TO_PHYS(x) (x)
+#define X86_SET_FLAG(x)     (x=1)
+
+/* Different page table levels in the page table mgmt hierarchy */
+enum page_table_levels {
+    PF_L,
+    PT_L,
+    PD_L,
+    PDP_L,
+    PML4_L
+} page_level;  /* NOTE(review): variable defined in a header - every includer gets a tentative definition; confirm intent */
+
+struct map_range {
+    vaddr_t start_vaddr;
+    paddr_t start_paddr;
+    uint32_t size;
+};
+
+typedef uint64_t map_addr_t;
+typedef uint64_t arch_flags_t;
+
+status_t x86_mmu_map_range (addr_t pml4, struct map_range *range, arch_flags_t flags);
+status_t x86_mmu_check_mapping (addr_t pml4, paddr_t paddr,
+                                vaddr_t vaddr, arch_flags_t in_flags,
+                                uint32_t *ret_level, arch_flags_t *ret_flags,
+                                map_addr_t *last_valid_entry);
+status_t x86_mmu_add_mapping(addr_t pml4, paddr_t paddr,
+                             vaddr_t vaddr, arch_flags_t flags);
+status_t x86_mmu_unmap(addr_t pml4, vaddr_t vaddr, uint count);
+addr_t *x86_create_new_cr3(void);
+map_addr_t get_kernel_cr3(void);
+
+__END_CDECLS
diff --git a/src/bsp/lk/arch/x86-64/kernel.ld b/src/bsp/lk/arch/x86-64/kernel.ld
new file mode 100644
index 0000000..0f73ac2
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/kernel.ld
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2013 Travis Geiselbrecht
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+ENTRY(_start)
+SECTIONS
+{
+    . = 0x0200000;
+
+	.text : {
+		__code_start = .;
+		KEEP(*(.text.boot))
+		*(.text* .sram.text)
+		*(.gnu.linkonce.t.*)
+		__code_end = .;
+	} =0x9090
+
+	.rodata : ALIGN(4096) {
+		__rodata_start = .;
+		*(.rodata*)
+		*(.gnu.linkonce.r.*)
+		. = ALIGN(8);
+	}
+
+    /*
+     * extra linker scripts tend to insert sections just after .rodata,
+     * so we want to make sure this symbol comes after anything inserted above,
+     * but not aligned to the next section necessarily.
+     */
+    .dummy_post_rodata : {
+        __rodata_end = .;
+    }
+
+	.data : ALIGN(4096) {
+		__data_start = .;
+		*(.data .data.* .gnu.linkonce.d.*)
+	}
+
+	.ctors : ALIGN(4) {
+		__ctor_list = .;
+		KEEP(*(.ctors .init_array))
+		__ctor_end = .;
+	}
+	.dtors : ALIGN(4) {
+		__dtor_list = .;
+		KEEP(*(.dtors .fini_array))
+		__dtor_end = .;
+	}
+
+	.stab   : { *(.stab) }
+	.stabst : { *(.stabstr) }
+
+    /*
+     * extra linker scripts tend to insert sections just after .data,
+     * so we want to make sure this symbol comes after anything inserted above,
+     * but not aligned to the next section necessarily.
+     */
+    .dummy_post_data : {
+        __data_end = .;
+    }
+
+	.bss : ALIGN(4096) {
+		__bss_start = .;
+		*(.bss*)
+		*(.gnu.linkonce.b.*)
+		*(COMMON)
+		. = ALIGN(8);
+		__bss_end = .;
+	}
+
+	_end = .;
+
+	/* put a symbol arbitrarily 4MB past the end of the kernel */
+	/* used by the heap and other early boot time allocators */
+	_end_of_ram = . + (4*1024*1024);
+
+	/DISCARD/ : { *(.comment .note .eh_frame) }
+}
diff --git a/src/bsp/lk/arch/x86-64/mmu.c b/src/bsp/lk/arch/x86-64/mmu.c
new file mode 100644
index 0000000..e2ac83e
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/mmu.c
@@ -0,0 +1,700 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <debug.h>
+#include <sys/types.h>
+#include <compiler.h>
+#include <arch.h>
+#include <arch/x86.h>
+#include <arch/x86/mmu.h>
+#include <stdlib.h>
+#include <string.h>
+#include <arch/mmu.h>
+#include <assert.h>
+#include <err.h>
+#include <arch/arch_ops.h>
+
+extern map_addr_t g_CR3;
+
+/* Address width */
+extern uint8_t g_vaddr_width;
+extern uint8_t g_paddr_width;
+
+
+/**
+ * @brief  check if the virtual address is aligned and canonical
+ *
+ * @param vaddr  virtual address to validate
+ * @return true when vaddr is PAGE_SIZE-aligned and falls in one of the two
+ *         canonical halves implied by g_vaddr_width; false otherwise
+ */
+static bool x86_mmu_check_vaddr(vaddr_t vaddr)
+{
+    uint64_t addr = (uint64_t)vaddr;
+    uint64_t max_vaddr_lohalf,
+             min_vaddr_hihalf;
+
+    /* Check to see if the address is PAGE aligned */
+    if (!IS_ALIGNED(addr, PAGE_SIZE))
+        return false;
+
+    /* get max address in lower-half canonical addr space */
+    /* e.g. if width is 48, then 0x00007FFF_FFFFFFFF */
+    max_vaddr_lohalf = ((uint64_t)1ull << (g_vaddr_width - 1)) - 1;
+
+    /* get min address in higher-half canonical addr space */
+    /* e.g. if width is 48, then 0xFFFF8000_00000000*/
+    min_vaddr_hihalf = ~ max_vaddr_lohalf;
+
+    /* Check to see if the address in a canonical address */
+    if ((addr > max_vaddr_lohalf) && (addr < min_vaddr_hihalf))
+        return false;
+
+    return true;
+}
+
+
+/**
+ * @brief  check if the physical address is valid and aligned
+ *
+ * @param paddr  physical address to validate
+ * @return true when paddr is PAGE_SIZE-aligned and representable in
+ *         g_paddr_width bits; false otherwise
+ */
+static bool x86_mmu_check_paddr(paddr_t paddr)
+{
+    uint64_t addr = (uint64_t)paddr;
+    uint64_t max_paddr;
+
+    /* Check to see if the address is PAGE aligned */
+    if (!IS_ALIGNED(addr, PAGE_SIZE))
+        return false;
+
+    /* highest representable physical address, e.g. 2^g_paddr_width - 1 */
+    max_paddr = ((uint64_t)1ull << g_paddr_width) - 1;
+
+    return addr <= max_paddr;
+}
+
+
+/*
+ * Table-walk helpers: each one extracts the 9-bit (ADDR_OFFSET) index for its
+ * paging level from vaddr and returns the selected entry, run through
+ * X86_PHYS_TO_VIRT.
+ */
+static inline uint64_t get_pml4_entry_from_pml4_table(vaddr_t vaddr, addr_t pml4_addr)
+{
+    uint32_t pml4_index;
+    uint64_t *pml4_table = (uint64_t *)X86_PHYS_TO_VIRT(pml4_addr);
+
+    pml4_index = (((uint64_t)vaddr >> PML4_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
+    return X86_PHYS_TO_VIRT(pml4_table[pml4_index]);
+}
+
+static inline uint64_t get_pdp_entry_from_pdp_table(vaddr_t vaddr, uint64_t pml4e)
+{
+    uint32_t pdp_index;
+    uint64_t *pdpe;
+
+    pdp_index = (((uint64_t)vaddr >> PDP_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
+    pdpe = (uint64_t *)(pml4e & X86_PG_FRAME);
+    return X86_PHYS_TO_VIRT(pdpe[pdp_index]);
+}
+
+static inline uint64_t get_pd_entry_from_pd_table(vaddr_t vaddr, uint64_t pdpe)
+{
+    uint32_t pd_index;
+    uint64_t *pde;
+
+    pd_index = (((uint64_t)vaddr >> PD_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
+    pde = (uint64_t *)(pdpe & X86_PG_FRAME);
+    return X86_PHYS_TO_VIRT(pde[pd_index]);
+}
+
+static inline uint64_t get_pt_entry_from_pt_table(vaddr_t vaddr, uint64_t pde)
+{
+    uint32_t pt_index;
+    uint64_t *pte;
+
+    pt_index = (((uint64_t)vaddr >> PT_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
+    pte = (uint64_t *)(pde & X86_PG_FRAME);
+    return X86_PHYS_TO_VIRT(pte[pt_index]);
+}
+
+/* Page frame base of a 4KB mapping, taken from a PT entry. */
+static inline uint64_t get_pfn_from_pte(uint64_t pte)
+{
+    uint64_t pfn;
+
+    pfn = (pte & X86_PG_FRAME);
+    return X86_PHYS_TO_VIRT(pfn);
+}
+
+/* Page frame base of a 2MB large-page mapping, taken from a PD entry. */
+static inline uint64_t get_pfn_from_pde(uint64_t pde)
+{
+    uint64_t pfn;
+
+    pfn = (pde & X86_2MB_PAGE_FRAME);
+    return X86_PHYS_TO_VIRT(pfn);
+}
+
+/* Zero a freshly allocated page-table page; no-op when ptr is NULL. */
+static void map_zero_page(addr_t *ptr)
+{
+    if (ptr)
+        memset(ptr, 0, PAGE_SIZE);
+}
+
+/**
+ * @brief Returning the x86 arch flags from generic mmu flags
+ *
+ * Pages are writable unless ARCH_MMU_FLAG_PERM_RO is set; the user,
+ * uncached and no-execute bits translate one-for-one.
+ */
+arch_flags_t get_x86_arch_flags(arch_flags_t flags)
+{
+    arch_flags_t arch_flags = 0;
+
+    if (!(flags & ARCH_MMU_FLAG_PERM_RO))
+        arch_flags |= X86_MMU_PG_RW;
+
+    if (flags & ARCH_MMU_FLAG_PERM_USER)
+        arch_flags |= X86_MMU_PG_U;
+
+    if (flags & ARCH_MMU_FLAG_UNCACHED)
+        arch_flags |= X86_MMU_CACHE_DISABLE;
+
+    if (flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE)
+        arch_flags |= X86_MMU_PG_NX;
+
+    return arch_flags;
+}
+
+/**
+ * @brief Returning the generic mmu flags from x86 arch flags
+ *
+ * Inverse of get_x86_arch_flags().
+ */
+uint get_arch_mmu_flags(arch_flags_t flags)
+{
+    arch_flags_t mmu_flags = 0;
+
+    if (!(flags & X86_MMU_PG_RW))
+        mmu_flags |= ARCH_MMU_FLAG_PERM_RO;
+
+    if (flags & X86_MMU_PG_U)
+        mmu_flags |= ARCH_MMU_FLAG_PERM_USER;
+
+    if (flags & X86_MMU_CACHE_DISABLE)
+        mmu_flags |= ARCH_MMU_FLAG_UNCACHED;
+
+    if (flags & X86_MMU_PG_NX)
+        mmu_flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
+
+    return (uint)mmu_flags;
+}
+
+/**
+ * @brief  Walk the page table structures
+ *
+ * In this scenario, we are considering the paging scheme to be a PAE mode with
+ * 4KB pages.
+ * NOTE(review): the walk below is the 4-level PML4/PDP/PD/PT layout (with 2MB
+ * large-page support), not 32-bit PAE — the comment above looks stale.
+ *
+ * @param pml4              top-level table address, must be non-zero
+ * @param vaddr             virtual address to look up
+ * @param ret_level         out: deepest level reached (PF_L on success)
+ * @param mmu_flags         out: generic mmu flags of the mapping (0 if absent)
+ * @param last_valid_entry  out: last valid entry seen; on success the frame
+ *                          base plus the page offset of vaddr
+ * @return NO_ERROR, ERR_NOT_FOUND if unmapped, ERR_INVALID_ARGS on NULL outs
+ */
+static status_t x86_mmu_get_mapping(addr_t pml4, vaddr_t vaddr, uint32_t *ret_level,
+                                    arch_flags_t *mmu_flags, map_addr_t *last_valid_entry)
+{
+    uint64_t pml4e, pdpe, pde, pte;
+
+    DEBUG_ASSERT(pml4);
+    if ((!ret_level) || (!last_valid_entry) || (!mmu_flags)) {
+        return ERR_INVALID_ARGS;
+    }
+
+    /* defaults reported when the very first level is not present */
+    *ret_level = PML4_L;
+    *last_valid_entry = pml4;
+    *mmu_flags = 0;
+
+    pml4e = get_pml4_entry_from_pml4_table(vaddr, pml4);
+    if ((pml4e & X86_MMU_PG_P) == 0) {
+        return ERR_NOT_FOUND;
+    }
+
+    pdpe = get_pdp_entry_from_pdp_table(vaddr, pml4e);
+    if ((pdpe & X86_MMU_PG_P) == 0) {
+        *ret_level = PDP_L;
+        *last_valid_entry = pml4e;
+        return ERR_NOT_FOUND;
+    }
+
+    pde = get_pd_entry_from_pd_table(vaddr, pdpe);
+    if ((pde & X86_MMU_PG_P) == 0) {
+        *ret_level = PD_L;
+        *last_valid_entry = pdpe;
+        return ERR_NOT_FOUND;
+    }
+
+    /* 2 MB pages */
+    if (pde & X86_MMU_PG_PS) {
+        /* Getting the Page frame & adding the 4KB page offset from the vaddr */
+        /* NOTE(review): flags are masked out of the PHYS_TO_VIRT-converted
+         * entry rather than the raw entry — confirm this is intentional */
+        *last_valid_entry = get_pfn_from_pde(pde) + ((uint64_t)vaddr & PAGE_OFFSET_MASK_2MB);
+        *mmu_flags = get_arch_mmu_flags((X86_PHYS_TO_VIRT(pde)) & X86_FLAGS_MASK);
+        goto last;
+    }
+
+    /* 4 KB pages */
+    pte = get_pt_entry_from_pt_table(vaddr, pde);
+    if ((pte & X86_MMU_PG_P) == 0) {
+        *ret_level = PT_L;
+        *last_valid_entry = pde;
+        return ERR_NOT_FOUND;
+    }
+
+    /* Getting the Page frame & adding the 4KB page offset from the vaddr */
+    *last_valid_entry = get_pfn_from_pte(pte) + ((uint64_t)vaddr & PAGE_OFFSET_MASK_4KB);
+    *mmu_flags = get_arch_mmu_flags((X86_PHYS_TO_VIRT(pte)) & X86_FLAGS_MASK);
+
+last:
+    *ret_level = PF_L;
+    return NO_ERROR;
+}
+
+/**
+ * Walk the page table structures to see if the mapping between a virtual address
+ * and a physical address exists. Also, check the flags.
+ *
+ * @return NO_ERROR only when vaddr maps to exactly paddr and the dirty/access
+ *         flag bits match in_flags; otherwise ERR_NOT_FOUND, with *ret_flags
+ *         holding the differing flag bits (or in_flags unchanged when the
+ *         mapping itself was absent or pointed elsewhere).
+ */
+status_t x86_mmu_check_mapping(addr_t pml4, paddr_t paddr,
+                               vaddr_t vaddr, arch_flags_t in_flags,
+                               uint32_t *ret_level, arch_flags_t *ret_flags,
+                               map_addr_t *last_valid_entry)
+{
+    status_t status;
+    arch_flags_t existing_flags = 0;
+
+    DEBUG_ASSERT(pml4);
+    if ((!ret_level) || (!last_valid_entry) || (!ret_flags) ||
+            (!x86_mmu_check_vaddr(vaddr)) ||
+            (!x86_mmu_check_paddr(paddr))) {
+        return ERR_INVALID_ARGS;
+    }
+
+    status = x86_mmu_get_mapping(pml4, vaddr, ret_level, &existing_flags, last_valid_entry);
+    if (status || ((*last_valid_entry) != (uint64_t)paddr)) {
+        /* Mapping is absent or maps a different paddr; the access flags were
+         * never compared, so hand the caller's flags back unchanged. */
+        *ret_flags = in_flags;
+        return ERR_NOT_FOUND;
+    }
+
+    /* Checking the access flags for the mapped address. If it is not zero, then
+     * the access flags are different & the return flag will have those access bits
+     * which are different.
+     */
+    *ret_flags = (in_flags ^ get_x86_arch_flags(existing_flags)) & X86_DIRTY_ACCESS_MASK;
+
+    if (!(*ret_flags))
+        return NO_ERROR;
+
+    return ERR_NOT_FOUND;
+}
+
+/* Write the PT entry for vaddr: frame = paddr, caller's flags plus Present;
+ * the Global bit is set for kernel (non-user) pages. */
+static void update_pt_entry(vaddr_t vaddr, paddr_t paddr,  uint64_t pde, arch_flags_t flags)
+{
+    uint32_t pt_index;
+
+    uint64_t *pt_table = (uint64_t *)(pde & X86_PG_FRAME);
+    pt_index = (((uint64_t)vaddr >> PT_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
+    pt_table[pt_index] = (uint64_t)paddr;
+    pt_table[pt_index] |= flags | X86_MMU_PG_P;
+    if (!(flags & X86_MMU_PG_U))
+        pt_table[pt_index] |= X86_MMU_PG_G; /* setting global flag for kernel pages */
+}
+
+/* Point the PD entry for vaddr at page table 'm'; intermediate entries are
+ * always Present+RW, with User or Global depending on flags. */
+static void update_pd_entry(vaddr_t vaddr, uint64_t pdpe, addr_t *m, arch_flags_t flags)
+{
+    uint32_t pd_index;
+
+    uint64_t *pd_table = (uint64_t *)(pdpe & X86_PG_FRAME);
+    pd_index = (((uint64_t)vaddr >> PD_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
+    pd_table[pd_index] = (uint64_t)m;
+    pd_table[pd_index] |= X86_MMU_PG_P | X86_MMU_PG_RW;
+    if (flags & X86_MMU_PG_U)
+        pd_table[pd_index] |= X86_MMU_PG_U;
+    else
+        pd_table[pd_index] |= X86_MMU_PG_G; /* setting global flag for kernel pages */
+}
+
+/* Point the PDP entry for vaddr at page directory 'm' (same policy as above). */
+static void update_pdp_entry(vaddr_t vaddr, uint64_t pml4e, addr_t *m, arch_flags_t flags)
+{
+    uint32_t pdp_index;
+
+    uint64_t *pdp_table = (uint64_t *)(pml4e & X86_PG_FRAME);
+    pdp_index = (((uint64_t)vaddr >> PDP_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
+    pdp_table[pdp_index] = (uint64_t)m;
+    pdp_table[pdp_index] |= X86_MMU_PG_P | X86_MMU_PG_RW;
+    if (flags & X86_MMU_PG_U)
+        pdp_table[pdp_index] |= X86_MMU_PG_U;
+    else
+        pdp_table[pdp_index] |= X86_MMU_PG_G; /* setting global flag for kernel pages */
+}
+
+/* Point the PML4 entry for vaddr at PDP table 'm' (same policy as above). */
+static void update_pml4_entry(vaddr_t vaddr, addr_t pml4_addr, addr_t *m, arch_flags_t flags)
+{
+    uint32_t pml4_index;
+    uint64_t *pml4_table = (uint64_t *)(pml4_addr);
+
+    pml4_index = (((uint64_t)vaddr >> PML4_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
+    pml4_table[pml4_index] = (uint64_t)m;
+    pml4_table[pml4_index] |= X86_MMU_PG_P | X86_MMU_PG_RW;
+    if (flags & X86_MMU_PG_U)
+        pml4_table[pml4_index] |= X86_MMU_PG_U;
+    else
+        pml4_table[pml4_index] |= X86_MMU_PG_G; /* setting global flag for kernel pages */
+}
+
+/**
+ * @brief Allocating a new page table
+ *
+ * Returns a PAGE_SIZE-aligned, zeroed allocation, or NULL on failure.
+ */
+static addr_t *_map_alloc(size_t size)
+{
+    addr_t *page_ptr = memalign(PAGE_SIZE, size);
+    if (page_ptr)
+        map_zero_page(page_ptr);
+    return page_ptr;
+}
+
+/**
+ * @brief Creating a new CR3 and copying all the kernel mappings
+ *
+ * Returns NULL (0) when the global kernel CR3 has not been set up yet;
+ * ASSERTs (does not return an error) if the page allocation fails.
+ */
+addr_t *x86_create_new_cr3(void)
+{
+    map_addr_t *kernel_table, *new_table = NULL;
+
+    if (!g_CR3)
+        return 0;
+
+    kernel_table = (map_addr_t *)X86_PHYS_TO_VIRT(g_CR3);
+
+    /* Allocate a new Page to generate a new paging structure for a new CR3 */
+    new_table = (map_addr_t *)_map_alloc(PAGE_SIZE);
+    ASSERT(new_table);
+
+    /* Copying the kernel mapping as-is */
+    memcpy(new_table, kernel_table, PAGE_SIZE);
+
+    return (addr_t *)new_table;
+}
+
+/**
+ * @brief Returning the kernel CR3
+ */
+map_addr_t get_kernel_cr3()
+{
+    return g_CR3;
+}
+
+/**
+ * @brief  Add a new mapping for the given virtual address & physical address
+ *
+ * This is a API which handles the mapping b/w a virtual address & physical address
+ * either by checking if the mapping already exists and is valid OR by adding a
+ * new mapping with the required flags.
+ *
+ * In this scenario, we are considering the paging scheme to be a PAE mode with
+ * 4KB pages.
+ *
+ * Intermediate tables (PDP, PD, PT) are allocated on demand; pd_new/pdp_new
+ * remember which levels were freshly created so a later allocation failure
+ * can free them again.
+ *
+ * @return NO_ERROR, ERR_INVALID_ARGS for bad addresses, ERR_NO_MEMORY when a
+ *         table allocation fails.
+ */
+status_t x86_mmu_add_mapping(addr_t pml4, paddr_t paddr,
+                             vaddr_t vaddr, arch_flags_t mmu_flags)
+{
+    uint32_t pd_new = 0, pdp_new = 0;
+    uint64_t pml4e, pdpe, pde;
+    addr_t *m = NULL;
+    status_t ret = NO_ERROR;
+
+    DEBUG_ASSERT(pml4);
+    if ((!x86_mmu_check_vaddr(vaddr)) || (!x86_mmu_check_paddr(paddr)) )
+        return ERR_INVALID_ARGS;
+
+    pml4e = get_pml4_entry_from_pml4_table(vaddr, pml4);
+
+    if ((pml4e & X86_MMU_PG_P) == 0) {
+        /* Creating a new pdp table */
+        m = _map_alloc(PAGE_SIZE);
+        if (m == NULL) {
+            ret = ERR_NO_MEMORY;
+            goto clean;
+        }
+
+        update_pml4_entry(vaddr, pml4, m, get_x86_arch_flags(mmu_flags));
+        pml4e = (uint64_t)m;
+        X86_SET_FLAG(pdp_new);
+    }
+
+    if (!pdp_new)
+        pdpe = get_pdp_entry_from_pdp_table(vaddr, pml4e);
+
+    if (pdp_new || (pdpe & X86_MMU_PG_P) == 0) {
+        /* Creating a new pd table  */
+        m  = _map_alloc(PAGE_SIZE);
+        if (m == NULL) {
+            ret = ERR_NO_MEMORY;
+            if (pdp_new)
+                goto clean_pdp;
+            goto clean;
+        }
+
+        update_pdp_entry(vaddr, pml4e, m, get_x86_arch_flags(mmu_flags));
+        pdpe = (uint64_t)m;
+        X86_SET_FLAG(pd_new);
+    }
+
+    if (!pd_new)
+        pde = get_pd_entry_from_pd_table(vaddr, pdpe);
+
+    if (pd_new || (pde & X86_MMU_PG_P) == 0) {
+        /* Creating a new pt */
+        m  = _map_alloc(PAGE_SIZE);
+        if (m == NULL) {
+            ret = ERR_NO_MEMORY;
+            if (pd_new)
+                goto clean_pd;
+            goto clean;
+        }
+
+        update_pd_entry(vaddr, pdpe, m, get_x86_arch_flags(mmu_flags));
+        pde = (uint64_t)m;
+    }
+
+    /* Updating the page table entry with the paddr and access flags required for the mapping */
+    update_pt_entry(vaddr, paddr, pde, get_x86_arch_flags(mmu_flags));
+    ret = NO_ERROR;
+    goto clean;
+
+/* NOTE(review): these unwind paths free the freshly created table, but the
+ * parent entry that was just updated to point at it is not cleared — verify
+ * the stale (still Present) parent entry cannot be walked afterwards. */
+clean_pd:
+    if (pd_new)
+        free((addr_t *)pdpe);
+
+clean_pdp:
+    if (pdp_new)
+        free((addr_t *)pml4e);
+
+clean:
+    return ret;
+}
+
+/**
+ * @brief  x86-64 MMU unmap an entry in the page tables recursively and clear out tables
+ *
+ * Recurses from the given level down to the PT, then on the way back up frees
+ * any next-level table that has become entirely non-present and finally
+ * clears this level's entry (with interrupts disabled around the write).
+ */
+static void x86_mmu_unmap_entry(vaddr_t vaddr, int level, vaddr_t table_entry)
+{
+    uint32_t offset = 0, next_level_offset = 0;
+    vaddr_t *table, *next_table_addr, value;
+
+    next_table_addr = NULL;
+    table = (vaddr_t *)(X86_VIRT_TO_PHYS(table_entry) & X86_PG_FRAME);
+
+    /* NOTE(review): the Present-bit tests below are applied to the
+     * PHYS_TO_VIRT-converted entry value, not the raw entry — confirm the
+     * conversion preserves the low flag bits on this platform. */
+    switch (level) {
+        case PML4_L:
+            offset = (((uint64_t)vaddr >> PML4_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
+            next_table_addr = (vaddr_t *)X86_PHYS_TO_VIRT(table[offset]);
+            if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P)== 0)
+                return;
+            break;
+        case PDP_L:
+            offset = (((uint64_t)vaddr >> PDP_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
+            next_table_addr = (vaddr_t *)X86_PHYS_TO_VIRT(table[offset]);
+            if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
+                return;
+            break;
+        case PD_L:
+            offset = (((uint64_t)vaddr >> PD_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
+            next_table_addr = (vaddr_t *)X86_PHYS_TO_VIRT(table[offset]);
+            if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
+                return;
+            break;
+        case PT_L:
+            offset = (((uint64_t)vaddr >> PT_SHIFT) & ((1ul << ADDR_OFFSET) - 1));
+            next_table_addr = (vaddr_t *)X86_PHYS_TO_VIRT(table[offset]);
+            if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
+                return;
+            break;
+        case PF_L:
+            /* Reached page frame, Let's go back */
+        default:
+            return;
+    }
+
+    /* recurse one level deeper for this vaddr */
+    level -= 1;
+    x86_mmu_unmap_entry(vaddr, level,(vaddr_t)next_table_addr);
+    level += 1;
+
+    next_table_addr = (vaddr_t*)((vaddr_t)(X86_VIRT_TO_PHYS(next_table_addr)) & X86_PG_FRAME);
+    if (level > PT_L) {
+        /* Check all entries of next level table for present bit */
+        for (next_level_offset = 0; next_level_offset < (PAGE_SIZE/8); next_level_offset++) {
+            if ((next_table_addr[next_level_offset] & X86_MMU_PG_P) != 0)
+                return; /* There is an entry in the next level table */
+        }
+        free(next_table_addr);
+    }
+    /* All present bits for all entries in next level table for this address are 0 */
+    if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) != 0) {
+        arch_disable_ints();
+        value = table[offset];
+        value = value & X86_PTE_NOT_PRESENT;
+        table[offset] = value;
+        arch_enable_ints();
+    }
+}
+
+/**
+ * @brief  Unmap 'count' consecutive 4KB pages starting at vaddr from the
+ *         given top-level table, freeing emptied intermediate tables.
+ */
+status_t x86_mmu_unmap(addr_t pml4, vaddr_t vaddr, uint count)
+{
+    vaddr_t next_aligned_v_addr;
+
+    DEBUG_ASSERT(pml4);
+    if (!(x86_mmu_check_vaddr(vaddr)))
+        return ERR_INVALID_ARGS;
+
+    if (count == 0)
+        return NO_ERROR;
+
+    next_aligned_v_addr = vaddr;
+    while (count > 0) {
+        /* full top-down walk for each page */
+        x86_mmu_unmap_entry(next_aligned_v_addr, PAGING_LEVELS, X86_PHYS_TO_VIRT(pml4));
+        next_aligned_v_addr += PAGE_SIZE;
+        count--;
+    }
+    return NO_ERROR;
+}
+
+/**
+ * @brief  Generic-arch entry point: unmap 'count' pages at vaddr from the
+ *         address space currently loaded in CR3.
+ */
+int arch_mmu_unmap(vaddr_t vaddr, uint count)
+{
+    addr_t current_cr3_val;
+
+    if (!(x86_mmu_check_vaddr(vaddr)))
+        return ERR_INVALID_ARGS;
+
+    if (count == 0)
+        return NO_ERROR;
+
+    DEBUG_ASSERT(x86_get_cr3());
+    current_cr3_val = (addr_t)x86_get_cr3();
+
+    return (x86_mmu_unmap(current_cr3_val, vaddr, count));
+}
+
+/**
+ * @brief  Mapping a section/range with specific permissions
+ *
+ * range->size is rounded up to whole 4KB pages. On any page failing to map,
+ * the pages already mapped by this call are unmapped again and the error is
+ * returned.
+ */
+status_t x86_mmu_map_range(addr_t pml4, struct map_range *range, arch_flags_t flags)
+{
+    vaddr_t next_aligned_v_addr;
+    paddr_t next_aligned_p_addr;
+    status_t map_status;
+    uint32_t no_of_pages, index;
+
+    DEBUG_ASSERT(pml4);
+    if (!range)
+        return ERR_INVALID_ARGS;
+
+    /* Calculating the number of 4k pages */
+    if (IS_ALIGNED(range->size, PAGE_SIZE))
+        no_of_pages = (range->size) >> PAGE_DIV_SHIFT;
+    else
+        no_of_pages = ((range->size) >> PAGE_DIV_SHIFT) + 1;
+
+    next_aligned_v_addr = range->start_vaddr;
+    next_aligned_p_addr = range->start_paddr;
+
+    for (index = 0; index < no_of_pages; index++) {
+        map_status = x86_mmu_add_mapping(pml4, next_aligned_p_addr, next_aligned_v_addr, flags);
+        if (map_status) {
+            dprintf(SPEW, "Add mapping failed with err=%d\n", map_status);
+            /* Unmap the partial mapping - if any */
+            x86_mmu_unmap(pml4, range->start_vaddr, index);
+            return map_status;
+        }
+        next_aligned_v_addr += PAGE_SIZE;
+        next_aligned_p_addr += PAGE_SIZE;
+    }
+    return NO_ERROR;
+}
+
+/**
+ * @brief  Look up the physical address (and optionally the generic mmu flags)
+ *         that vaddr maps to in the current CR3 address space.
+ *
+ * @param paddr  out, required: physical address vaddr maps to
+ * @param flags  out, optional: generic mmu flags of the mapping
+ * @return NO_ERROR, ERR_INVALID_ARGS when paddr is NULL, or the walk error
+ */
+status_t arch_mmu_query(vaddr_t vaddr, paddr_t *paddr, uint *flags)
+{
+    addr_t current_cr3_val;
+    uint32_t ret_level;
+    map_addr_t last_valid_entry;
+    arch_flags_t ret_flags;
+    status_t stat;
+
+    if (!paddr)
+        return ERR_INVALID_ARGS;
+
+    DEBUG_ASSERT(x86_get_cr3());
+    current_cr3_val = (addr_t)x86_get_cr3();
+
+    stat = x86_mmu_get_mapping(current_cr3_val, vaddr, &ret_level, &ret_flags, &last_valid_entry);
+    if (stat)
+        return stat;
+
+    *paddr = (paddr_t)(last_valid_entry);
+
+    /* converting x86 arch specific flags to arch mmu flags */
+    if (flags)
+        *flags = ret_flags;
+
+    return NO_ERROR;
+}
+
+/**
+ * @brief  Map 'count' pages paddr -> vaddr in the current CR3 address space.
+ */
+int arch_mmu_map(vaddr_t vaddr, paddr_t paddr, uint count, uint flags)
+{
+    addr_t current_cr3_val;
+    struct map_range range;
+
+    if ((!x86_mmu_check_paddr(paddr)) || (!x86_mmu_check_vaddr(vaddr)))
+        return ERR_INVALID_ARGS;
+
+    if (count == 0)
+        return NO_ERROR;
+
+    DEBUG_ASSERT(x86_get_cr3());
+    current_cr3_val = (addr_t)x86_get_cr3();
+
+    range.start_vaddr = vaddr;
+    range.start_paddr = paddr;
+    range.size = count * PAGE_SIZE;
+
+    return (x86_mmu_map_range(current_cr3_val, &range, flags));
+}
+
+/**
+ * @brief  x86-64 MMU basic initialization
+ *
+ * Enables kernel write-protection (CR0.WP), SMEP/SMAP in CR4 when the CPU
+ * reports them, and the no-execute feature (EFER.NXE).
+ */
+void arch_mmu_init(void)
+{
+    volatile uint64_t efer_msr, cr0, cr4;
+
+    /* Set WP bit in CR0*/
+    cr0 = x86_get_cr0();
+    cr0 |= X86_CR0_WP;
+    x86_set_cr0(cr0);
+
+    /* Setting the SMEP & SMAP bit in CR4 */
+    cr4 = x86_get_cr4();
+    if (check_smep_avail())
+        cr4 |= X86_CR4_SMEP;
+    if (check_smap_avail())
+        cr4 |=X86_CR4_SMAP;
+    x86_set_cr4(cr4);
+
+    /* Set NXE bit in MSR_EFER*/
+    efer_msr = read_msr(x86_MSR_EFER);
+    efer_msr |= x86_EFER_NXE;
+    write_msr(x86_MSR_EFER, efer_msr);
+}
diff --git a/src/bsp/lk/arch/x86-64/ops.S b/src/bsp/lk/arch/x86-64/ops.S
new file mode 100644
index 0000000..10b55bd
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/ops.S
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+
+.text
+
+/* This follows the x86-64 ABI, the parameters are stored in registers in the following order*/
+/*
+%rdi used to pass 1st argument
+%rsi used to pass 2nd argument
+%rdx used to pass 3rd argument and 2nd return register
+%rcx used to pass 4th argument
+%r8 used to pass 5th argument
+%r9 used to pass 6th argument
+%rax 1st return register
+*/
+
+/* int _atomic_and(int *ptr, int val); */
+/* NOTE(review): the prototype says int, but movq/cmpxchgq operate on the full
+ * 64-bit word at (%rdi) — confirm callers provide 64-bit storage. */
+/* Returns the previous value in %rax; retries via lock cmpxchg until the
+ * word is swapped without interference. */
+FUNCTION(_atomic_and)
+    movq (%rdi), %rax
+0:
+    movq %rax, %rcx
+    andq %rsi, %rcx
+    lock
+    cmpxchgq %rcx, (%rdi)
+    jnz 1f                  /* static prediction: branch forward not taken */
+    ret
+1:
+    /* cmpxchg failed: %rax was reloaded with the current value, retry */
+    jmp 0b
+
+
+/* int _atomic_or(int *ptr, int val); */
+/* Same retry loop as _atomic_and, with OR instead of AND. */
+FUNCTION(_atomic_or)
+
+    movq (%rdi), %rax
+0:
+    movq %rax, %rcx
+    orq %rsi, %rcx
+    lock
+    cmpxchgq %rcx, (%rdi)
+    jnz 1f                  /* static prediction: branch forward not taken */
+    ret
+1:
+    jmp 0b
+
+/* void arch_idle(); */
+/* Halt until the next interrupt, but only when IF (bit 9 of RFLAGS) is set. */
+FUNCTION(arch_idle)
+    pushf
+    popq %rax
+    andq $0x200, %rax
+    test %rax, %rax
+    je 1f                   /* don't halt if local interrupts are disabled */
+    hlt
+1:
+    ret
+
diff --git a/src/bsp/lk/arch/x86-64/rules.mk b/src/bsp/lk/arch/x86-64/rules.mk
new file mode 100644
index 0000000..8950e13
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/rules.mk
@@ -0,0 +1,62 @@
+# Build rules for the x86-64 architecture layer.
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE := $(LOCAL_DIR)
+
+# kernel VM enabled; address-space layout and CPU feature defines
+WITH_KERNEL_VM=1
+GLOBAL_DEFINES += \
+	MEMBASE=0x00200000U \
+	KERNEL_ASPACE_BASE=0x00200000U \
+	KERNEL_ASPACE_SIZE=0x7fe00000U \
+	IS_64BIT=1 \
+	SMP_MAX_CPUS=1 \
+	X86_WITH_FPU=1
+
+KERNEL_BASE ?= 0x00200000
+KERNEL_LOAD_OFFSET ?= 0x0
+
+MODULE_SRCS += \
+	$(LOCAL_DIR)/crt0.S \
+	$(LOCAL_DIR)/arch.c \
+	$(LOCAL_DIR)/asm.S \
+	$(LOCAL_DIR)/cache.c \
+	$(LOCAL_DIR)/cache-ops.S \
+	$(LOCAL_DIR)/ops.S \
+	$(LOCAL_DIR)/thread.c \
+	$(LOCAL_DIR)/mmu.c \
+	$(LOCAL_DIR)/faults.c \
+	$(LOCAL_DIR)/descriptor.c \
+	$(LOCAL_DIR)/fpu.c
+
+# set the default toolchain to x86 elf and set a #define
+ifndef TOOLCHAIN_PREFIX
+TOOLCHAIN_PREFIX := x86_64-elf-
+endif
+
+LIBGCC := $(shell $(TOOLCHAIN_PREFIX)gcc $(CFLAGS) -print-libgcc-file-name)
+#$(info LIBGCC = $(LIBGCC))
+
+# cc-option: returns $(2) if the compiler $(1) accepts it, else $(3)
+cc-option = $(shell if test -z "`$(1) $(2) -S -o /dev/null -xc /dev/null 2>&1`"; \
+	then echo "$(2)"; else echo "$(3)"; fi ;)
+
+# disable SSP if the compiler supports it; it will break stuff
+GLOBAL_CFLAGS += $(call cc-option,$(CC),-fno-stack-protector,)
+
+GLOBAL_COMPILEFLAGS += -fasynchronous-unwind-tables
+GLOBAL_COMPILEFLAGS += -gdwarf-2
+# NOTE(review): -fno-stack-protector is also added unconditionally here,
+# duplicating the cc-option line above — harmless but redundant.
+GLOBAL_COMPILEFLAGS += -fno-stack-protector
+GLOBAL_LDFLAGS += -z max-page-size=4096
+
+ARCH_OPTFLAGS := -O2
+
+# potentially generated files that should be cleaned out with clean make rule
+GENERATED += \
+	$(BUILDDIR)/kernel.ld
+
+# rules for generating the linker scripts
+
+$(BUILDDIR)/kernel.ld: $(LOCAL_DIR)/kernel.ld $(wildcard arch/*.ld)
+	@echo generating $@
+	@$(MKDIR)
+	$(NOECHO)cp $< $@
+
+include make/module.mk
diff --git a/src/bsp/lk/arch/x86-64/thread.c b/src/bsp/lk/arch/x86-64/thread.c
new file mode 100755
index 0000000..3d60e06
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/thread.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2014 Intel Corporation
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <debug.h>
+#include <kernel/thread.h>
+#include <kernel/spinlock.h>
+#include <arch/x86.h>
+#include <arch/x86/descriptor.h>
+#include <arch/fpu.h>
+
+/* we're uniprocessor at this point for x86-64, so store a global pointer to the current thread */
+struct thread *_current_thread;
+
+/* Trampoline every new thread starts in: drops the scheduler lock, enables
+ * interrupts, runs the thread entry point and exits with its return value. */
+static void initial_thread_func(void) __NO_RETURN;
+static void initial_thread_func(void)
+{
+    int ret;
+
+    /* release the thread lock that was implicitly held across the reschedule */
+    spin_unlock(&thread_lock);
+    arch_enable_ints();
+
+    ret = _current_thread->entry(_current_thread->arg);
+
+    thread_exit(ret);
+}
+
+/* Prepare a new thread's stack with a context-switch frame so the first
+ * switch to it "returns" into initial_thread_func. */
+void arch_thread_initialize(thread_t *t)
+{
+    /* create a default stack frame on the stack */
+    vaddr_t stack_top = (vaddr_t)t->stack + t->stack_size;
+
+    /* round the top of the stack down to 8 bytes
+     * (NOTE(review): original comment said "for EABI compliance" — an ARM
+     * term; confirm 8-byte alignment is what the x86-64 switch frame needs) */
+
+    stack_top = ROUNDDOWN(stack_top, 8);
+
+    /* reserve room for one context-switch frame at the top of the stack */
+    struct x86_context_switch_frame *frame =
+        (struct x86_context_switch_frame *)(stack_top);
+    frame--;
+
+    /* fill it in */
+    memset(frame, 0, sizeof(*frame));
+
+    frame->rip = (vaddr_t) &initial_thread_func;
+    frame->rflags = 0x3002; /* IF = 0, NT = 0, IOPL = 3 */
+
+    /* set the stack pointer */
+    t->arch.rsp = (vaddr_t)frame;
+#if X86_WITH_FPU
+    fpu_init_thread_states(t);
+#endif
+}
+
+/* Debug dump: print the saved stack pointer of a non-running thread. */
+void arch_dump_thread(thread_t *t)
+{
+    if (t->state != THREAD_RUNNING) {
+        dprintf(INFO, "\tarch: ");
+        dprintf(INFO, "sp 0x%lx\n", t->arch.rsp);
+    }
+}
+
+/* Switch FPU state (if enabled) then swap stacks via the asm helper. */
+void arch_context_switch(thread_t *oldthread, thread_t *newthread)
+{
+#if X86_WITH_FPU
+    fpu_context_switch(oldthread, newthread);
+#endif
+
+    x86_64_context_switch(&oldthread->arch.rsp, newthread->arch.rsp);
+}
diff --git a/src/bsp/lk/arch/x86/arch.c b/src/bsp/lk/arch/x86/arch.c
new file mode 100755
index 0000000..2f90e87
--- /dev/null
+++ b/src/bsp/lk/arch/x86/arch.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <debug.h>
+#include <arch.h>
+#include <arch/ops.h>
+#include <arch/x86.h>
+#include <arch/x86/mmu.h>
+#include <arch/x86/descriptor.h>
+#include <arch/fpu.h>
+#include <arch/fpu.h>
+#include <platform.h>
+#include <sys/types.h>
+#include <string.h>
+
+/* The single global TSS, installed into the GDT by arch_early_init(). */
+tss_t system_tss;
+
+/* Early CPU setup: enable caches and install/load the task state segment.
+ * NOTE(review): the fields set below (esp0/ss0/eflags) are the 32-bit TSS
+ * layout — confirm this file is the 32-bit x86 variant, not x86-64. */
+void arch_early_init(void)
+{
+    /* enable caches here for now */
+    clear_in_cr0(X86_CR0_NW | X86_CR0_CD);
+
+    memset(&system_tss, 0, sizeof(tss_t));
+
+    system_tss.esp0 = 0;
+    system_tss.ss0 = DATA_SELECTOR;
+    system_tss.ss1 = 0;
+    system_tss.ss2 = 0;
+    system_tss.eflags = 0x00003002;
+    system_tss.bitmap = offsetof(tss_t, tss_bitmap);
+    system_tss.trace = 1; // trap on hardware task switch
+    set_global_desc(TSS_SELECTOR, &system_tss, sizeof(tss_t), 1, 0, 0, SEG_TYPE_TSS, 0, 0);
+    x86_ltr(TSS_SELECTOR);
+}
+
+/* Later init: bring up the FPU when compiled in. */
+void arch_init(void)
+{
+#ifdef X86_WITH_FPU
+    fpu_init();
+#endif
+}
+
+/* Chain-loading another image is not supported on this architecture. */
+void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3)
+{
+    PANIC_UNIMPLEMENTED;
+}
diff --git a/src/bsp/lk/arch/x86/asm.S b/src/bsp/lk/arch/x86/asm.S
new file mode 100644
index 0000000..8916243
--- /dev/null
+++ b/src/bsp/lk/arch/x86/asm.S
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+
diff --git a/src/bsp/lk/arch/x86/cache-ops.S b/src/bsp/lk/arch/x86/cache-ops.S
new file mode 100644
index 0000000..aa62593
--- /dev/null
+++ b/src/bsp/lk/arch/x86/cache-ops.S
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+#include <arch/ops.h>
+#include <arch/defines.h>
+
+.text
+
+/* stubs: all cache maintenance entry points are no-ops on this x86 port
+ * (see also arch/x86/cache.c) */
+
+FUNCTION(arch_disable_cache)
+    ret
+
+FUNCTION(arch_enable_cache)
+    ret
+
+FUNCTION(arch_clean_cache_range)
+    ret
+
+FUNCTION(arch_clean_invalidate_cache_range)
+    ret
+
diff --git a/src/bsp/lk/arch/x86/cache.c b/src/bsp/lk/arch/x86/cache.c
new file mode 100644
index 0000000..8f5cf5b
--- /dev/null
+++ b/src/bsp/lk/arch/x86/cache.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <arch/ops.h>
+
+/* nothing to do to sync I & D cache on x86: instruction and data caches
+ * are kept coherent by hardware, so newly written code is visible to
+ * instruction fetch without explicit maintenance */
+void arch_sync_cache_range(addr_t start, size_t len)
+{
+}
diff --git a/src/bsp/lk/arch/x86/crt0.S b/src/bsp/lk/arch/x86/crt0.S
new file mode 100644
index 0000000..e8c3268
--- /dev/null
+++ b/src/bsp/lk/arch/x86/crt0.S
@@ -0,0 +1,435 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* The magic number for the Multiboot header. */
+#define MULTIBOOT_HEADER_MAGIC 0x1BADB002
+
+/* The flags for the Multiboot header.
+ * bit 1: request a memory map; bit 16: address fields below are valid */
+#if defined(__ELF__) && 0
+#define MULTIBOOT_HEADER_FLAGS 0x00000002
+#else
+#define MULTIBOOT_HEADER_FLAGS 0x00010002
+#endif
+
+/* The magic number passed by a Multiboot-compliant boot loader. */
+#define MULTIBOOT_BOOTLOADER_MAGIC 0x2BADB002
+
+/* number of IDT vectors populated at boot / number of CPU exceptions */
+#define NUM_INT 0x31
+#define NUM_EXC 0x14
+
+.section ".text.boot"
+.global _start
+_start:
+    jmp real_start
+
+.align 4
+
+/* Multiboot v1 header; a Multiboot loader (e.g. GRUB) scans the start of
+ * the image for this structure */
+.type multiboot_header,STT_OBJECT
+multiboot_header:
+    /* magic */
+    .int MULTIBOOT_HEADER_MAGIC
+    /* flags */
+    .int MULTIBOOT_HEADER_FLAGS
+    /* checksum: magic + flags + checksum must sum to zero */
+    .int -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
+
+#if !defined(__ELF__) || 1
+    /* header_addr */
+    .int multiboot_header
+    /* load_addr */
+    .int _start
+    /* load_end_addr */
+    .int __data_end
+    /* bss_end_addr */
+    .int __bss_end
+    /* entry_addr */
+    .int real_start
+#endif
+
+real_start:
+    /* if launched by a Multiboot loader, %eax carries the magic and %ebx
+     * points at the multiboot info structure; stash the pointer */
+    cmpl $MULTIBOOT_BOOTLOADER_MAGIC, %eax
+    jne 0f
+    movl %ebx, (_multiboot_info)
+0:
+    /* setup isr stub descriptors in the idt */
+    movl $_isr, %esi
+    movl $_idt, %edi
+    movl $NUM_INT, %ecx
+
+.Lloop:
+    movl %esi, %ebx
+    movw %bx, (%edi)        /* low word in IDT(n).low */
+    shrl $16, %ebx
+    movw %bx, 6(%edi)       /* high word in IDT(n).high */
+
+    addl $isr_stub_len, %esi/* index the next ISR stub */
+    addl $8, %edi           /* index the next IDT entry */
+
+    loop .Lloop
+
+    lidt _idtr
+    xorl %eax, %eax
+    movl %eax, %cr3
+
+    lgdt _gdtr
+
+    /* load the flat ring-0 data selector into every data segment register */
+    movw $datasel, %ax
+    movw %ax, %ds
+    movw %ax, %es
+    movw %ax, %fs
+    movw %ax, %ss
+    movw %ax, %gs
+    movw %ax, %ss           /* NOTE(review): %ss already loaded above; this second load looks redundant */
+
+    movl $_kstack, %esp
+    /* We jumped here in protected mode in a code segment that might no
+       longer be valid; do a long jump into our own code segment. We use
+       retf instead of ljmp to be able to use relative labels */
+    movl $codesel_32, %ecx     /* push our code segment */
+    push %ecx
+    movl $farjump, %ecx /* and the jump address */
+    push %ecx
+    xorl %ecx, %ecx
+    retf    /* this instruction jumps to codesel:farjump */
+farjump:
+    /* zero the bss section */
+    movl $__bss_start, %edi /* starting address of the bss */
+    movl $__bss_end, %ecx   /* find the length of the bss in bytes */
+    subl %edi, %ecx
+    shrl $2, %ecx       /* convert to 32 bit words, since the bss is aligned anyway */
+2:
+    movl $0, (%edi)
+    addl $4, %edi
+    loop 2b
+
+#ifdef PAE_MODE_ENABLED
+
+    /* Preparing PAE paging, we will use 2MB pages covering 1GB
+    for initial bootstrap, this page table will be 1 to 1 */
+
+    /* Setting the First PDPTE with a PD table reference */
+    xorl %eax,  %eax
+    movl $pdp,   %eax
+    orl  $0x01, %eax        /* present */
+    movl %eax, (pdpt)
+
+    movl $pdp, %esi
+    movl $0x1ff, %ecx
+
+fill_pdp:
+    /* entry = index * 2MB, flags 0x83 = present | writable | large page */
+    movl $0x1ff, %eax
+    subl %ecx, %eax
+    shll $21,%eax
+    orl  $0x83, %eax
+    movl %eax, (%esi)
+    addl $8,%esi            /* PAE entries are 8 bytes wide */
+    loop fill_pdp
+
+    /* Set PDPT in CR3 */
+    movl $pdpt, %eax
+    mov %eax, %cr3
+
+    /* Enabling PAE */
+    mov %cr4, %eax
+    btsl $(5), %eax         /* CR4.PAE */
+    mov %eax, %cr4
+
+    /* Enabling Paging and from this point we are in
+    32 bit compatibility mode */
+    mov %cr0,  %eax
+    btsl $(31), %eax        /* CR0.PG */
+    mov %eax,  %cr0
+
+#else
+    /* Set PD in CR3 */
+    movl $pd, %eax
+    mov %eax, %cr3
+
+    movl $pd, %esi
+    movl $0x100, %ecx       /* 256 entries x 4MB = identity map of the low 1GB */
+
+fill_pd:
+    /* entry = index * 4MB, flags 0x87 = present | writable | user | large page */
+    xor %eax, %eax
+    mov $0x100, %eax
+    sub %ecx, %eax
+    shll $22,%eax
+    orl  $0x87, %eax
+    movl %eax, (%esi)
+    addl $4,%esi
+    loop fill_pd
+
+    /* Enabling Paging and from this point we are in paged protected mode */
+    xorl %eax, %eax
+    mov %cr4, %eax
+    orl $0x10, %eax         /* CR4.PSE: enable 4MB pages */
+    mov %eax, %cr4
+    xorl %eax, %eax
+    mov %cr0,  %eax
+    btsl $(31), %eax        /* CR0.PG */
+    mov %eax, %cr0
+#endif
+
+    /* Flushing TLB's: reloading CR3 invalidates non-global entries */
+        mov %cr3,%eax
+        mov %eax,%cr3
+
+main_lk:
+    /* call the main module */
+    call lk_main
+0:                          /* just sit around waiting for interrupts */
+    hlt                     /* interrupts will unhalt the processor */
+    pause
+    jmp 0b                  /* so jump back to halt to conserve power */
+
+/* interrupt service routine stubs: NUM_INT small, equal-length stubs.
+ * Each pushes an error code (real or dummy 0) and its vector number,
+ * then jumps to the common dispatcher; boot code steps through them by
+ * isr_stub_len to fill the IDT. */
+_isr:
+
+.set i, 0
+.rept NUM_INT
+
+.set isr_stub_start, .
+
+.if i == 8 || (i >= 10 && i <= 14) || i == 17
+    nop                     /* error code pushed by exception */
+    nop                     /* 2 nops are the same length as push byte */
+    pushl $i                /* interrupt number */
+    jmp interrupt_common
+.else
+    pushl $0                /* fill in error code in iframe */
+    pushl $i                /* interrupt number */
+    jmp interrupt_common
+.endif
+
+/* figure out the length of a single isr stub (usually 6 or 9 bytes) */
+.set isr_stub_len, . - isr_stub_start
+
+.set i, i + 1
+.endr
+
+/* annoying, but force AS to use the same (longer) encoding of jmp for all of the stubs */
+.fill 256
+
+/* common interrupt dispatcher: completes the iframe started by the stub
+ * (vector + error code already pushed), calls platform_irq, and preempts
+ * the current thread if it returns nonzero */
+interrupt_common:
+    pushl %gs               /* save segment registers */
+    pushl %fs
+    pushl %es
+    pushl %ds
+    pusha                   /* save general purpose registers */
+    movl $datasel, %eax     /* put known good value in segment registers */
+    movl %eax, %gs
+    movl %eax, %fs
+    movl %eax, %es
+    movl %eax, %ds
+    movl %esp, %eax         /* store stack switch pivot. push esp has errata on some cpus, so use mov/push */
+    pushl %eax
+    movl %esp, %eax         /* store pointer to iframe, using same method */
+    pushl %eax
+
+
+    call platform_irq
+
+    cmpl $0,%eax            /* nonzero return requests a reschedule */
+    je 0f
+    call thread_preempt
+
+0:
+
+    popl %eax               /* drop pointer to iframe */
+    popl %eax               /* restore task_esp, stack switch can occur here if task_esp is modified */
+    movl %eax, %esp
+    popa                    /* restore general purpose registers */
+    popl %ds                /* restore segment registers */
+    popl %es
+    popl %fs
+    popl %gs
+    addl $8, %esp           /* drop exception number and error code */
+    iret
+
+.data
+.align 4
+
+/* pointer to the multiboot info struct saved by real_start (0 if none) */
+.global _multiboot_info
+_multiboot_info:
+    .int 0
+
+/* GDT register image: 16-bit limit followed by 32-bit base, for lgdt */
+_gdtr:
+    .short _gdt_end - _gdt - 1
+    .int _gdt
+
+.global _gdt
+_gdt:
+    .int 0                  /* mandatory null descriptor (selector 0) */
+    .int 0
+
+/* ring 0 descriptors: flat 4GB code/data segments (base 0, limit 0xfffff
+ * pages), followed by the equivalent ring 3 (user) segments */
+.set codesel_32, . - _gdt
+_code_32_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b10011010       /* P(1) DPL(00) S(1) 1 C(0) R(1) A(0) */
+    .byte  0b11001111       /* G(1) D(1) 0 0 limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+.set datasel, . - _gdt
+_data_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b10010010       /* P(1) DPL(00) S(1) 0 E(0) W(1) A(0) */
+    .byte  0b11001111       /* G(1) B(1) 0 0 limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+.set user_codesel_32, . - _gdt
+_user_code_32_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b11111010       /* P(1) DPL(11) S(1) 1 C(0) R(1) A(0) */
+    .byte  0b11001111       /* G(1) D(1) 0 0 limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+.set user_datasel, . - _gdt
+_user_data_32_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b11110010       /* P(1) DPL(11) S(1) 0 E(0) W(1) A(0) */
+    .byte  0b11001111       /* G(1) B(1) 0 0 limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+/* 64-bit (long mode) code/data descriptors; unused until/unless the CPU
+ * is switched to long mode */
+.set codesel_64, . - _gdt
+_code_64_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b10011010       /* P(1) DPL(00) S(1) 1 C(0) R(1) A(0) */
+    .byte  0b10101111       /* G(1) D(0) L(1) AVL(0) limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+.set datasel_64, . - _gdt
+_data_64_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b10010010       /* P(1) DPL(00) S(1) 0 E(0) W(1) A(0) */
+    .byte  0b11001111       /* G(1) B(1) 0 AVL(0) limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+    .quad  0x0000000000000000
+    .quad  0x0000000000000000
+
+.set user_codesel_64, . - _gdt
+_user_code_64_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b11111010       /* P(1) DPL(11) S(1) 1 C(0) R(1) A(0) */
+    .byte  0b10101111       /* G(1) D(0) L(1) AVL(0) limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+.set user_datasel_64, . - _gdt
+_user_data_64_gde:
+    .short 0xffff           /* limit 15:00 */
+    .short 0x0000           /* base 15:00 */
+    .byte  0x00         /* base 23:16 */
+    .byte  0b11110010       /* P(1) DPL(11) S(1) 0 E(0) W(1) A(0) */
+    .byte  0b11001111       /* G(1) B(1) 0 0 limit 19:16 */
+    .byte  0x0          /* base 31:24 */
+
+.set null_2, . - _gdt
+_null_2:
+    .int 0
+    .int 0
+
+/* TSS descriptor; base/limit are filled in at runtime via set_global_desc() */
+.set tsssel, . - _gdt
+_tss_gde:
+    .short 0                /* limit 15:00 */
+    .short 0                /* base 15:00 */
+    .byte  0                /* base 23:16 */
+    .byte  0x89             /* P(1) DPL(00) S(0) type 1001: 32-bit TSS, available */
+    .byte  0x80             /* G(0) 0 0 AVL(0) limit 19:16 */
+    .short  0               /* base 31:24 -- NOTE(review): .short makes this entry 9 bytes instead of the 8-byte descriptor format (.byte would fit); harmless as the last entry but verify */
+.global _gdt_end
+_gdt_end:
+
+.align 8
+.global _idtr
+_idtr:
+    .short _idt_end - _idt - 1  /* IDT limit */
+    .int _idt
+
+/* interrupt descriptor table (IDT); the ISR offsets are patched in at
+ * boot by the .Lloop in real_start */
+.global _idt
+_idt:
+
+.set i, 0
+.rept NUM_INT-1
+    .short 0                /* low 16 bits of ISR offset (_isr#i & 0FFFFh) */
+    .short codesel_32           /* selector */
+    .byte  0
+    .byte  0x8e             /* present, ring 0, 32-bit interrupt gate */
+    .short 0                /* high 16 bits of ISR offset (_isr#i / 65536) */
+
+.set i, i + 1
+.endr
+
+/* syscall int (ring 3): DPL 3 so user code may invoke it with int imm8 */
+_idt30:
+    .short 0                /* low 16 bits of ISR offset (_isr#i & 0FFFFh) */
+    .short codesel_32           /* selector */
+    .byte  0
+    .byte  0xee             /* present, ring 3, 32-bit interrupt gate */
+    .short 0                /* high 16 bits of ISR offset (_isr#i / 65536) */
+
+.global _idt_end
+_idt_end:
+
+/* Memory for the initial page table, we will use 2 pages for a
+   1 to 1 mapping that covers 1GB of physical memory */
+.align 4096
+.fill 4096
+
+#ifdef PAE_MODE_ENABLED
+.align 4096
+pdpt:                       /* page-directory-pointer table (entry 0 used) */
+.fill 4096
+pdp:                        /* page directory of 512 x 2MB entries */
+.fill 4096
+#else
+.align 4096
+pd:                         /* page directory of 256 x 4MB identity entries */
+.fill 4096
+#endif
+
+.align 4096
+.fill 4096
+
+.bss
+.align 4096
+
+/* boot stack: the label marks the TOP; the stack grows down into the fill */
+.global _kstack
+.fill 4096
+_kstack:
diff --git a/src/bsp/lk/arch/x86/descriptor.c b/src/bsp/lk/arch/x86/descriptor.c
new file mode 100644
index 0000000..5fa6555
--- /dev/null
+++ b/src/bsp/lk/arch/x86/descriptor.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <compiler.h>
+#include <arch/x86/descriptor.h>
+
+/* not the best way to do this, but easy for now */
+/* In-memory layout of one x86 segment descriptor, modeled with bitfields.
+ * NOTE(review): bitfield packing order is implementation-defined; this
+ * layout assumes the GCC little-endian convention -- verify if the
+ * toolchain changes. */
+typedef struct {
+    uint16_t limit_15_0;
+    uint16_t base_15_0;
+    uint8_t base_23_16;
+
+    uint8_t type : 4;       /* segment type field */
+    uint8_t s : 1;          /* 1 = code/data, 0 = system (TSS, gates) */
+    uint8_t dpl : 2;        /* descriptor privilege level */
+    uint8_t p : 1;          /* present */
+
+    uint8_t limit_19_16 : 4;
+    uint8_t avl : 1;        /* available for OS use */
+    uint8_t reserved_0 : 1;
+    uint8_t d_b : 1;        /* default operand size: 1 = 32-bit */
+    uint8_t g : 1;          /* granularity: 1 = limit in 4KB units */
+
+    uint8_t base_31_24;
+} __PACKED seg_desc_t;
+
+/* the GDT itself is defined in crt0.S */
+extern seg_desc_t _gdt[];
+
+/* Fill in the GDT entry addressed by selector `sel`.
+ *
+ * sel     - byte-offset selector (entry index = sel >> 3)
+ * base    - segment base address
+ * limit   - 20-bit segment limit (scaled by 4KB when gran != 0)
+ * present - nonzero to mark the segment present
+ * ring    - descriptor privilege level (0-3)
+ * sys     - S bit: nonzero for code/data, zero for system (e.g. TSS)
+ * type    - 4-bit segment type field
+ * gran    - granularity bit
+ * bits    - D/B bit: nonzero for 32-bit default operand size
+ *
+ * NOTE(review): the avl and reserved_0 fields are left untouched. */
+void set_global_desc(seg_sel_t sel, void *base, uint32_t limit,
+                     uint8_t present, uint8_t ring, uint8_t sys, uint8_t type, uint8_t gran, uint8_t bits)
+{
+    // convert selector into index
+    uint16_t index = sel >> 3;
+
+    _gdt[index].limit_15_0 = limit & 0x0000ffff;
+    _gdt[index].limit_19_16 = (limit & 0x000f0000) >> 16;
+
+    _gdt[index].base_15_0 = ((uint32_t) base) & 0x0000ffff;
+    _gdt[index].base_23_16 = (((uint32_t) base) & 0x00ff0000) >> 16;
+    _gdt[index].base_31_24 = ((uint32_t) base) >> 24;
+
+    _gdt[index].type = type & 0x0f; // segment type
+    _gdt[index].p = present != 0;   // present
+    _gdt[index].dpl = ring & 0x03;  // descriptor privilege level
+    _gdt[index].g = gran != 0;      // granularity
+    _gdt[index].s = sys != 0;       // system / non-system
+    _gdt[index].d_b = bits != 0;    // 16 / 32 bit
+}
diff --git a/src/bsp/lk/arch/x86/faults.c b/src/bsp/lk/arch/x86/faults.c
new file mode 100644
index 0000000..c2592c7
--- /dev/null
+++ b/src/bsp/lk/arch/x86/faults.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <debug.h>
+#include <arch/x86.h>
+#include <kernel/thread.h>
+
+
+/* Print the saved register state from the exception iframe, plus a
+ * hexdump of 192 bytes of stack starting at the frame itself, to the
+ * CRITICAL debug log. */
+static void dump_fault_frame(struct x86_iframe *frame)
+{
+    dprintf(CRITICAL, " CS:     %04x EIP: %08x EFL: %08x CR2: %08x\n",
+            frame->cs, frame->eip, frame->eflags, x86_get_cr2());
+    dprintf(CRITICAL, "EAX: %08x ECX: %08x EDX: %08x EBX: %08x\n",
+            frame->eax, frame->ecx, frame->edx, frame->ebx);
+    dprintf(CRITICAL, "ESP: %08x EBP: %08x ESI: %08x EDI: %08x\n",
+            frame->esp, frame->ebp, frame->esi, frame->edi);
+    dprintf(CRITICAL, " DS:     %04x  ES:     %04x  FS:     %04x  GS:     %04x\n",
+            frame->ds, frame->es, frame->fs, frame->gs);
+
+    // dump the bottom of the current stack
+    addr_t stack = (addr_t) frame; //(addr_t) (((uint32_t *) frame) + (sizeof(struct x86_iframe) / sizeof(uint32_t) - 1));
+
+    if (stack != 0) {
+        dprintf(CRITICAL, "bottom of stack at 0x%08x:\n", (unsigned int)stack);
+        hexdump((void *)stack, 192);
+    }
+}
+
+/* Log a fatal exception message plus the register frame, then halt the
+ * CPU forever with interrupts disabled.
+ * Fix: the message is printed through a fixed "%s" format -- passing a
+ * caller-supplied string as the format argument is a format-string
+ * hazard (CERT FIO30-C) and trips -Wformat-security. */
+static void exception_die(struct x86_iframe *frame, const char *msg)
+{
+    dprintf(CRITICAL, "%s", msg);
+    dump_fault_frame(frame);
+
+    /* hlt with IF clear never resumes; loop in case of NMI wakeup */
+    for (;;) {
+        x86_cli();
+        x86_hlt();
+    }
+}
+
+/* Syscall interrupt vector: no syscalls are implemented, so treat any
+ * invocation as fatal. */
+void x86_syscall_handler(struct x86_iframe *frame)
+{
+    exception_die(frame, "unhandled syscall, halting\n");
+}
+
+/* General protection fault (#GP): always fatal in this port. */
+void x86_gpf_handler(struct x86_iframe *frame)
+{
+    exception_die(frame, "unhandled gpf, halting\n");
+}
+
+/* Invalid opcode (#UD): always fatal in this port. */
+void x86_invop_handler(struct x86_iframe *frame)
+{
+    exception_die(frame, "unhandled invalid op, halting\n");
+}
+
+/* Catch-all for exception vectors without a dedicated handler: fatal. */
+void x86_unhandled_exception(struct x86_iframe *frame)
+{
+    exception_die(frame, "unhandled exception, halting\n");
+}
+
+/* Page fault (#PF) handler. frame->err_code encodes the fault cause
+ * (PFEX_* bits); CR2 holds the faulting linear address. User-mode
+ * faults terminate the offending thread; supervisor-mode faults halt. */
+void x86_pfe_handler(struct x86_iframe *frame)
+{
+    /* Handle a page fault exception */
+    uint32_t error_code;
+    thread_t *current_thread;
+    error_code = frame->err_code;
+
+#ifdef PAGE_FAULT_DEBUG_INFO
+    addr_t v_addr, ssp, esp, ip, rip;
+    v_addr = x86_get_cr2();     /* faulting linear address */
+
+    ssp = frame->user_ss & X86_8BYTE_MASK;
+    esp = frame->user_esp;
+    ip  = frame->cs & X86_8BYTE_MASK;
+    rip = frame->eip;
+
+    dprintf(CRITICAL, "<PAGE FAULT> Instruction Pointer   = 0x%x:0x%x\n",
+            (unsigned int)ip,
+            (unsigned int)rip);
+    dprintf(CRITICAL, "<PAGE FAULT> Stack Pointer         = 0x%x:0x%x\n",
+            (unsigned int)ssp,
+            (unsigned int)esp);
+    dprintf(CRITICAL, "<PAGE FAULT> Fault Linear Address = 0x%x\n",
+            (unsigned int)v_addr);
+    dprintf(CRITICAL, "<PAGE FAULT> Error Code Value      = 0x%x\n",
+            error_code);
+    dprintf(CRITICAL, "<PAGE FAULT> Error Code Type = %s %s %s%s, %s\n",
+            error_code & PFEX_U ? "user" : "supervisor",
+            error_code & PFEX_W ? "write" : "read",
+            error_code & PFEX_I ? "instruction" : "data",
+            error_code & PFEX_RSV ? " rsv" : "",
+            error_code & PFEX_P ? "protection violation" : "page not present");
+#endif
+
+    current_thread = get_current_thread();
+    dump_thread(current_thread);
+
+    if (error_code & PFEX_U) {
+        // User mode page fault: kill the faulting thread
+        /* NOTE(review): only exact error codes 4-7 are matched; values
+         * with PFEX_I or PFEX_RSV set fall through and simply return */
+        switch (error_code) {
+            case 4:
+            case 5:
+            case 6:
+            case 7:
+#ifdef PAGE_FAULT_DEBUG_INFO
+                thread_detach(current_thread);
+#else
+                thread_exit(current_thread->retcode);
+#endif
+                break;
+        }
+    } else {
+        // Supervisor mode page fault: unrecoverable, halt the system
+        switch (error_code) {
+
+            case 0:
+            case 1:
+            case 2:
+            case 3:
+                exception_die(frame, "Page Fault exception, halting\n");
+                break;
+        }
+    }
+}
diff --git a/src/bsp/lk/arch/x86/fpu.c b/src/bsp/lk/arch/x86/fpu.c
new file mode 100755
index 0000000..a589a79
--- /dev/null
+++ b/src/bsp/lk/arch/x86/fpu.c
@@ -0,0 +1,148 @@
+/*
+* Copyright (c) 2015 Intel Corporation
+*
+* Permission is hereby granted, free of charge, to any person obtaining
+* a copy of this software and associated documentation files
+* (the "Software"), to deal in the Software without restriction,
+* including without limitation the rights to use, copy, modify, merge,
+* publish, distribute, sublicense, and/or sell copies of the Software,
+* and to permit persons to whom the Software is furnished to do so,
+* subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be
+* included in all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <arch/x86.h>
+#include <arch/fpu.h>
+#include <kernel/thread.h>
+
+#if X86_WITH_FPU
+
+/* CPUID EAX = 1 return values */
+
+#define ECX_SSE3    (0x00000001 << 0)
+#define ECX_SSSE3   (0x00000001 << 9)
+#define ECX_SSE4_1  (0x00000001 << 19)
+#define ECX_SSE4_2  (0x00000001 << 20)
+#define EDX_FXSR    (0x00000001 << 24)
+#define EDX_SSE     (0x00000001 << 25)
+#define EDX_SSE2    (0x00000001 << 26)
+#define EDX_FPU     (0x00000001 << 0)
+
+#define FPU_CAP(ecx, edx) ((edx & EDX_FPU) != 0)
+
+#define SSE_CAP(ecx, edx) ( \
+    ((ecx & (ECX_SSE3 | ECX_SSSE3 | ECX_SSE4_1 | ECX_SSE4_2)) != 0) || \
+    ((edx & (EDX_SSE | EDX_SSE2)) != 0) \
+    )
+
+#define FXSAVE_CAP(ecx, edx) ((edx & EDX_FXSR) != 0)
+
+static int fp_supported;
+static thread_t *fp_owner;
+
+/* Query CPUID leaf 1 feature flags into *ecx / *edx.
+ * Fix: CPUID overwrites EAX and EBX as well, but the original asm did
+ * not list them as outputs or clobbers -- undefined behavior, and EBX is
+ * the PIC register on i386. Capture both so the compiler knows. */
+static void get_cpu_cap(uint32_t *ecx, uint32_t *edx)
+{
+    uint32_t eax = 1;
+    uint32_t ebx;               /* captured only to declare the clobber */
+
+    __asm__ __volatile__
+    ("cpuid" : "+a" (eax), "=b" (ebx), "=c" (*ecx), "=d" (*edx));
+    (void) ebx;
+}
+
+/* Detect x87/SSE/FXSAVE support and bring the FPU into a known state.
+ * Leaves CR0.TS set so the first FPU instruction traps to
+ * fpu_dev_na_handler (lazy FPU context switching). Called from
+ * arch_init(). */
+void fpu_init(void)
+{
+    uint32_t ecx = 0, edx = 0;
+    uint16_t fcw;
+    uint32_t mxcsr;
+
+#ifdef ARCH_X86_64
+    uint64_t x;
+#else
+    uint32_t x;
+#endif
+
+    fp_supported = 0;
+    fp_owner = NULL;
+
+    get_cpu_cap(&ecx, &edx);
+
+    /* require x87 + SSE + FXSAVE/FXRSTOR; otherwise leave FPU unsupported */
+    if (!FPU_CAP(ecx, edx) || !SSE_CAP(ecx, edx) || !FXSAVE_CAP(ecx, edx))
+        return;
+
+    fp_supported = 1;
+
+    /* No x87 emul, monitor co-processor */
+
+    x = x86_get_cr0();
+    x &= ~X86_CR0_EM;
+    x |= X86_CR0_NE;
+    x |= X86_CR0_MP;
+    x86_set_cr0(x);
+
+    /* Init x87 and unmask all exceptions */
+
+    __asm__ __volatile__ ("finit");
+    __asm__ __volatile__("fstcw %0" : "=m" (fcw));
+    fcw &= 0xffc0;          /* clear the six exception-mask bits */
+    __asm__ __volatile__("fldcw %0" : : "m" (fcw));
+
+    /* Init SSE and unmask all exceptions */
+
+    x = x86_get_cr4();
+    x |= X86_CR4_OSXMMEXPT;
+    x |= X86_CR4_OSFXSR;
+    x &= ~X86_CR4_OSXSAVE;
+    x86_set_cr4(x);
+
+    __asm__ __volatile__("stmxcsr %0" : "=m" (mxcsr));
+    mxcsr &= 0x0000003f;    /* keep only the sticky flag bits; unmask all */
+    __asm__ __volatile__("ldmxcsr %0" : : "m" (mxcsr));
+
+    /* set TS so the first FPU use faults and ownership can be assigned */
+    x86_set_cr0(x86_get_cr0() | X86_CR0_TS);
+    return;
+}
+
+/* Lazy-FPU hook run on every thread switch. If the incoming thread does
+ * not own the FPU, set CR0.TS so its first FPU instruction traps (#NM)
+ * and fpu_dev_na_handler can swap state; otherwise clear TS so it can
+ * use the FPU without a trap. */
+void fpu_context_switch(thread_t *old_thread, thread_t *new_thread)
+{
+    if (fp_supported == 0)
+        return;
+
+    if (new_thread != fp_owner)
+        x86_set_cr0(x86_get_cr0() | X86_CR0_TS);
+    else
+        x86_set_cr0(x86_get_cr0() & ~X86_CR0_TS);
+
+    return;
+}
+
+/* Device-not-available (#NM) handler: transfers FPU ownership to the
+ * current thread, saving the previous owner's state (fxsave) and
+ * restoring the new owner's (fxrstor).
+ * NOTE(review): on the very first trap (fp_owner == NULL) nothing is
+ * restored, so the thread inherits the freshly initialized FPU state. */
+void fpu_dev_na_handler(void)
+{
+    thread_t *self;
+
+    /* clear TS first so the fxsave/fxrstor below don't re-fault */
+    x86_set_cr0(x86_get_cr0() & ~X86_CR0_TS);
+
+    if (fp_supported == 0)
+        return;
+
+    self = get_current_thread();
+
+    if ((fp_owner != NULL) && (fp_owner != self)) {
+        __asm__ __volatile__("fxsave %0" : "=m" (*fp_owner->arch.fpu_states));
+        __asm__ __volatile__("fxrstor %0" : : "m" (*self->arch.fpu_states));
+    }
+
+    fp_owner = self;
+    return;
+}
+#endif
+
+/* End of file */
diff --git a/src/bsp/lk/arch/x86/include/arch/arch_ops.h b/src/bsp/lk/arch/x86/include/arch/arch_ops.h
new file mode 100644
index 0000000..91a5271
--- /dev/null
+++ b/src/bsp/lk/arch/x86/include/arch/arch_ops.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+/* Fix: the guard define previously misspelled ARCH as "ARHC", so the
+ * guard macro was never defined and repeated inclusion of this header
+ * was not prevented. */
+#ifndef __ARCH_X86_OPS_H
+#define __ARCH_X86_OPS_H
+
+#include <compiler.h>
+
+#ifndef ASSEMBLY
+
+#include <arch/x86.h>
+
+/* override of some routines */
+
+/* Enable maskable interrupts (sti). CF is presumably a compiler-fence
+ * macro from arch/x86.h keeping accesses from moving across the enable
+ * -- verify its definition. */
+static inline void arch_enable_ints(void)
+{
+    CF;
+    __asm__ volatile("sti");
+}
+
+/* Disable maskable interrupts (cli), followed by the CF barrier so later
+ * accesses are not hoisted above the disable. */
+static inline void arch_disable_ints(void)
+{
+    __asm__ volatile("cli");
+    CF;
+}
+
+/* Return true when EFLAGS.IF (bit 9) is clear, i.e. interrupts are off. */
+static inline bool arch_ints_disabled(void)
+{
+    unsigned int state;
+
+    /* read EFLAGS via pushf/pop */
+    __asm__ volatile(
+        "pushfl;"
+        "popl %%eax"
+        : "=a" (state)
+        :: "memory");
+
+    return !(state & (1<<9));
+}
+
+/* out-of-line atomic primitives, defined elsewhere in the x86 arch code */
+int _atomic_and(volatile int *ptr, int val);
+int _atomic_or(volatile int *ptr, int val);
+int _atomic_cmpxchg(volatile int *ptr, int oldval, int newval);
+
+/* Atomically add val to *ptr; returns the value *ptr held before the add.
+ * Fix: xadd is a read-modify-write of the memory operand, so *ptr must
+ * be a "+m" output -- the original listed it as input-only, letting the
+ * compiler assume the memory was unchanged. Also drops the conflicting
+ * duplicate "a" constraints on val. */
+static inline int atomic_add(volatile int *ptr, int val)
+{
+    __asm__ volatile(
+        "lock xaddl %[val], %[ptr];"
+        : [val]"+r" (val), [ptr]"+m" (*ptr)
+        :
+        : "memory"
+    );
+
+    return val;
+}
+
+/* Atomically exchange *ptr with val; returns the previous value of *ptr.
+ * xchg with a memory operand is implicitly locked, so no lock prefix is
+ * required.
+ * Fix: xchg writes the memory operand, so *ptr must be a "+m" output --
+ * the original listed it as input-only; also drops the duplicate "a"
+ * constraints on val. */
+static inline int atomic_swap(volatile int *ptr, int val)
+{
+    __asm__ volatile(
+        "xchgl %[val], %[ptr];"
+        : [val]"+r" (val), [ptr]"+m" (*ptr)
+        :
+        : "memory"
+    );
+
+    return val;
+}
+
+
+/* thin inline wrappers over the out-of-line _atomic_* primitives above */
+static inline int atomic_and(volatile int *ptr, int val) { return _atomic_and(ptr, val); }
+static inline int atomic_or(volatile int *ptr, int val) { return _atomic_or(ptr, val); }
+static inline int atomic_cmpxchg(volatile int *ptr, int oldval, int newval) { return _atomic_cmpxchg(ptr, oldval, newval); }
+
+/* Return the low 32 bits of the CPU timestamp counter (rdtscl is a macro
+ * from arch/x86.h). Wraps quickly at high clock rates. */
+static inline uint32_t arch_cycle_count(void)
+{
+    uint32_t timestamp;
+    rdtscl(timestamp);
+
+    return timestamp;
+}
+
+/* use a global pointer to store the current_thread; sufficient because
+ * this port runs a single CPU (see arch_curr_cpu_num below) */
+extern struct thread *_current_thread;
+
+/* Return the thread currently scheduled on the (only) CPU. */
+static inline struct thread *get_current_thread(void)
+{
+    return _current_thread;
+}
+
+/* Record t as the currently running thread; called by the scheduler. */
+static inline void set_current_thread(struct thread *t)
+{
+    _current_thread = t;
+}
+
+/* Uniprocessor port: the current CPU is always number 0. */
+static inline uint arch_curr_cpu_num(void)
+{
+    return 0;
+}
+
+#endif // !ASSEMBLY
+
+#endif
+
diff --git a/src/bsp/lk/arch/x86/include/arch/arch_thread.h b/src/bsp/lk/arch/x86/include/arch/arch_thread.h
new file mode 100755
index 0000000..8002c22
--- /dev/null
+++ b/src/bsp/lk/arch/x86/include/arch/arch_thread.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __X86_ARCH_THREAD_H
+#define __X86_ARCH_THREAD_H
+
+#include <sys/types.h>
+
+/* Per-thread architectural state for x86. */
+struct arch_thread {
+    vaddr_t esp;    /* saved kernel stack pointer of a switched-out thread */
+#if X86_WITH_FPU
+    vaddr_t *fpu_states;    /* fxsave/fxrstor area used by fpu.c; presumably points into fpu_buffer, 16-byte aligned -- set up elsewhere, verify */
+    uint8_t fpu_buffer[512 + 16];   /* raw storage: 512-byte fxsave image plus alignment slack */
+#endif
+};
+
+#endif
+
diff --git a/src/bsp/lk/arch/x86/include/arch/defines.h b/src/bsp/lk/arch/x86/include/arch/defines.h
new file mode 100644
index 0000000..3dc00cc
--- /dev/null
+++ b/src/bsp/lk/arch/x86/include/arch/defines.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __ARCH_CPU_H
+#define __ARCH_CPU_H
+
+#define PAGE_SIZE 4096      /* 4 KiB pages */
+#define PAGE_SIZE_SHIFT 12  /* log2(PAGE_SIZE) */
+
+#define CACHE_LINE 32       /* assumed cache line size in bytes */
+
+#define ARCH_DEFAULT_STACK_SIZE 8192
+#define DEFAULT_TSS 4096    /* NOTE(review): appears to size the TSS allocation -- confirm against users */
+
+#endif
+
diff --git a/src/bsp/lk/arch/x86/include/arch/fpu.h b/src/bsp/lk/arch/x86/include/arch/fpu.h
new file mode 100644
index 0000000..0cb71d8
--- /dev/null
+++ b/src/bsp/lk/arch/x86/include/arch/fpu.h
@@ -0,0 +1,30 @@
+/*
+* Copyright (c) 2015 Intel Corporation
+*
+* Permission is hereby granted, free of charge, to any person obtaining
+* a copy of this software and associated documentation files
+* (the "Software"), to deal in the Software without restriction,
+* including without limitation the rights to use, copy, modify, merge,
+* publish, distribute, sublicense, and/or sell copies of the Software,
+* and to permit persons to whom the Software is furnished to do so,
+* subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be
+* included in all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+/* x86 FPU state management interface.
+ * NOTE(review): this header has no include guard; harmless while it only
+ * holds idempotent declarations, but one should be added for consistency. */
+#include <kernel/thread.h>
+
+/* One-time FPU initialization. */
+void fpu_init(void);
+/* Hook invoked on thread switch; presumably implements lazy FPU save/restore -- confirm in fpu.c. */
+void fpu_context_switch(thread_t *old_thread, thread_t *new_thread);
+/* #NM (device-not-available) exception handler. */
+void fpu_dev_na_handler(void);
+
+/* End of file */
diff --git a/src/bsp/lk/arch/x86/include/arch/spinlock.h b/src/bsp/lk/arch/x86/include/arch/spinlock.h
new file mode 100644
index 0000000..42d35a7
--- /dev/null
+++ b/src/bsp/lk/arch/x86/include/arch/spinlock.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <arch/ops.h>
+#include <arch/x86.h>
+#include <stdbool.h>
+
+#define SPIN_LOCK_INITIAL_VALUE (0)
+
+typedef unsigned long spin_lock_t;
+
+typedef uint32_t spin_lock_saved_state_t;
+typedef uint spin_lock_save_flags_t;
+
+/* simple implementation of spinlocks for no smp support */
+static inline void arch_spin_lock_init(spin_lock_t *lock)
+{
+    *lock = SPIN_LOCK_INITIAL_VALUE;
+}
+
+/* True when the lock value is nonzero, i.e. currently marked taken. */
+static inline bool arch_spin_lock_held(spin_lock_t *lock)
+{
+    return *lock != 0;
+}
+
+/* UP: no atomics needed; just mark the lock taken. */
+static inline void arch_spin_lock(spin_lock_t *lock)
+{
+    *lock = 1;
+}
+
+/* Presumably 0 means "acquired". NOTE(review): unlike arch_spin_lock()
+ * this does not set *lock, so arch_spin_lock_held() stays false after a
+ * successful trylock -- confirm this asymmetry is intended. */
+static inline int arch_spin_trylock(spin_lock_t *lock)
+{
+    return 0;
+}
+
+static inline void arch_spin_unlock(spin_lock_t *lock)
+{
+    *lock = 0;
+}
+
+/* flags are unused on x86 */
+#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS  0
+
+/* Save EFLAGS (including IF) into *statep, then mask interrupts. */
+static inline void
+arch_interrupt_save(spin_lock_saved_state_t *statep, spin_lock_save_flags_t flags)
+{
+    *statep = x86_save_eflags();
+    arch_disable_ints();
+}
+
+/* Restore the EFLAGS image captured by arch_interrupt_save(). */
+static inline void
+arch_interrupt_restore(spin_lock_saved_state_t old_state, spin_lock_save_flags_t flags)
+{
+    x86_restore_eflags(old_state);
+}
+
+
diff --git a/src/bsp/lk/arch/x86/include/arch/x86.h b/src/bsp/lk/arch/x86/include/arch/x86.h
new file mode 100644
index 0000000..1129576
--- /dev/null
+++ b/src/bsp/lk/arch/x86/include/arch/x86.h
@@ -0,0 +1,457 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __ARCH_X86_H
+#define __ARCH_X86_H
+
+#include <compiler.h>
+#include <sys/types.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
+__BEGIN_CDECLS
+
+#define PFEX_P 0x01
+#define PFEX_W 0x02
+#define PFEX_U 0x04
+#define PFEX_RSV 0x08
+#define PFEX_I 0x10
+#define X86_8BYTE_MASK 0xFFFFFFFF
+#define X86_CPUID_ADDR_WIDTH 0x80000008
+
+void arch_mmu_init(void);
+addr_t *x86_create_new_cr3(void);
+
+/* Interrupt frame as laid down on the stack by the exception stubs and the
+ * common handler; field order mirrors the push sequence (last push first). */
+struct x86_iframe {
+    uint32_t pivot;                                     // stack switch pivot
+    uint32_t edi, esi, ebp, esp, ebx, edx, ecx, eax;    // pushed by common handler using pusha
+    uint32_t ds, es, fs, gs;                            // pushed by common handler
+    uint32_t vector;                                    // pushed by stub
+    uint32_t err_code;                                  // pushed by interrupt or stub
+    uint32_t eip, cs, eflags;                           // pushed by interrupt
+    uint32_t user_esp, user_ss;                         // pushed by interrupt if priv change occurs
+};
+
+/*
+ * x86 TSS structure
+ */
+/* 32-bit Task State Segment layout; __xxh fields pad 16-bit selectors to
+ * the 32-bit slots the hardware format requires. */
+typedef struct {
+    uint16_t    backlink, __blh;
+    uint32_t    esp0;               /* ring-0 stack pointer loaded on privilege change */
+    uint16_t    ss0, __ss0h;        /* ring-0 stack segment */
+    uint32_t    esp1;
+    uint16_t    ss1, __ss1h;
+    uint32_t    esp2;
+    uint16_t    ss2, __ss2h;
+    uint32_t    cr3;
+    uint32_t    eip;
+    uint32_t    eflags;
+    uint32_t    eax, ecx, edx, ebx;
+    uint32_t    esp, ebp, esi, edi;
+    uint16_t    es, __esh;
+    uint16_t    cs, __csh;
+    uint16_t    ss, __ssh;
+    uint16_t    ds, __dsh;
+    uint16_t    fs, __fsh;
+    uint16_t    gs, __gsh;
+    uint16_t    ldt, __ldth;
+    uint16_t    trace, bitmap;      /* bitmap: offset of the I/O permission bitmap */
+
+    uint8_t tss_bitmap[8192];       /* I/O permission bitmap (one bit per port) */
+} __PACKED tss_t;
+
+#define X86_CR0_PE 0x00000001 /* protected mode enable */
+#define X86_CR0_MP 0x00000002 /* monitor coprocessor */
+#define X86_CR0_EM 0x00000004 /* emulation */
+#define X86_CR0_NE 0x00000020 /* enable x87 exception */
+#define X86_CR0_TS 0x00000008 /* task switched */
+#define X86_CR0_WP 0x00010000 /* supervisor write protect */
+#define X86_CR0_NW 0x20000000 /* not write-through */
+#define X86_CR0_CD 0x40000000 /* cache disable */
+#define X86_CR0_PG 0x80000000 /* enable paging */
+#define X86_CR4_OSFXSR 0x00000200 /* os supports fxsave */
+#define X86_CR4_OSXMMEXPT 0x00000400 /* os supports xmm exception */
+#define X86_CR4_OSXSAVE 0x00040000 /* os supports xsave */
+#define X86_CR4_SMEP 0x00100000 /* SMEP protection enabling */
+#define X86_CR4_SMAP 0x00200000 /* SMAP protection enabling */
+#define X86_CR4_PAE 0x00000020 /* PAE paging */
+#define x86_EFER_NXE 0x00000800 /* to enable execute disable bit */
+#define x86_MSR_EFER 0xc0000080 /* EFER Model Specific Register id */
+#define X86_CR4_PSE 0xffffffef /* Disabling PSE bit in the CR4 */
+
+/* OR the bits in `mask` into CR0. */
+static inline void set_in_cr0(uint32_t mask)
+{
+    __asm__ __volatile__ (
+        "movl %%cr0,%%eax	\n\t"
+        "orl %0,%%eax		\n\t"
+        "movl %%eax,%%cr0	\n\t"
+        : : "irg" (mask)
+        :"ax");
+}
+
+/* Clear the bits in `mask` from CR0 (note the ~mask in the operand). */
+static inline void clear_in_cr0(uint32_t mask)
+{
+    __asm__ __volatile__ (
+        "movl %%cr0, %%eax	\n\t"
+        "andl %0, %%eax		\n\t"
+        "movl %%eax, %%cr0	\n\t"
+        : : "irg" (~mask)
+        : "ax");
+}
+
+static inline void x86_clts(void) {__asm__ __volatile__ ("clts"); } /* clear CR0.TS */
+static inline void x86_hlt(void) {__asm__ __volatile__ ("hlt"); }   /* halt until next interrupt */
+static inline void x86_sti(void) {__asm__ __volatile__ ("sti"); }   /* enable interrupts */
+static inline void x86_cli(void) {__asm__ __volatile__ ("cli"); }   /* disable interrupts */
+/* Load the task register with selector `sel`. */
+static inline void x86_ltr(uint16_t sel)
+{
+    __asm__ __volatile__ ("ltr %%ax" :: "a" (sel));
+}
+
+/* Read CR2: the faulting linear address of the most recent page fault. */
+static inline uint32_t x86_get_cr2(void)
+{
+    uint32_t rv;
+
+    __asm__ __volatile__ (
+        "movl %%cr2, %0"
+        : "=r" (rv)
+    );
+
+    return rv;
+}
+
+/* Capture the current EFLAGS register via pushf/pop. */
+static inline uint32_t x86_save_eflags(void)
+{
+    unsigned int state;
+
+    __asm__ volatile(
+        "pushfl;"
+        "popl %0"
+        : "=rm" (state)
+        :: "memory");
+
+    return state;
+}
+
+/* Load a previously saved EFLAGS image via push/popf. */
+static inline void x86_restore_eflags(uint32_t eflags)
+{
+    __asm__ volatile(
+        "pushl %0;"
+        "popfl"
+        :: "g" (eflags)
+        : "memory", "cc");
+}
+
+/* Read the timestamp counter into separate low/high 32-bit halves. */
+#define rdtsc(low,high) \
+     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+/* Read only the low 32 bits of the TSC (edx is declared clobbered). */
+#define rdtscl(low) \
+     __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
+
+/* Read the full 64-bit TSC; "=A" binds the edx:eax pair. */
+#define rdtscll(val) \
+     __asm__ __volatile__("rdtsc" : "=A" (val))
+
+/* Read one byte from I/O port `_port`. */
+static inline uint8_t inp(uint16_t _port)
+{
+    uint8_t rv;
+    __asm__ __volatile__ ("inb %1, %0"
+                          : "=a" (rv)
+                          : "d" (_port));
+    return (rv);
+}
+
+/* Read a 16-bit word from I/O port `_port`. */
+static inline uint16_t inpw (uint16_t _port)
+{
+    uint16_t rv;
+    __asm__ __volatile__ ("inw %1, %0"
+                          : "=a" (rv)
+                          : "d" (_port));
+    return (rv);
+}
+
+/* Read a 32-bit dword from I/O port `_port`. */
+static inline uint32_t inpd(uint16_t _port)
+{
+    uint32_t rv;
+    __asm__ __volatile__ ("inl %1, %0"
+                          : "=a" (rv)
+                          : "d" (_port));
+    return (rv);
+}
+
+/* Write one byte to I/O port `_port`. */
+static inline void outp(uint16_t _port, uint8_t _data)
+{
+    __asm__ __volatile__ ("outb %1, %0"
+                          :
+                          : "d" (_port),
+                          "a" (_data));
+}
+
+/* Write a 16-bit word to I/O port `_port`. */
+static inline void outpw(uint16_t _port, uint16_t _data)
+{
+    __asm__ __volatile__ ("outw %1, %0"
+                          :
+                          : "d" (_port),
+                          "a" (_data));
+}
+
+/* Write a 32-bit dword to I/O port `_port`. */
+static inline void outpd(uint16_t _port, uint32_t _data)
+{
+    __asm__ __volatile__ ("outl %1, %0"
+                          :
+                          : "d" (_port),
+                          "a" (_data));
+}
+
+/* Bulk port I/O helpers. Each saves all GPRs and EFLAGS, disables
+ * interrupts and clears DF for the duration of the `rep ins/outs`
+ * transfer, then restores everything via popfl/popal. */
+
+/* Read `_reads` bytes from port `_port` into `_buffer`. */
+static inline void inprep(uint16_t _port, uint8_t *_buffer, uint32_t _reads)
+{
+    __asm__ __volatile__ ("pushal \n\t"
+                          "pushfl \n\t"
+                          "cli \n\t"
+                          "cld \n\t"
+                          "rep insb \n\t"
+                          "popfl \n\t"
+                          "popal"
+                          :
+                          : "d" (_port),
+                          "D" (_buffer),
+                          "c" (_reads));
+}
+
+/* Write `_writes` bytes from `_buffer` to port `_port`. */
+static inline void outprep(uint16_t _port, uint8_t *_buffer, uint32_t _writes)
+{
+    __asm__ __volatile__ ("pushal \n\t"
+                          "pushfl \n\t"
+                          "cli \n\t"
+                          "cld \n\t"
+                          "rep outsb \n\t"
+                          "popfl \n\t"
+                          "popal"
+                          :
+                          : "d" (_port),
+                          "S" (_buffer),
+                          "c" (_writes));
+}
+
+/* Read `_reads` 16-bit words from port `_port` into `_buffer`. */
+static inline void inpwrep(uint16_t _port, uint16_t *_buffer, uint32_t _reads)
+{
+    __asm__ __volatile__ ("pushal \n\t"
+                          "pushfl \n\t"
+                          "cli \n\t"
+                          "cld \n\t"
+                          "rep insw \n\t"
+                          "popfl \n\t"
+                          "popal"
+                          :
+                          : "d" (_port),
+                          "D" (_buffer),
+                          "c" (_reads));
+}
+
+/* Write `_writes` 16-bit words from `_buffer` to port `_port`. */
+static inline void outpwrep(uint16_t _port, uint16_t *_buffer,
+                            uint32_t _writes)
+{
+    __asm__ __volatile__ ("pushal \n\t"
+                          "pushfl \n\t"
+                          "cli \n\t"
+                          "cld \n\t"
+                          "rep outsw \n\t"
+                          "popfl \n\t"
+                          "popal"
+                          :
+                          : "d" (_port),
+                          "S" (_buffer),
+                          "c" (_writes));
+}
+
+/* Read `_reads` 32-bit dwords from port `_port` into `_buffer`. */
+static inline void inpdrep(uint16_t _port, uint32_t *_buffer,
+                           uint32_t _reads)
+{
+    __asm__ __volatile__ ("pushal \n\t"
+                          "pushfl \n\t"
+                          "cli \n\t"
+                          "cld \n\t"
+                          "rep insl \n\t"
+                          "popfl \n\t"
+                          "popal"
+                          :
+                          : "d" (_port),
+                          "D" (_buffer),
+                          "c" (_reads));
+}
+
+/* Write `_writes` 32-bit dwords from `_buffer` to port `_port`. */
+static inline void outpdrep(uint16_t _port, uint32_t *_buffer,
+                            uint32_t _writes)
+{
+    __asm__ __volatile__ ("pushal \n\t"
+                          "pushfl \n\t"
+                          "cli \n\t"
+                          "cld \n\t"
+                          "rep outsl \n\t"
+                          "popfl \n\t"
+                          "popal"
+                          :
+                          : "d" (_port),
+                          "S" (_buffer),
+                          "c" (_writes));
+}
+
+/* Read the 64-bit model-specific register `msr_id` (rdmsr: edx:eax). */
+static inline uint64_t read_msr (uint32_t msr_id)
+{
+    uint64_t msr_read_val = 0;
+    uint32_t low_val = 0;
+    uint32_t high_val = 0;
+
+    __asm__ __volatile__ (
+        "rdmsr \n\t"
+        : "=a" (low_val), "=d"(high_val)
+        : "c" (msr_id));
+
+    /* Reassemble the 64-bit value from the edx:eax halves. */
+    msr_read_val = high_val;
+    msr_read_val = (msr_read_val << 32) | low_val;
+
+    return msr_read_val;
+}
+
+/* Write the 64-bit value `msr_write_val` to MSR `msr_id` (wrmsr). */
+static inline void write_msr (uint32_t msr_id, uint64_t msr_write_val)
+{
+    uint32_t low_val = (uint32_t)msr_write_val;
+    uint32_t high_val = (uint32_t)(msr_write_val >> 32);
+
+    __asm__ __volatile__ (
+        "wrmsr \n\t"
+        : : "c" (msr_id), "a" (low_val), "d"(high_val));
+}
+
+/* Read CR3 (page directory base / paging structure root). */
+static inline uint32_t x86_get_cr3(void)
+{
+    uint32_t rv;
+
+    __asm__ __volatile__ (
+        "mov %%cr3, %0"
+        : "=r" (rv));
+    return rv;
+}
+
+/* Load CR3; switches the active page tables and flushes non-global TLB entries. */
+static inline void x86_set_cr3(uint32_t in_val)
+{
+    __asm__ __volatile__ (
+        "mov %0,%%cr3 \n\t"
+        :
+        :"r" (in_val));
+}
+
+/* Read CR0 (PE/PG and other control bits, see X86_CR0_* above). */
+static inline uint32_t x86_get_cr0(void)
+{
+    uint32_t rv;
+
+    __asm__ __volatile__ (
+        "mov %%cr0, %0 \n\t"
+        : "=r" (rv));
+    return rv;
+}
+
+/* Read CR4 (feature-enable bits, see X86_CR4_* above). */
+static inline uint32_t x86_get_cr4(void)
+{
+    uint32_t rv;
+
+    __asm__ __volatile__ (
+        "mov %%cr4, %0 \n\t"
+        : "=r" (rv));
+    return rv;
+}
+
+
+/* Load CR0. */
+static inline void x86_set_cr0(uint32_t in_val)
+{
+    __asm__ __volatile__ (
+        "mov %0,%%cr0 \n\t"
+        :
+        :"r" (in_val));
+}
+
+/* Load CR4. */
+static inline void x86_set_cr4(uint32_t in_val)
+{
+    __asm__ __volatile__ (
+        "mov %0,%%cr4 \n\t"
+        :
+        :"r" (in_val));
+}
+
+/*
+ * Query the cpu's linear address width: CPUID leaf 0x80000008, EAX[15:8]
+ * (EAX[7:0] would be the physical address width).
+ *
+ * Fix: cpuid writes eax, ebx, ecx and edx; the original asm only declared
+ * the eax output, so the compiler was free to keep live values in the
+ * other three registers across the instruction. Declare them clobbered.
+ */
+static inline uint32_t x86_get_address_width(void)
+{
+    uint32_t rv;
+
+    __asm__ __volatile__ (
+        "cpuid \n\t"
+        :"=a" (rv)
+        :"a" (X86_CPUID_ADDR_WIDTH)
+        :"ebx", "ecx", "edx");
+
+    /* Extracting bit 15:8 from eax register */
+    return ((rv >> 8) & 0x0ff);
+}
+
+/* True when CR0.PG is set, i.e. paging is active. */
+static inline bool x86_is_paging_enabled(void)
+{
+    if (x86_get_cr0() & X86_CR0_PG)
+        return true;
+
+    return false;
+}
+
+/* Returns nonzero when paging is on AND CR4.PAE is set.
+ * NOTE(review): returns bool values through a uint32_t return type;
+ * callers should treat it as a boolean. */
+static inline uint32_t x86_is_PAE_enabled(void)
+{
+    if (x86_is_paging_enabled() == false)
+        return false;
+
+    if (!(x86_get_cr4() & X86_CR4_PAE))
+        return false;
+
+    return true;
+}
+
+/*
+ * Test whether the cpu supports SMEP.
+ *
+ * Per Intel SDM Vol. 2A, SMEP is reported in CPUID.(EAX=07H,ECX=0H):EBX
+ * bit 7 -- the original code shifted by 0x06 and therefore tested bit 6.
+ * Also fixed: cpuid overwrites eax/ecx (used here as inputs) and edx,
+ * which the original asm did not declare; use "+a"/"+c" dummy outputs
+ * and an edx clobber so the compiler knows.
+ */
+static inline uint32_t check_smep_avail(void)
+{
+    uint32_t leaf = 0x07;    /* structured extended feature flags */
+    uint32_t subleaf = 0x0;
+    uint32_t reg_b = 0x0;
+    __asm__ __volatile__ (
+        "cpuid \n\t"
+        : "+a" (leaf), "=b" (reg_b), "+c" (subleaf)
+        :
+        : "edx");
+    return ((reg_b >> 7) & 0x1);
+}
+
+/*
+ * Test whether the cpu supports SMAP.
+ *
+ * Per Intel SDM Vol. 2A, SMAP is reported in CPUID.(EAX=07H,ECX=0H):EBX
+ * bit 20 -- the original code shifted by 0x13 (19) and therefore tested
+ * the wrong bit. Also fixed: declare cpuid's writes to eax/ecx/edx
+ * (dummy "+a"/"+c" outputs plus an edx clobber) so the compiler does not
+ * keep live values in those registers.
+ */
+static inline uint32_t check_smap_avail(void)
+{
+    uint32_t leaf = 0x07;    /* structured extended feature flags */
+    uint32_t subleaf = 0x0;
+    uint32_t reg_b = 0x0;
+    __asm__ __volatile__ (
+        "cpuid \n\t"
+        : "+a" (leaf), "=b" (reg_b), "+c" (subleaf)
+        :
+        : "edx");
+    return ((reg_b >> 20) & 0x1);
+}
+
+__END_CDECLS
+#endif
diff --git a/src/bsp/lk/arch/x86/include/arch/x86/descriptor.h b/src/bsp/lk/arch/x86/include/arch/x86/descriptor.h
new file mode 100644
index 0000000..d66900d
--- /dev/null
+++ b/src/bsp/lk/arch/x86/include/arch/x86/descriptor.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __ARCH_DESCRIPTOR_H
+#define __ARCH_DESCRIPTOR_H
+
+#include <sys/types.h>
+
+/*
+ * System Selectors
+ */
+#define NULL_SELECTOR       0x00
+
+/********* x86 selectors *********/
+#define CODE_SELECTOR       0x08
+#define DATA_SELECTOR       0x10
+#define USER_CODE_32_SELECTOR   0x18
+#define USER_DATA_32_SELECTOR   0x20
+#define NULL_2_SELECTOR     0x28
+
+/******* x86-64 selectors ********/
+#define CODE_64_SELECTOR    0x30
+#define STACK_64_SELECTOR   0x38
+#define USER_CODE_64_SELECTOR   0x50
+#define USER_DATA_64_SELECTOR   0x58
+
+#define TSS_SELECTOR        0x60
+/*
+ * Descriptor Types
+ */
+#define SEG_TYPE_TSS        0x9
+#define SEG_TYPE_TSS_BUSY   0xb
+#define SEG_TYPE_TASK_GATE  0x5
+#define SEG_TYPE_INT_GATE   0xe     // 32 bit
+#define SEG_TYPE_DATA_RW    0x2
+#define SEG_TYPE_CODE_RW    0xa
+
+typedef uint16_t seg_sel_t;
+
+void set_global_desc(seg_sel_t sel, void *base, uint32_t limit,
+                     uint8_t present, uint8_t ring, uint8_t sys, uint8_t type, uint8_t gran, uint8_t bits);
+
+#endif
diff --git a/src/bsp/lk/arch/x86/include/arch/x86/mmu.h b/src/bsp/lk/arch/x86/include/arch/x86/mmu.h
new file mode 100644
index 0000000..ac3548d
--- /dev/null
+++ b/src/bsp/lk/arch/x86/include/arch/x86/mmu.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2008 Travis Geiselbrecht
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/types.h>
+#include <compiler.h>
+
+__BEGIN_CDECLS
+
+void x86_mmu_init(void);
+
+#define X86_MMU_PG_P        0x001           /* P    Valid                   */
+#define X86_MMU_PG_RW       0x002           /* R/W  Read/Write              */
+#define X86_MMU_PG_U        0x004           /* U/S  User/Supervisor         */
+#define X86_MMU_PG_PS       0x080           /* PS   Page size (0=4k,1=4M)   */
+#define X86_MMU_PG_PTE_PAT  0x080           /* PAT  PAT index               */
+#define X86_MMU_PG_G        0x100           /* G    Global                  */
+#define X86_MMU_CLEAR       0x0
+#define X86_DIRTY_ACCESS_MASK   0xf9f
+#define X86_MMU_CACHE_DISABLE   0x010       /* C Cache disable */
+
+#define PAGE_SIZE       4096
+#define PAGE_DIV_SHIFT      12
+
+#ifdef PAE_MODE_ENABLED
+/* PAE mode */
+#define X86_PDPT_ADDR_MASK  (0x00000000ffffffe0ul)
+#define X86_PG_FRAME        (0x000ffffffffff000ul)
+#define X86_PHY_ADDR_MASK   (0x000ffffffffffffful)
+#define X86_FLAGS_MASK      (0x0000000000000ffful)  /* NX Bit is ignored in the PAE mode */
+#define X86_PTE_NOT_PRESENT (0xFFFFFFFFFFFFFFFEul)
+#define X86_2MB_PAGE_FRAME  (0x000fffffffe00000ul)
+#define PAGE_OFFSET_MASK_4KB    (0x0000000000000ffful)
+#define PAGE_OFFSET_MASK_2MB    (0x00000000001ffffful)
+#define X86_MMU_PG_NX       (1ul << 63)
+#define X86_PAE_PAGING_LEVELS   3
+#define PDP_SHIFT       30
+#define PD_SHIFT        21
+#define PT_SHIFT        12
+#define ADDR_OFFSET     9
+#define PDPT_ADDR_OFFSET    2
+#define NO_OF_PT_ENTRIES    512
+
+#else
+/* non PAE mode */
+#define X86_PG_FRAME        (0xfffff000)
+#define X86_FLAGS_MASK          (0x00000fff)
+#define X86_PTE_NOT_PRESENT     (0xfffffffe)
+#define X86_4MB_PAGE_FRAME      (0xffc00000)
+#define PAGE_OFFSET_MASK_4KB    (0x00000fff)
+#define PAGE_OFFSET_MASK_4MB    (0x003fffff)
+#define NO_OF_PT_ENTRIES    1024
+#define X86_PAGING_LEVELS   2
+#define PD_SHIFT        22
+#define PT_SHIFT        12
+#define ADDR_OFFSET     10
+
+#endif
+
+#define X86_PHYS_TO_VIRT(x)     (x)
+#define X86_VIRT_TO_PHYS(x)     (x)
+
+/* Different page table levels in the page table mgmt hierarchy */
+enum page_table_levels {
+    PF_L,   /* page frame (leaf) */
+    PT_L,   /* page table */
+    PD_L,   /* page directory */
+#ifdef PAE_MODE_ENABLED
+    PDP_L   /* page directory pointer table (PAE only) */
+#endif
+} page_level;  /* NOTE(review): this declares a *variable* in a header, so
+                * every translation unit that includes it gets its own
+                * tentative definition; should be just the enum type. */
+
+
+/* Describes one contiguous virtual->physical mapping request. */
+struct map_range {
+    vaddr_t start_vaddr;
+#ifdef PAE_MODE_ENABLED
+    uint64_t start_paddr; /* Physical address in the PAE mode is 64 bits wide */
+#else
+    paddr_t start_paddr; /* Physical address in the PAE mode is 32 bits wide */
+#endif
+    uint32_t size;       /* length of the range in bytes */
+};
+
+#ifdef PAE_MODE_ENABLED
+typedef uint64_t map_addr_t;
+typedef uint64_t arch_flags_t;
+#else
+typedef uint32_t map_addr_t;
+typedef uint32_t arch_flags_t;
+#endif
+
+status_t x86_mmu_map_range (map_addr_t pdpt, struct map_range *range, arch_flags_t flags);
+status_t x86_mmu_add_mapping(map_addr_t init_table, map_addr_t paddr,
+                             vaddr_t vaddr, arch_flags_t flags);
+status_t x86_mmu_get_mapping(map_addr_t init_table, vaddr_t vaddr, uint32_t *ret_level,
+                             arch_flags_t *mmu_flags, map_addr_t *last_valid_entry);
+status_t x86_mmu_unmap(map_addr_t init_table, vaddr_t vaddr, uint count);
+addr_t *x86_create_new_cr3(void);
+map_addr_t get_kernel_cr3(void);
+
+__END_CDECLS
diff --git a/src/bsp/lk/arch/x86/kernel.ld b/src/bsp/lk/arch/x86/kernel.ld
new file mode 100644
index 0000000..d96f397
--- /dev/null
+++ b/src/bsp/lk/arch/x86/kernel.ld
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2013 Travis Geiselbrecht
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+ENTRY(_start)
+SECTIONS
+{
+    . = 0x0200000;
+
+	.text : {
+		__code_start = .;
+		KEEP(*(.text.boot))
+		*(.text* .sram.text)
+		*(.gnu.linkonce.t.*)
+		__code_end = .;
+	} =0x9090
+
+	.rodata : ALIGN(4096) {
+		__rodata_start = .;
+		*(.rodata*)
+		*(.gnu.linkonce.r.*)
+		. = ALIGN(4);
+	}
+
+    /*
+     * extra linker scripts tend to insert sections just after .rodata,
+     * so we want to make sure this symbol comes after anything inserted above,
+     * but not aligned to the next section necessarily.
+     */
+    .dummy_post_rodata : {
+        __rodata_end = .;
+    }
+
+	.data : ALIGN(4096) {
+		__data_start = .;
+		*(.data .data.* .gnu.linkonce.d.*)
+	}
+
+	.ctors : ALIGN(4) {
+		__ctor_list = .;
+		KEEP(*(.ctors .init_array))
+		__ctor_end = .;
+	}
+	.dtors : ALIGN(4) {
+		__dtor_list = .;
+		KEEP(*(.dtors .fini_array))
+		__dtor_end = .;
+	}
+
+	.stab   : { *(.stab) }
+	.stabst : { *(.stabstr) }
+
+    /*
+     * extra linker scripts tend to insert sections just after .data,
+     * so we want to make sure this symbol comes after anything inserted above,
+     * but not aligned to the next section necessarily.
+     */
+    .dummy_post_data : {
+        __data_end = .;
+    }
+
+	.bss : {
+		__bss_start = .;
+		*(.bss*)
+		*(.gnu.linkonce.b.*)
+		*(COMMON)
+		. = ALIGN(4);
+		__bss_end = .;
+	}
+
+
+	_end = .;
+
+	/DISCARD/ : { *(.comment .note .eh_frame) }
+}
diff --git a/src/bsp/lk/arch/x86/mmu.c b/src/bsp/lk/arch/x86/mmu.c
new file mode 100644
index 0000000..4884d83
--- /dev/null
+++ b/src/bsp/lk/arch/x86/mmu.c
@@ -0,0 +1,636 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <debug.h>
+#include <sys/types.h>
+#include <compiler.h>
+#include <arch.h>
+#include <arch/x86.h>
+#include <arch/x86/mmu.h>
+#include <stdlib.h>
+#include <string.h>
+#include <arch/mmu.h>
+#include <assert.h>
+#include <err.h>
+#include <arch/arch_ops.h>
+
+extern map_addr_t g_CR3;
+
+#ifdef PAE_MODE_ENABLED
+/* PDP table address is 32 bit wide when on PAE mode, but the PDP entries are 64 bit wide */
+static inline map_addr_t get_pdp_entry_from_pdp_table(vaddr_t vaddr, map_addr_t pdpt)
+{
+    uint32_t pdp_index;
+    map_addr_t *pdp_table;
+
+    pdp_index = ((vaddr >> PDP_SHIFT) & ((1ul << PDPT_ADDR_OFFSET) - 1));
+    pdp_table = (map_addr_t *)(pdpt & X86_PDPT_ADDR_MASK);
+    /* returns the raw PDPT entry translated into the kernel virtual window */
+    return X86_PHYS_TO_VIRT(pdp_table[pdp_index]);
+}
+
+/* Extract the 2MB-aligned page frame from a large-page PDE (PAE mode). */
+static inline map_addr_t get_pfn_from_pt(map_addr_t pt)
+{
+    map_addr_t pfn;
+
+    pfn = (pt & X86_2MB_PAGE_FRAME);
+    return X86_PHYS_TO_VIRT(pfn);
+}
+
+#else
+/* Extract the 4MB-aligned page frame from a large-page PDE (non-PAE mode). */
+static inline map_addr_t get_pfn_from_pde(map_addr_t pde)
+{
+    map_addr_t pfn;
+
+    pfn = (pde & X86_4MB_PAGE_FRAME);
+    return X86_PHYS_TO_VIRT(pfn);
+}
+#endif
+
+/* Index the page directory with vaddr's PD bits and return the entry,
+ * translated into the kernel virtual window. */
+static inline map_addr_t get_pd_entry_from_pd_table(vaddr_t vaddr, map_addr_t pdt)
+{
+    uint32_t pd_index;
+    map_addr_t *pd_table;
+
+    pd_index = ((vaddr >> PD_SHIFT) & ((1 << ADDR_OFFSET) - 1));
+    pd_table = (map_addr_t *)(pdt & X86_PG_FRAME);
+    return X86_PHYS_TO_VIRT(pd_table[pd_index]);
+}
+
+/* Index the page table with vaddr's PT bits and return the PTE,
+ * translated into the kernel virtual window. */
+static inline map_addr_t get_pt_entry_from_page_table(vaddr_t vaddr, map_addr_t pt)
+{
+    uint32_t pt_index;
+    map_addr_t *pt_table;
+
+    pt_index = ((vaddr >> PT_SHIFT) & ((1 << ADDR_OFFSET) - 1));
+    pt_table = (map_addr_t *)(pt & X86_PG_FRAME);
+    return X86_PHYS_TO_VIRT(pt_table[pt_index]);
+}
+
+/* Extract the 4KB-aligned page frame from a PTE. */
+static inline map_addr_t get_pfn_from_pte(map_addr_t pte)
+{
+    map_addr_t pfn;
+
+    pfn = (pte & X86_PG_FRAME);
+    return X86_PHYS_TO_VIRT(pfn);
+}
+
+/**
+ * @brief Returning the x86 arch flags from generic mmu flags
+ */
+arch_flags_t get_x86_arch_flags(arch_flags_t flags)
+{
+    arch_flags_t arch_flags = 0;
+
+    if (!(flags & ARCH_MMU_FLAG_PERM_RO))
+        arch_flags |= X86_MMU_PG_RW;
+
+    if (flags & ARCH_MMU_FLAG_PERM_USER)
+        arch_flags |= X86_MMU_PG_U;
+
+    if (flags & ARCH_MMU_FLAG_UNCACHED)
+        arch_flags |= X86_MMU_CACHE_DISABLE;
+
+#ifdef PAE_MODE_ENABLED
+    if (flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE)
+        arch_flags |= X86_MMU_PG_NX;
+#endif
+    return arch_flags;
+}
+
+/**
+ * @brief Returning the generic mmu flags from x86 arch flags
+ */
+uint get_arch_mmu_flags(arch_flags_t flags)
+{
+    arch_flags_t mmu_flags = 0;
+
+    if (!(flags & X86_MMU_PG_RW))
+        mmu_flags |= ARCH_MMU_FLAG_PERM_RO;
+
+    if (flags & X86_MMU_PG_U)
+        mmu_flags |= ARCH_MMU_FLAG_PERM_USER;
+
+    if (flags & X86_MMU_CACHE_DISABLE)
+        mmu_flags |= ARCH_MMU_FLAG_UNCACHED;
+
+#ifdef PAE_MODE_ENABLED
+    if (flags & X86_MMU_PG_NX)
+        mmu_flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
+#endif
+    return (uint)mmu_flags;
+}
+
+/**
+ * @brief  Walk the page table structures - supported for both PAE & non-PAE modes
+ *
+ */
+status_t x86_mmu_get_mapping(map_addr_t init_table, vaddr_t vaddr, uint32_t *ret_level,
+                             arch_flags_t *mmu_flags, map_addr_t *last_valid_entry)
+{
+    map_addr_t pt, pte, pdt;
+#ifdef PAE_MODE_ENABLED
+    map_addr_t pdpt;
+#endif
+
+    DEBUG_ASSERT(init_table);
+    if ((!ret_level) || (!last_valid_entry) || (!mmu_flags)) {
+        return ERR_INVALID_ARGS;
+    }
+
+    *mmu_flags = 0;
+
+#ifdef PAE_MODE_ENABLED
+    pdpt = init_table; /* First level table in PAE mode is pdpt */
+    *ret_level = PDP_L;
+    *last_valid_entry = pdpt;
+
+    pdt = get_pdp_entry_from_pdp_table(vaddr, pdpt);
+    if ((pdt & X86_MMU_PG_P) == 0) {
+        *ret_level = PDP_L;
+        *last_valid_entry = pdpt;
+        return ERR_NOT_FOUND;
+    }
+
+    pt = get_pd_entry_from_pd_table(vaddr, pdt);
+    if ((pt & X86_MMU_PG_P) == 0) {
+        *ret_level = PD_L;
+        *last_valid_entry = pdt;
+        return ERR_NOT_FOUND;
+    }
+#else
+    pdt = init_table; /* First table in non PAE mode is pdt */
+    *ret_level = PD_L;
+    *last_valid_entry = pdt;
+
+    pt = get_pd_entry_from_pd_table(vaddr, pdt);
+    if ((pt & X86_MMU_PG_P) == 0)
+        return ERR_NOT_FOUND;
+#endif
+
+    /* 4 MB pages (non PAE mode) and 2 MB pages (PAE mode) */
+    /* In this case, the page directory entry is NOT actually a PT (page table) */
+    if (pt & X86_MMU_PG_PS) {
+#ifdef PAE_MODE_ENABLED
+        /* Getting the Page frame & adding the 4KB page offset from the vaddr */
+        *last_valid_entry = get_pfn_from_pt(pt) + (vaddr & PAGE_OFFSET_MASK_2MB);
+#else
+        /* Getting the Page frame & adding the 4MB page offset from the vaddr */
+        *last_valid_entry = get_pfn_from_pde(pt) + (vaddr & PAGE_OFFSET_MASK_4MB);
+#endif
+        *mmu_flags = get_arch_mmu_flags((X86_PHYS_TO_VIRT(pt)) & X86_FLAGS_MASK);
+        goto last;
+    }
+
+    /* 4 KB pages */
+    pte = get_pt_entry_from_page_table(vaddr, pt);
+    if ((pte & X86_MMU_PG_P) == 0) {
+        *ret_level = PT_L;
+        *last_valid_entry = pt;
+        return ERR_NOT_FOUND;
+    }
+
+    /* Getting the Page frame & adding the 4KB page offset from the vaddr */
+    *last_valid_entry = get_pfn_from_pte(pte) + (vaddr & PAGE_OFFSET_MASK_4KB);
+    *mmu_flags = get_arch_mmu_flags((X86_PHYS_TO_VIRT(pte)) & X86_FLAGS_MASK);
+last:
+    *ret_level = PF_L;
+    return NO_ERROR;
+}
+
+/**
+ * Walk the page table structures to see if the mapping between a virtual address
+ * and a physical address exists. Also, check the flags.
+ *
+ */
+status_t x86_mmu_check_mapping(map_addr_t init_table, map_addr_t paddr,
+                               vaddr_t vaddr, arch_flags_t in_flags,
+                               uint32_t *ret_level, arch_flags_t *ret_flags,
+                               map_addr_t *last_valid_entry)
+{
+    status_t status;
+    arch_flags_t existing_flags = 0;
+
+    DEBUG_ASSERT(init_table);
+    if ((!ret_level) || (!last_valid_entry) || (!ret_flags) ||
+            (!IS_ALIGNED(vaddr, PAGE_SIZE)) ||
+            (!IS_ALIGNED(paddr, PAGE_SIZE))) {
+        return ERR_INVALID_ARGS;
+    }
+
+    status = x86_mmu_get_mapping(init_table, vaddr, ret_level, &existing_flags, last_valid_entry);
+    if (status || ((*last_valid_entry) != paddr)) {
+        /* We did not reach till we check the access flags for the mapping */
+        *ret_flags = in_flags;
+        return ERR_NOT_FOUND;
+    }
+
+    /* Checking the access flags for the mapped address. If it is not zero, then
+     * the access flags are different & the return flag will have those access bits
+     * which are different.
+     */
+    *ret_flags = (in_flags ^ get_x86_arch_flags(existing_flags)) & X86_DIRTY_ACCESS_MASK;
+
+    if (!(*ret_flags))
+        return NO_ERROR;
+
+    return ERR_NOT_FOUND;
+}
+
+#ifdef PAE_MODE_ENABLED
+/* Point the PDPT entry for vaddr at the page-directory 'm' and mark it present.
+ * 'flags' is unused here -- presumably because PAE PDPTEs carry no RW/US bits;
+ * confirm against the paging-structure format. */
+static void update_pdp_entry(vaddr_t vaddr, map_addr_t pdpt, map_addr_t *m, arch_flags_t flags)
+{
+    uint32_t pdp_index;
+
+    map_addr_t *pdp_table = (map_addr_t *)(pdpt & X86_PG_FRAME);
+    pdp_index = ((vaddr >> PDP_SHIFT) & ((1ul << PDPT_ADDR_OFFSET) - 1));
+    pdp_table[pdp_index] = (map_addr_t)m;
+    pdp_table[pdp_index] |= X86_MMU_PG_P;
+}
+#endif
+
+/* Write the leaf PTE for vaddr: frame address, caller-supplied flags, present
+ * bit, and the global bit for kernel (non-user) pages. */
+static void update_pt_entry(vaddr_t vaddr, map_addr_t paddr, map_addr_t pt, arch_flags_t flags)
+{
+    uint32_t pt_index;
+
+    map_addr_t *pt_table = (map_addr_t *)(pt & X86_PG_FRAME);
+    pt_index = ((vaddr >> PT_SHIFT) & ((1 << ADDR_OFFSET) - 1));
+    pt_table[pt_index] = paddr;
+    pt_table[pt_index] |= flags | X86_MMU_PG_P; /* last level - actual page being mapped */
+    if (!(flags & X86_MMU_PG_U))
+        pt_table[pt_index] |= X86_MMU_PG_G; /* setting global flag for kernel pages */
+}
+
+/* Point the PD entry for vaddr at the page table 'm'.  The entry is made
+ * present and writable; the leaf PTE carries the effective permissions. */
+static void update_pd_entry(vaddr_t vaddr, map_addr_t pdt, map_addr_t *m, arch_flags_t flags)
+{
+    uint32_t pd_index;
+
+    map_addr_t *pd_table = (map_addr_t *)(pdt & X86_PG_FRAME);
+    pd_index = ((vaddr >> PD_SHIFT) & ((1 << ADDR_OFFSET) - 1));
+    pd_table[pd_index] = (map_addr_t)m;
+    pd_table[pd_index] |= X86_MMU_PG_P | X86_MMU_PG_RW;
+    if (flags & X86_MMU_PG_U)
+        pd_table[pd_index] |= X86_MMU_PG_U;
+    else
+        pd_table[pd_index] |= X86_MMU_PG_G; /* setting global flag for kernel pages */
+}
+
+/**
+ * @brief Allocating a new page table
+ */
+static map_addr_t *_map_alloc_page(void)
+{
+    map_addr_t *page_ptr = memalign(PAGE_SIZE, PAGE_SIZE);
+
+    if (page_ptr)
+        memset(page_ptr, 0, PAGE_SIZE);
+
+    return page_ptr;
+}
+
+/* Create a new top-level paging structure pre-populated with a copy of the
+ * kernel's mappings (from g_CR3).  Returns NULL-equivalent 0 if g_CR3 is not
+ * set; asserts on allocation failure.  Caller owns the returned page. */
+addr_t *x86_create_new_cr3(void)
+{
+    map_addr_t *kernel_table, *new_table = NULL;
+
+    if (!g_CR3)
+        return 0;
+
+    kernel_table = (map_addr_t *)X86_PHYS_TO_VIRT(g_CR3);
+
+    /* Allocate a new Page to generate a new paging structure for a new CR3 */
+    new_table = _map_alloc_page();
+    ASSERT(new_table);
+
+    /* Copying the kernel mapping as-is */
+    memcpy(new_table, kernel_table, PAGE_SIZE);
+
+    return (addr_t *)new_table;
+}
+
+/**
+ * @brief Returning the kernel CR3
+ */
+map_addr_t get_kernel_cr3(void)
+{
+    return g_CR3;
+}
+
+/**
+ * @brief  Add a new mapping for the given virtual address & physical address
+ *
+ * This is a API which handles the mapping b/w a virtual address & physical address
+ * either by checking if the mapping already exists and is valid OR by adding a
+ * new mapping with the required flags.
+ *
+ */
+status_t x86_mmu_add_mapping(map_addr_t init_table, map_addr_t paddr,
+                             vaddr_t vaddr, arch_flags_t mmu_flags)
+{
+#ifdef PAE_MODE_ENABLED
+    map_addr_t pdt;
+    uint32_t pd_new = 0;
+#endif
+    map_addr_t pt, *m = NULL;
+    status_t ret = NO_ERROR;
+
+    DEBUG_ASSERT(init_table);
+    if ((!IS_ALIGNED(vaddr, PAGE_SIZE)) || (!IS_ALIGNED(paddr, PAGE_SIZE)) )
+        return ERR_INVALID_ARGS;
+
+#ifdef PAE_MODE_ENABLED
+    pdt = get_pdp_entry_from_pdp_table(vaddr, init_table);
+    if ((pdt & X86_MMU_PG_P) == 0) {
+        /* Creating a new pd table  */
+        m  = _map_alloc_page();
+        if (m == NULL) {
+            ret = ERR_NO_MEMORY;
+            goto clean;
+        }
+        update_pdp_entry(vaddr, init_table, m, get_x86_arch_flags(mmu_flags));
+        pdt = (map_addr_t)m;
+        pd_new = 1;
+    }
+
+    if (!pd_new)
+        pt = get_pd_entry_from_pd_table(vaddr, pdt);
+
+    if (pd_new || (pt & X86_MMU_PG_P) == 0) {
+        /* Creating a new pt */
+        m  = _map_alloc_page();
+        if (m == NULL) {
+            ret = ERR_NO_MEMORY;
+            if (pd_new)
+                goto clean_pd;
+            goto clean;
+        }
+
+        update_pd_entry(vaddr, pdt, m, get_x86_arch_flags(mmu_flags));
+        pt = (map_addr_t)m;
+    }
+#else
+    pt = get_pd_entry_from_pd_table(vaddr, init_table);
+    if ((pt & X86_MMU_PG_P) == 0) {
+        /* Creating a new pt */
+        m  = _map_alloc_page();
+        if (m == NULL) {
+            ret = ERR_NO_MEMORY;
+            goto clean;
+        }
+
+        update_pd_entry(vaddr, init_table, m, get_x86_arch_flags(mmu_flags));
+        pt = (map_addr_t)m;
+    }
+#endif
+
+    /* Updating the page table entry with the paddr and access flags required for the mapping */
+    update_pt_entry(vaddr, paddr, pt, get_x86_arch_flags(mmu_flags));
+    ret = NO_ERROR;
+#ifdef PAE_MODE_ENABLED
+    goto clean;
+
+clean_pd:
+    if (pd_new)
+        free((map_addr_t *)pdt);
+#endif
+clean:
+    return ret;
+}
+
+/**
+ * @brief  x86 MMU unmap an entry in the page tables recursively and clear out tables
+ *
+ */
+static void x86_mmu_unmap_entry(vaddr_t vaddr, int level, map_addr_t table_entry)
+{
+    uint32_t offset = 0, next_level_offset = 0;
+    map_addr_t *table, *next_table_addr, value;
+
+    next_table_addr = NULL;
+    table = (map_addr_t *)(X86_VIRT_TO_PHYS(table_entry) & X86_PG_FRAME);
+
+    switch (level) {
+#ifdef PAE_MODE_ENABLED
+        case PDP_L:
+            offset = ((vaddr >> PDP_SHIFT) & ((1 << PDPT_ADDR_OFFSET) - 1));
+            next_table_addr = (map_addr_t *)X86_PHYS_TO_VIRT(table[offset]);
+            if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
+                return;
+            break;
+#endif
+        case PD_L:
+            offset = ((vaddr >> PD_SHIFT) & ((1 << ADDR_OFFSET) - 1));
+            next_table_addr = (map_addr_t *)X86_PHYS_TO_VIRT(table[offset]);
+            if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
+                return;
+            break;
+        case PT_L:
+            offset = ((vaddr >> PT_SHIFT) & ((1 << ADDR_OFFSET) - 1));
+            next_table_addr = (map_addr_t *)X86_PHYS_TO_VIRT(table[offset]);
+            if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) == 0)
+                return;
+            break;
+        case PF_L:
+            /* Reached page frame, Let's go back */
+        default:
+            return;
+    }
+
+    level -= 1;
+    x86_mmu_unmap_entry(vaddr, level, (map_addr_t)next_table_addr);
+    level += 1;
+
+    next_table_addr = (map_addr_t *)((map_addr_t)(X86_VIRT_TO_PHYS(next_table_addr)) & X86_PG_FRAME);
+    if (level > PT_L) {
+        /* Check all entries of next level table for present bit */
+        for (next_level_offset = 0; next_level_offset < NO_OF_PT_ENTRIES; next_level_offset++) {
+            if ((next_table_addr[next_level_offset] & X86_MMU_PG_P) != 0)
+                return; /* There is an entry in the next level table */
+        }
+        free(next_table_addr);
+    }
+    /* All present bits for all entries in next level table for this address are 0 */
+    if ((X86_PHYS_TO_VIRT(table[offset]) & X86_MMU_PG_P) != 0) {
+        arch_disable_ints();
+        value = table[offset];
+        value = value & X86_PTE_NOT_PRESENT;
+        table[offset] = value;
+        arch_enable_ints();
+    }
+}
+
+/* Unmap 'count' consecutive 4KB pages starting at page-aligned vaddr from the
+ * paging hierarchy rooted at init_table.  count == 0 is a no-op. */
+status_t x86_mmu_unmap(map_addr_t init_table, vaddr_t vaddr, uint count)
+{
+    vaddr_t next_aligned_v_addr;
+
+    DEBUG_ASSERT(init_table);
+    if (!IS_ALIGNED(vaddr, PAGE_SIZE))
+        return ERR_INVALID_ARGS;
+
+    if (count == 0)
+        return NO_ERROR;
+
+    next_aligned_v_addr = vaddr;
+    while (count > 0) {
+#ifdef PAE_MODE_ENABLED
+        x86_mmu_unmap_entry(next_aligned_v_addr, X86_PAE_PAGING_LEVELS, init_table);
+#else
+        x86_mmu_unmap_entry(next_aligned_v_addr, X86_PAGING_LEVELS, init_table);
+#endif
+        next_aligned_v_addr += PAGE_SIZE;
+        count--;
+    }
+    return NO_ERROR;
+}
+
+/* Generic arch interface: unmap 'count' pages at vaddr from the address space
+ * addressed by the *current* CR3. */
+int arch_mmu_unmap(vaddr_t vaddr, uint count)
+{
+    map_addr_t init_table_from_cr3;
+
+    if (!IS_ALIGNED(vaddr, PAGE_SIZE))
+        return ERR_INVALID_ARGS;
+
+    if (count == 0)
+        return NO_ERROR;
+
+    DEBUG_ASSERT(x86_get_cr3());
+    init_table_from_cr3 = x86_get_cr3();
+
+    return (x86_mmu_unmap(init_table_from_cr3, vaddr, count));
+}
+
+/**
+ * @brief  Mapping a section/range with specific permissions
+ *
+ */
+status_t x86_mmu_map_range(map_addr_t init_table, struct map_range *range, arch_flags_t flags)
+{
+    vaddr_t next_aligned_v_addr;
+    map_addr_t next_aligned_p_addr;
+    status_t map_status;
+    uint32_t no_of_pages, index;
+
+    DEBUG_ASSERT(init_table);
+    if (!range)
+        return ERR_INVALID_ARGS;
+
+    /* Calculating the number of 4k pages */
+    if (IS_ALIGNED(range->size, PAGE_SIZE))
+        no_of_pages = (range->size) >> PAGE_DIV_SHIFT;
+    else
+        no_of_pages = ((range->size) >> PAGE_DIV_SHIFT) + 1;
+
+    next_aligned_v_addr = range->start_vaddr;
+    next_aligned_p_addr = range->start_paddr;
+
+    for (index = 0; index < no_of_pages; index++) {
+        map_status = x86_mmu_add_mapping(init_table, next_aligned_p_addr, next_aligned_v_addr, flags);
+        if (map_status) {
+            dprintf(SPEW, "Add mapping failed with err=%d\n", map_status);
+            /* Unmap the partial mapping - if any */
+            x86_mmu_unmap(init_table, range->start_vaddr, index);
+            return map_status;
+        }
+        next_aligned_v_addr += PAGE_SIZE;
+        next_aligned_p_addr += PAGE_SIZE;
+    }
+
+    return NO_ERROR;
+}
+
+/* Generic arch interface: translate vaddr through the current CR3.
+ * On success stores the physical address in *paddr and, if flags is non-NULL,
+ * the generic mmu flags of the mapping. */
+status_t arch_mmu_query(vaddr_t vaddr, paddr_t *paddr, uint *flags)
+{
+    uint32_t ret_level, current_cr3_val;
+    map_addr_t last_valid_entry;
+    arch_flags_t ret_flags;
+    status_t stat;
+
+    if (!paddr)
+        return ERR_INVALID_ARGS;
+
+    DEBUG_ASSERT(x86_get_cr3());
+    current_cr3_val = (map_addr_t)x86_get_cr3();
+
+    stat = x86_mmu_get_mapping(current_cr3_val, vaddr, &ret_level, &ret_flags, &last_valid_entry);
+    if (stat)
+        return stat;
+
+    *paddr = (paddr_t)last_valid_entry;
+
+    /* x86_mmu_get_mapping already returns generic arch mmu flags */
+    if (flags)
+        *flags = ret_flags;
+
+    return NO_ERROR;
+}
+
+/* Generic arch interface: map 'count' pages paddr -> vaddr with generic mmu
+ * 'flags' into the address space addressed by the current CR3. */
+int arch_mmu_map(vaddr_t vaddr, paddr_t paddr, uint count, uint flags)
+{
+    uint32_t current_cr3_val;
+    struct map_range range;
+
+    if ((!IS_ALIGNED(paddr, PAGE_SIZE)) || (!IS_ALIGNED(vaddr, PAGE_SIZE)))
+        return ERR_INVALID_ARGS;
+
+    if (count == 0)
+        return NO_ERROR;
+
+    DEBUG_ASSERT(x86_get_cr3());
+    current_cr3_val = (map_addr_t)x86_get_cr3();
+
+    range.start_vaddr = vaddr;
+    range.start_paddr = (map_addr_t)paddr;
+    range.size = count * PAGE_SIZE;
+
+    return (x86_mmu_map_range(current_cr3_val, &range, flags));
+}
+
+/**
+ * @brief  x86 MMU basic initialization
+ *
+ */
+void arch_mmu_init(void)
+{
+    volatile uint32_t cr0;
+
+    /* Set WP bit in CR0*/
+    cr0 = x86_get_cr0();
+    cr0 |= X86_CR0_WP;
+    x86_set_cr0(cr0);
+
+#ifdef PAE_MODE_ENABLED
+    volatile uint32_t efer_msr, cr4;
+
+    /* Setting the SMEP & SMAP bit in CR4 */
+    cr4 = x86_get_cr4();
+    if (check_smep_avail())
+        cr4 |= X86_CR4_SMEP;
+    if (check_smap_avail())
+        cr4 |=X86_CR4_SMAP;
+    x86_set_cr4(cr4);
+
+    /* Set NXE bit in MSR_EFER*/
+    efer_msr = read_msr(x86_MSR_EFER);
+    efer_msr |= x86_EFER_NXE;
+    write_msr(x86_MSR_EFER, efer_msr);
+#endif
+}
diff --git a/src/bsp/lk/arch/x86/ops.S b/src/bsp/lk/arch/x86/ops.S
new file mode 100644
index 0000000..32a098c
--- /dev/null
+++ b/src/bsp/lk/arch/x86/ops.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+
+.text
+
+/* int _atomic_and(int *ptr, int val); */
+/* Atomically performs *ptr &= val via a lock cmpxchg loop; returns the
+ * previous value of *ptr in %eax. */
+FUNCTION(_atomic_and)
+    movl 4(%esp), %edx
+    movl (%edx), %eax
+0:
+    movl %eax, %ecx
+    andl 8(%esp), %ecx
+    lock
+    cmpxchgl %ecx, (%edx)
+    jnz 1f                  /* static prediction: branch forward not taken */
+    ret
+1:
+    jmp 0b
+
+
+/* int _atomic_or(int *ptr, int val); */
+/* Atomically performs *ptr |= val via a lock cmpxchg loop; returns the
+ * previous value of *ptr in %eax.  Mirrors _atomic_and above. */
+FUNCTION(_atomic_or)
+    movl 4(%esp), %edx
+    movl (%edx), %eax
+0:
+    movl %eax, %ecx
+    orl 8(%esp), %ecx
+    lock
+    cmpxchgl %ecx, (%edx)
+    jnz 1f                  /* static prediction: branch forward not taken */
+    ret
+1:
+    jmp 0b
+
+/* void arch_idle(); */
+/* Halt the CPU until the next interrupt, but only when EFLAGS.IF (bit 9,
+ * mask 0x200) is set -- hlt with interrupts disabled would hang forever. */
+FUNCTION(arch_idle)
+    pushf
+    popl %eax
+    andl $0x200, %eax
+    test %eax, %eax
+    je 1f                   /* don't halt if local interrupts are disabled */
+    hlt
+1:
+    ret
+
diff --git a/src/bsp/lk/arch/x86/rules.mk b/src/bsp/lk/arch/x86/rules.mk
new file mode 100644
index 0000000..b158d3b
--- /dev/null
+++ b/src/bsp/lk/arch/x86/rules.mk
@@ -0,0 +1,58 @@
+# x86 (32-bit) architecture module rules for the LK build system.
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE := $(LOCAL_DIR)
+
+WITH_KERNEL_VM=1
+
+# Kernel is linked and loaded at 2MB; must agree with kernel.ld.
+GLOBAL_DEFINES += \
+	MEMBASE=0x00200000U \
+	KERNEL_ASPACE_BASE=0x00200000U \
+	KERNEL_ASPACE_SIZE=0x7fe00000U \
+	X86_WITH_FPU=1 \
+	SMP_MAX_CPUS=1
+
+
+KERNEL_BASE ?= 0x00200000
+KERNEL_LOAD_OFFSET ?= 0x0
+
+MODULE_SRCS += \
+	$(LOCAL_DIR)/crt0.S \
+	$(LOCAL_DIR)/arch.c \
+	$(LOCAL_DIR)/asm.S \
+	$(LOCAL_DIR)/cache.c \
+	$(LOCAL_DIR)/cache-ops.S \
+	$(LOCAL_DIR)/ops.S \
+	$(LOCAL_DIR)/thread.c \
+	$(LOCAL_DIR)/mmu.c \
+	$(LOCAL_DIR)/faults.c \
+	$(LOCAL_DIR)/descriptor.c \
+	$(LOCAL_DIR)/fpu.c
+
+# set the default toolchain to x86 elf and set a #define
+ifndef TOOLCHAIN_PREFIX
+TOOLCHAIN_PREFIX := i386-elf-
+endif
+
+
+# cc-option: compile an empty file with flag $(2); expand to $(2) if the
+# compiler accepts it silently, otherwise to the fallback $(3).
+cc-option = $(shell if test -z "`$(1) $(2) -S -o /dev/null -xc /dev/null 2>&1`"; \
+	then echo "$(2)"; else echo "$(3)"; fi ;)
+
+# disable SSP if the compiler supports it; it will break stuff
+GLOBAL_CFLAGS += $(call cc-option,$(CC),-fno-stack-protector,)
+
+GLOBAL_COMPILEFLAGS += -fasynchronous-unwind-tables
+GLOBAL_COMPILEFLAGS += -gdwarf-2
+
+ARCH_OPTFLAGS := -O2
+
+# potentially generated files that should be cleaned out with clean make rule
+GENERATED += \
+	$(BUILDDIR)/kernel.ld
+
+# rules for generating the linker scripts
+$(BUILDDIR)/kernel.ld: $(LOCAL_DIR)/kernel.ld $(wildcard arch/*.ld)
+	@echo generating $@
+	@$(MKDIR)
+	$(NOECHO)cp $< $@
+
+include make/module.mk
diff --git a/src/bsp/lk/arch/x86/thread.c b/src/bsp/lk/arch/x86/thread.c
new file mode 100755
index 0000000..94648e0
--- /dev/null
+++ b/src/bsp/lk/arch/x86/thread.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2014 Travis Geiselbrecht
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <debug.h>
+#include <kernel/thread.h>
+#include <kernel/spinlock.h>
+#include <arch/x86.h>
+#include <arch/x86/descriptor.h>
+#include <arch/fpu.h>
+
+/* Older frame layout including segment registers, kept for reference with the
+ * iret-based switch code at the bottom of this file. */
+/*struct context_switch_frame {
+    uint32_t edi, esi, ebp, esp, ebx, edx, ecx, eax;
+    uint32_t ds, es, fs, gs;
+    uint32_t eip, cs, eflags;
+};*/
+/* Stack frame built by arch_thread_initialize and consumed by the
+ * pusha/popf/ret sequence in arch_context_switch -- field order must match
+ * that asm exactly. */
+struct context_switch_frame {
+    uint32_t edi, esi, ebp, esp, ebx, edx, ecx, eax;
+    uint32_t eflags;
+    uint32_t eip;
+};
+
+/* we're uniprocessor at this point for x86, so store a global pointer to the current thread */
+struct thread *_current_thread;
+
+extern void x86_context_switch(addr_t *old_sp, addr_t new_sp);
+
+/* First code run by every new thread: release the thread lock held across the
+ * initial reschedule, enable interrupts, run the thread entry point, then
+ * exit with its return code.  Never returns. */
+static void initial_thread_func(void) __NO_RETURN;
+static void initial_thread_func(void)
+{
+    int ret;
+
+    /* release the thread lock that was implicitly held across the reschedule */
+    spin_unlock(&thread_lock);
+    arch_enable_ints();
+
+    ret = _current_thread->entry(_current_thread->arg);
+
+//  dprintf("initial_thread_func: thread %p exiting with %d\n", _current_thread, ret);
+
+    thread_exit(ret);
+}
+
+/* Build an initial context_switch_frame at the top of the thread's stack so
+ * the first context switch into it "returns" into initial_thread_func. */
+void arch_thread_initialize(thread_t *t)
+{
+    // create a default stack frame on the stack
+    vaddr_t stack_top = (vaddr_t)t->stack + t->stack_size;
+
+    // keep the top of the stack 8 byte aligned
+    // (comment originally said "EABI compliance" -- an ARM-ism; alignment
+    // itself is still desirable here)
+    stack_top = ROUNDDOWN(stack_top, 8);
+
+    struct context_switch_frame *frame = (struct context_switch_frame *)(stack_top);
+    frame--;
+
+    // fill it in
+    memset(frame, 0, sizeof(*frame));
+
+    frame->eip = (vaddr_t) &initial_thread_func;
+    frame->eflags = 0x3002; // IF = 0, NT = 0, IOPL = 3
+    //frame->cs = CODE_SELECTOR;
+    //frame->fs = DATA_SELECTOR;
+    //frame->gs = DATA_SELECTOR;
+    //frame->es = DATA_SELECTOR;
+    //frame->ds = DATA_SELECTOR;
+
+    // set the stack pointer
+    t->arch.esp = (vaddr_t)frame;
+#if X86_WITH_FPU
+    // fpu_states must be 16-byte aligned for fxsave/fxrstor; the buffer is
+    // over-sized so the aligned pointer always fits
+    memset(t->arch.fpu_buffer, 0, sizeof(t->arch.fpu_buffer));
+    t->arch.fpu_states = (vaddr_t *)ROUNDUP(((vaddr_t)t->arch.fpu_buffer), 16);
+#endif
+}
+
+/* Debug dump of a thread's arch state (saved esp); skipped for the running
+ * thread, whose esp is live rather than saved. */
+void arch_dump_thread(thread_t *t)
+{
+    if (t->state != THREAD_RUNNING) {
+        dprintf(INFO, "\tarch: ");
+        dprintf(INFO, "sp 0x%lx\n", t->arch.esp);
+    }
+}
+
+/* Switch CPU context from oldthread to newthread.
+ * The inline asm pushes a resume address ($1f), eflags and the GP registers
+ * (matching struct context_switch_frame), saves esp into oldthread, loads
+ * newthread's saved esp, then pops the new thread's frame and "ret"s to its
+ * saved eip.  Segment registers are not switched (flat kernel model). */
+void arch_context_switch(thread_t *oldthread, thread_t *newthread)
+{
+    //dprintf(DEBUG, "arch_context_switch: old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
+
+#if X86_WITH_FPU
+    fpu_context_switch(oldthread, newthread);
+#endif
+
+    __asm__ __volatile__ (
+        "pushl $1f			\n\t"
+        "pushf				\n\t"
+        "pusha				\n\t"
+        "movl %%esp,(%%edx)	\n\t"
+        "movl %%eax,%%esp	\n\t"
+        "popa				\n\t"
+        "popf				\n\t"
+        "ret				\n\t"
+        "1:					\n\t"
+
+        :
+        : "d" (&oldthread->arch.esp), "a" (newthread->arch.esp)
+    );
+
+    /* earlier variant that also switched segment registers and used iret;
+     * kept for reference */
+    /*__asm__ __volatile__ (
+        "pushf              \n\t"
+        "pushl %%cs         \n\t"
+        "pushl $1f          \n\t"
+        "pushl %%gs         \n\t"
+        "pushl %%fs         \n\t"
+        "pushl %%es         \n\t"
+        "pushl %%ds         \n\t"
+        "pusha              \n\t"
+        "movl %%esp,(%%edx) \n\t"
+        "movl %%eax,%%esp   \n\t"
+        "popa               \n\t"
+        "popl %%ds          \n\t"
+        "popl %%es          \n\t"
+        "popl %%fs          \n\t"
+        "popl %%gs          \n\t"
+        "iret               \n\t"
+        "1: "
+        :
+        : "d" (&oldthread->arch.esp), "a" (newthread->arch.esp)
+    );*/
+}
+
diff --git a/src/bsp/lk/arch/x86/toolchain.mk b/src/bsp/lk/arch/x86/toolchain.mk
new file mode 100755
index 0000000..ba00c8e
--- /dev/null
+++ b/src/bsp/lk/arch/x86/toolchain.mk
@@ -0,0 +1,13 @@
+# x86 toolchain discovery.  Guarded so it runs at most once per build.
+ifndef ARCH_x86_TOOLCHAIN_INCLUDED
+ARCH_x86_TOOLCHAIN_INCLUDED := 1
+
+ifndef ARCH_x86_TOOLCHAIN_PREFIX
+ARCH_x86_TOOLCHAIN_PREFIX := i386-elf-
+endif
+# Probe the toolchain unconditionally: previously FOUNDTOOL was only set when
+# the prefix defaulted, so a user-supplied ARCH_x86_TOOLCHAIN_PREFIX left it
+# empty and the error below fired even with a valid toolchain on PATH.
+FOUNDTOOL=$(shell which $(ARCH_x86_TOOLCHAIN_PREFIX)gcc)
+
+ifeq ($(FOUNDTOOL),)
+$(error cannot find toolchain, please set ARCH_x86_TOOLCHAIN_PREFIX or add it to your path)
+endif
+
+endif