ZTE's code, first commit

Change-Id: I9a04da59e459a9bc0d67f101f700d9d7dc8d681b
diff --git a/ap/os/linux/linux-3.4.x/drivers/crypto/caam/Kconfig b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/Kconfig
new file mode 100644
index 0000000..2d876bb
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/Kconfig
@@ -0,0 +1,72 @@
+config CRYPTO_DEV_FSL_CAAM
+	tristate "Freescale CAAM-Multicore driver backend"
+	depends on FSL_SOC
+	help
+	  Enables the driver module for Freescale's Cryptographic Accelerator
+	  and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
+	  This module adds a job ring operation interface, and configures h/w
+	  to operate as a DPAA component automatically, depending
+	  on h/w feature availability.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called caam.
+
+config CRYPTO_DEV_FSL_CAAM_RINGSIZE
+	int "Job Ring size"
+	depends on CRYPTO_DEV_FSL_CAAM
+	range 2 9
+	default "9"
+	help
+	  Select size of Job Rings as a power of 2, within the
+	  range 2-9 (ring size 4-512).
+	  Examples:
+		2 => 4
+		3 => 8
+		4 => 16
+		5 => 32
+		6 => 64
+		7 => 128
+		8 => 256
+		9 => 512
+
+config CRYPTO_DEV_FSL_CAAM_INTC
+	bool "Job Ring interrupt coalescing"
+	depends on CRYPTO_DEV_FSL_CAAM
+	default y
+	help
+	  Enable the Job Ring's interrupt coalescing feature.
+
+config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
+	int "Job Ring interrupt coalescing count threshold"
+	depends on CRYPTO_DEV_FSL_CAAM_INTC
+	range 1 255
+	default 255
+	help
+	  Select number of descriptor completions to queue before
+	  raising an interrupt, in the range 1-255. Note that a selection
+	  of 1 functionally defeats the coalescing feature, and a selection
+	  equal to or greater than the job ring size will force timeouts.
+
+config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
+	int "Job Ring interrupt coalescing timer threshold"
+	depends on CRYPTO_DEV_FSL_CAAM_INTC
+	range 1 65535
+	default 2048
+	help
+	  Select number of bus clocks/64 to timeout in the case that one or
+	  more descriptor completions are queued without reaching the count
+	  threshold. Range is 1-65535.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+	tristate "Register algorithm implementations with the Crypto API"
+	depends on CRYPTO_DEV_FSL_CAAM
+	default y
+	select CRYPTO_ALGAPI
+	select CRYPTO_AUTHENC
+	help
+	  Selecting this will offload crypto for users of the
+	  scatterlist crypto API (such as the Linux native IPsec
+	  stack) to the SEC4 via job ring.
+
+	  To compile this as a module, choose M here: the module
+	  will be called caamalg.
diff --git a/ap/os/linux/linux-3.4.x/drivers/crypto/caam/Makefile b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/Makefile
new file mode 100644
index 0000000..ef39011
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the CAAM backend and dependent components
+#
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+
+caam-objs := ctrl.o jr.o error.o
diff --git a/ap/os/linux/linux-3.4.x/drivers/crypto/caam/caamalg.c b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/caamalg.c
new file mode 100644
index 0000000..534a364
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/caamalg.c
@@ -0,0 +1,2419 @@
+/*
+ * caam - Freescale FSL CAAM support for crypto API
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Based on talitos crypto API driver.
+ *
+ * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
+ *
+ * ---------------                     ---------------
+ * | JobDesc #1  |-------------------->|  ShareDesc  |
+ * | *(packet 1) |                     |   (PDB)     |
+ * ---------------      |------------->|  (hashKey)  |
+ *       .              |              | (cipherKey) |
+ *       .              |    |-------->| (operation) |
+ * ---------------      |    |         ---------------
+ * | JobDesc #2  |------|    |
+ * | *(packet 2) |           |
+ * ---------------           |
+ *       .                   |
+ *       .                   |
+ * ---------------           |
+ * | JobDesc #3  |------------
+ * | *(packet 3) |
+ * ---------------
+ *
+ * The SharedDesc never changes for a connection unless rekeyed, but
+ * each packet will likely be in a different place. So all we need
+ * to know to process the packet is where the input is, where the
+ * output goes, and what context we want to process with. Context is
+ * in the SharedDesc, packet references in the JobDesc.
+ *
+ * So, a job desc looks like:
+ *
+ * ---------------------
+ * | Header            |
+ * | ShareDesc Pointer |
+ * | SEQ_OUT_PTR       |
+ * | (output buffer)   |
+ * | SEQ_IN_PTR        |
+ * | (input buffer)    |
+ * | LOAD (to DECO)    |
+ * ---------------------
+ */
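+
+/*
+ * A rough sketch (for orientation only; see init_aead_job() below for
+ * the real, complete construction) of how such a job descriptor is
+ * assembled with the desc_constr.h helpers:
+ *
+ *	len = desc_len(sh_desc);
+ *	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+ *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
+ *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
+ */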
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+
+/*
+ * crypto alg
+ */
+#define CAAM_CRA_PRIORITY		3000
+/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
+#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
+					 SHA512_DIGEST_SIZE * 2)
+/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define CAAM_MAX_IV_LENGTH		16
+
+/* length of descriptors text */
+#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3)
+
+#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
+					 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
+					 15 * CAAM_CMD_SZ)
+
+#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
+					 CAAM_MAX_KEY_SIZE)
+#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
+
+#ifdef DEBUG
+/* for print_hex_dumps with line references */
+#define xstr(s) str(s)
+#define str(s) #s
+#define debug(format, arg...) printk(format, arg)
+#else
+#define debug(format, arg...)
+#endif
+
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+	u32 *jump_cmd, *uncond_jump_cmd;
+
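+	/* if the "shared" condition holds, jump to the DK (decrypt key)
+	 * form of the operation; otherwise run the plain decrypt and
+	 * jump over the DK form
+	 */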
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+	append_operation(desc, type | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT);
+	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, jump_cmd);
+	append_operation(desc, type | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+	set_jump_tgt_here(desc, uncond_jump_cmd);
+}
+
+/*
+ * Wait for completion of class 1 key loading before allowing
+ * error propagation
+ */
+static inline void append_dec_shr_done(u32 *desc)
+{
+	u32 *jump_cmd;
+
+	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, jump_cmd);
+	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+}
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * For aead encrypt and decrypt, read iv for both classes
+ */
+static inline void aead_append_ld_iv(u32 *desc, int ivsize)
+{
+	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+		   LDST_CLASS_1_CCB | ivsize);
+	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
+}
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * If all data, including src (with assoc and iv) or dst (with iv only) are
+ * contiguous
+ */
+#define GIV_SRC_CONTIG		1
+#define GIV_DST_CONTIG		(1 << 1)
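+/* e.g. GIV_SRC_CONTIG holds when assoc, iv and payload sit back-to-back
+ * in DMA space, so one SEQ IN PTR can cover all three */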
+
+/*
+ * per-session context
+ */
+struct caam_ctx {
+	struct device *jrdev;
+	u32 sh_desc_enc[DESC_MAX_USED_LEN];
+	u32 sh_desc_dec[DESC_MAX_USED_LEN];
+	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+	dma_addr_t sh_desc_enc_dma;
+	dma_addr_t sh_desc_dec_dma;
+	dma_addr_t sh_desc_givenc_dma;
+	u32 class1_alg_type;
+	u32 class2_alg_type;
+	u32 alg_op;
+	u8 key[CAAM_MAX_KEY_SIZE];
+	dma_addr_t key_dma;
+	unsigned int enckeylen;
+	unsigned int split_key_len;
+	unsigned int split_key_pad_len;
+	unsigned int authsize;
+};
+
+static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
+			    int keys_fit_inline)
+{
+	if (keys_fit_inline) {
+		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+				  ctx->split_key_len, CLASS_2 |
+				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+		append_key_as_imm(desc, (void *)ctx->key +
+				  ctx->split_key_pad_len, ctx->enckeylen,
+				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	} else {
+		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
+		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
+			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	}
+}
+
+static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
+				  int keys_fit_inline)
+{
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_WAIT);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	append_key_aead(desc, ctx, keys_fit_inline);
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Propagate errors from shared to job descriptor */
+	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+	struct aead_tfm *tfm = &aead->base.crt_aead;
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool keys_fit_inline = false;
+	u32 *key_jump_cmd, *jump_cmd;
+	u32 geniv, moveiv;
+	u32 *desc;
+
+	if (!ctx->enckeylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+	/* aead_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+
+	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+	/* Class 2 operation */
+	append_operation(desc, ctx->class2_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+	/* assoclen + cryptlen = seqinlen - ivsize */
+	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+	/* assoclen = (assoclen + cryptlen) - cryptlen */
+	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+	aead_append_ld_iv(desc, tfm->ivsize);
+
+	/* Class 1 operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+	/* Write ICV */
+	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	keys_fit_inline = false;
+	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+	desc = ctx->sh_desc_dec;
+
+	/* aead_decrypt shared descriptor */
+	init_sh_desc(desc, HDR_SHARE_WAIT);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	append_key_aead(desc, ctx, keys_fit_inline);
+
+	/* Only propagate error immediately if shared */
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, key_jump_cmd);
+	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+	set_jump_tgt_here(desc, jump_cmd);
+
+	/* Class 2 operation */
+	append_operation(desc, ctx->class2_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+				ctx->authsize + tfm->ivsize);
+	/* assoclen = (assoclen + cryptlen) - cryptlen */
+	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	aead_append_ld_iv(desc, tfm->ivsize);
+
+	append_dec_op1(desc, ctx->class1_alg_type);
+
+	/* Read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+	/* Load ICV */
+	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+	append_dec_shr_done(desc);
+
+	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	keys_fit_inline = false;
+	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+	/* aead_givencrypt shared descriptor */
+	desc = ctx->sh_desc_givenc;
+
+	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+	/* Generate IV */
+	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_SRC_INFIFO |
+		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Copy IV from class 1 context to the output FIFO */
+	append_move(desc, MOVE_SRC_CLASS1CTX |
+		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
+
+	/* Return to encryption */
+	append_operation(desc, ctx->class2_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* ivsize + cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+	/* assoclen = seqinlen - (ivsize + cryptlen) */
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	/* Copy IV from class 1 ctx to class 2 fifo */
+	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
+			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+	/* Class 1 operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Will write ivsize + cryptlen */
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* No need to reload IV */
+	append_seq_fifo_load(desc, tfm->ivsize,
+			     FIFOLD_CLASS_SKIP);
+
+	/* Will read cryptlen */
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+	/* Write ICV */
+	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+						 desc_bytes(desc),
+						 DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	return 0;
+}
+
+static int aead_setauthsize(struct crypto_aead *authenc,
+				    unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	aead_set_sh_desc(authenc);
+
+	return 0;
+}
+
+struct split_key_result {
+	struct completion completion;
+	int err;
+};
+
+static void split_key_done(struct device *dev, u32 *desc, u32 err,
+			   void *context)
+{
+	struct split_key_result *res = context;
+
+#ifdef DEBUG
+	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	if (err) {
+		char tmp[CAAM_ERROR_STR_MAX];
+
+		dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+	}
+
+	res->err = err;
+
+	complete(&res->completion);
+}
+
+/*
+get a split ipad/opad key
+
+Split key generation-----------------------------------------------
+
+[00] 0xb0810008    jobdesc: stidx=1 share=never len=8
+[01] 0x04000014        key: class2->keyreg len=20
+			@0xffe01000
+[03] 0x84410014  operation: cls2-op sha1 hmac init dec
+[04] 0x24940000     fifold: class2 msgdata-last2 len=0 imm
+[05] 0xa4000001       jump: class2 local all ->1 [06]
+[06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
+			@0xffe04000
+*/
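+/*
+ * (Background, not from the original sources: the split key is the pair
+ * of intermediate MDHA digest states obtained by hashing key^ipad and
+ * key^opad once; storing both states lets the h/w resume an HMAC
+ * without re-deriving the pads from the raw key on every job.)
+ */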
+static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
+{
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+	struct split_key_result result;
+	dma_addr_t dma_addr_in, dma_addr_out;
+	int ret = 0;
+
+	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+	if (!desc)
+		return -ENOMEM;
+
+	init_job_desc(desc, 0);
+
+	dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
+				     DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, dma_addr_in)) {
+		dev_err(jrdev, "unable to map key input memory\n");
+		kfree(desc);
+		return -ENOMEM;
+	}
+	append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
+		       KEY_DEST_CLASS_REG);
+
+	/* Sets MDHA up into an HMAC-INIT */
+	append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
+			     OP_ALG_AS_INIT);
+
+	/*
+	 * do a FIFO_LOAD of zero; this will trigger the internal key
+	 * expansion into both pads inside MDHA
+	 */
+	append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
+				FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
+
+	/*
+	 * FIFO_STORE with the explicit split-key content store
+	 * (0x26 output type)
+	 */
+	dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
+				      DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, dma_addr_out)) {
+		dev_err(jrdev, "unable to map key output memory\n");
+		kfree(desc);
+		return -ENOMEM;
+	}
+	append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
+			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
+	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	result.err = 0;
+	init_completion(&result.completion);
+
+	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+	if (!ret) {
+		/* in progress */
+		wait_for_completion_interruptible(&result.completion);
+		ret = result.err;
+#ifdef DEBUG
+		print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+			       ctx->split_key_pad_len, 1);
+#endif
+	}
+
+	dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
+			 DMA_FROM_DEVICE);
+	dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);
+
+	kfree(desc);
+
+	return ret;
+}
+
+static int aead_setkey(struct crypto_aead *aead,
+			       const u8 *key, unsigned int keylen)
+{
+	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	struct rtattr *rta = (void *)key;
+	struct crypto_authenc_key_param *param;
+	unsigned int authkeylen;
+	unsigned int enckeylen;
+	int ret = 0;
+
+	param = RTA_DATA(rta);
+	enckeylen = be32_to_cpu(param->enckeylen);
+
+	key += RTA_ALIGN(rta->rta_len);
+	keylen -= RTA_ALIGN(rta->rta_len);
+
+	if (keylen < enckeylen)
+		goto badkey;
+
+	authkeylen = keylen - enckeylen;
+
+	if (keylen > CAAM_MAX_KEY_SIZE)
+		goto badkey;
+
+	/* Pick class 2 key length from algorithm submask */
+	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+				      OP_ALG_ALGSEL_SHIFT] * 2;
+	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+
+#ifdef DEBUG
+	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
+	       keylen, enckeylen, authkeylen);
+	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
+	       ctx->split_key_len, ctx->split_key_pad_len);
+	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	ret = gen_split_key(ctx, key, authkeylen);
+	if (ret)
+		goto badkey;
+
+	/* append the encryption key after the auth split key */
+	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
+
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+				       enckeylen, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->key_dma)) {
+		dev_err(jrdev, "unable to map key i/o memory\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+		       ctx->split_key_pad_len + enckeylen, 1);
+#endif
+
+	ctx->enckeylen = enckeylen;
+
+	ret = aead_set_sh_desc(aead);
+	if (ret) {
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
+				 enckeylen, DMA_TO_DEVICE);
+	}
+
+	return ret;
+badkey:
+	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
+	struct device *jrdev = ctx->jrdev;
+	int ret = 0;
+	u32 *key_jump_cmd, *jump_cmd;
+	u32 *desc;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+				      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->key_dma)) {
+		dev_err(jrdev, "unable to map key i/o memory\n");
+		return -ENOMEM;
+	}
+	ctx->enckeylen = keylen;
+
+	/* ablkcipher_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	init_sh_desc(desc, HDR_SHARE_WAIT);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+			  ctx->enckeylen, CLASS_1 |
+			  KEY_DEST_CLASS_REG);
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Propagate errors from shared to job descriptor */
+	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+
+	/* Load iv */
+	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+		   LDST_CLASS_1_CCB | tfm->ivsize);
+
+	/* Load operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+	/* ablkcipher_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+
+	init_sh_desc(desc, HDR_SHARE_WAIT);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+			  ctx->enckeylen, CLASS_1 |
+			  KEY_DEST_CLASS_REG);
+
+	/* For aead, only propagate error immediately if shared */
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, key_jump_cmd);
+	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+	set_jump_tgt_here(desc, jump_cmd);
+
+	/* load IV */
+	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+		   LDST_CLASS_1_CCB | tfm->ivsize);
+
+	/* Choose operation */
+	append_dec_op1(desc, ctx->class1_alg_type);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+	/* Wait for key to load before allowing error propagation */
+	append_dec_shr_done(desc);
+
+	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	return ret;
+}
+
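+/*
+ * h/w scatter/gather ("link table") entry layout as consumed by the
+ * SEC4 DMA engine; entries are built from a scatterlist by
+ * sg_to_link_tbl() below
+ */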
+struct link_tbl_entry {
+	u64 ptr;
+	u32 len;
+	u8 reserved;
+	u8 buf_pool_id;
+	u16 offset;
+};
+
+/*
+ * aead_edesc - s/w-extended aead descriptor
+ * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @link_tbl_bytes: length of dma mapped link_tbl space
+ * @link_tbl_dma: bus physical mapped address of h/w link table
+ * @link_tbl: cpu-side pointer to the h/w link table
+ * @hw_desc: the h/w job descriptor (variable length; must not exceed
+ *	     MAX_CAAM_DESCSIZE), followed by any referenced link tables
+ */
+struct aead_edesc {
+	int assoc_nents;
+	int src_nents;
+	int dst_nents;
+	dma_addr_t iv_dma;
+	int link_tbl_bytes;
+	dma_addr_t link_tbl_dma;
+	struct link_tbl_entry *link_tbl;
+	u32 hw_desc[0];
+};
+
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @link_tbl_bytes: length of dma mapped link_tbl space
+ * @link_tbl_dma: bus physical mapped address of h/w link table
+ * @link_tbl: cpu-side pointer to the h/w link table
+ * @hw_desc: the h/w job descriptor (variable length; must not exceed
+ *	     MAX_CAAM_DESCSIZE), followed by any referenced link tables
+ */
+struct ablkcipher_edesc {
+	int src_nents;
+	int dst_nents;
+	dma_addr_t iv_dma;
+	int link_tbl_bytes;
+	dma_addr_t link_tbl_dma;
+	struct link_tbl_entry *link_tbl;
+	u32 hw_desc[0];
+};
+
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+		       struct scatterlist *dst, int src_nents, int dst_nents,
+		       dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
+		       int link_tbl_bytes)
+{
+	if (unlikely(dst != src)) {
+		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
+		dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
+	} else {
+		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
+	}
+
+	if (iv_dma)
+		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+	if (link_tbl_bytes)
+		dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes,
+				 DMA_TO_DEVICE);
+}
+
+static void aead_unmap(struct device *dev,
+		       struct aead_edesc *edesc,
+		       struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	int ivsize = crypto_aead_ivsize(aead);
+
+	dma_unmap_sg(dev, req->assoc, edesc->assoc_nents ? : 1, DMA_TO_DEVICE);
+
+	caam_unmap(dev, req->src, req->dst,
+		   edesc->src_nents, edesc->dst_nents,
+		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
+		   edesc->link_tbl_bytes);
+}
+
+static void ablkcipher_unmap(struct device *dev,
+			     struct ablkcipher_edesc *edesc,
+			     struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+	caam_unmap(dev, req->src, req->dst,
+		   edesc->src_nents, edesc->dst_nents,
+		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
+		   edesc->link_tbl_bytes);
+}
+
+static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				   void *context)
+{
+	struct aead_request *req = context;
+	struct aead_edesc *edesc;
+#ifdef DEBUG
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	int ivsize = crypto_aead_ivsize(aead);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = (struct aead_edesc *)((char *)desc -
+		 offsetof(struct aead_edesc, hw_desc));
+
+	if (err) {
+		char tmp[CAAM_ERROR_STR_MAX];
+
+		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+	}
+
+	aead_unmap(jrdev, edesc, req);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+		       req->assoclen, 1);
+	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
+		       edesc->src_nents ? 100 : ivsize, 1);
+	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       edesc->src_nents ? 100 : req->cryptlen +
+		       ctx->authsize + 4, 1);
+#endif
+
+	kfree(edesc);
+
+	aead_request_complete(req, err);
+}
+
+static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				   void *context)
+{
+	struct aead_request *req = context;
+	struct aead_edesc *edesc;
+#ifdef DEBUG
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	int ivsize = crypto_aead_ivsize(aead);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = (struct aead_edesc *)((char *)desc -
+		 offsetof(struct aead_edesc, hw_desc));
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+		       ivsize, 1);
+	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
+		       req->cryptlen, 1);
+#endif
+
+	if (err) {
+		char tmp[CAAM_ERROR_STR_MAX];
+
+		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+	}
+
+	aead_unmap(jrdev, edesc, req);
+
+	/* verify the h/w auth check passed; return -EBADMSG otherwise */
+	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
+		err = -EBADMSG;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4,
+		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
+		       sizeof(struct iphdr) + req->assoclen +
+		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
+		       ctx->authsize + 36, 1);
+	if (!err && edesc->link_tbl_bytes) {
+		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
+		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
+			sg->length + ctx->authsize + 16, 1);
+	}
+#endif
+
+	kfree(edesc);
+
+	aead_request_complete(req, err);
+}
+
+static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				   void *context)
+{
+	struct ablkcipher_request *req = context;
+	struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = (struct ablkcipher_edesc *)((char *)desc -
+		 offsetof(struct ablkcipher_edesc, hw_desc));
+
+	if (err) {
+		char tmp[CAAM_ERROR_STR_MAX];
+
+		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       edesc->src_nents > 1 ? 100 : ivsize, 1);
+	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+	ablkcipher_unmap(jrdev, edesc, req);
+	kfree(edesc);
+
+	ablkcipher_request_complete(req, err);
+}
+
+static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				    void *context)
+{
+	struct ablkcipher_request *req = context;
+	struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = (struct ablkcipher_edesc *)((char *)desc -
+		 offsetof(struct ablkcipher_edesc, hw_desc));
+	if (err) {
+		char tmp[CAAM_ERROR_STR_MAX];
+
+		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       ivsize, 1);
+	print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+	ablkcipher_unmap(jrdev, edesc, req);
+	kfree(edesc);
+
+	ablkcipher_request_complete(req, err);
+}
+
+static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
+			       dma_addr_t dma, u32 len, u32 offset)
+{
+	link_tbl_ptr->ptr = dma;
+	link_tbl_ptr->len = len;
+	link_tbl_ptr->reserved = 0;
+	link_tbl_ptr->buf_pool_id = 0;
+	link_tbl_ptr->offset = offset;
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
+		       sizeof(struct link_tbl_entry), 1);
+#endif
+}
+
+/*
+ * convert scatterlist to h/w link table format without setting the
+ * final bit; instead, return a pointer to the last entry written
+ */
+static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
+					     int sg_count, struct link_tbl_entry
+					     *link_tbl_ptr, u32 offset)
+{
+	while (sg_count) {
+		sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
+				   sg_dma_len(sg), offset);
+		link_tbl_ptr++;
+		sg = sg_next(sg);
+		sg_count--;
+	}
+	return link_tbl_ptr - 1;
+}
+
+/*
+ * convert scatterlist to h/w link table format and set the final bit
+ * on the last entry; the scatterlist must have been previously dma mapped
+ */
+static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
+				struct link_tbl_entry *link_tbl_ptr, u32 offset)
+{
+	link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
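+	/* bit 30 of the len word is the h/w "final link table entry" flag */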
+	link_tbl_ptr->len |= 0x40000000;
+}
+
+/*
+ * Fill in aead job descriptor
+ */
+static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
+			  struct aead_edesc *edesc,
+			  struct aead_request *req,
+			  bool all_contig, bool encrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	int ivsize = crypto_aead_ivsize(aead);
+	int authsize = ctx->authsize;
+	u32 *desc = edesc->hw_desc;
+	u32 out_options = 0, in_options;
+	dma_addr_t dst_dma, src_dma;
+	int len, link_tbl_index = 0;
+
+#ifdef DEBUG
+	debug("assoclen %d cryptlen %d authsize %d\n",
+	      req->assoclen, req->cryptlen, authsize);
+	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+		       req->assoclen, 1);
+	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+		       edesc->src_nents ? 100 : ivsize, 1);
+	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+			edesc->src_nents ? 100 : req->cryptlen, 1);
+	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
+		       desc_bytes(sh_desc), 1);
+#endif
+
+	len = desc_len(sh_desc);
+	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+	if (all_contig) {
+		src_dma = sg_dma_address(req->assoc);
+		in_options = 0;
+	} else {
+		src_dma = edesc->link_tbl_dma;
+		link_tbl_index += (edesc->assoc_nents ? : 1) + 1 +
+				  (edesc->src_nents ? : 1);
+		in_options = LDST_SGF;
+	}
+	if (encrypt)
+		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+				  req->cryptlen - authsize, in_options);
+	else
+		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+				  req->cryptlen, in_options);
+
+	if (likely(req->src == req->dst)) {
+		if (all_contig) {
+			dst_dma = sg_dma_address(req->src);
+		} else {
+			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+				  ((edesc->assoc_nents ? : 1) + 1);
+			out_options = LDST_SGF;
+		}
+	} else {
+		if (!edesc->dst_nents) {
+			dst_dma = sg_dma_address(req->dst);
+		} else {
+			dst_dma = edesc->link_tbl_dma +
+				  link_tbl_index *
+				  sizeof(struct link_tbl_entry);
+			out_options = LDST_SGF;
+		}
+	}
+	if (encrypt)
+		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+	else
+		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
+				   out_options);
+}
+
+/*
+ * Fill in aead givencrypt job descriptor
+ */
+static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
+			      struct aead_edesc *edesc,
+			      struct aead_request *req,
+			      int contig)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	int ivsize = crypto_aead_ivsize(aead);
+	int authsize = ctx->authsize;
+	u32 *desc = edesc->hw_desc;
+	u32 out_options = 0, in_options;
+	dma_addr_t dst_dma, src_dma;
+	int len, link_tbl_index = 0;
+
+#ifdef DEBUG
+	debug("assoclen %d cryptlen %d authsize %d\n",
+	      req->assoclen, req->cryptlen, authsize);
+	print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+		       req->assoclen, 1);
+	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+			edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
+	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
+		       desc_bytes(sh_desc), 1);
+#endif
+
+	len = desc_len(sh_desc);
+	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+	if (contig & GIV_SRC_CONTIG) {
+		src_dma = sg_dma_address(req->assoc);
+		in_options = 0;
+	} else {
+		src_dma = edesc->link_tbl_dma;
+		link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents;
+		in_options = LDST_SGF;
+	}
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+			  req->cryptlen - authsize, in_options);
+
+	if (contig & GIV_DST_CONTIG) {
+		dst_dma = edesc->iv_dma;
+	} else {
+		if (likely(req->src == req->dst)) {
+			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+				  edesc->assoc_nents;
+			out_options = LDST_SGF;
+		} else {
+			dst_dma = edesc->link_tbl_dma +
+				  link_tbl_index *
+				  sizeof(struct link_tbl_entry);
+			out_options = LDST_SGF;
+		}
+	}
+
+	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+}
+
+/*
+ * Fill in ablkcipher job descriptor
+ */
+static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+				struct ablkcipher_edesc *edesc,
+				struct ablkcipher_request *req,
+				bool iv_contig)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	u32 *desc = edesc->hw_desc;
+	u32 out_options = 0, in_options;
+	dma_addr_t dst_dma, src_dma;
+	int len, link_tbl_index = 0;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       ivsize, 1);
+	print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       edesc->src_nents ? 100 : req->nbytes, 1);
+#endif
+
+	len = desc_len(sh_desc);
+	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+	if (iv_contig) {
+		src_dma = edesc->iv_dma;
+		in_options = 0;
+	} else {
+		src_dma = edesc->link_tbl_dma;
+		link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents;
+		in_options = LDST_SGF;
+	}
+	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
+
+	if (likely(req->src == req->dst)) {
+		if (!edesc->src_nents && iv_contig) {
+			dst_dma = sg_dma_address(req->src);
+		} else {
+			dst_dma = edesc->link_tbl_dma +
+				sizeof(struct link_tbl_entry);
+			out_options = LDST_SGF;
+		}
+	} else {
+		if (!edesc->dst_nents) {
+			dst_dma = sg_dma_address(req->dst);
+		} else {
+			dst_dma = edesc->link_tbl_dma +
+				link_tbl_index * sizeof(struct link_tbl_entry);
+			out_options = LDST_SGF;
+		}
+	}
+	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
+}
+
+/*
+ * derive number of elements in scatterlist; returns 0 for a single,
+ * contiguous entry so callers can bypass the link table in that case
+ */
+static int sg_count(struct scatterlist *sg_list, int nbytes)
+{
+	struct scatterlist *sg = sg_list;
+	int sg_nents = 0;
+
+	while (nbytes > 0) {
+		sg_nents++;
+		nbytes -= sg->length;
+		if (!sg_is_last(sg) && (sg + 1)->length == 0)
+			BUG(); /* chained scatterlists are not supported */
+		sg = scatterwalk_sg_next(sg);
+	}
+
+	if (likely(sg_nents == 1))
+		return 0;
+
+	return sg_nents;
+}
+
+/*
+ * allocate and map the aead extended descriptor
+ */
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+					   int desc_bytes, bool *all_contig_ptr)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	int assoc_nents, src_nents, dst_nents = 0;
+	struct aead_edesc *edesc;
+	dma_addr_t iv_dma = 0;
+	int sgc;
+	bool all_contig = true;
+	int ivsize = crypto_aead_ivsize(aead);
+	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
+
+	assoc_nents = sg_count(req->assoc, req->assoclen);
+	src_nents = sg_count(req->src, req->cryptlen);
+
+	if (unlikely(req->dst != req->src))
+		dst_nents = sg_count(req->dst, req->cryptlen);
+
+	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
+			 DMA_TO_DEVICE);
+	if (likely(req->src == req->dst)) {
+		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+				 DMA_BIDIRECTIONAL);
+	} else {
+		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+				 DMA_TO_DEVICE);
+		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+				 DMA_FROM_DEVICE);
+	}
+
+	/* Check if data are contiguous */
+	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
+	    iv_dma || src_nents || iv_dma + ivsize !=
+	    sg_dma_address(req->src)) {
+		all_contig = false;
+		assoc_nents = assoc_nents ? : 1;
+		src_nents = src_nents ? : 1;
+		link_tbl_len = assoc_nents + 1 + src_nents;
+	}
+	link_tbl_len += dst_nents;
+
+	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
+			link_tbl_bytes, GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(jrdev, "could not allocate extended descriptor\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->assoc_nents = assoc_nents;
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->iv_dma = iv_dma;
+	edesc->link_tbl_bytes = link_tbl_bytes;
+	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
+			  desc_bytes;
+	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
+					     link_tbl_bytes, DMA_TO_DEVICE);
+	*all_contig_ptr = all_contig;
+
+	link_tbl_index = 0;
+	if (!all_contig) {
+		sg_to_link_tbl(req->assoc,
+			       (assoc_nents ? : 1),
+			       edesc->link_tbl +
+			       link_tbl_index, 0);
+		link_tbl_index += assoc_nents ? : 1;
+		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+				   iv_dma, ivsize, 0);
+		link_tbl_index += 1;
+		sg_to_link_tbl_last(req->src,
+				    (src_nents ? : 1),
+				    edesc->link_tbl +
+				    link_tbl_index, 0);
+		link_tbl_index += src_nents ? : 1;
+	}
+	if (dst_nents) {
+		sg_to_link_tbl_last(req->dst, dst_nents,
+				    edesc->link_tbl + link_tbl_index, 0);
+	}
+
+	return edesc;
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+	struct aead_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool all_contig;
+	u32 *desc;
+	int ret = 0;
+
+	req->cryptlen += ctx->authsize;
+
+	/* allocate extended descriptor */
+	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+				 CAAM_CMD_SZ, &all_contig);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
+		      all_contig, true);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+	struct aead_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool all_contig;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+				 CAAM_CMD_SZ, &all_contig);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       req->cryptlen, 1);
+#endif
+
+	/* Create and submit job descriptor*/
+	init_aead_job(ctx->sh_desc_dec,
+		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+/*
+ * allocate and map the aead extended descriptor for aead givencrypt
+ */
+static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
+					       *greq, int desc_bytes,
+					       u32 *contig_ptr)
+{
+	struct aead_request *req = &greq->areq;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	int assoc_nents, src_nents, dst_nents = 0;
+	struct aead_edesc *edesc;
+	dma_addr_t iv_dma = 0;
+	int sgc;
+	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
+	int ivsize = crypto_aead_ivsize(aead);
+	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
+
+	assoc_nents = sg_count(req->assoc, req->assoclen);
+	src_nents = sg_count(req->src, req->cryptlen);
+
+	if (unlikely(req->dst != req->src))
+		dst_nents = sg_count(req->dst, req->cryptlen);
+
+	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
+			 DMA_TO_DEVICE);
+	if (likely(req->src == req->dst)) {
+		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+				 DMA_BIDIRECTIONAL);
+	} else {
+		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+				 DMA_TO_DEVICE);
+		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+				 DMA_FROM_DEVICE);
+	}
+
+	/* Check if data are contiguous */
+	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
+	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
+		contig &= ~GIV_SRC_CONTIG;
+	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
+		contig &= ~GIV_DST_CONTIG;
+	if (unlikely(req->src != req->dst)) {
+		dst_nents = dst_nents ? : 1;
+		link_tbl_len += 1;
+	}
+	if (!(contig & GIV_SRC_CONTIG)) {
+		assoc_nents = assoc_nents ? : 1;
+		src_nents = src_nents ? : 1;
+		link_tbl_len += assoc_nents + 1 + src_nents;
+		if (likely(req->src == req->dst))
+			contig &= ~GIV_DST_CONTIG;
+	}
+	link_tbl_len += dst_nents;
+
+	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
+			link_tbl_bytes, GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(jrdev, "could not allocate extended descriptor\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->assoc_nents = assoc_nents;
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->iv_dma = iv_dma;
+	edesc->link_tbl_bytes = link_tbl_bytes;
+	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
+			  desc_bytes;
+	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
+					     link_tbl_bytes, DMA_TO_DEVICE);
+	*contig_ptr = contig;
+
+	link_tbl_index = 0;
+	if (!(contig & GIV_SRC_CONTIG)) {
+		sg_to_link_tbl(req->assoc, assoc_nents,
+			       edesc->link_tbl +
+			       link_tbl_index, 0);
+		link_tbl_index += assoc_nents;
+		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+				   iv_dma, ivsize, 0);
+		link_tbl_index += 1;
+		sg_to_link_tbl_last(req->src, src_nents,
+				    edesc->link_tbl +
+				    link_tbl_index, 0);
+		link_tbl_index += src_nents;
+	}
+	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
+		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+				   iv_dma, ivsize, 0);
+		link_tbl_index += 1;
+		sg_to_link_tbl_last(req->dst, dst_nents,
+				    edesc->link_tbl + link_tbl_index, 0);
+	}
+
+	return edesc;
+}
+
+static int aead_givencrypt(struct aead_givcrypt_request *areq)
+{
+	struct aead_request *req = &areq->areq;
+	struct aead_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	u32 contig;
+	u32 *desc;
+	int ret = 0;
+
+	req->cryptlen += ctx->authsize;
+
+	/* allocate extended descriptor */
+	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
+				     CAAM_CMD_SZ, &contig);
+
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       req->cryptlen, 1);
+#endif
+
+	/* Create and submit job descriptor*/
+	init_aead_giv_job(ctx->sh_desc_givenc,
+			  ctx->sh_desc_givenc_dma, edesc, req, contig);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+/*
+ * allocate and map the ablkcipher extended descriptor for ablkcipher
+ */
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+						       *req, int desc_bytes,
+						       bool *iv_contig_out)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, dst_nents = 0, link_tbl_bytes;
+	struct ablkcipher_edesc *edesc;
+	dma_addr_t iv_dma = 0;
+	bool iv_contig = false;
+	int sgc;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	int link_tbl_index;
+
+	src_nents = sg_count(req->src, req->nbytes);
+
+	if (unlikely(req->dst != req->src))
+		dst_nents = sg_count(req->dst, req->nbytes);
+
+	if (likely(req->src == req->dst)) {
+		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+				 DMA_BIDIRECTIONAL);
+	} else {
+		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+				 DMA_TO_DEVICE);
+		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+				 DMA_FROM_DEVICE);
+	}
+
+	/*
+	 * Check if iv can be contiguous with source and destination.
+	 * If so, include it. If not, create scatterlist.
+	 */
+	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
+		iv_contig = true;
+	else
+		src_nents = src_nents ? : 1;
+	link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+			 sizeof(struct link_tbl_entry);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
+			link_tbl_bytes, GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(jrdev, "could not allocate extended descriptor\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->link_tbl_bytes = link_tbl_bytes;
+	edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+			  desc_bytes;
+
+	link_tbl_index = 0;
+	if (!iv_contig) {
+		sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0);
+		sg_to_link_tbl_last(req->src, src_nents,
+				    edesc->link_tbl + 1, 0);
+		link_tbl_index += 1 + src_nents;
+	}
+
+	if (unlikely(dst_nents)) {
+		sg_to_link_tbl_last(req->dst, dst_nents,
+			edesc->link_tbl + link_tbl_index, 0);
+	}
+
+	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
+					     link_tbl_bytes, DMA_TO_DEVICE);
+	edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
+		       link_tbl_bytes, 1);
+#endif
+
+	*iv_contig_out = iv_contig;
+	return edesc;
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	bool iv_contig;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+				       CAAM_CMD_SZ, &iv_contig);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor*/
+	init_ablkcipher_job(ctx->sh_desc_enc,
+		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	bool iv_contig;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+				       CAAM_CMD_SZ, &iv_contig);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor*/
+	init_ablkcipher_job(ctx->sh_desc_dec,
+		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
+	desc = edesc->hw_desc;
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+#define template_aead		template_u.aead
+#define template_ablkcipher	template_u.ablkcipher
+struct caam_alg_template {
+	char name[CRYPTO_MAX_ALG_NAME];
+	char driver_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int blocksize;
+	u32 type;
+	union {
+		struct ablkcipher_alg ablkcipher;
+		struct aead_alg aead;
+		struct blkcipher_alg blkcipher;
+		struct cipher_alg cipher;
+		struct compress_alg compress;
+		struct rng_alg rng;
+	} template_u;
+	u32 class1_alg_type;
+	u32 class2_alg_type;
+	u32 alg_op;
+};
+
+static struct caam_alg_template driver_algs[] = {
+	/* single-pass ipsec_esp descriptor */
+	{
+		.name = "authenc(hmac(md5),cbc(aes))",
+		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha1),cbc(aes))",
+		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha224),cbc(aes))",
+		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha256),cbc(aes))",
+		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha384),cbc(aes))",
+		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha512),cbc(aes))",
+		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(md5),cbc(des3_ede))",
+		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha1),cbc(des3_ede))",
+		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha224),cbc(des3_ede))",
+		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha256),cbc(des3_ede))",
+		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha384),cbc(des3_ede))",
+		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha512),cbc(des3_ede))",
+		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(md5),cbc(des))",
+		.driver_name = "authenc-hmac-md5-cbc-des-caam",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha1),cbc(des))",
+		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha224),cbc(des))",
+		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha256),cbc(des))",
+		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha384),cbc(des))",
+		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha512),cbc(des))",
+		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+	},
+	/* ablkcipher descriptor */
+	{
+		.name = "cbc(aes)",
+		.driver_name = "cbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "eseqiv",
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "cbc(des3_ede)",
+		.driver_name = "cbc-3des-caam",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "eseqiv",
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "cbc(des)",
+		.driver_name = "cbc-des-caam",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "eseqiv",
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+	}
+};
+
+struct caam_crypto_alg {
+	struct list_head entry;
+	struct device *ctrldev;
+	int class1_alg_type;
+	int class2_alg_type;
+	int alg_op;
+	struct crypto_alg crypto_alg;
+};
+
+static int caam_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct caam_crypto_alg *caam_alg =
+		 container_of(alg, struct caam_crypto_alg, crypto_alg);
+	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
+	int tgt_jr = atomic_inc_return(&priv->tfm_count);
+
+	/*
+	 * distribute tfms across job rings to ensure in-order
+	 * crypto request processing per tfm
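+	 * (e.g. with two job rings, successive tfms map to rings
+	 * 0, 0, 1, 1, 0, 0, ... via (tgt_jr / 2) % num_jrs_for_algapi)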
+	 */
+	ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];
+
+	/* copy descriptor header template value */
+	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
+	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
+	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
+
+	return 0;
+}
+
+static void caam_cra_exit(struct crypto_tfm *tfm)
+{
+	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (ctx->sh_desc_enc_dma &&
+	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
+		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
+				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
+	if (ctx->sh_desc_dec_dma &&
+	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
+		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
+				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
+	if (ctx->sh_desc_givenc_dma &&
+	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
+		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
+				 desc_bytes(ctx->sh_desc_givenc),
+				 DMA_TO_DEVICE);
+}
+
+static void __exit caam_algapi_exit(void)
+{
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	struct caam_drv_private *priv;
+	struct caam_crypto_alg *t_alg, *n;
+	int i, err;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node)
+		return;
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev)
+		return;
+
+	ctrldev = &pdev->dev;
+	of_node_put(dev_node);
+	priv = dev_get_drvdata(ctrldev);
+
+	if (!priv->alg_list.next)
+		return;
+
+	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
+		crypto_unregister_alg(&t_alg->crypto_alg);
+		list_del(&t_alg->entry);
+		kfree(t_alg);
+	}
+
+	for (i = 0; i < priv->total_jobrs; i++) {
+		err = caam_jr_deregister(priv->algapi_jr[i]);
+		if (err < 0)
+			break;
+	}
+	kfree(priv->algapi_jr);
+}
+
+static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
+					      struct caam_alg_template
+					      *template)
+{
+	struct caam_crypto_alg *t_alg;
+	struct crypto_alg *alg;
+
+	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
+	if (!t_alg) {
+		dev_err(ctrldev, "failed to allocate t_alg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	alg = &t_alg->crypto_alg;
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 template->driver_name);
+	alg->cra_module = THIS_MODULE;
+	alg->cra_init = caam_cra_init;
+	alg->cra_exit = caam_cra_exit;
+	alg->cra_priority = CAAM_CRA_PRIORITY;
+	alg->cra_blocksize = template->blocksize;
+	alg->cra_alignmask = 0;
+	alg->cra_ctxsize = sizeof(struct caam_ctx);
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+			 template->type;
+	switch (template->type) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		alg->cra_type = &crypto_ablkcipher_type;
+		alg->cra_ablkcipher = template->template_ablkcipher;
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+		alg->cra_type = &crypto_aead_type;
+		alg->cra_aead = template->template_aead;
+		break;
+	}
+
+	t_alg->class1_alg_type = template->class1_alg_type;
+	t_alg->class2_alg_type = template->class2_alg_type;
+	t_alg->alg_op = template->alg_op;
+	t_alg->ctrldev = ctrldev;
+
+	return t_alg;
+}
+
+static int __init caam_algapi_init(void)
+{
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev, **jrdev;
+	struct caam_drv_private *priv;
+	int i = 0, err = 0;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node)
+		return -ENODEV;
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev)
+		return -ENODEV;
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	INIT_LIST_HEAD(&priv->alg_list);
+
+	jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
+	if (!jrdev)
+		return -ENOMEM;
+
+	for (i = 0; i < priv->total_jobrs; i++) {
+		err = caam_jr_register(ctrldev, &jrdev[i]);
+		if (err < 0)
+			break;
+	}
+	if (err < 0 && i == 0) {
+		dev_err(ctrldev, "algapi error in job ring registration: %d\n",
+			err);
+		kfree(jrdev);
+		return err;
+	}
+
+	priv->num_jrs_for_algapi = i;
+	priv->algapi_jr = jrdev;
+	atomic_set(&priv->tfm_count, -1);
+
+	/* register crypto algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		/* TODO: check if h/w supports alg */
+		struct caam_crypto_alg *t_alg;
+
+		t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			dev_warn(ctrldev, "%s alg allocation failed\n",
+				 driver_algs[i].driver_name);
+			continue;
+		}
+
+		err = crypto_register_alg(&t_alg->crypto_alg);
+		if (err) {
+			dev_warn(ctrldev, "%s alg registration failed\n",
+				t_alg->crypto_alg.cra_driver_name);
+			kfree(t_alg);
+		} else
+			list_add_tail(&t_alg->entry, &priv->alg_list);
+	}
+	/* dev_node was put above; use the of_node held by pdev instead */
+	if (!list_empty(&priv->alg_list))
+		dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
+			 (char *)of_get_property(ctrldev->of_node,
+						 "compatible", NULL));
+
+	return err;
+}
+
+module_init(caam_algapi_init);
+module_exit(caam_algapi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM support for crypto API");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/ap/os/linux/linux-3.4.x/drivers/crypto/caam/compat.h b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/compat.h
new file mode 100644
index 0000000..a63bc65
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/compat.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef CAAM_COMPAT_H
+#define CAAM_COMPAT_H
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <linux/hw_random.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/circ_buf.h>
+#include <net/xfrm.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+
+#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/ap/os/linux/linux-3.4.x/drivers/crypto/caam/ctrl.c b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/ctrl.c
new file mode 100644
index 0000000..c5f61c5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/ctrl.c
@@ -0,0 +1,247 @@
+/*
+ * CAAM control-plane driver backend
+ * Controller-level driver, kernel property detection, initialization
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "intern.h"
+#include "jr.h"
+
+static int caam_remove(struct platform_device *pdev)
+{
+	struct device *ctrldev;
+	struct caam_drv_private *ctrlpriv;
+	struct caam_drv_private_jr *jrpriv;
+	struct caam_full __iomem *topregs;
+	int ring, ret = 0;
+
+	ctrldev = &pdev->dev;
+	ctrlpriv = dev_get_drvdata(ctrldev);
+	topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+
+	/* shut down JobRs */
+	for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
+		ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
+		jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
+		irq_dispose_mapping(jrpriv->irq);
+	}
+
+	/* Shut down debug views */
+#ifdef CONFIG_DEBUG_FS
+	debugfs_remove_recursive(ctrlpriv->dfs_root);
+#endif
+
+	/* Unmap controller region */
+	iounmap(&topregs->ctrl);
+
+	kfree(ctrlpriv->jrdev);
+	kfree(ctrlpriv);
+
+	return ret;
+}
+
+/* Probe routine for CAAM top (controller) level */
+static int caam_probe(struct platform_device *pdev)
+{
+	int ring, rspec;
+	struct device *dev;
+	struct device_node *nprop, *np;
+	struct caam_ctrl __iomem *ctrl;
+	struct caam_full __iomem *topregs;
+	struct caam_drv_private *ctrlpriv;
+#ifdef CONFIG_DEBUG_FS
+	struct caam_perfmon *perfmon;
+#endif
+
+	ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
+	if (!ctrlpriv)
+		return -ENOMEM;
+
+	dev = &pdev->dev;
+	dev_set_drvdata(dev, ctrlpriv);
+	ctrlpriv->pdev = pdev;
+	nprop = pdev->dev.of_node;
+
+	/* Get configuration properties from device tree */
+	/* First, get register page */
+	ctrl = of_iomap(nprop, 0);
+	if (ctrl == NULL) {
+		dev_err(dev, "caam: of_iomap() failed\n");
+		kfree(ctrlpriv);
+		return -ENOMEM;
+	}
+	ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
+
+	/* topregs used to derive pointers to CAAM sub-blocks only */
+	topregs = (struct caam_full __iomem *)ctrl;
+
+	/* Get the IRQ of the controller (for security violations only) */
+	ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);
+
+	/*
+	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
+	 * 36-bit pointers in master configuration register
+	 */
+	setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
+		  (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+
+	if (sizeof(dma_addr_t) == sizeof(u64))
+		dma_set_mask(dev, DMA_BIT_MASK(36));
+
+	/*
+	 * Detect and enable JobRs
+ * First, find out how many rings are spec'ed, allocate references
+	 * for all, then go probe each one.
+	 */
+	rspec = 0;
+	for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
+		rspec++;
+	ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
+	if (ctrlpriv->jrdev == NULL) {
+		iounmap(&topregs->ctrl);
+		kfree(ctrlpriv);
+		return -ENOMEM;
+	}
+
+	ring = 0;
+	ctrlpriv->total_jobrs = 0;
+	for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
+		caam_jr_probe(pdev, np, ring);
+		ctrlpriv->total_jobrs++;
+		ring++;
+	}
+
+	/* Check to see if QI present. If so, enable */
+	ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
+				  CTPR_QI_MASK);
+	if (ctrlpriv->qi_present) {
+		ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
+		/* This is all that's required to physically enable QI */
+		wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
+	}
+
+	/* If no QI and no rings specified, quit and go home */
+	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
+		dev_err(dev, "no queues configured, terminating\n");
+		caam_remove(pdev);
+		return -ENOMEM;
+	}
+
+	/* NOTE: RTIC detection ought to go here, around Si time */
+
+	/* Initialize queue allocator lock */
+	spin_lock_init(&ctrlpriv->jr_alloc_lock);
+
+	/* Report "alive" for developer to see */
+	dev_info(dev, "device ID = 0x%016llx\n",
+		 rd_reg64(&topregs->ctrl.perfmon.caam_id));
+	dev_info(dev, "job rings = %d, qi = %d\n",
+		 ctrlpriv->total_jobrs, ctrlpriv->qi_present);
+
+#ifdef CONFIG_DEBUG_FS
+	/*
+	 * FIXME: needs better naming distinction, as some amalgamation of
+	 * "caam" and nprop->full_name. The OF name isn't distinctive,
+	 * but does separate instances
+	 */
+	perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
+
+	ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
+	ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
+
+	/* Controller-level - performance monitor counters */
+	ctrlpriv->ctl_rq_dequeued =
+		debugfs_create_u64("rq_dequeued",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   ctrlpriv->ctl, &perfmon->req_dequeued);
+	ctrlpriv->ctl_ob_enc_req =
+		debugfs_create_u64("ob_rq_encrypted",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   ctrlpriv->ctl, &perfmon->ob_enc_req);
+	ctrlpriv->ctl_ib_dec_req =
+		debugfs_create_u64("ib_rq_decrypted",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   ctrlpriv->ctl, &perfmon->ib_dec_req);
+	ctrlpriv->ctl_ob_enc_bytes =
+		debugfs_create_u64("ob_bytes_encrypted",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   ctrlpriv->ctl, &perfmon->ob_enc_bytes);
+	ctrlpriv->ctl_ob_prot_bytes =
+		debugfs_create_u64("ob_bytes_protected",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   ctrlpriv->ctl, &perfmon->ob_prot_bytes);
+	ctrlpriv->ctl_ib_dec_bytes =
+		debugfs_create_u64("ib_bytes_decrypted",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   ctrlpriv->ctl, &perfmon->ib_dec_bytes);
+	ctrlpriv->ctl_ib_valid_bytes =
+		debugfs_create_u64("ib_bytes_validated",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   ctrlpriv->ctl, &perfmon->ib_valid_bytes);
+
+	/* Controller level - global status values */
+	ctrlpriv->ctl_faultaddr =
+		debugfs_create_u64("fault_addr",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   ctrlpriv->ctl, &perfmon->faultaddr);
+	ctrlpriv->ctl_faultdetail =
+		debugfs_create_u32("fault_detail",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   ctrlpriv->ctl, &perfmon->faultdetail);
+	ctrlpriv->ctl_faultstatus =
+		debugfs_create_u32("fault_status",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   ctrlpriv->ctl, &perfmon->status);
+
+	/* Internal covering keys (useful in non-secure mode only) */
+	ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
+	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+	ctrlpriv->ctl_kek = debugfs_create_blob("kek",
+						S_IRUSR |
+						S_IRGRP | S_IROTH,
+						ctrlpriv->ctl,
+						&ctrlpriv->ctl_kek_wrap);
+
+	ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
+	ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+	ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
+						 S_IRUSR |
+						 S_IRGRP | S_IROTH,
+						 ctrlpriv->ctl,
+						 &ctrlpriv->ctl_tkek_wrap);
+
+	ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
+	ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+	ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
+						 S_IRUSR |
+						 S_IRGRP | S_IROTH,
+						 ctrlpriv->ctl,
+						 &ctrlpriv->ctl_tdsk_wrap);
+#endif
+	return 0;
+}
+
+static const struct of_device_id caam_match[] = {
+	{
+		.compatible = "fsl,sec-v4.0",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, caam_match);
+
+static struct platform_driver caam_driver = {
+	.driver = {
+		.name = "caam",
+		.owner = THIS_MODULE,
+		.of_match_table = caam_match,
+	},
+	.probe       = caam_probe,
+	.remove      = __devexit_p(caam_remove),
+};
+
+module_platform_driver(caam_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM request backend");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/ap/os/linux/linux-3.4.x/drivers/crypto/caam/desc.h b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/desc.h
new file mode 100644
index 0000000..a17c295
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/desc.h
@@ -0,0 +1,1604 @@
+/*
+ * CAAM descriptor composition header
+ * Definitions to support CAAM descriptor instruction generation
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef DESC_H
+#define DESC_H
+
+/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
+#define MAX_CAAM_DESCSIZE	64
+
+/* Block size of any entity covered/uncovered with a KEK/TKEK */
+#define KEK_BLOCKSIZE		16
+
+/*
+ * Supported descriptor command types as they show up
+ * inside a descriptor command word.
+ */
+#define CMD_SHIFT		27
+#define CMD_MASK		0xf8000000
+
+#define CMD_KEY			(0x00 << CMD_SHIFT)
+#define CMD_SEQ_KEY		(0x01 << CMD_SHIFT)
+#define CMD_LOAD		(0x02 << CMD_SHIFT)
+#define CMD_SEQ_LOAD		(0x03 << CMD_SHIFT)
+#define CMD_FIFO_LOAD		(0x04 << CMD_SHIFT)
+#define CMD_SEQ_FIFO_LOAD	(0x05 << CMD_SHIFT)
+#define CMD_STORE		(0x0a << CMD_SHIFT)
+#define CMD_SEQ_STORE		(0x0b << CMD_SHIFT)
+#define CMD_FIFO_STORE		(0x0c << CMD_SHIFT)
+#define CMD_SEQ_FIFO_STORE	(0x0d << CMD_SHIFT)
+#define CMD_MOVE_LEN		(0x0e << CMD_SHIFT)
+#define CMD_MOVE		(0x0f << CMD_SHIFT)
+#define CMD_OPERATION		(0x10 << CMD_SHIFT)
+#define CMD_SIGNATURE		(0x12 << CMD_SHIFT)
+#define CMD_JUMP		(0x14 << CMD_SHIFT)
+#define CMD_MATH		(0x15 << CMD_SHIFT)
+#define CMD_DESC_HDR		(0x16 << CMD_SHIFT)
+#define CMD_SHARED_DESC_HDR	(0x17 << CMD_SHIFT)
+#define CMD_SEQ_IN_PTR		(0x1e << CMD_SHIFT)
+#define CMD_SEQ_OUT_PTR		(0x1f << CMD_SHIFT)
+
+/* General-purpose class selector for all commands */
+#define CLASS_SHIFT		25
+#define CLASS_MASK		(0x03 << CLASS_SHIFT)
+
+#define CLASS_NONE		(0x00 << CLASS_SHIFT)
+#define CLASS_1			(0x01 << CLASS_SHIFT)
+#define CLASS_2			(0x02 << CLASS_SHIFT)
+#define CLASS_BOTH		(0x03 << CLASS_SHIFT)
+
+/*
+ * Descriptor header command constructs
+ * Covers shared, job, and trusted descriptor headers
+ */
+
+/*
+ * Do Not Run - marks a descriptor inexecutable if there was
+ * a preceding error somewhere
+ */
+#define HDR_DNR			0x01000000
+
+/*
+ * ONE - should always be set. Combination of ONE (always
+ * set) and ZRO (always clear) forms an endianness sanity check
+ */
+#define HDR_ONE			0x00800000
+#define HDR_ZRO			0x00008000
+
+/* Start Index or SharedDesc Length */
+#define HDR_START_IDX_MASK	0x3f
+#define HDR_START_IDX_SHIFT	16
+
+/* If shared descriptor header, 6-bit length */
+#define HDR_DESCLEN_SHR_MASK	0x3f
+
+/* If non-shared header, 7-bit length */
+#define HDR_DESCLEN_MASK	0x7f
+
+/* This is a TrustedDesc (if not SharedDesc) */
+#define HDR_TRUSTED		0x00004000
+
+/* Make into TrustedDesc (if not SharedDesc) */
+#define HDR_MAKE_TRUSTED	0x00002000
+
+/* Save context if self-shared (if SharedDesc) */
+#define HDR_SAVECTX		0x00001000
+
+/* Next item points to SharedDesc */
+#define HDR_SHARED		0x00001000
+
+/*
+ * Reverse Execution Order - execute JobDesc first, then
+ * execute SharedDesc (normally SharedDesc goes first).
+ */
+#define HDR_REVERSE		0x00000800
+
+/* Propagate DNR property to SharedDesc */
+#define HDR_PROP_DNR		0x00000800
+
+/* JobDesc/SharedDesc share property */
+#define HDR_SD_SHARE_MASK	0x03
+#define HDR_SD_SHARE_SHIFT	8
+#define HDR_JD_SHARE_MASK	0x07
+#define HDR_JD_SHARE_SHIFT	8
+
+#define HDR_SHARE_NEVER		(0x00 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_WAIT		(0x01 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_SERIAL	(0x02 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_ALWAYS	(0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_DEFER		(0x04 << HDR_SD_SHARE_SHIFT)
+
+/* JobDesc/SharedDesc descriptor length */
+#define HDR_JD_LENGTH_MASK	0x7f
+#define HDR_SD_LENGTH_MASK	0x3f
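+
+/*
+ * Illustrative composition (not a define used by this driver): a job
+ * descriptor header for a 4-word descriptor with sharing disabled
+ * could be encoded as
+ *   CMD_DESC_HDR | HDR_ONE | HDR_SHARE_NEVER | (4 & HDR_JD_LENGTH_MASK)
+ */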
+
+/*
+ * KEY/SEQ_KEY Command Constructs
+ */
+
+/* Key Destination Class: 01 = Class 1, 02 = Class 2 */
+#define KEY_DEST_CLASS_SHIFT	25	/* use CLASS_1 or CLASS_2 */
+#define KEY_DEST_CLASS_MASK	(0x03 << KEY_DEST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define KEY_SGF			0x01000000
+#define KEY_VLF			0x01000000
+
+/* Immediate - Key follows command in the descriptor */
+#define KEY_IMM			0x00800000
+
+/*
+ * Encrypted - Key is encrypted either with the KEK, or
+ * with the TDKEK if TK is set
+ */
+#define KEY_ENC			0x00400000
+
+/*
+ * No Write Back - Do not allow key to be FIFO STOREd
+ */
+#define KEY_NWB			0x00200000
+
+/*
+ * Enhanced Encryption of Key
+ */
+#define KEY_EKT			0x00100000
+
+/*
+ * Encrypted with Trusted Key
+ */
+#define KEY_TK			0x00008000
+
+/*
+ * KDEST - Key Destination: 0 - class key register,
+ * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key
+ */
+#define KEY_DEST_SHIFT		16
+#define KEY_DEST_MASK		(0x03 << KEY_DEST_SHIFT)
+
+#define KEY_DEST_CLASS_REG	(0x00 << KEY_DEST_SHIFT)
+#define KEY_DEST_PKHA_E		(0x01 << KEY_DEST_SHIFT)
+#define KEY_DEST_AFHA_SBOX	(0x02 << KEY_DEST_SHIFT)
+#define KEY_DEST_MDHA_SPLIT	(0x03 << KEY_DEST_SHIFT)
+
+/* Length in bytes */
+#define KEY_LENGTH_MASK		0x000003ff
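+
+/*
+ * Illustrative composition (not a define used by this driver): an
+ * immediate 16-byte class 1 key load could be encoded as
+ *   CMD_KEY | CLASS_1 | KEY_DEST_CLASS_REG | KEY_IMM |
+ *   (16 & KEY_LENGTH_MASK)
+ * with the 16 key bytes following the command word in the descriptor.
+ */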
+
+/*
+ * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
+ */
+
+/*
+ * Load/Store Destination: 0 = class independent CCB,
+ * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
+ */
+#define LDST_CLASS_SHIFT	25
+#define LDST_CLASS_MASK		(0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_IND_CCB	(0x00 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_1_CCB	(0x01 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_2_CCB	(0x02 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_DECO		(0x03 << LDST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define LDST_SGF		0x01000000
+#define LDST_VLF		LDST_SGF
+
+/* Immediate - Key follows this command in descriptor */
+#define LDST_IMM_MASK		1
+#define LDST_IMM_SHIFT		23
+#define LDST_IMM		(LDST_IMM_MASK << LDST_IMM_SHIFT)
+
+/* SRC/DST - Destination for LOAD, Source for STORE */
+#define LDST_SRCDST_SHIFT	16
+#define LDST_SRCDST_MASK	(0x7f << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_BYTE_CONTEXT	(0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_KEY		(0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_INFIFO		(0x7c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_OUTFIFO	(0x7e << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_WORD_MODE_REG	(0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_KEYSZ_REG	(0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DATASZ_REG	(0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ICVSZ_REG	(0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CHACTRL	(0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECOCTRL	(0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IRQCTRL	(0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_PCLOVRD	(0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLRW		(0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH0	(0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STAT		(0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH1	(0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH2	(0x0a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_AAD_SZ	(0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH3	(0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_ICV_SZ	(0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ALTDS_CLASS1	(0x0f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_A_SZ	(0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_B_SZ	(0x11 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_N_SZ	(0x12 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_E_SZ	(0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF	(0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO	(0x7a << LDST_SRCDST_SHIFT)
+
+/* Offset in source/destination */
+#define LDST_OFFSET_SHIFT	8
+#define LDST_OFFSET_MASK	(0xff << LDST_OFFSET_SHIFT)
+
+/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
+/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
+#define LDOFF_CHG_SHARE_SHIFT		0
+#define LDOFF_CHG_SHARE_MASK		(0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER		(0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP		(0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP	(0x3 << LDOFF_CHG_SHARE_SHIFT)
+
+#define LDOFF_ENABLE_AUTO_NFIFO		(1 << 2)
+#define LDOFF_DISABLE_AUTO_NFIFO	(1 << 3)
+
+#define LDOFF_CHG_NONSEQLIODN_SHIFT	4
+#define LDOFF_CHG_NONSEQLIODN_MASK	(0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SEQ	(0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_NON_SEQ	(0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_TRUSTED	(0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+
+#define LDOFF_CHG_SEQLIODN_SHIFT	6
+#define LDOFF_CHG_SEQLIODN_MASK		(0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ		(0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_NON_SEQ	(0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_TRUSTED	(0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+
+/* Data length in bytes	*/
+#define LDST_LEN_SHIFT		0
+#define LDST_LEN_MASK		(0xff << LDST_LEN_SHIFT)
+
+/* Special Length definitions when dst=deco-ctrl */
+#define LDLEN_ENABLE_OSL_COUNT		(1 << 7)
+#define LDLEN_RST_CHA_OFIFO_PTR		(1 << 6)
+#define LDLEN_RST_OFIFO			(1 << 5)
+#define LDLEN_SET_OFIFO_OFF_VALID	(1 << 4)
+#define LDLEN_SET_OFIFO_OFF_RSVD	(1 << 3)
+#define LDLEN_SET_OFIFO_OFFSET_SHIFT	0
+#define LDLEN_SET_OFIFO_OFFSET_MASK	(3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+
+/*
+ * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
+ * Command Constructs
+ */
+
+/*
+ * Load Destination: 0 = skip (SEQ_FIFO_LOAD only),
+ * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
+ * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
+ */
+#define FIFOLD_CLASS_SHIFT	25
+#define FIFOLD_CLASS_MASK	(0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SKIP	(0x00 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS1	(0x01 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS2	(0x02 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_BOTH	(0x03 << FIFOLD_CLASS_SHIFT)
+
+#define FIFOST_CLASS_SHIFT	25
+#define FIFOST_CLASS_MASK	(0x03 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_NORMAL	(0x00 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS1KEY	(0x01 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS2KEY	(0x02 << FIFOST_CLASS_SHIFT)
+
+/*
+ * Scatter-Gather Table/Variable Length Field
+ * If set for FIFO_LOAD, refers to a SG table. Within
+ * SEQ_FIFO_LOAD, is variable input sequence
+ */
+#define FIFOLDST_SGF_SHIFT	24
+#define FIFOLDST_SGF_MASK	(1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF_MASK	(1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF		(1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF		(1 << FIFOLDST_SGF_SHIFT)
+
+/* Immediate - Data follows command in descriptor */
+#define FIFOLD_IMM_SHIFT	23
+#define FIFOLD_IMM_MASK		(1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM		(1 << FIFOLD_IMM_SHIFT)
+
+/* Continue - Not the last FIFO store to come */
+#define FIFOST_CONT_SHIFT	23
+#define FIFOST_CONT_MASK	(1 << FIFOST_CONT_SHIFT)
+
+/*
+ * Extended Length - use 32-bit extended length that
+ * follows the pointer field. Illegal with IMM set
+ */
+#define FIFOLDST_EXT_SHIFT	22
+#define FIFOLDST_EXT_MASK	(1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT		(1 << FIFOLDST_EXT_SHIFT)
+
+/* Input data type.*/
+#define FIFOLD_TYPE_SHIFT	16
+#define FIFOLD_CONT_TYPE_SHIFT	19 /* shift past last-flush bits */
+#define FIFOLD_TYPE_MASK	(0x3f << FIFOLD_TYPE_SHIFT)
+
+/* PK types */
+#define FIFOLD_TYPE_PK		(0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_MASK	(0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A0	(0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A1	(0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A2	(0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A3	(0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B0	(0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B1	(0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B2	(0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B3	(0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_N	(0x08 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A	(0x0c << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B	(0x0d << FIFOLD_TYPE_SHIFT)
+
+/* Other types. Need to OR in last/flush bits as desired */
+#define FIFOLD_TYPE_MSG_MASK	(0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG		(0x10 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG1OUT2	(0x18 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IV		(0x20 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_BITDATA	(0x28 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_AAD		(0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ICV		(0x38 << FIFOLD_TYPE_SHIFT)
+
+/* Last/Flush bits for use with "other" types above */
+#define FIFOLD_TYPE_ACT_MASK	(0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOACTION	(0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_FLUSH1	(0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST1	(0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH	(0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2	(0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTH	(0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTHFL	(0x07 << FIFOLD_TYPE_SHIFT)
+
+#define FIFOLDST_LEN_MASK	0xffff
+#define FIFOLDST_EXT_LEN_MASK	0xffffffff
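+
+/*
+ * Illustrative composition (not a define used by this driver): a FIFO
+ * LOAD of 'len' message bytes into class 1, flagged as the last class 1
+ * data, could be encoded as
+ *   CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
+ *   FIFOLD_TYPE_LAST1 | (len & FIFOLDST_LEN_MASK)
+ */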
+
+/* Output data types */
+#define FIFOST_TYPE_SHIFT	16
+#define FIFOST_TYPE_MASK	(0x3f << FIFOST_TYPE_SHIFT)
+
+#define FIFOST_TYPE_PKHA_A0	 (0x00 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A1	 (0x01 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A2	 (0x02 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A3	 (0x03 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B0	 (0x04 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B1	 (0x05 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B2	 (0x06 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B3	 (0x07 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_N	 (0x08 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A	 (0x0c << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B	 (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_JKEK	 (0x22 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_TKEK	 (0x23 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_KEK	 (0x24 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_TKEK	 (0x25 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_KEK	 (0x26 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_TKEK	 (0x27 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_KEK	 (0x28 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGSTORE	 (0x34 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGFIFO	 (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SKIP	 (0x3f << FIFOST_TYPE_SHIFT)
+
+/*
+ * OPERATION Command Constructs
+ */
+
+/* Operation type selectors - OP TYPE */
+#define OP_TYPE_SHIFT		24
+#define OP_TYPE_MASK		(0x07 << OP_TYPE_SHIFT)
+
+#define OP_TYPE_UNI_PROTOCOL	(0x00 << OP_TYPE_SHIFT)
+#define OP_TYPE_PK		(0x01 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS1_ALG	(0x02 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS2_ALG	(0x04 << OP_TYPE_SHIFT)
+#define OP_TYPE_DECAP_PROTOCOL	(0x06 << OP_TYPE_SHIFT)
+#define OP_TYPE_ENCAP_PROTOCOL	(0x07 << OP_TYPE_SHIFT)
+
+/* ProtocolID selectors - PROTID */
+#define OP_PCLID_SHIFT		16
+#define OP_PCLID_MASK		(0xff << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
+#define OP_PCLID_IKEV1_PRF	(0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV2_PRF	(0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30_PRF	(0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10_PRF	(0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11_PRF	(0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10_PRF	(0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_PRF		(0x06 << OP_PCLID_SHIFT)
+#define OP_PCLID_BLOB		(0x0d << OP_PCLID_SHIFT)
+#define OP_PCLID_SECRETKEY	(0x11 << OP_PCLID_SHIFT)
+#define OP_PCLID_PUBLICKEYPAIR	(0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSASIGN	(0x15 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSAVERIFY	(0x16 << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
+#define OP_PCLID_IPSEC		(0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_SRTP		(0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_MACSEC		(0x03 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIFI		(0x04 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIMAX		(0x05 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30		(0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10		(0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11		(0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12		(0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS		(0x0c << OP_PCLID_SHIFT)
+
+/*
+ * ProtocolInfo selectors
+ */
+#define OP_PCLINFO_MASK				 0xffff
+
+/* for OP_PCLID_IPSEC */
+#define OP_PCL_IPSEC_CIPHER_MASK		 0xff00
+#define OP_PCL_IPSEC_AUTH_MASK			 0x00ff
+
+#define OP_PCL_IPSEC_DES_IV64			 0x0100
+#define OP_PCL_IPSEC_DES			 0x0200
+#define OP_PCL_IPSEC_3DES			 0x0300
+#define OP_PCL_IPSEC_AES_CBC			 0x0c00
+#define OP_PCL_IPSEC_AES_CTR			 0x0d00
+#define OP_PCL_IPSEC_AES_XTS			 0x1600
+#define OP_PCL_IPSEC_AES_CCM8			 0x0e00
+#define OP_PCL_IPSEC_AES_CCM12			 0x0f00
+#define OP_PCL_IPSEC_AES_CCM16			 0x1000
+#define OP_PCL_IPSEC_AES_GCM8			 0x1200
+#define OP_PCL_IPSEC_AES_GCM12			 0x1300
+#define OP_PCL_IPSEC_AES_GCM16			 0x1400
+
+#define OP_PCL_IPSEC_HMAC_NULL			 0x0000
+#define OP_PCL_IPSEC_HMAC_MD5_96		 0x0001
+#define OP_PCL_IPSEC_HMAC_SHA1_96		 0x0002
+#define OP_PCL_IPSEC_AES_XCBC_MAC_96		 0x0005
+#define OP_PCL_IPSEC_HMAC_MD5_128		 0x0006
+#define OP_PCL_IPSEC_HMAC_SHA1_160		 0x0007
+#define OP_PCL_IPSEC_HMAC_SHA2_256_128		 0x000c
+#define OP_PCL_IPSEC_HMAC_SHA2_384_192		 0x000d
+#define OP_PCL_IPSEC_HMAC_SHA2_512_256		 0x000e
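+
+/*
+ * Illustrative composition (not a define used by this driver): an IPsec
+ * ESP encapsulation using AES-CBC with HMAC-SHA1-96 could be encoded as
+ *   CMD_OPERATION | OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_IPSEC |
+ *   OP_PCL_IPSEC_AES_CBC | OP_PCL_IPSEC_HMAC_SHA1_96
+ */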
+
+/* For SRTP - OP_PCLID_SRTP */
+#define OP_PCL_SRTP_CIPHER_MASK			 0xff00
+#define OP_PCL_SRTP_AUTH_MASK			 0x00ff
+
+#define OP_PCL_SRTP_AES_CTR			 0x0d00
+
+#define OP_PCL_SRTP_HMAC_SHA1_160		 0x0007
+
+/* For SSL 3.0 - OP_PCLID_SSL30 */
+#define OP_PCL_SSL30_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_SSL30_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_SSL30_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_SSL30_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_SSL30_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_SSL30_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_SSL30_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_SSL30_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_SSL30_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_SSL30_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_SSL30_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_SSL30_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_SSL30_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_SSL30_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_SSL30_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_17		 0xc01f
+
+#define OP_PCL_SSL30_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_SSL30_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_SSL30_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_SSL30_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_SSL30_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_SSL30_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_SSL30_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_SSL30_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_SSL30_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_SSL30_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_SSL30_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_SSL30_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_SSL30_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_SSL30_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_SSL30_AES_256_CBC_SHA_17		 0xc022
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_MD5		 0x0023
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18	 0xc01c
+
+#define OP_PCL_SSL30_DES40_CBC_MD5		 0x0029
+
+#define OP_PCL_SSL30_DES_CBC_MD5		 0x0022
+
+#define OP_PCL_SSL30_DES40_CBC_SHA		 0x0008
+#define OP_PCL_SSL30_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_SSL30_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_SSL30_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_SSL30_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_SSL30_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_SSL30_DES40_CBC_SHA_7		 0x0026
+
+#define OP_PCL_SSL30_DES_CBC_SHA		 0x001e
+#define OP_PCL_SSL30_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_SSL30_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_SSL30_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_SSL30_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_SSL30_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_SSL30_DES_CBC_SHA_7		 0x001a
+
+#define OP_PCL_SSL30_RC4_128_MD5		 0x0024
+#define OP_PCL_SSL30_RC4_128_MD5_2		 0x0004
+#define OP_PCL_SSL30_RC4_128_MD5_3		 0x0018
+
+#define OP_PCL_SSL30_RC4_40_MD5			 0x002b
+#define OP_PCL_SSL30_RC4_40_MD5_2		 0x0003
+#define OP_PCL_SSL30_RC4_40_MD5_3		 0x0017
+
+#define OP_PCL_SSL30_RC4_128_SHA		 0x0020
+#define OP_PCL_SSL30_RC4_128_SHA_2		 0x008a
+#define OP_PCL_SSL30_RC4_128_SHA_3		 0x008e
+#define OP_PCL_SSL30_RC4_128_SHA_4		 0x0092
+#define OP_PCL_SSL30_RC4_128_SHA_5		 0x0005
+#define OP_PCL_SSL30_RC4_128_SHA_6		 0xc002
+#define OP_PCL_SSL30_RC4_128_SHA_7		 0xc007
+#define OP_PCL_SSL30_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_SSL30_RC4_128_SHA_9		 0xc011
+#define OP_PCL_SSL30_RC4_128_SHA_10		 0xc016
+
+#define OP_PCL_SSL30_RC4_40_SHA			 0x0028
+
+/* For TLS 1.0 - OP_PCLID_TLS10 */
+#define OP_PCL_TLS10_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_TLS10_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_TLS10_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_TLS10_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_TLS10_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_TLS10_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_TLS10_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_TLS10_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_TLS10_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_TLS10_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_TLS10_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_TLS10_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_TLS10_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_TLS10_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_TLS10_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_17		 0xc01f
+
+#define OP_PCL_TLS10_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_TLS10_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_TLS10_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_TLS10_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_TLS10_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_TLS10_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_TLS10_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_TLS10_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_TLS10_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_TLS10_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_TLS10_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_TLS10_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_TLS10_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_TLS10_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_TLS10_AES_256_CBC_SHA_17		 0xc022
+
+/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5	0x0023 */
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18	 0xc01c
+
+#define OP_PCL_TLS10_DES40_CBC_MD5		 0x0029
+
+#define OP_PCL_TLS10_DES_CBC_MD5		 0x0022
+
+#define OP_PCL_TLS10_DES40_CBC_SHA		 0x0008
+#define OP_PCL_TLS10_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_TLS10_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_TLS10_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_TLS10_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_TLS10_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_TLS10_DES40_CBC_SHA_7		 0x0026
+
+#define OP_PCL_TLS10_DES_CBC_SHA		 0x001e
+#define OP_PCL_TLS10_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_TLS10_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_TLS10_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_TLS10_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_TLS10_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_TLS10_DES_CBC_SHA_7		 0x001a
+
+#define OP_PCL_TLS10_RC4_128_MD5		 0x0024
+#define OP_PCL_TLS10_RC4_128_MD5_2		 0x0004
+#define OP_PCL_TLS10_RC4_128_MD5_3		 0x0018
+
+#define OP_PCL_TLS10_RC4_40_MD5			 0x002b
+#define OP_PCL_TLS10_RC4_40_MD5_2		 0x0003
+#define OP_PCL_TLS10_RC4_40_MD5_3		 0x0017
+
+#define OP_PCL_TLS10_RC4_128_SHA		 0x0020
+#define OP_PCL_TLS10_RC4_128_SHA_2		 0x008a
+#define OP_PCL_TLS10_RC4_128_SHA_3		 0x008e
+#define OP_PCL_TLS10_RC4_128_SHA_4		 0x0092
+#define OP_PCL_TLS10_RC4_128_SHA_5		 0x0005
+#define OP_PCL_TLS10_RC4_128_SHA_6		 0xc002
+#define OP_PCL_TLS10_RC4_128_SHA_7		 0xc007
+#define OP_PCL_TLS10_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_TLS10_RC4_128_SHA_9		 0xc011
+#define OP_PCL_TLS10_RC4_128_SHA_10		 0xc016
+
+#define OP_PCL_TLS10_RC4_40_SHA			 0x0028
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160	 0xff30
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224	 0xff34
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256	 0xff36
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384	 0xff33
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512	 0xff35
+#define OP_PCL_TLS10_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_TLS10_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_TLS10_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_TLS10_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_TLS10_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_TLS10_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_TLS10_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_TLS10_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_TLS10_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_TLS10_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_TLS10_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_TLS10_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_TLS10_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_TLS10_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_TLS10_AES_256_CBC_SHA512		 0xff65
+
+/* For TLS 1.1 - OP_PCLID_TLS11 */
+#define OP_PCL_TLS11_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_TLS11_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_TLS11_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_TLS11_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_TLS11_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_TLS11_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_TLS11_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_TLS11_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_TLS11_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_TLS11_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_TLS11_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_TLS11_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_TLS11_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_TLS11_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_TLS11_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_17		 0xc01f
+
+#define OP_PCL_TLS11_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_TLS11_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_TLS11_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_TLS11_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_TLS11_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_TLS11_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_TLS11_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_TLS11_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_TLS11_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_TLS11_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_TLS11_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_TLS11_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_TLS11_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_TLS11_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_TLS11_AES_256_CBC_SHA_17		 0xc022
+
+/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5	0x0023 */
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18	 0xc01c
+
+#define OP_PCL_TLS11_DES40_CBC_MD5		 0x0029
+
+#define OP_PCL_TLS11_DES_CBC_MD5		 0x0022
+
+#define OP_PCL_TLS11_DES40_CBC_SHA		 0x0008
+#define OP_PCL_TLS11_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_TLS11_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_TLS11_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_TLS11_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_TLS11_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_TLS11_DES40_CBC_SHA_7		 0x0026
+
+#define OP_PCL_TLS11_DES_CBC_SHA		 0x001e
+#define OP_PCL_TLS11_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_TLS11_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_TLS11_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_TLS11_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_TLS11_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_TLS11_DES_CBC_SHA_7		 0x001a
+
+#define OP_PCL_TLS11_RC4_128_MD5		 0x0024
+#define OP_PCL_TLS11_RC4_128_MD5_2		 0x0004
+#define OP_PCL_TLS11_RC4_128_MD5_3		 0x0018
+
+#define OP_PCL_TLS11_RC4_40_MD5			 0x002b
+#define OP_PCL_TLS11_RC4_40_MD5_2		 0x0003
+#define OP_PCL_TLS11_RC4_40_MD5_3		 0x0017
+
+#define OP_PCL_TLS11_RC4_128_SHA		 0x0020
+#define OP_PCL_TLS11_RC4_128_SHA_2		 0x008a
+#define OP_PCL_TLS11_RC4_128_SHA_3		 0x008e
+#define OP_PCL_TLS11_RC4_128_SHA_4		 0x0092
+#define OP_PCL_TLS11_RC4_128_SHA_5		 0x0005
+#define OP_PCL_TLS11_RC4_128_SHA_6		 0xc002
+#define OP_PCL_TLS11_RC4_128_SHA_7		 0xc007
+#define OP_PCL_TLS11_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_TLS11_RC4_128_SHA_9		 0xc011
+#define OP_PCL_TLS11_RC4_128_SHA_10		 0xc016
+
+#define OP_PCL_TLS11_RC4_40_SHA			 0x0028
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160	 0xff30
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224	 0xff34
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256	 0xff36
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384	 0xff33
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512	 0xff35
+#define OP_PCL_TLS11_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_TLS11_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_TLS11_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_TLS11_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_TLS11_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_TLS11_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_TLS11_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_TLS11_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_TLS11_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_TLS11_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_TLS11_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_TLS11_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_TLS11_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_TLS11_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_TLS11_AES_256_CBC_SHA512		 0xff65
+
+/* For TLS 1.2 - OP_PCLID_TLS12 */
+#define OP_PCL_TLS12_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_TLS12_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_TLS12_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_TLS12_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_TLS12_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_TLS12_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_TLS12_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_TLS12_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_TLS12_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_TLS12_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_TLS12_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_TLS12_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_TLS12_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_TLS12_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_TLS12_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_17		 0xc01f
+
+#define OP_PCL_TLS12_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_TLS12_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_TLS12_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_TLS12_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_TLS12_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_TLS12_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_TLS12_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_TLS12_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_TLS12_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_TLS12_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_TLS12_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_TLS12_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_TLS12_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_TLS12_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_TLS12_AES_256_CBC_SHA_17		 0xc022
+
+/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5	0x0023 */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18	 0xc01c
+
+#define OP_PCL_TLS12_DES40_CBC_MD5		 0x0029
+
+#define OP_PCL_TLS12_DES_CBC_MD5		 0x0022
+
+#define OP_PCL_TLS12_DES40_CBC_SHA		 0x0008
+#define OP_PCL_TLS12_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_TLS12_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_TLS12_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_TLS12_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_TLS12_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_TLS12_DES40_CBC_SHA_7		 0x0026
+
+#define OP_PCL_TLS12_DES_CBC_SHA		 0x001e
+#define OP_PCL_TLS12_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_TLS12_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_TLS12_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_TLS12_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_TLS12_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_TLS12_DES_CBC_SHA_7		 0x001a
+
+#define OP_PCL_TLS12_RC4_128_MD5		 0x0024
+#define OP_PCL_TLS12_RC4_128_MD5_2		 0x0004
+#define OP_PCL_TLS12_RC4_128_MD5_3		 0x0018
+
+#define OP_PCL_TLS12_RC4_40_MD5			 0x002b
+#define OP_PCL_TLS12_RC4_40_MD5_2		 0x0003
+#define OP_PCL_TLS12_RC4_40_MD5_3		 0x0017
+
+#define OP_PCL_TLS12_RC4_128_SHA		 0x0020
+#define OP_PCL_TLS12_RC4_128_SHA_2		 0x008a
+#define OP_PCL_TLS12_RC4_128_SHA_3		 0x008e
+#define OP_PCL_TLS12_RC4_128_SHA_4		 0x0092
+#define OP_PCL_TLS12_RC4_128_SHA_5		 0x0005
+#define OP_PCL_TLS12_RC4_128_SHA_6		 0xc002
+#define OP_PCL_TLS12_RC4_128_SHA_7		 0xc007
+#define OP_PCL_TLS12_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_TLS12_RC4_128_SHA_9		 0xc011
+#define OP_PCL_TLS12_RC4_128_SHA_10		 0xc016
+
+#define OP_PCL_TLS12_RC4_40_SHA			 0x0028
+
+/* #define OP_PCL_TLS12_AES_128_CBC_SHA256	0x003c */
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_2	 0x003e
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_3	 0x003f
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_4	 0x0040
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_5	 0x0067
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_6	 0x006c
+
+/* #define OP_PCL_TLS12_AES_256_CBC_SHA256	0x003d */
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_2	 0x0068
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_3	 0x0069
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_4	 0x006a
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_5	 0x006b
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_6	 0x006d
+
+/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160	 0xff30
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224	 0xff34
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256	 0xff36
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384	 0xff33
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512	 0xff35
+#define OP_PCL_TLS12_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_TLS12_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_TLS12_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_TLS12_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_TLS12_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_TLS12_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_TLS12_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_TLS12_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_TLS12_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_TLS12_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_TLS12_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_TLS12_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_TLS12_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_TLS12_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_TLS12_AES_256_CBC_SHA512		 0xff65
+
+/* For DTLS - OP_PCLID_DTLS */
+
+#define OP_PCL_DTLS_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_DTLS_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_DTLS_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_DTLS_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_DTLS_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_DTLS_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_DTLS_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_DTLS_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_DTLS_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_DTLS_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_DTLS_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_DTLS_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_DTLS_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_DTLS_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_DTLS_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_17		 0xc01f
+
+#define OP_PCL_DTLS_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_DTLS_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_DTLS_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_DTLS_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_DTLS_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_DTLS_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_DTLS_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_DTLS_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_DTLS_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_DTLS_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_DTLS_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_DTLS_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_DTLS_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_DTLS_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_DTLS_AES_256_CBC_SHA_17		 0xc022
+
+/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5		0x0023 */
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10		 0x001b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11		 0xc003
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12		 0xc008
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13		 0xc00d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14		 0xc012
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15		 0xc017
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16		 0xc01a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17		 0xc01b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18		 0xc01c
+
+#define OP_PCL_DTLS_DES40_CBC_MD5		 0x0029
+
+#define OP_PCL_DTLS_DES_CBC_MD5			 0x0022
+
+#define OP_PCL_DTLS_DES40_CBC_SHA		 0x0008
+#define OP_PCL_DTLS_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_DTLS_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_DTLS_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_DTLS_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_DTLS_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_DTLS_DES40_CBC_SHA_7		 0x0026
+
+#define OP_PCL_DTLS_DES_CBC_SHA			 0x001e
+#define OP_PCL_DTLS_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_DTLS_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_DTLS_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_DTLS_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_DTLS_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_DTLS_DES_CBC_SHA_7		 0x001a
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160		 0xff30
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224		 0xff34
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256		 0xff36
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384		 0xff33
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512		 0xff35
+#define OP_PCL_DTLS_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_DTLS_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_DTLS_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_DTLS_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_DTLS_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_DTLS_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_DTLS_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_DTLS_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_DTLS_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_DTLS_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_DTLS_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_DTLS_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_DTLS_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_DTLS_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_DTLS_AES_256_CBC_SHA512		 0xff65
+
+/* 802.16 WiMAX protinfos */
+#define OP_PCL_WIMAX_OFDM			 0x0201
+#define OP_PCL_WIMAX_OFDMA			 0x0231
+
+/* 802.11 WiFi protinfos */
+#define OP_PCL_WIFI				 0xac04
+
+/* MacSec protinfos */
+#define OP_PCL_MACSEC				 0x0001
+
+/* PKI unidirectional protocol protinfo bits */
+#define OP_PCL_PKPROT_TEST			 0x0008
+#define OP_PCL_PKPROT_DECRYPT			 0x0004
+#define OP_PCL_PKPROT_ECC			 0x0002
+#define OP_PCL_PKPROT_F2M			 0x0001
+
+/* For non-protocol/alg-only op commands */
+#define OP_ALG_TYPE_SHIFT	24
+#define OP_ALG_TYPE_MASK	(0x7 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS1	2
+#define OP_ALG_TYPE_CLASS2	4
+
+#define OP_ALG_ALGSEL_SHIFT	16
+#define OP_ALG_ALGSEL_MASK	(0xff << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SUBMASK	(0x0f << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_AES	(0x10 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_DES	(0x20 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_3DES	(0x21 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ARC4	(0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_MD5	(0x40 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA1	(0x41 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA224	(0x42 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA256	(0x43 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA384	(0x44 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA512	(0x45 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_RNG	(0x50 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW	(0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F8	(0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_KASUMI	(0x70 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_CRC	(0x90 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F9	(0xA0 << OP_ALG_ALGSEL_SHIFT)
+
+#define OP_ALG_AAI_SHIFT	4
+#define OP_ALG_AAI_MASK		(0x1ff << OP_ALG_AAI_SHIFT)
+
+/* blockcipher AAI set */
+#define OP_ALG_AAI_CTR_MOD128	(0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD8	(0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD16	(0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD24	(0x03 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD32	(0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD40	(0x05 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD48	(0x06 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD56	(0x07 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD64	(0x08 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD72	(0x09 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD80	(0x0a << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD88	(0x0b << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD96	(0x0c << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD104	(0x0d << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD112	(0x0e << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD120	(0x0f << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC		(0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_ECB		(0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CFB		(0x30 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_OFB		(0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XTS		(0x50 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CMAC		(0x60 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XCBC_MAC	(0x70 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CCM		(0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GCM		(0x90 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC_XCBCMAC	(0xa0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_XCBCMAC	(0xb0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CHECKODD	(0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DK		(0x100 << OP_ALG_AAI_SHIFT)
+
+/* randomizer AAI set */
+#define OP_ALG_AAI_RNG		(0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NOZERO	(0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_ODD	(0x20 << OP_ALG_AAI_SHIFT)
+
+/* hmac/smac AAI set */
+#define OP_ALG_AAI_HASH		(0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC		(0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_SMAC		(0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC_PRECOMP	(0x04 << OP_ALG_AAI_SHIFT)
+
+/* CRC AAI set*/
+#define OP_ALG_AAI_802		(0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_3385		(0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CUST_POLY	(0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DIS		(0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOS		(0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOC		(0x40 << OP_ALG_AAI_SHIFT)
+
+/* Kasumi/SNOW AAI set */
+#define OP_ALG_AAI_F8		(0xc0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_F9		(0xc8 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GSM		(0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_EDGE		(0x20 << OP_ALG_AAI_SHIFT)
+
+#define OP_ALG_AS_SHIFT		2
+#define OP_ALG_AS_MASK		(0x3 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_UPDATE	(0 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INIT		(1 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_FINALIZE	(2 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INITFINAL	(3 << OP_ALG_AS_SHIFT)
+
+#define OP_ALG_ICV_SHIFT	1
+#define OP_ALG_ICV_MASK		(1 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_OFF		(0 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_ON		(1 << OP_ALG_ICV_SHIFT)
+
+#define OP_ALG_DIR_SHIFT	0
+#define OP_ALG_DIR_MASK		1
+#define OP_ALG_DECRYPT		0
+#define OP_ALG_ENCRYPT		1
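+
+/*
+ * Illustrative sketch (not part of the driver): a class-1 AES-CBC
+ * encrypt operation run as a single init/final pass would combine the
+ * fields above roughly as
+ *
+ *	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
+ *			 OP_ALG_AAI_CBC | OP_ALG_AS_INITFINAL |
+ *			 OP_ALG_ENCRYPT);
+ *
+ * append_operation() is the desc_constr.h helper that prepends
+ * CMD_OPERATION; OP_TYPE_CLASS1_ALG is assumed to be defined earlier
+ * in this header.
+ */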
+
+/* PKHA algorithm type set */
+#define OP_ALG_PK		0x00800000
+#define OP_ALG_PK_FUN_MASK	0x3f /* clrmem, modmath, or cpymem */
+
+/* PKHA mode clear memory functions */
+#define OP_ALG_PKMODE_A_RAM	0x80000
+#define OP_ALG_PKMODE_B_RAM	0x40000
+#define OP_ALG_PKMODE_E_RAM	0x20000
+#define OP_ALG_PKMODE_N_RAM	0x10000
+#define OP_ALG_PKMODE_CLEARMEM	0x00001
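+
+/*
+ * Illustrative sketch (assumption): clearing PKHA memories A and B in
+ * a single operation would OR together, along with the OPERATION
+ * command/type bits defined earlier,
+ *
+ *	OP_ALG_PKMODE_CLEARMEM | OP_ALG_PKMODE_A_RAM | OP_ALG_PKMODE_B_RAM
+ */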
+
+/* PKHA mode modular-arithmetic functions */
+#define OP_ALG_PKMODE_MOD_IN_MONTY	0x80000
+#define OP_ALG_PKMODE_MOD_OUT_MONTY	0x40000
+#define OP_ALG_PKMODE_MOD_F2M		0x20000
+#define OP_ALG_PKMODE_MOD_R2_IN		0x10000
+#define OP_ALG_PKMODE_PRJECTV		0x00800
+#define OP_ALG_PKMODE_TIME_EQ		0x400
+#define OP_ALG_PKMODE_OUT_B		0x000
+#define OP_ALG_PKMODE_OUT_A		0x100
+#define OP_ALG_PKMODE_MOD_ADD		0x002
+#define OP_ALG_PKMODE_MOD_SUB_AB	0x003
+#define OP_ALG_PKMODE_MOD_SUB_BA	0x004
+#define OP_ALG_PKMODE_MOD_MULT		0x005
+#define OP_ALG_PKMODE_MOD_EXPO		0x006
+#define OP_ALG_PKMODE_MOD_REDUCT	0x007
+#define OP_ALG_PKMODE_MOD_INV		0x008
+#define OP_ALG_PKMODE_MOD_ECC_ADD	0x009
+#define OP_ALG_PKMODE_MOD_ECC_DBL	0x00a
+#define OP_ALG_PKMODE_MOD_ECC_MULT	0x00b
+#define OP_ALG_PKMODE_MOD_MONT_CNST	0x00c
+#define OP_ALG_PKMODE_MOD_CRT_CNST	0x00d
+#define OP_ALG_PKMODE_MOD_GCD		0x00e
+#define OP_ALG_PKMODE_MOD_PRIMALITY	0x00f
+
+/* PKHA mode copy-memory functions */
+#define OP_ALG_PKMODE_SRC_REG_SHIFT	13
+#define OP_ALG_PKMODE_SRC_REG_MASK	(7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_SHIFT	10
+#define OP_ALG_PKMODE_DST_REG_MASK	(7 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_SHIFT	8
+#define OP_ALG_PKMODE_SRC_SEG_MASK	(3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_SHIFT	6
+#define OP_ALG_PKMODE_DST_SEG_MASK	(3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+#define OP_ALG_PKMODE_SRC_REG_A		(0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_B		(1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_N		(3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_A		(0 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_B		(1 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_E		(2 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_N		(3 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_0		(0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_1		(1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_2		(2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_3		(3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_0		(0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_1		(1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_2		(2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_3		(3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_CPYMEM_N_SZ	0x80
+#define OP_ALG_PKMODE_CPYMEM_SRC_SZ	0x81
+
+/*
+ * SEQ_IN_PTR Command Constructs
+ */
+
+/* Release Buffers */
+#define SQIN_RBS	0x04000000
+
+/* Sequence pointer is really a descriptor */
+#define SQIN_INL	0x02000000
+
+/* Sequence pointer is a scatter-gather table */
+#define SQIN_SGF	0x01000000
+
+/* Appends to a previous pointer */
+#define SQIN_PRE	0x00800000
+
+/* Use extended length following pointer */
+#define SQIN_EXT	0x00400000
+
+/* Restore sequence with pointer/length */
+#define SQIN_RTO	0x00200000
+
+/* Replace job descriptor */
+#define SQIN_RJD	0x00100000
+
+#define SQIN_LEN_SHIFT		 0
+#define SQIN_LEN_MASK		(0xffff << SQIN_LEN_SHIFT)
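+
+/*
+ * Illustrative sketch (assumption): pointing the input sequence at a
+ * scatter-gather table of len bytes would build the command word as
+ *
+ *	CMD_SEQ_IN_PTR | SQIN_SGF | (len & SQIN_LEN_MASK)
+ *
+ * followed by the table's bus address; see append_seq_in_ptr() in
+ * desc_constr.h. CMD_SEQ_IN_PTR is assumed to be defined earlier in
+ * this header.
+ */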
+
+/*
+ * SEQ_OUT_PTR Command Constructs
+ */
+
+/* Sequence pointer is a scatter-gather table */
+#define SQOUT_SGF	0x01000000
+
+/* Appends to a previous pointer */
+#define SQOUT_PRE	0x00800000
+
+/* Use extended length following pointer */
+#define SQOUT_EXT	0x00400000
+
+/* Restore sequence with pointer/length */
+#define SQOUT_RTO	0x00200000
+
+#define SQOUT_LEN_SHIFT		0
+#define SQOUT_LEN_MASK		(0xffff << SQOUT_LEN_SHIFT)
+
+/*
+ * SIGNATURE Command Constructs
+ */
+
+/* TYPE field is all that's relevant */
+#define SIGN_TYPE_SHIFT		16
+#define SIGN_TYPE_MASK		(0x0f << SIGN_TYPE_SHIFT)
+
+#define SIGN_TYPE_FINAL		(0x00 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_2		(0x0a << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_3		(0x0b << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_4		(0x0c << SIGN_TYPE_SHIFT)
+
+/*
+ * MOVE Command Constructs
+ */
+
+#define MOVE_AUX_SHIFT		25
+#define MOVE_AUX_MASK		(3 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_MS		(2 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_LS		(1 << MOVE_AUX_SHIFT)
+
+#define MOVE_WAITCOMP_SHIFT	24
+#define MOVE_WAITCOMP_MASK	(1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP		(1 << MOVE_WAITCOMP_SHIFT)
+
+#define MOVE_SRC_SHIFT		20
+#define MOVE_SRC_MASK		(0x0f << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS1CTX	(0x00 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS2CTX	(0x01 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_OUTFIFO	(0x02 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_DESCBUF	(0x03 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH0		(0x04 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH1		(0x05 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH2		(0x06 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH3		(0x07 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO		(0x08 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_CL	(0x09 << MOVE_SRC_SHIFT)
+
+#define MOVE_DEST_SHIFT		16
+#define MOVE_DEST_MASK		(0x0f << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1CTX	(0x00 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2CTX	(0x01 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_OUTFIFO	(0x02 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_DESCBUF	(0x03 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH0		(0x04 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH1		(0x05 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH2		(0x06 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH3		(0x07 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1INFIFO	(0x08 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2INFIFO	(0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_PK_A		(0x0c << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1KEY	(0x0d << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2KEY	(0x0e << MOVE_DEST_SHIFT)
+
+#define MOVE_OFFSET_SHIFT	8
+#define MOVE_OFFSET_MASK	(0xff << MOVE_OFFSET_SHIFT)
+
+#define MOVE_LEN_SHIFT		0
+#define MOVE_LEN_MASK		(0xff << MOVE_LEN_SHIFT)
+
+#define MOVELEN_MRSEL_SHIFT	0
+#define MOVELEN_MRSEL_MASK	(0x3 << MOVELEN_MRSEL_SHIFT)
+
+/*
+ * MATH Command Constructs
+ */
+
+#define MATH_IFB_SHIFT		26
+#define MATH_IFB_MASK		(1 << MATH_IFB_SHIFT)
+#define MATH_IFB		(1 << MATH_IFB_SHIFT)
+
+#define MATH_NFU_SHIFT		25
+#define MATH_NFU_MASK		(1 << MATH_NFU_SHIFT)
+#define MATH_NFU		(1 << MATH_NFU_SHIFT)
+
+#define MATH_STL_SHIFT		24
+#define MATH_STL_MASK		(1 << MATH_STL_SHIFT)
+#define MATH_STL		(1 << MATH_STL_SHIFT)
+
+/* Function selectors */
+#define MATH_FUN_SHIFT		20
+#define MATH_FUN_MASK		(0x0f << MATH_FUN_SHIFT)
+#define MATH_FUN_ADD		(0x00 << MATH_FUN_SHIFT)
+#define MATH_FUN_ADDC		(0x01 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUB		(0x02 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUBB		(0x03 << MATH_FUN_SHIFT)
+#define MATH_FUN_OR		(0x04 << MATH_FUN_SHIFT)
+#define MATH_FUN_AND		(0x05 << MATH_FUN_SHIFT)
+#define MATH_FUN_XOR		(0x06 << MATH_FUN_SHIFT)
+#define MATH_FUN_LSHIFT		(0x07 << MATH_FUN_SHIFT)
+#define MATH_FUN_RSHIFT		(0x08 << MATH_FUN_SHIFT)
+#define MATH_FUN_SHLD		(0x09 << MATH_FUN_SHIFT)
+#define MATH_FUN_ZBYT		(0x0a << MATH_FUN_SHIFT)
+
+/* Source 0 selectors */
+#define MATH_SRC0_SHIFT		16
+#define MATH_SRC0_MASK		(0x0f << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG0		(0x00 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG1		(0x01 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG2		(0x02 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG3		(0x03 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_IMM		(0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQINLEN	(0x08 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQOUTLEN	(0x09 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQINLEN	(0x0a << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQOUTLEN	(0x0b << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ZERO		(0x0c << MATH_SRC0_SHIFT)
+
+/* Source 1 selectors */
+#define MATH_SRC1_SHIFT		12
+#define MATH_SRC1_MASK		(0x0f << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG0		(0x00 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG1		(0x01 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG2		(0x02 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG3		(0x03 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_IMM		(0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_INFIFO	(0x0a << MATH_SRC1_SHIFT)
+#define MATH_SRC1_OUTFIFO	(0x0b << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ONE		(0x0c << MATH_SRC1_SHIFT)
+
+/* Destination selectors */
+#define MATH_DEST_SHIFT		8
+#define MATH_DEST_MASK		(0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_REG0		(0x00 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG1		(0x01 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG2		(0x02 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG3		(0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQINLEN	(0x08 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQOUTLEN	(0x09 << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQINLEN	(0x0a << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQOUTLEN	(0x0b << MATH_DEST_SHIFT)
+#define MATH_DEST_NONE		(0x0f << MATH_DEST_SHIFT)
+
+/* Length selectors */
+#define MATH_LEN_SHIFT		0
+#define MATH_LEN_MASK		(0x0f << MATH_LEN_SHIFT)
+#define MATH_LEN_1BYTE		0x01
+#define MATH_LEN_2BYTE		0x02
+#define MATH_LEN_4BYTE		0x04
+#define MATH_LEN_8BYTE		0x08
+
+/*
+ * JUMP Command Constructs
+ */
+
+#define JUMP_CLASS_SHIFT	25
+#define JUMP_CLASS_MASK		(3 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_NONE		0
+#define JUMP_CLASS_CLASS1	(1 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_CLASS2	(2 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_BOTH		(3 << JUMP_CLASS_SHIFT)
+
+#define JUMP_JSL_SHIFT		24
+#define JUMP_JSL_MASK		(1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL		(1 << JUMP_JSL_SHIFT)
+
+#define JUMP_TYPE_SHIFT		22
+#define JUMP_TYPE_MASK		(0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL		(0x00 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_NONLOCAL	(0x01 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT		(0x02 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT_USER	(0x03 << JUMP_TYPE_SHIFT)
+
+#define JUMP_TEST_SHIFT		16
+#define JUMP_TEST_MASK		(0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ALL		(0x00 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVALL	(0x01 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ANY		(0x02 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVANY	(0x03 << JUMP_TEST_SHIFT)
+
+/* Condition codes. JSL bit is factored in */
+#define JUMP_COND_SHIFT		8
+#define JUMP_COND_MASK		(0x100ff << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_0		(0x80 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_GCD_1	(0x40 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_PRIME	(0x20 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_N	(0x08 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_Z	(0x04 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_C	(0x02 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_NV	(0x01 << JUMP_COND_SHIFT)
+
+#define JUMP_COND_JRP		((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SHRD		((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SELF		((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_CALM		((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIP		((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIFP		((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NOP		((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NCP		((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
+
+#define JUMP_OFFSET_SHIFT	0
+#define JUMP_OFFSET_MASK	(0xff << JUMP_OFFSET_SHIFT)
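+
+/*
+ * Illustrative sketch (assumption): a local jump taken when the math
+ * zero flag is set would combine the fields above roughly as
+ *
+ *	CMD_JUMP | JUMP_TYPE_LOCAL | JUMP_TEST_ALL | JUMP_COND_MATH_Z |
+ *	(offset & JUMP_OFFSET_MASK)
+ *
+ * CMD_JUMP is assumed to be defined earlier in this header; offset is
+ * the displacement in descriptor words.
+ */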
+
+/*
+ * NFIFO ENTRY Data Constructs
+ */
+#define NFIFOENTRY_DEST_SHIFT	30
+#define NFIFOENTRY_DEST_MASK	(3 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_DECO	(0 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS1	(1 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS2	(2 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_BOTH	(3 << NFIFOENTRY_DEST_SHIFT)
+
+#define NFIFOENTRY_LC2_SHIFT	29
+#define NFIFOENTRY_LC2_MASK	(1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2		(1 << NFIFOENTRY_LC2_SHIFT)
+
+#define NFIFOENTRY_LC1_SHIFT	28
+#define NFIFOENTRY_LC1_MASK	(1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1		(1 << NFIFOENTRY_LC1_SHIFT)
+
+#define NFIFOENTRY_FC2_SHIFT	27
+#define NFIFOENTRY_FC2_MASK	(1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2		(1 << NFIFOENTRY_FC2_SHIFT)
+
+#define NFIFOENTRY_FC1_SHIFT	26
+#define NFIFOENTRY_FC1_MASK	(1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1		(1 << NFIFOENTRY_FC1_SHIFT)
+
+#define NFIFOENTRY_STYPE_SHIFT	24
+#define NFIFOENTRY_STYPE_MASK	(3 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_DFIFO	(0 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_OFIFO	(1 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_PAD	(2 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_SNOOP	(3 << NFIFOENTRY_STYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_SHIFT	20
+#define NFIFOENTRY_DTYPE_MASK	(0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_SBOX	(0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_AAD	(0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_IV	(0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SAD	(0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_ICV	(0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SKIP	(0xE << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_MSG	(0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_PK_A0	(0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A1	(0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A2	(0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A3	(0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B0	(0x4 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B1	(0x5 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B2	(0x6 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B3	(0x7 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_N	(0x8 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_E	(0x9 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A	(0xC << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B	(0xD << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_BND_SHIFT	19
+#define NFIFOENTRY_BND_MASK	(1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND		(1 << NFIFOENTRY_BND_SHIFT)
+
+#define NFIFOENTRY_PTYPE_SHIFT	16
+#define NFIFOENTRY_PTYPE_MASK	(0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_PTYPE_ZEROS		(0x0 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NOZEROS	(0x1 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_INCREMENT	(0x2 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND		(0x3 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS_NZ	(0x4 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_LZ	(0x5 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_N		(0x6 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_N	(0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_OC_SHIFT	15
+#define NFIFOENTRY_OC_MASK	(1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC		(1 << NFIFOENTRY_OC_SHIFT)
+
+#define NFIFOENTRY_AST_SHIFT	14
+#define NFIFOENTRY_AST_MASK	(1 << NFIFOENTRY_AST_SHIFT)
+#define NFIFOENTRY_AST		(1 << NFIFOENTRY_AST_SHIFT)
+
+#define NFIFOENTRY_BM_SHIFT	11
+#define NFIFOENTRY_BM_MASK	(1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_BM		(1 << NFIFOENTRY_BM_SHIFT)
+
+#define NFIFOENTRY_PS_SHIFT	10
+#define NFIFOENTRY_PS_MASK	(1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_PS		(1 << NFIFOENTRY_PS_SHIFT)
+
+#define NFIFOENTRY_DLEN_SHIFT	0
+#define NFIFOENTRY_DLEN_MASK	(0xFFF << NFIFOENTRY_DLEN_SHIFT)
+
+#define NFIFOENTRY_PLEN_SHIFT	0
+#define NFIFOENTRY_PLEN_MASK	(0xFF << NFIFOENTRY_PLEN_SHIFT)
+
+/*
+ * PDB internal definitions
+ */
+
+/* IPSec ESP CBC Encap/Decap Options */
+#define PDBOPTS_ESPCBC_ARSNONE	0x00	/* no antireplay window	*/
+#define PDBOPTS_ESPCBC_ARS32	0x40	/* 32-entry antireplay window */
+#define PDBOPTS_ESPCBC_ARS64	0xc0	/* 64-entry antireplay window */
+#define PDBOPTS_ESPCBC_IVSRC	0x20	/* IV comes from internal random gen */
+#define PDBOPTS_ESPCBC_ESN	0x10	/* extended sequence included */
+#define PDBOPTS_ESPCBC_OUTFMT	0x08	/* output only decapsulation (decap) */
+#define PDBOPTS_ESPCBC_IPHDRSRC 0x08	/* IP header comes from PDB (encap) */
+#define PDBOPTS_ESPCBC_INCIPHDR 0x04	/* Prepend IP header to output frame */
+#define PDBOPTS_ESPCBC_IPVSN	0x02	/* process IPv6 header */
+#define PDBOPTS_ESPCBC_TUNNEL	0x01	/* tunnel mode next-header byte */
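+
+/*
+ * Illustrative sketch (assumption): a tunnel-mode ESP CBC decap PDB
+ * with a 64-entry antireplay window and extended sequence numbers
+ * might set
+ *
+ *	PDBOPTS_ESPCBC_ARS64 | PDBOPTS_ESPCBC_ESN | PDBOPTS_ESPCBC_TUNNEL
+ */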
+
+#endif /* DESC_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/crypto/caam/desc_constr.h b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/desc_constr.h
new file mode 100644
index 0000000..348b882
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/desc_constr.h
@@ -0,0 +1,262 @@
+/*
+ * caam descriptor construction helper functions
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#include "desc.h"
+
+#define IMMEDIATE (1 << 23)
+#define CAAM_CMD_SZ sizeof(u32)
+#define CAAM_PTR_SZ sizeof(dma_addr_t)
+#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
+
+#ifdef DEBUG
+#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
+			      &__func__[sizeof("append")]); } while (0)
+#else
+#define PRINT_POS
+#endif
+
+#define SET_OK_NO_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
+			       LDST_SRCDST_WORD_DECOCTRL | \
+			       (LDOFF_CHG_SHARE_OK_NO_PROP << \
+				LDST_OFFSET_SHIFT))
+#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
+				LDST_SRCDST_WORD_DECOCTRL | \
+				(LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
+#define ENABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
+			       LDST_SRCDST_WORD_DECOCTRL | \
+			       (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
+
+static inline int desc_len(u32 *desc)
+{
+	return *desc & HDR_DESCLEN_MASK;
+}
+
+static inline int desc_bytes(void *desc)
+{
+	return desc_len(desc) * CAAM_CMD_SZ;
+}
+
+static inline u32 *desc_end(u32 *desc)
+{
+	return desc + desc_len(desc);
+}
+
+static inline void *sh_desc_pdb(u32 *desc)
+{
+	return desc + 1;
+}
+
+static inline void init_desc(u32 *desc, u32 options)
+{
+	*desc = options | HDR_ONE | 1;
+}
+
+static inline void init_sh_desc(u32 *desc, u32 options)
+{
+	PRINT_POS;
+	init_desc(desc, CMD_SHARED_DESC_HDR | options);
+}
+
+static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+{
+	u32 pdb_len = pdb_bytes / CAAM_CMD_SZ + 1;
+
+	init_sh_desc(desc, ((pdb_len << HDR_START_IDX_SHIFT) + pdb_len) |
+		     options);
+}
+
+static inline void init_job_desc(u32 *desc, u32 options)
+{
+	init_desc(desc, CMD_DESC_HDR | options);
+}
+
+static inline void append_ptr(u32 *desc, dma_addr_t ptr)
+{
+	dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
+
+	*offset = ptr;
+
+	(*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ;
+}
+
+static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
+					u32 options)
+{
+	PRINT_POS;
+	init_job_desc(desc, HDR_SHARED | options |
+		      (len << HDR_START_IDX_SHIFT));
+	append_ptr(desc, ptr);
+}
+
+static inline void append_data(u32 *desc, void *data, int len)
+{
+	u32 *offset = desc_end(desc);
+
+	if (len) /* avoid sparse warning: memcpy with byte count of 0 */
+		memcpy(offset, data, len);
+
+	(*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+}
+
+static inline void append_cmd(u32 *desc, u32 command)
+{
+	u32 *cmd = desc_end(desc);
+
+	*cmd = command;
+
+	(*desc)++;
+}
+
+static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
+				  u32 command)
+{
+	append_cmd(desc, command | len);
+	append_ptr(desc, ptr);
+}
+
+static inline void append_cmd_data(u32 *desc, void *data, int len,
+				   u32 command)
+{
+	append_cmd(desc, command | IMMEDIATE | len);
+	append_data(desc, data, len);
+}
+
+static inline u32 *append_jump(u32 *desc, u32 options)
+{
+	u32 *cmd = desc_end(desc);
+
+	PRINT_POS;
+	append_cmd(desc, CMD_JUMP | options);
+
+	return cmd;
+}
+
+static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
+{
+	*jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
+}
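+
+/*
+ * Typical usage sketch (illustrative): emit a forward jump whose
+ * target is not yet known, append the commands to be skipped, then
+ * patch the jump offset once the target position is reached, e.g. to
+ * skip key commands when the descriptor is already shared:
+ *
+ *	u32 *jump_cmd;
+ *
+ *	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ *	... commands to skip ...
+ *	set_jump_tgt_here(desc, jump_cmd);
+ */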
+
+#define APPEND_CMD(cmd, op) \
+static inline void append_##cmd(u32 *desc, u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | options); \
+}
+APPEND_CMD(operation, OPERATION)
+APPEND_CMD(move, MOVE)
+
+#define APPEND_CMD_LEN(cmd, op) \
+static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | len | options); \
+}
+APPEND_CMD_LEN(seq_store, SEQ_STORE)
+APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
+APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
+
+#define APPEND_CMD_PTR(cmd, op) \
+static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
+				u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
+}
+APPEND_CMD_PTR(key, KEY)
+APPEND_CMD_PTR(seq_in_ptr, SEQ_IN_PTR)
+APPEND_CMD_PTR(seq_out_ptr, SEQ_OUT_PTR)
+APPEND_CMD_PTR(load, LOAD)
+APPEND_CMD_PTR(store, STORE)
+APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
+APPEND_CMD_PTR(fifo_store, FIFO_STORE)
+
+#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
+static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+					 unsigned int len, u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd_data(desc, data, len, CMD_##op | options); \
+}
+APPEND_CMD_PTR_TO_IMM(load, LOAD)
+APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD)
+
+/*
+ * 2nd variant for commands whose specified immediate length differs
+ * from length of immediate data provided, e.g., split keys
+ */
+#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
+static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+					 unsigned int data_len, \
+					 unsigned int len, u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \
+	append_data(desc, data, data_len); \
+}
+APPEND_CMD_PTR_TO_IMM2(key, KEY)
+
+#define APPEND_CMD_RAW_IMM(cmd, op, type) \
+static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
+					     u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
+	append_cmd(desc, immediate); \
+}
+APPEND_CMD_RAW_IMM(load, LOAD, u32)
+
+/*
+ * Append a math command. Only the final part of the destination and
+ * source names needs to be specified.
+ */
+#define APPEND_MATH(op, desc, dest, src_0, src_1, len) \
+append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
+	   MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32) (len & MATH_LEN_MASK))
+
+#define append_math_add(desc, dest, src0, src1, len) \
+	APPEND_MATH(ADD, desc, dest, src0, src1, len)
+#define append_math_sub(desc, dest, src0, src1, len) \
+	APPEND_MATH(SUB, desc, dest, src0, src1, len)
+#define append_math_add_c(desc, dest, src0, src1, len) \
+	APPEND_MATH(ADDC, desc, dest, src0, src1, len)
+#define append_math_sub_b(desc, dest, src0, src1, len) \
+	APPEND_MATH(SUBB, desc, dest, src0, src1, len)
+#define append_math_and(desc, dest, src0, src1, len) \
+	APPEND_MATH(AND, desc, dest, src0, src1, len)
+#define append_math_or(desc, dest, src0, src1, len) \
+	APPEND_MATH(OR, desc, dest, src0, src1, len)
+#define append_math_xor(desc, dest, src0, src1, len) \
+	APPEND_MATH(XOR, desc, dest, src0, src1, len)
+#define append_math_lshift(desc, dest, src0, src1, len) \
+	APPEND_MATH(LSHIFT, desc, dest, src0, src1, len)
+#define append_math_rshift(desc, dest, src0, src1, len) \
+	APPEND_MATH(RSHIFT, desc, dest, src0, src1, len)
+
+/* Exactly one source is IMM. Data is passed in as u32 value */
+#define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \
+do { \
+	APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
+	append_cmd(desc, data); \
+} while (0)
+
+#define append_math_add_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)
+#define append_math_sub_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data)
+#define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data)
+#define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data)
+#define append_math_and_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data)
+#define append_math_or_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data)
+#define append_math_xor_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data)
+#define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data)
+#define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data)
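+
+/*
+ * Usage sketch (illustrative): set VARSEQINLEN = SEQINLEN - REG0, then
+ * REG2 = SEQOUTLEN - 16:
+ *
+ *	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ *	append_math_sub_imm_u32(desc, REG2, SEQOUTLEN, IMM, 16);
+ */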
diff --git a/ap/os/linux/linux-3.4.x/drivers/crypto/caam/error.c b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/error.c
new file mode 100644
index 0000000..9b8d231
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/error.c
@@ -0,0 +1,252 @@
+/*
+ * CAAM Error Reporting
+ *
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "intern.h"
+#include "desc.h"
+#include "jr.h"
+#include "error.h"
+
+#define SPRINTFCAT(str, format, param, max_alloc)		\
+{								\
+	char *tmp;						\
+								\
+	tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC);	\
+	if (likely(tmp)) {					\
+		sprintf(tmp, format, param);			\
+		strcat(str, tmp);				\
+		kfree(tmp);					\
+	} else {						\
+		strcat(str, "kmalloc failure in SPRINTFCAT");	\
+	}							\
+}
+
+static void report_jump_idx(u32 status, char *outstr)
+{
+	u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
+		  JRSTA_DECOERR_INDEX_SHIFT;
+
+	if (status & JRSTA_DECOERR_JUMP)
+		strcat(outstr, "jump tgt desc idx ");
+	else
+		strcat(outstr, "desc idx ");
+
+	SPRINTFCAT(outstr, "%d: ", idx, sizeof("255"));
+}
+
+static void report_ccb_status(u32 status, char *outstr)
+{
+	char *cha_id_list[] = {
+		"",
+		"AES",
+		"DES, 3DES",
+		"ARC4",
+		"MD5, SHA-1, SHA-224, SHA-256, SHA-384, SHA-512",
+		"RNG",
+		"SNOW f8",
+		"Kasumi f8, f9",
+		"All Public Key Algorithms",
+		"CRC",
+		"SNOW f9",
+	};
+	char *err_id_list[] = {
+		"None. No error.",
+		"Mode error.",
+		"Data size error.",
+		"Key size error.",
+		"PKHA A memory size error.",
+		"PKHA B memory size error.",
+		"Data arrived out of sequence error.",
+		"PKHA divide-by-zero error.",
+		"PKHA modulus even error.",
+		"DES key parity error.",
+		"ICV check failed.",
+		"Hardware error.",
+		"Unsupported CCM AAD size.",
+		"Class 1 CHA is not reset.",
+		"Invalid CHA combination was selected.",
+		"Invalid CHA selected.",
+	};
+	u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
+		    JRSTA_CCBERR_CHAID_SHIFT;
+	u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+
+	report_jump_idx(status, outstr);
+
+	if (cha_id < ARRAY_SIZE(cha_id_list)) {
+		SPRINTFCAT(outstr, "%s: ", cha_id_list[cha_id],
+			   strlen(cha_id_list[cha_id]));
+	} else {
+		SPRINTFCAT(outstr, "unidentified cha_id value 0x%02x: ",
+			   cha_id, sizeof("ff"));
+	}
+
+	if (err_id < ARRAY_SIZE(err_id_list)) {
+		SPRINTFCAT(outstr, "%s", err_id_list[err_id],
+			   strlen(err_id_list[err_id]));
+	} else {
+		SPRINTFCAT(outstr, "unidentified err_id value 0x%02x",
+			   err_id, sizeof("ff"));
+	}
+}
+
+static void report_jump_status(u32 status, char *outstr)
+{
+	SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
+}
+
+static void report_deco_status(u32 status, char *outstr)
+{
+	const struct {
+		u8 value;
+		char *error_text;
+	} desc_error_list[] = {
+		{ 0x00, "None. No error." },
+		{ 0x01, "SGT Length Error. The descriptor is trying to read "
+			"more data than is contained in the SGT table." },
+		{ 0x02, "Reserved." },
+		{ 0x03, "Job Ring Control Error. There is a bad value in the "
+			"Job Ring Control register." },
+		{ 0x04, "Invalid Descriptor Command. The Descriptor Command "
+			"field is invalid." },
+		{ 0x05, "Reserved." },
+		{ 0x06, "Invalid KEY Command" },
+		{ 0x07, "Invalid LOAD Command" },
+		{ 0x08, "Invalid STORE Command" },
+		{ 0x09, "Invalid OPERATION Command" },
+		{ 0x0A, "Invalid FIFO LOAD Command" },
+		{ 0x0B, "Invalid FIFO STORE Command" },
+		{ 0x0C, "Invalid MOVE Command" },
+		{ 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is "
+			"invalid because the target is not a Job Header "
+			"Command, or the jump is from a Trusted Descriptor to "
+			"a Job Descriptor, or because the target Descriptor "
+			"contains a Shared Descriptor." },
+		{ 0x0E, "Invalid MATH Command" },
+		{ 0x0F, "Invalid SIGNATURE Command" },
+		{ 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR "
+			"Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO "
+			"LOAD, or SEQ FIFO STORE decremented the input or "
+			"output sequence length below 0. This error may result "
+			"if a built-in PROTOCOL Command has encountered a "
+			"malformed PDU." },
+		{ 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
+		{ 0x12, "Shared Descriptor Header Error" },
+		{ 0x13, "Header Error. Invalid length or parity, or certain "
+			"other problems." },
+		{ 0x14, "Burster Error. Burster has reached an illegal "
+			"state." },
+		{ 0x15, "Context Register Length Error. The descriptor is "
+			"trying to read or write past the end of the Context "
+			"Register. A SEQ LOAD or SEQ STORE with the VLF bit "
+			"set was executed with too large a length in the "
+			"variable length register (VSOL for SEQ STORE or VSIL "
+			"for SEQ LOAD)." },
+		{ 0x16, "DMA Error" },
+		{ 0x17, "Reserved." },
+		{ 0x1A, "Job failed due to JR reset" },
+		{ 0x1B, "Job failed due to Fail Mode" },
+		{ 0x1C, "DECO Watchdog timer timeout error" },
+		{ 0x1D, "DECO tried to copy a key from another DECO but the "
+			"other DECO's Key Registers were locked" },
+		{ 0x1E, "DECO attempted to copy data from a DECO that had an "
+			"unmasked Descriptor error" },
+		{ 0x1F, "LIODN error. DECO was trying to share from itself or "
+			"from another DECO but the two Non-SEQ LIODN values "
+			"didn't match or the 'shared from' DECO's Descriptor "
+			"required that the SEQ LIODNs be the same and they "
+			"aren't." },
+		{ 0x20, "DECO has completed a reset initiated via the DRR "
+			"register" },
+		{ 0x21, "Nonce error. When using EKT (CCM) key encryption "
+			"option in the FIFO STORE Command, the Nonce counter "
+			"reached its maximum value and this encryption mode "
+			"can no longer be used." },
+		{ 0x22, "Meta data is too large (> 511 bytes) for TLS decap "
+			"(input frame; block ciphers) and IPsec decap (output "
+			"frame, when doing the next header byte update) and "
+			"DCRC (output frame)." },
+		{ 0x80, "DNR (do not run) error" },
+		{ 0x81, "undefined protocol command" },
+		{ 0x82, "invalid setting in PDB" },
+		{ 0x83, "Anti-replay LATE error" },
+		{ 0x84, "Anti-replay REPLAY error" },
+		{ 0x85, "Sequence number overflow" },
+		{ 0x86, "Sigver invalid signature" },
+		{ 0x87, "DSA Sign Illegal test descriptor" },
+		{ 0x88, "Protocol Format Error - A protocol has seen an error "
+			"in the format of data received. When running RSA, "
+			"this means that formatting with random padding was "
+			"used, and did not follow the form: 0x00, 0x02, 8-to-N "
+			"bytes of non-zero pad, 0x00, F data." },
+		{ 0x89, "Protocol Size Error - A protocol has seen an error in "
+			"size. When running RSA, pdb size N < (size of F) when "
+			"no formatting is used; or pdb size N < (F + 11) when "
+			"formatting is used." },
+		{ 0xC1, "Blob Command error: Undefined mode" },
+		{ 0xC2, "Blob Command error: Secure Memory Blob mode error" },
+		{ 0xC4, "Blob Command error: Black Blob key or input size "
+			"error" },
+		{ 0xC5, "Blob Command error: Invalid key destination" },
+		{ 0xC8, "Blob Command error: Trusted/Secure mode error" },
+		{ 0xF0, "IPsec TTL or hop limit field either came in as 0, "
+			"or was decremented to 0" },
+		{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
+	};
+	u8 desc_error = status & JRSTA_DECOERR_ERROR_MASK;
+	int i;
+
+	report_jump_idx(status, outstr);
+
+	for (i = 0; i < ARRAY_SIZE(desc_error_list); i++)
+		if (desc_error_list[i].value == desc_error)
+			break;
+
+	if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text) {
+		SPRINTFCAT(outstr, "%s", desc_error_list[i].error_text,
+			   strlen(desc_error_list[i].error_text));
+	} else {
+		SPRINTFCAT(outstr, "unidentified error value 0x%02x",
+			   desc_error, sizeof("ff"));
+	}
+}
+
+static void report_jr_status(u32 status, char *outstr)
+{
+	SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
+}
+
+static void report_cond_code_status(u32 status, char *outstr)
+{
+	SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
+}
+
+char *caam_jr_strstatus(char *outstr, u32 status)
+{
+	struct stat_src {
+		void (*report_ssed)(u32 status, char *outstr);
+		char *error;
+	} status_src[] = {
+		{ NULL, "No error" },
+		{ NULL, NULL },
+		{ report_ccb_status, "CCB" },
+		{ report_jump_status, "Jump" },
+		{ report_deco_status, "DECO" },
+		{ NULL, NULL },
+		{ report_jr_status, "Job Ring" },
+		{ report_cond_code_status, "Condition Code" },
+	};
+	u32 ssrc = status >> JRSTA_SSRC_SHIFT;
+
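+	/*
+	 * Status sources 1 and 5 are reserved (NULL entries above) and
+	 * are not expected from hardware, so .error is assumed valid.
+	 */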
+	sprintf(outstr, "%s: ", status_src[ssrc].error);
+
+	if (status_src[ssrc].report_ssed)
+		status_src[ssrc].report_ssed(status, outstr);
+
+	return outstr;
+}
+EXPORT_SYMBOL(caam_jr_strstatus);
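+
+/*
+ * Caller usage sketch (illustrative):
+ *
+ *	char outstr[CAAM_ERROR_STR_MAX];
+ *
+ *	if (status)
+ *		dev_err(dev, "%s\n", caam_jr_strstatus(outstr, status));
+ */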
diff --git a/ap/os/linux/linux-3.4.x/drivers/crypto/caam/error.h b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/error.h
new file mode 100644
index 0000000..02c7baa
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/error.h
@@ -0,0 +1,11 @@
+/*
+ * CAAM Error Reporting code header
+ *
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef CAAM_ERROR_H
+#define CAAM_ERROR_H
+#define CAAM_ERROR_STR_MAX 302
+extern char *caam_jr_strstatus(char *outstr, u32 status);
+#endif /* CAAM_ERROR_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/crypto/caam/intern.h b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/intern.h
new file mode 100644
index 0000000..a34be01
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/intern.h
@@ -0,0 +1,113 @@
+/*
+ * CAAM/SEC 4.x driver backend
+ * Private/internal definitions between modules
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef INTERN_H
+#define INTERN_H
+
+#define JOBR_UNASSIGNED 0
+#define JOBR_ASSIGNED 1
+
+/* Currently comes from Kconfig param as a power of 2 (driver requirement) */
+#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
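+/* e.g., the default RINGSIZE of 9 gives JOBR_DEPTH = 512 */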
+
+/* Kconfig params for interrupt coalescing if selected (else zero) */
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_INTC
+#define JOBR_INTC JRCFG_ICEN
+#define JOBR_INTC_TIME_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
+#define JOBR_INTC_COUNT_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
+#else
+#define JOBR_INTC 0
+#define JOBR_INTC_TIME_THLD 0
+#define JOBR_INTC_COUNT_THLD 0
+#endif
+
+/*
+ * Storage for tracking each in-process entry moving across a ring
+ * Each entry on an output ring needs one of these
+ */
+struct caam_jrentry_info {
+	void (*callbk)(struct device *dev, u32 *desc, u32 status, void *arg);
+	void *cbkarg;	/* Argument per ring entry */
+	u32 *desc_addr_virt;	/* Stored virt addr for postprocessing */
+	dma_addr_t desc_addr_dma;	/* Stored bus addr for done matching */
+	u32 desc_size;	/* Stored size for postprocessing, header derived */
+};
+
+/* Private sub-storage for a single JobR */
+struct caam_drv_private_jr {
+	struct device *parentdev;	/* points back to controller dev */
+	int ridx;
+	struct caam_job_ring __iomem *rregs;	/* JobR's register space */
+	struct tasklet_struct irqtask[NR_CPUS];
+	int irq;			/* One per queue */
+	int assign;			/* busy/free */
+
+	/* Job ring info */
+	int ringsize;	/* Size of rings (assume input = output) */
+	struct caam_jrentry_info *entinfo;	/* Alloc'ed 1 per ring entry */
+	spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
+	int inp_ring_write_index;	/* Input index "tail" */
+	int head;			/* entinfo (s/w ring) head index */
+	dma_addr_t *inpring;	/* Base of input ring, alloc DMA-safe */
+	spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
+	int out_ring_read_index;	/* Output index "tail" */
+	int tail;			/* entinfo (s/w ring) tail index */
+	struct jr_outentry *outring;	/* Base of output ring, DMA-safe */
+};
+
+/*
+ * Driver-private storage for a single CAAM block instance
+ */
+struct caam_drv_private {
+
+	struct device *dev;
+	struct device **jrdev; /* Alloc'ed array per sub-device */
+	spinlock_t jr_alloc_lock;
+	struct platform_device *pdev;
+
+	/* Physical-presence section */
+	struct caam_ctrl *ctrl; /* controller region */
+	struct caam_deco **deco; /* DECO/CCB views */
+	struct caam_assurance *ac;
+	struct caam_queue_if *qi; /* QI control region */
+
+	/*
+	 * Detected geometry block. Filled in from device tree if powerpc,
+	 * or from register-based version detection code
+	 */
+	u8 total_jobrs;		/* Total Job Rings in device */
+	u8 qi_present;		/* Nonzero if QI present in device */
+	int secvio_irq;		/* Security violation interrupt number */
+
+	/* which jr allocated to scatterlist crypto */
+	atomic_t tfm_count ____cacheline_aligned;
+	int num_jrs_for_algapi;
+	struct device **algapi_jr;
+	/* list of registered crypto algorithms (mk generic context handle?) */
+	struct list_head alg_list;
+
+	/*
+	 * debugfs entries for developer view into driver/device
+	 * variables at runtime.
+	 */
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dfs_root;
+	struct dentry *ctl; /* controller dir */
+	struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
+	struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
+	struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
+	struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
+
+	struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
+	struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
+#endif
+};
+
+void caam_jr_algapi_init(struct device *dev);
+void caam_jr_algapi_remove(struct device *dev);
+#endif /* INTERN_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/crypto/caam/jr.c b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/jr.c
new file mode 100644
index 0000000..340fa32
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/jr.c
@@ -0,0 +1,517 @@
+/*
+ * CAAM/SEC 4.x transport/backend driver
+ * JobR backend functionality
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "jr.h"
+#include "desc.h"
+#include "intern.h"
+
+/* Main per-ring interrupt handler */
+static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
+{
+	struct device *dev = st_dev;
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	u32 irqstate;
+
+	/*
+	 * Check the output ring for ready responses, kick
+	 * tasklet if jobs done.
+	 */
+	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
+	if (!irqstate)
+		return IRQ_NONE;
+
+	/*
+	 * If JobR error, we've got more development work to do.
+	 * Flag a bug now, but we really need to shut down and
+	 * restart the queue (and fix code).
+	 */
+	if (irqstate & JRINT_JR_ERROR) {
+		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
+		BUG();
+	}
+
+	/* mask valid interrupts */
+	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+	/* Have valid interrupt at this point, just ACK and trigger */
+	wr_reg32(&jrp->rregs->jrintstatus, irqstate);
+
+	preempt_disable();
+	tasklet_schedule(&jrp->irqtask[smp_processor_id()]);
+	preempt_enable();
+
+	return IRQ_HANDLED;
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
+{
+	int hw_idx, sw_idx, i, head, tail;
+	struct device *dev = (struct device *)devarg;
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
+	u32 *userdesc, userstatus;
+	void *userarg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&jrp->outlock, flags);
+
+	head = ACCESS_ONCE(jrp->head);
+	sw_idx = tail = jrp->tail;
+
+	while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
+	       rd_reg32(&jrp->rregs->outring_used)) {
+
+		hw_idx = jrp->out_ring_read_index;
+		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
+			sw_idx = (tail + i) & (JOBR_DEPTH - 1);
+
+			smp_read_barrier_depends();
+
+			if (jrp->outring[hw_idx].desc ==
+			    jrp->entinfo[sw_idx].desc_addr_dma)
+				break; /* found */
+		}
+		/* we should never fail to find a matching descriptor */
+		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
+
+		/* Unmap just-run descriptor so we can post-process */
+		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
+				 jrp->entinfo[sw_idx].desc_size,
+				 DMA_TO_DEVICE);
+
+		/* mark completed, avoid matching on a recycled desc addr */
+		jrp->entinfo[sw_idx].desc_addr_dma = 0;
+
+		/* Stash callback params for use outside of lock */
+		usercall = jrp->entinfo[sw_idx].callbk;
+		userarg = jrp->entinfo[sw_idx].cbkarg;
+		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
+		userstatus = jrp->outring[hw_idx].jrstatus;
+
+		smp_mb();
+
+		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
+					   (JOBR_DEPTH - 1);
+
+		/*
+		 * if this job completed out-of-order, do not increment
+		 * the tail.  Otherwise, increment tail by 1 plus the
+		 * number of subsequent jobs already completed out-of-order
+		 */
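		/*
		 * Worked example (illustrative): with JOBR_DEPTH = 8,
		 * tail = 2 and head = 6, jobs 2..5 are in flight. If job
		 * 3 completes first, sw_idx (3) != tail (2), so only its
		 * desc_addr_dma is zeroed above. When job 2 later
		 * completes, the loop below advances tail past 2 and the
		 * already-done 3, leaving tail = 4.
		 */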
+		if (sw_idx == tail) {
+			do {
+				tail = (tail + 1) & (JOBR_DEPTH - 1);
+				smp_read_barrier_depends();
+			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
+				 jrp->entinfo[tail].desc_addr_dma == 0);
+
+			jrp->tail = tail;
+		}
+
+		/* set done */
+		wr_reg32(&jrp->rregs->outring_rmvd, 1);
+
+		spin_unlock_irqrestore(&jrp->outlock, flags);
+
+		/* Finally, execute user's callback */
+		usercall(dev, userdesc, userstatus, userarg);
+
+		spin_lock_irqsave(&jrp->outlock, flags);
+
+		head = ACCESS_ONCE(jrp->head);
+		sw_idx = tail = jrp->tail;
+	}
+
+	spin_unlock_irqrestore(&jrp->outlock, flags);
+
+	/* reenable / unmask IRQs */
+	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+}
+
+/**
+ * caam_jr_register() - Alloc a ring for someone to use as needed. Returns
+ * an ordinal of the rings allocated, else returns -ENODEV if no rings
+ * are available.
+ * @ctrldev: points to the controller level dev (parent) that
+ *           owns rings available for use.
+ * @dev:     points to where a pointer to the newly allocated queue's
+ *           dev can be written to if successful.
+ **/
+int caam_jr_register(struct device *ctrldev, struct device **rdev)
+{
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+	struct caam_drv_private_jr *jrpriv = NULL;
+	unsigned long flags;
+	int ring;
+
+	/* Lock, if free ring - assign, unlock */
+	spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
+	for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
+		jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
+		if (jrpriv->assign == JOBR_UNASSIGNED) {
+			jrpriv->assign = JOBR_ASSIGNED;
+			*rdev = ctrlpriv->jrdev[ring];
+			spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
+			return ring;
+		}
+	}
+
+	/* No unassigned rings free; report lack of rings to caller */
+	spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
+	*rdev = NULL;
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL(caam_jr_register);
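+
+/*
+ * Hedged usage sketch (illustrative only): a higher layer acquires a
+ * ring at setup time and releases it on teardown. "ctrldev" is the
+ * controller device; error handling beyond -ENODEV is elided.
+ *
+ *	struct device *jrdev;
+ *	int ring = caam_jr_register(ctrldev, &jrdev);
+ *
+ *	if (ring < 0)
+ *		return ring;	// no free rings: -ENODEV
+ *	// ... enqueue jobs on jrdev via caam_jr_enqueue() ...
+ *	caam_jr_deregister(jrdev);	// -EBUSY if jobs still pending
+ */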
+
+/**
+ * caam_jr_deregister() - Deregister an API user and release the queue.
+ * Returns 0 if OK, or -EBUSY if the queue still contains pending entries
+ * or unprocessed results at the time of the call.
+ * @rdev: points to the dev that identifies the queue to
+ *        be released.
+ **/
+int caam_jr_deregister(struct device *rdev)
+{
+	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
+	struct caam_drv_private *ctrlpriv;
+	unsigned long flags;
+
+	/* Get the owning controller's private space */
+	ctrlpriv = dev_get_drvdata(jrpriv->parentdev);
+
+	/*
+	 * Make sure ring empty before release
+	 */
+	if (rd_reg32(&jrpriv->rregs->outring_used) ||
+	    (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH))
+		return -EBUSY;
+
+	/* Release ring */
+	spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
+	jrpriv->assign = JOBR_UNASSIGNED;
+	spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(caam_jr_deregister);
+
+/**
+ * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
+ * -EBUSY if the queue is full, -EIO if it cannot map the caller's
+ * descriptor.
+ * @dev:  device of the job ring to be used. This device should have
+ *        been assigned prior by caam_jr_register().
+ * @desc: points to a job descriptor that executes our request. All
+ *        descriptors (and all referenced data) must be in a DMAable
+ *        region, and all data references must be physical addresses
+ *        accessible to CAAM (i.e. within a PAMU window granted
+ *        to it).
+ * @cbk:  pointer to a callback function to be invoked upon completion
+ *        of this request. This has the form:
+ *        callback(struct device *dev, u32 *desc, u32 stat, void *arg)
+ *        where:
+ *        @dev:    contains the job ring device that processed this
+ *                 response.
+ *        @desc:   descriptor that initiated the request, same as
+ *                 the "desc" passed to caam_jr_enqueue().
+ *        @status: untranslated status received from CAAM. See the
+ *                 reference manual for a detailed description of
+ *                 error meaning, or see the JRSTA definitions in the
+ *                 register header file
+ *        @areq:   optional pointer to an argument passed with the
+ *                 original request
+ * @areq: optional pointer to a user argument for use at callback
+ *        time.
+ **/
+int caam_jr_enqueue(struct device *dev, u32 *desc,
+		    void (*cbk)(struct device *dev, u32 *desc,
+				u32 status, void *areq),
+		    void *areq)
+{
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	struct caam_jrentry_info *head_entry;
+	unsigned long flags;
+	int head, tail, desc_size;
+	dma_addr_t desc_dma;
+
+	desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
+	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, desc_dma)) {
+		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
+		return -EIO;
+	}
+
+	spin_lock_irqsave(&jrp->inplock, flags);
+
+	head = jrp->head;
+	tail = ACCESS_ONCE(jrp->tail);
+
+	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
+	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
+		spin_unlock_irqrestore(&jrp->inplock, flags);
+		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
+		return -EBUSY;
+	}
+
+	head_entry = &jrp->entinfo[head];
+	head_entry->desc_addr_virt = desc;
+	head_entry->desc_size = desc_size;
+	head_entry->callbk = (void *)cbk;
+	head_entry->cbkarg = areq;
+	head_entry->desc_addr_dma = desc_dma;
+
+	jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
+
+	smp_wmb();
+
+	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
+				    (JOBR_DEPTH - 1);
+	jrp->head = (head + 1) & (JOBR_DEPTH - 1);
+
+	wmb();
+
+	wr_reg32(&jrp->rregs->inpring_jobadd, 1);
+
+	spin_unlock_irqrestore(&jrp->inplock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(caam_jr_enqueue);
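+
+/*
+ * Hedged usage sketch (illustrative, not driver code): a synchronous
+ * caller can pair caam_jr_enqueue() with a completion. "jrdev" is a
+ * ring dev from caam_jr_register(); "desc" must be a fully built job
+ * descriptor in DMA-able memory (building one via the desc.h inlines
+ * is out of scope here).
+ *
+ *	struct jr_result {
+ *		struct completion comp;
+ *		u32 status;
+ *	};
+ *
+ *	static void jr_done(struct device *dev, u32 *desc, u32 status,
+ *			    void *arg)
+ *	{
+ *		struct jr_result *res = arg;
+ *
+ *		res->status = status;
+ *		complete(&res->comp);
+ *	}
+ *
+ *	struct jr_result res;
+ *	int ret;
+ *
+ *	init_completion(&res.comp);
+ *	ret = caam_jr_enqueue(jrdev, desc, jr_done, &res);
+ *	if (!ret) {
+ *		wait_for_completion(&res.comp);
+ *		ret = res.status ? -EIO : 0;
+ *	}
+ */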
+
+static int caam_reset_hw_jr(struct device *dev)
+{
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	unsigned int timeout = 100000;
+
+	/*
+	 * mask interrupts since we are going to poll
+	 * for reset completion status
+	 */
+	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+	/* initiate flush (required prior to reset) */
+	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
+		JRINT_ERR_HALT_INPROGRESS) && --timeout)
+		cpu_relax();
+
+	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
+	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
+		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
+		return -EIO;
+	}
+
+	/* initiate reset */
+	timeout = 100000;
+	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
+		cpu_relax();
+
+	if (timeout == 0) {
+		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
+		return -EIO;
+	}
+
+	/* unmask interrupts */
+	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+	return 0;
+}
+
+/*
+ * Init JobR independent of platform property detection
+ */
+static int caam_jr_init(struct device *dev)
+{
+	struct caam_drv_private_jr *jrp;
+	dma_addr_t inpbusaddr, outbusaddr;
+	int i, error;
+
+	jrp = dev_get_drvdata(dev);
+
+	/* Connect job ring interrupt handler. */
+	for_each_possible_cpu(i)
+		tasklet_init(&jrp->irqtask[i], caam_jr_dequeue,
+			     (unsigned long)dev);
+
+	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+			    "caam-jobr", dev);
+	if (error) {
+		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
+			jrp->ridx, jrp->irq);
+		irq_dispose_mapping(jrp->irq);
+		jrp->irq = 0;
+		return -EINVAL;
+	}
+
+	error = caam_reset_hw_jr(dev);
+	if (error)
+		return error;
+
+	jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH,
+			       GFP_KERNEL | GFP_DMA);
+	jrp->outring = kzalloc(sizeof(struct jr_outentry) *
+			       JOBR_DEPTH, GFP_KERNEL | GFP_DMA);
+
+	jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
+			       GFP_KERNEL);
+
+	if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
+	    (jrp->entinfo == NULL)) {
+		dev_err(dev, "can't allocate job rings for %d\n",
+			jrp->ridx);
+		/* kfree(NULL) is a no-op, so partial allocations are safe */
+		kfree(jrp->inpring);
+		kfree(jrp->outring);
+		kfree(jrp->entinfo);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < JOBR_DEPTH; i++)
+		jrp->entinfo[i].desc_addr_dma = !0;
+
+	/* Setup rings */
+	inpbusaddr = dma_map_single(dev, jrp->inpring,
+				    sizeof(u32 *) * JOBR_DEPTH,
+				    DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, inpbusaddr)) {
+		dev_err(dev, "caam_jr_init(): can't map input ring\n");
+		kfree(jrp->inpring);
+		kfree(jrp->outring);
+		kfree(jrp->entinfo);
+		return -EIO;
+	}
+
+	outbusaddr = dma_map_single(dev, jrp->outring,
+				    sizeof(struct jr_outentry) * JOBR_DEPTH,
+				    DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, outbusaddr)) {
+		dev_err(dev, "caam_jr_init(): can't map output ring\n");
+		dma_unmap_single(dev, inpbusaddr,
+				 sizeof(u32 *) * JOBR_DEPTH,
+				 DMA_BIDIRECTIONAL);
+		kfree(jrp->inpring);
+		kfree(jrp->outring);
+		kfree(jrp->entinfo);
+		return -EIO;
+	}
+
+	jrp->inp_ring_write_index = 0;
+	jrp->out_ring_read_index = 0;
+	jrp->head = 0;
+	jrp->tail = 0;
+
+	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
+	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
+	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
+	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
+
+	jrp->ringsize = JOBR_DEPTH;
+
+	spin_lock_init(&jrp->inplock);
+	spin_lock_init(&jrp->outlock);
+
+	/* Select interrupt coalescing parameters */
+	setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
+		  (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
+		  (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
+
+	jrp->assign = JOBR_UNASSIGNED;
+	return 0;
+}
+
+/*
+ * Shutdown JobR independent of platform property code
+ */
+int caam_jr_shutdown(struct device *dev)
+{
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	dma_addr_t inpbusaddr, outbusaddr;
+	int ret, i;
+
+	ret = caam_reset_hw_jr(dev);
+
+	for_each_possible_cpu(i)
+		tasklet_kill(&jrp->irqtask[i]);
+
+	/* Release interrupt */
+	free_irq(jrp->irq, dev);
+
+	/* Free rings */
+	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+	dma_unmap_single(dev, outbusaddr,
+			 sizeof(struct jr_outentry) * JOBR_DEPTH,
+			 DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, inpbusaddr, sizeof(u32 *) * JOBR_DEPTH,
+			 DMA_BIDIRECTIONAL);
+	kfree(jrp->outring);
+	kfree(jrp->inpring);
+	kfree(jrp->entinfo);
+
+	return ret;
+}
+
+/*
+ * Probe routine for each detected JobR subsystem. It assumes that
+ * property detection was picked up externally.
+ */
+int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
+		  int ring)
+{
+	struct device *ctrldev, *jrdev;
+	struct platform_device *jr_pdev;
+	struct caam_drv_private *ctrlpriv;
+	struct caam_drv_private_jr *jrpriv;
+	u32 *jroffset;
+	int error;
+
+	ctrldev = &pdev->dev;
+	ctrlpriv = dev_get_drvdata(ctrldev);
+
+	jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
+			 GFP_KERNEL);
+	if (jrpriv == NULL) {
+		dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
+			ring);
+		return -ENOMEM;
+	}
+	jrpriv->parentdev = ctrldev; /* point back to parent */
+	jrpriv->ridx = ring; /* save ring identity relative to detection */
+
+	/*
+	 * Derive a pointer to the detected JobRs regs
+	 * Driver has already iomapped the entire space, we just
+	 * need to add in the offset to this JobR. Don't know if I
+	 * like this long-term, but it'll run
+	 */
+	jroffset = (u32 *)of_get_property(np, "reg", NULL);
+	if (jroffset == NULL) {
+		dev_err(ctrldev, "missing reg property for job ring %d\n",
+			ring);
+		kfree(jrpriv);
+		return -EINVAL;
+	}
+	jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
+							 + *jroffset);
+
+	/* Build a local dev for each detected queue */
+	jr_pdev = of_platform_device_create(np, NULL, ctrldev);
+	if (jr_pdev == NULL) {
+		kfree(jrpriv);
+		return -EINVAL;
+	}
+	jrdev = &jr_pdev->dev;
+	dev_set_drvdata(jrdev, jrpriv);
+	ctrlpriv->jrdev[ring] = jrdev;
+
+	/* Identify the interrupt */
+	jrpriv->irq = of_irq_to_resource(np, 0, NULL);
+
+	/* Now do the platform independent part */
+	error = caam_jr_init(jrdev); /* now turn on hardware */
+	if (error) {
+		kfree(jrpriv);
+		return error;
+	}
+
+	return error;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/crypto/caam/jr.h b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/jr.h
new file mode 100644
index 0000000..c23df39
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/jr.h
@@ -0,0 +1,21 @@
+/*
+ * CAAM public-level include definitions for the JobR backend
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef JR_H
+#define JR_H
+
+/* Prototypes for backend-level services exposed to APIs */
+int caam_jr_register(struct device *ctrldev, struct device **rdev);
+int caam_jr_deregister(struct device *rdev);
+int caam_jr_enqueue(struct device *dev, u32 *desc,
+		    void (*cbk)(struct device *dev, u32 *desc, u32 status,
+				void *areq),
+		    void *areq);
+
+extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
+			 int ring);
+extern int caam_jr_shutdown(struct device *dev);
+#endif /* JR_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/crypto/caam/regs.h b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/regs.h
new file mode 100644
index 0000000..e9f7a70
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/crypto/caam/regs.h
@@ -0,0 +1,662 @@
+/*
+ * CAAM hardware register-level view
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef REGS_H
+#define REGS_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+/*
+ * Architecture-specific register access methods
+ *
+ * CAAM's bus-addressable registers are 64 bits internally.
+ * They have been wired to be safely accessible on 32-bit
+ * architectures, however. Registers were organized such
+ * that (a) they can be contained in 32 bits, (b) if not, then they
+ * can be treated as two 32-bit entities, or finally (c) if they
+ * must be treated as a single 64-bit value, then this can safely
+ * be done with two 32-bit cycles.
+ *
+ * For 32-bit operations on 64-bit values, CAAM follows the same
+ * 64-bit register access conventions as its predecessors, in that
+ * writes are "triggered" by a write to the register at the numerically
+ * higher address, thus, a full 64-bit write cycle requires a write
+ * to the lower address, followed by a write to the higher address,
+ * which will latch/execute the write cycle.
+ *
+ * For example, let's assume a SW reset of CAAM through the master
+ * configuration register.
+ * - SWRST is in bit 31 of MCFG.
+ * - MCFG begins at base+0x0000.
+ * - Bits 63-32 are a 32-bit word at base+0x0000 (numerically-lower)
+ * - Bits 31-0 are a 32-bit word at base+0x0004 (numerically-higher)
+ *
+ * (and on Power, the convention is 0-31, 32-63, I know...)
+ *
+ * Assuming a 64-bit write to this MCFG to perform a software reset
+ * would then require a write of 0 to base+0x0000, followed by a
+ * write of 0x80000000 to base+0x0004, which would "execute" the
+ * reset.
+ *
+ * Of course, since MCFG 63-32 is all zero, we could cheat and simply
+ * write 0x80000000 to base+0x0004, and the reset would work fine.
+ * However, since CAAM does contain some write-and-read-intended
+ * 64-bit registers, this code defines 64-bit access methods for
+ * the sake of internal consistency and simplicity, and so that a
+ * clean transition to 64-bit is possible when it becomes necessary.
+ *
+ * There are limitations to this that the developer must recognize.
+ * 32-bit architectures cannot enforce an atomic 64-bit operation.
+ * Therefore:
+ *
+ * - On writes, since the HW is assumed to latch the cycle on the
+ *   write of the higher-numeric-address word, ordered writes
+ *   work OK.
+ *
+ * - For reads, where a register contains a relevant value of more
+ *   than 32 bits, the hardware employs logic to latch the other
+ *   "half" of the data until read, ensuring an accurate value.
+ *   This is of particular relevance when dealing with CAAM's
+ *   performance counters.
+ *
+ */
+
+#ifdef __BIG_ENDIAN
+#define wr_reg32(reg, data) out_be32(reg, data)
+#define rd_reg32(reg) in_be32(reg)
+#ifdef CONFIG_64BIT
+#define wr_reg64(reg, data) out_be64(reg, data)
+#define rd_reg64(reg) in_be64(reg)
+#endif
+#else
+#ifdef __LITTLE_ENDIAN
+/* Note: __raw_write{l,q}() take the value first, then the address */
+#define wr_reg32(reg, data) __raw_writel(data, reg)
+#define rd_reg32(reg) __raw_readl(reg)
+#ifdef CONFIG_64BIT
+#define wr_reg64(reg, data) __raw_writeq(data, reg)
+#define rd_reg64(reg) __raw_readq(reg)
+#endif
+#endif
+#endif
+
+#ifndef CONFIG_64BIT
+static inline void wr_reg64(u64 __iomem *reg, u64 data)
+{
+	wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32);
+	wr_reg32((u32 __iomem *)reg + 1, data & 0x00000000ffffffffull);
+}
+
+static inline u64 rd_reg64(u64 __iomem *reg)
+{
+	return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) |
+		((u64)rd_reg32((u32 __iomem *)reg + 1));
+}
+#endif
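+
+/*
+ * Illustrative example (assumes "base" is the ioremapped CAAM register
+ * base): the MCFG software reset described above becomes one logical
+ * 64-bit write using MCFGR_SWRESET (defined below). On 32-bit builds
+ * the helper expands this into the lower-address word first, then the
+ * latching higher-address word:
+ *
+ *	wr_reg64((u64 __iomem *)base, (u64)MCFGR_SWRESET);
+ *
+ * i.e. 0x00000000 to base+0x0000, then 0x80000000 to base+0x0004.
+ */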
+
+/*
+ * jr_outentry
+ * Represents each entry in a JobR output ring
+ */
+struct jr_outentry {
+	dma_addr_t desc;/* Pointer to completed descriptor */
+	u32 jrstatus;	/* Status for completed descriptor */
+} __packed;
+
+/*
+ * caam_perfmon - Performance Monitor/Secure Memory Status/
+ *                CAAM Global Status/Component Version IDs
+ *
+ * Spans f00-fff wherever instantiated
+ */
+
+/* Number of DECOs */
+#define CHA_NUM_DECONUM_SHIFT	56
+#define CHA_NUM_DECONUM_MASK	(0xfull << CHA_NUM_DECONUM_SHIFT)
+
+struct caam_perfmon {
+	/* Performance Monitor Registers			f00-f9f */
+	u64 req_dequeued;	/* PC_REQ_DEQ - Dequeued Requests	     */
+	u64 ob_enc_req;	/* PC_OB_ENC_REQ - Outbound Encrypt Requests */
+	u64 ib_dec_req;	/* PC_IB_DEC_REQ - Inbound Decrypt Requests  */
+	u64 ob_enc_bytes;	/* PC_OB_ENCRYPT - Outbound Bytes Encrypted  */
+	u64 ob_prot_bytes;	/* PC_OB_PROTECT - Outbound Bytes Protected  */
+	u64 ib_dec_bytes;	/* PC_IB_DECRYPT - Inbound Bytes Decrypted   */
+	u64 ib_valid_bytes;	/* PC_IB_VALIDATED Inbound Bytes Validated   */
+	u64 rsvd[13];
+
+	/* CAAM Hardware Instantiation Parameters		fa0-fbf */
+	u64 cha_rev;		/* CRNR - CHA Revision Number		*/
+#define CTPR_QI_SHIFT		57
+#define CTPR_QI_MASK		(0x1ull << CTPR_QI_SHIFT)
+	u64 comp_parms;	/* CTPR - Compile Parameters Register	*/
+	u64 rsvd1[2];
+
+	/* CAAM Global Status					fc0-fdf */
+	u64 faultaddr;	/* FAR  - Fault Address		*/
+	u32 faultliodn;	/* FALR - Fault Address LIODN	*/
+	u32 faultdetail;	/* FADR - Fault Addr Detail	*/
+	u32 rsvd2;
+	u32 status;		/* CSTA - CAAM Status */
+	u64 rsvd3;
+
+	/* Component Instantiation Parameters			fe0-fff */
+	u32 rtic_id;		/* RVID - RTIC Version ID	*/
+	u32 ccb_id;		/* CCBVID - CCB Version ID	*/
+	u64 cha_id;		/* CHAVID - CHA Version ID	*/
+	u64 cha_num;		/* CHANUM - CHA Number		*/
+	u64 caam_id;		/* CAAMVID - CAAM Version ID	*/
+};
+
+/* LIODN programming for DMA configuration */
+#define MSTRID_LOCK_LIODN	0x80000000
+#define MSTRID_LOCK_MAKETRUSTED	0x00010000	/* only for JR masterid */
+
+#define MSTRID_LIODN_MASK	0x0fff
+struct masterid {
+	u32 liodn_ms;	/* lock and make-trusted control bits */
+	u32 liodn_ls;	/* LIODN for non-sequence and seq access */
+};
+
+/* Partition ID for DMA configuration */
+struct partid {
+	u32 rsvd1;
+	u32 pidr;	/* partition ID, DECO */
+};
+
+/* RNG test mode (replicated twice in some configurations) */
+/* Padded out to 0x100 */
+struct rngtst {
+	u32 mode;		/* RTSTMODEx - Test mode */
+	u32 rsvd1[3];
+	u32 reset;		/* RTSTRESETx - Test reset control */
+	u32 rsvd2[3];
+	u32 status;		/* RTSTSSTATUSx - Test status */
+	u32 rsvd3;
+	u32 errstat;		/* RTSTERRSTATx - Test error status */
+	u32 rsvd4;
+	u32 errctl;		/* RTSTERRCTLx - Test error control */
+	u32 rsvd5;
+	u32 entropy;		/* RTSTENTROPYx - Test entropy */
+	u32 rsvd6[15];
+	u32 verifctl;	/* RTSTVERIFCTLx - Test verification control */
+	u32 rsvd7;
+	u32 verifstat;	/* RTSTVERIFSTATx - Test verification status */
+	u32 rsvd8;
+	u32 verifdata;	/* RTSTVERIFDx - Test verification data */
+	u32 rsvd9;
+	u32 xkey;		/* RTSTXKEYx - Test XKEY */
+	u32 rsvd10;
+	u32 oscctctl;	/* RTSTOSCCTCTLx - Test osc. counter control */
+	u32 rsvd11;
+	u32 oscct;		/* RTSTOSCCTx - Test oscillator counter */
+	u32 rsvd12;
+	u32 oscctstat;	/* RTSTODCCTSTATx - Test osc counter status */
+	u32 rsvd13[2];
+	u32 ofifo[4];	/* RTSTOFIFOx - Test output FIFO */
+	u32 rsvd14[15];
+};
+
+/*
+ * caam_ctrl - basic core configuration
+ * starts base + 0x0000 padded out to 0x1000
+ */
+
+#define KEK_KEY_SIZE		8
+#define TKEK_KEY_SIZE		8
+#define TDSK_KEY_SIZE		8
+
+#define DECO_RESET	1	/* Use with DECO reset/availability regs */
+#define DECO_RESET_0	(DECO_RESET << 0)
+#define DECO_RESET_1	(DECO_RESET << 1)
+#define DECO_RESET_2	(DECO_RESET << 2)
+#define DECO_RESET_3	(DECO_RESET << 3)
+#define DECO_RESET_4	(DECO_RESET << 4)
+
+struct caam_ctrl {
+	/* Basic Configuration Section				000-01f */
+	/* Read/Writable					        */
+	u32 rsvd1;
+	u32 mcr;		/* MCFG      Master Config Register  */
+	u32 rsvd2[2];
+
+	/* Bus Access Configuration Section			010-11f */
+	/* Read/Writable                                                */
+	struct masterid jr_mid[4];	/* JRxLIODNR - JobR LIODN setup */
+	u32 rsvd3[12];
+	struct masterid rtic_mid[4];	/* RTICxLIODNR - RTIC LIODN setup */
+	u32 rsvd4[7];
+	u32 deco_rq;			/* DECORR - DECO Request */
+	struct partid deco_mid[5];	/* DECOxLIODNR - 1 per DECO */
+	u32 rsvd5[22];
+
+	/* DECO Availability/Reset Section			120-3ff */
+	u32 deco_avail;		/* DAR - DECO availability */
+	u32 deco_reset;		/* DRR - DECO reset */
+	u32 rsvd6[182];
+
+	/* Key Encryption/Decryption Configuration              400-5ff */
+	/* Read/Writable only while in Non-secure mode                  */
+	u32 kek[KEK_KEY_SIZE];	/* JDKEKR - Key Encryption Key */
+	u32 tkek[TKEK_KEY_SIZE];	/* TDKEKR - Trusted Desc KEK */
+	u32 tdsk[TDSK_KEY_SIZE];	/* TDSKR - Trusted Desc Signing Key */
+	u32 rsvd7[32];
+	u64 sknonce;			/* SKNR - Secure Key Nonce */
+	u32 rsvd8[70];
+
+	/* RNG Test/Verification/Debug Access                   600-7ff */
+	/* (Useful in Test/Debug modes only...)                         */
+	struct rngtst rtst[2];
+
+	u32 rsvd9[448];
+
+	/* Performance Monitor                                  f00-fff */
+	struct caam_perfmon perfmon;
+};
+
+/*
+ * Controller master config register defs
+ */
+#define MCFGR_SWRESET		0x80000000 /* software reset */
+#define MCFGR_WDENABLE		0x40000000 /* DECO watchdog enable */
+#define MCFGR_WDFAIL		0x20000000 /* DECO watchdog force-fail */
+#define MCFGR_DMA_RESET		0x10000000
+#define MCFGR_LONG_PTR		0x00010000 /* Use >32-bit desc addressing */
+
+/* AXI read cache control */
+#define MCFGR_ARCACHE_SHIFT	12
+#define MCFGR_ARCACHE_MASK	(0xf << MCFGR_ARCACHE_SHIFT)
+
+/* AXI write cache control */
+#define MCFGR_AWCACHE_SHIFT	8
+#define MCFGR_AWCACHE_MASK	(0xf << MCFGR_AWCACHE_SHIFT)
+
+/* AXI pipeline depth */
+#define MCFGR_AXIPIPE_SHIFT	4
+#define MCFGR_AXIPIPE_MASK	(0xf << MCFGR_AXIPIPE_SHIFT)
+
+#define MCFGR_AXIPRI		0x00000008 /* Assert AXI priority sideband */
+#define MCFGR_BURST_64		0x00000001 /* Max burst size */
+
+/*
+ * caam_job_ring - direct job ring setup
+ * 1-4 possible per instantiation, base + 1000/2000/3000/4000
+ * Padded out to 0x1000
+ */
+struct caam_job_ring {
+	/* Input ring */
+	u64 inpring_base;	/* IRBAx -  Input desc ring baseaddr */
+	u32 rsvd1;
+	u32 inpring_size;	/* IRSx - Input ring size */
+	u32 rsvd2;
+	u32 inpring_avail;	/* IRSAx - Input ring room remaining */
+	u32 rsvd3;
+	u32 inpring_jobadd;	/* IRJAx - Input ring jobs added */
+
+	/* Output Ring */
+	u64 outring_base;	/* ORBAx - Output status ring base addr */
+	u32 rsvd4;
+	u32 outring_size;	/* ORSx - Output ring size */
+	u32 rsvd5;
+	u32 outring_rmvd;	/* ORJRx - Output ring jobs removed */
+	u32 rsvd6;
+	u32 outring_used;	/* ORSFx - Output ring slots full */
+
+	/* Status/Configuration */
+	u32 rsvd7;
+	u32 jroutstatus;	/* JRSTAx - JobR output status */
+	u32 rsvd8;
+	u32 jrintstatus;	/* JRINTx - JobR interrupt status */
+	u32 rconfig_hi;	/* JRxCFG - Ring configuration */
+	u32 rconfig_lo;
+
+	/* Indices. CAAM maintains as "heads" of each queue */
+	u32 rsvd9;
+	u32 inp_rdidx;	/* IRRIx - Input ring read index */
+	u32 rsvd10;
+	u32 out_wtidx;	/* ORWIx - Output ring write index */
+
+	/* Command/control */
+	u32 rsvd11;
+	u32 jrcommand;	/* JRCRx - JobR command */
+
+	u32 rsvd12[932];
+
+	/* Performance Monitor                                  f00-fff */
+	struct caam_perfmon perfmon;
+};
+
+#define JR_RINGSIZE_MASK	0x03ff
+/*
+ * jrstatus - Job Ring Output Status
+ * All values in lo word
+ * Also note, same values written out as status through QI
+ * in the command/status field of a frame descriptor
+ */
+#define JRSTA_SSRC_SHIFT            28
+#define JRSTA_SSRC_MASK             0xf0000000
+
+#define JRSTA_SSRC_NONE             0x00000000
+#define JRSTA_SSRC_CCB_ERROR        0x20000000
+#define JRSTA_SSRC_JUMP_HALT_USER   0x30000000
+#define JRSTA_SSRC_DECO             0x40000000
+#define JRSTA_SSRC_JRERROR          0x60000000
+#define JRSTA_SSRC_JUMP_HALT_CC     0x70000000
+
+#define JRSTA_DECOERR_JUMP          0x08000000
+#define JRSTA_DECOERR_INDEX_SHIFT   8
+#define JRSTA_DECOERR_INDEX_MASK    0xff00
+#define JRSTA_DECOERR_ERROR_MASK    0x00ff
+
+#define JRSTA_DECOERR_NONE          0x00
+#define JRSTA_DECOERR_LINKLEN       0x01
+#define JRSTA_DECOERR_LINKPTR       0x02
+#define JRSTA_DECOERR_JRCTRL        0x03
+#define JRSTA_DECOERR_DESCCMD       0x04
+#define JRSTA_DECOERR_ORDER         0x05
+#define JRSTA_DECOERR_KEYCMD        0x06
+#define JRSTA_DECOERR_LOADCMD       0x07
+#define JRSTA_DECOERR_STORECMD      0x08
+#define JRSTA_DECOERR_OPCMD         0x09
+#define JRSTA_DECOERR_FIFOLDCMD     0x0a
+#define JRSTA_DECOERR_FIFOSTCMD     0x0b
+#define JRSTA_DECOERR_MOVECMD       0x0c
+#define JRSTA_DECOERR_JUMPCMD       0x0d
+#define JRSTA_DECOERR_MATHCMD       0x0e
+#define JRSTA_DECOERR_SHASHCMD      0x0f
+#define JRSTA_DECOERR_SEQCMD        0x10
+#define JRSTA_DECOERR_DECOINTERNAL  0x11
+#define JRSTA_DECOERR_SHDESCHDR     0x12
+#define JRSTA_DECOERR_HDRLEN        0x13
+#define JRSTA_DECOERR_BURSTER       0x14
+#define JRSTA_DECOERR_DESCSIGNATURE 0x15
+#define JRSTA_DECOERR_DMA           0x16
+#define JRSTA_DECOERR_BURSTFIFO     0x17
+#define JRSTA_DECOERR_JRRESET       0x1a
+#define JRSTA_DECOERR_JOBFAIL       0x1b
+#define JRSTA_DECOERR_DNRERR        0x80
+#define JRSTA_DECOERR_UNDEFPCL      0x81
+#define JRSTA_DECOERR_PDBERR        0x82
+#define JRSTA_DECOERR_ANRPLY_LATE   0x83
+#define JRSTA_DECOERR_ANRPLY_REPLAY 0x84
+#define JRSTA_DECOERR_SEQOVF        0x85
+#define JRSTA_DECOERR_INVSIGN       0x86
+#define JRSTA_DECOERR_DSASIGN       0x87
+
+#define JRSTA_CCBERR_JUMP           0x08000000
+#define JRSTA_CCBERR_INDEX_MASK     0xff00
+#define JRSTA_CCBERR_INDEX_SHIFT    8
+#define JRSTA_CCBERR_CHAID_MASK     0x00f0
+#define JRSTA_CCBERR_CHAID_SHIFT    4
+#define JRSTA_CCBERR_ERRID_MASK     0x000f
+
+#define JRSTA_CCBERR_CHAID_AES      (0x01 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_DES      (0x02 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_ARC4     (0x03 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_MD       (0x04 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_RNG      (0x05 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_SNOW     (0x06 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_KASUMI   (0x07 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_PK       (0x08 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_CRC      (0x09 << JRSTA_CCBERR_CHAID_SHIFT)
+
+#define JRSTA_CCBERR_ERRID_NONE     0x00
+#define JRSTA_CCBERR_ERRID_MODE     0x01
+#define JRSTA_CCBERR_ERRID_DATASIZ  0x02
+#define JRSTA_CCBERR_ERRID_KEYSIZ   0x03
+#define JRSTA_CCBERR_ERRID_PKAMEMSZ 0x04
+#define JRSTA_CCBERR_ERRID_PKBMEMSZ 0x05
+#define JRSTA_CCBERR_ERRID_SEQUENCE 0x06
+#define JRSTA_CCBERR_ERRID_PKDIVZRO 0x07
+#define JRSTA_CCBERR_ERRID_PKMODEVN 0x08
+#define JRSTA_CCBERR_ERRID_KEYPARIT 0x09
+#define JRSTA_CCBERR_ERRID_ICVCHK   0x0a
+#define JRSTA_CCBERR_ERRID_HARDWARE 0x0b
+#define JRSTA_CCBERR_ERRID_CCMAAD   0x0c
+#define JRSTA_CCBERR_ERRID_INVCHA   0x0f
+
+#define JRINT_ERR_INDEX_MASK        0x3fff0000
+#define JRINT_ERR_INDEX_SHIFT       16
+#define JRINT_ERR_TYPE_MASK         0xf00
+#define JRINT_ERR_TYPE_SHIFT        8
+#define JRINT_ERR_HALT_MASK         0xc
+#define JRINT_ERR_HALT_SHIFT        2
+#define JRINT_ERR_HALT_INPROGRESS   0x4
+#define JRINT_ERR_HALT_COMPLETE     0x8
+#define JRINT_JR_ERROR              0x02
+#define JRINT_JR_INT                0x01
+
+#define JRINT_ERR_TYPE_WRITE        1
+#define JRINT_ERR_TYPE_BAD_INPADDR  3
+#define JRINT_ERR_TYPE_BAD_OUTADDR  4
+#define JRINT_ERR_TYPE_INV_INPWRT   5
+#define JRINT_ERR_TYPE_INV_OUTWRT   6
+#define JRINT_ERR_TYPE_RESET        7
+#define JRINT_ERR_TYPE_REMOVE_OFL   8
+#define JRINT_ERR_TYPE_ADD_OFL      9
+
+#define JRCFG_SOE		0x04
+#define JRCFG_ICEN		0x02
+#define JRCFG_IMSK		0x01
+#define JRCFG_ICDCT_SHIFT	8
+#define JRCFG_ICTT_SHIFT	16
+
+#define JRCR_RESET                  0x01
+
+/*
+ * caam_assurance - Assurance Controller View
+ * base + 0x6000 padded out to 0x1000
+ */
+
+struct rtic_element {
+	u64 address;
+	u32 rsvd;
+	u32 length;
+};
+
+struct rtic_block {
+	struct rtic_element element[2];
+};
+
+struct rtic_memhash {
+	u32 memhash_be[32];
+	u32 memhash_le[32];
+};
+
+struct caam_assurance {
+	/* Status/Command/Watchdog */
+	u32 rsvd1;
+	u32 status;		/* RSTA - Status */
+	u32 rsvd2;
+	u32 cmd;		/* RCMD - Command */
+	u32 rsvd3;
+	u32 ctrl;		/* RCTL - Control */
+	u32 rsvd4;
+	u32 throttle;	/* RTHR - Throttle */
+	u32 rsvd5[2];
+	u64 watchdog;	/* RWDOG - Watchdog Timer */
+	u32 rsvd6;
+	u32 rend;		/* REND - Endian corrections */
+	u32 rsvd7[50];
+
+	/* Block access/configuration @ 100/110/120/130 */
+	struct rtic_block memblk[4];	/* Memory Blocks A-D */
+	u32 rsvd8[32];
+
+	/* Block hashes @ 200/300/400/500 */
+	struct rtic_memhash hash[4];	/* Block hash values A-D */
+	u32 rsvd_3[640];
+};
+
+/*
+ * caam_queue_if - QI configuration and control
+ * starts base + 0x7000, padded out to 0x1000 long
+ */
+
+struct caam_queue_if {
+	u32 qi_control_hi;	/* QICTL  - QI Control */
+	u32 qi_control_lo;
+	u32 rsvd1;
+	u32 qi_status;	/* QISTA  - QI Status */
+	u32 qi_deq_cfg_hi;	/* QIDQC  - QI Dequeue Configuration */
+	u32 qi_deq_cfg_lo;
+	u32 qi_enq_cfg_hi;	/* QISEQC - QI Enqueue Command     */
+	u32 qi_enq_cfg_lo;
+	u32 rsvd2[1016];
+};
+
+/* QI control bits - low word */
+#define QICTL_DQEN      0x01              /* Enable frame pop          */
+#define QICTL_STOP      0x02              /* Stop dequeue/enqueue      */
+#define QICTL_SOE       0x04              /* Stop on error             */
+
+/* QI control bits - high word */
+#define QICTL_MBSI	0x01
+#define QICTL_MHWSI	0x02
+#define QICTL_MWSI	0x04
+#define QICTL_MDWSI	0x08
+#define QICTL_CBSI	0x10		/* CtrlDataByteSwapInput     */
+#define QICTL_CHWSI	0x20		/* CtrlDataHalfSwapInput     */
+#define QICTL_CWSI	0x40		/* CtrlDataWordSwapInput     */
+#define QICTL_CDWSI	0x80		/* CtrlDataDWordSwapInput    */
+#define QICTL_MBSO	0x0100
+#define QICTL_MHWSO	0x0200
+#define QICTL_MWSO	0x0400
+#define QICTL_MDWSO	0x0800
+#define QICTL_CBSO	0x1000		/* CtrlDataByteSwapOutput    */
+#define QICTL_CHWSO	0x2000		/* CtrlDataHalfSwapOutput    */
+#define QICTL_CWSO	0x4000		/* CtrlDataWordSwapOutput    */
+#define QICTL_CDWSO     0x8000		/* CtrlDataDWordSwapOutput   */
+#define QICTL_DMBS	0x010000
+#define QICTL_EPO	0x020000
+
+/* QI status bits */
+#define QISTA_PHRDERR   0x01              /* PreHeader Read Error      */
+#define QISTA_CFRDERR   0x02              /* Compound Frame Read Error */
+#define QISTA_OFWRERR   0x04              /* Output Frame Read Error   */
+#define QISTA_BPDERR    0x08              /* Buffer Pool Depleted      */
+#define QISTA_BTSERR    0x10              /* Buffer Undersize          */
+#define QISTA_CFWRERR   0x20              /* Compound Frame Write Err  */
+#define QISTA_STOPD     0x80000000        /* QI Stopped (see QICTL)    */
+
+/* deco_sg_table - DECO view of scatter/gather table */
+struct deco_sg_table {
+	u64 addr;		/* Segment Address */
+	u32 elen;		/* E, F bits + 30-bit length */
+	u32 bpid_offset;	/* Buffer Pool ID + 16-bit length */
+};
+
+/*
+ * caam_deco - descriptor controller - CHA cluster block
+ *
+ * Only accessible when direct DECO access is turned on
+ * (done in DECORR, via MID programmed in DECOxMID)
+ *
+ * 5 typical, base + 0x8000/9000/a000/b000
+ * Padded out to 0x1000 long
+ */
+struct caam_deco {
+	u32 rsvd1;
+	u32 cls1_mode;	/* CxC1MR -  Class 1 Mode */
+	u32 rsvd2;
+	u32 cls1_keysize;	/* CxC1KSR - Class 1 Key Size */
+	u32 cls1_datasize_hi;	/* CxC1DSR - Class 1 Data Size */
+	u32 cls1_datasize_lo;
+	u32 rsvd3;
+	u32 cls1_icvsize;	/* CxC1ICVSR - Class 1 ICV size */
+	u32 rsvd4[5];
+	u32 cha_ctrl;	/* CCTLR - CHA control */
+	u32 rsvd5;
+	u32 irq_crtl;	/* CxCIRQ - CCB interrupt done/error/clear */
+	u32 rsvd6;
+	u32 clr_written;	/* CxCWR - Clear-Written */
+	u32 ccb_status_hi;	/* CxCSTA - CCB Status/Error */
+	u32 ccb_status_lo;
+	u32 rsvd7[3];
+	u32 aad_size;	/* CxAADSZR - Current AAD Size */
+	u32 rsvd8;
+	u32 cls1_iv_size;	/* CxC1IVSZR - Current Class 1 IV Size */
+	u32 rsvd9[7];
+	u32 pkha_a_size;	/* PKASZRx - Size of PKHA A */
+	u32 rsvd10;
+	u32 pkha_b_size;	/* PKBSZRx - Size of PKHA B */
+	u32 rsvd11;
+	u32 pkha_n_size;	/* PKNSZRx - Size of PKHA N */
+	u32 rsvd12;
+	u32 pkha_e_size;	/* PKESZRx - Size of PKHA E */
+	u32 rsvd13[24];
+	u32 cls1_ctx[16];	/* CxC1CTXR - Class 1 Context @100 */
+	u32 rsvd14[48];
+	u32 cls1_key[8];	/* CxC1KEYR - Class 1 Key @200 */
+	u32 rsvd15[121];
+	u32 cls2_mode;	/* CxC2MR - Class 2 Mode */
+	u32 rsvd16;
+	u32 cls2_keysize;	/* CxX2KSR - Class 2 Key Size */
+	u32 cls2_datasize_hi;	/* CxC2DSR - Class 2 Data Size */
+	u32 cls2_datasize_lo;
+	u32 rsvd17;
+	u32 cls2_icvsize;	/* CxC2ICVSZR - Class 2 ICV Size */
+	u32 rsvd18[56];
+	u32 cls2_ctx[18];	/* CxC2CTXR - Class 2 Context @500 */
+	u32 rsvd19[46];
+	u32 cls2_key[32];	/* CxC2KEYR - Class2 Key @600 */
+	u32 rsvd20[84];
+	u32 inp_infofifo_hi;	/* CxIFIFO - Input Info FIFO @7d0 */
+	u32 inp_infofifo_lo;
+	u32 rsvd21[2];
+	u64 inp_datafifo;	/* CxDFIFO - Input Data FIFO */
+	u32 rsvd22[2];
+	u64 out_datafifo;	/* CxOFIFO - Output Data FIFO */
+	u32 rsvd23[2];
+	u32 jr_ctl_hi;	/* CxJRR - JobR Control Register      @800 */
+	u32 jr_ctl_lo;
+	u64 jr_descaddr;	/* CxDADR - JobR Descriptor Address */
+	u32 op_status_hi;	/* DxOPSTA - DECO Operation Status */
+	u32 op_status_lo;
+	u32 rsvd24[2];
+	u32 liodn;		/* DxLSR - DECO LIODN Status - non-seq */
+	u32 td_liodn;	/* DxLSR - DECO LIODN Status - trustdesc */
+	u32 rsvd26[6];
+	u64 math[4];		/* DxMTH - Math register */
+	u32 rsvd27[8];
+	struct deco_sg_table gthr_tbl[4];	/* DxGTR - Gather Tables */
+	u32 rsvd28[16];
+	struct deco_sg_table sctr_tbl[4];	/* DxSTR - Scatter Tables */
+	u32 rsvd29[48];
+	u32 descbuf[64];	/* DxDESB - Descriptor buffer */
+	u32 rsvd30[320];
+};
+
+/*
+ * Current top-level view of memory map is:
+ *
+ * 0x0000 - 0x0fff - CAAM Top-Level Control
+ * 0x1000 - 0x1fff - Job Ring 0
+ * 0x2000 - 0x2fff - Job Ring 1
+ * 0x3000 - 0x3fff - Job Ring 2
+ * 0x4000 - 0x4fff - Job Ring 3
+ * 0x5000 - 0x5fff - (unused)
+ * 0x6000 - 0x6fff - Assurance Controller
+ * 0x7000 - 0x7fff - Queue Interface
+ * 0x8000 - 0x8fff - DECO-CCB 0
+ * 0x9000 - 0x9fff - DECO-CCB 1
+ * 0xa000 - 0xafff - DECO-CCB 2
+ * 0xb000 - 0xbfff - DECO-CCB 3
+ * 0xc000 - 0xcfff - DECO-CCB 4
+ *
+ * caam_full describes the full register view of CAAM if useful,
+ * although many configurations may choose to implement parts of
+ * the register map separately, in differing privilege regions
+ */
+struct caam_full {
+	struct caam_ctrl __iomem ctrl;
+	struct caam_job_ring jr[4];
+	u64 rsvd[512];
+	struct caam_assurance assure;
+	struct caam_queue_if qi;
+};
+
+#endif /* REGS_H */