ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/arch/powerpc/kvm/book3s_32_sr.S b/marvell/linux/arch/powerpc/kvm/book3s_32_sr.S
new file mode 100644
index 0000000..e3ab9df
--- /dev/null
+++ b/marvell/linux/arch/powerpc/kvm/book3s_32_sr.S
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+/******************************************************************************
+ *                                                                            *
+ *                               Entry code                                   *
+ *                                                                            *
+ *****************************************************************************/
+
+.macro LOAD_GUEST_SEGMENTS
+
+	/* Required state:
+	 *
+	 * MSR = ~IR|DR (instruction and data address translation off)
+	 * R1 = host R1
+	 * R2 = host R2
+	 * R3 = shadow vcpu
+	 * all other volatile GPRS = free except R4, R6
+	 * SVCPU[CR]  = guest CR
+	 * SVCPU[XER] = guest XER
+	 * SVCPU[CTR] = guest CTR
+	 * SVCPU[LR]  = guest LR
+	 */
+
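+	/* Load the guest segment registers: read each SR value as a
+	 * 32-bit word from the shadow vcpu's SR array and install it with mtsr. */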
+#define XCHG_SR(n)	lwz	r9, (SVCPU_SR+(n*4))(r3);  \
+			mtsr	n, r9
+
+	XCHG_SR(0)
+	XCHG_SR(1)
+	XCHG_SR(2)
+	XCHG_SR(3)
+	XCHG_SR(4)
+	XCHG_SR(5)
+	XCHG_SR(6)
+	XCHG_SR(7)
+	XCHG_SR(8)
+	XCHG_SR(9)
+	XCHG_SR(10)
+	XCHG_SR(11)
+	XCHG_SR(12)
+	XCHG_SR(13)
+	XCHG_SR(14)
+	XCHG_SR(15)
+
+	/* Clear BATs so no host BAT mappings remain visible to the guest. */
+
+#define KVM_KILL_BAT(n, reg)		\
+	mtspr	SPRN_IBAT##n##U,reg;	\
+	mtspr	SPRN_IBAT##n##L,reg;	\
+	mtspr	SPRN_DBAT##n##U,reg;	\
+	mtspr	SPRN_DBAT##n##L,reg
+
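+	/* Zeroing the upper words clears the Vs/Vp valid bits, disabling
+	 * each BAT; the lower words are cleared as well. */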
+	li	r9, 0
+	KVM_KILL_BAT(0, r9)
+	KVM_KILL_BAT(1, r9)
+	KVM_KILL_BAT(2, r9)
+	KVM_KILL_BAT(3, r9)
+
+.endm
+
+/******************************************************************************
+ *                                                                            *
+ *                               Exit code                                    *
+ *                                                                            *
+ *****************************************************************************/
+
+.macro LOAD_HOST_SEGMENTS
+
+	/* Register usage at this point:
+	 *
+	 * R1         = host R1
+	 * R2         = host R2
+	 * R12        = exit handler id
+	 * R13        = shadow vcpu - SHADOW_VCPU_OFF
+	 * SVCPU.*    = guest *
+	 * SVCPU[CR]  = guest CR
+	 * SVCPU[XER] = guest XER
+	 * SVCPU[CTR] = guest CTR
+	 * SVCPU[LR]  = guest LR
+	 *
+	 */
+
+	/* Restore BATs */
+
+	/* The entry path zeroed both halves of every BAT, so restore
+	   both the upper and lower words from the saved host values. */
+#define KVM_LOAD_BAT(n, reg, RA, RB)	\
+	lwz	RA,(n*16)+0(reg);	\
+	lwz	RB,(n*16)+4(reg);	\
+	mtspr	SPRN_IBAT##n##U,RA;	\
+	mtspr	SPRN_IBAT##n##L,RB;	\
+	lwz	RA,(n*16)+8(reg);	\
+	lwz	RB,(n*16)+12(reg);	\
+	mtspr	SPRN_DBAT##n##U,RA;	\
+	mtspr	SPRN_DBAT##n##L,RB
+
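+	/* BATS is the saved copy of the host's BAT values, 16 bytes per
+	 * BAT pair in the order IBATu, IBATl, DBATu, DBATl; tophys()
+	 * converts its address to physical for real-mode access. */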
+	lis     r9, BATS@ha
+	addi    r9, r9, BATS@l
+	tophys(r9, r9)
+	KVM_LOAD_BAT(0, r9, r10, r11)
+	KVM_LOAD_BAT(1, r9, r10, r11)
+	KVM_LOAD_BAT(2, r9, r10, r11)
+	KVM_LOAD_BAT(3, r9, r10, r11)
+
+	/* Restore Segment Registers */
+
+	/* 0xc - 0xf */
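+	/* Reset the four kernel segments (EAs 0xc0000000 - 0xffffffff):
+	 * 0x20000000 sets the user-key bit (Kp), the VSID is 0x111 times
+	 * the SR number, and mtsrin selects the SR from the top 4 bits of r4. */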
+
+	li	r0, 4
+	mtctr	r0
+	LOAD_REG_IMMEDIATE(r3, 0x20000000 | (0x111 * 0xc))
+	lis	r4, 0xc000
+3:	mtsrin	r3, r4
+	addi	r3, r3, 0x111	/* increment VSID */
+	addis	r4, r4, 0x1000	/* address of next segment */
+	bdnz	3b
+
+	/* 0x0 - 0xb */
+
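+	/* switch_mmu_context() reloads SRs 0x0 - 0xb from the context of
+	 * the mm passed in r4; on 32-bit, r2 holds 'current'. */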
+	/* 'current->mm' needs to be in r4 */
+	tophys(r4, r2)
+	lwz	r4, MM(r4)
+	tophys(r4, r4)
+	/* This only clobbers r0, r3, r4 and r5 */
+	bl	switch_mmu_context
+
+.endm