/* blob: 8208a2b1ebc9c0c7e025294983a8f7613ee74753 [file] [log] [blame] */
/*
* Copyright (c) 2014 Travis Geiselbrecht
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <asm.h>
#include <arch/arm64/mmu.h>
#include <arch/asm_macros.h>
/* use x9 ~ x15 as scratch registers */
tmp .req x9
tmp2 .req x10
/* ESR_ELx.EC field: the exception class lives in bits [31:26] */
#define ESR_EC_SHIFT 26
#define ESR_EC_LENGTH 6
/* EC values for HVC/SMC instructions executed in AArch64 state */
#define EC_AARCH64_HVC 0x16
#define EC_AARCH64_SMC 0x17
/* NOTE(review): mtk_sip is weak, presumably so a platform without a
 * SIP service can omit it -- confirm a strong definition always exists,
 * otherwise the branch in the sync handler resolves to address 0 */
.weak mtk_sip
/*
 * setup_el2_or_el3_exception_base
 *
 * Install .Lel2_or_el3_exception_base into the vector base register of
 * the current exception level (VBAR_EL3 or VBAR_EL2).
 *
 * Fix vs. original: when CurrentEL is neither EL3 nor EL2 (e.g. the
 * image was entered at EL1), the original fell through into the
 * .Lin_el3 path and executed "msr vbar_el3", which is UNDEFINED below
 * EL3.  Now any other level is a safe no-op.
 *
 * Clobbers: tmp (x9), tmp2 (x10), flags
 */
FUNCTION(setup_el2_or_el3_exception_base)
    /* install el2 or el3 exception table */
    ldr     tmp, =.Lel2_or_el3_exception_base
#if WITH_KERNEL_VM
    /* mask off the kernel virtual base so VBAR gets the physical alias */
    and     tmp, tmp, #~(~0 << MMU_KERNEL_SIZE_SHIFT)
#endif
    mrs     tmp2, CurrentEL
    cmp     tmp2, #(0b11 << 2)          /* in EL3? */
    b.eq    .Lin_el3
    cmp     tmp2, #(0b10 << 2)          /* in EL2? */
    b.eq    .Lin_el2
    b       .Lexit                      /* EL1/EL0: nothing to install */
.Lin_el3:
    msr     vbar_el3, tmp
    b       .Lexit
.Lin_el2:
    msr     vbar_el2, tmp
.Lexit:
    ret
.section .text.boot.vectab
.align 12
/*
 * The next boot stage after lk can be ATF (lk as the bl2 bootloader), the
 * linux kernel, or a hypervisor (lk as the bl33 bootloader). A different
 * entry execution level is required for each next boot stage:
* - ATF: from EL3
* - linux kernel: from EL2 or EL1
* - hypervisor: from EL2
* It's necessary for lk to return to its beginning entry level before jumping
* to next boot stage.
*
* SMC or HVC will be used for this purpose, thus we install only the exception
* vector to handle sync exception from lower exception level.
*
 * [TODO] add the remaining exception vectors to catch unhandled exceptions.
*/
/*
 * Exception vector base installed into VBAR_EL2/VBAR_EL3 by
 * setup_el2_or_el3_exception_base (.align 12 above gives the required
 * vector-table alignment).  Only the "synchronous exception from a
 * lower EL, AArch64" slot at offset 0x400 is populated; all other
 * slots are zero (unhandled).
 */
.Lel2_or_el3_exception_base:
FUNCTION(arm64_el2_or_el3_exception_base)
/* exceptions from lower EL, running arm64 */
.org 0x400
LOCAL_FUNCTION(arm64_sync_exc_lower_el_64)
#if WITH_KERNEL_VM
/* Mask the stack pointer down to its physical alias, mirroring the
 * VBAR masking done at install time.
 * NOTE(review): assumes kernel VAs alias PAs within the low
 * MMU_KERNEL_SIZE_SHIFT bits -- confirm against the platform map. */
mov tmp, sp
and sp, tmp, #~(~0 << MMU_KERNEL_SIZE_SHIFT)
#endif
/* read the syndrome register of whichever EL we are executing at */
mrs tmp, CurrentEL
cmp tmp, #(0b11 << 2) /* in EL3? */
b.ne .LnotEL3
mrs tmp, esr_el3
b .Lcheck_ec
.LnotEL3:
cmp tmp, #(0b10 << 2) /* in EL2? */
b.ne .Lunhandled_sync_exc
mrs tmp, esr_el2
.Lcheck_ec:
/* extract ESR_ELx.EC (bits [31:26]); dispatch SMC and HVC calls to the
 * SIP handler, park on anything else */
ubfx tmp, tmp, #ESR_EC_SHIFT, #ESR_EC_LENGTH
cmp tmp, #EC_AARCH64_SMC
b.eq .Lsip_handler
cmp tmp, #EC_AARCH64_HVC
b.ne .Lunhandled_sync_exc
.Lsip_handler:
/* tail-branch to the platform SIP service routine (weak symbol;
 * NOTE(review): branches to address 0 if no strong definition is
 * linked -- confirm mtk_sip is always provided) */
b mtk_sip
.Lunhandled_sync_exc:
/* spin forever; TODO: report unhandled synchronous exceptions */
b .