lib/el3_runtime/aarch64/context_mgmt.c vs plat/arm/board/fvp_r/fvp_r_context_mgmt.c
This page records how plat/arm/board/fvp_r/fvp_r_context_mgmt.c was derived from the generic EL3 runtime file lib/el3_runtime/aarch64/context_mgmt.c. The FVP-R platform (Armv8-R AArch64) has no EL3, so the new file keeps only the EL2 equivalents of the generic routines and drops everything that exists purely to manage the EL3 execution context.

Header changes:
- The copyright year range changes from 2013-2021 to 2021; the SPDX-License-Identifier (BSD-3-Clause) is unchanged.
- #include <arch_helpers.h> is replaced by #include <fvp_r_arch_helpers.h>. All other includes are unchanged (assert.h, stdbool.h, string.h, platform_def.h, arch.h, arch_features.h, bl31/interrupt_mgmt.h, common/bl_common.h, context.h, the el3_runtime and amu/mpam/spe/sve/twed extension headers, and lib/utils.h).
- The long file-header comment describing the context management library is replaced by a short note: "File contains EL2 equivalents of EL3 functions from: .../lib/el3_runtime/aarch64/context_mgmt.c".

Removed initialisation routines:
- cm_init() is removed. In the generic file it is an empty routine whose comment notes that the library only has global data, initialised when BSS is zeroed.
- cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep) is removed. In the generic file it initialises a cpu_context for first use: it derives the security state from the entry_point_info attributes, zeroes the context, and then modifies the SCR_EL3 value that was programmed during the reset sequence so that it suits the next EL. The EE and ST entrypoint attributes configure the endianness and secure-timer availability of the new execution context; to prepare the register state for entry, callers then use cm_prepare_el3_exit() (or cm_el1_sysregs_context_restore() plus el3_exit() for Secure EL1).
SCR_EL3 construction inside the removed cm_setup_context():
- The function starts from the SCR_EL3 value read with read_scr() and clears the fields it manages (NS, RW, FIQ/IRQ routing, ST, HCE) before setting them from the Security state and entrypoint attributes.
- SCR_NS is set for a non-secure entry; SCR_RW follows the register width requested in the SPSR; SCR_ST is set when the EP_GET_ST entrypoint attribute requests it, governing Secure EL1 access to the secure physical timer registers.
- SCR_TERR is set when RAS_TRAP_LOWER_EL_ERR_ACCESS is enabled, so RAS error-record (ERR/ERX) register accesses from EL1 and EL2 trap to EL3.
- SCR_EA is cleared unless HANDLE_EA_EL3_FIRST is enabled, so External Aborts and SError interrupts from lower ELs are not routed to EL3 (when executing at EL3 they are still taken to EL3).
- SCR_FIEN is set when FAULT_INJECTION_SUPPORT is enabled, allowing fault injection from lower ELs.
- When CTX_INCLUDE_PAUTH_REGS is 0, SCR_API and SCR_APK are set only for the non-secure world, so pointer authentication is enabled only where register values cannot leak across world switches; a secure world that wants pointer authentication must build with CTX_INCLUDE_PAUTH_REGS=1.
- SCR_ATA (MTE) is set unconditionally when CTX_INCLUDE_MTE_REGS is built in (with an assertion on the detected MTE support level); otherwise it is set when MTE is implemented only at EL0 (usable in both worlds without register saving), or for the non-secure world when MTE is implemented at all ELs.
- In BL31, the SCR_EL3 IRQ and FIQ routing bits are taken from the interrupt routing model for the given security state via get_scr_el3_from_routing_model().
A minimal sketch of this clear-then-set pattern follows.
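For reference, here is a minimal, self-contained sketch of that pattern. The SCR_* masks are redefined locally so the snippet compiles on its own and covers only three of the many bits handled by the real code, which uses the definitions from arch.h:

```c
#include <stdbool.h>
#include <stdint.h>

/* Local stand-ins for the arch.h definitions (positions as in the Arm ARM). */
#define SCR_NS_BIT	(1ULL << 0)	/* lower ELs are Non-secure        */
#define SCR_RW_BIT	(1ULL << 10)	/* next lower EL is AArch64        */
#define SCR_ST_BIT	(1ULL << 11)	/* Secure EL1 may use the S. timer */

/* Clear the managed fields of the reset-time value, then set them as needed. */
static uint64_t build_scr_el3(uint64_t scr_reset, bool nonsecure,
			      bool lower_el_aarch64, bool secure_timer_access)
{
	uint64_t scr_el3 = scr_reset & ~(SCR_NS_BIT | SCR_RW_BIT | SCR_ST_BIT);

	if (nonsecure)
		scr_el3 |= SCR_NS_BIT;
	if (lower_el_aarch64)
		scr_el3 |= SCR_RW_BIT;
	if (secure_timer_access)
		scr_el3 |= SCR_ST_BIT;

	return scr_el3;
}
```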
- SCR_HCE is set when the next EL is EL2 in AArch64, or Hyp mode in AArch32, so HVC instructions are enabled; under the same conditions SCR_FGTEN is set when ARMv8.6-FGT is implemented, and SCR_ECVEN is set when ID_AA64MMFR0_EL1 reports self-synchronised Enhanced Counter Virtualization, enabling use of CNTPOFF_EL2.
- SCR_EEL2 is set for a Secure entry whose next EL is EL2; an AArch32 Secure EL2 request is rejected with an ERROR() message and panic().
- SCR_AMVOFFEN is set when FEAT_AMUv1p1 is present and EL2 is implemented, since the AMU virtual offset registers are only accessible from EL3 and EL2.
- SCTLR_EL1 is then initialised explicitly rather than relying on the hardware reset value: the EE bit is taken from the entrypoint attributes; SCTLR_EL1_RES1 is added for AArch64 targets, while AArch32 targets get SCTLR_AARCH32_EL1_RES1 plus the nTWE, nTWI and CP15BEN bits so that EL0 WFE/WFI and the CP15 barrier instructions (CP15DMB, CP15DSB, CP15ISB) are not trapped to EL1. Under ERRATA_A75_764081 the IESB bit is also set to enable an Implicit Error Synchronization Barrier.
- When ARMv8.6-TWED is present, the platform-supplied delay from plat_arm_set_twedel_scr_el3() is asserted to fit, written into SCR_EL3.TWEDEL, and SCR_EL3.TWEDEn is set, unless the delay is TWED_DISABLED (a sketch of this field update follows below).
- The resulting SCTLR_EL1 is stored in the EL1 system-register area of the cpu_context; SCTLR_EL2 and the other EL2 registers are set up later by cm_prepare_el3_exit(), since they are not part of the stored cpu_context.
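The TWED handling above is a read-modify-write of a multi-bit field plus an enable bit. A small sketch of that update, assuming the shift, mask and enable-bit values match the SCR_TWEDEL_*/SCR_TWEDEn definitions in arch.h (redefined locally here to stay standalone):

```c
#include <stdint.h>

/* Illustrative stand-ins for SCR_TWEDEL_SHIFT/MASK and SCR_TWEDEn_BIT. */
#define TWEDEL_SHIFT	30U
#define TWEDEL_MASK	0xFULL
#define TWEDEN_BIT	(1ULL << 29)

/* Insert the WFE trap delay into SCR_EL3 and enable the delayed trap. */
static uint64_t set_wfe_trap_delay(uint64_t scr_el3, uint32_t delay)
{
	scr_el3 &= ~(TWEDEL_MASK << TWEDEL_SHIFT);		/* clear old delay  */
	scr_el3 |= (delay & TWEDEL_MASK) << TWEDEL_SHIFT;	/* insert new delay */
	scr_el3 |= TWEDEN_BIT;					/* enable WFE delay */
	return scr_el3;
}
```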
- ACTLR_EL1 in the context is based on the current hardware value, since the register is implementation defined and some cores do not expect it to be restored as zero; the context-restore path writes this value back to the actual register.
- Finally the EL3 state of the context is populated (CTX_SCR_EL3 with the computed scr_el3, CTX_ELR_EL3 from ep->pc, CTX_SPSR_EL3 from ep->spsr) so that the first ERET enters the right context, and the X0-X7 entrypoint arguments are memcpy()'d into the GP-register area, whose layout matches aapcs64_params_t.

Retained function:
- enable_extensions_nonsecure(bool el2_unused) is kept unchanged. When built into BL31 it enables architecture extensions on first entry to the non-secure world: SPE, AMU, SVE and MPAM for the lower ELs, each guarded by its build option (ENABLE_SPE_FOR_LOWER_ELS, ENABLE_AMU, ENABLE_SVE_FOR_NS, ENABLE_MPAM_FOR_LOWER_ELS). `el2_unused` is non-zero when EL2 is implemented but unused.

Removed wrappers:
- cm_init_context_by_index(unsigned int cpu_idx, const entry_point_info_t *ep) and cm_init_my_context(const entry_point_info_t *ep) are removed. They fetch the cpu_context for the given CPU index (or for the calling CPU) and the entrypoint's security state, then call cm_setup_context(). A hedged usage sketch of this generic API follows.
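As a usage sketch (of the generic EL3 file, not the FVP-R one): a BL31 path typically builds an entry_point_info_t and hands it to these helpers before leaving EL3. The wrapper below and its surrounding flow are illustrative assumptions; only cm_init_my_context(), cm_prepare_el3_exit() and the NON_SECURE security state come from the code quoted on this page.

```c
#include <common/bl_common.h>
#include <lib/el3_runtime/context_mgmt.h>

/* Illustrative wrapper: initialise this CPU's non-secure context and
 * prepare the EL1/EL2 system registers for the first exit from EL3. */
static void prepare_ns_entry(const entry_point_info_t *ns_ep)
{
	cm_init_my_context(ns_ep);		/* zeroes and fills the cpu_context     */
	cm_prepare_el3_exit(NON_SECURE);	/* programs EL2/EL1, sets up next ERET  */
}
```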
cm_prepare_el3_exit() becomes cm_prepare_el2_exit():
- The documenting comment is kept: the function prepares the CPU system registers for first entry into secure or non-secure software; if execution is requested to EL2/hyp mode, SCTLR_EL2 is initialised, and if execution is requested to non-secure EL1/svc mode on a CPU with EL2, EL2 is disabled by programming all the necessary EL2 registers; for all entries the EL1 registers are initialised from the cpu_context.
- Only the non-secure path changes name; the body is kept (a hedged sketch of the MDCR_EL2 composition appears below, after the removed EL2 save/restore helpers). It reads the stored SCR_EL3 from the context and:
  - if SCR_HCE is set, initialises SCTLR_EL2 from the context's SCTLR_EL1.EE bit plus SCTLR_EL2_RES1 (and SCTLR_IESB_BIT under ERRATA_A75_764081) and writes it;
  - otherwise, if EL2 is implemented but unused, it makes EL2 transparent: HCR_EL2 (RW mirrors SCR_EL3.RW; API and APK set so Armv8.3 pointer-authentication key and instruction accesses do not trap to EL2), CPTR_EL2 (TCPAC, TTA and TFP cleared so CPACR/CPTR, trace and FP/SIMD accesses do not trap), CNTHCTL_EL2 (EL1PCEN and EL1PCTEN set so non-secure EL0/EL1 can access the physical timer and counter), CNTVOFF_EL2 = 0, VPIDR_EL2/VMPIDR_EL2 from MIDR_EL1/MPIDR_EL1, VTTBR_EL2 with VMID and BADDR cleared (stage 2 translation is disabled), MDCR_EL2 (HLP and HPMD set, HPMN taken from PMCR_EL0.N, and the TTRF, TPMS, TDRA, TDOSA, TDA, TDE, HPME, TPM and TPMCR traps/enables cleared), HSTR_EL2 with all T<n> traps cleared, and CNTHP_CTL_EL2 with ENABLE cleared so the EL2 physical timer is off and no timer interrupts fire; it then calls enable_extensions_nonsecure(el2_unused).
- The generic function's tail, cm_el1_sysregs_context_restore(security_state) followed by cm_set_next_eret_context(security_state), is also retained; in the new file these two calls close cm_prepare_el2_exit().

Removed EL2 context save/restore (built under CTX_INCLUDE_EL2_REGS):
- cm_el2_sysregs_context_save(uint32_t security_state) and cm_el2_sysregs_context_restore(uint32_t security_state) are removed. They save or restore the EL2 system-register area of the context for the non-secure world always, and for the secure world only when SCR_EL3 shows that S-EL2 is enabled.
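Of the EL2 registers listed above, MDCR_EL2 is the one built from several sources at once: a reset value, forced bits, a field copied from PMCR_EL0, and cleared trap bits. A minimal sketch of that composition, using locally redefined stand-ins for the MDCR_EL2_* and PMCR_EL0 field definitions from arch.h:

```c
#include <stdint.h>

/* Illustrative stand-ins for the arch.h field definitions. */
#define PMCR_N_SHIFT		11U
#define PMCR_N_MASK		0x1FU
#define MDCR_HPMN_MASK		0x1FU			/* MDCR_EL2[4:0]          */
#define MDCR_HLP_BIT		(1U << 26)		/* long event counters    */
#define MDCR_HPMD_BIT		(1U << 17)		/* prohibit counting @EL2 */
#define MDCR_TRAP_BITS		((1U << 9) | (1U << 8))	/* e.g. TDA, TDE          */

/* Compose MDCR_EL2: force HLP/HPMD, copy PMCR_EL0.N into HPMN, clear traps. */
static uint32_t build_mdcr_el2(uint32_t mdcr_reset, uint32_t pmcr_el0)
{
	uint32_t hpmn = (pmcr_el0 >> PMCR_N_SHIFT) & PMCR_N_MASK;
	uint32_t mdcr = mdcr_reset & ~(MDCR_HPMN_MASK | MDCR_TRAP_BITS);

	return mdcr | MDCR_HLP_BIT | MDCR_HPMD_BIT | hpmn;
}
```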
Removed EL1 context helpers and EL3-state accessors (the generic file's comment notes these are used by runtime services to save and restore the EL1 context in the 'cpu_context' structure for the specified security state):
- cm_el1_sysregs_context_save(uint32_t security_state) and cm_el1_sysregs_context_restore(uint32_t security_state), which save or restore the EL1 system-register area and, in BL31, publish the cm_exited_secure_world / cm_exited_normal_world and cm_entering_secure_world / cm_entering_normal_world pubsub events.
- cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint), which writes CTX_ELR_EL3 in the stored EL3 state so that ERET jumps to the given entrypoint, and cm_set_elr_spsr_el3(), which additionally writes CTX_SPSR_EL3.
- cm_write_scr_el3_bit(uint32_t security_state, uint32_t bit_pos, uint32_t value), which updates a single bit of the stored SCR_EL3 while preserving all other bits; it asserts that the bit position is covered by SCR_VALID_BIT_MASK and that the value is only one bit wide (a sketch of the bit update follows below).
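The removed cm_write_scr_el3_bit() boils down to a guarded clear-then-set of one bit. A standalone sketch of that update, with the context read/write helpers replaced by a plain value and the SCR_VALID_BIT_MASK check reduced to a simple range assert:

```c
#include <assert.h>
#include <stdint.h>

/* Update one bit of an SCR_EL3 value, preserving every other bit. */
static uint64_t write_scr_bit(uint64_t scr_el3, unsigned int bit_pos,
			      unsigned int value)
{
	assert(bit_pos < 64U);	/* must address a bit inside the register */
	assert(value <= 1U);	/* the new value is only one bit wide     */

	scr_el3 &= ~(1ULL << bit_pos);		/* clear the old bit  */
	scr_el3 |= (uint64_t)value << bit_pos;	/* set the new value  */
	return scr_el3;
}
```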
- cm_get_scr_el3(uint32_t security_state), which returns the SCR_EL3 value stored in the EL3 state of the context for the given security state.
- cm_set_next_eret_context(uint32_t security_state), which programs the context used for the next exception return: it fetches the cpu_context of the requested security state and passes it to cm_set_next_context() so that SP_EL3 points at it.
With these removals, the new file ends inside cm_prepare_el2_exit(), whose final statements are the retained cm_el1_sysregs_context_restore(security_state) and cm_set_next_eret_context(security_state) calls.
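Taken together, the removed save/restore and ERET-context helpers are what a dispatcher in the generic EL3 runtime uses to switch worlds. A hedged sketch of that sequence; the wrapper name is an illustrative assumption, while the three cm_* calls and their uint32_t security_state parameter are the ones shown above:

```c
#include <lib/el3_runtime/context_mgmt.h>

/* Illustrative world switch: stash the outgoing world's EL1 state,
 * load the incoming world's, and point SP_EL3 at its cpu_context. */
static void switch_world(uint32_t from_state, uint32_t to_state)
{
	cm_el1_sysregs_context_save(from_state);
	cm_el1_sysregs_context_restore(to_state);
	cm_set_next_eret_context(to_state);
}
```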
- Last Author: garymorrison-arm
- Last Edited: Jul 2 2021, 11:00 PM