Phriction > Trusted Firmware > Trusted Firmware-A (TF-A) > Patch description

lib/xlat_tables_v2/aarch64/xlat_tables_arch.c vs. lib/xlat_mpu/aarch64/xlat_mpu_arch.c
The differences between the two files, rendered as a unified diff (`-` lines exist only in xlat_tables_arch.c, `+` lines only in xlat_mpu_arch.c):

```
--- lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ lib/xlat_mpu/aarch64/xlat_mpu_arch.c
@@
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */

 #include <assert.h>
 #include <stdbool.h>
 #include <stdint.h>

 #include <arch.h>
 #include <arch_features.h>
-#include <arch_helpers.h>
+#include <fvp_r_arch_helpers.h>
 #include <lib/cassert.h>
 #include <lib/utils_def.h>
 #include <lib/xlat_tables/xlat_tables_v2.h>

-#include "../xlat_tables_private.h"
+#include "../xlat_mpu_private.h"

-/*
- * Returns true if the provided granule size is supported, false otherwise.
- */
-bool xlat_arch_is_granule_size_supported(size_t size)
-{
-	u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1();
-
-	if (size == PAGE_SIZE_4KB) {
-		return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
-			ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
-			ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;
-	} else if (size == PAGE_SIZE_16KB) {
-		return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
-			ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
-			ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;
-	} else if (size == PAGE_SIZE_64KB) {
-		return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
-			ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
-			ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;
-	} else {
-		return 0;
-	}
-}
-
-size_t xlat_arch_get_max_supported_granule_size(void)
-{
-	if (xlat_arch_is_granule_size_supported(PAGE_SIZE_64KB)) {
-		return PAGE_SIZE_64KB;
-	} else if (xlat_arch_is_granule_size_supported(PAGE_SIZE_16KB)) {
-		return PAGE_SIZE_16KB;
-	} else {
-		assert(xlat_arch_is_granule_size_supported(PAGE_SIZE_4KB));
-		return PAGE_SIZE_4KB;
-	}
-}
-
-unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
-{
-	/* Physical address can't exceed 48 bits */
-	assert((max_addr & ADDR_MASK_48_TO_63) == 0U);
-
-	/* 48 bits address */
-	if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
-		return TCR_PS_BITS_256TB;
-
-	/* 44 bits address */
-	if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
-		return TCR_PS_BITS_16TB;
-
-	/* 42 bits address */
-	if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
-		return TCR_PS_BITS_4TB;
-
-	/* 40 bits address */
-	if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
-		return TCR_PS_BITS_1TB;
-
-	/* 36 bits address */
-	if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
-		return TCR_PS_BITS_64GB;
-
-	return TCR_PS_BITS_4GB;
-}
-
 #if ENABLE_ASSERTIONS
 /*
- * Physical Address ranges supported in the AArch64 Memory Model. Value 0b110
- * is supported in ARMv8.2 onwards.
- */
-static const unsigned int pa_range_bits_arr[] = {
-	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
-	PARANGE_0101, PARANGE_0110
-};
-
-unsigned long long xlat_arch_get_max_supported_pa(void)
-{
-	u_register_t pa_range = read_id_aa64mmfr0_el1() &
-						ID_AA64MMFR0_EL1_PARANGE_MASK;
-
-	/* All other values are reserved */
-	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));
-
-	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
-}
-
-/*
  * Return minimum virtual address space size supported by the architecture
  */
 uintptr_t xlat_get_min_virt_addr_space_size(void)
 {
 	uintptr_t ret;

 	if (is_armv8_4_ttst_present())
 		ret = MIN_VIRT_ADDR_SPACE_SIZE_TTST;
 	else
 		ret = MIN_VIRT_ADDR_SPACE_SIZE;

 	return ret;
 }
 #endif /* ENABLE_ASSERTIONS*/

-bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
+bool is_mpu_enabled_ctx(const xlat_ctx_t *ctx)
 {
 	if (ctx->xlat_regime == EL1_EL0_REGIME) {
 		assert(xlat_arch_current_el() >= 1U);
 		return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
 	} else if (ctx->xlat_regime == EL2_REGIME) {
 		assert(xlat_arch_current_el() >= 2U);
 		return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
 	} else {
 		assert(ctx->xlat_regime == EL3_REGIME);
 		assert(xlat_arch_current_el() >= 3U);
 		return (read_sctlr_el3() & SCTLR_M_BIT) != 0U;
 	}
 }

 bool is_dcache_enabled(void)
 {
-	unsigned int el = get_current_el_maybe_constant();
+	unsigned int el = get_current_el();

 	if (el == 1U) {
 		return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
 	} else if (el == 2U) {
 		return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
 	} else {
 		return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
 	}
 }

-uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
-{
-	if (xlat_regime == EL1_EL0_REGIME) {
-		return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
-	} else {
-		assert((xlat_regime == EL2_REGIME) ||
-		       (xlat_regime == EL3_REGIME));
-		return UPPER_ATTRS(XN);
-	}
-}
-
-void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
-{
-	/*
-	 * Ensure the translation table write has drained into memory before
-	 * invalidating the TLB entry.
-	 */
-	dsbishst();
-
-	/*
-	 * This function only supports invalidation of TLB entries for the
-	 * EL3 and EL1&0 translation regimes.
-	 *
-	 * Also, it is architecturally UNDEFINED to invalidate TLBs of a higher
-	 * exception level (see section D4.9.2 of the ARM ARM rev B.a).
-	 */
-	if (xlat_regime == EL1_EL0_REGIME) {
-		assert(xlat_arch_current_el() >= 1U);
-		tlbivaae1is(TLBI_ADDR(va));
-	} else if (xlat_regime == EL2_REGIME) {
-		assert(xlat_arch_current_el() >= 2U);
-		tlbivae2is(TLBI_ADDR(va));
-	} else {
-		assert(xlat_regime == EL3_REGIME);
-		assert(xlat_arch_current_el() >= 3U);
-		tlbivae3is(TLBI_ADDR(va));
-	}
-}
-
-void xlat_arch_tlbi_va_sync(void)
-{
-	/*
-	 * A TLB maintenance instruction can complete at any time after
-	 * it is issued, but is only guaranteed to be complete after the
-	 * execution of DSB by the PE that executed the TLB maintenance
-	 * instruction. After the TLB invalidate instruction is
-	 * complete, no new memory accesses using the invalidated TLB
-	 * entries will be observed by any observer of the system
-	 * domain. See section D4.8.2 of the ARMv8 (issue i.a), paragraph
-	 * "Ordering and completion of TLB maintenance instructions".
-	 */
-	dsbish();
-
-	/*
-	 * The effects of a completed TLB maintenance instruction are
-	 * only guaranteed to be visible on the PE that executed the
-	 * instruction after the execution of an ISB instruction by the
-	 * PE that executed the TLB maintenance instruction.
-	 */
-	isb();
-}
-
 unsigned int xlat_arch_current_el(void)
 {
 	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());

 	assert(el > 0U);

 	return el;
 }
-
-void setup_mmu_cfg(uint64_t *params, unsigned int flags,
-		   const uint64_t *base_table, unsigned long long max_pa,
-		   uintptr_t max_va, int xlat_regime)
-{
-	uint64_t mair, ttbr0, tcr;
-	uintptr_t virtual_addr_space_size;
-
-	/* Set attributes in the right indices of the MAIR. */
-	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
-	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
-	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
-
-	/*
-	 * Limit the input address ranges and memory region sizes translated
-	 * using TTBR0 to the given virtual address space size.
-	 */
-	assert(max_va < ((uint64_t)UINTPTR_MAX));
-
-	virtual_addr_space_size = (uintptr_t)max_va + 1U;
-
-	assert(virtual_addr_space_size >=
-		xlat_get_min_virt_addr_space_size());
-	assert(virtual_addr_space_size <= MAX_VIRT_ADDR_SPACE_SIZE);
-	assert(IS_POWER_OF_TWO(virtual_addr_space_size));
-
-	/*
-	 * __builtin_ctzll(0) is undefined but here we are guaranteed that
-	 * virtual_addr_space_size is in the range [1,UINTPTR_MAX].
-	 */
-	int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);
-
-	tcr = (uint64_t)t0sz << TCR_T0SZ_SHIFT;
-
-	/*
-	 * Set the cacheability and shareability attributes for memory
-	 * associated with translation table walks.
-	 */
-	if ((flags & XLAT_TABLE_NC) != 0U) {
-		/* Inner & outer non-cacheable non-shareable. */
-		tcr |= TCR_SH_NON_SHAREABLE |
-			TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
-	} else {
-		/* Inner & outer WBWA & shareable. */
-		tcr |= TCR_SH_INNER_SHAREABLE |
-			TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA;
-	}
-
-	/*
-	 * It is safer to restrict the max physical address accessible by the
-	 * hardware as much as possible.
-	 */
-	unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);
-
-	if (xlat_regime == EL1_EL0_REGIME) {
-		/*
-		 * TCR_EL1.EPD1: Disable translation table walk for addresses
-		 * that are translated using TTBR1_EL1.
-		 */
-		tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
-	} else if (xlat_regime == EL2_REGIME) {
-		tcr |= TCR_EL2_RES1 | (tcr_ps_bits << TCR_EL2_PS_SHIFT);
-	} else {
-		assert(xlat_regime == EL3_REGIME);
-		tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
-	}
-
-	/* Set TTBR bits as well */
-	ttbr0 = (uint64_t) base_table;
-
-	if (is_armv8_2_ttcnp_present()) {
-		/* Enable CnP bit so as to share page tables with all PEs. */
-		ttbr0 |= TTBR_CNP_BIT;
-	}
-
-	params[MMU_CFG_MAIR] = mair;
-	params[MMU_CFG_TCR] = tcr;
-	params[MMU_CFG_TTBR0] = ttbr0;
-}
```
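The pattern in the diff is that everything tied to VMSA translation (granule probing, the TCR/MAIR/TTBR programming in `setup_mmu_cfg`, XN descriptor bits, and TLB maintenance) is dropped, since an MPU has no translation tables or TLBs, while the EL-agnostic helpers survive with `is_mmu_enabled_ctx` renamed to `is_mpu_enabled_ctx`. As a minimal sketch of how the retained helper might be used, the routine below is hypothetical (not part of either file); only `is_mpu_enabled_ctx` and `xlat_ctx_t` come from the code above:

```c
#include <assert.h>
#include <stdbool.h>

#include <lib/xlat_tables/xlat_tables_v2.h>

/* Retained helper from xlat_mpu_arch.c, declared here for the sketch. */
bool is_mpu_enabled_ctx(const xlat_ctx_t *ctx);

/*
 * Hypothetical region-update routine. Reprogramming MPU regions while
 * SCTLR_ELx.M is set would change memory attributes under the core's
 * feet, so a caller would check that the MPU is still disabled for this
 * context before touching the region registers.
 */
static void mpu_update_region(xlat_ctx_t *ctx)
{
	assert(!is_mpu_enabled_ctx(ctx));
	/* ... program the region registers for ctx here ... */
}
```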
Last Author: garymorrison-arm
Last Edited: Jul 2 2021, 10:56 PM