
lib/xlat_tables_v2/xlat_tables_context.c vs lib/xlat_mpu/xlat_mpu_context.c

--- lib/xlat_tables_v2/xlat_tables_context.c
+++ lib/xlat_mpu/xlat_mpu_context.c
 /*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
-#include <arch_helpers.h>
+#include <fvp_r_arch_helpers.h>
 #include <assert.h>
 
 #include <platform_def.h>
 
 #include <common/debug.h>
 #include <lib/xlat_tables/xlat_tables_defs.h>
 #include <lib/xlat_tables/xlat_tables_v2.h>
 
-#include "xlat_tables_private.h"
+#include "lib/xlat_mpu/xlat_mpu.h"
+#include "xlat_mpu_private.h"

 /*
  * MMU configuration register values for the active translation context. Used
  * from the MMU assembly helpers.
  */
 uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
 
 /*
  * Allocate and initialise the default translation context for the BL image
  * currently executing.
  */
 REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
 		      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
 
 void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
 		     unsigned int attr)
 {
 	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
 
 	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
 }
 
 void mmap_add(const mmap_region_t *mm)
 {
 	mmap_add_ctx(&tf_xlat_ctx, mm);
 }
 
 void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
 			      size_t size, unsigned int attr)
 {
 	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);
 
 	mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);
 
 	*base_va = mm.base_va;
 }
 
 void mmap_add_alloc_va(mmap_region_t *mm)
 {
 	while (mm->granularity != 0U) {
 		assert(mm->base_va == 0U);
 		mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
 		mm++;
 	}
 }
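These wrappers are identical in both files: they accept either a single region or an array terminated by an all-zero entry (cf. the granularity != 0U loop above). For context, a minimal caller sketch; the region values and the plat_regions/plat_setup_mmap names are placeholders for illustration, not from either file:

	static const mmap_region_t plat_regions[] = {
		/* identity-map 1 MiB of secure RAM and one device page */
		MAP_REGION_FLAT(0x80000000, 0x00100000,
				MT_MEMORY | MT_RW | MT_SECURE),
		MAP_REGION_FLAT(0x1c090000, 0x00001000,
				MT_DEVICE | MT_RW | MT_SECURE),
		{0} /* all-zero terminator stops the add loop */
	};

	void plat_setup_mmap(void)
	{
		mmap_add(plat_regions);
	}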

 #if PLAT_XLAT_TABLES_DYNAMIC
 
 int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
 			    size_t size, unsigned int attr)
 {
 	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
 
 	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
 }
 
 int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
 				     uintptr_t *base_va, size_t size,
 				     unsigned int attr)
 {
 	mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);
 
 	int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);
 
 	*base_va = mm.base_va;
 
 	return rc;
 }
 
 int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
 {
 	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
 					base_va, size);
 }
 
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
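With PLAT_XLAT_TABLES_DYNAMIC enabled, regions can also come and go at runtime. A hedged usage sketch; the device address and the function name are placeholders, while PAGE_SIZE and the MT_* attributes come from the xlat headers included above:

	int map_and_unmap_device_page(void)
	{
		/* identity-map one device page at runtime */
		int rc = mmap_add_dynamic_region(0x9000000ULL, 0x9000000U,
						 PAGE_SIZE,
						 MT_DEVICE | MT_RW | MT_SECURE);
		if (rc != 0) {
			return rc;
		}

		/* ... access the device, then tear the mapping down */
		return mmap_remove_dynamic_region(0x9000000U, PAGE_SIZE);
	}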

 void __init init_xlat_tables(void)
 {
 	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);
 
 	unsigned int current_el = xlat_arch_current_el();
 
 	if (current_el == 1U) {
 		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
+#ifdef NO_EL3
+	} else {
+		assert(current_el == 2U);
+		tf_xlat_ctx.xlat_regime = EL2_REGIME;
+#else
 	} else if (current_el == 2U) {
 		tf_xlat_ctx.xlat_regime = EL2_REGIME;
 	} else {
 		assert(current_el == 3U);
 		tf_xlat_ctx.xlat_regime = EL3_REGIME;
+#endif
 	}
-
 	init_xlat_tables_ctx(&tf_xlat_ctx);
 }
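Armv8-R64 has no EL3, so the MPU build gates the EL3 regime out with NO_EL3. With that macro defined, the regime selection above collapses to the following (the same statements as the '+' lines, shown preprocessed for clarity):

	if (current_el == 1U) {
		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
	} else {
		/* EL2 is the highest Exception level on v8-R64 */
		assert(current_el == 2U);
		tf_xlat_ctx.xlat_regime = EL2_REGIME;
	}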

 int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
 {
 	return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
 }
 
 int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
 {
 	return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
 }
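These two wrappers pair naturally: query a mapping's attributes, then rewrite them. A minimal sketch, assuming base_va is already mapped; the helper name is made up for illustration:

	static int demote_page_to_ro(uintptr_t base_va)
	{
		uint32_t attr;
		int rc = xlat_get_mem_attributes(base_va, &attr);

		if (rc != 0) {
			return rc;
		}
		/* drop write permission on one granule */
		return xlat_change_mem_attributes(base_va, PAGE_SIZE,
						  MT_RO_DATA | MT_SECURE);
	}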

 #if PLAT_RO_XLAT_TABLES
 /* Change the memory attributes of the descriptors which resolve the address
  * range that belongs to the translation tables themselves, which are by default
  * mapped as part of read-write data in the BL image's memory.
  *
  * Since the translation tables map themselves via these level 3 (page)
  * descriptors, any change applied to them with the MMU on would introduce a
  * chicken and egg problem because of the break-before-make sequence.
  * Eventually, it would reach the descriptor that resolves the very table it
  * belongs to and the invalidation (break step) would cause the subsequent write
  * (make step) to it to generate an MMU fault. Therefore, the MMU is disabled
  * before making the change.
  *
  * No assumption is made about what data this function needs, therefore all the
  * caches are flushed in order to ensure coherency. A future optimization would
  * be to only flush the required data to main memory.
  */
 int xlat_make_tables_readonly(void)
 {
 	assert(tf_xlat_ctx.initialized == true);
-#ifdef __aarch64__
 	if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
 		disable_mmu_el1();
+#ifdef NO_EL3
+	} else if (tf_xlat_ctx.xlat_regime == EL3_REGIME) {
+		return -1;
+	} else {
+		assert(tf_xlat_ctx.xlat_regime == EL2_REGIME);
+		disable_mpu_el2();
+	}
+#else
 	} else if (tf_xlat_ctx.xlat_regime == EL3_REGIME) {
 		disable_mmu_el3();
 	} else {
 		assert(tf_xlat_ctx.xlat_regime == EL2_REGIME);
 		return -1;
 	}
+#endif
 
 	/* Flush all caches. */
 	dcsw_op_all(DCCISW);
-#else /* !__aarch64__ */
-	assert(tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME);
-	/* On AArch32, we flush the caches before disabling the MMU. The reason
-	 * for this is that the dcsw_op_all AArch32 function pushes some
-	 * registers onto the stack under the assumption that it is writing to
-	 * cache, which is not true with the MMU off. This would result in the
-	 * stack becoming corrupted and a wrong/junk value for the LR being
-	 * restored at the end of the routine.
-	 */
-	dcsw_op_all(DC_OP_CISW);
-	disable_mmu_secure();
-#endif
 
 	int rc = xlat_change_mem_attributes_ctx(&tf_xlat_ctx,
 				(uintptr_t)tf_xlat_ctx.tables,
 				tf_xlat_ctx.tables_num * XLAT_TABLE_SIZE,
 				MT_RO_DATA | MT_SECURE);
 
-#ifdef __aarch64__
 	if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
 		enable_mmu_el1(0U);
 	} else {
+#ifdef NO_EL3
+		assert(tf_xlat_ctx.xlat_regime == EL2_REGIME);
+		enable_mpu_el2(0U);
+#else
 		assert(tf_xlat_ctx.xlat_regime == EL3_REGIME);
 		enable_mmu_el3(0U);
-	}
-#else /* !__aarch64__ */
-	enable_mmu_svc_mon(0U);
 #endif
+	}
 
 	if (rc == 0) {
 		tf_xlat_ctx.readonly_tables = true;
 	}
 
 	return rc;
 }
 #endif /* PLAT_RO_XLAT_TABLES */
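A hedged sketch of a call site; the placement is an assumption, while what is certain from the code above is that the context must already be initialised and that the unsupported regime (EL2 on the MMU side, EL3 on the MPU side) returns -1:

	/* after init_xlat_tables() and the enable_mmu()/enable_mpu_el2() call */
	if (xlat_make_tables_readonly() != 0) {
		WARN("translation tables left writable\n");
	}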

 /*
  * If dynamic allocation of new regions is disabled then by the time we call the
  * function enabling the MMU, we'll have registered all the memory regions to
  * map for the system's lifetime. Therefore, at this point we know the maximum
  * physical address that will ever be mapped.
  *
  * If dynamic allocation is enabled then we can't make any such assumption
  * because the maximum physical address could get pushed while adding a new
  * region. Therefore, in this case we have to assume that the whole address
  * space size might be mapped.
  */
 #ifdef PLAT_XLAT_TABLES_DYNAMIC
 #define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
 #else
 #define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
 #endif

-#ifdef __aarch64__
-
-void enable_mmu_el1(unsigned int flags)
-{
-	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
-		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
-	enable_mmu_direct_el1(flags);
-}
-
-void enable_mmu_el2(unsigned int flags)
+void enable_mpu_el2(unsigned int flags)
 {
-	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
-		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-		      tf_xlat_ctx.va_max_address, EL2_REGIME);
-	enable_mmu_direct_el2(flags);
-}
-
-void enable_mmu_el3(unsigned int flags)
-{
-	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
-		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-		      tf_xlat_ctx.va_max_address, EL3_REGIME);
-	enable_mmu_direct_el3(flags);
+	/* EL2 is strictly MPU on v8-R64, so no need for setup_mmu_cfg() */
+	enable_mpu_direct_el2(flags);
 }
 
 void enable_mmu(unsigned int flags)
 {
 	switch (get_current_el_maybe_constant()) {
 	case 1:
 		enable_mmu_el1(flags);
 		break;
 	case 2:
-		enable_mmu_el2(flags);
+		enable_mpu_el2(flags);
 		break;
+#ifndef NO_EL3
 	case 3:
 		enable_mmu_el3(flags);
 		break;
+#endif
 	default:
 		panic();
 	}
 }
-
-#else /* !__aarch64__ */
-
-void enable_mmu_svc_mon(unsigned int flags)
-{
-	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
-		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
-	enable_mmu_direct_svc_mon(flags);
-}
-
-void enable_mmu_hyp(unsigned int flags)
-{
-	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
-		      tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
-		      tf_xlat_ctx.va_max_address, EL2_REGIME);
-	enable_mmu_direct_hyp(flags);
-}
-
-#endif /* __aarch64__ */
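Taken together, a minimal Armv8-R64 boot-time sequence using the '+' side of this diff might look as follows; the region values are placeholders and the exact call site is an assumption, but the ordering mirrors the dependencies above:

	/* register all static regions before initialising the context */
	mmap_add_region(0x80000000ULL, 0x80000000U, 0x00100000U,
			MT_MEMORY | MT_RW | MT_SECURE);
	init_xlat_tables();	/* picks EL2_REGIME when entered at EL2 */
	enable_mpu_el2(0U);	/* MPU-only at EL2: no setup_mmu_cfg() call */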