The official Mbed 2 C/C++ SDK provides the software platform and libraries to build your applications.

Dependents:   hello SerialTestv11 SerialTestv12 Sierpinski ... more

mbed 2

This is the mbed 2 library. If you'd like to learn about Mbed OS please see the mbed-os docs.

Committer:
AnnaBridge
Date:
Fri May 26 12:30:20 2017 +0100
Revision:
143:86740a56073b
Parent:
139:856d2700e60b
Release 143 of the mbed library.

Who changed what in which revision?

User | Revision | Line number | New contents of line
<> 139:856d2700e60b 1 /**************************************************************************//**
<> 139:856d2700e60b 2 * @file core_caFunc.h
<> 139:856d2700e60b 3 * @brief CMSIS Cortex-A Core Function Access Header File
<> 139:856d2700e60b 4 * @version V3.10
<> 139:856d2700e60b 5 * @date 30 Oct 2013
<> 139:856d2700e60b 6 *
<> 139:856d2700e60b 7 * @note
<> 139:856d2700e60b 8 *
<> 139:856d2700e60b 9 ******************************************************************************/
<> 139:856d2700e60b 10 /* Copyright (c) 2009 - 2013 ARM LIMITED
<> 139:856d2700e60b 11
<> 139:856d2700e60b 12 All rights reserved.
<> 139:856d2700e60b 13 Redistribution and use in source and binary forms, with or without
<> 139:856d2700e60b 14 modification, are permitted provided that the following conditions are met:
<> 139:856d2700e60b 15 - Redistributions of source code must retain the above copyright
<> 139:856d2700e60b 16 notice, this list of conditions and the following disclaimer.
<> 139:856d2700e60b 17 - Redistributions in binary form must reproduce the above copyright
<> 139:856d2700e60b 18 notice, this list of conditions and the following disclaimer in the
<> 139:856d2700e60b 19 documentation and/or other materials provided with the distribution.
<> 139:856d2700e60b 20 - Neither the name of ARM nor the names of its contributors may be used
<> 139:856d2700e60b 21 to endorse or promote products derived from this software without
<> 139:856d2700e60b 22 specific prior written permission.
<> 139:856d2700e60b 23 *
<> 139:856d2700e60b 24 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
<> 139:856d2700e60b 25 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
<> 139:856d2700e60b 26 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
<> 139:856d2700e60b 27 ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
<> 139:856d2700e60b 28 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
<> 139:856d2700e60b 29 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
<> 139:856d2700e60b 30 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
<> 139:856d2700e60b 31 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
<> 139:856d2700e60b 32 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
<> 139:856d2700e60b 33 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
<> 139:856d2700e60b 34 POSSIBILITY OF SUCH DAMAGE.
<> 139:856d2700e60b 35 ---------------------------------------------------------------------------*/
<> 139:856d2700e60b 36
<> 139:856d2700e60b 37
<> 139:856d2700e60b 38 #ifndef __CORE_CAFUNC_H__
<> 139:856d2700e60b 39 #define __CORE_CAFUNC_H__
<> 139:856d2700e60b 40
<> 139:856d2700e60b 41
<> 139:856d2700e60b 42 /* ########################### Core Function Access ########################### */
<> 139:856d2700e60b 43 /** \ingroup CMSIS_Core_FunctionInterface
<> 139:856d2700e60b 44 \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
<> 139:856d2700e60b 45 @{
<> 139:856d2700e60b 46 */
<> 139:856d2700e60b 47
<> 139:856d2700e60b 48 #if defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
<> 139:856d2700e60b 49 /* ARM armcc specific functions */
<> 139:856d2700e60b 50
<> 139:856d2700e60b 51 #if (__ARMCC_VERSION < 400677)
<> 139:856d2700e60b 52 #error "Please use ARM Compiler Toolchain V4.0.677 or later!"
<> 139:856d2700e60b 53 #endif
<> 139:856d2700e60b 54
/* CPSR.M[4:0] processor-mode encodings, used with the CPS instruction below. */
#define MODE_USR 0x10            /* User mode (unprivileged)                 */
#define MODE_FIQ 0x11            /* Fast Interrupt mode                      */
#define MODE_IRQ 0x12            /* Interrupt mode                           */
#define MODE_SVC 0x13            /* Supervisor mode                          */
#define MODE_MON 0x16            /* Monitor mode                             */
#define MODE_ABT 0x17            /* Abort mode                               */
#define MODE_HYP 0x1A            /* Hyp mode                                 */
#define MODE_UND 0x1B            /* Undefined Instruction mode               */
#define MODE_SYS 0x1F            /* System mode (privileged, shares USR regs)*/
<> 139:856d2700e60b 64
<> 139:856d2700e60b 65 /** \brief Get APSR Register
<> 139:856d2700e60b 66
<> 139:856d2700e60b 67 This function returns the content of the APSR Register.
<> 139:856d2700e60b 68
<> 139:856d2700e60b 69 \return APSR Register value
<> 139:856d2700e60b 70 */
<> 139:856d2700e60b 71 __STATIC_INLINE uint32_t __get_APSR(void)
<> 139:856d2700e60b 72 {
<> 139:856d2700e60b 73 register uint32_t __regAPSR __ASM("apsr");
<> 139:856d2700e60b 74 return(__regAPSR);
<> 139:856d2700e60b 75 }
<> 139:856d2700e60b 76
<> 139:856d2700e60b 77
<> 139:856d2700e60b 78 /** \brief Get CPSR Register
<> 139:856d2700e60b 79
<> 139:856d2700e60b 80 This function returns the content of the CPSR Register.
<> 139:856d2700e60b 81
<> 139:856d2700e60b 82 \return CPSR Register value
<> 139:856d2700e60b 83 */
<> 139:856d2700e60b 84 __STATIC_INLINE uint32_t __get_CPSR(void)
<> 139:856d2700e60b 85 {
<> 139:856d2700e60b 86 register uint32_t __regCPSR __ASM("cpsr");
<> 139:856d2700e60b 87 return(__regCPSR);
<> 139:856d2700e60b 88 }
<> 139:856d2700e60b 89
/** \brief Set Stack Pointer

    This function assigns the given value to the current stack pointer.

    \param [in]    topOfStack  Stack Pointer value to set
 */
/* File-scope armcc named-register variable bound to the current mode's SP.
   NOTE(review): file-scope, so the name is visible to all includers of this
   header — do not rename. */
register uint32_t __regSP __ASM("sp");
__STATIC_INLINE void __set_SP(uint32_t topOfStack)
{
    /* Assigning the named-register variable writes SP directly. */
    __regSP = topOfStack;
}
<> 139:856d2700e60b 101
<> 139:856d2700e60b 102
/** \brief Get link register

    This function returns the value of the link register.

    \return Value of link register
 */
/* File-scope armcc named-register variable bound to LR; shared with
   __set_LR() below — do not rename. */
register uint32_t __reglr __ASM("lr");
__STATIC_INLINE uint32_t __get_LR(void)
{
    return(__reglr);
}
<> 139:856d2700e60b 114
/** \brief Set link register

    This function sets the value of the link register.

    \param [in]    lr  LR value to set
 */
__STATIC_INLINE void __set_LR(uint32_t lr)
{
    /* __reglr is the file-scope named-register variable bound to LR. */
    __reglr = lr;
}
<> 139:856d2700e60b 125
/** \brief Set Process Stack Pointer

    This function assigns the given value to the USR/SYS Stack Pointer (PSP).

    \param [in]    topOfProcStack  USR/SYS Stack Pointer value to set
 */
/* armcc embedded-assembler function: briefly switches to SYS mode to load
   that mode's SP, then restores the caller's mode from the saved CPSR. */
__STATIC_ASM void __set_PSP(uint32_t topOfProcStack)
{
    ARM
    PRESERVE8

    BIC R0, R0, #7 ;ensure stack is 8-byte aligned
    MRS R1, CPSR
    CPS #MODE_SYS ;no effect in USR mode
    MOV SP, R0
    MSR CPSR_c, R1 ;no effect in USR mode
    ISB
    BX LR

}
<> 139:856d2700e60b 146
/** \brief Set User Mode

    This function changes the processor state to User Mode.
    NOTE(review): the switch is one-way from the caller's perspective —
    USR mode cannot change CPSR.M back (CPS has no effect in USR mode).
 */
__STATIC_ASM void __set_CPS_USR(void)
{
    ARM

    CPS #MODE_USR
    BX LR
}
<> 139:856d2700e60b 158
<> 139:856d2700e60b 159
/** \brief Enable FIQ

    This function enables FIQ interrupts by clearing the F-bit in the CPSR.
    Can only be executed in Privileged modes.
 */
/* Alias the CMSIS fault-irq name onto the compiler-provided __enable_fiq. */
#define __enable_fault_irq __enable_fiq


/** \brief Disable FIQ

    This function disables FIQ interrupts by setting the F-bit in the CPSR.
    Can only be executed in Privileged modes.
 */
/* Alias the CMSIS fault-irq name onto the compiler-provided __disable_fiq. */
#define __disable_fault_irq __disable_fiq
<> 139:856d2700e60b 174
<> 139:856d2700e60b 175
<> 139:856d2700e60b 176 /** \brief Get FPSCR
<> 139:856d2700e60b 177
<> 139:856d2700e60b 178 This function returns the current value of the Floating Point Status/Control register.
<> 139:856d2700e60b 179
<> 139:856d2700e60b 180 \return Floating Point Status/Control register value
<> 139:856d2700e60b 181 */
<> 139:856d2700e60b 182 __STATIC_INLINE uint32_t __get_FPSCR(void)
<> 139:856d2700e60b 183 {
<> 139:856d2700e60b 184 #if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
<> 139:856d2700e60b 185 register uint32_t __regfpscr __ASM("fpscr");
<> 139:856d2700e60b 186 return(__regfpscr);
<> 139:856d2700e60b 187 #else
<> 139:856d2700e60b 188 return(0);
<> 139:856d2700e60b 189 #endif
<> 139:856d2700e60b 190 }
<> 139:856d2700e60b 191
<> 139:856d2700e60b 192
<> 139:856d2700e60b 193 /** \brief Set FPSCR
<> 139:856d2700e60b 194
<> 139:856d2700e60b 195 This function assigns the given value to the Floating Point Status/Control register.
<> 139:856d2700e60b 196
<> 139:856d2700e60b 197 \param [in] fpscr Floating Point Status/Control value to set
<> 139:856d2700e60b 198 */
<> 139:856d2700e60b 199 __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
<> 139:856d2700e60b 200 {
<> 139:856d2700e60b 201 #if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
<> 139:856d2700e60b 202 register uint32_t __regfpscr __ASM("fpscr");
<> 139:856d2700e60b 203 __regfpscr = (fpscr);
<> 139:856d2700e60b 204 #endif
<> 139:856d2700e60b 205 }
<> 139:856d2700e60b 206
<> 139:856d2700e60b 207 /** \brief Get FPEXC
<> 139:856d2700e60b 208
<> 139:856d2700e60b 209 This function returns the current value of the Floating Point Exception Control register.
<> 139:856d2700e60b 210
<> 139:856d2700e60b 211 \return Floating Point Exception Control register value
<> 139:856d2700e60b 212 */
<> 139:856d2700e60b 213 __STATIC_INLINE uint32_t __get_FPEXC(void)
<> 139:856d2700e60b 214 {
<> 139:856d2700e60b 215 #if (__FPU_PRESENT == 1)
<> 139:856d2700e60b 216 register uint32_t __regfpexc __ASM("fpexc");
<> 139:856d2700e60b 217 return(__regfpexc);
<> 139:856d2700e60b 218 #else
<> 139:856d2700e60b 219 return(0);
<> 139:856d2700e60b 220 #endif
<> 139:856d2700e60b 221 }
<> 139:856d2700e60b 222
<> 139:856d2700e60b 223
/** \brief Set FPEXC

    This function assigns the given value to the Floating Point Exception Control register.

    \param [in]    fpexc  Floating Point Exception Control value to set
 */
__STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
{
#if (__FPU_PRESENT == 1)
    /* armcc named-register variable bound to FPEXC; write-through on assign. */
    register uint32_t __regfpexc __ASM("fpexc");
    __regfpexc = (fpexc);
#endif
}
<> 139:856d2700e60b 237
<> 139:856d2700e60b 238 /** \brief Get CPACR
<> 139:856d2700e60b 239
<> 139:856d2700e60b 240 This function returns the current value of the Coprocessor Access Control register.
<> 139:856d2700e60b 241
<> 139:856d2700e60b 242 \return Coprocessor Access Control register value
<> 139:856d2700e60b 243 */
<> 139:856d2700e60b 244 __STATIC_INLINE uint32_t __get_CPACR(void)
<> 139:856d2700e60b 245 {
<> 139:856d2700e60b 246 register uint32_t __regCPACR __ASM("cp15:0:c1:c0:2");
<> 139:856d2700e60b 247 return __regCPACR;
<> 139:856d2700e60b 248 }
<> 139:856d2700e60b 249
<> 139:856d2700e60b 250 /** \brief Set CPACR
<> 139:856d2700e60b 251
<> 139:856d2700e60b 252 This function assigns the given value to the Coprocessor Access Control register.
<> 139:856d2700e60b 253
<> 139:856d2700e60b 254 \param [in] cpacr Coprocessor Acccess Control value to set
<> 139:856d2700e60b 255 */
<> 139:856d2700e60b 256 __STATIC_INLINE void __set_CPACR(uint32_t cpacr)
<> 139:856d2700e60b 257 {
<> 139:856d2700e60b 258 register uint32_t __regCPACR __ASM("cp15:0:c1:c0:2");
<> 139:856d2700e60b 259 __regCPACR = cpacr;
<> 139:856d2700e60b 260 __ISB();
<> 139:856d2700e60b 261 }
<> 139:856d2700e60b 262
<> 139:856d2700e60b 263 /** \brief Get CBAR
<> 139:856d2700e60b 264
<> 139:856d2700e60b 265 This function returns the value of the Configuration Base Address register.
<> 139:856d2700e60b 266
<> 139:856d2700e60b 267 \return Configuration Base Address register value
<> 139:856d2700e60b 268 */
<> 139:856d2700e60b 269 __STATIC_INLINE uint32_t __get_CBAR() {
<> 139:856d2700e60b 270 register uint32_t __regCBAR __ASM("cp15:4:c15:c0:0");
<> 139:856d2700e60b 271 return(__regCBAR);
<> 139:856d2700e60b 272 }
<> 139:856d2700e60b 273
<> 139:856d2700e60b 274 /** \brief Get TTBR0
<> 139:856d2700e60b 275
<> 139:856d2700e60b 276 This function returns the value of the Translation Table Base Register 0.
<> 139:856d2700e60b 277
<> 139:856d2700e60b 278 \return Translation Table Base Register 0 value
<> 139:856d2700e60b 279 */
<> 139:856d2700e60b 280 __STATIC_INLINE uint32_t __get_TTBR0() {
<> 139:856d2700e60b 281 register uint32_t __regTTBR0 __ASM("cp15:0:c2:c0:0");
<> 139:856d2700e60b 282 return(__regTTBR0);
<> 139:856d2700e60b 283 }
<> 139:856d2700e60b 284
<> 139:856d2700e60b 285 /** \brief Set TTBR0
<> 139:856d2700e60b 286
<> 139:856d2700e60b 287 This function assigns the given value to the Translation Table Base Register 0.
<> 139:856d2700e60b 288
<> 139:856d2700e60b 289 \param [in] ttbr0 Translation Table Base Register 0 value to set
<> 139:856d2700e60b 290 */
<> 139:856d2700e60b 291 __STATIC_INLINE void __set_TTBR0(uint32_t ttbr0) {
<> 139:856d2700e60b 292 register uint32_t __regTTBR0 __ASM("cp15:0:c2:c0:0");
<> 139:856d2700e60b 293 __regTTBR0 = ttbr0;
<> 139:856d2700e60b 294 __ISB();
<> 139:856d2700e60b 295 }
<> 139:856d2700e60b 296
<> 139:856d2700e60b 297 /** \brief Get DACR
<> 139:856d2700e60b 298
<> 139:856d2700e60b 299 This function returns the value of the Domain Access Control Register.
<> 139:856d2700e60b 300
<> 139:856d2700e60b 301 \return Domain Access Control Register value
<> 139:856d2700e60b 302 */
<> 139:856d2700e60b 303 __STATIC_INLINE uint32_t __get_DACR() {
<> 139:856d2700e60b 304 register uint32_t __regDACR __ASM("cp15:0:c3:c0:0");
<> 139:856d2700e60b 305 return(__regDACR);
<> 139:856d2700e60b 306 }
<> 139:856d2700e60b 307
<> 139:856d2700e60b 308 /** \brief Set DACR
<> 139:856d2700e60b 309
<> 139:856d2700e60b 310 This function assigns the given value to the Domain Access Control Register.
<> 139:856d2700e60b 311
<> 139:856d2700e60b 312 \param [in] dacr Domain Access Control Register value to set
<> 139:856d2700e60b 313 */
<> 139:856d2700e60b 314 __STATIC_INLINE void __set_DACR(uint32_t dacr) {
<> 139:856d2700e60b 315 register uint32_t __regDACR __ASM("cp15:0:c3:c0:0");
<> 139:856d2700e60b 316 __regDACR = dacr;
<> 139:856d2700e60b 317 __ISB();
<> 139:856d2700e60b 318 }
<> 139:856d2700e60b 319
<> 139:856d2700e60b 320 /******************************** Cache and BTAC enable ****************************************************/
<> 139:856d2700e60b 321
<> 139:856d2700e60b 322 /** \brief Set SCTLR
<> 139:856d2700e60b 323
<> 139:856d2700e60b 324 This function assigns the given value to the System Control Register.
<> 139:856d2700e60b 325
<> 139:856d2700e60b 326 \param [in] sctlr System Control Register value to set
<> 139:856d2700e60b 327 */
<> 139:856d2700e60b 328 __STATIC_INLINE void __set_SCTLR(uint32_t sctlr)
<> 139:856d2700e60b 329 {
<> 139:856d2700e60b 330 register uint32_t __regSCTLR __ASM("cp15:0:c1:c0:0");
<> 139:856d2700e60b 331 __regSCTLR = sctlr;
<> 139:856d2700e60b 332 }
<> 139:856d2700e60b 333
<> 139:856d2700e60b 334 /** \brief Get SCTLR
<> 139:856d2700e60b 335
<> 139:856d2700e60b 336 This function returns the value of the System Control Register.
<> 139:856d2700e60b 337
<> 139:856d2700e60b 338 \return System Control Register value
<> 139:856d2700e60b 339 */
<> 139:856d2700e60b 340 __STATIC_INLINE uint32_t __get_SCTLR() {
<> 139:856d2700e60b 341 register uint32_t __regSCTLR __ASM("cp15:0:c1:c0:0");
<> 139:856d2700e60b 342 return(__regSCTLR);
<> 139:856d2700e60b 343 }
<> 139:856d2700e60b 344
<> 139:856d2700e60b 345 /** \brief Enable Caches
<> 139:856d2700e60b 346
<> 139:856d2700e60b 347 Enable Caches
<> 139:856d2700e60b 348 */
<> 139:856d2700e60b 349 __STATIC_INLINE void __enable_caches(void) {
<> 139:856d2700e60b 350 // Set I bit 12 to enable I Cache
<> 139:856d2700e60b 351 // Set C bit 2 to enable D Cache
<> 139:856d2700e60b 352 __set_SCTLR( __get_SCTLR() | (1 << 12) | (1 << 2));
<> 139:856d2700e60b 353 }
<> 139:856d2700e60b 354
<> 139:856d2700e60b 355 /** \brief Disable Caches
<> 139:856d2700e60b 356
<> 139:856d2700e60b 357 Disable Caches
<> 139:856d2700e60b 358 */
<> 139:856d2700e60b 359 __STATIC_INLINE void __disable_caches(void) {
<> 139:856d2700e60b 360 // Clear I bit 12 to disable I Cache
<> 139:856d2700e60b 361 // Clear C bit 2 to disable D Cache
<> 139:856d2700e60b 362 __set_SCTLR( __get_SCTLR() & ~(1 << 12) & ~(1 << 2));
<> 139:856d2700e60b 363 __ISB();
<> 139:856d2700e60b 364 }
<> 139:856d2700e60b 365
<> 139:856d2700e60b 366 /** \brief Enable BTAC
<> 139:856d2700e60b 367
<> 139:856d2700e60b 368 Enable BTAC
<> 139:856d2700e60b 369 */
<> 139:856d2700e60b 370 __STATIC_INLINE void __enable_btac(void) {
<> 139:856d2700e60b 371 // Set Z bit 11 to enable branch prediction
<> 139:856d2700e60b 372 __set_SCTLR( __get_SCTLR() | (1 << 11));
<> 139:856d2700e60b 373 __ISB();
<> 139:856d2700e60b 374 }
<> 139:856d2700e60b 375
<> 139:856d2700e60b 376 /** \brief Disable BTAC
<> 139:856d2700e60b 377
<> 139:856d2700e60b 378 Disable BTAC
<> 139:856d2700e60b 379 */
<> 139:856d2700e60b 380 __STATIC_INLINE void __disable_btac(void) {
<> 139:856d2700e60b 381 // Clear Z bit 11 to disable branch prediction
<> 139:856d2700e60b 382 __set_SCTLR( __get_SCTLR() & ~(1 << 11));
<> 139:856d2700e60b 383 }
<> 139:856d2700e60b 384
<> 139:856d2700e60b 385
<> 139:856d2700e60b 386 /** \brief Enable MMU
<> 139:856d2700e60b 387
<> 139:856d2700e60b 388 Enable MMU
<> 139:856d2700e60b 389 */
<> 139:856d2700e60b 390 __STATIC_INLINE void __enable_mmu(void) {
<> 139:856d2700e60b 391 // Set M bit 0 to enable the MMU
<> 139:856d2700e60b 392 // Set AFE bit to enable simplified access permissions model
<> 139:856d2700e60b 393 // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
<> 139:856d2700e60b 394 __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
<> 139:856d2700e60b 395 __ISB();
<> 139:856d2700e60b 396 }
<> 139:856d2700e60b 397
<> 139:856d2700e60b 398 /** \brief Disable MMU
<> 139:856d2700e60b 399
<> 139:856d2700e60b 400 Disable MMU
<> 139:856d2700e60b 401 */
<> 139:856d2700e60b 402 __STATIC_INLINE void __disable_mmu(void) {
<> 139:856d2700e60b 403 // Clear M bit 0 to disable the MMU
<> 139:856d2700e60b 404 __set_SCTLR( __get_SCTLR() & ~1);
<> 139:856d2700e60b 405 __ISB();
<> 139:856d2700e60b 406 }
<> 139:856d2700e60b 407
/******************************** TLB maintenance operations ************************************************/
/** \brief Invalidate the whole tlb

    TLBIALL. Invalidate the whole tlb.
 */

__STATIC_INLINE void __ca9u_inv_tlb_all(void) {
    /* Write-only CP15 maintenance register; writing any value triggers
       the invalidation. */
    register uint32_t __TLBIALL __ASM("cp15:0:c8:c7:0");
    __TLBIALL = 0;
    __DSB();    /* ensure completion of the invalidation       */
    __ISB();    /* ensure subsequent fetches see the new state */
}
<> 139:856d2700e60b 420
/******************************** BTB maintenance operations ************************************************/
/** \brief Invalidate entire branch predictor array

    BPIALL. Branch Predictor Invalidate All.
 */

__STATIC_INLINE void __v7_inv_btac(void) {
    /* Write-only CP15 maintenance register; any value triggers it. */
    register uint32_t __BPIALL __ASM("cp15:0:c7:c5:6");
    __BPIALL = 0;
    __DSB(); //ensure completion of the invalidation
    __ISB(); //ensure instruction fetch path sees new state
}
<> 139:856d2700e60b 433
<> 139:856d2700e60b 434
/******************************** L1 cache operations ******************************************************/

/** \brief Invalidate the whole I$

    ICIALLU. Instruction Cache Invalidate All to PoU.
 */
__STATIC_INLINE void __v7_inv_icache_all(void) {
    /* Write-only CP15 maintenance register; any value triggers it. */
    register uint32_t __ICIALLU __ASM("cp15:0:c7:c5:0");
    __ICIALLU = 0;
    __DSB(); //ensure completion of the invalidation
    __ISB(); //ensure instruction fetch path sees new I cache state
}
<> 139:856d2700e60b 447
<> 139:856d2700e60b 448 /** \brief Clean D$ by MVA
<> 139:856d2700e60b 449
<> 139:856d2700e60b 450 DCCMVAC. Data cache clean by MVA to PoC
<> 139:856d2700e60b 451 */
<> 139:856d2700e60b 452 __STATIC_INLINE void __v7_clean_dcache_mva(void *va) {
<> 139:856d2700e60b 453 register uint32_t __DCCMVAC __ASM("cp15:0:c7:c10:1");
<> 139:856d2700e60b 454 __DCCMVAC = (uint32_t)va;
<> 139:856d2700e60b 455 __DMB(); //ensure the ordering of data cache maintenance operations and their effects
<> 139:856d2700e60b 456 }
<> 139:856d2700e60b 457
<> 139:856d2700e60b 458 /** \brief Invalidate D$ by MVA
<> 139:856d2700e60b 459
<> 139:856d2700e60b 460 DCIMVAC. Data cache invalidate by MVA to PoC
<> 139:856d2700e60b 461 */
<> 139:856d2700e60b 462 __STATIC_INLINE void __v7_inv_dcache_mva(void *va) {
<> 139:856d2700e60b 463 register uint32_t __DCIMVAC __ASM("cp15:0:c7:c6:1");
<> 139:856d2700e60b 464 __DCIMVAC = (uint32_t)va;
<> 139:856d2700e60b 465 __DMB(); //ensure the ordering of data cache maintenance operations and their effects
<> 139:856d2700e60b 466 }
<> 139:856d2700e60b 467
<> 139:856d2700e60b 468 /** \brief Clean and Invalidate D$ by MVA
<> 139:856d2700e60b 469
<> 139:856d2700e60b 470 DCCIMVAC. Data cache clean and invalidate by MVA to PoC
<> 139:856d2700e60b 471 */
<> 139:856d2700e60b 472 __STATIC_INLINE void __v7_clean_inv_dcache_mva(void *va) {
<> 139:856d2700e60b 473 register uint32_t __DCCIMVAC __ASM("cp15:0:c7:c14:1");
<> 139:856d2700e60b 474 __DCCIMVAC = (uint32_t)va;
<> 139:856d2700e60b 475 __DMB(); //ensure the ordering of data cache maintenance operations and their effects
<> 139:856d2700e60b 476 }
<> 139:856d2700e60b 477
/** \brief Clean and Invalidate the entire data or unified cache

    Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency.

    Walks every cache level reported by CLIDR and applies a set/way
    maintenance operation to every set and way at each data/unified level.

    \param [in]  op  0 = invalidate (DCISW), 1 = clean (DCCSW),
                     2 = clean and invalidate (DCCISW)
                     (see __v7_inv_dcache_all / __v7_clean_dcache_all /
                      __v7_clean_inv_dcache_all below)
 */
#pragma push
#pragma arm
__STATIC_ASM void __v7_all_cache(uint32_t op) {
    ARM

    PUSH {R4-R11}

    MRC p15, 1, R6, c0, c0, 1 // Read CLIDR
    ANDS R3, R6, #0x07000000 // Extract coherency level
    MOV R3, R3, LSR #23 // Total cache levels << 1
    BEQ Finished // If 0, no need to clean

    MOV R10, #0 // R10 holds current cache level << 1
    Loop1 ADD R2, R10, R10, LSR #1 // R2 holds cache "Set" position
    MOV R1, R6, LSR R2 // Bottom 3 bits are the Cache-type for this level
    AND R1, R1, #7 // Isolate those lower 3 bits
    CMP R1, #2
    BLT Skip // No cache or only instruction cache at this level

    MCR p15, 2, R10, c0, c0, 0 // Write the Cache Size selection register
    ISB // ISB to sync the change to the CacheSizeID reg
    MRC p15, 1, R1, c0, c0, 0 // Reads current Cache Size ID register
    AND R2, R1, #7 // Extract the line length field
    ADD R2, R2, #4 // Add 4 for the line length offset (log2 16 bytes)
    LDR R4, =0x3FF
    ANDS R4, R4, R1, LSR #3 // R4 is the max number on the way size (right aligned)
    CLZ R5, R4 // R5 is the bit position of the way size increment
    LDR R7, =0x7FFF
    ANDS R7, R7, R1, LSR #13 // R7 is the max number of the index size (right aligned)

    Loop2 MOV R9, R4 // R9 working copy of the max way size (right aligned)

    Loop3 ORR R11, R10, R9, LSL R5 // Factor in the Way number and cache number into R11
    ORR R11, R11, R7, LSL R2 // Factor in the Set number
    CMP R0, #0
    BNE Dccsw
    MCR p15, 0, R11, c7, c6, 2 // DCISW. Invalidate by Set/Way
    B cont
    Dccsw CMP R0, #1
    BNE Dccisw
    MCR p15, 0, R11, c7, c10, 2 // DCCSW. Clean by Set/Way
    B cont
    Dccisw MCR p15, 0, R11, c7, c14, 2 // DCCISW. Clean and Invalidate by Set/Way
    cont SUBS R9, R9, #1 // Decrement the Way number
    BGE Loop3
    SUBS R7, R7, #1 // Decrement the Set number
    BGE Loop2
    Skip ADD R10, R10, #2 // Increment the cache number
    CMP R3, R10
    BGT Loop1

    Finished
    DSB
    POP {R4-R11}
    BX lr

}
#pragma pop
<> 139:856d2700e60b 540
<> 139:856d2700e60b 541
<> 139:856d2700e60b 542 /** \brief Invalidate the whole D$
<> 139:856d2700e60b 543
<> 139:856d2700e60b 544 DCISW. Invalidate by Set/Way
<> 139:856d2700e60b 545 */
<> 139:856d2700e60b 546
<> 139:856d2700e60b 547 __STATIC_INLINE void __v7_inv_dcache_all(void) {
<> 139:856d2700e60b 548 __v7_all_cache(0);
<> 139:856d2700e60b 549 }
<> 139:856d2700e60b 550
<> 139:856d2700e60b 551 /** \brief Clean the whole D$
<> 139:856d2700e60b 552
<> 139:856d2700e60b 553 DCCSW. Clean by Set/Way
<> 139:856d2700e60b 554 */
<> 139:856d2700e60b 555
<> 139:856d2700e60b 556 __STATIC_INLINE void __v7_clean_dcache_all(void) {
<> 139:856d2700e60b 557 __v7_all_cache(1);
<> 139:856d2700e60b 558 }
<> 139:856d2700e60b 559
<> 139:856d2700e60b 560 /** \brief Clean and invalidate the whole D$
<> 139:856d2700e60b 561
<> 139:856d2700e60b 562 DCCISW. Clean and Invalidate by Set/Way
<> 139:856d2700e60b 563 */
<> 139:856d2700e60b 564
<> 139:856d2700e60b 565 __STATIC_INLINE void __v7_clean_inv_dcache_all(void) {
<> 139:856d2700e60b 566 __v7_all_cache(2);
<> 139:856d2700e60b 567 }
<> 139:856d2700e60b 568
<> 139:856d2700e60b 569 #include "core_ca_mmu.h"
<> 139:856d2700e60b 570
<> 139:856d2700e60b 571 #elif (defined (__ICCARM__)) /*---------------- ICC Compiler ---------------------*/
<> 139:856d2700e60b 572
<> 139:856d2700e60b 573 #define __inline inline
<> 139:856d2700e60b 574
/* Disable IRQs, returning the previous CPSR.I mask state (bit 7) so the
   caller can decide whether to re-enable. */
inline static uint32_t __disable_irq_iar() {
    uint32_t was_masked = __get_CPSR() & 0x80; /* CPSR.I is bit 7 */
    __disable_irq();
    return was_masked;
}
<> 139:856d2700e60b 580
/* CPSR.M[4:0] processor-mode encodings (IAR section; mirrors the armcc
   definitions above). */
#define MODE_USR 0x10            /* User mode (unprivileged)                 */
#define MODE_FIQ 0x11            /* Fast Interrupt mode                      */
#define MODE_IRQ 0x12            /* Interrupt mode                           */
#define MODE_SVC 0x13            /* Supervisor mode                          */
#define MODE_MON 0x16            /* Monitor mode                             */
#define MODE_ABT 0x17            /* Abort mode                               */
#define MODE_HYP 0x1A            /* Hyp mode                                 */
#define MODE_UND 0x1B            /* Undefined Instruction mode               */
#define MODE_SYS 0x1F            /* System mode (privileged, shares USR regs)*/
<> 139:856d2700e60b 590
/** \brief Set Process Stack Pointer

    This function assigns the given value to the USR/SYS Stack Pointer (PSP).
    Inline-assembly equivalent of the armcc __set_PSP above: briefly
    switches to SYS mode to load that mode's SP, then restores the
    caller's mode.

    \param [in]    topOfProcStack  USR/SYS Stack Pointer value to set
 */
// from rt_CMSIS.c
__arm static inline void __set_PSP(uint32_t topOfProcStack) {
    __asm(
        " ARM\n"
        // " PRESERVE8\n"

        " BIC R0, R0, #7 ;ensure stack is 8-byte aligned \n"
        " MRS R1, CPSR \n"
        " CPS #0x1F ;no effect in USR mode \n" // MODE_SYS
        " MOV SP, R0 \n"
        " MSR CPSR_c, R1 ;no effect in USR mode \n"
        " ISB \n"
        " BX LR \n");
}
<> 139:856d2700e60b 611
/** \brief Set User Mode

    This function changes the processor state to User Mode.
    NOTE(review): one-way from the caller's perspective — USR mode cannot
    change CPSR.M back.
 */
// from rt_CMSIS.c
__arm static inline void __set_CPS_USR(void) {
    __asm(
        " ARM \n"

        " CPS #0x10 \n" // MODE_USR
        " BX LR\n");
}
<> 139:856d2700e60b 624
/** \brief Set TTBR0

    This function assigns the given value to the Translation Table Base Register 0
    (CP15 c2/c0, opcode2 0), then synchronizes with an ISB.

    \param [in]    ttbr0  Translation Table Base Register 0 value to set
 */
// from mmu_Renesas_RZ_A1.c
__STATIC_INLINE void __set_TTBR0(uint32_t ttbr0) {
    __MCR(15, 0, ttbr0, 2, 0, 0); // reg to cp15
    __ISB();
}
<> 139:856d2700e60b 636
/** \brief Set DACR

    This function assigns the given value to the Domain Access Control Register
    (CP15 c3/c0, opcode2 0), then synchronizes with an ISB.

    \param [in]    dacr  Domain Access Control Register value to set
 */
// from mmu_Renesas_RZ_A1.c
__STATIC_INLINE void __set_DACR(uint32_t dacr) {
    __MCR(15, 0, dacr, 3, 0, 0); // reg to cp15
    __ISB();
}
<> 139:856d2700e60b 648
<> 139:856d2700e60b 649
<> 139:856d2700e60b 650 /******************************** Cache and BTAC enable ****************************************************/
<> 139:856d2700e60b 651 /** \brief Set SCTLR
<> 139:856d2700e60b 652
<> 139:856d2700e60b 653 This function assigns the given value to the System Control Register.
<> 139:856d2700e60b 654
<> 139:856d2700e60b 655 \param [in] sctlr System Control Register value to set
<> 139:856d2700e60b 656 */
<> 139:856d2700e60b 657 // from __enable_mmu()
<> 139:856d2700e60b 658 __STATIC_INLINE void __set_SCTLR(uint32_t sctlr) {
<> 139:856d2700e60b 659 __MCR(15, 0, sctlr, 1, 0, 0); // reg to cp15
<> 139:856d2700e60b 660 }
<> 139:856d2700e60b 661
<> 139:856d2700e60b 662 /** \brief Get SCTLR
<> 139:856d2700e60b 663
<> 139:856d2700e60b 664 This function returns the value of the System Control Register.
<> 139:856d2700e60b 665
<> 139:856d2700e60b 666 \return System Control Register value
<> 139:856d2700e60b 667 */
<> 139:856d2700e60b 668 // from __enable_mmu()
<> 139:856d2700e60b 669 __STATIC_INLINE uint32_t __get_SCTLR() {
<> 139:856d2700e60b 670 uint32_t __regSCTLR = __MRC(15, 0, 1, 0, 0);
<> 139:856d2700e60b 671 return __regSCTLR;
<> 139:856d2700e60b 672 }
<> 139:856d2700e60b 673
<> 139:856d2700e60b 674 /** \brief Enable Caches
<> 139:856d2700e60b 675
<> 139:856d2700e60b 676 Enable Caches
<> 139:856d2700e60b 677 */
<> 139:856d2700e60b 678 // from system_Renesas_RZ_A1.c
<> 139:856d2700e60b 679 __STATIC_INLINE void __enable_caches(void) {
<> 139:856d2700e60b 680 __set_SCTLR( __get_SCTLR() | (1 << 12) | (1 << 2));
<> 139:856d2700e60b 681 }
<> 139:856d2700e60b 682
<> 139:856d2700e60b 683 /** \brief Enable BTAC
<> 139:856d2700e60b 684
<> 139:856d2700e60b 685 Enable BTAC
<> 139:856d2700e60b 686 */
<> 139:856d2700e60b 687 // from system_Renesas_RZ_A1.c
<> 139:856d2700e60b 688 __STATIC_INLINE void __enable_btac(void) {
<> 139:856d2700e60b 689 __set_SCTLR( __get_SCTLR() | (1 << 11));
<> 139:856d2700e60b 690 __ISB();
<> 139:856d2700e60b 691 }
<> 139:856d2700e60b 692
<> 139:856d2700e60b 693 /** \brief Enable MMU
<> 139:856d2700e60b 694
<> 139:856d2700e60b 695 Enable MMU
<> 139:856d2700e60b 696 */
<> 139:856d2700e60b 697 // from system_Renesas_RZ_A1.c
<> 139:856d2700e60b 698 __STATIC_INLINE void __enable_mmu(void) {
<> 139:856d2700e60b 699 // Set M bit 0 to enable the MMU
<> 139:856d2700e60b 700 // Set AFE bit to enable simplified access permissions model
<> 139:856d2700e60b 701 // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
<> 139:856d2700e60b 702 __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
<> 139:856d2700e60b 703 __ISB();
<> 139:856d2700e60b 704 }
<> 139:856d2700e60b 705
<> 139:856d2700e60b 706 /******************************** TLB maintenance operations ************************************************/
<> 139:856d2700e60b 707 /** \brief Invalidate the whole tlb
<> 139:856d2700e60b 708
<> 139:856d2700e60b 709 TLBIALL. Invalidate the whole tlb
<> 139:856d2700e60b 710 */
<> 139:856d2700e60b 711 // from system_Renesas_RZ_A1.c
<> 139:856d2700e60b 712 __STATIC_INLINE void __ca9u_inv_tlb_all(void) {
<> 139:856d2700e60b 713 uint32_t val = 0;
<> 139:856d2700e60b 714 __MCR(15, 0, val, 8, 7, 0); // reg to cp15
<> 139:856d2700e60b 715 __MCR(15, 0, val, 8, 6, 0); // reg to cp15
<> 139:856d2700e60b 716 __MCR(15, 0, val, 8, 5, 0); // reg to cp15
<> 139:856d2700e60b 717 __DSB();
<> 139:856d2700e60b 718 __ISB();
<> 139:856d2700e60b 719 }
<> 139:856d2700e60b 720
<> 139:856d2700e60b 721 /******************************** BTB maintenance operations ************************************************/
<> 139:856d2700e60b 722 /** \brief Invalidate entire branch predictor array
<> 139:856d2700e60b 723
<> 139:856d2700e60b 724 BPIALL. Branch Predictor Invalidate All.
<> 139:856d2700e60b 725 */
<> 139:856d2700e60b 726 // from system_Renesas_RZ_A1.c
<> 139:856d2700e60b 727 __STATIC_INLINE void __v7_inv_btac(void) {
<> 139:856d2700e60b 728 uint32_t val = 0;
<> 139:856d2700e60b 729 __MCR(15, 0, val, 7, 5, 6); // reg to cp15
<> 139:856d2700e60b 730 __DSB(); //ensure completion of the invalidation
<> 139:856d2700e60b 731 __ISB(); //ensure instruction fetch path sees new state
<> 139:856d2700e60b 732 }
<> 139:856d2700e60b 733
<> 139:856d2700e60b 734
<> 139:856d2700e60b 735 /******************************** L1 cache operations ******************************************************/
<> 139:856d2700e60b 736
<> 139:856d2700e60b 737 /** \brief Invalidate the whole I$
<> 139:856d2700e60b 738
<> 139:856d2700e60b 739 ICIALLU. Instruction Cache Invalidate All to PoU
<> 139:856d2700e60b 740 */
<> 139:856d2700e60b 741 // from system_Renesas_RZ_A1.c
<> 139:856d2700e60b 742 __STATIC_INLINE void __v7_inv_icache_all(void) {
<> 139:856d2700e60b 743 uint32_t val = 0;
<> 139:856d2700e60b 744 __MCR(15, 0, val, 7, 5, 0); // reg to cp15
<> 139:856d2700e60b 745 __DSB(); //ensure completion of the invalidation
<> 139:856d2700e60b 746 __ISB(); //ensure instruction fetch path sees new I cache state
<> 139:856d2700e60b 747 }
<> 139:856d2700e60b 748
<> 139:856d2700e60b 749 // from __v7_inv_dcache_all()
<> 139:856d2700e60b 750 __arm static inline void __v7_all_cache(uint32_t op) {
<> 139:856d2700e60b 751 __asm(
<> 139:856d2700e60b 752 " ARM \n"
<> 139:856d2700e60b 753
<> 139:856d2700e60b 754 " PUSH {R4-R11} \n"
<> 139:856d2700e60b 755
<> 139:856d2700e60b 756 " MRC p15, 1, R6, c0, c0, 1\n" // Read CLIDR
<> 139:856d2700e60b 757 " ANDS R3, R6, #0x07000000\n" // Extract coherency level
<> 139:856d2700e60b 758 " MOV R3, R3, LSR #23\n" // Total cache levels << 1
<> 139:856d2700e60b 759 " BEQ Finished\n" // If 0, no need to clean
<> 139:856d2700e60b 760
<> 139:856d2700e60b 761 " MOV R10, #0\n" // R10 holds current cache level << 1
<> 139:856d2700e60b 762 "Loop1: ADD R2, R10, R10, LSR #1\n" // R2 holds cache "Set" position
<> 139:856d2700e60b 763 " MOV R1, R6, LSR R2 \n" // Bottom 3 bits are the Cache-type for this level
<> 139:856d2700e60b 764 " AND R1, R1, #7 \n" // Isolate those lower 3 bits
<> 139:856d2700e60b 765 " CMP R1, #2 \n"
<> 139:856d2700e60b 766 " BLT Skip \n" // No cache or only instruction cache at this level
<> 139:856d2700e60b 767
<> 139:856d2700e60b 768 " MCR p15, 2, R10, c0, c0, 0 \n" // Write the Cache Size selection register
<> 139:856d2700e60b 769 " ISB \n" // ISB to sync the change to the CacheSizeID reg
<> 139:856d2700e60b 770 " MRC p15, 1, R1, c0, c0, 0 \n" // Reads current Cache Size ID register
<> 139:856d2700e60b 771 " AND R2, R1, #7 \n" // Extract the line length field
<> 139:856d2700e60b 772 " ADD R2, R2, #4 \n" // Add 4 for the line length offset (log2 16 bytes)
<> 139:856d2700e60b 773 " movw R4, #0x3FF \n"
<> 139:856d2700e60b 774 " ANDS R4, R4, R1, LSR #3 \n" // R4 is the max number on the way size (right aligned)
<> 139:856d2700e60b 775 " CLZ R5, R4 \n" // R5 is the bit position of the way size increment
<> 139:856d2700e60b 776 " movw R7, #0x7FFF \n"
<> 139:856d2700e60b 777 " ANDS R7, R7, R1, LSR #13 \n" // R7 is the max number of the index size (right aligned)
<> 139:856d2700e60b 778
<> 139:856d2700e60b 779 "Loop2: MOV R9, R4 \n" // R9 working copy of the max way size (right aligned)
<> 139:856d2700e60b 780
<> 139:856d2700e60b 781 "Loop3: ORR R11, R10, R9, LSL R5 \n" // Factor in the Way number and cache number into R11
<> 139:856d2700e60b 782 " ORR R11, R11, R7, LSL R2 \n" // Factor in the Set number
<> 139:856d2700e60b 783 " CMP R0, #0 \n"
<> 139:856d2700e60b 784 " BNE Dccsw \n"
<> 139:856d2700e60b 785 " MCR p15, 0, R11, c7, c6, 2 \n" // DCISW. Invalidate by Set/Way
<> 139:856d2700e60b 786 " B cont \n"
<> 139:856d2700e60b 787 "Dccsw: CMP R0, #1 \n"
<> 139:856d2700e60b 788 " BNE Dccisw \n"
<> 139:856d2700e60b 789 " MCR p15, 0, R11, c7, c10, 2 \n" // DCCSW. Clean by Set/Way
<> 139:856d2700e60b 790 " B cont \n"
<> 139:856d2700e60b 791 "Dccisw: MCR p15, 0, R11, c7, c14, 2 \n" // DCCISW, Clean and Invalidate by Set/Way
<> 139:856d2700e60b 792 "cont: SUBS R9, R9, #1 \n" // Decrement the Way number
<> 139:856d2700e60b 793 " BGE Loop3 \n"
<> 139:856d2700e60b 794 " SUBS R7, R7, #1 \n" // Decrement the Set number
<> 139:856d2700e60b 795 " BGE Loop2 \n"
<> 139:856d2700e60b 796 "Skip: ADD R10, R10, #2 \n" // increment the cache number
<> 139:856d2700e60b 797 " CMP R3, R10 \n"
<> 139:856d2700e60b 798 " BGT Loop1 \n"
<> 139:856d2700e60b 799
<> 139:856d2700e60b 800 "Finished: \n"
<> 139:856d2700e60b 801 " DSB \n"
<> 139:856d2700e60b 802 " POP {R4-R11} \n"
<> 139:856d2700e60b 803 " BX lr \n" );
<> 139:856d2700e60b 804 }
<> 139:856d2700e60b 805
<> 139:856d2700e60b 806 /** \brief Invalidate the whole D$
<> 139:856d2700e60b 807
<> 139:856d2700e60b 808 DCISW. Invalidate by Set/Way
<> 139:856d2700e60b 809 */
<> 139:856d2700e60b 810 // from system_Renesas_RZ_A1.c
<> 139:856d2700e60b 811 __STATIC_INLINE void __v7_inv_dcache_all(void) {
<> 139:856d2700e60b 812 __v7_all_cache(0);
<> 139:856d2700e60b 813 }
<> 139:856d2700e60b 814 /** \brief Clean the whole D$
<> 139:856d2700e60b 815
<> 139:856d2700e60b 816 DCCSW. Clean by Set/Way
<> 139:856d2700e60b 817 */
<> 139:856d2700e60b 818
<> 139:856d2700e60b 819 __STATIC_INLINE void __v7_clean_dcache_all(void) {
<> 139:856d2700e60b 820 __v7_all_cache(1);
<> 139:856d2700e60b 821 }
<> 139:856d2700e60b 822
<> 139:856d2700e60b 823 /** \brief Clean and invalidate the whole D$
<> 139:856d2700e60b 824
<> 139:856d2700e60b 825 DCCISW. Clean and Invalidate by Set/Way
<> 139:856d2700e60b 826 */
<> 139:856d2700e60b 827
<> 139:856d2700e60b 828 __STATIC_INLINE void __v7_clean_inv_dcache_all(void) {
<> 139:856d2700e60b 829 __v7_all_cache(2);
<> 139:856d2700e60b 830 }
<> 139:856d2700e60b 831 /** \brief Clean and Invalidate D$ by MVA
<> 139:856d2700e60b 832
<> 139:856d2700e60b 833 DCCIMVAC. Data cache clean and invalidate by MVA to PoC
<> 139:856d2700e60b 834 */
<> 139:856d2700e60b 835 __STATIC_INLINE void __v7_clean_inv_dcache_mva(void *va) {
<> 139:856d2700e60b 836 __MCR(15, 0, (uint32_t)va, 7, 14, 1);
<> 139:856d2700e60b 837 __DMB();
<> 139:856d2700e60b 838 }
<> 139:856d2700e60b 839
<> 139:856d2700e60b 840 #include "core_ca_mmu.h"
<> 139:856d2700e60b 841
<> 139:856d2700e60b 842 #elif (defined (__GNUC__)) /*------------------ GNU Compiler ---------------------*/
<> 139:856d2700e60b 843 /* GNU gcc specific functions */
<> 139:856d2700e60b 844
<> 139:856d2700e60b 845 #define MODE_USR 0x10
<> 139:856d2700e60b 846 #define MODE_FIQ 0x11
<> 139:856d2700e60b 847 #define MODE_IRQ 0x12
<> 139:856d2700e60b 848 #define MODE_SVC 0x13
<> 139:856d2700e60b 849 #define MODE_MON 0x16
<> 139:856d2700e60b 850 #define MODE_ABT 0x17
<> 139:856d2700e60b 851 #define MODE_HYP 0x1A
<> 139:856d2700e60b 852 #define MODE_UND 0x1B
<> 139:856d2700e60b 853 #define MODE_SYS 0x1F
<> 139:856d2700e60b 854
<> 139:856d2700e60b 855
<> 139:856d2700e60b 856 __attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
<> 139:856d2700e60b 857 {
<> 139:856d2700e60b 858 __ASM volatile ("cpsie i");
<> 139:856d2700e60b 859 }
<> 139:856d2700e60b 860
<> 139:856d2700e60b 861 /** \brief Disable IRQ Interrupts
<> 139:856d2700e60b 862
<> 139:856d2700e60b 863 This function disables IRQ interrupts by setting the I-bit in the CPSR.
<> 139:856d2700e60b 864 Can only be executed in Privileged modes.
<> 139:856d2700e60b 865 */
<> 139:856d2700e60b 866 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __disable_irq(void)
<> 139:856d2700e60b 867 {
<> 139:856d2700e60b 868 uint32_t result;
<> 139:856d2700e60b 869
<> 139:856d2700e60b 870 __ASM volatile ("mrs %0, cpsr" : "=r" (result));
<> 139:856d2700e60b 871 __ASM volatile ("cpsid i");
<> 139:856d2700e60b 872 return(result & 0x80);
<> 139:856d2700e60b 873 }
<> 139:856d2700e60b 874
<> 139:856d2700e60b 875
<> 139:856d2700e60b 876 /** \brief Get APSR Register
<> 139:856d2700e60b 877
<> 139:856d2700e60b 878 This function returns the content of the APSR Register.
<> 139:856d2700e60b 879
<> 139:856d2700e60b 880 \return APSR Register value
<> 139:856d2700e60b 881 */
<> 139:856d2700e60b 882 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void)
<> 139:856d2700e60b 883 {
<> 139:856d2700e60b 884 #if 1
<> 139:856d2700e60b 885 register uint32_t __regAPSR;
<> 139:856d2700e60b 886 __ASM volatile ("mrs %0, apsr" : "=r" (__regAPSR) );
<> 139:856d2700e60b 887 #else
<> 139:856d2700e60b 888 register uint32_t __regAPSR __ASM("apsr");
<> 139:856d2700e60b 889 #endif
<> 139:856d2700e60b 890 return(__regAPSR);
<> 139:856d2700e60b 891 }
<> 139:856d2700e60b 892
<> 139:856d2700e60b 893
<> 139:856d2700e60b 894 /** \brief Get CPSR Register
<> 139:856d2700e60b 895
<> 139:856d2700e60b 896 This function returns the content of the CPSR Register.
<> 139:856d2700e60b 897
<> 139:856d2700e60b 898 \return CPSR Register value
<> 139:856d2700e60b 899 */
<> 139:856d2700e60b 900 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CPSR(void)
<> 139:856d2700e60b 901 {
<> 139:856d2700e60b 902 #if 1
<> 139:856d2700e60b 903 register uint32_t __regCPSR;
<> 139:856d2700e60b 904 __ASM volatile ("mrs %0, cpsr" : "=r" (__regCPSR));
<> 139:856d2700e60b 905 #else
<> 139:856d2700e60b 906 register uint32_t __regCPSR __ASM("cpsr");
<> 139:856d2700e60b 907 #endif
<> 139:856d2700e60b 908 return(__regCPSR);
<> 139:856d2700e60b 909 }
<> 139:856d2700e60b 910
<> 139:856d2700e60b 911 #if 0
<> 139:856d2700e60b 912 /** \brief Set Stack Pointer
<> 139:856d2700e60b 913
<> 139:856d2700e60b 914 This function assigns the given value to the current stack pointer.
<> 139:856d2700e60b 915
<> 139:856d2700e60b 916 \param [in] topOfStack Stack Pointer value to set
<> 139:856d2700e60b 917 */
<> 139:856d2700e60b 918 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_SP(uint32_t topOfStack)
<> 139:856d2700e60b 919 {
<> 139:856d2700e60b 920 register uint32_t __regSP __ASM("sp");
<> 139:856d2700e60b 921 __regSP = topOfStack;
<> 139:856d2700e60b 922 }
<> 139:856d2700e60b 923 #endif
<> 139:856d2700e60b 924
<> 139:856d2700e60b 925 /** \brief Get link register
<> 139:856d2700e60b 926
<> 139:856d2700e60b 927 This function returns the value of the link register
<> 139:856d2700e60b 928
<> 139:856d2700e60b 929 \return Value of link register
<> 139:856d2700e60b 930 */
<> 139:856d2700e60b 931 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_LR(void)
<> 139:856d2700e60b 932 {
<> 139:856d2700e60b 933 register uint32_t __reglr __ASM("lr");
<> 139:856d2700e60b 934 return(__reglr);
<> 139:856d2700e60b 935 }
<> 139:856d2700e60b 936
<> 139:856d2700e60b 937 #if 0
<> 139:856d2700e60b 938 /** \brief Set link register
<> 139:856d2700e60b 939
<> 139:856d2700e60b 940 This function sets the value of the link register
<> 139:856d2700e60b 941
<> 139:856d2700e60b 942 \param [in] lr LR value to set
<> 139:856d2700e60b 943 */
<> 139:856d2700e60b 944 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_LR(uint32_t lr)
<> 139:856d2700e60b 945 {
<> 139:856d2700e60b 946 register uint32_t __reglr __ASM("lr");
<> 139:856d2700e60b 947 __reglr = lr;
<> 139:856d2700e60b 948 }
<> 139:856d2700e60b 949 #endif
<> 139:856d2700e60b 950
<> 139:856d2700e60b 951 /** \brief Set Process Stack Pointer
<> 139:856d2700e60b 952
<> 139:856d2700e60b 953 This function assigns the given value to the USR/SYS Stack Pointer (PSP).
<> 139:856d2700e60b 954
<> 139:856d2700e60b 955 \param [in] topOfProcStack USR/SYS Stack Pointer value to set
<> 139:856d2700e60b 956 */
<> 139:856d2700e60b 957 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
<> 139:856d2700e60b 958 {
<> 139:856d2700e60b 959 __asm__ volatile (
<> 139:856d2700e60b 960 ".ARM;"
<> 139:856d2700e60b 961 ".eabi_attribute Tag_ABI_align8_preserved,1;"
<> 139:856d2700e60b 962
<> 139:856d2700e60b 963 "BIC R0, R0, #7;" /* ;ensure stack is 8-byte aligned */
<> 139:856d2700e60b 964 "MRS R1, CPSR;"
<> 139:856d2700e60b 965 "CPS %0;" /* ;no effect in USR mode */
<> 139:856d2700e60b 966 "MOV SP, R0;"
<> 139:856d2700e60b 967 "MSR CPSR_c, R1;" /* ;no effect in USR mode */
<> 139:856d2700e60b 968 "ISB;"
<> 139:856d2700e60b 969 //"BX LR;"
<> 139:856d2700e60b 970 :
<> 139:856d2700e60b 971 : "i"(MODE_SYS)
<> 139:856d2700e60b 972 : "r0", "r1");
<> 139:856d2700e60b 973 return;
<> 139:856d2700e60b 974 }
<> 139:856d2700e60b 975
<> 139:856d2700e60b 976 /** \brief Set User Mode
<> 139:856d2700e60b 977
<> 139:856d2700e60b 978 This function changes the processor state to User Mode
<> 139:856d2700e60b 979 */
<> 139:856d2700e60b 980 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CPS_USR(void)
<> 139:856d2700e60b 981 {
<> 139:856d2700e60b 982 __asm__ volatile (
<> 139:856d2700e60b 983 ".ARM;"
<> 139:856d2700e60b 984
<> 139:856d2700e60b 985 "CPS %0;"
<> 139:856d2700e60b 986 //"BX LR;"
<> 139:856d2700e60b 987 :
<> 139:856d2700e60b 988 : "i"(MODE_USR)
<> 139:856d2700e60b 989 : );
<> 139:856d2700e60b 990 return;
<> 139:856d2700e60b 991 }
<> 139:856d2700e60b 992
<> 139:856d2700e60b 993
<> 139:856d2700e60b 994 /** \brief Enable FIQ
<> 139:856d2700e60b 995
<> 139:856d2700e60b 996 This function enables FIQ interrupts by clearing the F-bit in the CPSR.
<> 139:856d2700e60b 997 Can only be executed in Privileged modes.
<> 139:856d2700e60b 998 */
<> 139:856d2700e60b 999 #define __enable_fault_irq() __asm__ volatile ("cpsie f")
<> 139:856d2700e60b 1000
<> 139:856d2700e60b 1001
<> 139:856d2700e60b 1002 /** \brief Disable FIQ
<> 139:856d2700e60b 1003
<> 139:856d2700e60b 1004 This function disables FIQ interrupts by setting the F-bit in the CPSR.
<> 139:856d2700e60b 1005 Can only be executed in Privileged modes.
<> 139:856d2700e60b 1006 */
<> 139:856d2700e60b 1007 #define __disable_fault_irq() __asm__ volatile ("cpsid f")
<> 139:856d2700e60b 1008
<> 139:856d2700e60b 1009
<> 139:856d2700e60b 1010 /** \brief Get FPSCR
<> 139:856d2700e60b 1011
<> 139:856d2700e60b 1012 This function returns the current value of the Floating Point Status/Control register.
<> 139:856d2700e60b 1013
<> 139:856d2700e60b 1014 \return Floating Point Status/Control register value
<> 139:856d2700e60b 1015 */
<> 139:856d2700e60b 1016 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
<> 139:856d2700e60b 1017 {
<> 139:856d2700e60b 1018 #if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
<> 139:856d2700e60b 1019 #if 1
<> 139:856d2700e60b 1020 uint32_t result;
<> 139:856d2700e60b 1021
<> 139:856d2700e60b 1022 __ASM volatile ("vmrs %0, fpscr" : "=r" (result) );
<> 139:856d2700e60b 1023 return (result);
<> 139:856d2700e60b 1024 #else
<> 139:856d2700e60b 1025 register uint32_t __regfpscr __ASM("fpscr");
<> 139:856d2700e60b 1026 return(__regfpscr);
<> 139:856d2700e60b 1027 #endif
<> 139:856d2700e60b 1028 #else
<> 139:856d2700e60b 1029 return(0);
<> 139:856d2700e60b 1030 #endif
<> 139:856d2700e60b 1031 }
<> 139:856d2700e60b 1032
<> 139:856d2700e60b 1033
<> 139:856d2700e60b 1034 /** \brief Set FPSCR
<> 139:856d2700e60b 1035
<> 139:856d2700e60b 1036 This function assigns the given value to the Floating Point Status/Control register.
<> 139:856d2700e60b 1037
<> 139:856d2700e60b 1038 \param [in] fpscr Floating Point Status/Control value to set
<> 139:856d2700e60b 1039 */
<> 139:856d2700e60b 1040 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
<> 139:856d2700e60b 1041 {
<> 139:856d2700e60b 1042 #if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
<> 139:856d2700e60b 1043 #if 1
<> 139:856d2700e60b 1044 __ASM volatile ("vmsr fpscr, %0" : : "r" (fpscr) );
<> 139:856d2700e60b 1045 #else
<> 139:856d2700e60b 1046 register uint32_t __regfpscr __ASM("fpscr");
<> 139:856d2700e60b 1047 __regfpscr = (fpscr);
<> 139:856d2700e60b 1048 #endif
<> 139:856d2700e60b 1049 #endif
<> 139:856d2700e60b 1050 }
<> 139:856d2700e60b 1051
<> 139:856d2700e60b 1052 /** \brief Get FPEXC
<> 139:856d2700e60b 1053
<> 139:856d2700e60b 1054 This function returns the current value of the Floating Point Exception Control register.
<> 139:856d2700e60b 1055
<> 139:856d2700e60b 1056 \return Floating Point Exception Control register value
<> 139:856d2700e60b 1057 */
<> 139:856d2700e60b 1058 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPEXC(void)
<> 139:856d2700e60b 1059 {
<> 139:856d2700e60b 1060 #if (__FPU_PRESENT == 1)
<> 139:856d2700e60b 1061 #if 1
<> 139:856d2700e60b 1062 uint32_t result;
<> 139:856d2700e60b 1063
<> 139:856d2700e60b 1064 __ASM volatile ("vmrs %0, fpexc" : "=r" (result));
<> 139:856d2700e60b 1065 return (result);
<> 139:856d2700e60b 1066 #else
<> 139:856d2700e60b 1067 register uint32_t __regfpexc __ASM("fpexc");
<> 139:856d2700e60b 1068 return(__regfpexc);
<> 139:856d2700e60b 1069 #endif
<> 139:856d2700e60b 1070 #else
<> 139:856d2700e60b 1071 return(0);
<> 139:856d2700e60b 1072 #endif
<> 139:856d2700e60b 1073 }
<> 139:856d2700e60b 1074
<> 139:856d2700e60b 1075
<> 139:856d2700e60b 1076 /** \brief Set FPEXC
<> 139:856d2700e60b 1077
<> 139:856d2700e60b 1078 This function assigns the given value to the Floating Point Exception Control register.
<> 139:856d2700e60b 1079
<> 139:856d2700e60b 1080     \param [in]    fpexc  Floating Point Exception Control value to set
<> 139:856d2700e60b 1081 */
<> 139:856d2700e60b 1082 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
<> 139:856d2700e60b 1083 {
<> 139:856d2700e60b 1084 #if (__FPU_PRESENT == 1)
<> 139:856d2700e60b 1085 #if 1
<> 139:856d2700e60b 1086 __ASM volatile ("vmsr fpexc, %0" : : "r" (fpexc));
<> 139:856d2700e60b 1087 #else
<> 139:856d2700e60b 1088 register uint32_t __regfpexc __ASM("fpexc");
<> 139:856d2700e60b 1089 __regfpexc = (fpexc);
<> 139:856d2700e60b 1090 #endif
<> 139:856d2700e60b 1091 #endif
<> 139:856d2700e60b 1092 }
<> 139:856d2700e60b 1093
<> 139:856d2700e60b 1094 /** \brief Get CPACR
<> 139:856d2700e60b 1095
<> 139:856d2700e60b 1096 This function returns the current value of the Coprocessor Access Control register.
<> 139:856d2700e60b 1097
<> 139:856d2700e60b 1098 \return Coprocessor Access Control register value
<> 139:856d2700e60b 1099 */
<> 139:856d2700e60b 1100 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CPACR(void)
<> 139:856d2700e60b 1101 {
<> 139:856d2700e60b 1102 #if 1
<> 139:856d2700e60b 1103 register uint32_t __regCPACR;
<> 139:856d2700e60b 1104 __ASM volatile ("mrc p15, 0, %0, c1, c0, 2" : "=r" (__regCPACR));
<> 139:856d2700e60b 1105 #else
<> 139:856d2700e60b 1106 register uint32_t __regCPACR __ASM("cp15:0:c1:c0:2");
<> 139:856d2700e60b 1107 #endif
<> 139:856d2700e60b 1108 return __regCPACR;
<> 139:856d2700e60b 1109 }
<> 139:856d2700e60b 1110
<> 139:856d2700e60b 1111 /** \brief Set CPACR
<> 139:856d2700e60b 1112
<> 139:856d2700e60b 1113 This function assigns the given value to the Coprocessor Access Control register.
<> 139:856d2700e60b 1114
<> 139:856d2700e60b 1115     \param [in]    cpacr  Coprocessor Access Control value to set
<> 139:856d2700e60b 1116 */
<> 139:856d2700e60b 1117 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CPACR(uint32_t cpacr)
<> 139:856d2700e60b 1118 {
<> 139:856d2700e60b 1119 #if 1
<> 139:856d2700e60b 1120 __ASM volatile ("mcr p15, 0, %0, c1, c0, 2" : : "r" (cpacr));
<> 139:856d2700e60b 1121 #else
<> 139:856d2700e60b 1122 register uint32_t __regCPACR __ASM("cp15:0:c1:c0:2");
<> 139:856d2700e60b 1123 __regCPACR = cpacr;
<> 139:856d2700e60b 1124 #endif
<> 139:856d2700e60b 1125 __ISB();
<> 139:856d2700e60b 1126 }
<> 139:856d2700e60b 1127
<> 139:856d2700e60b 1128 /** \brief Get CBAR
<> 139:856d2700e60b 1129
<> 139:856d2700e60b 1130 This function returns the value of the Configuration Base Address register.
<> 139:856d2700e60b 1131
<> 139:856d2700e60b 1132 \return Configuration Base Address register value
<> 139:856d2700e60b 1133 */
<> 139:856d2700e60b 1134 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CBAR() {
<> 139:856d2700e60b 1135 #if 1
<> 139:856d2700e60b 1136 register uint32_t __regCBAR;
<> 139:856d2700e60b 1137 __ASM volatile ("mrc p15, 4, %0, c15, c0, 0" : "=r" (__regCBAR));
<> 139:856d2700e60b 1138 #else
<> 139:856d2700e60b 1139 register uint32_t __regCBAR __ASM("cp15:4:c15:c0:0");
<> 139:856d2700e60b 1140 #endif
<> 139:856d2700e60b 1141 return(__regCBAR);
<> 139:856d2700e60b 1142 }
<> 139:856d2700e60b 1143
<> 139:856d2700e60b 1144 /** \brief Get TTBR0
<> 139:856d2700e60b 1145
<> 139:856d2700e60b 1146 This function returns the value of the Translation Table Base Register 0.
<> 139:856d2700e60b 1147
<> 139:856d2700e60b 1148 \return Translation Table Base Register 0 value
<> 139:856d2700e60b 1149 */
<> 139:856d2700e60b 1150 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_TTBR0() {
<> 139:856d2700e60b 1151 #if 1
<> 139:856d2700e60b 1152 register uint32_t __regTTBR0;
<> 139:856d2700e60b 1153 __ASM volatile ("mrc p15, 0, %0, c2, c0, 0" : "=r" (__regTTBR0));
<> 139:856d2700e60b 1154 #else
<> 139:856d2700e60b 1155 register uint32_t __regTTBR0 __ASM("cp15:0:c2:c0:0");
<> 139:856d2700e60b 1156 #endif
<> 139:856d2700e60b 1157 return(__regTTBR0);
<> 139:856d2700e60b 1158 }
<> 139:856d2700e60b 1159
<> 139:856d2700e60b 1160 /** \brief Set TTBR0
<> 139:856d2700e60b 1161
<> 139:856d2700e60b 1162 This function assigns the given value to the Translation Table Base Register 0.
<> 139:856d2700e60b 1163
<> 139:856d2700e60b 1164 \param [in] ttbr0 Translation Table Base Register 0 value to set
<> 139:856d2700e60b 1165 */
<> 139:856d2700e60b 1166 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_TTBR0(uint32_t ttbr0) {
<> 139:856d2700e60b 1167 #if 1
<> 139:856d2700e60b 1168 __ASM volatile ("mcr p15, 0, %0, c2, c0, 0" : : "r" (ttbr0));
<> 139:856d2700e60b 1169 #else
<> 139:856d2700e60b 1170 register uint32_t __regTTBR0 __ASM("cp15:0:c2:c0:0");
<> 139:856d2700e60b 1171 __regTTBR0 = ttbr0;
<> 139:856d2700e60b 1172 #endif
<> 139:856d2700e60b 1173 __ISB();
<> 139:856d2700e60b 1174 }
<> 139:856d2700e60b 1175
<> 139:856d2700e60b 1176 /** \brief Get DACR
<> 139:856d2700e60b 1177
<> 139:856d2700e60b 1178 This function returns the value of the Domain Access Control Register.
<> 139:856d2700e60b 1179
<> 139:856d2700e60b 1180 \return Domain Access Control Register value
<> 139:856d2700e60b 1181 */
<> 139:856d2700e60b 1182 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_DACR() {
<> 139:856d2700e60b 1183 #if 1
<> 139:856d2700e60b 1184 register uint32_t __regDACR;
<> 139:856d2700e60b 1185 __ASM volatile ("mrc p15, 0, %0, c3, c0, 0" : "=r" (__regDACR));
<> 139:856d2700e60b 1186 #else
<> 139:856d2700e60b 1187 register uint32_t __regDACR __ASM("cp15:0:c3:c0:0");
<> 139:856d2700e60b 1188 #endif
<> 139:856d2700e60b 1189 return(__regDACR);
<> 139:856d2700e60b 1190 }
<> 139:856d2700e60b 1191
<> 139:856d2700e60b 1192 /** \brief Set DACR
<> 139:856d2700e60b 1193
<> 139:856d2700e60b 1194 This function assigns the given value to the Domain Access Control Register.
<> 139:856d2700e60b 1195
<> 139:856d2700e60b 1196 \param [in] dacr Domain Access Control Register value to set
<> 139:856d2700e60b 1197 */
<> 139:856d2700e60b 1198 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_DACR(uint32_t dacr) {
<> 139:856d2700e60b 1199 #if 1
<> 139:856d2700e60b 1200 __ASM volatile ("mcr p15, 0, %0, c3, c0, 0" : : "r" (dacr));
<> 139:856d2700e60b 1201 #else
<> 139:856d2700e60b 1202 register uint32_t __regDACR __ASM("cp15:0:c3:c0:0");
<> 139:856d2700e60b 1203 __regDACR = dacr;
<> 139:856d2700e60b 1204 #endif
<> 139:856d2700e60b 1205 __ISB();
<> 139:856d2700e60b 1206 }
<> 139:856d2700e60b 1207
<> 139:856d2700e60b 1208 /******************************** Cache and BTAC enable ****************************************************/
<> 139:856d2700e60b 1209
<> 139:856d2700e60b 1210 /** \brief Set SCTLR
<> 139:856d2700e60b 1211
<> 139:856d2700e60b 1212 This function assigns the given value to the System Control Register.
<> 139:856d2700e60b 1213
<> 139:856d2700e60b 1214 \param [in] sctlr System Control Register value to set
<> 139:856d2700e60b 1215 */
<> 139:856d2700e60b 1216 __attribute__( ( always_inline ) ) __STATIC_INLINE void __set_SCTLR(uint32_t sctlr)
<> 139:856d2700e60b 1217 {
<> 139:856d2700e60b 1218 #if 1
<> 139:856d2700e60b 1219 __ASM volatile ("mcr p15, 0, %0, c1, c0, 0" : : "r" (sctlr));
<> 139:856d2700e60b 1220 #else
<> 139:856d2700e60b 1221 register uint32_t __regSCTLR __ASM("cp15:0:c1:c0:0");
<> 139:856d2700e60b 1222 __regSCTLR = sctlr;
<> 139:856d2700e60b 1223 #endif
<> 139:856d2700e60b 1224 }
<> 139:856d2700e60b 1225
<> 139:856d2700e60b 1226 /** \brief Get SCTLR
<> 139:856d2700e60b 1227
<> 139:856d2700e60b 1228 This function returns the value of the System Control Register.
<> 139:856d2700e60b 1229
<> 139:856d2700e60b 1230 \return System Control Register value
<> 139:856d2700e60b 1231 */
<> 139:856d2700e60b 1232 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_SCTLR() {
<> 139:856d2700e60b 1233 #if 1
<> 139:856d2700e60b 1234 register uint32_t __regSCTLR;
<> 139:856d2700e60b 1235 __ASM volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r" (__regSCTLR));
<> 139:856d2700e60b 1236 #else
<> 139:856d2700e60b 1237 register uint32_t __regSCTLR __ASM("cp15:0:c1:c0:0");
<> 139:856d2700e60b 1238 #endif
<> 139:856d2700e60b 1239 return(__regSCTLR);
<> 139:856d2700e60b 1240 }
<> 139:856d2700e60b 1241
<> 139:856d2700e60b 1242 /** \brief Enable Caches
<> 139:856d2700e60b 1243
<> 139:856d2700e60b 1244 Enable Caches
<> 139:856d2700e60b 1245 */
<> 139:856d2700e60b 1246 __attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_caches(void) {
<> 139:856d2700e60b 1247 // Set I bit 12 to enable I Cache
<> 139:856d2700e60b 1248 // Set C bit 2 to enable D Cache
<> 139:856d2700e60b 1249 __set_SCTLR( __get_SCTLR() | (1 << 12) | (1 << 2));
<> 139:856d2700e60b 1250 }
<> 139:856d2700e60b 1251
<> 139:856d2700e60b 1252 /** \brief Disable Caches
<> 139:856d2700e60b 1253
<> 139:856d2700e60b 1254 Disable Caches
<> 139:856d2700e60b 1255 */
<> 139:856d2700e60b 1256 __attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_caches(void) {
<> 139:856d2700e60b 1257 // Clear I bit 12 to disable I Cache
<> 139:856d2700e60b 1258 // Clear C bit 2 to disable D Cache
<> 139:856d2700e60b 1259 __set_SCTLR( __get_SCTLR() & ~(1 << 12) & ~(1 << 2));
<> 139:856d2700e60b 1260 __ISB();
<> 139:856d2700e60b 1261 }
<> 139:856d2700e60b 1262
<> 139:856d2700e60b 1263 /** \brief Enable BTAC
<> 139:856d2700e60b 1264
<> 139:856d2700e60b 1265 Enable BTAC
<> 139:856d2700e60b 1266 */
<> 139:856d2700e60b 1267 __attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_btac(void) {
<> 139:856d2700e60b 1268 // Set Z bit 11 to enable branch prediction
<> 139:856d2700e60b 1269 __set_SCTLR( __get_SCTLR() | (1 << 11));
<> 139:856d2700e60b 1270 __ISB();
<> 139:856d2700e60b 1271 }
<> 139:856d2700e60b 1272
<> 139:856d2700e60b 1273 /** \brief Disable BTAC
<> 139:856d2700e60b 1274
<> 139:856d2700e60b 1275 Disable BTAC
<> 139:856d2700e60b 1276 */
<> 139:856d2700e60b 1277 __attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_btac(void) {
<> 139:856d2700e60b 1278 // Clear Z bit 11 to disable branch prediction
<> 139:856d2700e60b 1279 __set_SCTLR( __get_SCTLR() & ~(1 << 11));
<> 139:856d2700e60b 1280 }
<> 139:856d2700e60b 1281
<> 139:856d2700e60b 1282
<> 139:856d2700e60b 1283 /** \brief Enable MMU
<> 139:856d2700e60b 1284
<> 139:856d2700e60b 1285 Enable MMU
<> 139:856d2700e60b 1286 */
<> 139:856d2700e60b 1287 __attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_mmu(void) {
<> 139:856d2700e60b 1288 // Set M bit 0 to enable the MMU
<> 139:856d2700e60b 1289 // Set AFE bit to enable simplified access permissions model
<> 139:856d2700e60b 1290 // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
<> 139:856d2700e60b 1291 __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
<> 139:856d2700e60b 1292 __ISB();
<> 139:856d2700e60b 1293 }
<> 139:856d2700e60b 1294
<> 139:856d2700e60b 1295 /** \brief Disable MMU
<> 139:856d2700e60b 1296
<> 139:856d2700e60b 1297 Disable MMU
<> 139:856d2700e60b 1298 */
<> 139:856d2700e60b 1299 __attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_mmu(void) {
<> 139:856d2700e60b 1300 // Clear M bit 0 to disable the MMU
<> 139:856d2700e60b 1301 __set_SCTLR( __get_SCTLR() & ~1);
<> 139:856d2700e60b 1302 __ISB();
<> 139:856d2700e60b 1303 }
<> 139:856d2700e60b 1304
<> 139:856d2700e60b 1305 /******************************** TLB maintenance operations ************************************************/
<> 139:856d2700e60b 1306 /** \brief Invalidate the whole tlb
<> 139:856d2700e60b 1307
<> 139:856d2700e60b 1308 TLBIALL. Invalidate the whole tlb
<> 139:856d2700e60b 1309 */
<> 139:856d2700e60b 1310
<> 139:856d2700e60b 1311 __attribute__( ( always_inline ) ) __STATIC_INLINE void __ca9u_inv_tlb_all(void) {
<> 139:856d2700e60b 1312 #if 1
<> 139:856d2700e60b 1313 __ASM volatile ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0));
<> 139:856d2700e60b 1314 #else
<> 139:856d2700e60b 1315 register uint32_t __TLBIALL __ASM("cp15:0:c8:c7:0");
<> 139:856d2700e60b 1316 __TLBIALL = 0;
<> 139:856d2700e60b 1317 #endif
<> 139:856d2700e60b 1318 __DSB();
<> 139:856d2700e60b 1319 __ISB();
<> 139:856d2700e60b 1320 }
<> 139:856d2700e60b 1321
<> 139:856d2700e60b 1322 /******************************** BTB maintenance operations ************************************************/
<> 139:856d2700e60b 1323 /** \brief Invalidate entire branch predictor array
<> 139:856d2700e60b 1324
<> 139:856d2700e60b 1325 BPIALL. Branch Predictor Invalidate All.
<> 139:856d2700e60b 1326 */
<> 139:856d2700e60b 1327
<> 139:856d2700e60b 1328 __attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_btac(void) {
<> 139:856d2700e60b 1329 #if 1
<> 139:856d2700e60b 1330 __ASM volatile ("mcr p15, 0, %0, c7, c5, 6" : : "r" (0));
<> 139:856d2700e60b 1331 #else
<> 139:856d2700e60b 1332 register uint32_t __BPIALL __ASM("cp15:0:c7:c5:6");
<> 139:856d2700e60b 1333 __BPIALL = 0;
<> 139:856d2700e60b 1334 #endif
<> 139:856d2700e60b 1335 __DSB(); //ensure completion of the invalidation
<> 139:856d2700e60b 1336 __ISB(); //ensure instruction fetch path sees new state
<> 139:856d2700e60b 1337 }
<> 139:856d2700e60b 1338
<> 139:856d2700e60b 1339
<> 139:856d2700e60b 1340 /******************************** L1 cache operations ******************************************************/
<> 139:856d2700e60b 1341
<> 139:856d2700e60b 1342 /** \brief Invalidate the whole I$
<> 139:856d2700e60b 1343
<> 139:856d2700e60b 1344 ICIALLU. Instruction Cache Invalidate All to PoU
<> 139:856d2700e60b 1345 */
<> 139:856d2700e60b 1346 __attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_icache_all(void) {
<> 139:856d2700e60b 1347 #if 1
<> 139:856d2700e60b 1348 __ASM volatile ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
<> 139:856d2700e60b 1349 #else
<> 139:856d2700e60b 1350 register uint32_t __ICIALLU __ASM("cp15:0:c7:c5:0");
<> 139:856d2700e60b 1351 __ICIALLU = 0;
<> 139:856d2700e60b 1352 #endif
<> 139:856d2700e60b 1353 __DSB(); //ensure completion of the invalidation
<> 139:856d2700e60b 1354 __ISB(); //ensure instruction fetch path sees new I cache state
<> 139:856d2700e60b 1355 }
<> 139:856d2700e60b 1356
<> 139:856d2700e60b 1357 /** \brief Clean D$ by MVA
<> 139:856d2700e60b 1358
<> 139:856d2700e60b 1359 DCCMVAC. Data cache clean by MVA to PoC
<> 139:856d2700e60b 1360 */
<> 139:856d2700e60b 1361 __attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_dcache_mva(void *va) {
<> 139:856d2700e60b 1362 #if 1
<> 139:856d2700e60b 1363 __ASM volatile ("mcr p15, 0, %0, c7, c10, 1" : : "r" ((uint32_t)va));
<> 139:856d2700e60b 1364 #else
<> 139:856d2700e60b 1365 register uint32_t __DCCMVAC __ASM("cp15:0:c7:c10:1");
<> 139:856d2700e60b 1366 __DCCMVAC = (uint32_t)va;
<> 139:856d2700e60b 1367 #endif
<> 139:856d2700e60b 1368 __DMB(); //ensure the ordering of data cache maintenance operations and their effects
<> 139:856d2700e60b 1369 }
<> 139:856d2700e60b 1370
<> 139:856d2700e60b 1371 /** \brief Invalidate D$ by MVA
<> 139:856d2700e60b 1372
<> 139:856d2700e60b 1373 DCIMVAC. Data cache invalidate by MVA to PoC
<> 139:856d2700e60b 1374 */
<> 139:856d2700e60b 1375 __attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_dcache_mva(void *va) {
<> 139:856d2700e60b 1376 #if 1
<> 139:856d2700e60b 1377 __ASM volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" ((uint32_t)va));
<> 139:856d2700e60b 1378 #else
<> 139:856d2700e60b 1379 register uint32_t __DCIMVAC __ASM("cp15:0:c7:c6:1");
<> 139:856d2700e60b 1380 __DCIMVAC = (uint32_t)va;
<> 139:856d2700e60b 1381 #endif
<> 139:856d2700e60b 1382 __DMB(); //ensure the ordering of data cache maintenance operations and their effects
<> 139:856d2700e60b 1383 }
<> 139:856d2700e60b 1384
<> 139:856d2700e60b 1385 /** \brief Clean and Invalidate D$ by MVA
<> 139:856d2700e60b 1386
<> 139:856d2700e60b 1387 DCCIMVAC. Data cache clean and invalidate by MVA to PoC
<> 139:856d2700e60b 1388 */
<> 139:856d2700e60b 1389 __attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_inv_dcache_mva(void *va) {
<> 139:856d2700e60b 1390 #if 1
<> 139:856d2700e60b 1391 __ASM volatile ("mcr p15, 0, %0, c7, c14, 1" : : "r" ((uint32_t)va));
<> 139:856d2700e60b 1392 #else
<> 139:856d2700e60b 1393 register uint32_t __DCCIMVAC __ASM("cp15:0:c7:c14:1");
<> 139:856d2700e60b 1394 __DCCIMVAC = (uint32_t)va;
<> 139:856d2700e60b 1395 #endif
<> 139:856d2700e60b 1396 __DMB(); //ensure the ordering of data cache maintenance operations and their effects
<> 139:856d2700e60b 1397 }
<> 139:856d2700e60b 1398
<> 139:856d2700e60b 1399 /** \brief Clean and Invalidate the entire data or unified cache
<> 139:856d2700e60b 1400
<> 139:856d2700e60b 1401 Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency.
<> 139:856d2700e60b 1402 */
<> 139:856d2700e60b 1403 extern void __v7_all_cache(uint32_t op);
<> 139:856d2700e60b 1404
<> 139:856d2700e60b 1405
<> 139:856d2700e60b 1406 /** \brief Invalidate the whole D$
<> 139:856d2700e60b 1407
<> 139:856d2700e60b 1408 DCISW. Invalidate by Set/Way
<> 139:856d2700e60b 1409 */
<> 139:856d2700e60b 1410
/* Invalidate the whole data cache by set/way (DCISW) via the shared helper. */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_dcache_all(void) {
    __v7_all_cache(0);  // op 0 = invalidate (no clean)
}
<> 139:856d2700e60b 1414
<> 139:856d2700e60b 1415 /** \brief Clean the whole D$
<> 139:856d2700e60b 1416
<> 139:856d2700e60b 1417 DCCSW. Clean by Set/Way
<> 139:856d2700e60b 1418 */
<> 139:856d2700e60b 1419
/* Clean the whole data cache by set/way (DCCSW) via the shared helper. */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_dcache_all(void) {
    __v7_all_cache(1);  // op 1 = clean (write back dirty lines, keep valid)
}
<> 139:856d2700e60b 1423
<> 139:856d2700e60b 1424 /** \brief Clean and invalidate the whole D$
<> 139:856d2700e60b 1425
<> 139:856d2700e60b 1426 DCCISW. Clean and Invalidate by Set/Way
<> 139:856d2700e60b 1427 */
<> 139:856d2700e60b 1428
/* Clean and invalidate the whole data cache by set/way (DCCISW) via the shared helper. */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_inv_dcache_all(void) {
    __v7_all_cache(2);  // op 2 = clean and invalidate
}
<> 139:856d2700e60b 1432
<> 139:856d2700e60b 1433 #include "core_ca_mmu.h"
<> 139:856d2700e60b 1434
<> 139:856d2700e60b 1435 #elif (defined (__TASKING__)) /*--------------- TASKING Compiler -----------------*/
<> 139:856d2700e60b 1436
<> 139:856d2700e60b 1437 #error TASKING Compiler support not implemented for Cortex-A
<> 139:856d2700e60b 1438
<> 139:856d2700e60b 1439 #endif
<> 139:856d2700e60b 1440
<> 139:856d2700e60b 1441 /*@} end of CMSIS_Core_RegAccFunctions */
<> 139:856d2700e60b 1442
<> 139:856d2700e60b 1443
<> 139:856d2700e60b 1444 #endif /* __CORE_CAFUNC_H__ */