The official Mbed 2 C/C++ SDK provides the software platform and libraries to build your applications.

Dependents:   hello SerialTestv11 SerialTestv12 Sierpinski ... more

mbed 2

This is the mbed 2 library. If you'd like to learn about Mbed OS please see the mbed-os docs.

Committer:
<>
Date:
Wed Apr 12 16:07:08 2017 +0100
Revision:
140:97feb9bacc10
Release 140 of the mbed library

Ports for Upcoming Targets

3841: Add nRF52840 target https://github.com/ARMmbed/mbed-os/pull/3841
3992: Introducing UBLOX_C030 platform. https://github.com/ARMmbed/mbed-os/pull/3992

Fixes and Changes

3951: [NUCLEO_F303ZE] Correct ARDUINO pin https://github.com/ARMmbed/mbed-os/pull/3951
4021: Fixing a macro to detect when RTOS was in use for the NRF52840_DK https://github.com/ARMmbed/mbed-os/pull/4021
3979: KW24D: Add missing SPI defines and Arduino connector definitions https://github.com/ARMmbed/mbed-os/pull/3979
3990: UBLOX_C027: construct a ticker-based wait, rather than calling wait_ms(), in the https://github.com/ARMmbed/mbed-os/pull/3990
4003: Fixed OBOE in async serial tx for NRF52 target, fixes #4002 https://github.com/ARMmbed/mbed-os/pull/4003
4012: STM32: Correct I2C master error handling https://github.com/ARMmbed/mbed-os/pull/4012
4020: NUCLEO_L011K4 remove unsupported tool chain files https://github.com/ARMmbed/mbed-os/pull/4020
4065: K66F: Move bss section to m_data_2 Section https://github.com/ARMmbed/mbed-os/pull/4065
4014: Issue 3763: Reduce heap allocation in the GCC linker file https://github.com/ARMmbed/mbed-os/pull/4014
4030: [STM32L0] reduce IAR heap and stack size for small targets https://github.com/ARMmbed/mbed-os/pull/4030
4109: NUCLEO_L476RG : minor serial pin update https://github.com/ARMmbed/mbed-os/pull/4109
3982: Ticker - kl25z bugfix for handling events in the past https://github.com/ARMmbed/mbed-os/pull/3982

Who changed what in which revision?

User | Revision | Line number | New contents of line
<> 140:97feb9bacc10 1 /**************************************************************************//**
<> 140:97feb9bacc10 2 * @file core_cm4_simd.h
<> 140:97feb9bacc10 3 * @brief CMSIS Cortex-M4 SIMD Header File
<> 140:97feb9bacc10 4 * @version V3.20
<> 140:97feb9bacc10 5 * @date 25. February 2013
<> 140:97feb9bacc10 6 *
<> 140:97feb9bacc10 7 * @note
<> 140:97feb9bacc10 8 *
<> 140:97feb9bacc10 9 ******************************************************************************/
<> 140:97feb9bacc10 10 /* Copyright (c) 2009 - 2013 ARM LIMITED
<> 140:97feb9bacc10 11
<> 140:97feb9bacc10 12 All rights reserved.
<> 140:97feb9bacc10 13 Redistribution and use in source and binary forms, with or without
<> 140:97feb9bacc10 14 modification, are permitted provided that the following conditions are met:
<> 140:97feb9bacc10 15 - Redistributions of source code must retain the above copyright
<> 140:97feb9bacc10 16 notice, this list of conditions and the following disclaimer.
<> 140:97feb9bacc10 17 - Redistributions in binary form must reproduce the above copyright
<> 140:97feb9bacc10 18 notice, this list of conditions and the following disclaimer in the
<> 140:97feb9bacc10 19 documentation and/or other materials provided with the distribution.
<> 140:97feb9bacc10 20 - Neither the name of ARM nor the names of its contributors may be used
<> 140:97feb9bacc10 21 to endorse or promote products derived from this software without
<> 140:97feb9bacc10 22 specific prior written permission.
<> 140:97feb9bacc10 23 *
<> 140:97feb9bacc10 24 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
<> 140:97feb9bacc10 25 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
<> 140:97feb9bacc10 26 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
<> 140:97feb9bacc10 27 ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
<> 140:97feb9bacc10 28 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
<> 140:97feb9bacc10 29 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
<> 140:97feb9bacc10 30 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
<> 140:97feb9bacc10 31 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
<> 140:97feb9bacc10 32 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
<> 140:97feb9bacc10 33 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
<> 140:97feb9bacc10 34 POSSIBILITY OF SUCH DAMAGE.
<> 140:97feb9bacc10 35 ---------------------------------------------------------------------------*/
<> 140:97feb9bacc10 36
<> 140:97feb9bacc10 37
<> 140:97feb9bacc10 38 #ifdef __cplusplus
<> 140:97feb9bacc10 39 extern "C" {
<> 140:97feb9bacc10 40 #endif
<> 140:97feb9bacc10 41
<> 140:97feb9bacc10 42 #ifndef __CORE_CM4_SIMD_H
<> 140:97feb9bacc10 43 #define __CORE_CM4_SIMD_H
<> 140:97feb9bacc10 44
<> 140:97feb9bacc10 45
<> 140:97feb9bacc10 46 /*******************************************************************************
<> 140:97feb9bacc10 47 * Hardware Abstraction Layer
<> 140:97feb9bacc10 48 ******************************************************************************/
<> 140:97feb9bacc10 49
<> 140:97feb9bacc10 50
<> 140:97feb9bacc10 51 /* ################### Compiler specific Intrinsics ########################### */
<> 140:97feb9bacc10 52 /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
<> 140:97feb9bacc10 53 Access to dedicated SIMD instructions
<> 140:97feb9bacc10 54 @{
<> 140:97feb9bacc10 55 */
<> 140:97feb9bacc10 56
<> 140:97feb9bacc10 57 #if defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
<> 140:97feb9bacc10 58 /* ARM armcc specific functions */
<> 140:97feb9bacc10 59
<> 140:97feb9bacc10 60 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 140:97feb9bacc10 61 #define __SADD8 __sadd8
<> 140:97feb9bacc10 62 #define __QADD8 __qadd8
<> 140:97feb9bacc10 63 #define __SHADD8 __shadd8
<> 140:97feb9bacc10 64 #define __UADD8 __uadd8
<> 140:97feb9bacc10 65 #define __UQADD8 __uqadd8
<> 140:97feb9bacc10 66 #define __UHADD8 __uhadd8
<> 140:97feb9bacc10 67 #define __SSUB8 __ssub8
<> 140:97feb9bacc10 68 #define __QSUB8 __qsub8
<> 140:97feb9bacc10 69 #define __SHSUB8 __shsub8
<> 140:97feb9bacc10 70 #define __USUB8 __usub8
<> 140:97feb9bacc10 71 #define __UQSUB8 __uqsub8
<> 140:97feb9bacc10 72 #define __UHSUB8 __uhsub8
<> 140:97feb9bacc10 73 #define __SADD16 __sadd16
<> 140:97feb9bacc10 74 #define __QADD16 __qadd16
<> 140:97feb9bacc10 75 #define __SHADD16 __shadd16
<> 140:97feb9bacc10 76 #define __UADD16 __uadd16
<> 140:97feb9bacc10 77 #define __UQADD16 __uqadd16
<> 140:97feb9bacc10 78 #define __UHADD16 __uhadd16
<> 140:97feb9bacc10 79 #define __SSUB16 __ssub16
<> 140:97feb9bacc10 80 #define __QSUB16 __qsub16
<> 140:97feb9bacc10 81 #define __SHSUB16 __shsub16
<> 140:97feb9bacc10 82 #define __USUB16 __usub16
<> 140:97feb9bacc10 83 #define __UQSUB16 __uqsub16
<> 140:97feb9bacc10 84 #define __UHSUB16 __uhsub16
<> 140:97feb9bacc10 85 #define __SASX __sasx
<> 140:97feb9bacc10 86 #define __QASX __qasx
<> 140:97feb9bacc10 87 #define __SHASX __shasx
<> 140:97feb9bacc10 88 #define __UASX __uasx
<> 140:97feb9bacc10 89 #define __UQASX __uqasx
<> 140:97feb9bacc10 90 #define __UHASX __uhasx
<> 140:97feb9bacc10 91 #define __SSAX __ssax
<> 140:97feb9bacc10 92 #define __QSAX __qsax
<> 140:97feb9bacc10 93 #define __SHSAX __shsax
<> 140:97feb9bacc10 94 #define __USAX __usax
<> 140:97feb9bacc10 95 #define __UQSAX __uqsax
<> 140:97feb9bacc10 96 #define __UHSAX __uhsax
<> 140:97feb9bacc10 97 #define __USAD8 __usad8
<> 140:97feb9bacc10 98 #define __USADA8 __usada8
<> 140:97feb9bacc10 99 #define __SSAT16 __ssat16
<> 140:97feb9bacc10 100 #define __USAT16 __usat16
<> 140:97feb9bacc10 101 #define __UXTB16 __uxtb16
<> 140:97feb9bacc10 102 #define __UXTAB16 __uxtab16
<> 140:97feb9bacc10 103 #define __SXTB16 __sxtb16
<> 140:97feb9bacc10 104 #define __SXTAB16 __sxtab16
<> 140:97feb9bacc10 105 #define __SMUAD __smuad
<> 140:97feb9bacc10 106 #define __SMUADX __smuadx
<> 140:97feb9bacc10 107 #define __SMLAD __smlad
<> 140:97feb9bacc10 108 #define __SMLADX __smladx
<> 140:97feb9bacc10 109 #define __SMLALD __smlald
<> 140:97feb9bacc10 110 #define __SMLALDX __smlaldx
<> 140:97feb9bacc10 111 #define __SMUSD __smusd
<> 140:97feb9bacc10 112 #define __SMUSDX __smusdx
<> 140:97feb9bacc10 113 #define __SMLSD __smlsd
<> 140:97feb9bacc10 114 #define __SMLSDX __smlsdx
<> 140:97feb9bacc10 115 #define __SMLSLD __smlsld
<> 140:97feb9bacc10 116 #define __SMLSLDX __smlsldx
<> 140:97feb9bacc10 117 #define __SEL __sel
<> 140:97feb9bacc10 118 #define __QADD __qadd
<> 140:97feb9bacc10 119 #define __QSUB __qsub
<> 140:97feb9bacc10 120
<> 140:97feb9bacc10 121 #define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
<> 140:97feb9bacc10 122 ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )
<> 140:97feb9bacc10 123
<> 140:97feb9bacc10 124 #define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \
<> 140:97feb9bacc10 125 ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) )
<> 140:97feb9bacc10 126
<> 140:97feb9bacc10 127 #define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
<> 140:97feb9bacc10 128 ((int64_t)(ARG3) << 32) ) >> 32))
<> 140:97feb9bacc10 129
<> 140:97feb9bacc10 130 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 140:97feb9bacc10 131
<> 140:97feb9bacc10 132
<> 140:97feb9bacc10 133
<> 140:97feb9bacc10 134 #elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
<> 140:97feb9bacc10 135 /* IAR iccarm specific functions */
<> 140:97feb9bacc10 136
<> 140:97feb9bacc10 137 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 140:97feb9bacc10 138 #include <cmsis_iar.h>
<> 140:97feb9bacc10 139
<> 140:97feb9bacc10 140 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 140:97feb9bacc10 141
<> 140:97feb9bacc10 142
<> 140:97feb9bacc10 143
<> 140:97feb9bacc10 144 #elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
<> 140:97feb9bacc10 145 /* TI CCS specific functions */
<> 140:97feb9bacc10 146
<> 140:97feb9bacc10 147 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 140:97feb9bacc10 148 #include <cmsis_ccs.h>
<> 140:97feb9bacc10 149
<> 140:97feb9bacc10 150 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 140:97feb9bacc10 151
<> 140:97feb9bacc10 152
<> 140:97feb9bacc10 153
<> 140:97feb9bacc10 154 #elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
<> 140:97feb9bacc10 155 /* GNU gcc specific functions */
<> 140:97feb9bacc10 156
<> 140:97feb9bacc10 157 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 140:97feb9bacc10 158 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 159 {
<> 140:97feb9bacc10 160 uint32_t result;
<> 140:97feb9bacc10 161
<> 140:97feb9bacc10 162 __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 163 return(result);
<> 140:97feb9bacc10 164 }
<> 140:97feb9bacc10 165
<> 140:97feb9bacc10 166 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 167 {
<> 140:97feb9bacc10 168 uint32_t result;
<> 140:97feb9bacc10 169
<> 140:97feb9bacc10 170 __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 171 return(result);
<> 140:97feb9bacc10 172 }
<> 140:97feb9bacc10 173
<> 140:97feb9bacc10 174 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 175 {
<> 140:97feb9bacc10 176 uint32_t result;
<> 140:97feb9bacc10 177
<> 140:97feb9bacc10 178 __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 179 return(result);
<> 140:97feb9bacc10 180 }
<> 140:97feb9bacc10 181
<> 140:97feb9bacc10 182 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 183 {
<> 140:97feb9bacc10 184 uint32_t result;
<> 140:97feb9bacc10 185
<> 140:97feb9bacc10 186 __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 187 return(result);
<> 140:97feb9bacc10 188 }
<> 140:97feb9bacc10 189
<> 140:97feb9bacc10 190 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 191 {
<> 140:97feb9bacc10 192 uint32_t result;
<> 140:97feb9bacc10 193
<> 140:97feb9bacc10 194 __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 195 return(result);
<> 140:97feb9bacc10 196 }
<> 140:97feb9bacc10 197
<> 140:97feb9bacc10 198 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 199 {
<> 140:97feb9bacc10 200 uint32_t result;
<> 140:97feb9bacc10 201
<> 140:97feb9bacc10 202 __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 203 return(result);
<> 140:97feb9bacc10 204 }
<> 140:97feb9bacc10 205
<> 140:97feb9bacc10 206
<> 140:97feb9bacc10 207 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 208 {
<> 140:97feb9bacc10 209 uint32_t result;
<> 140:97feb9bacc10 210
<> 140:97feb9bacc10 211 __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 212 return(result);
<> 140:97feb9bacc10 213 }
<> 140:97feb9bacc10 214
<> 140:97feb9bacc10 215 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 216 {
<> 140:97feb9bacc10 217 uint32_t result;
<> 140:97feb9bacc10 218
<> 140:97feb9bacc10 219 __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 220 return(result);
<> 140:97feb9bacc10 221 }
<> 140:97feb9bacc10 222
<> 140:97feb9bacc10 223 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 224 {
<> 140:97feb9bacc10 225 uint32_t result;
<> 140:97feb9bacc10 226
<> 140:97feb9bacc10 227 __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 228 return(result);
<> 140:97feb9bacc10 229 }
<> 140:97feb9bacc10 230
<> 140:97feb9bacc10 231 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 232 {
<> 140:97feb9bacc10 233 uint32_t result;
<> 140:97feb9bacc10 234
<> 140:97feb9bacc10 235 __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 236 return(result);
<> 140:97feb9bacc10 237 }
<> 140:97feb9bacc10 238
<> 140:97feb9bacc10 239 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 240 {
<> 140:97feb9bacc10 241 uint32_t result;
<> 140:97feb9bacc10 242
<> 140:97feb9bacc10 243 __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 244 return(result);
<> 140:97feb9bacc10 245 }
<> 140:97feb9bacc10 246
<> 140:97feb9bacc10 247 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 248 {
<> 140:97feb9bacc10 249 uint32_t result;
<> 140:97feb9bacc10 250
<> 140:97feb9bacc10 251 __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 252 return(result);
<> 140:97feb9bacc10 253 }
<> 140:97feb9bacc10 254
<> 140:97feb9bacc10 255
<> 140:97feb9bacc10 256 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 257 {
<> 140:97feb9bacc10 258 uint32_t result;
<> 140:97feb9bacc10 259
<> 140:97feb9bacc10 260 __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 261 return(result);
<> 140:97feb9bacc10 262 }
<> 140:97feb9bacc10 263
<> 140:97feb9bacc10 264 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 265 {
<> 140:97feb9bacc10 266 uint32_t result;
<> 140:97feb9bacc10 267
<> 140:97feb9bacc10 268 __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 269 return(result);
<> 140:97feb9bacc10 270 }
<> 140:97feb9bacc10 271
<> 140:97feb9bacc10 272 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 273 {
<> 140:97feb9bacc10 274 uint32_t result;
<> 140:97feb9bacc10 275
<> 140:97feb9bacc10 276 __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 277 return(result);
<> 140:97feb9bacc10 278 }
<> 140:97feb9bacc10 279
<> 140:97feb9bacc10 280 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 281 {
<> 140:97feb9bacc10 282 uint32_t result;
<> 140:97feb9bacc10 283
<> 140:97feb9bacc10 284 __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 285 return(result);
<> 140:97feb9bacc10 286 }
<> 140:97feb9bacc10 287
<> 140:97feb9bacc10 288 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 289 {
<> 140:97feb9bacc10 290 uint32_t result;
<> 140:97feb9bacc10 291
<> 140:97feb9bacc10 292 __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 293 return(result);
<> 140:97feb9bacc10 294 }
<> 140:97feb9bacc10 295
<> 140:97feb9bacc10 296 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 297 {
<> 140:97feb9bacc10 298 uint32_t result;
<> 140:97feb9bacc10 299
<> 140:97feb9bacc10 300 __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 301 return(result);
<> 140:97feb9bacc10 302 }
<> 140:97feb9bacc10 303
<> 140:97feb9bacc10 304 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 305 {
<> 140:97feb9bacc10 306 uint32_t result;
<> 140:97feb9bacc10 307
<> 140:97feb9bacc10 308 __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 309 return(result);
<> 140:97feb9bacc10 310 }
<> 140:97feb9bacc10 311
<> 140:97feb9bacc10 312 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 313 {
<> 140:97feb9bacc10 314 uint32_t result;
<> 140:97feb9bacc10 315
<> 140:97feb9bacc10 316 __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 317 return(result);
<> 140:97feb9bacc10 318 }
<> 140:97feb9bacc10 319
<> 140:97feb9bacc10 320 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 321 {
<> 140:97feb9bacc10 322 uint32_t result;
<> 140:97feb9bacc10 323
<> 140:97feb9bacc10 324 __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 325 return(result);
<> 140:97feb9bacc10 326 }
<> 140:97feb9bacc10 327
<> 140:97feb9bacc10 328 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 329 {
<> 140:97feb9bacc10 330 uint32_t result;
<> 140:97feb9bacc10 331
<> 140:97feb9bacc10 332 __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 333 return(result);
<> 140:97feb9bacc10 334 }
<> 140:97feb9bacc10 335
<> 140:97feb9bacc10 336 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 337 {
<> 140:97feb9bacc10 338 uint32_t result;
<> 140:97feb9bacc10 339
<> 140:97feb9bacc10 340 __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 341 return(result);
<> 140:97feb9bacc10 342 }
<> 140:97feb9bacc10 343
<> 140:97feb9bacc10 344 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 345 {
<> 140:97feb9bacc10 346 uint32_t result;
<> 140:97feb9bacc10 347
<> 140:97feb9bacc10 348 __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 349 return(result);
<> 140:97feb9bacc10 350 }
<> 140:97feb9bacc10 351
<> 140:97feb9bacc10 352 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 353 {
<> 140:97feb9bacc10 354 uint32_t result;
<> 140:97feb9bacc10 355
<> 140:97feb9bacc10 356 __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 357 return(result);
<> 140:97feb9bacc10 358 }
<> 140:97feb9bacc10 359
<> 140:97feb9bacc10 360 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 361 {
<> 140:97feb9bacc10 362 uint32_t result;
<> 140:97feb9bacc10 363
<> 140:97feb9bacc10 364 __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 365 return(result);
<> 140:97feb9bacc10 366 }
<> 140:97feb9bacc10 367
<> 140:97feb9bacc10 368 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 369 {
<> 140:97feb9bacc10 370 uint32_t result;
<> 140:97feb9bacc10 371
<> 140:97feb9bacc10 372 __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 373 return(result);
<> 140:97feb9bacc10 374 }
<> 140:97feb9bacc10 375
<> 140:97feb9bacc10 376 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 377 {
<> 140:97feb9bacc10 378 uint32_t result;
<> 140:97feb9bacc10 379
<> 140:97feb9bacc10 380 __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 381 return(result);
<> 140:97feb9bacc10 382 }
<> 140:97feb9bacc10 383
<> 140:97feb9bacc10 384 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 385 {
<> 140:97feb9bacc10 386 uint32_t result;
<> 140:97feb9bacc10 387
<> 140:97feb9bacc10 388 __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 389 return(result);
<> 140:97feb9bacc10 390 }
<> 140:97feb9bacc10 391
<> 140:97feb9bacc10 392 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 393 {
<> 140:97feb9bacc10 394 uint32_t result;
<> 140:97feb9bacc10 395
<> 140:97feb9bacc10 396 __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 397 return(result);
<> 140:97feb9bacc10 398 }
<> 140:97feb9bacc10 399
<> 140:97feb9bacc10 400 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 401 {
<> 140:97feb9bacc10 402 uint32_t result;
<> 140:97feb9bacc10 403
<> 140:97feb9bacc10 404 __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 405 return(result);
<> 140:97feb9bacc10 406 }
<> 140:97feb9bacc10 407
<> 140:97feb9bacc10 408 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 409 {
<> 140:97feb9bacc10 410 uint32_t result;
<> 140:97feb9bacc10 411
<> 140:97feb9bacc10 412 __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 413 return(result);
<> 140:97feb9bacc10 414 }
<> 140:97feb9bacc10 415
<> 140:97feb9bacc10 416 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 417 {
<> 140:97feb9bacc10 418 uint32_t result;
<> 140:97feb9bacc10 419
<> 140:97feb9bacc10 420 __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 421 return(result);
<> 140:97feb9bacc10 422 }
<> 140:97feb9bacc10 423
<> 140:97feb9bacc10 424 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 425 {
<> 140:97feb9bacc10 426 uint32_t result;
<> 140:97feb9bacc10 427
<> 140:97feb9bacc10 428 __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 429 return(result);
<> 140:97feb9bacc10 430 }
<> 140:97feb9bacc10 431
<> 140:97feb9bacc10 432 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 433 {
<> 140:97feb9bacc10 434 uint32_t result;
<> 140:97feb9bacc10 435
<> 140:97feb9bacc10 436 __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 437 return(result);
<> 140:97feb9bacc10 438 }
<> 140:97feb9bacc10 439
<> 140:97feb9bacc10 440 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 441 {
<> 140:97feb9bacc10 442 uint32_t result;
<> 140:97feb9bacc10 443
<> 140:97feb9bacc10 444 __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 445 return(result);
<> 140:97feb9bacc10 446 }
<> 140:97feb9bacc10 447
<> 140:97feb9bacc10 448 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 449 {
<> 140:97feb9bacc10 450 uint32_t result;
<> 140:97feb9bacc10 451
<> 140:97feb9bacc10 452 __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 453 return(result);
<> 140:97feb9bacc10 454 }
<> 140:97feb9bacc10 455
<> 140:97feb9bacc10 456 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
<> 140:97feb9bacc10 457 {
<> 140:97feb9bacc10 458 uint32_t result;
<> 140:97feb9bacc10 459
<> 140:97feb9bacc10 460 __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 140:97feb9bacc10 461 return(result);
<> 140:97feb9bacc10 462 }
<> 140:97feb9bacc10 463
<> 140:97feb9bacc10 464 #define __SSAT16(ARG1,ARG2) \
<> 140:97feb9bacc10 465 ({ \
<> 140:97feb9bacc10 466 uint32_t __RES, __ARG1 = (ARG1); \
<> 140:97feb9bacc10 467 __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
<> 140:97feb9bacc10 468 __RES; \
<> 140:97feb9bacc10 469 })
<> 140:97feb9bacc10 470
<> 140:97feb9bacc10 471 #define __USAT16(ARG1,ARG2) \
<> 140:97feb9bacc10 472 ({ \
<> 140:97feb9bacc10 473 uint32_t __RES, __ARG1 = (ARG1); \
<> 140:97feb9bacc10 474 __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
<> 140:97feb9bacc10 475 __RES; \
<> 140:97feb9bacc10 476 })
<> 140:97feb9bacc10 477
<> 140:97feb9bacc10 478 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
<> 140:97feb9bacc10 479 {
<> 140:97feb9bacc10 480 uint32_t result;
<> 140:97feb9bacc10 481
<> 140:97feb9bacc10 482 __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
<> 140:97feb9bacc10 483 return(result);
<> 140:97feb9bacc10 484 }
<> 140:97feb9bacc10 485
<> 140:97feb9bacc10 486 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 487 {
<> 140:97feb9bacc10 488 uint32_t result;
<> 140:97feb9bacc10 489
<> 140:97feb9bacc10 490 __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 491 return(result);
<> 140:97feb9bacc10 492 }
<> 140:97feb9bacc10 493
<> 140:97feb9bacc10 494 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
<> 140:97feb9bacc10 495 {
<> 140:97feb9bacc10 496 uint32_t result;
<> 140:97feb9bacc10 497
<> 140:97feb9bacc10 498 __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
<> 140:97feb9bacc10 499 return(result);
<> 140:97feb9bacc10 500 }
<> 140:97feb9bacc10 501
<> 140:97feb9bacc10 502 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 503 {
<> 140:97feb9bacc10 504 uint32_t result;
<> 140:97feb9bacc10 505
<> 140:97feb9bacc10 506 __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 507 return(result);
<> 140:97feb9bacc10 508 }
<> 140:97feb9bacc10 509
<> 140:97feb9bacc10 510 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 511 {
<> 140:97feb9bacc10 512 uint32_t result;
<> 140:97feb9bacc10 513
<> 140:97feb9bacc10 514 __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 515 return(result);
<> 140:97feb9bacc10 516 }
<> 140:97feb9bacc10 517
<> 140:97feb9bacc10 518 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 519 {
<> 140:97feb9bacc10 520 uint32_t result;
<> 140:97feb9bacc10 521
<> 140:97feb9bacc10 522 __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 523 return(result);
<> 140:97feb9bacc10 524 }
<> 140:97feb9bacc10 525
<> 140:97feb9bacc10 526 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
<> 140:97feb9bacc10 527 {
<> 140:97feb9bacc10 528 uint32_t result;
<> 140:97feb9bacc10 529
<> 140:97feb9bacc10 530 __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 140:97feb9bacc10 531 return(result);
<> 140:97feb9bacc10 532 }
<> 140:97feb9bacc10 533
<> 140:97feb9bacc10 534 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
<> 140:97feb9bacc10 535 {
<> 140:97feb9bacc10 536 uint32_t result;
<> 140:97feb9bacc10 537
<> 140:97feb9bacc10 538 __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 140:97feb9bacc10 539 return(result);
<> 140:97feb9bacc10 540 }
<> 140:97feb9bacc10 541
<> 140:97feb9bacc10 542 #define __SMLALD(ARG1,ARG2,ARG3) \
<> 140:97feb9bacc10 543 ({ \
<> 140:97feb9bacc10 544 uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
<> 140:97feb9bacc10 545 __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
<> 140:97feb9bacc10 546 (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
<> 140:97feb9bacc10 547 })
<> 140:97feb9bacc10 548
<> 140:97feb9bacc10 549 #define __SMLALDX(ARG1,ARG2,ARG3) \
<> 140:97feb9bacc10 550 ({ \
<> 140:97feb9bacc10 551 uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
<> 140:97feb9bacc10 552 __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
<> 140:97feb9bacc10 553 (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
<> 140:97feb9bacc10 554 })
<> 140:97feb9bacc10 555
<> 140:97feb9bacc10 556 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 557 {
<> 140:97feb9bacc10 558 uint32_t result;
<> 140:97feb9bacc10 559
<> 140:97feb9bacc10 560 __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 561 return(result);
<> 140:97feb9bacc10 562 }
<> 140:97feb9bacc10 563
<> 140:97feb9bacc10 564 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 565 {
<> 140:97feb9bacc10 566 uint32_t result;
<> 140:97feb9bacc10 567
<> 140:97feb9bacc10 568 __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 569 return(result);
<> 140:97feb9bacc10 570 }
<> 140:97feb9bacc10 571
<> 140:97feb9bacc10 572 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
<> 140:97feb9bacc10 573 {
<> 140:97feb9bacc10 574 uint32_t result;
<> 140:97feb9bacc10 575
<> 140:97feb9bacc10 576 __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 140:97feb9bacc10 577 return(result);
<> 140:97feb9bacc10 578 }
<> 140:97feb9bacc10 579
<> 140:97feb9bacc10 580 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
<> 140:97feb9bacc10 581 {
<> 140:97feb9bacc10 582 uint32_t result;
<> 140:97feb9bacc10 583
<> 140:97feb9bacc10 584 __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 140:97feb9bacc10 585 return(result);
<> 140:97feb9bacc10 586 }
<> 140:97feb9bacc10 587
/** \brief  Signed Multiply Subtract Long Dual (SMLSLD)

    Performs two signed 16x16 multiplies, subtracts one product from the
    other, and accumulates the difference into the 64-bit accumulator ARG3.

    \param [in] ARG1  first operand, two signed halfwords
    \param [in] ARG2  second operand, two signed halfwords
    \param [in] ARG3  64-bit accumulator
    \return           64-bit accumulation result

    Fix: cast ARG3 to uint64_t before shifting/masking, matching __SMLALD.
    Without the cast, passing a 32-bit value makes `>> 32` undefined
    behavior (shift by the full width of the type).
 */
#define __SMLSLD(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
})
<> 140:97feb9bacc10 594
/** \brief  Signed Multiply Subtract Long Dual, Exchanged (SMLSLDX)

    As SMLSLD, but the halfwords of ARG2 are exchanged before the two 16x16
    multiplies; the product difference is accumulated into the 64-bit ARG3.

    \param [in] ARG1  first operand, two signed halfwords
    \param [in] ARG2  second operand, two signed halfwords (halves swapped)
    \param [in] ARG3  64-bit accumulator
    \return           64-bit accumulation result

    Fix: cast ARG3 to uint64_t before shifting/masking, matching __SMLALDX.
    Without the cast, passing a 32-bit value makes `>> 32` undefined behavior.
 */
#define __SMLSLDX(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
})
<> 140:97feb9bacc10 601
<> 140:97feb9bacc10 602 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 603 {
<> 140:97feb9bacc10 604 uint32_t result;
<> 140:97feb9bacc10 605
<> 140:97feb9bacc10 606 __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 607 return(result);
<> 140:97feb9bacc10 608 }
<> 140:97feb9bacc10 609
<> 140:97feb9bacc10 610 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 611 {
<> 140:97feb9bacc10 612 uint32_t result;
<> 140:97feb9bacc10 613
<> 140:97feb9bacc10 614 __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 615 return(result);
<> 140:97feb9bacc10 616 }
<> 140:97feb9bacc10 617
<> 140:97feb9bacc10 618 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
<> 140:97feb9bacc10 619 {
<> 140:97feb9bacc10 620 uint32_t result;
<> 140:97feb9bacc10 621
<> 140:97feb9bacc10 622 __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 140:97feb9bacc10 623 return(result);
<> 140:97feb9bacc10 624 }
<> 140:97feb9bacc10 625
/** \brief  Pack Halfword, Bottom-Top (PKHBT)

    Combines the bottom halfword of ARG1 with the top halfword of
    (ARG2 << ARG3).

    \param [in] ARG1  value supplying the bottom halfword
    \param [in] ARG2  value supplying the top halfword (after left shift)
    \param [in] ARG3  left-shift amount; must be a compile-time constant
                      (asm "I" immediate constraint)

    NOTE(review): ARG3 is referenced directly (not copied to a local) because
    the instruction encodes it as an immediate; passing a non-constant fails
    to assemble rather than misbehaving.
 */
#define __PKHBT(ARG1,ARG2,ARG3) \
({ \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
})
<> 140:97feb9bacc10 632
/** \brief  Pack Halfword, Top-Bottom (PKHTB)

    Combines the top halfword of ARG1 with the bottom halfword of
    (ARG2 >> ARG3, arithmetic shift).

    \param [in] ARG1  value supplying the top halfword
    \param [in] ARG2  value supplying the bottom halfword (after right shift)
    \param [in] ARG3  arithmetic-right-shift amount; must be a compile-time
                      constant (asm "I" immediate constraint)

    The shift-free form is emitted when ARG3 == 0 because `asr #0` is not a
    valid encoding for this instruction. ARG3 is evaluated more than once,
    which is safe only because it must be a constant expression.
 */
#define __PKHTB(ARG1,ARG2,ARG3) \
({ \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
})
<> 140:97feb9bacc10 642
<> 140:97feb9bacc10 643 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
<> 140:97feb9bacc10 644 {
<> 140:97feb9bacc10 645 int32_t result;
<> 140:97feb9bacc10 646
<> 140:97feb9bacc10 647 __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
<> 140:97feb9bacc10 648 return(result);
<> 140:97feb9bacc10 649 }
<> 140:97feb9bacc10 650
<> 140:97feb9bacc10 651 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 140:97feb9bacc10 652
<> 140:97feb9bacc10 653
<> 140:97feb9bacc10 654
<> 140:97feb9bacc10 655 #elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
<> 140:97feb9bacc10 656 /* TASKING carm specific functions */
<> 140:97feb9bacc10 657
<> 140:97feb9bacc10 658
<> 140:97feb9bacc10 659 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 140:97feb9bacc10 660 /* not yet supported */
<> 140:97feb9bacc10 661 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 140:97feb9bacc10 662
<> 140:97feb9bacc10 663
<> 140:97feb9bacc10 664 #endif
<> 140:97feb9bacc10 665
<> 140:97feb9bacc10 666 /*@} end of group CMSIS_SIMD_intrinsics */
<> 140:97feb9bacc10 667
<> 140:97feb9bacc10 668
<> 140:97feb9bacc10 669 #endif /* __CORE_CM4_SIMD_H */
<> 140:97feb9bacc10 670
<> 140:97feb9bacc10 671 #ifdef __cplusplus
<> 140:97feb9bacc10 672 }
<> 140:97feb9bacc10 673 #endif