The official Mbed 2 C/C++ SDK provides the software platform and libraries to build your applications.

Dependents:   hello SerialTestv11 SerialTestv12 Sierpinski ... more

mbed 2

This is the mbed 2 library. If you'd like to learn about Mbed OS please see the mbed-os docs.

Committer:
<>
Date:
Mon Jan 16 12:05:23 2017 +0000
Revision:
134:ad3be0349dc5
Parent:
132:9baf128c2fab
Release 134 of the mbed library

Ports for Upcoming Targets


Fixes and Changes

3488: Dev stm i2c v2 unitary functions https://github.com/ARMmbed/mbed-os/pull/3488
3492: Fix #3463 CAN read() return value https://github.com/ARMmbed/mbed-os/pull/3492
3503: [LPC15xx] Ensure that PWM=1 is resolved correctly https://github.com/ARMmbed/mbed-os/pull/3503
3504: [LPC15xx] CAN implementation improvements https://github.com/ARMmbed/mbed-os/pull/3504
3539: NUCLEO_F412ZG - Add support of TRNG peripheral https://github.com/ARMmbed/mbed-os/pull/3539
3540: STM: SPI: Initialize Rx in spi_master_write https://github.com/ARMmbed/mbed-os/pull/3540
3438: K64F: Add support for SERIAL ASYNCH API https://github.com/ARMmbed/mbed-os/pull/3438
3519: MCUXpresso: Fix ENET driver to enable interrupts after interrupt handler is set https://github.com/ARMmbed/mbed-os/pull/3519
3544: STM32L4 deepsleep improvement https://github.com/ARMmbed/mbed-os/pull/3544
3546: NUCLEO-F412ZG - Add CAN peripheral https://github.com/ARMmbed/mbed-os/pull/3546
3551: Fix I2C driver for RZ/A1H https://github.com/ARMmbed/mbed-os/pull/3551
3558: K64F UART Asynch API: Fix synchronization issue https://github.com/ARMmbed/mbed-os/pull/3558
3563: LPC4088 - Fix vector checksum https://github.com/ARMmbed/mbed-os/pull/3563
3567: Dev stm32 F0 v1.7.0 https://github.com/ARMmbed/mbed-os/pull/3567
3577: Fixes linking errors when building with debug profile https://github.com/ARMmbed/mbed-os/pull/3577

Who changed what in which revision?

User | Revision | Line number | New contents of line
<> 132:9baf128c2fab 1 /**************************************************************************//**
<> 132:9baf128c2fab 2 * @file core_cm4_simd.h
<> 132:9baf128c2fab 3 * @brief CMSIS Cortex-M4 SIMD Header File
<> 132:9baf128c2fab 4 * @version V3.20
<> 132:9baf128c2fab 5 * @date 25. February 2013
<> 132:9baf128c2fab 6 *
<> 132:9baf128c2fab 7 * @note
<> 132:9baf128c2fab 8 *
<> 132:9baf128c2fab 9 ******************************************************************************/
<> 132:9baf128c2fab 10 /* Copyright (c) 2009 - 2013 ARM LIMITED
<> 132:9baf128c2fab 11
<> 132:9baf128c2fab 12 All rights reserved.
<> 132:9baf128c2fab 13 Redistribution and use in source and binary forms, with or without
<> 132:9baf128c2fab 14 modification, are permitted provided that the following conditions are met:
<> 132:9baf128c2fab 15 - Redistributions of source code must retain the above copyright
<> 132:9baf128c2fab 16 notice, this list of conditions and the following disclaimer.
<> 132:9baf128c2fab 17 - Redistributions in binary form must reproduce the above copyright
<> 132:9baf128c2fab 18 notice, this list of conditions and the following disclaimer in the
<> 132:9baf128c2fab 19 documentation and/or other materials provided with the distribution.
<> 132:9baf128c2fab 20 - Neither the name of ARM nor the names of its contributors may be used
<> 132:9baf128c2fab 21 to endorse or promote products derived from this software without
<> 132:9baf128c2fab 22 specific prior written permission.
<> 132:9baf128c2fab 23 *
<> 132:9baf128c2fab 24 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
<> 132:9baf128c2fab 25 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
<> 132:9baf128c2fab 26 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
<> 132:9baf128c2fab 27 ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
<> 132:9baf128c2fab 28 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
<> 132:9baf128c2fab 29 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
<> 132:9baf128c2fab 30 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
<> 132:9baf128c2fab 31 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
<> 132:9baf128c2fab 32 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
<> 132:9baf128c2fab 33 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
<> 132:9baf128c2fab 34 POSSIBILITY OF SUCH DAMAGE.
<> 132:9baf128c2fab 35 ---------------------------------------------------------------------------*/
<> 132:9baf128c2fab 36
<> 132:9baf128c2fab 37
<> 132:9baf128c2fab 38 #ifdef __cplusplus
<> 132:9baf128c2fab 39 extern "C" {
<> 132:9baf128c2fab 40 #endif
<> 132:9baf128c2fab 41
<> 132:9baf128c2fab 42 #ifndef __CORE_CM4_SIMD_H
<> 132:9baf128c2fab 43 #define __CORE_CM4_SIMD_H
<> 132:9baf128c2fab 44
<> 132:9baf128c2fab 45
<> 132:9baf128c2fab 46 /*******************************************************************************
<> 132:9baf128c2fab 47 * Hardware Abstraction Layer
<> 132:9baf128c2fab 48 ******************************************************************************/
<> 132:9baf128c2fab 49
<> 132:9baf128c2fab 50
<> 132:9baf128c2fab 51 /* ################### Compiler specific Intrinsics ########################### */
<> 132:9baf128c2fab 52 /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
<> 132:9baf128c2fab 53 Access to dedicated SIMD instructions
<> 132:9baf128c2fab 54 @{
<> 132:9baf128c2fab 55 */
<> 132:9baf128c2fab 56
<> 132:9baf128c2fab 57 #if defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
<> 132:9baf128c2fab 58 /* ARM armcc specific functions */
<> 132:9baf128c2fab 59
<> 132:9baf128c2fab 60 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
/* armcc exposes each ARMv7E-M SIMD instruction as a built-in intrinsic named
 * after the instruction in lowercase, so every CMSIS name below is a plain
 * alias onto the corresponding compiler built-in.
 * (Assumes an armcc version that provides these built-ins — TODO confirm
 * minimum toolchain version against the mbed build requirements.) */
<> 132:9baf128c2fab 61 #define __SADD8 __sadd8
<> 132:9baf128c2fab 62 #define __QADD8 __qadd8
<> 132:9baf128c2fab 63 #define __SHADD8 __shadd8
<> 132:9baf128c2fab 64 #define __UADD8 __uadd8
<> 132:9baf128c2fab 65 #define __UQADD8 __uqadd8
<> 132:9baf128c2fab 66 #define __UHADD8 __uhadd8
<> 132:9baf128c2fab 67 #define __SSUB8 __ssub8
<> 132:9baf128c2fab 68 #define __QSUB8 __qsub8
<> 132:9baf128c2fab 69 #define __SHSUB8 __shsub8
<> 132:9baf128c2fab 70 #define __USUB8 __usub8
<> 132:9baf128c2fab 71 #define __UQSUB8 __uqsub8
<> 132:9baf128c2fab 72 #define __UHSUB8 __uhsub8
<> 132:9baf128c2fab 73 #define __SADD16 __sadd16
<> 132:9baf128c2fab 74 #define __QADD16 __qadd16
<> 132:9baf128c2fab 75 #define __SHADD16 __shadd16
<> 132:9baf128c2fab 76 #define __UADD16 __uadd16
<> 132:9baf128c2fab 77 #define __UQADD16 __uqadd16
<> 132:9baf128c2fab 78 #define __UHADD16 __uhadd16
<> 132:9baf128c2fab 79 #define __SSUB16 __ssub16
<> 132:9baf128c2fab 80 #define __QSUB16 __qsub16
<> 132:9baf128c2fab 81 #define __SHSUB16 __shsub16
<> 132:9baf128c2fab 82 #define __USUB16 __usub16
<> 132:9baf128c2fab 83 #define __UQSUB16 __uqsub16
<> 132:9baf128c2fab 84 #define __UHSUB16 __uhsub16
<> 132:9baf128c2fab 85 #define __SASX __sasx
<> 132:9baf128c2fab 86 #define __QASX __qasx
<> 132:9baf128c2fab 87 #define __SHASX __shasx
<> 132:9baf128c2fab 88 #define __UASX __uasx
<> 132:9baf128c2fab 89 #define __UQASX __uqasx
<> 132:9baf128c2fab 90 #define __UHASX __uhasx
<> 132:9baf128c2fab 91 #define __SSAX __ssax
<> 132:9baf128c2fab 92 #define __QSAX __qsax
<> 132:9baf128c2fab 93 #define __SHSAX __shsax
<> 132:9baf128c2fab 94 #define __USAX __usax
<> 132:9baf128c2fab 95 #define __UQSAX __uqsax
<> 132:9baf128c2fab 96 #define __UHSAX __uhsax
<> 132:9baf128c2fab 97 #define __USAD8 __usad8
<> 132:9baf128c2fab 98 #define __USADA8 __usada8
<> 132:9baf128c2fab 99 #define __SSAT16 __ssat16
<> 132:9baf128c2fab 100 #define __USAT16 __usat16
<> 132:9baf128c2fab 101 #define __UXTB16 __uxtb16
<> 132:9baf128c2fab 102 #define __UXTAB16 __uxtab16
<> 132:9baf128c2fab 103 #define __SXTB16 __sxtb16
<> 132:9baf128c2fab 104 #define __SXTAB16 __sxtab16
<> 132:9baf128c2fab 105 #define __SMUAD __smuad
<> 132:9baf128c2fab 106 #define __SMUADX __smuadx
<> 132:9baf128c2fab 107 #define __SMLAD __smlad
<> 132:9baf128c2fab 108 #define __SMLADX __smladx
<> 132:9baf128c2fab 109 #define __SMLALD __smlald
<> 132:9baf128c2fab 110 #define __SMLALDX __smlaldx
<> 132:9baf128c2fab 111 #define __SMUSD __smusd
<> 132:9baf128c2fab 112 #define __SMUSDX __smusdx
<> 132:9baf128c2fab 113 #define __SMLSD __smlsd
<> 132:9baf128c2fab 114 #define __SMLSDX __smlsdx
<> 132:9baf128c2fab 115 #define __SMLSLD __smlsld
<> 132:9baf128c2fab 116 #define __SMLSLDX __smlsldx
<> 132:9baf128c2fab 117 #define __SEL __sel
<> 132:9baf128c2fab 118 #define __QADD __qadd
<> 132:9baf128c2fab 119 #define __QSUB __qsub
<> 132:9baf128c2fab 120
/* Pack-halfword helpers, implemented as plain C expressions for armcc:
 * __PKHBT keeps the low halfword of ARG1 and inserts ARG2 shifted left by
 * ARG3 into the high halfword; __PKHTB keeps the high halfword of ARG1 and
 * inserts ARG2 shifted right by ARG3 into the low halfword.
 * Each argument is evaluated exactly once and fully parenthesized. */
<> 132:9baf128c2fab 121 #define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
<> 132:9baf128c2fab 122 ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )
<> 132:9baf128c2fab 123
<> 132:9baf128c2fab 124 #define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \
<> 132:9baf128c2fab 125 ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) )
<> 132:9baf128c2fab 126
/* __SMMLA: most-significant-word multiply-accumulate — evaluates
 * (int32_t)(((int64_t)ARG1 * ARG2 + ((int64_t)ARG3 << 32)) >> 32),
 * i.e. the top 32 bits of the 64-bit product plus accumulator. */
<> 132:9baf128c2fab 127 #define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
<> 132:9baf128c2fab 128 ((int64_t)(ARG3) << 32) ) >> 32))
<> 132:9baf128c2fab 129
<> 132:9baf128c2fab 130 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 132:9baf128c2fab 131
<> 132:9baf128c2fab 132
<> 132:9baf128c2fab 133
<> 132:9baf128c2fab 134 #elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
<> 132:9baf128c2fab 135 /* IAR iccarm specific functions */
<> 132:9baf128c2fab 136
<> 132:9baf128c2fab 137 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
/* IAR supplies the full set of CMSIS SIMD intrinsics in its own compatibility
 * header, so nothing is defined here directly. */
<> 132:9baf128c2fab 138 #include <cmsis_iar.h>
<> 132:9baf128c2fab 139
<> 132:9baf128c2fab 140 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 132:9baf128c2fab 141
<> 132:9baf128c2fab 142
<> 132:9baf128c2fab 143
<> 132:9baf128c2fab 144 #elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
<> 132:9baf128c2fab 145 /* TI CCS specific functions */
<> 132:9baf128c2fab 146
<> 132:9baf128c2fab 147 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
/* TI Code Composer Studio likewise provides the intrinsics via its own
 * CMSIS compatibility header. */
<> 132:9baf128c2fab 148 #include <cmsis_ccs.h>
<> 132:9baf128c2fab 149
<> 132:9baf128c2fab 150 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 132:9baf128c2fab 151
<> 132:9baf128c2fab 152
<> 132:9baf128c2fab 153
<> 132:9baf128c2fab 154 #elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
<> 132:9baf128c2fab 155 /* GNU gcc specific functions */
<> 132:9baf128c2fab 156
<> 132:9baf128c2fab 157 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
/* For GCC each CMSIS SIMD name is a force-inlined wrapper around one inline
 * asm statement emitting the matching ARMv7E-M instruction; the packed 32-bit
 * result is returned.  __STATIC_INLINE and __ASM come from the core CMSIS
 * compiler headers (defined elsewhere in the library).
 *
 * Byte-wise (4 x 8-bit lane) additions: signed (sadd8), signed saturating
 * (qadd8), signed halving (shadd8), and the unsigned u/uq/uh counterparts. */
<> 132:9baf128c2fab 158 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 159 {
<> 132:9baf128c2fab 160 uint32_t result;
<> 132:9baf128c2fab 161
<> 132:9baf128c2fab 162 __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 163 return(result);
<> 132:9baf128c2fab 164 }
<> 132:9baf128c2fab 165
<> 132:9baf128c2fab 166 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 167 {
<> 132:9baf128c2fab 168 uint32_t result;
<> 132:9baf128c2fab 169
<> 132:9baf128c2fab 170 __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 171 return(result);
<> 132:9baf128c2fab 172 }
<> 132:9baf128c2fab 173
<> 132:9baf128c2fab 174 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 175 {
<> 132:9baf128c2fab 176 uint32_t result;
<> 132:9baf128c2fab 177
<> 132:9baf128c2fab 178 __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 179 return(result);
<> 132:9baf128c2fab 180 }
<> 132:9baf128c2fab 181
<> 132:9baf128c2fab 182 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 183 {
<> 132:9baf128c2fab 184 uint32_t result;
<> 132:9baf128c2fab 185
<> 132:9baf128c2fab 186 __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 187 return(result);
<> 132:9baf128c2fab 188 }
<> 132:9baf128c2fab 189
<> 132:9baf128c2fab 190 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 191 {
<> 132:9baf128c2fab 192 uint32_t result;
<> 132:9baf128c2fab 193
<> 132:9baf128c2fab 194 __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 195 return(result);
<> 132:9baf128c2fab 196 }
<> 132:9baf128c2fab 197
<> 132:9baf128c2fab 198 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 199 {
<> 132:9baf128c2fab 200 uint32_t result;
<> 132:9baf128c2fab 201
<> 132:9baf128c2fab 202 __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 203 return(result);
<> 132:9baf128c2fab 204 }
<> 132:9baf128c2fab 205
<> 132:9baf128c2fab 206
/* Byte-wise (4 x 8-bit lane) subtractions — same signed / saturating /
 * halving / unsigned variant scheme as the additions above. */
<> 132:9baf128c2fab 207 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 208 {
<> 132:9baf128c2fab 209 uint32_t result;
<> 132:9baf128c2fab 210
<> 132:9baf128c2fab 211 __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 212 return(result);
<> 132:9baf128c2fab 213 }
<> 132:9baf128c2fab 214
<> 132:9baf128c2fab 215 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 216 {
<> 132:9baf128c2fab 217 uint32_t result;
<> 132:9baf128c2fab 218
<> 132:9baf128c2fab 219 __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 220 return(result);
<> 132:9baf128c2fab 221 }
<> 132:9baf128c2fab 222
<> 132:9baf128c2fab 223 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 224 {
<> 132:9baf128c2fab 225 uint32_t result;
<> 132:9baf128c2fab 226
<> 132:9baf128c2fab 227 __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 228 return(result);
<> 132:9baf128c2fab 229 }
<> 132:9baf128c2fab 230
<> 132:9baf128c2fab 231 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 232 {
<> 132:9baf128c2fab 233 uint32_t result;
<> 132:9baf128c2fab 234
<> 132:9baf128c2fab 235 __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 236 return(result);
<> 132:9baf128c2fab 237 }
<> 132:9baf128c2fab 238
<> 132:9baf128c2fab 239 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 240 {
<> 132:9baf128c2fab 241 uint32_t result;
<> 132:9baf128c2fab 242
<> 132:9baf128c2fab 243 __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 244 return(result);
<> 132:9baf128c2fab 245 }
<> 132:9baf128c2fab 246
<> 132:9baf128c2fab 247 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 248 {
<> 132:9baf128c2fab 249 uint32_t result;
<> 132:9baf128c2fab 250
<> 132:9baf128c2fab 251 __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 252 return(result);
<> 132:9baf128c2fab 253 }
<> 132:9baf128c2fab 254
<> 132:9baf128c2fab 255
/* Halfword-wise (2 x 16-bit lane) additions — signed / saturating / halving
 * and the unsigned variants, mirroring the 8-bit group above. */
<> 132:9baf128c2fab 256 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 257 {
<> 132:9baf128c2fab 258 uint32_t result;
<> 132:9baf128c2fab 259
<> 132:9baf128c2fab 260 __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 261 return(result);
<> 132:9baf128c2fab 262 }
<> 132:9baf128c2fab 263
<> 132:9baf128c2fab 264 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 265 {
<> 132:9baf128c2fab 266 uint32_t result;
<> 132:9baf128c2fab 267
<> 132:9baf128c2fab 268 __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 269 return(result);
<> 132:9baf128c2fab 270 }
<> 132:9baf128c2fab 271
<> 132:9baf128c2fab 272 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 273 {
<> 132:9baf128c2fab 274 uint32_t result;
<> 132:9baf128c2fab 275
<> 132:9baf128c2fab 276 __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 277 return(result);
<> 132:9baf128c2fab 278 }
<> 132:9baf128c2fab 279
<> 132:9baf128c2fab 280 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 281 {
<> 132:9baf128c2fab 282 uint32_t result;
<> 132:9baf128c2fab 283
<> 132:9baf128c2fab 284 __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 285 return(result);
<> 132:9baf128c2fab 286 }
<> 132:9baf128c2fab 287
<> 132:9baf128c2fab 288 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 289 {
<> 132:9baf128c2fab 290 uint32_t result;
<> 132:9baf128c2fab 291
<> 132:9baf128c2fab 292 __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 293 return(result);
<> 132:9baf128c2fab 294 }
<> 132:9baf128c2fab 295
<> 132:9baf128c2fab 296 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 297 {
<> 132:9baf128c2fab 298 uint32_t result;
<> 132:9baf128c2fab 299
<> 132:9baf128c2fab 300 __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 301 return(result);
<> 132:9baf128c2fab 302 }
<> 132:9baf128c2fab 303
/* Halfword-wise (2 x 16-bit lane) subtractions — same variant scheme. */
<> 132:9baf128c2fab 304 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 305 {
<> 132:9baf128c2fab 306 uint32_t result;
<> 132:9baf128c2fab 307
<> 132:9baf128c2fab 308 __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 309 return(result);
<> 132:9baf128c2fab 310 }
<> 132:9baf128c2fab 311
<> 132:9baf128c2fab 312 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 313 {
<> 132:9baf128c2fab 314 uint32_t result;
<> 132:9baf128c2fab 315
<> 132:9baf128c2fab 316 __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 317 return(result);
<> 132:9baf128c2fab 318 }
<> 132:9baf128c2fab 319
<> 132:9baf128c2fab 320 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 321 {
<> 132:9baf128c2fab 322 uint32_t result;
<> 132:9baf128c2fab 323
<> 132:9baf128c2fab 324 __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 325 return(result);
<> 132:9baf128c2fab 326 }
<> 132:9baf128c2fab 327
<> 132:9baf128c2fab 328 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 329 {
<> 132:9baf128c2fab 330 uint32_t result;
<> 132:9baf128c2fab 331
<> 132:9baf128c2fab 332 __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 333 return(result);
<> 132:9baf128c2fab 334 }
<> 132:9baf128c2fab 335
<> 132:9baf128c2fab 336 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 337 {
<> 132:9baf128c2fab 338 uint32_t result;
<> 132:9baf128c2fab 339
<> 132:9baf128c2fab 340 __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 341 return(result);
<> 132:9baf128c2fab 342 }
<> 132:9baf128c2fab 343
<> 132:9baf128c2fab 344 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 345 {
<> 132:9baf128c2fab 346 uint32_t result;
<> 132:9baf128c2fab 347
<> 132:9baf128c2fab 348 __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 349 return(result);
<> 132:9baf128c2fab 350 }
<> 132:9baf128c2fab 351
/* "Exchange" operations (per the ARMv7-M ISA): ASX exchanges the halfwords
 * of op2 then adds the high halfwords and subtracts the low halfwords;
 * SAX is the mirror (subtract high, add low).  q/sh/u/uq/uh prefixes select
 * saturating, halving and unsigned variants as in the groups above. */
<> 132:9baf128c2fab 352 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 353 {
<> 132:9baf128c2fab 354 uint32_t result;
<> 132:9baf128c2fab 355
<> 132:9baf128c2fab 356 __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 357 return(result);
<> 132:9baf128c2fab 358 }
<> 132:9baf128c2fab 359
<> 132:9baf128c2fab 360 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 361 {
<> 132:9baf128c2fab 362 uint32_t result;
<> 132:9baf128c2fab 363
<> 132:9baf128c2fab 364 __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 365 return(result);
<> 132:9baf128c2fab 366 }
<> 132:9baf128c2fab 367
<> 132:9baf128c2fab 368 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 369 {
<> 132:9baf128c2fab 370 uint32_t result;
<> 132:9baf128c2fab 371
<> 132:9baf128c2fab 372 __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 373 return(result);
<> 132:9baf128c2fab 374 }
<> 132:9baf128c2fab 375
<> 132:9baf128c2fab 376 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 377 {
<> 132:9baf128c2fab 378 uint32_t result;
<> 132:9baf128c2fab 379
<> 132:9baf128c2fab 380 __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 381 return(result);
<> 132:9baf128c2fab 382 }
<> 132:9baf128c2fab 383
<> 132:9baf128c2fab 384 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 385 {
<> 132:9baf128c2fab 386 uint32_t result;
<> 132:9baf128c2fab 387
<> 132:9baf128c2fab 388 __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 389 return(result);
<> 132:9baf128c2fab 390 }
<> 132:9baf128c2fab 391
<> 132:9baf128c2fab 392 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 393 {
<> 132:9baf128c2fab 394 uint32_t result;
<> 132:9baf128c2fab 395
<> 132:9baf128c2fab 396 __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 397 return(result);
<> 132:9baf128c2fab 398 }
<> 132:9baf128c2fab 399
<> 132:9baf128c2fab 400 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 401 {
<> 132:9baf128c2fab 402 uint32_t result;
<> 132:9baf128c2fab 403
<> 132:9baf128c2fab 404 __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 405 return(result);
<> 132:9baf128c2fab 406 }
<> 132:9baf128c2fab 407
<> 132:9baf128c2fab 408 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 409 {
<> 132:9baf128c2fab 410 uint32_t result;
<> 132:9baf128c2fab 411
<> 132:9baf128c2fab 412 __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 413 return(result);
<> 132:9baf128c2fab 414 }
<> 132:9baf128c2fab 415
<> 132:9baf128c2fab 416 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 417 {
<> 132:9baf128c2fab 418 uint32_t result;
<> 132:9baf128c2fab 419
<> 132:9baf128c2fab 420 __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 421 return(result);
<> 132:9baf128c2fab 422 }
<> 132:9baf128c2fab 423
<> 132:9baf128c2fab 424 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 425 {
<> 132:9baf128c2fab 426 uint32_t result;
<> 132:9baf128c2fab 427
<> 132:9baf128c2fab 428 __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 429 return(result);
<> 132:9baf128c2fab 430 }
<> 132:9baf128c2fab 431
<> 132:9baf128c2fab 432 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 433 {
<> 132:9baf128c2fab 434 uint32_t result;
<> 132:9baf128c2fab 435
<> 132:9baf128c2fab 436 __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 437 return(result);
<> 132:9baf128c2fab 438 }
<> 132:9baf128c2fab 439
<> 132:9baf128c2fab 440 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 441 {
<> 132:9baf128c2fab 442 uint32_t result;
<> 132:9baf128c2fab 443
<> 132:9baf128c2fab 444 __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 445 return(result);
<> 132:9baf128c2fab 446 }
<> 132:9baf128c2fab 447
/* __USAD8: sum of absolute differences of the four byte lanes of op1/op2.
 * __USADA8: the same sum added to accumulator op3. */
<> 132:9baf128c2fab 448 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 449 {
<> 132:9baf128c2fab 450 uint32_t result;
<> 132:9baf128c2fab 451
<> 132:9baf128c2fab 452 __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 453 return(result);
<> 132:9baf128c2fab 454 }
<> 132:9baf128c2fab 455
<> 132:9baf128c2fab 456 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
<> 132:9baf128c2fab 457 {
<> 132:9baf128c2fab 458 uint32_t result;
<> 132:9baf128c2fab 459
<> 132:9baf128c2fab 460 __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 132:9baf128c2fab 461 return(result);
<> 132:9baf128c2fab 462 }
<> 132:9baf128c2fab 463
/* Dual-halfword saturation.  These must be macros because the saturation
 * width ARG2 is encoded as an immediate ("I" constraint) — ARG2 therefore
 * has to be a compile-time constant in the instruction's immediate range. */
<> 132:9baf128c2fab 464 #define __SSAT16(ARG1,ARG2) \
<> 132:9baf128c2fab 465 ({ \
<> 132:9baf128c2fab 466 uint32_t __RES, __ARG1 = (ARG1); \
<> 132:9baf128c2fab 467 __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
<> 132:9baf128c2fab 468 __RES; \
<> 132:9baf128c2fab 469 })
<> 132:9baf128c2fab 470
<> 132:9baf128c2fab 471 #define __USAT16(ARG1,ARG2) \
<> 132:9baf128c2fab 472 ({ \
<> 132:9baf128c2fab 473 uint32_t __RES, __ARG1 = (ARG1); \
<> 132:9baf128c2fab 474 __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
<> 132:9baf128c2fab 475 __RES; \
<> 132:9baf128c2fab 476 })
<> 132:9baf128c2fab 477
/* Dual byte-to-halfword extensions: UXTB16/SXTB16 zero-/sign-extend bytes 0
 * and 2 of op1 into the two halfword lanes; the ...AB16 forms add the
 * extended bytes to the corresponding halfwords of op1 (op2 is extended). */
<> 132:9baf128c2fab 478 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
<> 132:9baf128c2fab 479 {
<> 132:9baf128c2fab 480 uint32_t result;
<> 132:9baf128c2fab 481
<> 132:9baf128c2fab 482 __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
<> 132:9baf128c2fab 483 return(result);
<> 132:9baf128c2fab 484 }
<> 132:9baf128c2fab 485
<> 132:9baf128c2fab 486 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 487 {
<> 132:9baf128c2fab 488 uint32_t result;
<> 132:9baf128c2fab 489
<> 132:9baf128c2fab 490 __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 491 return(result);
<> 132:9baf128c2fab 492 }
<> 132:9baf128c2fab 493
<> 132:9baf128c2fab 494 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
<> 132:9baf128c2fab 495 {
<> 132:9baf128c2fab 496 uint32_t result;
<> 132:9baf128c2fab 497
<> 132:9baf128c2fab 498 __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
<> 132:9baf128c2fab 499 return(result);
<> 132:9baf128c2fab 500 }
<> 132:9baf128c2fab 501
<> 132:9baf128c2fab 502 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 503 {
<> 132:9baf128c2fab 504 uint32_t result;
<> 132:9baf128c2fab 505
<> 132:9baf128c2fab 506 __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 507 return(result);
<> 132:9baf128c2fab 508 }
<> 132:9baf128c2fab 509
/* Dual 16-bit signed multiplies: SMUAD returns the sum of the two halfword
 * products; the ...X forms first exchange the halfwords of op2; SMLAD adds
 * a 32-bit accumulator op3.  The 64-bit-accumulate forms (SMLALD) below are
 * macros because the accumulator spans a register pair. */
<> 132:9baf128c2fab 510 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 511 {
<> 132:9baf128c2fab 512 uint32_t result;
<> 132:9baf128c2fab 513
<> 132:9baf128c2fab 514 __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 515 return(result);
<> 132:9baf128c2fab 516 }
<> 132:9baf128c2fab 517
<> 132:9baf128c2fab 518 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 519 {
<> 132:9baf128c2fab 520 uint32_t result;
<> 132:9baf128c2fab 521
<> 132:9baf128c2fab 522 __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 523 return(result);
<> 132:9baf128c2fab 524 }
<> 132:9baf128c2fab 525
<> 132:9baf128c2fab 526 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
<> 132:9baf128c2fab 527 {
<> 132:9baf128c2fab 528 uint32_t result;
<> 132:9baf128c2fab 529
<> 132:9baf128c2fab 530 __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 132:9baf128c2fab 531 return(result);
<> 132:9baf128c2fab 532 }
<> 132:9baf128c2fab 533
<> 132:9baf128c2fab 534 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
<> 132:9baf128c2fab 535 {
<> 132:9baf128c2fab 536 uint32_t result;
<> 132:9baf128c2fab 537
<> 132:9baf128c2fab 538 __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 132:9baf128c2fab 539 return(result);
<> 132:9baf128c2fab 540 }
<> 132:9baf128c2fab 541
/* 64-bit accumulator ARG3 is split into high/low halves, passed as a tied
 * register pair ("0"/"1" constraints), and reassembled into a uint64_t —
 * note the (uint64_t) casts before shifting, which the __SMLSLD macros
 * further down should mirror. */
<> 132:9baf128c2fab 542 #define __SMLALD(ARG1,ARG2,ARG3) \
<> 132:9baf128c2fab 543 ({ \
<> 132:9baf128c2fab 544 uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
<> 132:9baf128c2fab 545 __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
<> 132:9baf128c2fab 546 (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
<> 132:9baf128c2fab 547 })
<> 132:9baf128c2fab 548
<> 132:9baf128c2fab 549 #define __SMLALDX(ARG1,ARG2,ARG3) \
<> 132:9baf128c2fab 550 ({ \
<> 132:9baf128c2fab 551 uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
<> 132:9baf128c2fab 552 __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
<> 132:9baf128c2fab 553 (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
<> 132:9baf128c2fab 554 })
<> 132:9baf128c2fab 555
/* Dual 16-bit signed multiply-subtract: SMUSD returns the difference of the
 * two halfword products; ...X exchanges op2's halfwords first; SMLSD adds a
 * 32-bit accumulator op3. */
<> 132:9baf128c2fab 556 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 557 {
<> 132:9baf128c2fab 558 uint32_t result;
<> 132:9baf128c2fab 559
<> 132:9baf128c2fab 560 __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 561 return(result);
<> 132:9baf128c2fab 562 }
<> 132:9baf128c2fab 563
<> 132:9baf128c2fab 564 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 565 {
<> 132:9baf128c2fab 566 uint32_t result;
<> 132:9baf128c2fab 567
<> 132:9baf128c2fab 568 __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 569 return(result);
<> 132:9baf128c2fab 570 }
<> 132:9baf128c2fab 571
<> 132:9baf128c2fab 572 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
<> 132:9baf128c2fab 573 {
<> 132:9baf128c2fab 574 uint32_t result;
<> 132:9baf128c2fab 575
<> 132:9baf128c2fab 576 __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 132:9baf128c2fab 577 return(result);
<> 132:9baf128c2fab 578 }
<> 132:9baf128c2fab 579
<> 132:9baf128c2fab 580 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
<> 132:9baf128c2fab 581 {
<> 132:9baf128c2fab 582 uint32_t result;
<> 132:9baf128c2fab 583
<> 132:9baf128c2fab 584 __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 132:9baf128c2fab 585 return(result);
<> 132:9baf128c2fab 586 }
<> 132:9baf128c2fab 587
/* SMLSLD: dual signed 16x16 multiply, difference of products added into the
 * 64-bit accumulator ARG3. Returns the updated 64-bit accumulator.
 *
 * Fix: cast ARG3 to uint64_t before the >>32 / mask, matching __SMLALD and
 * __SMLALDX above. Without the cast, passing a 32-bit accumulator makes the
 * shift-by-32 undefined behavior and the high word garbage.
 */
#define __SMLSLD(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), \
           __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), \
           __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
})
<> 132:9baf128c2fab 594
/* SMLSLDX: as __SMLSLD with the halfwords of ARG2 exchanged first.
 *
 * Fix: cast ARG3 to uint64_t before the >>32 / mask, matching __SMLALD and
 * __SMLALDX above; otherwise a 32-bit ARG3 makes the shift-by-32 undefined
 * behavior.
 */
#define __SMLSLDX(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), \
           __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), \
           __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
})
<> 132:9baf128c2fab 601
<> 132:9baf128c2fab 602 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 603 {
<> 132:9baf128c2fab 604 uint32_t result;
<> 132:9baf128c2fab 605
<> 132:9baf128c2fab 606 __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 607 return(result);
<> 132:9baf128c2fab 608 }
<> 132:9baf128c2fab 609
<> 132:9baf128c2fab 610 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 611 {
<> 132:9baf128c2fab 612 uint32_t result;
<> 132:9baf128c2fab 613
<> 132:9baf128c2fab 614 __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 615 return(result);
<> 132:9baf128c2fab 616 }
<> 132:9baf128c2fab 617
<> 132:9baf128c2fab 618 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 619 {
<> 132:9baf128c2fab 620 uint32_t result;
<> 132:9baf128c2fab 621
<> 132:9baf128c2fab 622 __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 623 return(result);
<> 132:9baf128c2fab 624 }
<> 132:9baf128c2fab 625
/* PKHBT: pack halfwords — bottom half of ARG1 with top half of
 * (ARG2 << ARG3). ARG3 must be a constant shift amount (asm "I" constraint).
 *
 * Fix: parenthesize ARG3 in the expansion; an expression argument would
 * otherwise bind incorrectly inside the asm operand.
 */
#define __PKHBT(ARG1,ARG2,ARG3) \
({ \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" ((ARG3)) ); \
  __RES; \
})
<> 132:9baf128c2fab 632
/* PKHTB: pack halfwords — top half of ARG1 with bottom half of
 * (ARG2 >> ARG3). ARG3 must be a constant shift amount (asm "I" constraint);
 * a shift of 0 needs the no-shift encoding, hence the branch.
 *
 * Fix: parenthesize ARG3 everywhere it is expanded (comparison and asm
 * operand) so expression arguments cannot misparse.
 */
#define __PKHTB(ARG1,ARG2,ARG3) \
({ \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if ((ARG3) == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" ((ARG3)) ); \
  __RES; \
})
<> 132:9baf128c2fab 642
/* SMMLA: signed most-significant-word multiply-accumulate —
 * result = op3 + (top 32 bits of op1 * op2).
 * NOTE(review): declared to return uint32_t although 'result' and all
 * operands are int32_t; the implicit conversion preserves the bit pattern,
 * but later CMSIS releases declare this intrinsic as int32_t — confirm
 * against callers before changing the signature.
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
  int32_t result;

  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
<> 132:9baf128c2fab 650
<> 132:9baf128c2fab 651 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 132:9baf128c2fab 652
<> 132:9baf128c2fab 653
<> 132:9baf128c2fab 654
<> 132:9baf128c2fab 655 #elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
<> 132:9baf128c2fab 656 /* TASKING carm specific functions */
<> 132:9baf128c2fab 657
<> 132:9baf128c2fab 658
<> 132:9baf128c2fab 659 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 132:9baf128c2fab 660 /* not yet supported */
<> 132:9baf128c2fab 661 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 132:9baf128c2fab 662
<> 132:9baf128c2fab 663
<> 132:9baf128c2fab 664 #endif
<> 132:9baf128c2fab 665
<> 132:9baf128c2fab 666 /*@} end of group CMSIS_SIMD_intrinsics */
<> 132:9baf128c2fab 667
<> 132:9baf128c2fab 668
<> 132:9baf128c2fab 669 #endif /* __CORE_CM4_SIMD_H */
<> 132:9baf128c2fab 670
<> 132:9baf128c2fab 671 #ifdef __cplusplus
<> 132:9baf128c2fab 672 }
<> 132:9baf128c2fab 673 #endif