The official Mbed 2 C/C++ SDK provides the software platform and libraries to build your applications.

Dependents:   hello SerialTestv11 SerialTestv12 Sierpinski ... more

mbed 2

This is the mbed 2 library. If you'd like to learn about Mbed OS please see the mbed-os docs.

Committer:
<>
Date:
Mon Jan 16 12:05:23 2017 +0000
Revision:
134:ad3be0349dc5
Parent:
129:0ab6a29f35bf
Release 134 of the mbed library

Ports for Upcoming Targets


Fixes and Changes

3488: Dev stm i2c v2 unitary functions https://github.com/ARMmbed/mbed-os/pull/3488
3492: Fix #3463 CAN read() return value https://github.com/ARMmbed/mbed-os/pull/3492
3503: [LPC15xx] Ensure that PWM=1 is resolved correctly https://github.com/ARMmbed/mbed-os/pull/3503
3504: [LPC15xx] CAN implementation improvements https://github.com/ARMmbed/mbed-os/pull/3504
3539: NUCLEO_F412ZG - Add support of TRNG peripheral https://github.com/ARMmbed/mbed-os/pull/3539
3540: STM: SPI: Initialize Rx in spi_master_write https://github.com/ARMmbed/mbed-os/pull/3540
3438: K64F: Add support for SERIAL ASYNCH API https://github.com/ARMmbed/mbed-os/pull/3438
3519: MCUXpresso: Fix ENET driver to enable interrupts after interrupt handler is set https://github.com/ARMmbed/mbed-os/pull/3519
3544: STM32L4 deepsleep improvement https://github.com/ARMmbed/mbed-os/pull/3544
3546: NUCLEO-F412ZG - Add CAN peripheral https://github.com/ARMmbed/mbed-os/pull/3546
3551: Fix I2C driver for RZ/A1H https://github.com/ARMmbed/mbed-os/pull/3551
3558: K64F UART Asynch API: Fix synchronization issue https://github.com/ARMmbed/mbed-os/pull/3558
3563: LPC4088 - Fix vector checksum https://github.com/ARMmbed/mbed-os/pull/3563
3567: Dev stm32 F0 v1.7.0 https://github.com/ARMmbed/mbed-os/pull/3567
3577: Fixes linking errors when building with debug profile https://github.com/ARMmbed/mbed-os/pull/3577

Who changed what in which revision?

User | Revision | Line number | New contents of line
<> 129:0ab6a29f35bf 1 /**************************************************************************//**
<> 129:0ab6a29f35bf 2 * @file core_cm4_simd.h
<> 129:0ab6a29f35bf 3 * @brief CMSIS Cortex-M4 SIMD Header File
<> 129:0ab6a29f35bf 4 * @version V3.20
<> 129:0ab6a29f35bf 5 * @date 25. February 2013
<> 129:0ab6a29f35bf 6 *
<> 129:0ab6a29f35bf 7 * @note
<> 129:0ab6a29f35bf 8 *
<> 129:0ab6a29f35bf 9 ******************************************************************************/
<> 129:0ab6a29f35bf 10 /* Copyright (c) 2009 - 2013 ARM LIMITED
<> 129:0ab6a29f35bf 11
<> 129:0ab6a29f35bf 12 All rights reserved.
<> 129:0ab6a29f35bf 13 Redistribution and use in source and binary forms, with or without
<> 129:0ab6a29f35bf 14 modification, are permitted provided that the following conditions are met:
<> 129:0ab6a29f35bf 15 - Redistributions of source code must retain the above copyright
<> 129:0ab6a29f35bf 16 notice, this list of conditions and the following disclaimer.
<> 129:0ab6a29f35bf 17 - Redistributions in binary form must reproduce the above copyright
<> 129:0ab6a29f35bf 18 notice, this list of conditions and the following disclaimer in the
<> 129:0ab6a29f35bf 19 documentation and/or other materials provided with the distribution.
<> 129:0ab6a29f35bf 20 - Neither the name of ARM nor the names of its contributors may be used
<> 129:0ab6a29f35bf 21 to endorse or promote products derived from this software without
<> 129:0ab6a29f35bf 22 specific prior written permission.
<> 129:0ab6a29f35bf 23 *
<> 129:0ab6a29f35bf 24 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
<> 129:0ab6a29f35bf 25 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
<> 129:0ab6a29f35bf 26 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
<> 129:0ab6a29f35bf 27 ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
<> 129:0ab6a29f35bf 28 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
<> 129:0ab6a29f35bf 29 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
<> 129:0ab6a29f35bf 30 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
<> 129:0ab6a29f35bf 31 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
<> 129:0ab6a29f35bf 32 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
<> 129:0ab6a29f35bf 33 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
<> 129:0ab6a29f35bf 34 POSSIBILITY OF SUCH DAMAGE.
<> 129:0ab6a29f35bf 35 ---------------------------------------------------------------------------*/
<> 129:0ab6a29f35bf 36
<> 129:0ab6a29f35bf 37
<> 129:0ab6a29f35bf 38 #ifdef __cplusplus
<> 129:0ab6a29f35bf 39 extern "C" {
<> 129:0ab6a29f35bf 40 #endif
<> 129:0ab6a29f35bf 41
<> 129:0ab6a29f35bf 42 #ifndef __CORE_CM4_SIMD_H
<> 129:0ab6a29f35bf 43 #define __CORE_CM4_SIMD_H
<> 129:0ab6a29f35bf 44
<> 129:0ab6a29f35bf 45
<> 129:0ab6a29f35bf 46 /*******************************************************************************
<> 129:0ab6a29f35bf 47 * Hardware Abstraction Layer
<> 129:0ab6a29f35bf 48 ******************************************************************************/
<> 129:0ab6a29f35bf 49
<> 129:0ab6a29f35bf 50
<> 129:0ab6a29f35bf 51 /* ################### Compiler specific Intrinsics ########################### */
<> 129:0ab6a29f35bf 52 /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
<> 129:0ab6a29f35bf 53 Access to dedicated SIMD instructions
<> 129:0ab6a29f35bf 54 @{
<> 129:0ab6a29f35bf 55 */
<> 129:0ab6a29f35bf 56
<> 129:0ab6a29f35bf 57 #if defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
<> 129:0ab6a29f35bf 58 /* ARM armcc specific functions */
<> 129:0ab6a29f35bf 59
<> 129:0ab6a29f35bf 60 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 129:0ab6a29f35bf 61 #define __SADD8 __sadd8
<> 129:0ab6a29f35bf 62 #define __QADD8 __qadd8
<> 129:0ab6a29f35bf 63 #define __SHADD8 __shadd8
<> 129:0ab6a29f35bf 64 #define __UADD8 __uadd8
<> 129:0ab6a29f35bf 65 #define __UQADD8 __uqadd8
<> 129:0ab6a29f35bf 66 #define __UHADD8 __uhadd8
<> 129:0ab6a29f35bf 67 #define __SSUB8 __ssub8
<> 129:0ab6a29f35bf 68 #define __QSUB8 __qsub8
<> 129:0ab6a29f35bf 69 #define __SHSUB8 __shsub8
<> 129:0ab6a29f35bf 70 #define __USUB8 __usub8
<> 129:0ab6a29f35bf 71 #define __UQSUB8 __uqsub8
<> 129:0ab6a29f35bf 72 #define __UHSUB8 __uhsub8
<> 129:0ab6a29f35bf 73 #define __SADD16 __sadd16
<> 129:0ab6a29f35bf 74 #define __QADD16 __qadd16
<> 129:0ab6a29f35bf 75 #define __SHADD16 __shadd16
<> 129:0ab6a29f35bf 76 #define __UADD16 __uadd16
<> 129:0ab6a29f35bf 77 #define __UQADD16 __uqadd16
<> 129:0ab6a29f35bf 78 #define __UHADD16 __uhadd16
<> 129:0ab6a29f35bf 79 #define __SSUB16 __ssub16
<> 129:0ab6a29f35bf 80 #define __QSUB16 __qsub16
<> 129:0ab6a29f35bf 81 #define __SHSUB16 __shsub16
<> 129:0ab6a29f35bf 82 #define __USUB16 __usub16
<> 129:0ab6a29f35bf 83 #define __UQSUB16 __uqsub16
<> 129:0ab6a29f35bf 84 #define __UHSUB16 __uhsub16
<> 129:0ab6a29f35bf 85 #define __SASX __sasx
<> 129:0ab6a29f35bf 86 #define __QASX __qasx
<> 129:0ab6a29f35bf 87 #define __SHASX __shasx
<> 129:0ab6a29f35bf 88 #define __UASX __uasx
<> 129:0ab6a29f35bf 89 #define __UQASX __uqasx
<> 129:0ab6a29f35bf 90 #define __UHASX __uhasx
<> 129:0ab6a29f35bf 91 #define __SSAX __ssax
<> 129:0ab6a29f35bf 92 #define __QSAX __qsax
<> 129:0ab6a29f35bf 93 #define __SHSAX __shsax
<> 129:0ab6a29f35bf 94 #define __USAX __usax
<> 129:0ab6a29f35bf 95 #define __UQSAX __uqsax
<> 129:0ab6a29f35bf 96 #define __UHSAX __uhsax
<> 129:0ab6a29f35bf 97 #define __USAD8 __usad8
<> 129:0ab6a29f35bf 98 #define __USADA8 __usada8
<> 129:0ab6a29f35bf 99 #define __SSAT16 __ssat16
<> 129:0ab6a29f35bf 100 #define __USAT16 __usat16
<> 129:0ab6a29f35bf 101 #define __UXTB16 __uxtb16
<> 129:0ab6a29f35bf 102 #define __UXTAB16 __uxtab16
<> 129:0ab6a29f35bf 103 #define __SXTB16 __sxtb16
<> 129:0ab6a29f35bf 104 #define __SXTAB16 __sxtab16
<> 129:0ab6a29f35bf 105 #define __SMUAD __smuad
<> 129:0ab6a29f35bf 106 #define __SMUADX __smuadx
<> 129:0ab6a29f35bf 107 #define __SMLAD __smlad
<> 129:0ab6a29f35bf 108 #define __SMLADX __smladx
<> 129:0ab6a29f35bf 109 #define __SMLALD __smlald
<> 129:0ab6a29f35bf 110 #define __SMLALDX __smlaldx
<> 129:0ab6a29f35bf 111 #define __SMUSD __smusd
<> 129:0ab6a29f35bf 112 #define __SMUSDX __smusdx
<> 129:0ab6a29f35bf 113 #define __SMLSD __smlsd
<> 129:0ab6a29f35bf 114 #define __SMLSDX __smlsdx
<> 129:0ab6a29f35bf 115 #define __SMLSLD __smlsld
<> 129:0ab6a29f35bf 116 #define __SMLSLDX __smlsldx
<> 129:0ab6a29f35bf 117 #define __SEL __sel
<> 129:0ab6a29f35bf 118 #define __QADD __qadd
<> 129:0ab6a29f35bf 119 #define __QSUB __qsub
<> 129:0ab6a29f35bf 120
<> 129:0ab6a29f35bf 121 #define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
<> 129:0ab6a29f35bf 122 ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )
<> 129:0ab6a29f35bf 123
<> 129:0ab6a29f35bf 124 #define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \
<> 129:0ab6a29f35bf 125 ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) )
<> 129:0ab6a29f35bf 126
<> 129:0ab6a29f35bf 127 #define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
<> 129:0ab6a29f35bf 128 ((int64_t)(ARG3) << 32) ) >> 32))
<> 129:0ab6a29f35bf 129
<> 129:0ab6a29f35bf 130 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 129:0ab6a29f35bf 131
<> 129:0ab6a29f35bf 132
<> 129:0ab6a29f35bf 133
<> 129:0ab6a29f35bf 134 #elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
<> 129:0ab6a29f35bf 135 /* IAR iccarm specific functions */
<> 129:0ab6a29f35bf 136
<> 129:0ab6a29f35bf 137 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 129:0ab6a29f35bf 138 #include <cmsis_iar.h>
<> 129:0ab6a29f35bf 139
<> 129:0ab6a29f35bf 140 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 129:0ab6a29f35bf 141
<> 129:0ab6a29f35bf 142
<> 129:0ab6a29f35bf 143
<> 129:0ab6a29f35bf 144 #elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
<> 129:0ab6a29f35bf 145 /* TI CCS specific functions */
<> 129:0ab6a29f35bf 146
<> 129:0ab6a29f35bf 147 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 129:0ab6a29f35bf 148 #include <cmsis_ccs.h>
<> 129:0ab6a29f35bf 149
<> 129:0ab6a29f35bf 150 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 129:0ab6a29f35bf 151
<> 129:0ab6a29f35bf 152
<> 129:0ab6a29f35bf 153
<> 129:0ab6a29f35bf 154 #elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
<> 129:0ab6a29f35bf 155 /* GNU gcc specific functions */
<> 129:0ab6a29f35bf 156
<> 129:0ab6a29f35bf 157 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 129:0ab6a29f35bf 158 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 159 {
<> 129:0ab6a29f35bf 160 uint32_t result;
<> 129:0ab6a29f35bf 161
<> 129:0ab6a29f35bf 162 __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 163 return(result);
<> 129:0ab6a29f35bf 164 }
<> 129:0ab6a29f35bf 165
<> 129:0ab6a29f35bf 166 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 167 {
<> 129:0ab6a29f35bf 168 uint32_t result;
<> 129:0ab6a29f35bf 169
<> 129:0ab6a29f35bf 170 __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 171 return(result);
<> 129:0ab6a29f35bf 172 }
<> 129:0ab6a29f35bf 173
<> 129:0ab6a29f35bf 174 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 175 {
<> 129:0ab6a29f35bf 176 uint32_t result;
<> 129:0ab6a29f35bf 177
<> 129:0ab6a29f35bf 178 __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 179 return(result);
<> 129:0ab6a29f35bf 180 }
<> 129:0ab6a29f35bf 181
<> 129:0ab6a29f35bf 182 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 183 {
<> 129:0ab6a29f35bf 184 uint32_t result;
<> 129:0ab6a29f35bf 185
<> 129:0ab6a29f35bf 186 __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 187 return(result);
<> 129:0ab6a29f35bf 188 }
<> 129:0ab6a29f35bf 189
<> 129:0ab6a29f35bf 190 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 191 {
<> 129:0ab6a29f35bf 192 uint32_t result;
<> 129:0ab6a29f35bf 193
<> 129:0ab6a29f35bf 194 __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 195 return(result);
<> 129:0ab6a29f35bf 196 }
<> 129:0ab6a29f35bf 197
<> 129:0ab6a29f35bf 198 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 199 {
<> 129:0ab6a29f35bf 200 uint32_t result;
<> 129:0ab6a29f35bf 201
<> 129:0ab6a29f35bf 202 __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 203 return(result);
<> 129:0ab6a29f35bf 204 }
<> 129:0ab6a29f35bf 205
<> 129:0ab6a29f35bf 206
<> 129:0ab6a29f35bf 207 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 208 {
<> 129:0ab6a29f35bf 209 uint32_t result;
<> 129:0ab6a29f35bf 210
<> 129:0ab6a29f35bf 211 __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 212 return(result);
<> 129:0ab6a29f35bf 213 }
<> 129:0ab6a29f35bf 214
<> 129:0ab6a29f35bf 215 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 216 {
<> 129:0ab6a29f35bf 217 uint32_t result;
<> 129:0ab6a29f35bf 218
<> 129:0ab6a29f35bf 219 __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 220 return(result);
<> 129:0ab6a29f35bf 221 }
<> 129:0ab6a29f35bf 222
<> 129:0ab6a29f35bf 223 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 224 {
<> 129:0ab6a29f35bf 225 uint32_t result;
<> 129:0ab6a29f35bf 226
<> 129:0ab6a29f35bf 227 __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 228 return(result);
<> 129:0ab6a29f35bf 229 }
<> 129:0ab6a29f35bf 230
<> 129:0ab6a29f35bf 231 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 232 {
<> 129:0ab6a29f35bf 233 uint32_t result;
<> 129:0ab6a29f35bf 234
<> 129:0ab6a29f35bf 235 __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 236 return(result);
<> 129:0ab6a29f35bf 237 }
<> 129:0ab6a29f35bf 238
<> 129:0ab6a29f35bf 239 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 240 {
<> 129:0ab6a29f35bf 241 uint32_t result;
<> 129:0ab6a29f35bf 242
<> 129:0ab6a29f35bf 243 __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 244 return(result);
<> 129:0ab6a29f35bf 245 }
<> 129:0ab6a29f35bf 246
<> 129:0ab6a29f35bf 247 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 248 {
<> 129:0ab6a29f35bf 249 uint32_t result;
<> 129:0ab6a29f35bf 250
<> 129:0ab6a29f35bf 251 __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 252 return(result);
<> 129:0ab6a29f35bf 253 }
<> 129:0ab6a29f35bf 254
<> 129:0ab6a29f35bf 255
<> 129:0ab6a29f35bf 256 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 257 {
<> 129:0ab6a29f35bf 258 uint32_t result;
<> 129:0ab6a29f35bf 259
<> 129:0ab6a29f35bf 260 __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 261 return(result);
<> 129:0ab6a29f35bf 262 }
<> 129:0ab6a29f35bf 263
<> 129:0ab6a29f35bf 264 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 265 {
<> 129:0ab6a29f35bf 266 uint32_t result;
<> 129:0ab6a29f35bf 267
<> 129:0ab6a29f35bf 268 __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 269 return(result);
<> 129:0ab6a29f35bf 270 }
<> 129:0ab6a29f35bf 271
<> 129:0ab6a29f35bf 272 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 273 {
<> 129:0ab6a29f35bf 274 uint32_t result;
<> 129:0ab6a29f35bf 275
<> 129:0ab6a29f35bf 276 __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 277 return(result);
<> 129:0ab6a29f35bf 278 }
<> 129:0ab6a29f35bf 279
<> 129:0ab6a29f35bf 280 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 281 {
<> 129:0ab6a29f35bf 282 uint32_t result;
<> 129:0ab6a29f35bf 283
<> 129:0ab6a29f35bf 284 __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 285 return(result);
<> 129:0ab6a29f35bf 286 }
<> 129:0ab6a29f35bf 287
<> 129:0ab6a29f35bf 288 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 289 {
<> 129:0ab6a29f35bf 290 uint32_t result;
<> 129:0ab6a29f35bf 291
<> 129:0ab6a29f35bf 292 __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 293 return(result);
<> 129:0ab6a29f35bf 294 }
<> 129:0ab6a29f35bf 295
<> 129:0ab6a29f35bf 296 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 297 {
<> 129:0ab6a29f35bf 298 uint32_t result;
<> 129:0ab6a29f35bf 299
<> 129:0ab6a29f35bf 300 __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 301 return(result);
<> 129:0ab6a29f35bf 302 }
<> 129:0ab6a29f35bf 303
<> 129:0ab6a29f35bf 304 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 305 {
<> 129:0ab6a29f35bf 306 uint32_t result;
<> 129:0ab6a29f35bf 307
<> 129:0ab6a29f35bf 308 __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 309 return(result);
<> 129:0ab6a29f35bf 310 }
<> 129:0ab6a29f35bf 311
<> 129:0ab6a29f35bf 312 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 313 {
<> 129:0ab6a29f35bf 314 uint32_t result;
<> 129:0ab6a29f35bf 315
<> 129:0ab6a29f35bf 316 __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 317 return(result);
<> 129:0ab6a29f35bf 318 }
<> 129:0ab6a29f35bf 319
<> 129:0ab6a29f35bf 320 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 321 {
<> 129:0ab6a29f35bf 322 uint32_t result;
<> 129:0ab6a29f35bf 323
<> 129:0ab6a29f35bf 324 __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 325 return(result);
<> 129:0ab6a29f35bf 326 }
<> 129:0ab6a29f35bf 327
<> 129:0ab6a29f35bf 328 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 329 {
<> 129:0ab6a29f35bf 330 uint32_t result;
<> 129:0ab6a29f35bf 331
<> 129:0ab6a29f35bf 332 __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 333 return(result);
<> 129:0ab6a29f35bf 334 }
<> 129:0ab6a29f35bf 335
<> 129:0ab6a29f35bf 336 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 337 {
<> 129:0ab6a29f35bf 338 uint32_t result;
<> 129:0ab6a29f35bf 339
<> 129:0ab6a29f35bf 340 __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 341 return(result);
<> 129:0ab6a29f35bf 342 }
<> 129:0ab6a29f35bf 343
<> 129:0ab6a29f35bf 344 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 345 {
<> 129:0ab6a29f35bf 346 uint32_t result;
<> 129:0ab6a29f35bf 347
<> 129:0ab6a29f35bf 348 __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 349 return(result);
<> 129:0ab6a29f35bf 350 }
<> 129:0ab6a29f35bf 351
<> 129:0ab6a29f35bf 352 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 353 {
<> 129:0ab6a29f35bf 354 uint32_t result;
<> 129:0ab6a29f35bf 355
<> 129:0ab6a29f35bf 356 __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 357 return(result);
<> 129:0ab6a29f35bf 358 }
<> 129:0ab6a29f35bf 359
<> 129:0ab6a29f35bf 360 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 361 {
<> 129:0ab6a29f35bf 362 uint32_t result;
<> 129:0ab6a29f35bf 363
<> 129:0ab6a29f35bf 364 __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 365 return(result);
<> 129:0ab6a29f35bf 366 }
<> 129:0ab6a29f35bf 367
<> 129:0ab6a29f35bf 368 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 369 {
<> 129:0ab6a29f35bf 370 uint32_t result;
<> 129:0ab6a29f35bf 371
<> 129:0ab6a29f35bf 372 __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 373 return(result);
<> 129:0ab6a29f35bf 374 }
<> 129:0ab6a29f35bf 375
<> 129:0ab6a29f35bf 376 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 377 {
<> 129:0ab6a29f35bf 378 uint32_t result;
<> 129:0ab6a29f35bf 379
<> 129:0ab6a29f35bf 380 __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 381 return(result);
<> 129:0ab6a29f35bf 382 }
<> 129:0ab6a29f35bf 383
<> 129:0ab6a29f35bf 384 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 385 {
<> 129:0ab6a29f35bf 386 uint32_t result;
<> 129:0ab6a29f35bf 387
<> 129:0ab6a29f35bf 388 __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 389 return(result);
<> 129:0ab6a29f35bf 390 }
<> 129:0ab6a29f35bf 391
<> 129:0ab6a29f35bf 392 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 393 {
<> 129:0ab6a29f35bf 394 uint32_t result;
<> 129:0ab6a29f35bf 395
<> 129:0ab6a29f35bf 396 __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 397 return(result);
<> 129:0ab6a29f35bf 398 }
<> 129:0ab6a29f35bf 399
<> 129:0ab6a29f35bf 400 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 401 {
<> 129:0ab6a29f35bf 402 uint32_t result;
<> 129:0ab6a29f35bf 403
<> 129:0ab6a29f35bf 404 __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 405 return(result);
<> 129:0ab6a29f35bf 406 }
<> 129:0ab6a29f35bf 407
<> 129:0ab6a29f35bf 408 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 409 {
<> 129:0ab6a29f35bf 410 uint32_t result;
<> 129:0ab6a29f35bf 411
<> 129:0ab6a29f35bf 412 __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 413 return(result);
<> 129:0ab6a29f35bf 414 }
<> 129:0ab6a29f35bf 415
<> 129:0ab6a29f35bf 416 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 417 {
<> 129:0ab6a29f35bf 418 uint32_t result;
<> 129:0ab6a29f35bf 419
<> 129:0ab6a29f35bf 420 __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 421 return(result);
<> 129:0ab6a29f35bf 422 }
<> 129:0ab6a29f35bf 423
<> 129:0ab6a29f35bf 424 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 425 {
<> 129:0ab6a29f35bf 426 uint32_t result;
<> 129:0ab6a29f35bf 427
<> 129:0ab6a29f35bf 428 __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 429 return(result);
<> 129:0ab6a29f35bf 430 }
<> 129:0ab6a29f35bf 431
<> 129:0ab6a29f35bf 432 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 433 {
<> 129:0ab6a29f35bf 434 uint32_t result;
<> 129:0ab6a29f35bf 435
<> 129:0ab6a29f35bf 436 __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 437 return(result);
<> 129:0ab6a29f35bf 438 }
<> 129:0ab6a29f35bf 439
<> 129:0ab6a29f35bf 440 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 441 {
<> 129:0ab6a29f35bf 442 uint32_t result;
<> 129:0ab6a29f35bf 443
<> 129:0ab6a29f35bf 444 __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 445 return(result);
<> 129:0ab6a29f35bf 446 }
<> 129:0ab6a29f35bf 447
<> 129:0ab6a29f35bf 448 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 449 {
<> 129:0ab6a29f35bf 450 uint32_t result;
<> 129:0ab6a29f35bf 451
<> 129:0ab6a29f35bf 452 __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 453 return(result);
<> 129:0ab6a29f35bf 454 }
<> 129:0ab6a29f35bf 455
<> 129:0ab6a29f35bf 456 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
<> 129:0ab6a29f35bf 457 {
<> 129:0ab6a29f35bf 458 uint32_t result;
<> 129:0ab6a29f35bf 459
<> 129:0ab6a29f35bf 460 __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 129:0ab6a29f35bf 461 return(result);
<> 129:0ab6a29f35bf 462 }
<> 129:0ab6a29f35bf 463
<> 129:0ab6a29f35bf 464 #define __SSAT16(ARG1,ARG2) \
<> 129:0ab6a29f35bf 465 ({ \
<> 129:0ab6a29f35bf 466 uint32_t __RES, __ARG1 = (ARG1); \
<> 129:0ab6a29f35bf 467 __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
<> 129:0ab6a29f35bf 468 __RES; \
<> 129:0ab6a29f35bf 469 })
<> 129:0ab6a29f35bf 470
<> 129:0ab6a29f35bf 471 #define __USAT16(ARG1,ARG2) \
<> 129:0ab6a29f35bf 472 ({ \
<> 129:0ab6a29f35bf 473 uint32_t __RES, __ARG1 = (ARG1); \
<> 129:0ab6a29f35bf 474 __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
<> 129:0ab6a29f35bf 475 __RES; \
<> 129:0ab6a29f35bf 476 })
<> 129:0ab6a29f35bf 477
<> 129:0ab6a29f35bf 478 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
<> 129:0ab6a29f35bf 479 {
<> 129:0ab6a29f35bf 480 uint32_t result;
<> 129:0ab6a29f35bf 481
<> 129:0ab6a29f35bf 482 __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
<> 129:0ab6a29f35bf 483 return(result);
<> 129:0ab6a29f35bf 484 }
<> 129:0ab6a29f35bf 485
<> 129:0ab6a29f35bf 486 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 487 {
<> 129:0ab6a29f35bf 488 uint32_t result;
<> 129:0ab6a29f35bf 489
<> 129:0ab6a29f35bf 490 __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 491 return(result);
<> 129:0ab6a29f35bf 492 }
<> 129:0ab6a29f35bf 493
<> 129:0ab6a29f35bf 494 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
<> 129:0ab6a29f35bf 495 {
<> 129:0ab6a29f35bf 496 uint32_t result;
<> 129:0ab6a29f35bf 497
<> 129:0ab6a29f35bf 498 __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
<> 129:0ab6a29f35bf 499 return(result);
<> 129:0ab6a29f35bf 500 }
<> 129:0ab6a29f35bf 501
<> 129:0ab6a29f35bf 502 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 503 {
<> 129:0ab6a29f35bf 504 uint32_t result;
<> 129:0ab6a29f35bf 505
<> 129:0ab6a29f35bf 506 __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 507 return(result);
<> 129:0ab6a29f35bf 508 }
<> 129:0ab6a29f35bf 509
<> 129:0ab6a29f35bf 510 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 511 {
<> 129:0ab6a29f35bf 512 uint32_t result;
<> 129:0ab6a29f35bf 513
<> 129:0ab6a29f35bf 514 __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 515 return(result);
<> 129:0ab6a29f35bf 516 }
<> 129:0ab6a29f35bf 517
<> 129:0ab6a29f35bf 518 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 519 {
<> 129:0ab6a29f35bf 520 uint32_t result;
<> 129:0ab6a29f35bf 521
<> 129:0ab6a29f35bf 522 __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 523 return(result);
<> 129:0ab6a29f35bf 524 }
<> 129:0ab6a29f35bf 525
<> 129:0ab6a29f35bf 526 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
<> 129:0ab6a29f35bf 527 {
<> 129:0ab6a29f35bf 528 uint32_t result;
<> 129:0ab6a29f35bf 529
<> 129:0ab6a29f35bf 530 __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 129:0ab6a29f35bf 531 return(result);
<> 129:0ab6a29f35bf 532 }
<> 129:0ab6a29f35bf 533
<> 129:0ab6a29f35bf 534 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
<> 129:0ab6a29f35bf 535 {
<> 129:0ab6a29f35bf 536 uint32_t result;
<> 129:0ab6a29f35bf 537
<> 129:0ab6a29f35bf 538 __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 129:0ab6a29f35bf 539 return(result);
<> 129:0ab6a29f35bf 540 }
<> 129:0ab6a29f35bf 541
<> 129:0ab6a29f35bf 542 #define __SMLALD(ARG1,ARG2,ARG3) \
<> 129:0ab6a29f35bf 543 ({ \
<> 129:0ab6a29f35bf 544 uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
<> 129:0ab6a29f35bf 545 __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
<> 129:0ab6a29f35bf 546 (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
<> 129:0ab6a29f35bf 547 })
<> 129:0ab6a29f35bf 548
<> 129:0ab6a29f35bf 549 #define __SMLALDX(ARG1,ARG2,ARG3) \
<> 129:0ab6a29f35bf 550 ({ \
<> 129:0ab6a29f35bf 551 uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
<> 129:0ab6a29f35bf 552 __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
<> 129:0ab6a29f35bf 553 (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
<> 129:0ab6a29f35bf 554 })
<> 129:0ab6a29f35bf 555
<> 129:0ab6a29f35bf 556 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 557 {
<> 129:0ab6a29f35bf 558 uint32_t result;
<> 129:0ab6a29f35bf 559
<> 129:0ab6a29f35bf 560 __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 561 return(result);
<> 129:0ab6a29f35bf 562 }
<> 129:0ab6a29f35bf 563
<> 129:0ab6a29f35bf 564 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 565 {
<> 129:0ab6a29f35bf 566 uint32_t result;
<> 129:0ab6a29f35bf 567
<> 129:0ab6a29f35bf 568 __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 569 return(result);
<> 129:0ab6a29f35bf 570 }
<> 129:0ab6a29f35bf 571
<> 129:0ab6a29f35bf 572 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
<> 129:0ab6a29f35bf 573 {
<> 129:0ab6a29f35bf 574 uint32_t result;
<> 129:0ab6a29f35bf 575
<> 129:0ab6a29f35bf 576 __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 129:0ab6a29f35bf 577 return(result);
<> 129:0ab6a29f35bf 578 }
<> 129:0ab6a29f35bf 579
<> 129:0ab6a29f35bf 580 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
<> 129:0ab6a29f35bf 581 {
<> 129:0ab6a29f35bf 582 uint32_t result;
<> 129:0ab6a29f35bf 583
<> 129:0ab6a29f35bf 584 __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 129:0ab6a29f35bf 585 return(result);
<> 129:0ab6a29f35bf 586 }
<> 129:0ab6a29f35bf 587
/* Emits the SMLSLD instruction: dual signed 16x16 multiply of the halfwords of
   ARG1 and ARG2, with the difference of the products added to the 64-bit
   accumulator ARG3. Macro form so GCC can tie the accumulator halves to a
   register pair via the "0"/"1" matching constraints.
   FIX: cast ARG3 to uint64_t before the >>32 / mask split, matching
   __SMLALD/__SMLALDX above. Without the cast, an accumulator expression
   narrower than 64 bits would make the 32-bit shift undefined behavior.
   Evaluates to the updated 64-bit accumulator. */
#define __SMLSLD(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
})
<> 129:0ab6a29f35bf 594
/* Emits the SMLSLDX instruction: as __SMLSLD but with the halfwords of ARG2
   exchanged before the dual multiply ("X" = crossed operands).
   FIX: cast ARG3 to uint64_t before the >>32 / mask split, matching
   __SMLALD/__SMLALDX above. Without the cast, an accumulator expression
   narrower than 64 bits would make the 32-bit shift undefined behavior.
   Evaluates to the updated 64-bit accumulator. */
#define __SMLSLDX(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
})
<> 129:0ab6a29f35bf 601
<> 129:0ab6a29f35bf 602 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 603 {
<> 129:0ab6a29f35bf 604 uint32_t result;
<> 129:0ab6a29f35bf 605
<> 129:0ab6a29f35bf 606 __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 607 return(result);
<> 129:0ab6a29f35bf 608 }
<> 129:0ab6a29f35bf 609
<> 129:0ab6a29f35bf 610 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 611 {
<> 129:0ab6a29f35bf 612 uint32_t result;
<> 129:0ab6a29f35bf 613
<> 129:0ab6a29f35bf 614 __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 615 return(result);
<> 129:0ab6a29f35bf 616 }
<> 129:0ab6a29f35bf 617
<> 129:0ab6a29f35bf 618 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
<> 129:0ab6a29f35bf 619 {
<> 129:0ab6a29f35bf 620 uint32_t result;
<> 129:0ab6a29f35bf 621
<> 129:0ab6a29f35bf 622 __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 129:0ab6a29f35bf 623 return(result);
<> 129:0ab6a29f35bf 624 }
<> 129:0ab6a29f35bf 625
/* Emits the PKHBT instruction: packs the bottom halfword of ARG1 with the top
   halfword of (ARG2 << ARG3). Macro form because ARG3 must be an assemble-time
   immediate (the "I" operand constraint); a function parameter could not
   satisfy that. ARG3 is therefore expected to be a constant expression. */
#define __PKHBT(ARG1,ARG2,ARG3) \
({ \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
})
<> 129:0ab6a29f35bf 632
/* Emits the PKHTB instruction: packs the top halfword of ARG1 with the bottom
   halfword of (ARG2 >> ARG3, arithmetic shift). ARG3 must be a constant
   expression (the "I" operand constraint requires an immediate). PKHTB encodes
   no shift amount of 0, so that case falls back to the unshifted form.
   FIX: parenthesize ARG3 in the comparison — with the bare `ARG3 == 0`, an
   argument such as `c ? 2 : 0` would parse as `c ? 2 : (0 == 0)` and select
   the wrong instruction form. */
#define __PKHTB(ARG1,ARG2,ARG3) \
({ \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if ((ARG3) == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
})
<> 129:0ab6a29f35bf 642
<> 129:0ab6a29f35bf 643 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
<> 129:0ab6a29f35bf 644 {
<> 129:0ab6a29f35bf 645 int32_t result;
<> 129:0ab6a29f35bf 646
<> 129:0ab6a29f35bf 647 __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
<> 129:0ab6a29f35bf 648 return(result);
<> 129:0ab6a29f35bf 649 }
<> 129:0ab6a29f35bf 650
<> 129:0ab6a29f35bf 651 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 129:0ab6a29f35bf 652
<> 129:0ab6a29f35bf 653
<> 129:0ab6a29f35bf 654
<> 129:0ab6a29f35bf 655 #elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
<> 129:0ab6a29f35bf 656 /* TASKING carm specific functions */
<> 129:0ab6a29f35bf 657
<> 129:0ab6a29f35bf 658
<> 129:0ab6a29f35bf 659 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 129:0ab6a29f35bf 660 /* not yet supported */
<> 129:0ab6a29f35bf 661 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
<> 129:0ab6a29f35bf 662
<> 129:0ab6a29f35bf 663
<> 129:0ab6a29f35bf 664 #endif
<> 129:0ab6a29f35bf 665
<> 129:0ab6a29f35bf 666 /*@} end of group CMSIS_SIMD_intrinsics */
<> 129:0ab6a29f35bf 667
<> 129:0ab6a29f35bf 668
<> 129:0ab6a29f35bf 669 #endif /* __CORE_CM4_SIMD_H */
<> 129:0ab6a29f35bf 670
<> 129:0ab6a29f35bf 671 #ifdef __cplusplus
<> 129:0ab6a29f35bf 672 }
<> 129:0ab6a29f35bf 673 #endif