The official Mbed 2 C/C++ SDK provides the software platform and libraries to build your applications.

Dependents:   hello SerialTestv11 SerialTestv12 Sierpinski ...

mbed 2

This is the mbed 2 library. If you'd like to learn about Mbed OS, please see the mbed-os docs.

Committer: <>
Date: Mon Jan 16 12:05:23 2017 +0000
Revision: 134:ad3be0349dc5
Parent: 132:9baf128c2fab
Release 134 of the mbed library

Ports for Upcoming Targets


Fixes and Changes

3488: Dev stm i2c v2 unitary functions https://github.com/ARMmbed/mbed-os/pull/3488
3492: Fix #3463 CAN read() return value https://github.com/ARMmbed/mbed-os/pull/3492
3503: [LPC15xx] Ensure that PWM=1 is resolved correctly https://github.com/ARMmbed/mbed-os/pull/3503
3504: [LPC15xx] CAN implementation improvements https://github.com/ARMmbed/mbed-os/pull/3504
3539: NUCLEO_F412ZG - Add support of TRNG peripheral https://github.com/ARMmbed/mbed-os/pull/3539
3540: STM: SPI: Initialize Rx in spi_master_write https://github.com/ARMmbed/mbed-os/pull/3540
3438: K64F: Add support for SERIAL ASYNCH API https://github.com/ARMmbed/mbed-os/pull/3438
3519: MCUXpresso: Fix ENET driver to enable interrupts after interrupt handler is set https://github.com/ARMmbed/mbed-os/pull/3519
3544: STM32L4 deepsleep improvement https://github.com/ARMmbed/mbed-os/pull/3544
3546: NUCLEO-F412ZG - Add CAN peripheral https://github.com/ARMmbed/mbed-os/pull/3546
3551: Fix I2C driver for RZ/A1H https://github.com/ARMmbed/mbed-os/pull/3551
3558: K64F UART Asynch API: Fix synchronization issue https://github.com/ARMmbed/mbed-os/pull/3558
3563: LPC4088 - Fix vector checksum https://github.com/ARMmbed/mbed-os/pull/3563
3567: Dev stm32 F0 v1.7.0 https://github.com/ARMmbed/mbed-os/pull/3567
3577: Fixes linking errors when building with debug profile https://github.com/ARMmbed/mbed-os/pull/3577

Who changed what in which revision?

User | Revision | Line number | New contents of line
<> 132:9baf128c2fab 1 /**************************************************************************//**
<> 132:9baf128c2fab 2 * @file core_cmSimd.h
<> 132:9baf128c2fab 3 * @brief CMSIS Cortex-M SIMD Header File
<> 132:9baf128c2fab 4 * @version V4.10
<> 132:9baf128c2fab 5 * @date 18. March 2015
<> 132:9baf128c2fab 6 *
<> 132:9baf128c2fab 7 * @note
<> 132:9baf128c2fab 8 *
<> 132:9baf128c2fab 9 ******************************************************************************/
<> 132:9baf128c2fab 10 /* Copyright (c) 2009 - 2014 ARM LIMITED
<> 132:9baf128c2fab 11
<> 132:9baf128c2fab 12 All rights reserved.
<> 132:9baf128c2fab 13 Redistribution and use in source and binary forms, with or without
<> 132:9baf128c2fab 14 modification, are permitted provided that the following conditions are met:
<> 132:9baf128c2fab 15 - Redistributions of source code must retain the above copyright
<> 132:9baf128c2fab 16 notice, this list of conditions and the following disclaimer.
<> 132:9baf128c2fab 17 - Redistributions in binary form must reproduce the above copyright
<> 132:9baf128c2fab 18 notice, this list of conditions and the following disclaimer in the
<> 132:9baf128c2fab 19 documentation and/or other materials provided with the distribution.
<> 132:9baf128c2fab 20 - Neither the name of ARM nor the names of its contributors may be used
<> 132:9baf128c2fab 21 to endorse or promote products derived from this software without
<> 132:9baf128c2fab 22 specific prior written permission.
<> 132:9baf128c2fab 23 *
<> 132:9baf128c2fab 24 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
<> 132:9baf128c2fab 25 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
<> 132:9baf128c2fab 26 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
<> 132:9baf128c2fab 27 ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
<> 132:9baf128c2fab 28 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
<> 132:9baf128c2fab 29 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
<> 132:9baf128c2fab 30 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
<> 132:9baf128c2fab 31 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
<> 132:9baf128c2fab 32 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
<> 132:9baf128c2fab 33 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
<> 132:9baf128c2fab 34 POSSIBILITY OF SUCH DAMAGE.
<> 132:9baf128c2fab 35 ---------------------------------------------------------------------------*/
<> 132:9baf128c2fab 36
<> 132:9baf128c2fab 37
<> 132:9baf128c2fab 38 #if defined ( __ICCARM__ )
<> 132:9baf128c2fab 39 #pragma system_include /* treat file as system include file for MISRA check */
<> 132:9baf128c2fab 40 #endif
<> 132:9baf128c2fab 41
<> 132:9baf128c2fab 42 #ifndef __CORE_CMSIMD_H
<> 132:9baf128c2fab 43 #define __CORE_CMSIMD_H
<> 132:9baf128c2fab 44
<> 132:9baf128c2fab 45 #ifdef __cplusplus
<> 132:9baf128c2fab 46 extern "C" {
<> 132:9baf128c2fab 47 #endif
<> 132:9baf128c2fab 48
<> 132:9baf128c2fab 49
<> 132:9baf128c2fab 50 /*******************************************************************************
<> 132:9baf128c2fab 51 * Hardware Abstraction Layer
<> 132:9baf128c2fab 52 ******************************************************************************/
<> 132:9baf128c2fab 53
<> 132:9baf128c2fab 54
<> 132:9baf128c2fab 55 /* ################### Compiler specific Intrinsics ########################### */
<> 132:9baf128c2fab 56 /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
<> 132:9baf128c2fab 57 Access to dedicated SIMD instructions
<> 132:9baf128c2fab 58 @{
<> 132:9baf128c2fab 59 */
<> 132:9baf128c2fab 60
<> 132:9baf128c2fab 61 #if defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
<> 132:9baf128c2fab 62 /* ARM armcc specific functions */
<> 132:9baf128c2fab 63 #define __SADD8 __sadd8
<> 132:9baf128c2fab 64 #define __QADD8 __qadd8
<> 132:9baf128c2fab 65 #define __SHADD8 __shadd8
<> 132:9baf128c2fab 66 #define __UADD8 __uadd8
<> 132:9baf128c2fab 67 #define __UQADD8 __uqadd8
<> 132:9baf128c2fab 68 #define __UHADD8 __uhadd8
<> 132:9baf128c2fab 69 #define __SSUB8 __ssub8
<> 132:9baf128c2fab 70 #define __QSUB8 __qsub8
<> 132:9baf128c2fab 71 #define __SHSUB8 __shsub8
<> 132:9baf128c2fab 72 #define __USUB8 __usub8
<> 132:9baf128c2fab 73 #define __UQSUB8 __uqsub8
<> 132:9baf128c2fab 74 #define __UHSUB8 __uhsub8
<> 132:9baf128c2fab 75 #define __SADD16 __sadd16
<> 132:9baf128c2fab 76 #define __QADD16 __qadd16
<> 132:9baf128c2fab 77 #define __SHADD16 __shadd16
<> 132:9baf128c2fab 78 #define __UADD16 __uadd16
<> 132:9baf128c2fab 79 #define __UQADD16 __uqadd16
<> 132:9baf128c2fab 80 #define __UHADD16 __uhadd16
<> 132:9baf128c2fab 81 #define __SSUB16 __ssub16
<> 132:9baf128c2fab 82 #define __QSUB16 __qsub16
<> 132:9baf128c2fab 83 #define __SHSUB16 __shsub16
<> 132:9baf128c2fab 84 #define __USUB16 __usub16
<> 132:9baf128c2fab 85 #define __UQSUB16 __uqsub16
<> 132:9baf128c2fab 86 #define __UHSUB16 __uhsub16
<> 132:9baf128c2fab 87 #define __SASX __sasx
<> 132:9baf128c2fab 88 #define __QASX __qasx
<> 132:9baf128c2fab 89 #define __SHASX __shasx
<> 132:9baf128c2fab 90 #define __UASX __uasx
<> 132:9baf128c2fab 91 #define __UQASX __uqasx
<> 132:9baf128c2fab 92 #define __UHASX __uhasx
<> 132:9baf128c2fab 93 #define __SSAX __ssax
<> 132:9baf128c2fab 94 #define __QSAX __qsax
<> 132:9baf128c2fab 95 #define __SHSAX __shsax
<> 132:9baf128c2fab 96 #define __USAX __usax
<> 132:9baf128c2fab 97 #define __UQSAX __uqsax
<> 132:9baf128c2fab 98 #define __UHSAX __uhsax
<> 132:9baf128c2fab 99 #define __USAD8 __usad8
<> 132:9baf128c2fab 100 #define __USADA8 __usada8
<> 132:9baf128c2fab 101 #define __SSAT16 __ssat16
<> 132:9baf128c2fab 102 #define __USAT16 __usat16
<> 132:9baf128c2fab 103 #define __UXTB16 __uxtb16
<> 132:9baf128c2fab 104 #define __UXTAB16 __uxtab16
<> 132:9baf128c2fab 105 #define __SXTB16 __sxtb16
<> 132:9baf128c2fab 106 #define __SXTAB16 __sxtab16
<> 132:9baf128c2fab 107 #define __SMUAD __smuad
<> 132:9baf128c2fab 108 #define __SMUADX __smuadx
<> 132:9baf128c2fab 109 #define __SMLAD __smlad
<> 132:9baf128c2fab 110 #define __SMLADX __smladx
<> 132:9baf128c2fab 111 #define __SMLALD __smlald
<> 132:9baf128c2fab 112 #define __SMLALDX __smlaldx
<> 132:9baf128c2fab 113 #define __SMUSD __smusd
<> 132:9baf128c2fab 114 #define __SMUSDX __smusdx
<> 132:9baf128c2fab 115 #define __SMLSD __smlsd
<> 132:9baf128c2fab 116 #define __SMLSDX __smlsdx
<> 132:9baf128c2fab 117 #define __SMLSLD __smlsld
<> 132:9baf128c2fab 118 #define __SMLSLDX __smlsldx
<> 132:9baf128c2fab 119 #define __SEL __sel
<> 132:9baf128c2fab 120 #define __QADD __qadd
<> 132:9baf128c2fab 121 #define __QSUB __qsub
<> 132:9baf128c2fab 122
<> 132:9baf128c2fab 123 #define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
<> 132:9baf128c2fab 124 ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )
<> 132:9baf128c2fab 125
<> 132:9baf128c2fab 126 #define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \
<> 132:9baf128c2fab 127 ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) )
<> 132:9baf128c2fab 128
<> 132:9baf128c2fab 129 #define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
<> 132:9baf128c2fab 130 ((int64_t)(ARG3) << 32) ) >> 32))
<> 132:9baf128c2fab 131
<> 132:9baf128c2fab 132
<> 132:9baf128c2fab 133 #elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
<> 132:9baf128c2fab 134 /* GNU gcc specific functions */
<> 132:9baf128c2fab 135 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 136 {
<> 132:9baf128c2fab 137 uint32_t result;
<> 132:9baf128c2fab 138
<> 132:9baf128c2fab 139 __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 140 return(result);
<> 132:9baf128c2fab 141 }
<> 132:9baf128c2fab 142
<> 132:9baf128c2fab 143 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 144 {
<> 132:9baf128c2fab 145 uint32_t result;
<> 132:9baf128c2fab 146
<> 132:9baf128c2fab 147 __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 148 return(result);
<> 132:9baf128c2fab 149 }
<> 132:9baf128c2fab 150
<> 132:9baf128c2fab 151 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 152 {
<> 132:9baf128c2fab 153 uint32_t result;
<> 132:9baf128c2fab 154
<> 132:9baf128c2fab 155 __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 156 return(result);
<> 132:9baf128c2fab 157 }
<> 132:9baf128c2fab 158
<> 132:9baf128c2fab 159 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 160 {
<> 132:9baf128c2fab 161 uint32_t result;
<> 132:9baf128c2fab 162
<> 132:9baf128c2fab 163 __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 164 return(result);
<> 132:9baf128c2fab 165 }
<> 132:9baf128c2fab 166
<> 132:9baf128c2fab 167 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 168 {
<> 132:9baf128c2fab 169 uint32_t result;
<> 132:9baf128c2fab 170
<> 132:9baf128c2fab 171 __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 172 return(result);
<> 132:9baf128c2fab 173 }
<> 132:9baf128c2fab 174
<> 132:9baf128c2fab 175 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 176 {
<> 132:9baf128c2fab 177 uint32_t result;
<> 132:9baf128c2fab 178
<> 132:9baf128c2fab 179 __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 180 return(result);
<> 132:9baf128c2fab 181 }
<> 132:9baf128c2fab 182
<> 132:9baf128c2fab 183
<> 132:9baf128c2fab 184 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 185 {
<> 132:9baf128c2fab 186 uint32_t result;
<> 132:9baf128c2fab 187
<> 132:9baf128c2fab 188 __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 189 return(result);
<> 132:9baf128c2fab 190 }
<> 132:9baf128c2fab 191
<> 132:9baf128c2fab 192 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 193 {
<> 132:9baf128c2fab 194 uint32_t result;
<> 132:9baf128c2fab 195
<> 132:9baf128c2fab 196 __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 197 return(result);
<> 132:9baf128c2fab 198 }
<> 132:9baf128c2fab 199
<> 132:9baf128c2fab 200 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 201 {
<> 132:9baf128c2fab 202 uint32_t result;
<> 132:9baf128c2fab 203
<> 132:9baf128c2fab 204 __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 205 return(result);
<> 132:9baf128c2fab 206 }
<> 132:9baf128c2fab 207
<> 132:9baf128c2fab 208 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 209 {
<> 132:9baf128c2fab 210 uint32_t result;
<> 132:9baf128c2fab 211
<> 132:9baf128c2fab 212 __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 213 return(result);
<> 132:9baf128c2fab 214 }
<> 132:9baf128c2fab 215
<> 132:9baf128c2fab 216 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 217 {
<> 132:9baf128c2fab 218 uint32_t result;
<> 132:9baf128c2fab 219
<> 132:9baf128c2fab 220 __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 221 return(result);
<> 132:9baf128c2fab 222 }
<> 132:9baf128c2fab 223
<> 132:9baf128c2fab 224 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 225 {
<> 132:9baf128c2fab 226 uint32_t result;
<> 132:9baf128c2fab 227
<> 132:9baf128c2fab 228 __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 229 return(result);
<> 132:9baf128c2fab 230 }
<> 132:9baf128c2fab 231
<> 132:9baf128c2fab 232
<> 132:9baf128c2fab 233 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 234 {
<> 132:9baf128c2fab 235 uint32_t result;
<> 132:9baf128c2fab 236
<> 132:9baf128c2fab 237 __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 238 return(result);
<> 132:9baf128c2fab 239 }
<> 132:9baf128c2fab 240
<> 132:9baf128c2fab 241 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 242 {
<> 132:9baf128c2fab 243 uint32_t result;
<> 132:9baf128c2fab 244
<> 132:9baf128c2fab 245 __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 246 return(result);
<> 132:9baf128c2fab 247 }
<> 132:9baf128c2fab 248
<> 132:9baf128c2fab 249 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 250 {
<> 132:9baf128c2fab 251 uint32_t result;
<> 132:9baf128c2fab 252
<> 132:9baf128c2fab 253 __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 254 return(result);
<> 132:9baf128c2fab 255 }
<> 132:9baf128c2fab 256
<> 132:9baf128c2fab 257 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 258 {
<> 132:9baf128c2fab 259 uint32_t result;
<> 132:9baf128c2fab 260
<> 132:9baf128c2fab 261 __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 262 return(result);
<> 132:9baf128c2fab 263 }
<> 132:9baf128c2fab 264
<> 132:9baf128c2fab 265 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 266 {
<> 132:9baf128c2fab 267 uint32_t result;
<> 132:9baf128c2fab 268
<> 132:9baf128c2fab 269 __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 270 return(result);
<> 132:9baf128c2fab 271 }
<> 132:9baf128c2fab 272
<> 132:9baf128c2fab 273 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 274 {
<> 132:9baf128c2fab 275 uint32_t result;
<> 132:9baf128c2fab 276
<> 132:9baf128c2fab 277 __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 278 return(result);
<> 132:9baf128c2fab 279 }
<> 132:9baf128c2fab 280
<> 132:9baf128c2fab 281 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 282 {
<> 132:9baf128c2fab 283 uint32_t result;
<> 132:9baf128c2fab 284
<> 132:9baf128c2fab 285 __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 286 return(result);
<> 132:9baf128c2fab 287 }
<> 132:9baf128c2fab 288
<> 132:9baf128c2fab 289 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 290 {
<> 132:9baf128c2fab 291 uint32_t result;
<> 132:9baf128c2fab 292
<> 132:9baf128c2fab 293 __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 294 return(result);
<> 132:9baf128c2fab 295 }
<> 132:9baf128c2fab 296
<> 132:9baf128c2fab 297 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 298 {
<> 132:9baf128c2fab 299 uint32_t result;
<> 132:9baf128c2fab 300
<> 132:9baf128c2fab 301 __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 302 return(result);
<> 132:9baf128c2fab 303 }
<> 132:9baf128c2fab 304
<> 132:9baf128c2fab 305 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 306 {
<> 132:9baf128c2fab 307 uint32_t result;
<> 132:9baf128c2fab 308
<> 132:9baf128c2fab 309 __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 310 return(result);
<> 132:9baf128c2fab 311 }
<> 132:9baf128c2fab 312
<> 132:9baf128c2fab 313 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 314 {
<> 132:9baf128c2fab 315 uint32_t result;
<> 132:9baf128c2fab 316
<> 132:9baf128c2fab 317 __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 318 return(result);
<> 132:9baf128c2fab 319 }
<> 132:9baf128c2fab 320
<> 132:9baf128c2fab 321 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 322 {
<> 132:9baf128c2fab 323 uint32_t result;
<> 132:9baf128c2fab 324
<> 132:9baf128c2fab 325 __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 326 return(result);
<> 132:9baf128c2fab 327 }
<> 132:9baf128c2fab 328
<> 132:9baf128c2fab 329 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 330 {
<> 132:9baf128c2fab 331 uint32_t result;
<> 132:9baf128c2fab 332
<> 132:9baf128c2fab 333 __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 334 return(result);
<> 132:9baf128c2fab 335 }
<> 132:9baf128c2fab 336
<> 132:9baf128c2fab 337 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 338 {
<> 132:9baf128c2fab 339 uint32_t result;
<> 132:9baf128c2fab 340
<> 132:9baf128c2fab 341 __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 342 return(result);
<> 132:9baf128c2fab 343 }
<> 132:9baf128c2fab 344
<> 132:9baf128c2fab 345 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 346 {
<> 132:9baf128c2fab 347 uint32_t result;
<> 132:9baf128c2fab 348
<> 132:9baf128c2fab 349 __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 350 return(result);
<> 132:9baf128c2fab 351 }
<> 132:9baf128c2fab 352
<> 132:9baf128c2fab 353 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 354 {
<> 132:9baf128c2fab 355 uint32_t result;
<> 132:9baf128c2fab 356
<> 132:9baf128c2fab 357 __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 358 return(result);
<> 132:9baf128c2fab 359 }
<> 132:9baf128c2fab 360
<> 132:9baf128c2fab 361 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 362 {
<> 132:9baf128c2fab 363 uint32_t result;
<> 132:9baf128c2fab 364
<> 132:9baf128c2fab 365 __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 366 return(result);
<> 132:9baf128c2fab 367 }
<> 132:9baf128c2fab 368
<> 132:9baf128c2fab 369 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 370 {
<> 132:9baf128c2fab 371 uint32_t result;
<> 132:9baf128c2fab 372
<> 132:9baf128c2fab 373 __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 374 return(result);
<> 132:9baf128c2fab 375 }
<> 132:9baf128c2fab 376
<> 132:9baf128c2fab 377 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 378 {
<> 132:9baf128c2fab 379 uint32_t result;
<> 132:9baf128c2fab 380
<> 132:9baf128c2fab 381 __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 382 return(result);
<> 132:9baf128c2fab 383 }
<> 132:9baf128c2fab 384
<> 132:9baf128c2fab 385 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 386 {
<> 132:9baf128c2fab 387 uint32_t result;
<> 132:9baf128c2fab 388
<> 132:9baf128c2fab 389 __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 390 return(result);
<> 132:9baf128c2fab 391 }
<> 132:9baf128c2fab 392
<> 132:9baf128c2fab 393 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 394 {
<> 132:9baf128c2fab 395 uint32_t result;
<> 132:9baf128c2fab 396
<> 132:9baf128c2fab 397 __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 398 return(result);
<> 132:9baf128c2fab 399 }
<> 132:9baf128c2fab 400
<> 132:9baf128c2fab 401 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 402 {
<> 132:9baf128c2fab 403 uint32_t result;
<> 132:9baf128c2fab 404
<> 132:9baf128c2fab 405 __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 406 return(result);
<> 132:9baf128c2fab 407 }
<> 132:9baf128c2fab 408
<> 132:9baf128c2fab 409 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 410 {
<> 132:9baf128c2fab 411 uint32_t result;
<> 132:9baf128c2fab 412
<> 132:9baf128c2fab 413 __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 414 return(result);
<> 132:9baf128c2fab 415 }
<> 132:9baf128c2fab 416
<> 132:9baf128c2fab 417 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 418 {
<> 132:9baf128c2fab 419 uint32_t result;
<> 132:9baf128c2fab 420
<> 132:9baf128c2fab 421 __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 422 return(result);
<> 132:9baf128c2fab 423 }
<> 132:9baf128c2fab 424
<> 132:9baf128c2fab 425 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 426 {
<> 132:9baf128c2fab 427 uint32_t result;
<> 132:9baf128c2fab 428
<> 132:9baf128c2fab 429 __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 430 return(result);
<> 132:9baf128c2fab 431 }
<> 132:9baf128c2fab 432
<> 132:9baf128c2fab 433 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
<> 132:9baf128c2fab 434 {
<> 132:9baf128c2fab 435 uint32_t result;
<> 132:9baf128c2fab 436
<> 132:9baf128c2fab 437 __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 132:9baf128c2fab 438 return(result);
<> 132:9baf128c2fab 439 }
<> 132:9baf128c2fab 440
<> 132:9baf128c2fab 441 #define __SSAT16(ARG1,ARG2) \
<> 132:9baf128c2fab 442 ({ \
<> 132:9baf128c2fab 443 uint32_t __RES, __ARG1 = (ARG1); \
<> 132:9baf128c2fab 444 __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
<> 132:9baf128c2fab 445 __RES; \
<> 132:9baf128c2fab 446 })
<> 132:9baf128c2fab 447
<> 132:9baf128c2fab 448 #define __USAT16(ARG1,ARG2) \
<> 132:9baf128c2fab 449 ({ \
<> 132:9baf128c2fab 450 uint32_t __RES, __ARG1 = (ARG1); \
<> 132:9baf128c2fab 451 __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
<> 132:9baf128c2fab 452 __RES; \
<> 132:9baf128c2fab 453 })
<> 132:9baf128c2fab 454
<> 132:9baf128c2fab 455 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
<> 132:9baf128c2fab 456 {
<> 132:9baf128c2fab 457 uint32_t result;
<> 132:9baf128c2fab 458
<> 132:9baf128c2fab 459 __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
<> 132:9baf128c2fab 460 return(result);
<> 132:9baf128c2fab 461 }
<> 132:9baf128c2fab 462
<> 132:9baf128c2fab 463 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 464 {
<> 132:9baf128c2fab 465 uint32_t result;
<> 132:9baf128c2fab 466
<> 132:9baf128c2fab 467 __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 468 return(result);
<> 132:9baf128c2fab 469 }
<> 132:9baf128c2fab 470
<> 132:9baf128c2fab 471 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
<> 132:9baf128c2fab 472 {
<> 132:9baf128c2fab 473 uint32_t result;
<> 132:9baf128c2fab 474
<> 132:9baf128c2fab 475 __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
<> 132:9baf128c2fab 476 return(result);
<> 132:9baf128c2fab 477 }
<> 132:9baf128c2fab 478
<> 132:9baf128c2fab 479 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 480 {
<> 132:9baf128c2fab 481 uint32_t result;
<> 132:9baf128c2fab 482
<> 132:9baf128c2fab 483 __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 484 return(result);
<> 132:9baf128c2fab 485 }
<> 132:9baf128c2fab 486
<> 132:9baf128c2fab 487 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 488 {
<> 132:9baf128c2fab 489 uint32_t result;
<> 132:9baf128c2fab 490
<> 132:9baf128c2fab 491 __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 492 return(result);
<> 132:9baf128c2fab 493 }
<> 132:9baf128c2fab 494
<> 132:9baf128c2fab 495 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 496 {
<> 132:9baf128c2fab 497 uint32_t result;
<> 132:9baf128c2fab 498
<> 132:9baf128c2fab 499 __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 500 return(result);
<> 132:9baf128c2fab 501 }
<> 132:9baf128c2fab 502
<> 132:9baf128c2fab 503 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
<> 132:9baf128c2fab 504 {
<> 132:9baf128c2fab 505 uint32_t result;
<> 132:9baf128c2fab 506
<> 132:9baf128c2fab 507 __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 132:9baf128c2fab 508 return(result);
<> 132:9baf128c2fab 509 }
<> 132:9baf128c2fab 510
<> 132:9baf128c2fab 511 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
<> 132:9baf128c2fab 512 {
<> 132:9baf128c2fab 513 uint32_t result;
<> 132:9baf128c2fab 514
<> 132:9baf128c2fab 515 __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 132:9baf128c2fab 516 return(result);
<> 132:9baf128c2fab 517 }
<> 132:9baf128c2fab 518
<> 132:9baf128c2fab 519 __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
<> 132:9baf128c2fab 520 {
<> 132:9baf128c2fab 521 union llreg_u{
<> 132:9baf128c2fab 522 uint32_t w32[2];
<> 132:9baf128c2fab 523 uint64_t w64;
<> 132:9baf128c2fab 524 } llr;
<> 132:9baf128c2fab 525 llr.w64 = acc;
<> 132:9baf128c2fab 526
<> 132:9baf128c2fab 527 #ifndef __ARMEB__ // Little endian
<> 132:9baf128c2fab 528 __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
<> 132:9baf128c2fab 529 #else // Big endian
<> 132:9baf128c2fab 530 __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
<> 132:9baf128c2fab 531 #endif
<> 132:9baf128c2fab 532
<> 132:9baf128c2fab 533 return(llr.w64);
<> 132:9baf128c2fab 534 }
<> 132:9baf128c2fab 535
<> 132:9baf128c2fab 536 __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
<> 132:9baf128c2fab 537 {
<> 132:9baf128c2fab 538 union llreg_u{
<> 132:9baf128c2fab 539 uint32_t w32[2];
<> 132:9baf128c2fab 540 uint64_t w64;
<> 132:9baf128c2fab 541 } llr;
<> 132:9baf128c2fab 542 llr.w64 = acc;
<> 132:9baf128c2fab 543
<> 132:9baf128c2fab 544 #ifndef __ARMEB__ // Little endian
<> 132:9baf128c2fab 545 __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
<> 132:9baf128c2fab 546 #else // Big endian
<> 132:9baf128c2fab 547 __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
<> 132:9baf128c2fab 548 #endif
<> 132:9baf128c2fab 549
<> 132:9baf128c2fab 550 return(llr.w64);
<> 132:9baf128c2fab 551 }
<> 132:9baf128c2fab 552
<> 132:9baf128c2fab 553 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 554 {
<> 132:9baf128c2fab 555 uint32_t result;
<> 132:9baf128c2fab 556
<> 132:9baf128c2fab 557 __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 558 return(result);
<> 132:9baf128c2fab 559 }
<> 132:9baf128c2fab 560
<> 132:9baf128c2fab 561 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 562 {
<> 132:9baf128c2fab 563 uint32_t result;
<> 132:9baf128c2fab 564
<> 132:9baf128c2fab 565 __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 566 return(result);
<> 132:9baf128c2fab 567 }
<> 132:9baf128c2fab 568
<> 132:9baf128c2fab 569 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
<> 132:9baf128c2fab 570 {
<> 132:9baf128c2fab 571 uint32_t result;
<> 132:9baf128c2fab 572
<> 132:9baf128c2fab 573 __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 132:9baf128c2fab 574 return(result);
<> 132:9baf128c2fab 575 }
<> 132:9baf128c2fab 576
<> 132:9baf128c2fab 577 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
<> 132:9baf128c2fab 578 {
<> 132:9baf128c2fab 579 uint32_t result;
<> 132:9baf128c2fab 580
<> 132:9baf128c2fab 581 __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
<> 132:9baf128c2fab 582 return(result);
<> 132:9baf128c2fab 583 }
<> 132:9baf128c2fab 584
<> 132:9baf128c2fab 585 __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
<> 132:9baf128c2fab 586 {
<> 132:9baf128c2fab 587 union llreg_u{
<> 132:9baf128c2fab 588 uint32_t w32[2];
<> 132:9baf128c2fab 589 uint64_t w64;
<> 132:9baf128c2fab 590 } llr;
<> 132:9baf128c2fab 591 llr.w64 = acc;
<> 132:9baf128c2fab 592
<> 132:9baf128c2fab 593 #ifndef __ARMEB__ // Little endian
<> 132:9baf128c2fab 594 __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
<> 132:9baf128c2fab 595 #else // Big endian
<> 132:9baf128c2fab 596 __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
<> 132:9baf128c2fab 597 #endif
<> 132:9baf128c2fab 598
<> 132:9baf128c2fab 599 return(llr.w64);
<> 132:9baf128c2fab 600 }
<> 132:9baf128c2fab 601
<> 132:9baf128c2fab 602 __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
<> 132:9baf128c2fab 603 {
<> 132:9baf128c2fab 604 union llreg_u{
<> 132:9baf128c2fab 605 uint32_t w32[2];
<> 132:9baf128c2fab 606 uint64_t w64;
<> 132:9baf128c2fab 607 } llr;
<> 132:9baf128c2fab 608 llr.w64 = acc;
<> 132:9baf128c2fab 609
<> 132:9baf128c2fab 610 #ifndef __ARMEB__ // Little endian
<> 132:9baf128c2fab 611 __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
<> 132:9baf128c2fab 612 #else // Big endian
<> 132:9baf128c2fab 613 __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
<> 132:9baf128c2fab 614 #endif
<> 132:9baf128c2fab 615
<> 132:9baf128c2fab 616 return(llr.w64);
<> 132:9baf128c2fab 617 }
<> 132:9baf128c2fab 618
<> 132:9baf128c2fab 619 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 620 {
<> 132:9baf128c2fab 621 uint32_t result;
<> 132:9baf128c2fab 622
<> 132:9baf128c2fab 623 __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 624 return(result);
<> 132:9baf128c2fab 625 }
<> 132:9baf128c2fab 626
<> 132:9baf128c2fab 627 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 628 {
<> 132:9baf128c2fab 629 uint32_t result;
<> 132:9baf128c2fab 630
<> 132:9baf128c2fab 631 __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 632 return(result);
<> 132:9baf128c2fab 633 }
<> 132:9baf128c2fab 634
<> 132:9baf128c2fab 635 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
<> 132:9baf128c2fab 636 {
<> 132:9baf128c2fab 637 uint32_t result;
<> 132:9baf128c2fab 638
<> 132:9baf128c2fab 639 __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
<> 132:9baf128c2fab 640 return(result);
<> 132:9baf128c2fab 641 }
<> 132:9baf128c2fab 642
<> 132:9baf128c2fab 643 #define __PKHBT(ARG1,ARG2,ARG3) \
<> 132:9baf128c2fab 644 ({ \
<> 132:9baf128c2fab 645 uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
<> 132:9baf128c2fab 646 __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
<> 132:9baf128c2fab 647 __RES; \
<> 132:9baf128c2fab 648 })
<> 132:9baf128c2fab 649
<> 132:9baf128c2fab 650 #define __PKHTB(ARG1,ARG2,ARG3) \
<> 132:9baf128c2fab 651 ({ \
<> 132:9baf128c2fab 652 uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
<> 132:9baf128c2fab 653 if (ARG3 == 0) \
<> 132:9baf128c2fab 654 __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
<> 132:9baf128c2fab 655 else \
<> 132:9baf128c2fab 656 __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
<> 132:9baf128c2fab 657 __RES; \
<> 132:9baf128c2fab 658 })
<> 132:9baf128c2fab 659
<> 132:9baf128c2fab 660 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
<> 132:9baf128c2fab 661 {
<> 132:9baf128c2fab 662 int32_t result;
<> 132:9baf128c2fab 663
<> 132:9baf128c2fab 664 __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
<> 132:9baf128c2fab 665 return(result);
<> 132:9baf128c2fab 666 }
<> 132:9baf128c2fab 667
<> 132:9baf128c2fab 668
<> 132:9baf128c2fab 669 #elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
<> 132:9baf128c2fab 670 /* IAR iccarm specific functions */
<> 132:9baf128c2fab 671 #include <cmsis_iar.h>
<> 132:9baf128c2fab 672
<> 132:9baf128c2fab 673
<> 132:9baf128c2fab 674 #elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
<> 132:9baf128c2fab 675 /* TI CCS specific functions */
<> 132:9baf128c2fab 676 #include <cmsis_ccs.h>
<> 132:9baf128c2fab 677
<> 132:9baf128c2fab 678
<> 132:9baf128c2fab 679 #elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
<> 132:9baf128c2fab 680 /* TASKING carm specific functions */
<> 132:9baf128c2fab 681 /* not yet supported */
<> 132:9baf128c2fab 682
<> 132:9baf128c2fab 683
<> 132:9baf128c2fab 684 #elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/
<> 132:9baf128c2fab 685 /* Cosmic specific functions */
<> 132:9baf128c2fab 686 #include <cmsis_csm.h>
<> 132:9baf128c2fab 687
<> 132:9baf128c2fab 688 #endif
<> 132:9baf128c2fab 689
<> 132:9baf128c2fab 690 /*@} end of group CMSIS_SIMD_intrinsics */
<> 132:9baf128c2fab 691
<> 132:9baf128c2fab 692
<> 132:9baf128c2fab 693 #ifdef __cplusplus
<> 132:9baf128c2fab 694 }
<> 132:9baf128c2fab 695 #endif
<> 132:9baf128c2fab 696
<> 132:9baf128c2fab 697 #endif /* __CORE_CMSIMD_H */
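
For context, the file listed above is the CMSIS-CORE SIMD header: on Cortex-M4/M7 parts it maps the __SADD8/__SMUAD/... names either to the compiler's own intrinsics (armcc, IAR, TI, Cosmic) or to inline assembly (GCC), so the same application code builds on every supported toolchain. The sketch below is a minimal, hypothetical usage example and is not part of the library; the function and buffer names are illustrative, and it assumes a DSP-capable target (e.g. Cortex-M4) where mbed.h ultimately includes core_cm4.h and hence this header.

#include "mbed.h"      /* assumption: on a Cortex-M4/M7 target this pulls in core_cm4.h,
                          which in turn includes core_cmSimd.h */
#include <stdint.h>

/* Hypothetical helper: dot product of Q15 samples, two per 32-bit word.
 * Each word packs two signed 16-bit values in its low and high halfwords. */
static int32_t dot_q15_packed(const uint32_t *a, const uint32_t *b, uint32_t n_words)
{
    int32_t acc = 0;
    for (uint32_t i = 0U; i < n_words; i++) {
        /* __SMUAD: (low16 * low16) + (high16 * high16), both products signed */
        acc += (int32_t)__SMUAD(a[i], b[i]);
    }
    return acc;
}

Because every toolchain branch of the header resolves __SMUAD to the same smuad instruction, this compiles identically under armcc, GCC, and IAR; on cores without the DSP extension (e.g. Cortex-M0/M0+), core_cmSimd.h is not included and these names are not available.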