mbed library sources

Fork of mbed-src by mbed official

core_cm4_simd.h

/**************************************************************************//**
 * @file     core_cm4_simd.h
 * @brief    CMSIS Cortex-M4 SIMD Header File
 * @version  V3.20
 * @date     25. February 2013
 *
 * @note
 *
 ******************************************************************************/
/* Copyright (c) 2009 - 2013 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.
   *
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/


#ifdef __cplusplus
 extern "C" {
#endif

#ifndef __CORE_CM4_SIMD_H
#define __CORE_CM4_SIMD_H


/*******************************************************************************
 *                Hardware Abstraction Layer
 ******************************************************************************/


/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/

#if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
/* ARM armcc specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
#define __SADD8                           __sadd8
#define __QADD8                           __qadd8
#define __SHADD8                          __shadd8
#define __UADD8                           __uadd8
#define __UQADD8                          __uqadd8
#define __UHADD8                          __uhadd8
#define __SSUB8                           __ssub8
#define __QSUB8                           __qsub8
#define __SHSUB8                          __shsub8
#define __USUB8                           __usub8
#define __UQSUB8                          __uqsub8
#define __UHSUB8                          __uhsub8
#define __SADD16                          __sadd16
#define __QADD16                          __qadd16
#define __SHADD16                         __shadd16
#define __UADD16                          __uadd16
#define __UQADD16                         __uqadd16
#define __UHADD16                         __uhadd16
#define __SSUB16                          __ssub16
#define __QSUB16                          __qsub16
#define __SHSUB16                         __shsub16
#define __USUB16                          __usub16
#define __UQSUB16                         __uqsub16
#define __UHSUB16                         __uhsub16
#define __SASX                            __sasx
#define __QASX                            __qasx
#define __SHASX                           __shasx
#define __UASX                            __uasx
#define __UQASX                           __uqasx
#define __UHASX                           __uhasx
#define __SSAX                            __ssax
#define __QSAX                            __qsax
#define __SHSAX                           __shsax
#define __USAX                            __usax
#define __UQSAX                           __uqsax
#define __UHSAX                           __uhsax
#define __USAD8                           __usad8
#define __USADA8                          __usada8
#define __SSAT16                          __ssat16
#define __USAT16                          __usat16
#define __UXTB16                          __uxtb16
#define __UXTAB16                         __uxtab16
#define __SXTB16                          __sxtb16
#define __SXTAB16                         __sxtab16
#define __SMUAD                           __smuad
#define __SMUADX                          __smuadx
#define __SMLAD                           __smlad
#define __SMLADX                          __smladx
#define __SMLALD                          __smlald
#define __SMLALDX                         __smlaldx
#define __SMUSD                           __smusd
#define __SMUSDX                          __smusdx
#define __SMLSD                           __smlsd
#define __SMLSDX                          __smlsdx
#define __SMLSLD                          __smlsld
#define __SMLSLDX                         __smlsldx
#define __SEL                             __sel
#define __QADD                            __qadd
#define __QSUB                            __qsub

#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )

#define __SMMLA(ARG1,ARG2,ARG3)          ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
                                                      ((int64_t)(ARG3) << 32)      ) >> 32))

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
/* IAR iccarm specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
#include <cmsis_iar.h>

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
/* TI CCS specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
#include <cmsis_ccs.h>

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
/* GNU gcc specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#define __SSAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

#define __USAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#define __SMLALD(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

#define __SMLALDX(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#define __SMLSLD(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((ARG3) >> 32), __ARG3_L = (uint32_t)((ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

#define __SMLSLDX(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((ARG3) >> 32), __ARG3_L = (uint32_t)((ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })

#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2)  ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
 int32_t result;

 __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r"  (op1), "r" (op2), "r" (op3) );
 return(result);
}

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
/* TASKING carm specific functions */


/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
/* not yet supported */
/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/


#endif

/*@} end of group CMSIS_SIMD_intrinsics */


#endif /* __CORE_CM4_SIMD_H */

#ifdef __cplusplus
}
#endif
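
Usage note (not part of the header above): the sketch below shows how application code might call a couple of these intrinsics once a Cortex-M4 device header has pulled this file in through core_cm4.h. It is a minimal, illustrative example; the device header name "stm32f4xx.h" and the helper functions average_bytes and dot2_q15 are assumptions for the sketch, not names from the mbed sources.

/* Illustrative sketch only -- assumes a Cortex-M4 device header (here the
   hypothetical "stm32f4xx.h") is available and includes core_cm4.h, which in
   turn provides the SIMD intrinsics defined above. */
#include <stdint.h>
#include "stm32f4xx.h"

/* Lane-wise average of four unsigned bytes packed in a word: UHADD8 adds
   each byte pair and halves the result, so no lane overflows into its
   neighbour. */
static uint32_t average_bytes(uint32_t a, uint32_t b)
{
  return __UHADD8(a, b);
}

/* Dot product of two pairs of 16-bit samples: PKHBT packs each pair into one
   word (second operand shifted into the top halfword), then SMUAD multiplies
   the low halfwords and the high halfwords and returns the sum of the two
   signed products. */
static int32_t dot2_q15(int16_t a0, int16_t a1, int16_t b0, int16_t b1)
{
  uint32_t a = __PKHBT((uint32_t)(uint16_t)a0, (uint32_t)(uint16_t)a1, 16);
  uint32_t b = __PKHBT((uint32_t)(uint16_t)b0, (uint32_t)(uint16_t)b1, 16);
  return (int32_t)__SMUAD(a, b);
}

The same source builds with armcc as well, since the #define block at the top of the file maps these names onto the compiler's built-in __uhadd8 / __pkhbt / __smuad intrinsics instead of the GCC inline-assembly versions.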