/*
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_ARM_INTMATH_H
#define AVUTIL_ARM_INTMATH_H

#include <stdint.h>

#include "config.h"
#include "libavutil/attributes.h"

#if HAVE_INLINE_ASM

#if HAVE_ARMV6

00033 #define FASTDIV FASTDIV
00034 static av_always_inline av_const int FASTDIV(int a, int b)
00035 {
00036 int r, t;
00037 __asm__ volatile("cmp %3, #2 \n\t"
00038 "ldr %1, [%4, %3, lsl #2] \n\t"
00039 "lsrle %0, %2, #1 \n\t"
00040 "smmulgt %0, %1, %2 \n\t"
00041 : "=&r"(r), "=&r"(t) : "r"(a), "r"(b), "r"(ff_inverse));
00042 return r;
00043 }
00044
00045 #define av_clip_uint8 av_clip_uint8_arm
00046 static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
00047 {
00048 unsigned x;
00049 __asm__ volatile ("usat %0, #8, %1" : "=r"(x) : "r"(a));
00050 return x;
00051 }
00052
00053 #define av_clip_int8 av_clip_int8_arm
00054 static av_always_inline av_const uint8_t av_clip_int8_arm(int a)
00055 {
00056 unsigned x;
00057 __asm__ volatile ("ssat %0, #8, %1" : "=r"(x) : "r"(a));
00058 return x;
00059 }
00060
00061 #define av_clip_uint16 av_clip_uint16_arm
00062 static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
00063 {
00064 unsigned x;
00065 __asm__ volatile ("usat %0, #16, %1" : "=r"(x) : "r"(a));
00066 return x;
00067 }
00068
00069 #define av_clip_int16 av_clip_int16_arm
00070 static av_always_inline av_const int16_t av_clip_int16_arm(int a)
00071 {
00072 int x;
00073 __asm__ volatile ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
00074 return x;
00075 }

#else

00079 #define FASTDIV FASTDIV
00080 static av_always_inline av_const int FASTDIV(int a, int b)
00081 {
00082 int r, t;
00083 __asm__ volatile("umull %1, %0, %2, %3"
00084 : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
00085 return r;
00086 }

#endif

00090 #define av_clipl_int32 av_clipl_int32_arm
00091 static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
00092 {
00093 int x, y;
00094 __asm__ volatile ("adds %1, %R2, %Q2, lsr #31 \n\t"
00095 "mvnne %1, #1<<31 \n\t"
00096 "eorne %0, %1, %R2, asr #31 \n\t"
00097 : "=r"(x), "=&r"(y) : "r"(a));
00098 return x;
00099 }

#endif /* HAVE_INLINE_ASM */

#endif /* AVUTIL_ARM_INTMATH_H */