Index: third_party/opus/src/silk/arm/macros_armv4.h
diff --git a/third_party/opus/src/silk/arm/macros_armv4.h b/third_party/opus/src/silk/arm/macros_armv4.h
index 3f30e97288fd77b3a2298df668a419777164c622..877eb18dd58129cc159152dd4b09d6759a84a277 100644
--- a/third_party/opus/src/silk/arm/macros_armv4.h
+++ b/third_party/opus/src/silk/arm/macros_armv4.h
@@ -28,6 +28,11 @@ POSSIBILITY OF SUCH DAMAGE.
 #ifndef SILK_MACROS_ARMv4_H
 #define SILK_MACROS_ARMv4_H
 
+/* This macro only avoids the undefined behaviour from a left shift of
+   a negative value. It should only be used in macros that can't include
+   SigProc_FIX.h. In other cases, use silk_LSHIFT32(). */
+#define SAFE_SHL(a,b) ((opus_int32)((opus_uint32)(a) << (b)))
+
 /* (a32 * (opus_int32)((opus_int16)(b32))) >> 16 output have to be 32bit int */
 #undef silk_SMULWB
 static OPUS_INLINE opus_int32 silk_SMULWB_armv4(opus_int32 a, opus_int16 b)
@@ -38,7 +43,7 @@ static OPUS_INLINE opus_int32 silk_SMULWB_armv4(opus_int32 a, opus_int16 b)
       "#silk_SMULWB\n\t"
       "smull %0, %1, %2, %3\n\t"
       : "=&r"(rd_lo), "=&r"(rd_hi)
-      : "%r"(a), "r"(b<<16)
+      : "%r"(a), "r"(SAFE_SHL(b,16))
   );
   return rd_hi;
 }
@@ -80,7 +85,7 @@ static OPUS_INLINE opus_int32 silk_SMULWW_armv4(opus_int32 a, opus_int32 b)
     : "=&r"(rd_lo), "=&r"(rd_hi)
     : "%r"(a), "r"(b)
   );
-  return (rd_hi<<16)+(rd_lo>>16);
+  return SAFE_SHL(rd_hi,16)+(rd_lo>>16);
 }
 #define silk_SMULWW(a, b) (silk_SMULWW_armv4(a, b))
 
@@ -96,8 +101,10 @@ static OPUS_INLINE opus_int32 silk_SMLAWW_armv4(opus_int32 a, opus_int32 b,
     : "=&r"(rd_lo), "=&r"(rd_hi)
     : "%r"(b), "r"(c)
   );
-  return a+(rd_hi<<16)+(rd_lo>>16);
+  return a+SAFE_SHL(rd_hi,16)+(rd_lo>>16);
 }
 #define silk_SMLAWW(a, b, c) (silk_SMLAWW_armv4(a, b, c))
 
+#undef SAFE_SHL
+
 #endif /* SILK_MACROS_ARMv4_H */
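
Note (not part of the patch): the point of SAFE_SHL is that a left shift of a negative signed value is undefined behaviour in C, whereas shifting the same bits as unsigned and converting back is at worst implementation-defined and simply wraps on common two's-complement targets. The standalone sketch below illustrates the idiom and mirrors what the ARMv4 silk_SMULWW path computes; it uses <stdint.h> types in place of opus_int32/opus_uint32 and a hypothetical smulww_ref() helper, so it is an illustration rather than code from the Opus tree.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the patch's SAFE_SHL: perform the shift on the unsigned
   representation, then convert back to signed.  int32_t stands in for
   opus_int32 here. */
#define SAFE_SHL(a, b) ((int32_t)((uint32_t)(a) << (b)))

/* Hypothetical portable stand-in for what the ARMv4 inline assembly in
   silk_SMULWW_armv4() computes: SMULL forms the 64-bit product, and the
   result is rebuilt from the high and low halves as (a*b) >> 16,
   truncated to 32 bits. */
static int32_t smulww_ref(int32_t a, int32_t b)
{
    int64_t  prod = (int64_t)a * b;        /* what SMULL produces */
    int32_t  hi   = (int32_t)(prod >> 32); /* rd_hi               */
    uint32_t lo   = (uint32_t)prod;        /* rd_lo               */
    /* (hi << 16) would be undefined when hi is negative; SAFE_SHL is
       well defined for the same bit pattern. */
    return SAFE_SHL(hi, 16) + (int32_t)(lo >> 16);
}

int main(void)
{
    int32_t a = -123456789, b = 987654;    /* arbitrary test values */
    int32_t via_macro = smulww_ref(a, b);
    int32_t via_int64 = (int32_t)(((int64_t)a * b) >> 16);
    /* Both paths agree on two's-complement targets. */
    printf("%" PRId32 " %" PRId32 "\n", via_macro, via_int64);
    return 0;
}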