提交 c51695db 编写于 作者: M Mans Rullgard

ARM: fix MUL64 inline asm for pre-armv6

Prior to ARMv6, the destination registers of the SMULL instruction
must be distinct from the first source register.  Marking the
output early-clobber ensures it is allocated unique registers.

This restriction is dropped in ARMv6 and later, so allowing overlap
between input and output registers there might give better code.
Signed-off-by: Mans Rullgard <mans@mansr.com>
上级 5ac4952a
......@@ -41,6 +41,8 @@ static inline av_const int MULL(int a, int b, unsigned shift)
}
#define MULH MULH
#define MUL64 MUL64
#if HAVE_ARMV6
static inline av_const int MULH(int a, int b)
{
......@@ -48,6 +50,13 @@ static inline av_const int MULH(int a, int b)
__asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
return r;
}
/* 32x32 -> 64 bit signed multiply via a single SMULL instruction
 * (ARMv6+ path).  %Q0 and %R0 are GCC ARM template modifiers that
 * select the low and high 32-bit halves of the 64-bit output x.
 * From ARMv6 on, the SMULL destination registers are allowed to
 * overlap the source registers, so the output deliberately uses a
 * plain "=r" constraint (no early-clobber), letting the register
 * allocator reuse input registers for potentially better code. */
static inline av_const int64_t MUL64(int a, int b)
{
int64_t x;
__asm__ ("smull %Q0, %R0, %1, %2" : "=r"(x) : "r"(a), "r"(b));
return x;
}
#else
static inline av_const int MULH(int a, int b)
{
......@@ -55,15 +64,14 @@ static inline av_const int MULH(int a, int b)
__asm__ ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));
return hi;
}
#endif
/* 32x32 -> 64 bit signed multiply via SMULL (pre-ARMv6 path).
 * %Q0 and %R0 are GCC ARM template modifiers selecting the low and
 * high 32-bit halves of the 64-bit output x.
 * Prior to ARMv6, the destination registers of SMULL must be
 * distinct from the first source register; the early-clobber '&'
 * on the output forces the allocator to give the result registers
 * that do not overlap any input register. */
static inline av_const int64_t MUL64(int a, int b)
{
    int64_t x;
    __asm__ ("smull %Q0, %R0, %1, %2" : "=&r"(x) : "r"(a), "r"(b));
    return x;
}
#define MUL64 MUL64
#endif
static inline av_const int64_t MAC64(int64_t d, int a, int b)
{
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册