提交 d3034d31 编写于 作者: Andy Polyakov

ec/asm/ecp_nistz256-x86_64.pl: /cmovb/cmovc/ as nasm doesn't recognize cmovb.

Reviewed-by: Richard Levitte <levitte@openssl.org>
Reviewed-by: Matt Caswell <matt@openssl.org>
上级 efba7787
...@@ -156,12 +156,12 @@ ecp_nistz256_mul_by_2: ...@@ -156,12 +156,12 @@ ecp_nistz256_mul_by_2:
sbb 8*3($a_ptr), $a3 sbb 8*3($a_ptr), $a3
sbb \$0, $t4 sbb \$0, $t4
cmovb $t0, $a0 cmovc $t0, $a0
cmovb $t1, $a1 cmovc $t1, $a1
mov $a0, 8*0($r_ptr) mov $a0, 8*0($r_ptr)
cmovb $t2, $a2 cmovc $t2, $a2
mov $a1, 8*1($r_ptr) mov $a1, 8*1($r_ptr)
cmovb $t3, $a3 cmovc $t3, $a3
mov $a2, 8*2($r_ptr) mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr) mov $a3, 8*3($r_ptr)
...@@ -260,10 +260,10 @@ ecp_nistz256_mul_by_3: ...@@ -260,10 +260,10 @@ ecp_nistz256_mul_by_3:
sbb .Lpoly+8*3(%rip), $a3 sbb .Lpoly+8*3(%rip), $a3
sbb \$0, $t4 sbb \$0, $t4
cmovb $t0, $a0 cmovc $t0, $a0
cmovb $t1, $a1 cmovc $t1, $a1
cmovb $t2, $a2 cmovc $t2, $a2
cmovb $t3, $a3 cmovc $t3, $a3
xor $t4, $t4 xor $t4, $t4
add 8*0($a_ptr), $a0 # a0:a3+=a_ptr[0:3] add 8*0($a_ptr), $a0 # a0:a3+=a_ptr[0:3]
...@@ -282,12 +282,12 @@ ecp_nistz256_mul_by_3: ...@@ -282,12 +282,12 @@ ecp_nistz256_mul_by_3:
sbb .Lpoly+8*3(%rip), $a3 sbb .Lpoly+8*3(%rip), $a3
sbb \$0, $t4 sbb \$0, $t4
cmovb $t0, $a0 cmovc $t0, $a0
cmovb $t1, $a1 cmovc $t1, $a1
mov $a0, 8*0($r_ptr) mov $a0, 8*0($r_ptr)
cmovb $t2, $a2 cmovc $t2, $a2
mov $a1, 8*1($r_ptr) mov $a1, 8*1($r_ptr)
cmovb $t3, $a3 cmovc $t3, $a3
mov $a2, 8*2($r_ptr) mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr) mov $a3, 8*3($r_ptr)
...@@ -328,12 +328,12 @@ ecp_nistz256_add: ...@@ -328,12 +328,12 @@ ecp_nistz256_add:
sbb 8*3($a_ptr), $a3 sbb 8*3($a_ptr), $a3
sbb \$0, $t4 sbb \$0, $t4
cmovb $t0, $a0 cmovc $t0, $a0
cmovb $t1, $a1 cmovc $t1, $a1
mov $a0, 8*0($r_ptr) mov $a0, 8*0($r_ptr)
cmovb $t2, $a2 cmovc $t2, $a2
mov $a1, 8*1($r_ptr) mov $a1, 8*1($r_ptr)
cmovb $t3, $a3 cmovc $t3, $a3
mov $a2, 8*2($r_ptr) mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr) mov $a3, 8*3($r_ptr)
...@@ -1908,12 +1908,12 @@ __ecp_nistz256_add_toq: ...@@ -1908,12 +1908,12 @@ __ecp_nistz256_add_toq:
sbb $poly3, $a3 sbb $poly3, $a3
sbb \$0, $t4 sbb \$0, $t4
cmovb $t0, $a0 cmovc $t0, $a0
cmovb $t1, $a1 cmovc $t1, $a1
mov $a0, 8*0($r_ptr) mov $a0, 8*0($r_ptr)
cmovb $t2, $a2 cmovc $t2, $a2
mov $a1, 8*1($r_ptr) mov $a1, 8*1($r_ptr)
cmovb $t3, $a3 cmovc $t3, $a3
mov $a2, 8*2($r_ptr) mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr) mov $a3, 8*3($r_ptr)
...@@ -1998,12 +1998,12 @@ __ecp_nistz256_mul_by_2q: ...@@ -1998,12 +1998,12 @@ __ecp_nistz256_mul_by_2q:
sbb $poly3, $a3 sbb $poly3, $a3
sbb \$0, $t4 sbb \$0, $t4
cmovb $t0, $a0 cmovc $t0, $a0
cmovb $t1, $a1 cmovc $t1, $a1
mov $a0, 8*0($r_ptr) mov $a0, 8*0($r_ptr)
cmovb $t2, $a2 cmovc $t2, $a2
mov $a1, 8*1($r_ptr) mov $a1, 8*1($r_ptr)
cmovb $t3, $a3 cmovc $t3, $a3
mov $a2, 8*2($r_ptr) mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr) mov $a3, 8*3($r_ptr)
...@@ -2474,13 +2474,13 @@ $code.=<<___; ...@@ -2474,13 +2474,13 @@ $code.=<<___;
sbb $poly3, $acc3 sbb $poly3, $acc3
sbb \$0, $t4 sbb \$0, $t4
cmovb $t0, $acc0 cmovc $t0, $acc0
mov 8*0($a_ptr), $t0 mov 8*0($a_ptr), $t0
cmovb $t1, $acc1 cmovc $t1, $acc1
mov 8*1($a_ptr), $t1 mov 8*1($a_ptr), $t1
cmovb $t2, $acc2 cmovc $t2, $acc2
mov 8*2($a_ptr), $t2 mov 8*2($a_ptr), $t2
cmovb $t3, $acc3 cmovc $t3, $acc3
mov 8*3($a_ptr), $t3 mov 8*3($a_ptr), $t3
call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr); call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr);
...@@ -2778,13 +2778,13 @@ $code.=<<___; ...@@ -2778,13 +2778,13 @@ $code.=<<___;
sbb $poly3, $acc3 sbb $poly3, $acc3
sbb \$0, $t4 sbb \$0, $t4
cmovb $t0, $acc0 cmovc $t0, $acc0
mov 8*0($a_ptr), $t0 mov 8*0($a_ptr), $t0
cmovb $t1, $acc1 cmovc $t1, $acc1
mov 8*1($a_ptr), $t1 mov 8*1($a_ptr), $t1
cmovb $t2, $acc2 cmovc $t2, $acc2
mov 8*2($a_ptr), $t2 mov 8*2($a_ptr), $t2
cmovb $t3, $acc3 cmovc $t3, $acc3
mov 8*3($a_ptr), $t3 mov 8*3($a_ptr), $t3
call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr); call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr);
...@@ -2938,12 +2938,12 @@ __ecp_nistz256_add_tox: ...@@ -2938,12 +2938,12 @@ __ecp_nistz256_add_tox:
sbb $poly3, $a3 sbb $poly3, $a3
sbb \$0, $t4 sbb \$0, $t4
cmovb $t0, $a0 cmovc $t0, $a0
cmovb $t1, $a1 cmovc $t1, $a1
mov $a0, 8*0($r_ptr) mov $a0, 8*0($r_ptr)
cmovb $t2, $a2 cmovc $t2, $a2
mov $a1, 8*1($r_ptr) mov $a1, 8*1($r_ptr)
cmovb $t3, $a3 cmovc $t3, $a3
mov $a2, 8*2($r_ptr) mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr) mov $a3, 8*3($r_ptr)
...@@ -3033,12 +3033,12 @@ __ecp_nistz256_mul_by_2x: ...@@ -3033,12 +3033,12 @@ __ecp_nistz256_mul_by_2x:
sbb $poly3, $a3 sbb $poly3, $a3
sbb \$0, $t4 sbb \$0, $t4
cmovb $t0, $a0 cmovc $t0, $a0
cmovb $t1, $a1 cmovc $t1, $a1
mov $a0, 8*0($r_ptr) mov $a0, 8*0($r_ptr)
cmovb $t2, $a2 cmovc $t2, $a2
mov $a1, 8*1($r_ptr) mov $a1, 8*1($r_ptr)
cmovb $t3, $a3 cmovc $t3, $a3
mov $a2, 8*2($r_ptr) mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr) mov $a3, 8*3($r_ptr)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册