mpir/mpn/x86_64w/core2/mul_basecase.asm

; AMD64 mpn_mul_basecase
; Copyright 2008,2009 Jason Moxham
; This file is part of the MPIR Library.
; The MPIR Library is free software; you can redistribute it and/or modify
; it under the terms of the GNU Lesser General Public License as published
; by the Free Software Foundation; either version 2.1 of the License, or (at
; your option) any later version.
; The MPIR Library is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
; License for more details.
; You should have received a copy of the GNU Lesser General Public License
; along with the MPIR Library; see the file COPYING.LIB. If not, write
; to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
; Boston, MA 02110-1301, USA.
;
; mpn_mul_basecase(rp, up, un, vp, vn) under the Windows x64 convention:
;   rcx = rp, rdx = up, r8 = un, r9 = vp, [rsp+0x28] = vn
; that is, (rcx, r8+[rsp+0x28]) = (rdx, r8) * (r9, [rsp+0x28])
; Version 1.0.7
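;
; A rough C sketch of the schoolbook algorithm this file implements.
; The sketch is illustrative only: addmul_1 here is a plain C model of
; the usual single-limb primitive, while the real code below works two
; multiplier limbs at a time (addmul_2) with four-way unrolled loops.
;
;   typedef unsigned long long mp_limb_t;
;
;   /* rp[0..un-1] += up[0..un-1] * v; returns the carry-out limb */
;   static mp_limb_t addmul_1(mp_limb_t *rp, const mp_limb_t *up,
;                             long un, mp_limb_t v)
;   {
;       mp_limb_t cy = 0;
;       for (long i = 0; i < un; i++) {
;           unsigned __int128 t = (unsigned __int128)up[i] * v + rp[i] + cy;
;           rp[i] = (mp_limb_t)t;
;           cy = (mp_limb_t)(t >> 64);
;       }
;       return cy;
;   }
;
;   void mul_basecase(mp_limb_t *rp, const mp_limb_t *up, long un,
;                     const mp_limb_t *vp, long vn)
;   {
;       for (long i = 0; i < un; i++)
;           rp[i] = 0;
;       for (long j = 0; j < vn; j++)
;           rp[un + j] = addmul_1(rp + j, up, un, vp[j]);
;   }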
%include "..\yasm_mac.inc"
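;
; addmul2lp -- four-way unrolled inner loop of an addmul_2 pass: it
; multiplies the source limbs at (rsi) by the two multiplier limbs held
; in rcx and r8 and accumulates the products into the destination at
; (rdi).  rbx is a negative limb index counting up to zero, and
; r9/r10/r11/r12 rotate as the carry chain.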
%macro addmul2lp 0
xalign 16
%%1:
mov rax, [rsi+rbx*8]
mul r8
add r9, rax
mov rax, [rsi+rbx*8+8]
adc r10, rdx
mov r11, 0
mul rcx
add [rdi+rbx*8], r12
adc r9, rax
mov r12, 0
adc r10, rdx
mov rax, [rsi+rbx*8+8]
adc r11, 0
mul r8
add [rdi+rbx*8+8], r9
adc r10, rax
adc r11, rdx
mov rax, [rsi+rbx*8+16]
mul rcx
add r10, rax
mov rax, [rsi+rbx*8+16]
adc r11, rdx
adc r12, 0
mul r8
add [rdi+rbx*8+16], r10
mov r9, 0
adc r11, rax
mov r10, 0
mov rax, [rsi+rbx*8+24]
adc r12, rdx
mov r15, r15 ; no-op, kept as padding for code alignment/scheduling
mul rcx
add r11, rax
mov rax, [rsi+rbx*8+24]
adc r12, rdx
adc r9, 0
mul r8
add [rdi+rbx*8+24], r11
adc r12, rax
adc r9, rdx
mov rax, [rsi+rbx*8+32]
mul rcx
add r12, rax
adc r9, rdx
adc r10, 0
add rbx, 4
jnc %%1
%endmacro
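;
; addmul2proN / addmul2epiN -- prologue and epilogue around addmul2lp
; for the four possible values of the size modulo 4.  The prologue
; loads the next two multiplier limbs from (r13, r15) and starts the
; carry chain; the epilogue finishes the trailing limbs, writes the two
; carry limbs, steps r15 forward by two multiplier limbs and rdi by two
; words, and reloads rax and rbx for the next pass.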
%macro addmul2pro0 0
mov rcx, [r13+r15*8]
mul rcx
mov r12, rax
mov r9, rdx
mov r10, 0
mov r8, [r13+r15*8+8]
%endmacro
%macro addmul2epi0 0
mov rbx, r14
mov rax, [rsi+24]
mul r8
add [rdi+24], r12
adc r9, rax
adc r10, rdx
add r15, 2
mov rax, [rsi+r14*8]
mov [rdi+32], r9
lea rdi, [rdi+16]
mov [rdi+24], r10
%endmacro
%macro addmul2pro1 0
mov rcx, [r13+r15*8]
mul rcx
mov r12, rax
mov r10, 0
mov r9, rdx
mov r8, [r13+r15*8+8]
%endmacro
%macro addmul2epi1 0
mov rax, [rsi+16]
lea rdi, [rdi+16]
mul r8
add r9, rax
mov rax, [rsi+24]
mov r11, 0
adc r10, rdx
mul rcx
add [rdi], r12
adc r9, rax
adc r10, rdx
adc r11, 0
mov rax, [rsi+24]
mul r8
add [rdi+8], r9
adc r10, rax
adc r11, rdx
add r15, 2
mov rbx, r14
mov rax, [rsi+r14*8]
mov [rdi+24], r11
mov [rdi+16], r10
%endmacro
%macro addmul2pro2 0
mov rcx, [r13+r15*8]
mul rcx
mov r10, 0
mov r12, rax
mov r9, rdx
mov r8, [r13+r15*8+8]
%endmacro
%macro addmul2epi2 0
mov rax, [rsi+8]
lea rdi, [rdi+16]
mul r8
add r9, rax
mov rax, [rsi+16]
adc r10, rdx
mov r11, 0
mul rcx
add [rdi-8], r12
adc r9, rax
mov r12, 0
adc r10, rdx
mov rax, [rsi+16]
adc r11, 0
mul r8
add [rdi], r9
adc r10, rax
adc r11, rdx
mov rax, [rsi+24]
mul rcx
add r10, rax
mov rax, [rsi+24]
adc r11, rdx
adc r12, 0
mul r8
add [rdi+8], r10
adc r11, rax
adc r12, rdx
mov rax, [rsi+r14*8]
mov [rdi+16], r11
mov [rdi+24], r12
add r15, 2
mov rbx, r14
%endmacro
%macro addmul2pro3 0
mov rcx, [r13+r15*8]
mul rcx
mov r12, rax
mov r9, rdx
mov r8, [r13+r15*8+8]
mov r10, 0
%endmacro
%macro addmul2epi3 0
mov rax, [rsi]
lea rdi, [rdi+16]
mul r8
add r9, rax
mov rax, [rsi+8]
adc r10, rdx
mov r11, 0
mul rcx
add [rdi-16], r12
adc r9, rax
mov r12, 0
adc r10, rdx
mov rax, [rsi+8]
adc r11, 0
mul r8
add [rdi-8], r9
adc r10, rax
adc r11, rdx
mov rax, [rsi+16]
mul rcx
add r10, rax
mov rax, [rsi+16]
adc r11, rdx
adc r12, 0
mul r8
add [rdi], r10
mov r9, 0
adc r11, rax
mov r10, 0
mov rax, [rsi+24]
adc r12, rdx
mov r15, r15 ; no-op padding
mul rcx
add r11, rax
mov rax, [rsi+24]
adc r12, rdx
adc r9, 0
mul r8
add [rdi+8], r11
adc r12, rax
adc r9, rdx
mov rax, [rsi+r14*8]
mov [rdi+16], r12
mov [rdi+24], r9
add r15, 2
mov rbx, r14
%endmacro
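;
; mul2lp and the mul2proN / mul2epiN macros below mirror the addmul2
; versions above, except that results are stored into the destination
; (mov) rather than added to it (add): this is the first pass over the
; result area, used when the multiplier count vn is even, so there is
; nothing there yet to accumulate.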
%macro mul2lp 0
xalign 16
%%1:
mov rax, [rsi+rbx*8]
mul r8
add r9, rax
mov rax, [rsi+rbx*8+8]
adc r10, rdx
mov r11, 0
mul rcx
mov [rdi+rbx*8], r12
add r9, rax
mov r12, 0
adc r10, rdx
mov rax, [rsi+rbx*8+8]
adc r11, 0
mul r8
mov [rdi+rbx*8+8], r9
add r10, rax
adc r11, rdx
mov rax, [rsi+rbx*8+16]
mul rcx
add r10, rax
mov rax, [rsi+rbx*8+16]
adc r11, rdx
adc r12, 0
mul r8
mov [rdi+rbx*8+16], r10
mov r9, 0
add r11, rax
mov r10, 0
mov rax, [rsi+rbx*8+24]
adc r12, rdx
mov r15, r15 ; no-op padding
mul rcx
add r11, rax
mov rax, [rsi+rbx*8+24]
adc r12, rdx
adc r9, 0
mul r8
mov [rdi+rbx*8+24], r11
add r12, rax
adc r9, rdx
mov rax, [rsi+rbx*8+32]
mul rcx
add r12, rax
adc r9, rdx
adc r10, 0
add rbx, 4
jnc %%1
%endmacro
%macro mul2pro0 0
mov rcx, [r13+r15*8]
mul rcx
mov r12, rax
mov r9, rdx
mov r10, 0
mov r8, [r13+r15*8+8]
%endmacro
%macro mul2epi0 0
mov rbx, r14
mov rax, [rsi+24]
mul r8
mov [rdi+24], r12
add r9, rax
adc r10, rdx
add r15, 2
mov rax, [rsi+r14*8]
mov [rdi+32], r9
lea rdi, [rdi+16]
mov [rdi+24], r10
%endmacro
%macro mul2pro1 0
mov rcx, [r13+r15*8]
mul rcx
mov r12, rax
mov r10, 0
mov r9, rdx
mov r8, [r13+r15*8+8]
%endmacro
%macro mul2epi1 0
mov rax, [rsi+16]
lea rdi, [rdi+16]
mul r8
add r9, rax
mov rax, [rsi+24]
mov r11, 0
adc r10, rdx
mul rcx
mov [rdi], r12
add r9, rax
adc r10, rdx
adc r11, 0
mov rax, [rsi+24]
mul r8
mov [rdi+8], r9
add r10, rax
adc r11, rdx
add r15, 2
mov rbx, r14
mov rax, [rsi+r14*8]
mov [rdi+24], r11
mov [rdi+16], r10
%endmacro
%macro mul2pro2 0
mov rcx, [r13+r15*8]
mul rcx
mov r10, 0
mov r12, rax
mov r9, rdx
mov r8, [r13+r15*8+8]
%endmacro
%macro mul2epi2 0
mov rax, [rsi+8]
lea rdi, [rdi+16]
mul r8
add r9, rax
mov rax, [rsi+16]
adc r10, rdx
mov r11, 0
mul rcx
mov [rdi-8], r12
add r9, rax
mov r12, 0
adc r10, rdx
mov rax, [rsi+16]
adc r11, 0
mul r8
mov [rdi], r9
add r10, rax
adc r11, rdx
mov rax, [rsi+24]
mul rcx
add r10, rax
mov rax, [rsi+24]
adc r11, rdx
adc r12, 0
mul r8
mov [rdi+8], r10
add r11, rax
adc r12, rdx
mov rax, [rsi+r14*8]
mov [rdi+16], r11
mov [rdi+24], r12
add r15, 2
mov rbx, r14
%endmacro
%macro mul2pro3 0
mov rcx, [r13+r15*8]
mul rcx
mov r12, rax
mov r9, rdx
mov r8, [r13+r15*8+8]
mov r10, 0
%endmacro
%macro mul2epi3 0
mov rax, [rsi]
lea rdi, [rdi+16]
mul r8
add r9, rax
mov rax, [rsi+8]
adc r10, rdx
mov r11, 0
mul rcx
mov [rdi-16], r12
add r9, rax
mov r12, 0
adc r10, rdx
mov rax, [rsi+8]
adc r11, 0
mul r8
mov [rdi-8], r9
add r10, rax
adc r11, rdx
mov rax, [rsi+16]
mul rcx
add r10, rax
mov rax, [rsi+16]
adc r11, rdx
adc r12, 0
mul r8
mov [rdi], r10
mov r9, 0
add r11, rax
mov r10, 0
mov rax, [rsi+24]
adc r12, rdx
mov r15, r15 ; no-op padding
mul rcx
add r11, rax
mov rax, [rsi+24]
adc r12, rdx
adc r9, 0
mul r8
mov [rdi+8], r11
add r12, rax
adc r9, rdx
mov rax, [rsi+r14*8]
mov [rdi+16], r12
mov [rdi+24], r9
add r15, 2
mov rbx, r14
%endmacro
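;
; mul1lp -- four-way unrolled mul_1 loop, storing (rsi) * r8 at (rdi);
; this is the first pass when the multiplier count vn is odd.  The
; stray db 0x26 bytes here (and in the oldaddmulpro macros below) are
; ES-segment prefixes used as one-byte padding to tune instruction
; alignment.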
%macro mul1lp 0
xalign 16
%%1:
mov r10, 0
mul r8
mov [rdi+rbx*8-8], r12
add r9, rax
db 0x26
adc r10, rdx
mov rax, [rsi+rbx*8+8]
mul r8
mov [rdi+rbx*8], r9
add r10, rax
mov r11d, 0
adc r11, rdx
mov rax, [rsi+rbx*8+16]
mov r12, 0
mov r9, 0
mul r8
mov [rdi+rbx*8+8], r10
db 0x26
add r11, rax
db 0x26
adc r12, rdx
mov rax, [rsi+rbx*8+24]
mul r8
mov [rdi+rbx*8+16], r11
db 0x26
add r12, rax
db 0x26
adc r9, rdx
add rbx, 4
mov rax, [rsi+rbx*8]
jnc %%1
%endmacro
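;
; mulnextN -- finish the mul_1 pass when the loop exits with rbx = N:
; multiply and store the remaining limbs, write the final carry limb,
; then advance one multiplier limb (inc r15) and one result word so the
; remaining multiplier count is even and the addmul_2 code can take
; over.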
; rbx is 0
%macro mulnext0 0
mov rax, [rsi+8]
mul r8
mov [rdi], r9
add r10, rax
mov r11d, 0
adc r11, rdx
mov rax, [rsi+16]
mov r12d, 0
mul r8
mov [rdi+8], r10
add r11, rax
adc r12, rdx
mov rax, [rsi+24]
mul r8
mov [rdi+16], r11
add r12, rax
adc rdx, 0
mov [rdi+24], r12
mov rax, [rsi+r14*8]
mov [rdi+32], rdx
inc r15
lea rdi, [rdi+8]
mov rbx, r14
%endmacro
; rbx is 1
%macro mulnext1 0
mov rax, [rsi+16]
mul r8
mov [rdi+8], r9
add r10, rax
mov r12d, 0
adc r12, rdx
mov rax, [rsi+24]
mul r8
mov [rdi+16], r10
add r12, rax
adc rdx, 0
mov [rdi+24], r12
mov [rdi+32], rdx
inc r15
lea rdi, [rdi+8]
mov rbx, r14
mov rax, [rsi+r14*8]
%endmacro
; rbx is 2
%macro mulnext2 0
mov rax, [rsi+24]
mul r8
mov [rdi+16], r9
add r10, rax
mov r11d, 0
adc r11, rdx
mov [rdi+24], r10
mov [rdi+32], r11
inc r15
lea rdi, [rdi+8]
mov rax, [rsi+r14*8]
mov rbx, r14
%endmacro
; rbx is 3
%macro mulnext3 0
mov [rdi+24], r9
mov [rdi+32], r10
inc r15
lea rdi, [rdi+8]
mov rax, [rsi+r14*8]
mov rbx, r14
%endmacro
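;
; mpn_addmul_2_int N -- the outer loop: while multiplier limbs remain
; (r15 still negative), consume two of them per iteration with
; addmul2proN, addmul2lp and addmul2epiN; the jz on entry skips
; straight out when the preceding pass already exhausted the
; multiplier.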
%macro mpn_addmul_2_int 1
jz %%2
xalign 16
%%1:
addmul2pro%1
addmul2lp
addmul2epi%1
jnz %%1
%%2:
%endmacro
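;
; The "old" macros below implement a generic mul_1 + addmul_1 route;
; they are kept for the small cases 2 <= un <= 4, which the addmul_2
; code above is not entered for (see the size test at the entry point).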
%macro oldmulnext0 0
mov rax, [rsi+r11*8+16]
mul r13
mov [rdi+r11*8+8], r9
add r10, rax
mov ebx, 0
adc rbx, rdx
mov rax, [rsi+r11*8+24]
mov r12d, 0
mul r13
mov [rdi+r11*8+16], r10
add rbx, rax
adc r12, rdx
mov rax, [rsi+r11*8+32]
mul r13
mov [rdi+r11*8+24], rbx
add r12, rax
adc rdx, 0
mov [rdi+r11*8+32], r12
mov rax, [rsi+r14*8]
mov [rdi+r11*8+40], rdx
inc r8
mov r11, r14
%endmacro
%macro oldmulnext1 0
mov rax, [rsi+r11*8+16]
mul r13
mov [rdi+r11*8+8], r9
add r10, rax
mov r12d, 0
adc r12, rdx
mov rax, [rsi+r11*8+24]
mul r13
mov [rdi+r11*8+16], r10
add r12, rax
adc rdx, 0
mov [rdi+r11*8+24], r12
mov [rdi+r11*8+32], rdx
inc r8
lea rdi, [rdi+8]
mov r11, r14
mov rax, [rsi+r14*8]
%endmacro
%macro oldmulnext2 0
mov rax, [rsi+r11*8+16]
mul r13
mov [rdi+r11*8+8], r9
add r10, rax
mov ebx, 0
adc rbx, rdx
mov [rdi+r11*8+16], r10
mov [rdi+r11*8+24], rbx
inc r8
mov rax, [rsi+r14*8]
mov r11, r14
%endmacro
%macro oldmulnext3 0
mov [rdi+r11*8+8], r9
mov [rdi+r11*8+16], r10
inc r8
mov rax, [rsi+r14*8]
mov r11, r14
%endmacro
%macro oldaddmulpro0 0
mov r13, [rcx+r8*8]
db 0x26
mul r13
db 0x26
mov r12, rax
mov rax, [rsi+r14*8+8]
db 0x26
mov r9, rdx
lea rdi, [rdi+8]
%endmacro
%macro oldaddmulnext0 0
mov r10d, 0
mul r13
add [rdi], r12
adc r9, rax
adc r10, rdx
mov rax, [rsi+16]
mul r13
add [rdi+8], r9
adc r10, rax
mov ebx, 0
adc rbx, rdx
mov rax, [rsi+24]
mov r12d, 0
mov r11, r14
mul r13
add [rdi+16], r10
adc rbx, rax
adc r12, rdx
mov rax, [rsi+32]
mul r13
add [rdi+24], rbx
adc r12, rax
adc rdx, 0
add [rdi+32], r12
mov rax, [rsi+r14*8]
adc rdx, 0
inc r8
mov [rdi+40], rdx
%endmacro
%macro oldaddmulpro1 0
mov r13, [rcx+r8*8]
mul r13
mov r12, rax
mov rax, [rsi+r14*8+8]
mov r9, rdx
%endmacro
%macro oldaddmulnext1 0
mov r10d, 0
mul r13
add [rdi+8], r12
adc r9, rax
adc r10, rdx
mov rax, [rsi+24]
mul r13
lea rdi, [rdi+8]
add [rdi+8], r9
adc r10, rax
mov r12d, 0
mov rax, [rsi+32]
adc r12, rdx
mov r11, r14
mul r13
add [rdi+16], r10
adc r12, rax
adc rdx, 0
add [rdi+24], r12
adc rdx, 0
mov [rdi+32], rdx
inc r8
mov rax, [rsi+r14*8]
%endmacro
%macro oldaddmulpro2 0
mov r13, [rcx+r8*8]
lea rdi, [rdi+8]
mul r13
mov r12, rax
mov rax, [rsi+r14*8+8]
mov r9, rdx
%endmacro
%macro oldaddmulnext2 0
mov r10d, 0
mul r13
add [rdi+r11*8], r12
adc r9, rax
adc r10, rdx
mov rax, [rsi+r11*8+16]
mul r13
mov ebx, 0
add [rdi+r11*8+8], r9
adc r10, rax
adc rbx, rdx
mov rax, [rsi+r14*8]
add [rdi+r11*8+16], r10
adc rbx, 0
mov [rdi+r11*8+24], rbx
inc r8
mov r11, r14
%endmacro
%macro oldaddmulpro3 0
mov r13, [rcx+r8*8]
db 0x26
mul r13
db 0x26
mov r12, rax
db 0x26
lea rdi, [rdi+8]
db 0x26
mov r9, rdx
mov rax, [rsi+r14*8+8]
%endmacro
%macro oldaddmulnext3 0
mov r11, r14
mul r13
add [rdi+24], r12
adc r9, rax
adc rdx, 0
add [rdi+32], r9
mov rax, [rsi+r14*8]
adc rdx, 0
inc r8
mov [rdi+40], rdx
%endmacro
%macro oldmpn_muladdmul_1_int 1
oldmulnext%1
jz %%2
xalign 16
%%1:
oldaddmulpro%1
oldaddmulnext%1
jnz %%1
%%2:
%endmacro
CPU Core2
BITS 64
%define reg_save_list rbx, rsi, rdi, r12, r13, r14
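; Entry dispatch.  On entry (Windows convention) r8d holds un, and the
; usual mpn precondition un >= vn >= 1 applies, so:
;   un == 1      -> L_one     single 64x64 -> 128 bit product (vn = 1 too)
;   2 <= un <= 4 -> mpn_mbc1  generic mul_1 / addmul_1 route
;   un >= 5      -> mpn_mbc2  mul_1 or mul_2 pass, then addmul_2 passes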
LEAF_PROC mpn_mul_basecase
; the main multiplication code does not handle the un = 1 case
cmp r8d, 4
jg L_fiveormore
cmp r8d, 1
je L_one
WIN64_GCC_PROC mpn_mbc1, 5, frame
movsxd rdx, edx
movsxd r8, r8d
mov r14, 5
sub r14, rdx
lea rdi, [rdi+rdx*8-40]
lea rcx, [rcx+r8*8]
neg r8
lea rsi, [rsi+rdx*8-40]
mov rax, [rsi+r14*8]
mov r13, [rcx+r8*8]
mov r11, r14
mul r13
mov r12, rax
mov rax, [rsi+r14*8+8]
mov r9, rdx
mov r10d, 0
mul r13
mov [rdi+r11*8], r12
add r9, rax
adc r10, rdx
cmp r11, 2
ja L_oldcase3 ; r11 = 3
jz L_oldcase2 ; r11 = 2
jp L_oldcase1 ; r11 = 1 (PF set: low byte of r11-2 is 0xff, even parity); r11 = 0 falls through
L_oldcase0:
oldmpn_muladdmul_1_int 0
jmp L_xit1
xalign 16
L_oldcase1:
oldmpn_muladdmul_1_int 1
jmp L_xit1
xalign 16
L_oldcase2:
oldmpn_muladdmul_1_int 2
jmp L_xit1
xalign 16
L_oldcase3:
oldmpn_muladdmul_1_int 3
xalign 16
L_xit1:
WIN64_GCC_END frame
; rdx >= 5 here, as we don't have an inner jump
; (rdi,rdx+r8)=(rsi,rdx)*(rcx,r8)
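;
; Plan for this path, as a rough C sketch (mul_1, mul_2 and addmul_2
; are shorthand for the store/accumulate passes built from the macros
; above, not real entry points in this file):
;
;   if (vn & 1) {                  /* odd vn: one mul_1 pass first     */
;       rp[un] = mul_1(rp, up, un, vp[0]);
;       j = 1;
;   } else {                       /* even vn: start with a mul_2 pass */
;       mul_2(rp, up, un, vp);     /* writes rp[0 .. un+1]             */
;       j = 2;
;   }
;   for (; j < vn; j += 2)         /* remaining pairs of limbs         */
;       addmul_2(rp + j, up, un, vp + j);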
%undef reg_save_list
%define reg_save_list rbx, rsi, rdi, r12, r13, r14, r15
xalign 16
L_fiveormore:
WIN64_GCC_PROC mpn_mbc2, 5, frame
movsxd rdx, edx
movsxd r8, r8d
mov r14, 4
sub r14, rdx
lea rdi, [rdi+rdx*8-32]
lea rsi, [rsi+rdx*8-32]
mov r13, rcx
mov r15, r8
lea r13, [r13+r15*8]
neg r15
mov rbx, r14
mov rax, [rsi+r14*8]
bt r15, 0 ; r15 = -vn, so CF = vn & 1
jnc L_even
L_odd:
inc rbx
mov r8, [r13+r15*8]
mul r8
mov r12, rax
mov rax, [rsi+r14*8+8]
mov r9, rdx
cmp rbx, 0
jge L_mulskiploop
mul1lp
L_mulskiploop:
mov r10d, 0
mul r8
mov [rdi+rbx*8-8], r12
add r9, rax
adc r10, rdx
cmp rbx, 2
ja L_mul1case3 ; rbx = 3
jz L_mul1case2 ; rbx = 2
jp L_mul1case1 ; rbx = 1 (parity trick, as above); rbx = 0 falls through
L_mul1case0:
mulnext0
jmp L_case0
L_mul1case1:
mulnext1
jmp L_case3
L_mul1case2:
mulnext2
jmp L_case2
L_mul1case3:
mulnext3
jmp L_case1
L_even:
; all four mul2pro? macros are identical, so mul2pro0 serves for every alignment
mul2pro0
mul2lp
cmp rbx, 2
ja L_mul2case0 ; rbx = 3
jz L_mul2case1 ; rbx = 2
jp L_mul2case2 ; rbx = 1 (parity trick, as above); rbx = 0 falls through
L_mul2case3:
mul2epi3
L_case3:
mpn_addmul_2_int 3
jmp L_xit2
L_mul2case2:
mul2epi2
L_case2:
mpn_addmul_2_int 2
jmp L_xit2
L_mul2case1:
mul2epi1
L_case1:
mpn_addmul_2_int 1
jmp L_xit2
L_mul2case0:
mul2epi0
L_case0:
mpn_addmul_2_int 0
xalign 16
L_xit2:
WIN64_GCC_END frame
xalign 16
L_one:
mov rax, [rdx] ; rax = up[0]
mul qword [r9] ; rdx:rax = up[0] * vp[0]
mov [rcx], rax ; rp[0] = low limb
mov [rcx+8], rdx ; rp[1] = high limb
ret
end