commit 39ce0d5862cd5fab8c93d60b195610b2ac77a094
from: rsc
date: Wed Mar 22 16:30:50 2006 UTC

Avoid storing precious data below the stack pointer.

commit - aff51ee51f0bab35fca95569c639042de8acf777
commit + 39ce0d5862cd5fab8c93d60b195610b2ac77a094
blob - f02e16160790d0716eb9d505b01b9b2cf820b170
blob + 143bba9b66e10cd5c5cb53227900c595bc9921af
--- src/libmp/386/mpdigdiv.s
+++ src/libmp/386/mpdigdiv.s
@@ -1,19 +1,19 @@
 .text
 
-	.p2align 2,0x90
 .globl mpdigdiv
 	.type mpdigdiv, @function
 mpdigdiv:
 	/* Prelude */
-	pushl %ebp
-	movl %ebx, -4(%esp)		/* save on stack */
+	pushl %ebp			/* save on stack */
+	pushl %ebx
+
+	leal 12(%esp), %ebp		/* %ebp = FP for now */
+	movl 0(%ebp), %ebx		/* dividend */
+	movl 0(%ebx), %eax
+	movl 4(%ebx), %edx
+	movl 4(%ebp), %ebx		/* divisor */
+	movl 8(%ebp), %ebp		/* quotient */
 
-	movl 8(%esp), %ebx
-	movl (%ebx), %eax
-	movl 4(%ebx), %edx
-
-	movl 12(%esp), %ebx
-	movl 16(%esp), %ebp
 	xorl %ecx, %ecx
 	cmpl %ebx, %edx		/* dividend >= 2^32 * divisor */
 	jae divovfl
@@ -21,19 +21,14 @@ mpdigdiv:
 	je divovfl
 	divl %ebx		/* AX = DX:AX/BX */
 	movl %eax, (%ebp)
-	jmp done
+done:
+	/* Postlude */
+	popl %ebx
+	popl %ebp
+	ret
 
 	/* return all 1's */
 divovfl:
 	notl %ecx
 	movl %ecx, (%ebp)
-
-done:
-	/* Postlude */
-	movl -4(%esp), %ebx		/* restore from stack */
-	movl %esp, %ebp
-	leave
-	ret
-
-.endmpdigdiv:
-	.size mpdigdiv,.endmpdigdiv-mpdigdiv
+	jmp done
blob - 67519ad4f78430fbeaae967c80db79bffbca6d2c
blob + 04a2bedf9d2e246b68a3d06a5d0324261435253c
--- src/libmp/386/mpvecadd.s
+++ src/libmp/386/mpvecadd.s
@@ -10,17 +10,19 @@
 	.type mpvecadd, @function
 mpvecadd:
 	/* Prelude */
-	pushl %ebp
-	movl %ebx, -4(%esp)		/* save on stack */
-	movl %esi, -8(%esp)
-	movl %edi, -12(%esp)
+	pushl %ebp			/* save on stack */
+	pushl %ebx
+	pushl %esi
+	pushl %edi
 
-	movl 12(%esp), %edx		/* alen */
-	movl 20(%esp), %ecx		/* blen */
-	movl 8(%esp), %esi		/* a */
-	movl 16(%esp), %ebx		/* b */
+	leal 20(%esp), %ebp		/* %ebp = FP for now */
+
+	movl 4(%ebp), %edx		/* alen */
+	movl 12(%ebp), %ecx		/* blen */
+	movl 0(%ebp), %esi		/* a */
+	movl 8(%ebp), %ebx		/* b */
 	subl %ecx, %edx
-	movl 24(%esp), %edi		/* sum */
+	movl 16(%ebp), %edi		/* sum */
 	xorl %ebp, %ebp		/* this also sets carry to 0 */
 
 	/* skip addition if b is zero */
@@ -62,9 +64,8 @@ _addloop2:
 
 done:
 	/* Postlude */
-	movl -4(%esp), %ebx		/* restore from stack */
-	movl -8(%esp), %esi
-	movl -12(%esp), %edi
-	movl %esp, %ebp
-	leave
+	popl %edi
+	popl %esi
+	popl %ebx
+	popl %ebp
 	ret
blob - 987c63306543ebae3c96c00e7c6fb785b316ac39
blob + 7d11f2a1db01833f9c706724df3a7c1f65e850f9
--- src/libmp/386/mpvecdigmuladd.s
+++ src/libmp/386/mpvecdigmuladd.s
@@ -27,15 +27,16 @@
 	.type mpvecdigmuladd, @function
 mpvecdigmuladd:
 	/* Prelude */
-	pushl %ebp
-	movl %ebx, -4(%esp)		/* save on stack */
-	movl %esi, -8(%esp)
-	movl %edi, -12(%esp)
+	pushl %ebp			/* save on stack */
+	pushl %ebx
+	pushl %esi
+	pushl %edi
 
-	movl 8(%esp), %esi		/* b */
-	movl 12(%esp), %ecx		/* n */
-	movl 16(%esp), %ebx		/* m */
-	movl 20(%esp), %edi		/* p */
+	leal 20(%esp), %ebp		/* %ebp = FP for now */
+	movl 0(%ebp), %esi		/* b */
+	movl 4(%ebp), %ecx		/* n */
+	movl 8(%ebp), %ebx		/* m */
+	movl 12(%ebp), %edi		/* p */
 	movl %ecx, %ebp
 	negl %ebp		/* BP = -n */
 	shll $2, %ecx
@@ -61,9 +62,9 @@ _muladdnocarry2:
 	adcl %eax, %eax		/* return carry out of p[n] */
 
 	/* Postlude */
-	movl -4(%esp), %ebx		/* restore from stack */
-	movl -8(%esp), %esi
-	movl -12(%esp), %edi
-	movl %esp, %ebp
-	leave
+	popl %edi
+	popl %esi
+	popl %ebx
+	popl %ebp
 	ret
+
blob - e94dae1669357562153839e40acdc13344d312c4
blob + 06726668996e72104a28d8d9ce70cec787487bda
--- src/libmp/386/mpvecsub.s
+++ src/libmp/386/mpvecsub.s
@@ -10,16 +10,18 @@
 	.type mpvecsub, @function
 mpvecsub:
 	/* Prelude */
-	pushl %ebp
-	movl %ebx, -4(%esp)		/* save on stack */
-	movl %esi, -8(%esp)
-	movl %edi, -12(%esp)
+	pushl %ebp			/* save on stack */
+	pushl %ebx
+	pushl %esi
+	pushl %edi
 
-	movl 8(%esp), %esi		/* a */
-	movl 16(%esp), %ebx		/* b */
-	movl 12(%esp), %edx		/* alen */
-	movl 20(%esp), %ecx		/* blen */
-	movl 24(%esp), %edi		/* diff */
+	leal 20(%esp), %ebp		/* %ebp = FP for now */
+	movl 0(%ebp), %esi		/* a */
+	movl 8(%ebp), %ebx		/* b */
+	movl 4(%ebp), %edx		/* alen */
+	movl 12(%ebp), %ecx		/* blen */
+	movl 16(%ebp), %edi		/* diff */
+
 	subl %ecx,%edx
 	xorl %ebp,%ebp		/* this also sets carry to 0 */
 
@@ -46,15 +48,14 @@ _subloop2:
 	movl (%esi, %ebp, 4), %eax
 	sbbl $0, %eax
 	movl %eax, (%edi, %ebp, 4)
-	INCL %ebp
-	LOOP _subloop2
+	incl %ebp
+	loop _subloop2
 
 done:
 	/* Postlude */
-	movl -4(%esp), %ebx		/* restore from stack */
-	movl -8(%esp), %esi
-	movl -12(%esp), %edi
-	movl %esp, %ebp
-	leave
+	popl %edi
+	popl %esi
+	popl %ebx
+	popl %ebp
 	ret
 
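
The pattern being removed parked callee-saved registers at negative offsets from %esp without adjusting the stack pointer. On 386 there is no red zone: memory below %esp is not reserved for the function, so a signal delivered between the save and the restore lets the kernel build its signal frame on top of those slots and corrupt the saved %ebx/%esi/%edi. Below is a minimal sketch of the two preludes, assuming the SysV i386 calling convention (return address at 0(%esp), first argument at 4(%esp) on entry); the function names f_unsafe and f_safe are illustrative only and not part of this commit:

	.text

	/* UNSAFE (old pattern): the save slot sits below %esp, so a
	 * signal frame pushed while the function runs may overwrite it. */
	.globl f_unsafe
	.type f_unsafe, @function
f_unsafe:
	movl %ebx, -4(%esp)	/* save below the stack pointer */
	movl 4(%esp), %ebx	/* first argument */
	leal 1(%ebx), %eax	/* return argument + 1 */
	movl -4(%esp), %ebx	/* restore: may read clobbered data */
	ret

	/* SAFE (new pattern): pushl lowers %esp before storing, so the
	 * saved value is protected; arguments shift out by one word. */
	.globl f_safe
	.type f_safe, @function
f_safe:
	pushl %ebx
	movl 8(%esp), %ebx	/* first argument, now at 8(%esp) */
	leal 1(%ebx), %eax	/* return argument + 1 */
	popl %ebx
	ret

This is also why the new preludes recompute the argument offsets: after two pushes (mpdigdiv) the first argument sits at 12(%esp), and after four pushes (the vector routines) at 20(%esp), hence the leal 12(%esp), %ebp and leal 20(%esp), %ebp instructions that set up a temporary frame pointer before %ebp is reused as a scratch register.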