OLD | NEW |
(Empty) | |
| 1 // Copyright 2010 The Go Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style |
| 3 // license that can be found in the LICENSE file. |
| 4 |
| // Constants for the FDLIBM-style log algorithm (see e_log.c / log.go).
| // L1..L7 are the minimax polynomial coefficients for R(s) on [0, 0.1716].
| 5 #define HSqrt2 7.07106781186547524401e-01 // sqrt(2)/2
| 6 #define Ln2Hi 6.93147180369123816490e-01 // 0x3fe62e42fee00000, high bits of ln(2)
| 7 #define Ln2Lo 1.90821492927058770002e-10 // 0x3dea39ef35793c76, low bits of ln(2)
| 8 #define L1 6.666666666666735130e-01 // 0x3FE5555555555593
| 9 #define L2 3.999999999940941908e-01 // 0x3FD999999997FA04
| 10 #define L3 2.857142874366239149e-01 // 0x3FD2492494229359
| 11 #define L4 2.222219843214978396e-01 // 0x3FCC71C51D8E78AF
| 12 #define L5 1.818357216161805012e-01 // 0x3FC7466496CB03DE
| 13 #define L6 1.531383769920937332e-01 // 0x3FC39A09D078C69F
| 14 #define L7 1.479819860511658591e-01 // 0x3FC2F112DF3E5244
| 15 #define NaN 0x7FF8000000000001 // quiet NaN, matches math.NaN(); was 0x7FF0000000000001, a signaling-NaN pattern
| 16 #define NegInf 0xFFF0000000000000 // bits of -Inf
| 17 #define PosInf 0x7FF0000000000000 // bits of +Inf
| 18
| 19 // func Log(x float64) float64
| 20 TEXT ·Log(SB),7,$0 // NOTE(review): 7 is an old-style text flag (pre textflag.h); presumably NOSPLIT — verify against toolchain era
| 21 // test bits for special cases (integer compares on the raw IEEE-754 bits)
| 22 MOVQ x+0(FP), BX
| 23 MOVQ $~(1<<63), AX // sign bit mask, 0x7FFFFFFFFFFFFFFF
| 24 ANDQ BX, AX // AX = |x| bits; sets ZF
| 25 JEQ isZero // x == ±0 -> return -Inf
| 26 MOVQ $0, AX
| 27 CMPQ AX, BX
| 28 JGT isNegative // signed compare: sign bit set (x < 0, or a negative NaN) -> return NaN
| 29 MOVQ $PosInf, AX
| 30 CMPQ AX, BX
| 31 JLE isInfOrNaN // bits >= +Inf bits -> x is +Inf or NaN, return x
| 32 // f1, ki := math.Frexp(x); k := float64(ki)
| 33 MOVQ BX, X0
| 34 MOVQ $0x000FFFFFFFFFFFFF, AX // mantissa mask
| 35 MOVQ AX, X2
| 36 ANDPD X0, X2 // X2 = mantissa bits of x
| 37 MOVSD $0.5, X0 // 0x3FE0000000000000
| 38 ORPD X0, X2 // X2= f1, mantissa with exponent of 0.5, so f1 in [0.5, 1)
| 39 SHRQ $52, BX // BX = biased exponent (sign bit is clear here: x > 0)
| 40 ANDL $0x7FF, BX
| 41 SUBL $0x3FE, BX // unbias so that x == f1 * 2**ki
| 42 CVTSL2SD BX, X1 // x1= k, x2= f1
| 43 // if f1 < math.Sqrt2/2 { k -= 1; f1 *= 2 }
| 44 MOVSD $HSqrt2, X0 // x0= 0.7071, x1= k, x2= f1
| 45 CMPSD X2, X0, 5 // cmpnlt: X0 = all-ones if HSqrt2 >= f1, else 0; x1= k, x2 = f1
| 46 MOVSD $1.0, X3 // x0= 0 or ^0, x1= k, x2 = f1, x3= 1
| 47 ANDPD X0, X3 // x0= 0 or ^0, x1= k, x2 = f1, x3= 0 or 1 (branch-free select via mask)
| 48 SUBSD X3, X1 // k -= 0 or 1; x0= 0 or ^0, x1= k, x2 = f1, x3= 0 or 1
| 49 MOVSD $1.0, X0 // x0= 1, x1= k, x2= f1, x3= 0 or 1
| 50 ADDSD X0, X3 // x0= 1, x1= k, x2= f1, x3= 1 or 2
| 51 MULSD X3, X2 // f1 *= 1 or 2; x0= 1, x1= k, x2= f1
| 52 // f := f1 - 1
| 53 SUBSD X0, X2 // x1= k, x2= f
| 54 // s := f / (2 + f)
| 55 MOVSD $2.0, X0
| 56 ADDSD X2, X0
| 57 MOVSD X2, X3
| 58 DIVSD X0, X3 // x1=k, x2= f, x3= s
| 59 // s2 := s * s
| 60 MOVSD X3, X4 // x1= k, x2= f, x3= s
| 61 MULSD X4, X4 // x1= k, x2= f, x3= s, x4= s2
| 62 // s4 := s2 * s2
| 63 MOVSD X4, X5 // x1= k, x2= f, x3= s, x4= s2
| 64 MULSD X5, X5 // x1= k, x2= f, x3= s, x4= s2, x5= s4
| 65 // t1 := s2 * (L1 + s4*(L3+s4*(L5+s4*L7)))  -- odd coefficients, Horner in s4
| 66 MOVSD $L7, X6
| 67 MULSD X5, X6
| 68 ADDSD $L5, X6
| 69 MULSD X5, X6
| 70 ADDSD $L3, X6
| 71 MULSD X5, X6
| 72 ADDSD $L1, X6
| 73 MULSD X6, X4 // x1= k, x2= f, x3= s, x4= t1, x5= s4
| 74 // t2 := s4 * (L2 + s4*(L4+s4*L6))  -- even coefficients, Horner in s4
| 75 MOVSD $L6, X6
| 76 MULSD X5, X6
| 77 ADDSD $L4, X6
| 78 MULSD X5, X6
| 79 ADDSD $L2, X6
| 80 MULSD X6, X5 // x1= k, x2= f, x3= s, x4= t1, x5= t2
| 81 // R := t1 + t2
| 82 ADDSD X5, X4 // x1= k, x2= f, x3= s, x4= R
| 83 // hfsq := 0.5 * f * f
| 84 MOVSD $0.5, X0
| 85 MULSD X2, X0
| 86 MULSD X2, X0 // x0= hfsq, x1= k, x2= f, x3= s, x4= R
| 87 // return k*Ln2Hi - ((hfsq - (s*(hfsq+R) + k*Ln2Lo)) - f)  -- split-ln2 form preserves precision
| 88 ADDSD X0, X4 // x0= hfsq, x1= k, x2= f, x3= s, x4= hfsq+R
| 89 MULSD X4, X3 // x0= hfsq, x1= k, x2= f, x3= s*(hfsq+R)
| 90 MOVSD $Ln2Lo, X4
| 91 MULSD X1, X4 // x4= k*Ln2Lo
| 92 ADDSD X4, X3 // x0= hfsq, x1= k, x2= f, x3= s*(hfsq+R)+k*Ln2Lo
| 93 SUBSD X3, X0 // x0= hfsq-(s*(hfsq+R)+k*Ln2Lo), x1= k, x2= f
| 94 SUBSD X2, X0 // x0= (hfsq-(s*(hfsq+R)+k*Ln2Lo))-f, x1= k
| 95 MULSD $Ln2Hi, X1 // x0= (hfsq-(s*(hfsq+R)+k*Ln2Lo))-f, x1= k*Ln2Hi
| 96 SUBSD X0, X1 // x1= k*Ln2Hi-((hfsq-(s*(hfsq+R)+k*Ln2Lo))-f)
| 97 MOVSD X1, r+8(FP)
| 98 RET
| 99 isInfOrNaN:
| 100 MOVQ BX, r+8(FP) // +Inf or NaN, return x
| 101 RET
| 102 isNegative:
| 103 MOVQ $NaN, AX
| 104 MOVQ AX, r+8(FP) // return NaN (log of a negative number is undefined)
| 105 RET
| 106 isZero:
| 107 MOVQ $NegInf, AX
| 108 MOVQ AX, r+8(FP) // return -Inf (log approaches -Inf as x -> 0+)
| 109 RET
OLD | NEW |