From a63f0d99978c3d6e6fcde1f2562295070f8d7e14 Mon Sep 17 00:00:00 2001
From: Elliott Hughes
Date: Fri, 13 Aug 2010 18:23:34 -0700
Subject: [PATCH] Clean up more of our floating-point cruft.

There was a ton of unused stuff here.

I was hoping to kill all our C-style casts so I could turn -Wold-style-cast
on, but it looks like our header files are too full of C-style casts for that
to be practical in conjunction with -Werror. I'll get rid of the remaining
C-style casts in the more heavily maintained files at some point, but I'm
calling it a day on the floating-point stuff for now.

Change-Id: I93ce1c4a9db27674f4db1f329d6e7fa27e81ad72
---
 luni/src/main/native/cbigint.cpp                    | 194 +++++++++-----------
 luni/src/main/native/cbigint.h                      | 117 ++++++++----
 luni/src/main/native/fltconst.h                     | 187 -------------------
 .../org_apache_harmony_luni_util_NumberConvert.cpp  |  26 +--
 .../org_apache_harmony_luni_util_fltparse.cpp       | 198 ++++++++++-----------
 5 files changed, 272 insertions(+), 450 deletions(-)
 delete mode 100644 luni/src/main/native/fltconst.h

diff --git a/luni/src/main/native/cbigint.cpp b/luni/src/main/native/cbigint.cpp
index 830b52c9..0b7cc420 100644
--- a/luni/src/main/native/cbigint.cpp
+++ b/luni/src/main/native/cbigint.cpp
@@ -22,7 +22,7 @@
 #define USE_LL
 #endif
 
-#ifdef HY_LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 #define at(i) (i)
 #else
 #define at(i) ((i)^1)
 #endif
@@ -81,9 +81,7 @@
 #define TIMES_TEN(x) (((x) << 3) + ((x) << 1))
 #define bitSection(x, mask, shift) (((x) & (mask)) >> (shift))
-#define DOUBLE_TO_LONGBITS(dbl) (*((U_64 *)(&dbl)))
-#define FLOAT_TO_INTBITS(flt) (*((U_32 *)(&flt)))
-#define CREATE_DOUBLE_BITS(normalizedM, e) (((normalizedM) & MANTISSA_MASK) | (((U_64)((e) + E_OFFSET)) << 52))
+#define CREATE_DOUBLE_BITS(normalizedM, e) (((normalizedM) & MANTISSA_MASK) | ((static_cast<uint64_t>((e) + E_OFFSET)) << 52))
 
 #if defined(USE_LL)
 #define MANTISSA_MASK (0x000FFFFFFFFFFFFFLL)
@@ -111,11 +109,11 @@
 #define FLOAT_NORMAL_MASK (0x00800000)
 #define FLOAT_E_OFFSET (150)
 
-IDATA
-simpleAddHighPrecision (U_64 * arg1, IDATA length, U_64 arg2)
+int32_t
+simpleAddHighPrecision (uint64_t * arg1, int32_t length, uint64_t arg2)
 {
   /* assumes length > 0 */
-  IDATA index = 1;
+  int32_t index = 1;
 
   *arg1 += arg2;
   if (arg2 <= *arg1)
@@ -125,20 +123,20 @@ simpleAddHighPrecision (U_64 * arg1, IDATA length, U_64 arg2)
 
   while (++arg1[index] == 0 && ++index < length);
 
-  return (IDATA) index == length;
+  return index == length;
 }
 
-IDATA
-addHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2, IDATA length2)
+int32_t
+addHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2)
 {
   /* addition is limited by length of arg1 as it this function is
    * storing the result in arg1 */
   /* fix for cc (GCC) 3.2 20020903 (Red Hat Linux 8.0 3.2-7): code generated does not
    * do the temp1 + temp2 + carry addition correct. carry is 64 bit because gcc has
    * subtle issues when you mix 64 / 32 bit maths.
   */
-  U_64 temp1, temp2, temp3; /* temporary variables to help the SH-4, and gcc */
-  U_64 carry;
-  IDATA index;
+  uint64_t temp1, temp2, temp3; /* temporary variables to help the SH-4, and gcc */
+  uint64_t carry;
+  int32_t index;
 
   if (length1 == 0 || length2 == 0)
     {
@@ -170,14 +168,14 @@ addHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2, IDATA length2)
 
   while (++arg1[index] == 0 && ++index < length1);
 
-  return (IDATA) index == length1;
+  return index == length1;
 }
 
 void
-subtractHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2, IDATA length2)
+subtractHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2)
 {
   /* assumes arg1 > arg2 */
-  IDATA index;
+  int32_t index;
   for (index = 0; index < length1; ++index)
     arg1[index] = ~arg1[index];
   simpleAddHighPrecision (arg1, length1, 1);
@@ -192,12 +190,10 @@ subtractHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2, IDATA length2)
   simpleAddHighPrecision (arg1, length1, 1);
 }
 
-U_32
-simpleMultiplyHighPrecision (U_64 * arg1, IDATA length, U_64 arg2)
-{
+static uint32_t simpleMultiplyHighPrecision(uint64_t* arg1, int32_t length, uint64_t arg2) {
   /* assumes arg2 only holds 32 bits of information */
-  U_64 product;
-  IDATA index;
+  uint64_t product;
+  int32_t index;
 
   index = 0;
   product = 0;
@@ -216,14 +212,14 @@ simpleMultiplyHighPrecision (U_64 * arg1, IDATA length, U_64 arg2)
   return HIGH_U32_FROM_VAR (product);
 }
 
-void
-simpleMultiplyAddHighPrecision (U_64 * arg1, IDATA length, U_64 arg2,
-                                U_32 * result)
+static void
+simpleMultiplyAddHighPrecision (uint64_t * arg1, int32_t length, uint64_t arg2,
+                                uint32_t * result)
 {
   /* Assumes result can hold the product and arg2 only holds 32 bits of information */
-  U_64 product;
-  IDATA index, resultIndex;
+  uint64_t product;
+  int32_t index, resultIndex;
 
   index = resultIndex = 0;
   product = 0;
@@ -253,12 +249,12 @@ simpleMultiplyAddHighPrecision (U_64 * arg1, IDATA length, U_64 arg2,
     }
 }
 
-#ifndef HY_LITTLE_ENDIAN
-void simpleMultiplyAddHighPrecisionBigEndianFix(U_64* arg1, IDATA length, U_64 arg2, U_32* result) {
+#if __BYTE_ORDER != __LITTLE_ENDIAN
+void simpleMultiplyAddHighPrecisionBigEndianFix(uint64_t* arg1, int32_t length, uint64_t arg2, uint32_t* result) {
   /* Assumes result can hold the product and arg2 only holds 32 bits of information */
-  U_64 product;
-  IDATA index, resultIndex;
+  uint64_t product;
+  int32_t index, resultIndex;
 
   index = resultIndex = 0;
   product = 0;
@@ -282,13 +278,13 @@ void simpleMultiplyAddHighPrecisionBigEndianFix(U_64* arg1, IDATA length, U_64 a
 #endif
 
 void
-multiplyHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2, IDATA length2,
-                       U_64 * result, IDATA length)
+multiplyHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2,
+                       uint64_t * result, int32_t length)
 {
   /* assumes result is large enough to hold product */
-  U_64* temp;
-  U_32* resultIn32;
-  IDATA count, index;
+  uint64_t* temp;
+  uint32_t* resultIn32;
+  int32_t count, index;
 
   if (length1 < length2)
     {
@@ -300,16 +296,16 @@ multiplyHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2, IDATA length2,
       length2 = count;
     }
 
-  memset (result, 0, sizeof (U_64) * length);
+  memset (result, 0, sizeof (uint64_t) * length);
 
   /* length1 > length2 */
-  resultIn32 = (U_32 *) result;
+  resultIn32 = reinterpret_cast<uint32_t*>(result);
   index = -1;
   for (count = 0; count < length2; ++count)
     {
       simpleMultiplyAddHighPrecision (arg1, length1, LOW_IN_U64 (arg2[count]), resultIn32 + (++index));
-#ifdef HY_LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
simpleMultiplyAddHighPrecision(arg1, length1, HIGH_IN_U64(arg2[count]), resultIn32 + (++index)); #else simpleMultiplyAddHighPrecisionBigEndianFix(arg1, length1, HIGH_IN_U64(arg2[count]), resultIn32 + (++index)); @@ -317,12 +313,12 @@ multiplyHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2, IDATA length2, } } -U_32 -simpleAppendDecimalDigitHighPrecision (U_64 * arg1, IDATA length, U_64 digit) +uint32_t +simpleAppendDecimalDigitHighPrecision (uint64_t * arg1, int32_t length, uint64_t digit) { /* assumes digit is less than 32 bits */ - U_64 arg; - IDATA index = 0; + uint64_t arg; + int32_t index = 0; digit <<= 32; do @@ -341,10 +337,10 @@ simpleAppendDecimalDigitHighPrecision (U_64 * arg1, IDATA length, U_64 digit) } void -simpleShiftLeftHighPrecision (U_64 * arg1, IDATA length, IDATA arg2) +simpleShiftLeftHighPrecision (uint64_t * arg1, int32_t length, int32_t arg2) { /* assumes length > 0 */ - IDATA index, offset; + int32_t index, offset; if (arg2 >= 64) { offset = arg2 >> 6; @@ -370,11 +366,11 @@ simpleShiftLeftHighPrecision (U_64 * arg1, IDATA length, IDATA arg2) *arg1 <<= arg2; } -IDATA -highestSetBit (U_64 * y) +int32_t +highestSetBit (uint64_t * y) { - U_32 x; - IDATA result; + uint32_t x; + int32_t result; if (*y == 0) return 0; @@ -441,11 +437,11 @@ highestSetBit (U_64 * y) return result + 1; } -IDATA -lowestSetBit (U_64 * y) +int32_t +lowestSetBit (uint64_t * y) { - U_32 x; - IDATA result; + uint32_t x; + int32_t result; if (*y == 0) return 0; @@ -513,10 +509,10 @@ lowestSetBit (U_64 * y) return result + 4; } -IDATA -highestSetBitHighPrecision (U_64 * arg, IDATA length) +int32_t +highestSetBitHighPrecision (uint64_t * arg, int32_t length) { - IDATA highBit; + int32_t highBit; while (--length >= 0) { @@ -528,10 +524,10 @@ highestSetBitHighPrecision (U_64 * arg, IDATA length) return 0; } -IDATA -lowestSetBitHighPrecision (U_64 * arg, IDATA length) +int32_t +lowestSetBitHighPrecision (uint64_t * arg, int32_t length) { - IDATA lowBit, index = -1; + int32_t lowBit, index = -1; while (++index < length) { @@ -543,8 +539,8 @@ lowestSetBitHighPrecision (U_64 * arg, IDATA length) return 0; } -IDATA -compareHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2, IDATA length2) +int32_t +compareHighPrecision (uint64_t * arg1, int32_t length1, uint64_t * arg2, int32_t length2) { while (--length1 >= 0 && arg1[length1] == 0); while (--length2 >= 0 && arg2[length2] == 0); @@ -569,11 +565,11 @@ compareHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2, IDATA length2) } jdouble -toDoubleHighPrecision (U_64 * arg, IDATA length) +toDoubleHighPrecision (uint64_t * arg, int32_t length) { - IDATA highBit; - U_64 mantissa, test64; - U_32 test; + int32_t highBit; + uint64_t mantissa, test64; + uint32_t test; jdouble result; while (length > 0 && arg[length - 1] == 0) @@ -674,37 +670,13 @@ toDoubleHighPrecision (U_64 * arg, IDATA length) return result; } -IDATA -tenToTheEHighPrecision (U_64 * result, IDATA length, jint e) -{ - /* size test */ - if (length < ((e / 19) + 1)) - return 0; - - memset (result, 0, length * sizeof (U_64)); - *result = 1; - - if (e == 0) - return 1; - - length = 1; - length = timesTenToTheEHighPrecision (result, length, e); - /* bad O(n) way of doing it, but simple */ - /* - do { - overflow = simpleAppendDecimalDigitHighPrecision(result, length, 0); - if (overflow) - result[length++] = overflow; - } while (--e); - */ - return length; -} +static uint64_t simpleMultiplyHighPrecision64(uint64_t* arg1, int32_t length, uint64_t arg2); -IDATA -timesTenToTheEHighPrecision (U_64 * 
result, IDATA length, jint e)
+int32_t
+timesTenToTheEHighPrecision (uint64_t * result, int32_t length, jint e)
 {
   /* assumes result can hold value */
-  U_64 overflow;
+  uint64_t overflow;
   int exp10 = e;
 
   if (e == 0)
@@ -797,10 +769,10 @@ timesTenToTheEHighPrecision (U_64 * result, IDATA length, jint e)
   return length;
 }
 
-U_64
+uint64_t
 doubleMantissa (jdouble z)
 {
-  U_64 m = DOUBLE_TO_LONGBITS (z);
+  uint64_t m = DOUBLE_TO_LONGBITS (z);
 
   if ((m & EXPONENT_MASK) != 0)
     m = (m & MANTISSA_MASK) | NORMAL_MASK;
@@ -810,11 +782,11 @@ doubleMantissa (jdouble z)
   return m;
 }
 
-IDATA
+int32_t
 doubleExponent (jdouble z)
 {
   /* assumes positive double */
-  IDATA k = HIGH_U32_FROM_VAR (z) >> 20;
+  int32_t k = HIGH_U32_FROM_VAR (z) >> 20;
 
   if (k)
     k -= E_OFFSET;
@@ -824,10 +796,8 @@ doubleExponent (jdouble z)
   return k;
 }
 
-UDATA
-floatMantissa (jfloat z)
-{
-  UDATA m = (UDATA) FLOAT_TO_INTBITS (z);
+uint32_t floatMantissa(jfloat z) {
+  uint32_t m = FLOAT_TO_INTBITS (z);
 
   if ((m & FLOAT_EXPONENT_MASK) != 0)
     m = (m & FLOAT_MANTISSA_MASK) | FLOAT_NORMAL_MASK;
@@ -837,11 +807,11 @@ floatMantissa (jfloat z)
   return m;
 }
 
-IDATA
+int32_t
 floatExponent (jfloat z)
 {
   /* assumes positive float */
-  IDATA k = FLOAT_TO_INTBITS (z) >> 23;
+  int32_t k = FLOAT_TO_INTBITS (z) >> 23;
   if (k)
     k -= FLOAT_E_OFFSET;
   else
@@ -851,13 +821,13 @@ floatExponent (jfloat z)
 }
 
 /* Allow a 64-bit value in arg2 */
-U_64
-simpleMultiplyHighPrecision64 (U_64 * arg1, IDATA length, U_64 arg2)
+uint64_t
+simpleMultiplyHighPrecision64 (uint64_t * arg1, int32_t length, uint64_t arg2)
 {
-  U_64 intermediate, carry1, carry2, prod1, prod2, sum;
-  U_64* pArg1;
-  IDATA index;
-  U_32 buf32;
+  uint64_t intermediate, carry1, carry2, prod1, prod2, sum;
+  uint64_t* pArg1;
+  int32_t index;
+  uint32_t buf32;
 
   index = 0;
   intermediate = 0;
@@ -869,7 +839,7 @@ simpleMultiplyHighPrecision64 (U_64 * arg1, IDATA length, U_64 arg2)
       if ((*pArg1 != 0) || (intermediate != 0))
         {
           prod1 =
-            (U_64) LOW_U32_FROM_VAR (arg2) * (U_64) LOW_U32_FROM_PTR (pArg1);
+            static_cast<uint64_t>(LOW_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(LOW_U32_FROM_PTR (pArg1));
 
           sum = intermediate + prod1;
           if ((sum < prod1) || (sum < intermediate))
            {
@@ -880,9 +850,9 @@ simpleMultiplyHighPrecision64 (U_64 * arg1, IDATA length, U_64 arg2)
              carry1 = 0;
            }
           prod1 =
-            (U_64) LOW_U32_FROM_VAR (arg2) * (U_64) HIGH_U32_FROM_PTR (pArg1);
+            static_cast<uint64_t>(LOW_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(HIGH_U32_FROM_PTR (pArg1));
           prod2 =
-            (U_64) HIGH_U32_FROM_VAR (arg2) * (U_64) LOW_U32_FROM_PTR (pArg1);
+            static_cast<uint64_t>(HIGH_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(LOW_U32_FROM_PTR (pArg1));
           intermediate = carry2 + HIGH_IN_U64 (sum) + prod1 + prod2;
 
           if ((intermediate < prod1) || (intermediate < prod2))
            {
@@ -896,7 +866,7 @@ simpleMultiplyHighPrecision64 (U_64 * arg1, IDATA length, U_64 arg2)
           buf32 = HIGH_U32_FROM_PTR (pArg1);
           HIGH_U32_FROM_PTR (pArg1) = LOW_U32_FROM_VAR (intermediate);
           intermediate = carry1 + HIGH_IN_U64 (intermediate)
-            + (U_64) HIGH_U32_FROM_VAR (arg2) * (U_64) buf32;
+            + static_cast<uint64_t>(HIGH_U32_FROM_VAR (arg2)) * static_cast<uint64_t>(buf32);
         }
       pArg1++;
     }
diff --git a/luni/src/main/native/cbigint.h b/luni/src/main/native/cbigint.h
index 1e525cb5..2bf5b1b1 100644
--- a/luni/src/main/native/cbigint.h
+++ b/luni/src/main/native/cbigint.h
@@ -18,47 +18,90 @@
 #if !defined(cbigint_h)
 #define cbigint_h
 
-#include "fltconst.h"
 #include "JNIHelp.h"
+#include <endian.h>
+#include <stdint.h>
+#include <string.h>
+
+/* IEEE floats consist of: sign bit, exponent field, significand field
+   single: 31 = sign bit, 30..23 = exponent (8 bits), 22..0 = significand (23 bits)
+   double: 63 = sign bit, 62..52 = exponent (11 bits), 51..0 = significand (52 bits)
+   inf == (all exponent bits set) and (all mantissa bits clear)
+   nan == (all exponent bits set) and (at least one mantissa bit set)
+   finite == (at least one exponent bit clear)
+   zero == (all exponent bits clear) and (all mantissa bits clear)
+   denormal == (all exponent bits clear) and (at least one mantissa bit set)
+   positive == sign bit clear
+   negative == sign bit set
+*/
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define DOUBLE_LO_OFFSET 0
+#define DOUBLE_HI_OFFSET 1
+#define LONG_LO_OFFSET 0
+#define LONG_HI_OFFSET 1
+#else
+#define DOUBLE_LO_OFFSET 1
+#define DOUBLE_HI_OFFSET 0
+#define LONG_LO_OFFSET 1
+#define LONG_HI_OFFSET 0
+#endif
+
+#define DOUBLE_EXPONENT_MASK_HI 0x7FF00000
+#define DOUBLE_MANTISSA_MASK_HI 0x000FFFFF
+
+union U64U32DBL {
+    uint64_t u64val;
+    uint32_t u32val[2];
+    int32_t i32val[2];
+    double dval;
+};
+
+#define DOUBLE_TO_LONGBITS(dbl) (*(reinterpret_cast<uint64_t*>(&dbl)))
+#define FLOAT_TO_INTBITS(flt) (*(reinterpret_cast<uint32_t*>(&flt)))
+#define INTBITS_TO_FLOAT(bits) (*(reinterpret_cast<jfloat*>(&bits)))
+
+/* Replace P_FLOAT_HI and P_FLOAT_LOW */
+/* These macros are used to access the high and low 32-bit parts of a double (64-bit) value. */
+#define LOW_U32_FROM_DBL_PTR(dblptr) ((reinterpret_cast<U64U32DBL*>(dblptr))->u32val[DOUBLE_LO_OFFSET])
+#define HIGH_U32_FROM_DBL_PTR(dblptr) ((reinterpret_cast<U64U32DBL*>(dblptr))->u32val[DOUBLE_HI_OFFSET])
+#define LOW_I32_FROM_DBL_PTR(dblptr) ((reinterpret_cast<U64U32DBL*>(dblptr))->i32val[DOUBLE_LO_OFFSET])
+#define HIGH_I32_FROM_DBL_PTR(dblptr) ((reinterpret_cast<U64U32DBL*>(dblptr))->i32val[DOUBLE_HI_OFFSET])
+#define LOW_U32_FROM_DBL(dbl) LOW_U32_FROM_DBL_PTR(&(dbl))
+#define HIGH_U32_FROM_DBL(dbl) HIGH_U32_FROM_DBL_PTR(&(dbl))
+#define LOW_U32_FROM_LONG64_PTR(long64ptr) ((reinterpret_cast<U64U32DBL*>(long64ptr))->u32val[LONG_LO_OFFSET])
+#define HIGH_U32_FROM_LONG64_PTR(long64ptr) ((reinterpret_cast<U64U32DBL*>(long64ptr))->u32val[LONG_HI_OFFSET])
+#define LOW_I32_FROM_LONG64_PTR(long64ptr) ((reinterpret_cast<U64U32DBL*>(long64ptr))->i32val[LONG_LO_OFFSET])
+#define HIGH_I32_FROM_LONG64_PTR(long64ptr) ((reinterpret_cast<U64U32DBL*>(long64ptr))->i32val[LONG_HI_OFFSET])
+#define LOW_U32_FROM_LONG64(long64) LOW_U32_FROM_LONG64_PTR(&(long64))
+#define HIGH_U32_FROM_LONG64(long64) HIGH_U32_FROM_LONG64_PTR(&(long64))
+#define LOW_I32_FROM_LONG64(long64) LOW_I32_FROM_LONG64_PTR(&(long64))
+#define HIGH_I32_FROM_LONG64(long64) HIGH_I32_FROM_LONG64_PTR(&(long64))
+#define IS_DENORMAL_DBL_PTR(dblptr) (((HIGH_U32_FROM_DBL_PTR(dblptr) & DOUBLE_EXPONENT_MASK_HI) == 0) && ((HIGH_U32_FROM_DBL_PTR(dblptr) & DOUBLE_MANTISSA_MASK_HI) != 0 || (LOW_U32_FROM_DBL_PTR(dblptr) != 0)))
+#define IS_DENORMAL_DBL(dbl) IS_DENORMAL_DBL_PTR(&(dbl))
+
 #define LOW_U32_FROM_VAR(u64) LOW_U32_FROM_LONG64(u64)
 #define LOW_U32_FROM_PTR(u64ptr) LOW_U32_FROM_LONG64_PTR(u64ptr)
 #define HIGH_U32_FROM_VAR(u64) HIGH_U32_FROM_LONG64(u64)
 #define HIGH_U32_FROM_PTR(u64ptr) HIGH_U32_FROM_LONG64_PTR(u64ptr)
 
-#if defined(__cplusplus)
-extern "C"
-{
-#endif
-  void multiplyHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2,
-                              IDATA length2, U_64 * result, IDATA length);
-  U_32 simpleAppendDecimalDigitHighPrecision (U_64 * arg1, IDATA length,
-                                              U_64 digit);
-  jdouble toDoubleHighPrecision (U_64 * arg, IDATA length);
-  IDATA tenToTheEHighPrecision (U_64 * result, IDATA length, jint e);
-  U_64 doubleMantissa (jdouble z);
-  IDATA compareHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2,
-                              IDATA length2);
-  IDATA highestSetBitHighPrecision (U_64 * arg, IDATA length);
-  void subtractHighPrecision (U_64 * arg1, IDATA
length1, U_64 * arg2, - IDATA length2); - IDATA doubleExponent (jdouble z); - U_32 simpleMultiplyHighPrecision (U_64 * arg1, IDATA length, U_64 arg2); - IDATA addHighPrecision (U_64 * arg1, IDATA length1, U_64 * arg2, - IDATA length2); - void simpleMultiplyAddHighPrecisionBigEndianFix (U_64 * arg1, IDATA length, - U_64 arg2, U_32 * result); - IDATA lowestSetBit (U_64 * y); - IDATA timesTenToTheEHighPrecision (U_64 * result, IDATA length, jint e); - void simpleMultiplyAddHighPrecision (U_64 * arg1, IDATA length, U_64 arg2, - U_32 * result); - IDATA highestSetBit (U_64 * y); - IDATA lowestSetBitHighPrecision (U_64 * arg, IDATA length); - void simpleShiftLeftHighPrecision (U_64 * arg1, IDATA length, IDATA arg2); - UDATA floatMantissa (jfloat z); - U_64 simpleMultiplyHighPrecision64 (U_64 * arg1, IDATA length, U_64 arg2); - IDATA simpleAddHighPrecision (U_64 * arg1, IDATA length, U_64 arg2); - IDATA floatExponent (jfloat z); -#if defined(__cplusplus) -} -#endif + +void multiplyHighPrecision(uint64_t* arg1, int32_t length1, uint64_t* arg2, int32_t length2, + uint64_t* result, int32_t length); +uint32_t simpleAppendDecimalDigitHighPrecision(uint64_t* arg1, int32_t length, uint64_t digit); +jdouble toDoubleHighPrecision(uint64_t* arg, int32_t length); +uint64_t doubleMantissa(jdouble z); +int32_t compareHighPrecision(uint64_t* arg1, int32_t length1, uint64_t* arg2, int32_t length2); +int32_t highestSetBitHighPrecision(uint64_t* arg, int32_t length); +void subtractHighPrecision(uint64_t* arg1, int32_t length1, uint64_t* arg2, int32_t length2); +int32_t doubleExponent(jdouble z); +int32_t addHighPrecision(uint64_t* arg1, int32_t length1, uint64_t* arg2, int32_t length2); +int32_t lowestSetBit(uint64_t* y); +int32_t timesTenToTheEHighPrecision(uint64_t* result, int32_t length, jint e); +int32_t highestSetBit(uint64_t* y); +int32_t lowestSetBitHighPrecision(uint64_t* arg, int32_t length); +void simpleShiftLeftHighPrecision(uint64_t* arg1, int32_t length, int32_t arg2); +uint32_t floatMantissa(jfloat z); +int32_t simpleAddHighPrecision(uint64_t* arg1, int32_t length, uint64_t arg2); +int32_t floatExponent(jfloat z); + #endif /* cbigint_h */ diff --git a/luni/src/main/native/fltconst.h b/luni/src/main/native/fltconst.h deleted file mode 100644 index a5ed1c80..00000000 --- a/luni/src/main/native/fltconst.h +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#if !defined(fltconst_h) -#define fltconst_h - -#include -#include -#if __BYTE_ORDER == __LITTLE_ENDIAN -#define HY_LITTLE_ENDIAN -#elif __BYTE_ORDER == __BIG_ENDIAN -#define HY_BIG_ENDIAN -#endif - -/** - * By default order doubles in the native (that is big/little endian) ordering. 
- */ -#define HY_PLATFORM_DOUBLE_ORDER - -#include - -typedef int32_t I_32; -typedef uint32_t U_32; -typedef uint64_t U_64; - -typedef I_32 IDATA; -typedef U_32 UDATA; - -#define U32(x) ((U_32) (x)) -#define I32(x) ((I_32) (x)) -#define U32P(x) ((U_32 *) (x)) - -/* IEEE floats consist of: sign bit, exponent field, significand field - single: 31 = sign bit, 30..23 = exponent (8 bits), 22..0 = significand (23 bits) - double: 63 = sign bit, 62..52 = exponent (11 bits), 51..0 = significand (52 bits) - inf == (all exponent bits set) and (all mantissa bits clear) - nan == (all exponent bits set) and (at least one mantissa bit set) - finite == (at least one exponent bit clear) - zero == (all exponent bits clear) and (all mantissa bits clear) - denormal == (all exponent bits clear) and (at least one mantissa bit set) - positive == sign bit clear - negative == sign bit set -*/ -#define MAX_U32_DOUBLE (double) (4294967296.0) /* 2^32 */ -#define MAX_U32_SINGLE (float) (4294967296.0) /* 2^32 */ -#define HY_POS_PI (double) (3.141592653589793) - -#ifdef HY_LITTLE_ENDIAN -#ifdef HY_PLATFORM_DOUBLE_ORDER -#define DOUBLE_LO_OFFSET 0 -#define DOUBLE_HI_OFFSET 1 -#else -#define DOUBLE_LO_OFFSET 1 -#define DOUBLE_HI_OFFSET 0 -#endif -#define LONG_LO_OFFSET 0 -#define LONG_HI_OFFSET 1 -#else -#ifdef HY_PLATFORM_DOUBLE_ORDER -#define DOUBLE_LO_OFFSET 1 -#define DOUBLE_HI_OFFSET 0 -#else -#define DOUBLE_LO_OFFSET 0 -#define DOUBLE_HI_OFFSET 1 -#endif -#define LONG_LO_OFFSET 1 -#define LONG_HI_OFFSET 0 -#endif - -#define RETURN_FINITE 0 -#define RETURN_NAN 1 -#define RETURN_POS_INF 2 -#define RETURN_NEG_INF 3 -#define DOUBLE_SIGN_MASK_HI 0x80000000 -#define DOUBLE_EXPONENT_MASK_HI 0x7FF00000 -#define DOUBLE_MANTISSA_MASK_LO 0xFFFFFFFF -#define DOUBLE_MANTISSA_MASK_HI 0x000FFFFF -#define SINGLE_SIGN_MASK 0x80000000 -#define SINGLE_EXPONENT_MASK 0x7F800000 -#define SINGLE_MANTISSA_MASK 0x007FFFFF -#define SINGLE_NAN_BITS (SINGLE_EXPONENT_MASK | 0x00400000) - -typedef union u64u32dbl_tag { - U_64 u64val; - U_32 u32val[2]; - I_32 i32val[2]; - double dval; -} U64U32DBL; - -/* Replace P_FLOAT_HI and P_FLOAT_LOW */ -/* These macros are used to access the high and low 32-bit parts of a double (64-bit) value. 
*/ -#define LOW_U32_FROM_DBL_PTR(dblptr) (((U64U32DBL *)(dblptr))->u32val[DOUBLE_LO_OFFSET]) -#define HIGH_U32_FROM_DBL_PTR(dblptr) (((U64U32DBL *)(dblptr))->u32val[DOUBLE_HI_OFFSET]) -#define LOW_I32_FROM_DBL_PTR(dblptr) (((U64U32DBL *)(dblptr))->i32val[DOUBLE_LO_OFFSET]) -#define HIGH_I32_FROM_DBL_PTR(dblptr) (((U64U32DBL *)(dblptr))->i32val[DOUBLE_HI_OFFSET]) -#define LOW_U32_FROM_DBL(dbl) LOW_U32_FROM_DBL_PTR(&(dbl)) -#define HIGH_U32_FROM_DBL(dbl) HIGH_U32_FROM_DBL_PTR(&(dbl)) -#define LOW_I32_FROM_DBL(dbl) LOW_I32_FROM_DBL_PTR(&(dbl)) -#define HIGH_I32_FROM_DBL(dbl) HIGH_I32_FROM_DBL_PTR(&(dbl)) -#define LOW_U32_FROM_LONG64_PTR(long64ptr) (((U64U32DBL *)(long64ptr))->u32val[LONG_LO_OFFSET]) -#define HIGH_U32_FROM_LONG64_PTR(long64ptr) (((U64U32DBL *)(long64ptr))->u32val[LONG_HI_OFFSET]) -#define LOW_I32_FROM_LONG64_PTR(long64ptr) (((U64U32DBL *)(long64ptr))->i32val[LONG_LO_OFFSET]) -#define HIGH_I32_FROM_LONG64_PTR(long64ptr) (((U64U32DBL *)(long64ptr))->i32val[LONG_HI_OFFSET]) -#define LOW_U32_FROM_LONG64(long64) LOW_U32_FROM_LONG64_PTR(&(long64)) -#define HIGH_U32_FROM_LONG64(long64) HIGH_U32_FROM_LONG64_PTR(&(long64)) -#define LOW_I32_FROM_LONG64(long64) LOW_I32_FROM_LONG64_PTR(&(long64)) -#define HIGH_I32_FROM_LONG64(long64) HIGH_I32_FROM_LONG64_PTR(&(long64)) -#define IS_ZERO_DBL_PTR(dblptr) ((LOW_U32_FROM_DBL_PTR(dblptr) == 0) && ((HIGH_U32_FROM_DBL_PTR(dblptr) == 0) || (HIGH_U32_FROM_DBL_PTR(dblptr) == DOUBLE_SIGN_MASK_HI))) -#define IS_ONE_DBL_PTR(dblptr) ((HIGH_U32_FROM_DBL_PTR(dblptr) == 0x3ff00000 || HIGH_U32_FROM_DBL_PTR(dblptr) == 0xbff00000) && (LOW_U32_FROM_DBL_PTR(dblptr) == 0)) -#define IS_NAN_DBL_PTR(dblptr) (((HIGH_U32_FROM_DBL_PTR(dblptr) & DOUBLE_EXPONENT_MASK_HI) == DOUBLE_EXPONENT_MASK_HI) && (LOW_U32_FROM_DBL_PTR(dblptr) | (HIGH_U32_FROM_DBL_PTR(dblptr) & DOUBLE_MANTISSA_MASK_HI))) -#define IS_INF_DBL_PTR(dblptr) (((HIGH_U32_FROM_DBL_PTR(dblptr) & (DOUBLE_EXPONENT_MASK_HI|DOUBLE_MANTISSA_MASK_HI)) == DOUBLE_EXPONENT_MASK_HI) && (LOW_U32_FROM_DBL_PTR(dblptr) == 0)) -#define IS_DENORMAL_DBL_PTR(dblptr) (((HIGH_U32_FROM_DBL_PTR(dblptr) & DOUBLE_EXPONENT_MASK_HI) == 0) && ((HIGH_U32_FROM_DBL_PTR(dblptr) & DOUBLE_MANTISSA_MASK_HI) != 0 || (LOW_U32_FROM_DBL_PTR(dblptr) != 0))) -#define IS_FINITE_DBL_PTR(dblptr) ((HIGH_U32_FROM_DBL_PTR(dblptr) & DOUBLE_EXPONENT_MASK_HI) < DOUBLE_EXPONENT_MASK_HI) -#define IS_POSITIVE_DBL_PTR(dblptr) ((HIGH_U32_FROM_DBL_PTR(dblptr) & DOUBLE_SIGN_MASK_HI) == 0) -#define IS_NEGATIVE_DBL_PTR(dblptr) ((HIGH_U32_FROM_DBL_PTR(dblptr) & DOUBLE_SIGN_MASK_HI) != 0) -#define IS_NEGATIVE_MAX_DBL_PTR(dblptr) ((HIGH_U32_FROM_DBL_PTR(dblptr) == 0xFFEFFFFF) && (LOW_U32_FROM_DBL_PTR(dblptr) == 0xFFFFFFFF)) -#define IS_ZERO_DBL(dbl) IS_ZERO_DBL_PTR(&(dbl)) -#define IS_ONE_DBL(dbl) IS_ONE_DBL_PTR(&(dbl)) -#define IS_NAN_DBL(dbl) IS_NAN_DBL_PTR(&(dbl)) -#define IS_INF_DBL(dbl) IS_INF_DBL_PTR(&(dbl)) -#define IS_DENORMAL_DBL(dbl) IS_DENORMAL_DBL_PTR(&(dbl)) -#define IS_FINITE_DBL(dbl) IS_FINITE_DBL_PTR(&(dbl)) -#define IS_POSITIVE_DBL(dbl) IS_POSITIVE_DBL_PTR(&(dbl)) -#define IS_NEGATIVE_DBL(dbl) IS_NEGATIVE_DBL_PTR(&(dbl)) -#define IS_NEGATIVE_MAX_DBL(dbl) IS_NEGATIVE_MAX_DBL_PTR(&(dbl)) -#define IS_ZERO_SNGL_PTR(fltptr) ((*U32P((fltptr)) & (U_32)~SINGLE_SIGN_MASK) == (U_32)0) -#define IS_ONE_SNGL_PTR(fltptr) ((*U32P((fltptr)) == 0x3f800000) || (*U32P((fltptr)) == 0xbf800000)) -#define IS_NAN_SNGL_PTR(fltptr) ((*U32P((fltptr)) & (U_32)~SINGLE_SIGN_MASK) > (U_32)SINGLE_EXPONENT_MASK) -#define IS_INF_SNGL_PTR(fltptr) ((*U32P((fltptr)) & 
(U_32)~SINGLE_SIGN_MASK) == (U_32)SINGLE_EXPONENT_MASK) -#define IS_DENORMAL_SNGL_PTR(fltptr) (((*U32P((fltptr)) & (U_32)~SINGLE_SIGN_MASK)-(U_32)1) < (U_32)SINGLE_MANTISSA_MASK) -#define IS_FINITE_SNGL_PTR(fltptr) ((*U32P((fltptr)) & (U_32)~SINGLE_SIGN_MASK) < (U_32)SINGLE_EXPONENT_MASK) -#define IS_POSITIVE_SNGL_PTR(fltptr) ((*U32P((fltptr)) & (U_32)SINGLE_SIGN_MASK) == (U_32)0) -#define IS_NEGATIVE_SNGL_PTR(fltptr) ((*U32P((fltptr)) & (U_32)SINGLE_SIGN_MASK) != (U_32)0) -#define IS_ZERO_SNGL(flt) IS_ZERO_SNGL_PTR(&(flt)) -#define IS_ONE_SNGL(flt) IS_ONE_SNGL_PTR(&(flt)) -#define IS_NAN_SNGL(flt) IS_NAN_SNGL_PTR(&(flt)) -#define IS_INF_SNGL(flt) IS_INF_SNGL_PTR(&(flt)) -#define IS_DENORMAL_SNGL(flt) IS_DENORMAL_SNGL_PTR(&(flt)) -#define IS_FINITE_SNGL(flt) IS_FINITE_SNGL_PTR(&(flt)) -#define IS_POSITIVE_SNGL(flt) IS_POSITIVE_SNGL_PTR(&(flt)) -#define IS_NEGATIVE_SNGL(flt) IS_NEGATIVE_SNGL_PTR(&(flt)) -#define SET_NAN_DBL_PTR(dblptr) HIGH_U32_FROM_DBL_PTR(dblptr) = (DOUBLE_EXPONENT_MASK_HI | 0x00080000); LOW_U32_FROM_DBL_PTR(dblptr) = 0 -#define SET_PZERO_DBL_PTR(dblptr) HIGH_U32_FROM_DBL_PTR(dblptr) = 0; LOW_U32_FROM_DBL_PTR(dblptr) = 0 -#define SET_NZERO_DBL_PTR(dblptr) HIGH_U32_FROM_DBL_PTR(dblptr) = DOUBLE_SIGN_MASK_HI; LOW_U32_FROM_DBL_PTR(dblptr) = 0 -#define SET_PINF_DBL_PTR(dblptr) HIGH_U32_FROM_DBL_PTR(dblptr) = DOUBLE_EXPONENT_MASK_HI; LOW_U32_FROM_DBL_PTR(dblptr) = 0 -#define SET_NINF_DBL_PTR(dblptr) HIGH_U32_FROM_DBL_PTR(dblptr) = (DOUBLE_EXPONENT_MASK_HI | DOUBLE_SIGN_MASK_HI); LOW_U32_FROM_DBL_PTR(dblptr) = 0 -#define SET_NAN_SNGL_PTR(fltptr) *U32P((fltptr)) = ((U_32)SINGLE_NAN_BITS) -#define SET_PZERO_SNGL_PTR(fltptr) *U32P((fltptr)) = 0 -#define SET_NZERO_SNGL_PTR(fltptr) *U32P((fltptr)) = SINGLE_SIGN_MASK -#define SET_PINF_SNGL_PTR(fltptr) *U32P((fltptr)) = SINGLE_EXPONENT_MASK -#define SET_NINF_SNGL_PTR(fltptr) *U32P((fltptr)) = (SINGLE_EXPONENT_MASK | SINGLE_SIGN_MASK) - -#if defined(HY_WORD64) - #define PTR_DOUBLE_VALUE(dstPtr, aDoublePtr) ((U64U32DBL *)(aDoublePtr))->u64val = ((U64U32DBL *)(dstPtr))->u64val - #define PTR_DOUBLE_STORE(dstPtr, aDoublePtr) ((U64U32DBL *)(dstPtr))->u64val = ((U64U32DBL *)(aDoublePtr))->u64val - #define STORE_LONG(dstPtr, hi, lo) ((U64U32DBL *)(dstPtr))->u64val = (((U_64)(hi)) << 32) | (lo) -#else - /* on some platforms (HP720) we cannot reference an unaligned float. Build them by hand, one U_32 at a time. 
   */
-  #if defined(ATOMIC_FLOAT_ACCESS)
-    #define PTR_DOUBLE_STORE(dstPtr, aDoublePtr) HIGH_U32_FROM_DBL_PTR(dstPtr) = HIGH_U32_FROM_DBL_PTR(aDoublePtr); LOW_U32_FROM_DBL_PTR(dstPtr) = LOW_U32_FROM_DBL_PTR(aDoublePtr)
-    #define PTR_DOUBLE_VALUE(dstPtr, aDoublePtr) HIGH_U32_FROM_DBL_PTR(aDoublePtr) = HIGH_U32_FROM_DBL_PTR(dstPtr); LOW_U32_FROM_DBL_PTR(aDoublePtr) = LOW_U32_FROM_DBL_PTR(dstPtr)
-  #else
-    #define PTR_DOUBLE_STORE(dstPtr, aDoublePtr) (*(dstPtr) = *(aDoublePtr))
-    #define PTR_DOUBLE_VALUE(dstPtr, aDoublePtr) (*(aDoublePtr) = *(dstPtr))
-  #endif
-
-  #define STORE_LONG(dstPtr, hi, lo) HIGH_U32_FROM_LONG64_PTR(dstPtr) = (hi); LOW_U32_FROM_LONG64_PTR(dstPtr) = (lo)
-#endif /* HY_WORD64 */
-
-#define PTR_SINGLE_VALUE(dstPtr, aSinglePtr) (*U32P(aSinglePtr) = *U32P(dstPtr))
-#define PTR_SINGLE_STORE(dstPtr, aSinglePtr) *((U_32 *)(dstPtr)) = (*U32P(aSinglePtr))
-
-#endif /* fltconst_h */
diff --git a/luni/src/main/native/org_apache_harmony_luni_util_NumberConvert.cpp b/luni/src/main/native/org_apache_harmony_luni_util_NumberConvert.cpp
index f42327a3..015a8ae3 100644
--- a/luni/src/main/native/org_apache_harmony_luni_util_NumberConvert.cpp
+++ b/luni/src/main/native/org_apache_harmony_luni_util_NumberConvert.cpp
@@ -80,14 +80,14 @@ void NumberConverter_bigIntDigitGeneratorInstImpl(JNIEnv* env, jobject inst, jlo
   jfieldID fid;
   jintArray uArrayObject;
 
-  U_64 R[RM_SIZE], S[STemp_SIZE], mplus[RM_SIZE], mminus[RM_SIZE],
+  uint64_t R[RM_SIZE], S[STemp_SIZE], mplus[RM_SIZE], mminus[RM_SIZE],
     Temp[STemp_SIZE];
 
-  memset (R     , 0, RM_SIZE    * sizeof (U_64));
-  memset (S     , 0, STemp_SIZE * sizeof (U_64));
-  memset (mplus , 0, RM_SIZE    * sizeof (U_64));
-  memset (mminus, 0, RM_SIZE    * sizeof (U_64));
-  memset (Temp  , 0, STemp_SIZE * sizeof (U_64));
+  memset (R     , 0, RM_SIZE    * sizeof (uint64_t));
+  memset (S     , 0, STemp_SIZE * sizeof (uint64_t));
+  memset (mplus , 0, RM_SIZE    * sizeof (uint64_t));
+  memset (mminus, 0, RM_SIZE    * sizeof (uint64_t));
+  memset (Temp  , 0, STemp_SIZE * sizeof (uint64_t));
 
   if (e >= 0)
     {
@@ -134,7 +134,7 @@ void NumberConverter_bigIntDigitGeneratorInstImpl(JNIEnv* env, jobject inst, jlo
         }
     }
 
-  k = (int) ceil ((e + p - 1) * INV_LOG_OF_TEN_BASE_2 - 1e-10);
+  k = static_cast<int>(ceil ((e + p - 1) * INV_LOG_OF_TEN_BASE_2 - 1e-10));
 
   if (k > 0)
     {
@@ -150,8 +150,8 @@ void NumberConverter_bigIntDigitGeneratorInstImpl(JNIEnv* env, jobject inst, jlo
   RLength = mplus_Length = mminus_Length = RM_SIZE;
   SLength = TempLength = STemp_SIZE;
 
-  memset (Temp + RM_SIZE, 0, (STemp_SIZE - RM_SIZE) * sizeof (U_64));
-  memcpy (Temp, R, RM_SIZE * sizeof (U_64));
+  memset (Temp + RM_SIZE, 0, (STemp_SIZE - RM_SIZE) * sizeof (uint64_t));
+  memcpy (Temp, R, RM_SIZE * sizeof (uint64_t));
 
   while (RLength > 1 && R[RLength - 1] == 0)
     --RLength;
@@ -184,7 +184,7 @@ void NumberConverter_bigIntDigitGeneratorInstImpl(JNIEnv* env, jobject inst, jlo
 
   clazz = env->GetObjectClass(inst);
   fid = env->GetFieldID(clazz, "uArray", "[I");
-  uArrayObject = (jintArray) env->GetObjectField(inst, fid);
+  uArrayObject = reinterpret_cast<jintArray>(env->GetObjectField(inst, fid));
   ScopedIntArrayRW uArray(env, uArrayObject);
   if (uArray.get() == NULL) {
     return;
@@ -198,7 +198,7 @@ void NumberConverter_bigIntDigitGeneratorInstImpl(JNIEnv* env, jobject inst, jlo
         {
           TempLength = SLength + 1;
           Temp[SLength] = 0;
-          memcpy (Temp, S, SLength * sizeof (U_64));
+          memcpy (Temp, S, SLength * sizeof (uint64_t));
           simpleShiftLeftHighPrecision (Temp, TempLength, i);
           if (compareHighPrecision (R, RLength, Temp, TempLength) >= 0)
             {
@@ -209,8 +209,8 @@ void NumberConverter_bigIntDigitGeneratorInstImpl(JNIEnv* env, jobject inst, jlo
 
   low = compareHighPrecision (R, RLength, mminus, mminus_Length) <= 0;
 
-  memset (Temp + RLength, 0, (STemp_SIZE - RLength) * sizeof (U_64));
-  memcpy (Temp, R, RLength * sizeof (U_64));
+  memset (Temp + RLength, 0, (STemp_SIZE - RLength) * sizeof (uint64_t));
+  memcpy (Temp, R, RLength * sizeof (uint64_t));
   TempLength = (RLength > mplus_Length ? RLength : mplus_Length) + 1;
   addHighPrecision (Temp, TempLength, mplus, mplus_Length);
diff --git a/luni/src/main/native/org_apache_harmony_luni_util_fltparse.cpp b/luni/src/main/native/org_apache_harmony_luni_util_fltparse.cpp
index 6995f46c..b483874c 100644
--- a/luni/src/main/native/org_apache_harmony_luni_util_fltparse.cpp
+++ b/luni/src/main/native/org_apache_harmony_luni_util_fltparse.cpp
@@ -65,8 +65,6 @@
 #endif /* USE_L */
 #endif /* USE_LL */
 
-#define DOUBLE_TO_LONGBITS(dbl) (*((U_64 *)(&dbl)))
-
 /* Keep a count of the number of times we decrement and increment to
  * approximate the double, and attempt to detect the case where we
  * could potentially toggle back and forth between decrementing and
@@ -103,8 +101,7 @@
     } \
   }
 
-#define allocateU64(x, n) if (!((x) = (U_64*) malloc((n) * sizeof(U_64)))) goto OutOfMemory;
-#define release(r) if ((r)) free((r));
+#define allocateU64(x, n) if (!((x) = reinterpret_cast<uint64_t*>(malloc((n) * sizeof(uint64_t))))) goto OutOfMemory;
 
 /* *********************************************************** */
 
@@ -137,8 +134,8 @@ static const jdouble double_tens[] = {
 /* *********************************************************** */
 
 /* ************** private function declarations ************** */
-static jdouble createDouble1 (JNIEnv* env, U_64 * f, IDATA length, jint e);
-static jdouble doubleAlgorithm (JNIEnv* env, U_64 * f, IDATA length, jint e,
                                 jdouble z);
+static jdouble createDouble1 (JNIEnv* env, uint64_t * f, int32_t length, jint e);
+static jdouble doubleAlgorithm (JNIEnv* env, uint64_t * f, int32_t length, jint e,
                                 jdouble z);
 
 /* *********************************************************** */
@@ -150,15 +147,15 @@ static jdouble doubleAlgorithm (JNIEnv* env, U_64 * f, IDATA length, jint e,
 static jdouble createDouble(JNIEnv* env, const char* s, jint e) {
   /* assumes s is a null terminated string with at least one
    * character in it */
-  U_64 def[DEFAULT_DOUBLE_WIDTH];
-  U_64 defBackup[DEFAULT_DOUBLE_WIDTH];
-  U_64* f;
-  U_64* fNoOverflow;
-  U_64* g;
-  U_64* tempBackup;
-  U_32 overflow;
+  uint64_t def[DEFAULT_DOUBLE_WIDTH];
+  uint64_t defBackup[DEFAULT_DOUBLE_WIDTH];
+  uint64_t* f;
+  uint64_t* fNoOverflow;
+  uint64_t* g;
+  uint64_t* tempBackup;
+  uint32_t overflow;
   jdouble result;
-  IDATA index = 1;
+  int32_t index = 1;
   int unprocessedDigits = 0;
 
   f = def;
@@ -173,7 +170,7 @@ static jdouble createDouble(JNIEnv* env, const char* s, jint e) {
            * back out of it if there is no more room, i.e. index >
            * MAX_DOUBLE_ACCURACY_WIDTH.
*/ - memcpy (fNoOverflow, f, sizeof (U_64) * index); + memcpy (fNoOverflow, f, sizeof (uint64_t) * index); overflow = simpleAppendDecimalDigitHighPrecision (f, index, *s - '0'); if (overflow) @@ -187,7 +184,7 @@ static jdouble createDouble(JNIEnv* env, const char* s, jint e) { if (index >= MAX_DOUBLE_ACCURACY_WIDTH) { index--; - memcpy (f, fNoOverflow, sizeof (U_64) * index); + memcpy (f, fNoOverflow, sizeof (uint64_t) * index); break; } if (tempBackup) @@ -253,8 +250,8 @@ static jdouble createDouble(JNIEnv* env, const char* s, jint e) { } -static jdouble createDouble1(JNIEnv* env, U_64* f, IDATA length, jint e) { - IDATA numBits; +static jdouble createDouble1(JNIEnv* env, uint64_t* f, int32_t length, jint e) { + int32_t numBits; jdouble result; static const jint APPROX_MIN_MAGNITUDE = -309; @@ -342,14 +339,14 @@ static jdouble createDouble1(JNIEnv* env, U_64* f, IDATA length, jint e) { * is currently set such that if the oscillation occurs more than twice * then return the original approximation. */ -static jdouble doubleAlgorithm(JNIEnv*, U_64* f, IDATA length, jint e, jdouble z) { - U_64 m; - IDATA k, comparison, comparison2; - U_64* x; - U_64* y; - U_64* D; - U_64* D2; - IDATA xLength, yLength, DLength, D2Length, decApproxCount, incApproxCount; +static jdouble doubleAlgorithm(JNIEnv*, uint64_t* f, int32_t length, jint e, jdouble z) { + uint64_t m; + int32_t k, comparison, comparison2; + uint64_t* x; + uint64_t* y; + uint64_t* D; + uint64_t* D2; + int32_t xLength, yLength, DLength, D2Length, decApproxCount, incApproxCount; x = y = D = D2 = 0; xLength = yLength = DLength = D2Length = 0; @@ -363,21 +360,21 @@ static jdouble doubleAlgorithm(JNIEnv*, U_64* f, IDATA length, jint e, jdouble z if (x && x != f) free(x); - release (y); - release (D); - release (D2); + free(y); + free(D); + free(D2); if (e >= 0 && k >= 0) { xLength = sizeOfTenToTheE (e) + length; allocateU64 (x, xLength); - memset (x + length, 0, sizeof (U_64) * (xLength - length)); - memcpy (x, f, sizeof (U_64) * length); + memset (x + length, 0, sizeof (uint64_t) * (xLength - length)); + memcpy (x, f, sizeof (uint64_t) * length); timesTenToTheEHighPrecision (x, xLength, e); yLength = (k >> 6) + 2; allocateU64 (y, yLength); - memset (y + 1, 0, sizeof (U_64) * (yLength - 1)); + memset (y + 1, 0, sizeof (uint64_t) * (yLength - 1)); *y = m; simpleShiftLeftHighPrecision (y, yLength, k); } @@ -385,8 +382,8 @@ static jdouble doubleAlgorithm(JNIEnv*, U_64* f, IDATA length, jint e, jdouble z { xLength = sizeOfTenToTheE (e) + length + ((-k) >> 6) + 1; allocateU64 (x, xLength); - memset (x + length, 0, sizeof (U_64) * (xLength - length)); - memcpy (x, f, sizeof (U_64) * length); + memset (x + length, 0, sizeof (uint64_t) * (xLength - length)); + memcpy (x, f, sizeof (uint64_t) * length); timesTenToTheEHighPrecision (x, xLength, e); simpleShiftLeftHighPrecision (x, xLength, -k); @@ -401,7 +398,7 @@ static jdouble doubleAlgorithm(JNIEnv*, U_64* f, IDATA length, jint e, jdouble z yLength = sizeOfTenToTheE (-e) + 2 + (k >> 6); allocateU64 (y, yLength); - memset (y + 1, 0, sizeof (U_64) * (yLength - 1)); + memset (y + 1, 0, sizeof (uint64_t) * (yLength - 1)); *y = m; timesTenToTheEHighPrecision (y, yLength, -e); simpleShiftLeftHighPrecision (y, yLength, k); @@ -410,13 +407,13 @@ static jdouble doubleAlgorithm(JNIEnv*, U_64* f, IDATA length, jint e, jdouble z { xLength = length + ((-k) >> 6) + 1; allocateU64 (x, xLength); - memset (x + length, 0, sizeof (U_64) * (xLength - length)); - memcpy (x, f, sizeof (U_64) * length); + memset (x + length, 
0, sizeof (uint64_t) * (xLength - length)); + memcpy (x, f, sizeof (uint64_t) * length); simpleShiftLeftHighPrecision (x, xLength, -k); yLength = sizeOfTenToTheE (-e) + 1; allocateU64 (y, yLength); - memset (y + 1, 0, sizeof (U_64) * (yLength - 1)); + memset (y + 1, 0, sizeof (uint64_t) * (yLength - 1)); *y = m; timesTenToTheEHighPrecision (y, yLength, -e); } @@ -426,14 +423,14 @@ static jdouble doubleAlgorithm(JNIEnv*, U_64* f, IDATA length, jint e, jdouble z { /* x > y */ DLength = xLength; allocateU64 (D, DLength); - memcpy (D, x, DLength * sizeof (U_64)); + memcpy (D, x, DLength * sizeof (uint64_t)); subtractHighPrecision (D, DLength, y, yLength); } else if (comparison) { /* y > x */ DLength = yLength; allocateU64 (D, DLength); - memcpy (D, y, DLength * sizeof (U_64)); + memcpy (D, y, DLength * sizeof (uint64_t)); subtractHighPrecision (D, DLength, x, xLength); } else @@ -508,18 +505,18 @@ static jdouble doubleAlgorithm(JNIEnv*, U_64* f, IDATA length, jint e, jdouble z if (x && x != f) free(x); - release (y); - release (D); - release (D2); + free(y); + free(D); + free(D2); return z; OutOfMemory: if (x && x != f) free(x); - release (y); - release (y); - release (D); - release (D2); + free(y); + free(y); + free(D); + free(D2); DOUBLE_TO_LONGBITS (z) = -2; @@ -532,10 +529,10 @@ OutOfMemory: #define DEFAULT_FLOAT_WIDTH MAX_FLOAT_ACCURACY_WIDTH -static jfloat createFloat1(JNIEnv* env, U_64* f, IDATA length, jint e); -static jfloat floatAlgorithm(JNIEnv* env, U_64* f, IDATA length, jint e, jfloat z); +static jfloat createFloat1(JNIEnv* env, uint64_t* f, int32_t length, jint e); +static jfloat floatAlgorithm(JNIEnv* env, uint64_t* f, int32_t length, jint e, jfloat z); -static const U_32 float_tens[] = { +static const uint32_t float_tens[] = { 0x3f800000, 0x41200000, 0x42c80000, @@ -558,7 +555,6 @@ static const U_32 float_tens[] = { #define FLOAT_MANTISSA_MASK (0x007FFFFF) #define FLOAT_EXPONENT_MASK (0x7F800000) #define FLOAT_NORMAL_MASK (0x00800000) -#define FLOAT_TO_INTBITS(flt) (*((U_32 *)(&flt))) /* Keep a count of the number of times we decrement and increment to * approximate the double, and attempt to detect the case where we @@ -600,15 +596,15 @@ static const U_32 float_tens[] = { static jfloat createFloat(JNIEnv* env, const char* s, jint e) { /* assumes s is a null terminated string with at least one * character in it */ - U_64 def[DEFAULT_FLOAT_WIDTH]; - U_64 defBackup[DEFAULT_FLOAT_WIDTH]; - U_64* f; - U_64* fNoOverflow; - U_64* g; - U_64* tempBackup; - U_32 overflow; + uint64_t def[DEFAULT_FLOAT_WIDTH]; + uint64_t defBackup[DEFAULT_FLOAT_WIDTH]; + uint64_t* f; + uint64_t* fNoOverflow; + uint64_t* g; + uint64_t* tempBackup; + uint32_t overflow; jfloat result; - IDATA index = 1; + int32_t index = 1; int unprocessedDigits = 0; f = def; @@ -623,7 +619,7 @@ static jfloat createFloat(JNIEnv* env, const char* s, jint e) { * back out of it if there is no more room, i.e. index > * MAX_FLOAT_ACCURACY_WIDTH. 
*/ - memcpy (fNoOverflow, f, sizeof (U_64) * index); + memcpy (fNoOverflow, f, sizeof (uint64_t) * index); overflow = simpleAppendDecimalDigitHighPrecision (f, index, *s - '0'); if (overflow) @@ -638,7 +634,7 @@ static jfloat createFloat(JNIEnv* env, const char* s, jint e) { if (index >= MAX_FLOAT_ACCURACY_WIDTH) { index--; - memcpy (f, fNoOverflow, sizeof (U_64) * index); + memcpy (f, fNoOverflow, sizeof (uint64_t) * index); break; } if (tempBackup) @@ -680,7 +676,7 @@ static jfloat createFloat(JNIEnv* env, const char* s, jint e) { } else { - result = *(jfloat *) & index; + result = INTBITS_TO_FLOAT(index); } } else @@ -691,7 +687,7 @@ static jfloat createFloat(JNIEnv* env, const char* s, jint e) { } else { - result = *(jfloat *) & index; + result = INTBITS_TO_FLOAT(index); } } @@ -699,8 +695,8 @@ static jfloat createFloat(JNIEnv* env, const char* s, jint e) { } -static jfloat createFloat1 (JNIEnv* env, U_64* f, IDATA length, jint e) { - IDATA numBits; +static jfloat createFloat1 (JNIEnv* env, uint64_t* f, int32_t length, jint e) { + int32_t numBits; jdouble dresult; jfloat result; @@ -733,8 +729,8 @@ static jfloat createFloat1 (JNIEnv* env, U_64* f, IDATA length, jint e) { else if (e > -309) { int dexp; - U_32 fmant, fovfl; - U_64 dmant; + uint32_t fmant, fovfl; + uint64_t dmant; dresult = toDoubleHighPrecision (f, length) / pow (10.0, (double) -e); if (IS_DENORMAL_DBL (dresult)) { @@ -754,8 +750,8 @@ static jfloat createFloat1 (JNIEnv* env, U_64* f, IDATA length, jint e) { if ((dexp <= -127) && (dexp > -155)) { /* Only interested in 24 msb bits of the 53-bit double mantissa */ - fmant = (U_32) (dmant >> 29); - fovfl = ((U_32) (dmant & 0x1FFFFFFF)) << 3; + fmant = (uint32_t) (dmant >> 29); + fovfl = ((uint32_t) (dmant & 0x1FFFFFFF)) << 3; while ((dexp < -127) && ((fmant | fovfl) != 0)) { if ((fmant & 1) != 0) @@ -826,15 +822,15 @@ static jfloat createFloat1 (JNIEnv* env, U_64* f, IDATA length, jint e) { * is currently set such that if the oscillation occurs more than twice * then return the original approximation. 
*/ -static jfloat floatAlgorithm(JNIEnv*, U_64* f, IDATA length, jint e, jfloat z) { - U_64 m; - IDATA k, comparison, comparison2; - U_64* x; - U_64* y; - U_64* D; - U_64* D2; - IDATA xLength, yLength, DLength, D2Length; - IDATA decApproxCount, incApproxCount; +static jfloat floatAlgorithm(JNIEnv*, uint64_t* f, int32_t length, jint e, jfloat z) { + uint64_t m; + int32_t k, comparison, comparison2; + uint64_t* x; + uint64_t* y; + uint64_t* D; + uint64_t* D2; + int32_t xLength, yLength, DLength, D2Length; + int32_t decApproxCount, incApproxCount; x = y = D = D2 = 0; xLength = yLength = DLength = D2Length = 0; @@ -848,21 +844,21 @@ static jfloat floatAlgorithm(JNIEnv*, U_64* f, IDATA length, jint e, jfloat z) { if (x && x != f) free(x); - release (y); - release (D); - release (D2); + free(y); + free(D); + free(D2); if (e >= 0 && k >= 0) { xLength = sizeOfTenToTheE (e) + length; allocateU64 (x, xLength); - memset (x + length, 0, sizeof (U_64) * (xLength - length)); - memcpy (x, f, sizeof (U_64) * length); + memset (x + length, 0, sizeof (uint64_t) * (xLength - length)); + memcpy (x, f, sizeof (uint64_t) * length); timesTenToTheEHighPrecision (x, xLength, e); yLength = (k >> 6) + 2; allocateU64 (y, yLength); - memset (y + 1, 0, sizeof (U_64) * (yLength - 1)); + memset (y + 1, 0, sizeof (uint64_t) * (yLength - 1)); *y = m; simpleShiftLeftHighPrecision (y, yLength, k); } @@ -870,8 +866,8 @@ static jfloat floatAlgorithm(JNIEnv*, U_64* f, IDATA length, jint e, jfloat z) { { xLength = sizeOfTenToTheE (e) + length + ((-k) >> 6) + 1; allocateU64 (x, xLength); - memset (x + length, 0, sizeof (U_64) * (xLength - length)); - memcpy (x, f, sizeof (U_64) * length); + memset (x + length, 0, sizeof (uint64_t) * (xLength - length)); + memcpy (x, f, sizeof (uint64_t) * length); timesTenToTheEHighPrecision (x, xLength, e); simpleShiftLeftHighPrecision (x, xLength, -k); @@ -886,7 +882,7 @@ static jfloat floatAlgorithm(JNIEnv*, U_64* f, IDATA length, jint e, jfloat z) { yLength = sizeOfTenToTheE (-e) + 2 + (k >> 6); allocateU64 (y, yLength); - memset (y + 1, 0, sizeof (U_64) * (yLength - 1)); + memset (y + 1, 0, sizeof (uint64_t) * (yLength - 1)); *y = m; timesTenToTheEHighPrecision (y, yLength, -e); simpleShiftLeftHighPrecision (y, yLength, k); @@ -895,13 +891,13 @@ static jfloat floatAlgorithm(JNIEnv*, U_64* f, IDATA length, jint e, jfloat z) { { xLength = length + ((-k) >> 6) + 1; allocateU64 (x, xLength); - memset (x + length, 0, sizeof (U_64) * (xLength - length)); - memcpy (x, f, sizeof (U_64) * length); + memset (x + length, 0, sizeof (uint64_t) * (xLength - length)); + memcpy (x, f, sizeof (uint64_t) * length); simpleShiftLeftHighPrecision (x, xLength, -k); yLength = sizeOfTenToTheE (-e) + 1; allocateU64 (y, yLength); - memset (y + 1, 0, sizeof (U_64) * (yLength - 1)); + memset (y + 1, 0, sizeof (uint64_t) * (yLength - 1)); *y = m; timesTenToTheEHighPrecision (y, yLength, -e); } @@ -911,14 +907,14 @@ static jfloat floatAlgorithm(JNIEnv*, U_64* f, IDATA length, jint e, jfloat z) { { /* x > y */ DLength = xLength; allocateU64 (D, DLength); - memcpy (D, x, DLength * sizeof (U_64)); + memcpy (D, x, DLength * sizeof (uint64_t)); subtractHighPrecision (D, DLength, y, yLength); } else if (comparison) { /* y > x */ DLength = yLength; allocateU64 (D, DLength); - memcpy (D, y, DLength * sizeof (U_64)); + memcpy (D, y, DLength * sizeof (uint64_t)); subtractHighPrecision (D, DLength, x, xLength); } else @@ -993,17 +989,17 @@ static jfloat floatAlgorithm(JNIEnv*, U_64* f, IDATA length, jint e, jfloat z) { if (x && x 
!= f) free(x); - release (y); - release (D); - release (D2); + free(y); + free(D); + free(D2); return z; OutOfMemory: if (x && x != f) free(x); - release (y); - release (D); - release (D2); + free(y); + free(D); + free(D2); FLOAT_TO_INTBITS (z) = -2; @@ -1021,9 +1017,9 @@ static jfloat FloatingPointParser_parseFltImpl(JNIEnv* env, jclass, jstring s, j } jfloat flt = createFloat(env, str.c_str(), e); - if (((I_32) FLOAT_TO_INTBITS (flt)) >= 0) { + if (((int32_t) FLOAT_TO_INTBITS (flt)) >= 0) { return flt; - } else if (((I_32) FLOAT_TO_INTBITS (flt)) == (I_32) - 1) { + } else if (((int32_t) FLOAT_TO_INTBITS (flt)) == (int32_t) - 1) { jniThrowException(env, "java/lang/NumberFormatException", NULL); } else { jniThrowException(env, "java/lang/OutOfMemoryError", NULL); @@ -1040,7 +1036,7 @@ static jdouble FloatingPointParser_parseDblImpl(JNIEnv* env, jclass, jstring s, if (!ERROR_OCCURED (dbl)) { return dbl; - } else if (LOW_I32_FROM_VAR (dbl) == (I_32) - 1) { + } else if (LOW_I32_FROM_VAR (dbl) == (int32_t) - 1) { jniThrowException(env, "java/lang/NumberFormatException", NULL); } else { jniThrowException(env, "java/lang/OutOfMemoryError", NULL); -- 2.11.0
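
Illustration (not part of the patch): the parsing code above works on the raw IEEE-754 bit patterns through the U64U32DBL union and the HIGH_U32_FROM_DBL/LOW_U32_FROM_DBL accessors that this change moves into cbigint.h. The following minimal, self-contained C++ sketch shows what those accessors amount to on a little-endian host (so the high word sits at index 1, i.e. DOUBLE_HI_OFFSET). The example value 1.5 and the main()/printf scaffolding are mine, not the library's; the union layout and the 0x7FF00000 exponent mask come from the header above, and the union read relies on the same type punning the header's macros use.

    #include <stdint.h>
    #include <stdio.h>

    // Same layout as the U64U32DBL union added to cbigint.h: one double viewed
    // as one 64-bit integer or as two 32-bit halves.
    union U64U32DBL {
        uint64_t u64val;
        uint32_t u32val[2];
        int32_t i32val[2];
        double dval;
    };

    int main() {
        U64U32DBL bits;
        bits.dval = 1.5;  // IEEE 754 double: 0x3FF8000000000000
        uint32_t hi = bits.u32val[1];  // DOUBLE_HI_OFFSET on little-endian
        uint32_t lo = bits.u32val[0];  // DOUBLE_LO_OFFSET on little-endian
        // DOUBLE_EXPONENT_MASK_HI (0x7FF00000) isolates the 11-bit exponent field
        // in the high word; subtracting the standard IEEE-754 bias (1023) unbiases it.
        int32_t exponent = static_cast<int32_t>((hi & 0x7FF00000) >> 20) - 1023;
        printf("hi=0x%08X lo=0x%08X unbiased exponent=%d\n", hi, lo, exponent);
        return 0;
    }

For 1.5 this prints hi=0x3FF80000 lo=0x00000000 unbiased exponent=0; the high-word shift by 20 is the same field extraction that doubleExponent() in cbigint.cpp performs before applying its own offset.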