将十进制值转换为32位浮点十六进制 [英] Converting a decimal value to a 32bit floating-point hexadecimal
问题描述
$ p $函数floatToIntBits(f){
var NAN_BITS = 0 | 0x7FC00000;
var INF_BITS = 0 | 0x7F800000;
var ZERO_BITS = 0 | 0x00000000;
var SIGN_BIT = 0 | 0x80000000;
var EXP_MASK = 0 | 0x7F800000;
var MANT_MASK = 0 | 0x007FFFFF;
if(f!= f)
return NAN_BITS;
var signBit =(f> 0.0 ||(f == 0.0& Math.pow(f,-1)> 0))? 0:SIGN_BIT;
var fabs = Math.abs(f);
if(fabs == Number.POSITIVE_INFINITY)
return signBit | INF_BITS;
if(fabs == 0.0)
返回signBit | ZERO_BITS;
var e = 0,x = f;
while(x!= 0.0){
e ++;
x / = 2.0;
}
var exp = e - (1023 + 52);
if(exp> = 127)// XXX:可能不正确
返回signBit | INF_BITS;
if(exp <= -126)// XXX:可能不正确
返回signBit | ZERO_BITS;
var ceil = Math.pow(2.0,exp);
//console.log("fabs", fabs, "ceil", ceil);
var mantissa = fabs / ceil * Math.pow(2.0,24);
if(fabs == ceil){
mantissa = 0;
} else {
exp--;
}
var expBits =((exp + 127)<< 23)& EXP_MASK;
var mantissaBits = mantissa & MANT_MASK;
//console.log(\"sign,signBit,expBits,expBits.toString(16),mantissaBits,mantissaBits.toString(16));
返回signBit | expBits | mantissaBits;
函数testCase(expected,f){
var actual = floatToIntBits(f);
if(expected!== actual){
console.log(expected,expected.toString(16),actual,actual.toString(16),f,f);
}
}
testCase(0 | 0x80000000,-0.0);
testCase(0 | 0x00000000,0.0);
testCase(0 | 0x3F800000,1.0);
testCase(0 | 0x42C80000,100.0);
testCase(0 | 0x7FC00000,0.0 / 0.0);
testCase(0 | 0x7F800000,1.0 / 0.0);
testCase(0 | 0xFF800000,1.0 / -0.0);
有趣的 `0 | 0x...` 表达式是必须的，因为 JavaScript 将这些字面量视为大的正整数，但应用按位运算符会将它们转换为带符号的 32 位整数（参见 ECMAScript 规范第 8.5 节最后一段）。
更新:以下代码基于上面的代码,但它是更符合规范的实际措辞。此外,它独立于用来实现JavaScript的 Number
的特定浮点类型。代码首先将值移到区间[1.0; 2.0),因为这是IEEE 754-1985中提到的归一化数字的表示。这个代码也能正确处理非规范化的数字,它所使用的所有操作都是在IEEE 754-1985中定义的,并且是精确的,也就是说它们不会失去精度。
function assert(cond,msg,arg0){
if(!cond)
console.log(error,msg,arg0);
函数floatToIntBits(f){
var NAN_BITS = 0 | 0x7FC00000;
var INF_BITS = 0 | 0x7F800000;
var ZERO_BITS = 0 | 0x00000000;
var SIGN_MASK = 0 | 0x80000000;
var EXP_MASK = 0 | 0x7F800000;
var MANT_MASK = 0 | 0x007FFFFF;
var MANT_MAX = Math.pow(2.0,23) - 1.0;
if(f!= f)
return NAN_BITS;
var hasSign = f < 0.0 || (f == 0.0&& 1.0 / f< 0);
var signBits = hasSign? SIGN_MASK:0;
var fabs = Math.abs(f);
if(fabs == Number.POSITIVE_INFINITY)
return signBits | INF_BITS;
var exp = 0,x = fabs;
while(x> = 2.0&& exp< = 127){
exp ++;
x / = 2.0;
while(x <1.0& exp> = -126){
exp--;
x * = 2.0;
}
assert(x * Math.pow(2.0,exp)== fabs,fabs);
var biasedExp = exp + 127;
assert(0 <= biasedExp&biasedExp≤254,biasedExp);
if(biasedExp == 255)
返回signBit | INF_BITS;
assert(0.0 <= x&& x< 2.0,x in [0.0,1.0),x);
if(biasedExp == 0)
var mantissa = x * Math.pow(2.0,23)/ 2.0;
assert(1.0 <= x&& x< 2.0,x in [0.5; 1.0),x);
var mantissa = x * Math.pow(2.0, 23) - Math.pow(2.0, 23);
//console.log("number", f, "x", x, "biasedExp", biasedExp, "mantissa", mantissa.toString(16));
var expBits =(biasedExp << 23)& EXP_MASK;
var mantissaBits = mantissa & MANT_MASK;
//console.log("number", f, "sign", signBits.toString(16), "expBits", expBits.toString(16), "mantissaBits", mantissaBits.toString(16));
返回signBits | expBits | mantissaBits;
函数testCase(expected,f){
var actual = floatToIntBits(f);
if(expected!== actual){
console.log(error,number,f,expected,expected.toString(16),got,actual.toString( 16));
}
}
testCase(0 | 0xFF800000,1.0 / -0.0); // -Inf
testCase(0 | 0xBF800000,-1.0);
testCase(0 | 0x80000000,-0.0);
testCase(0 | 0x00000000,0.0);
testCase(0 | 0x00000001,Math.pow(2.0, - (126 + 23))); //最小非规格化
testCase(0 | 0x007FFFFF,Math.pow(2.0,-126) - Math.pow(2.0, - (126 + 23))); //最大非规范化
testCase(0 | 0x00800000,Math.pow(2.0,-126)); //最小规格化浮点数
testCase(0 | 0x3F800000,1.0);
testCase(0 | 0x42C80000,100.0);
testCase(0 | 0x7F800000,1.0 / 0.0); // Inf
testCase(0 | 0x7FC00000,0.0 / 0.0); // NaN
For a simple utility I'm working on, I need a script that converts a given decimal value to a 32bit floating-point hexadecimal value. For example, I know 1 is 3F800000 and 100 is 42C80000, however I don't know how to return these results with any number. If somebody knows a simple formula or even a complex way to go about doing this, please share.
I don't know if I got the corner cases correctly, but anyway, here is some code:
function floatToIntBits(f) {
// Converts a JavaScript Number to the bit pattern of its IEEE 754
// single-precision (32-bit float) representation, returned as a signed
// 32-bit integer (like Java's Float.floatToIntBits).
// NOTE(review): this first version flushes denormalized values to zero
// (the `exp <= -126` branch below) and the overflow/underflow cutoffs
// carry the author's own "XXX: maybe incorrect" caveats; the reworked
// version later in the file handles those cases.
var NAN_BITS = 0|0x7FC00000;   // canonical quiet NaN
var INF_BITS = 0|0x7F800000;   // infinity (exponent all ones, mantissa 0)
var ZERO_BITS = 0|0x00000000;
var SIGN_BIT = 0|0x80000000;
var EXP_MASK = 0|0x7F800000;
var MANT_MASK = 0|0x007FFFFF;
// NaN is the only value that is not equal to itself.
if (f != f)
return NAN_BITS;
// Math.pow(f, -1) is -Infinity for f == -0.0, which exposes the sign
// of a zero that `f > 0.0` alone cannot distinguish.
var signBit = (f > 0.0 || (f == 0.0 && Math.pow(f, -1) > 0)) ? 0 : SIGN_BIT;
var fabs = Math.abs(f);
if (fabs == Number.POSITIVE_INFINITY)
return signBit | INF_BITS;
if (fabs == 0.0)
return signBit | ZERO_BITS;
// Count how many exact halvings it takes for x to underflow to 0;
// for an IEEE double this measures f's binary exponent relative to
// the smallest denormal.
var e = 0, x = f;
while (x != 0.0) {
e++;
x /= 2.0;
}
// 1023 is the double exponent bias and 52 its mantissa width, so
// e - (1023 + 52) recovers f's unbiased binary exponent — assumes
// Number is a 64-bit IEEE double with round-to-nearest (TODO confirm
// for exotic engines; the second version below avoids this assumption).
var exp = e - (1023 + 52);
if (exp >= 127) // XXX: maybe incorrect
return signBit | INF_BITS;
if (exp <= -126) // XXX: maybe incorrect — flushes denormals to zero
return signBit | ZERO_BITS;
// ceil = 2^exp. If fabs is exactly a power of two, the stored mantissa
// is 0; otherwise exp was one too high and is decremented so that the
// implicit leading 1 falls outside the 23 stored mantissa bits.
var ceil = Math.pow(2.0, exp);
var mantissa = fabs / ceil * Math.pow(2.0, 24);
if (fabs == ceil) {
mantissa = 0;
} else {
exp--;
}
var expBits = ((exp + 127) << 23) & EXP_MASK;
var mantissaBits = mantissa & MANT_MASK; // ToInt32 truncates any fraction
return signBit | expBits | mantissaBits;
}
// Compares floatToIntBits(f) against the expected bit pattern and logs
// a diagnostic line on mismatch (hex for both values, plus the input).
function testCase(expected, f) {
var actual = floatToIntBits(f);
if (expected === actual)
return;
console.log("expected", expected.toString(16), "actual", actual.toString(16), "f", f);
}
// Smoke tests for the first version; expected values are the IEEE 754
// single-precision bit patterns (e.g. 1.0 -> 0x3F800000).
testCase(0|0x80000000, -0.0); // negative zero keeps its sign bit
testCase(0|0x00000000, 0.0);
testCase(0|0x3F800000, 1.0);
testCase(0|0x42C80000, 100.0);
testCase(0|0x7FC00000, 0.0 / 0.0); // NaN
testCase(0|0x7F800000, 1.0 / 0.0); // +Infinity
testCase(0|0xFF800000, 1.0 / -0.0); // -Infinity
The funny-looking 0|0x...
expressions are necessary because JavaScript treats these literal numbers as being large positive integers, but applying a bitwise operator apparently converts them to signed 32-bit ints. (Compare the ECMAScript specification, section 8.5, last paragraph.)
Update: The following code is based on the above code, but it is more aligned to the actual wording of the specification. Additionally, it is independent of the particular floating point type that is used to implement JavaScript's Number
. The code first moves the value to the interval [1.0; 2.0), since this is the representation that is mentioned in IEEE 754-1985 for normalized numbers. This code also handles denormalized numbers correctly and all the operations it uses are defined in IEEE 754-1985 and are exact, that is they don't lose precision.
// Minimal assertion helper: logs an error line (does not throw) when
// cond is falsy; arg0 is an optional value to include in the output.
function assert(cond, msg, arg0) {
if (cond)
return;
console.log("error", msg, arg0);
}
function floatToIntBits(f) {
// Returns the IEEE 754-1985 single-precision bit pattern of f as a
// signed 32-bit integer (like Java's Float.floatToIntBits).
// Handles NaN, +/-Infinity, signed zeros and denormalized numbers.
// f is assumed to be exactly representable in single precision;
// otherwise the excess mantissa bits are truncated, not rounded
// (the final `& MANT_MASK` performs ToInt32 truncation).
var NAN_BITS = 0|0x7FC00000;   // canonical quiet NaN
var INF_BITS = 0|0x7F800000;   // exponent all ones, mantissa 0
var ZERO_BITS = 0|0x00000000;
var SIGN_MASK = 0|0x80000000;
var EXP_MASK = 0|0x7F800000;
var MANT_MASK = 0|0x007FFFFF;
var MANT_MAX = Math.pow(2.0, 23) - 1.0;
// NaN is the only value that is not equal to itself.
if (f != f)
return NAN_BITS;
// 1.0 / -0.0 is -Infinity, which detects the sign of a negative zero.
var hasSign = f < 0.0 || (f == 0.0 && 1.0 / f < 0);
var signBits = hasSign ? SIGN_MASK : 0;
var fabs = Math.abs(f);
if (fabs == Number.POSITIVE_INFINITY)
return signBits | INF_BITS;
// Scale fabs into x * 2^exp with x in [1.0; 2.0) for normalized values,
// or x in [0.0; 2.0) with exp == -127 for zeros and denormals.
// Halving and doubling are exact operations, so no precision is lost.
var exp = 0, x = fabs;
while (x >= 2.0 && exp <= 127) {
exp++;
x /= 2.0;
}
while (x < 1.0 && exp >= -126) {
exp--;
x *= 2.0;
}
assert(x * Math.pow(2.0, exp) == fabs, "fabs");
var biasedExp = exp + 127;
// Finite values too large for single precision overflow to infinity.
// Check this BEFORE the range assertion (the original asserted
// biasedExp <= 254 first, spuriously logging an error for overflow)
// and return signBits (the original referenced the undefined name
// `signBit` here, throwing a ReferenceError for |f| >= 2^128).
if (biasedExp == 255)
return signBits | INF_BITS;
assert(0 <= biasedExp && biasedExp <= 254, biasedExp);
if (biasedExp == 0) {
// Zero or denormalized: no implicit leading 1, exponent field is 0,
// and the mantissa is shifted one position to compensate.
assert(0.0 <= x && x < 2.0, "x in [0.0, 1.0)", x);
var mantissa = x * Math.pow(2.0, 23) / 2.0;
} else {
// Normalized: remove the implicit leading 1 bit from the mantissa.
assert(1.0 <= x && x < 2.0, "x in [0.5; 1.0)", x);
var mantissa = x * Math.pow(2.0, 23) - Math.pow(2.0, 23);
}
assert(0.0 <= mantissa && mantissa <= MANT_MAX, "mantissa in [0.0, 2^23)", mantissa);
var expBits = (biasedExp << 23) & EXP_MASK;
var mantissaBits = mantissa & MANT_MASK;
return signBits | expBits | mantissaBits;
}
// Compares floatToIntBits(f) against the expected bit pattern and logs
// an error line (input plus both values in hex) on mismatch.
function testCase(expected, f) {
var actual = floatToIntBits(f);
if (expected === actual)
return;
console.log("error", "number", f, "expected", expected.toString(16), "got", actual.toString(16));
}
// Test cases for the specification-aligned version, including the
// denormalized range that the first version flushed to zero.
testCase(0|0xFF800000, 1.0 / -0.0); // -Inf
testCase(0|0xBF800000, -1.0);
testCase(0|0x80000000, -0.0); // negative zero keeps its sign bit
testCase(0|0x00000000, 0.0);
testCase(0|0x00000001, Math.pow(2.0, -(126 + 23))); // minimum denormalized
testCase(0|0x007FFFFF, Math.pow(2.0, -126) - Math.pow(2.0, -(126 + 23))); // maximum denormalized
testCase(0|0x00800000, Math.pow(2.0, -126)); // minimum normalized float
testCase(0|0x3F800000, 1.0);
testCase(0|0x42C80000, 100.0);
testCase(0|0x7F800000, 1.0 / 0.0); // Inf
testCase(0|0x7FC00000, 0.0 / 0.0); // NaN
这篇关于将十进制值转换为32位浮点十六进制的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!