#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Calculate a base-2 logarithm using division and square root operations.
In theory this code should work with things that aren’t Python-native
floating-point numbers if you give it the frexp and ldexp functions
necessary.
Naturally enough, it requires one iteration per bit of the result, and
that iteration itself involves a square root. So it involves 52 or 53
iterations for IEEE 754 double-precision floats. More troubling, I
fear it may be suffering some cumulative numerical precision loss from
the long chain of square-root operations; for most numbers, its result
matches that of the standard math library, but e.g. for
1.000000000000001 it gets lg(1.0) = 1.33226762955e-15 (should be
1.60171325191e-15).
So this isn’t a very practical algorithm for on-line use. It might
make more sense to use the Taylor series for 2**x from √½ to √2, or
splines between a bunch of points, or something.
"""
import sys
import math
def debug(msg):
    """Write the repr of msg to stderr, newline-terminated.

    msg is wrapped in a 1-tuple before %-formatting so that tuple and
    dict arguments are rendered as a single value; the original form
    ('%r\\n' % msg) raised TypeError for e.g. a 2-tuple because the
    tuple was consumed as multiple format arguments.
    """
    sys.stderr.write('%r\n' % (msg,))
def lg(x, frexp=math.frexp, ldexp=math.ldexp, debug=lambda x: 37):
    """Compute the base-2 logarithm of x by geometric bisection.

    Maintains a bracketing interval [lo, hi] around x whose endpoint
    logarithms (lo_lg, hi_lg) are tracked exactly.  Each step replaces
    one endpoint with the geometric mean of the two — whose logarithm
    is the arithmetic mean of the endpoint logarithms — until the
    midpoint collides with an endpoint, i.e. until floating-point
    resolution is exhausted.

    frexp/ldexp may be supplied to support non-native float types.
    debug is a hook invoked once with a summary string; the default
    discards it.

    Returns the logarithm of the lower bracket endpoint.
    """
    assert x > 0  # NOTE(review): stripped under -O; raising ValueError might be safer
    _, exponent = frexp(x)        # x == mantissa * 2**exponent, mantissa in [0.5, 1)
    lo_lg = exponent - 1
    hi_lg = exponent
    lo = ldexp(1, lo_lg)          # 2**lo_lg <= x
    hi = ldexp(1, hi_lg)          # x <= 2**hi_lg
    steps = 0
    while True:
        steps += 1
        # Arithmetic mean in log space == log of the geometric mean below.
        guess_lg = lo_lg + (hi_lg - lo_lg) / 2.0
        guess = (lo * hi) ** 0.5
        if guess == lo or guess == hi:
            break                 # midpoint no longer representable: converged
        if guess < x:
            lo, lo_lg = guess, guess_lg
        else:
            hi, hi_lg = guess, guess_lg
    debug("%d iters for %r" % (steps, x))
    return lo_lg
if __name__ == '__main__':
x = float(sys.argv[1])
print "lg(%s) = %s" % (x, lg(x)),
print " (should be %s)" % (math.log(x)/math.log(2))