I'm looking for a C++ function that determines how accurately all floating-point numbers in a given range can be represented as strings, without using external libraries such as Boost.
Please take a look at my code and tell me how it can be improved.
#include <algorithm>
#include <cfloat>
#include <cmath>
#include <iostream>
#include <string>
/**
* Calculates the scale of a given string.
*
* @param str the string to calculate the scale of
*
* @return the scale of the string
*
* @throws None
*/
/**
 * Calculates the scale (number of significant fractional digits) of a
 * decimal string, i.e. the count of digits after the decimal point with
 * trailing zeros stripped. E.g. "1.500000" -> 1, "1.000000" -> 0.
 *
 * @param str the decimal string to inspect (as produced by std::to_string)
 *
 * @return the scale of the string; 0 if the string has no decimal point
 *
 * @throws None
 */
int get_scale_of(const std::string &str)
{
    const std::size_t dot = str.find('.');
    if (dot == std::string::npos)
    {
        // No fractional part at all — the original code would have
        // underflowed npos arithmetic here; an integral string has scale 0.
        return 0;
    }
    // Index of the last character that is not a trailing '0'. Since the
    // '.' itself is not '0', this is never less than dot; if every
    // fractional digit is zero it lands exactly on the dot, giving 0.
    const std::size_t last_significant = str.find_last_not_of('0');
    return static_cast<int>(last_significant - dot);
}
/**
* Calculates the maximum precision in the range of floating-point numbers.
*
* @param min The minimum value of the range.
* @param max The maximum value of the range.
*
* @return The maximum precision in the range.
*
* @throws None.
*/
/**
 * Calculates the number of fractional decimal digits that are guaranteed to
 * be distinguishable when floats in [min, max) are rendered with
 * std::to_string. Walks every representable float in the range and takes the
 * minimum precision observed between consecutive distinct renderings.
 *
 * Note: this enumerates every float in the range, so cost grows with the
 * number of representable values between min and max.
 *
 * @param min The minimum value of the range (inclusive).
 * @param max The maximum value of the range (exclusive).
 *
 * @return The guaranteed precision in the range; 100 (the sentinel) if the
 *         range contains no float below max.
 *
 * @throws None.
 */
int get_max_precision_in_range_for_float(const int min, const int max)
{
    int precision = 100; // sentinel: returned unchanged for an empty range
    float f = static_cast<float>(min);
    while (f < static_cast<float>(max))
    {
        // Advance to the next float whose decimal rendering actually
        // differs; adjacent floats can stringify identically because
        // std::to_string uses a fixed 6 fractional digits.
        float n = std::nextafter(f, FLT_MAX);
        const std::string s1 = std::to_string(f);
        std::string s2 = std::to_string(n);
        while (s1 == s2)
        {
            n = std::nextafter(n, FLT_MAX);
            s2 = std::to_string(n);
        }
        // Digits needed to tell this value and its successor apart; the
        // guaranteed precision is the worst (smallest) such count.
        precision = std::min(precision, std::max(get_scale_of(s1), get_scale_of(s2)));
        f = n;
    }
    return precision;
}
int main(int argc, char const *argv[])
{
    // Demonstrate the guaranteed decimal accuracy over a few sample ranges.
    const int ranges[][2] = {{1, 2}, {7, 8}, {500000, 600000}};
    for (const auto &range : ranges)
    {
        std::cout << get_max_precision_in_range_for_float(range[0], range[1])
                  << " digits accuracy\n";
    }
    return 0;
}