The problem is that you mix up two different types, i.e., int and char. You cannot simply convert a binary representation of an int to a single char because they have a different number of bytes used to represent them. With a char, you can represent only 256 different numbers.
Below I post a code example that converts an int to a binary representation and from this back to an int.
decToBin(int val, int* binaryNum) gets a value that is converted from decimal to binary. binaryNum is an array to store the result.
binToDec(int* binaryNum) converts the binary representation to an int and returns it
clearBinNumber(int* binaryNum) clears the binary representation with 0
printBinNumber(int* binaryNum) is a helper to print the binary representation
binToChar(int* binaryNum) converts a binary representation to a char and returns it
charToDec(char charNum) converts a char to a decimal representation using only bit-operations
#include <iostream>
using namespace std;
// Number of bits used to represent a variable of type int.
// Assumes 8-bit bytes (use CHAR_BIT from <climits> for strict
// portability). Declared const: it is fixed at start-up and nothing
// should ever write to it; the static_cast makes the size_t -> int
// conversion explicit.
const int bit_pos_count = static_cast<int>(sizeof(int) * 8);
// ---------------------------------------------------------- //
void
decToBin(int val, int* binaryNum) {
int bit_pos = 0;
while (val > 0) {
if (val % 2 == 1)
binaryNum[bit_pos] = 1;
else
binaryNum[bit_pos] = 0;
val = val / 2; // integer division
++bit_pos;
}
}
// ---------------------------------------------------------- //
void
clearBinNumber(int* binaryNum) {
    // Reset every bit slot to 0 so that leftovers from a previous
    // conversion cannot leak into the next one.
    int pos = bit_pos_count;
    while (pos-- > 0)
        binaryNum[pos] = 0;
}
// ---------------------------------------------------------- //
void
printBinNumber(int* binaryNum) {
    // Helper: print the most significant bit first, the way binary
    // numbers are conventionally written.
    for (int pos = bit_pos_count; pos > 0; --pos)
        cout << binaryNum[pos - 1];
}
// ---------------------------------------------------------- //
int
binToDec(int* binaryNum) {
    // Rebuild the int from its binary representation: walk from the
    // most significant position down, shifting the accumulated value
    // left once per position and OR-ing in each set bit.
    int value = 0;
    int pos = bit_pos_count;
    while (pos-- > 0) {
        value = value << 1;
        if (binaryNum[pos] == 1)
            value = value | 1;
    }
    return value;
}
// ---------------------------------------------------------- //
char
binToChar(int* binaryNum) {
    // Assemble a char from the low sizeof(char)*8 entries of the
    // binary representation (binaryNum[0] is the least significant
    // bit). Bits beyond the char's width are ignored.
    const int char_bits = sizeof(char) * 8;
    char out = 0;
    int pos = char_bits;
    while (pos-- > 0) {
        out = out << 1;
        if (binaryNum[pos] == 1)
            out = out | 1;
    }
    return out;
}
// ---------------------------------------------------------- //
int
charToDec(char charNum) {
    // Recover the decimal value of a char using only bit operations:
    // test each of the char's bits with a walking single-bit mask and
    // OR the set ones into the result. Returns the value of the low
    // 8 bits as a non-negative int.
    //
    // Bug fix: the original accumulated into a "char result", which
    // defeats the point of returning an int — every assignment
    // narrowed the value back to char, and chars with the top bit set
    // were sign-extended on return. Accumulating in an int keeps every
    // collected bit. (For the printable range main() uses, 33..65, the
    // observable result is unchanged.)
    const int char_size = sizeof(char) * 8;
    int result = 0;
    int compare = 1;  // single-bit mask, walks from bit 0 upward
    for (int i = 0; i < char_size; ++i) {
        if ((charNum & compare) != 0)
            result = result | compare;
        compare = compare * 2;
    }
    return result;
}
// ---------------------------------------------------------- //
int
main(int argc, char const* argv[]) {
cout << bit_pos_count << endl;
int* binaryNum = new int[bit_pos_count];
// printable chars start at 33
for (int i = 33; i < 66; i += 1) {
decToBin(i, binaryNum);
char charNum = binToChar(binaryNum);
cout << "decimal: " << i << endl;
cout << "binary: ";
printBinNumber(binaryNum);
cout << endl;
cout << "decimal from binary: " << binToDec(binaryNum) << endl;
cout << "char from binary: " << charNum << endl;
cout << "decimal from char: " << charToDec(charNum) << endl;
cout << "---" << endl;
}
delete[] binaryNum;
}
decimal: 33
binary: 00000000000000000000000000100001
decimal from binary: 33
char from binary: !
decimal from char: 33
---
decimal: 34
binary: 00000000000000000000000000100010
decimal from binary: 34
char from binary: "
decimal from char: 34
---
decimal: 35
binary: 00000000000000000000000000100011
decimal from binary: 35
char from binary: #
decimal from char: 35
---
int i = static_cast<int>(c); — an int has no "decimal form" or anything like that. It has a value, and that value can be represented using binary, decimal, hexadecimal, etc. The loop you present could be replaced with c = <source_int>; and you can do the same with another int variable.