#include <cstdint>
#include <iostream>

/// Interprets three bytes as a big-endian two's-complement 24-bit integer
/// and sign-extends it to a 32-bit signed integer.
///
/// @param bytes Pointer to at least 3 readable bytes, most significant first.
/// @return The decoded value, in [-8388608, 8388607].
int32_t interpret24bitAsInt32(const unsigned char* bytes) {
  // Assemble the raw 24-bit magnitude. Each byte is promoted to int before
  // shifting, so the shifts are well-defined and the result fits in 31 bits.
  // (Plain literals are used for the shift counts: INT32_C/UINT8_C on a
  // shift count has no effect on the expression's type or value.)
  int32_t const number =
        (bytes[0] << 16)
      | (bytes[1] << 8)
      |  bytes[2];
  // Bit 23 is the 24-bit sign bit. If it is set, the two's-complement value
  // equals number - 2^24; otherwise the correction is zero.
  int32_t const correction =
      (bytes[0] >> 7) << 24;
  return number - correction;
}

int main() {
  // Demo: the two extremes of the signed 24-bit range.
  // 0x800000 is the most negative value -> -8388608.
  unsigned char n1[] = { 0x80, 0, 0 };
  std::cout << interpret24bitAsInt32(n1) << '\n';  // '\n' avoids endl's flush
  // 0x7FFFFF is the most positive value -> 8388607.
  unsigned char n2[] = { 0x7F, 0xFF, 0xFF };
  std::cout << interpret24bitAsInt32(n2) << '\n';
}