Convert this Python code to C++:
def unpack_pixels(packed_pixels, bit_depth, img_shape):
    """
    Unpack a bytearray of packed pixels into a NumPy array according to the
    specified bit depth, scaling each value back to the 0-255 (8-bit) range.

    :param packed_pixels: bytearray of packed pixels, MSB-first within each byte.
    :param bit_depth: Bit depth used for packing; must evenly divide 8
        (i.e. 1, 2, 4, or 8).
    :param img_shape: Shape of the image as (height, width).
    :return: Unpacked NumPy uint8 array of shape ``img_shape``.
    """
    max_val = 2 ** bit_depth - 1
    mask = max_val  # isolates one pixel's bits after shifting
    pixels_per_byte = 8 // bit_depth
    unpacked_pixels = np.zeros(img_shape, dtype=np.uint8)

    byte_idx = 0
    byte = 0
    # Start "exhausted" so the first iteration loads the first packed byte.
    pixel_idx = pixels_per_byte
    for i in range(img_shape[0]):
        for j in range(img_shape[1]):
            if pixel_idx == pixels_per_byte:
                # Current byte consumed: advance to the next packed byte.
                pixel_idx = 0
                if byte_idx < len(packed_pixels):
                    byte = packed_pixels[byte_idx]
                byte_idx += 1
            # Extract the pixel (MSB-first) and scale back to 8-bit.
            # e.g. for bit_depth=4, values are multiplied by 255 // 15 == 17.
            pixel = ((byte >> (8 - bit_depth - (pixel_idx * bit_depth))) & mask) * (255 // max_val)
            unpacked_pixels[i, j] = pixel
            pixel_idx += 1
    return unpacked_pixels
I want it to be as close as possible to this existing C++ code, which only unpacks 4-bit-per-pixel values:
// Assuming each byte in ‘input’ contains two 4-bit pixel values,
// this function unpacks them into a 256x256 grayscale image array.
void unpackPixelData(const std::vector<uint8_t>& input, mxl::Variant* outArray, uint width, uint height)
{
size_t idx = 0;
for (uint i = 0; i < width; i+= 2) {
for (uint j = 0; j < height; j ++) {
if (idx < input.size()) {
uint8_t byte = input[idx++];
int16_t pixel1 = (byte >> 4) * 16; // Extract the first 4 bits and scale to 0-255
int16_t pixel2 = (byte & 0x0F) * 16; // Extract the last 4 bits and scale to 0-255
outArray[id + 1] = pixel2;
}