//#define LOG_NDEBUG 0
#define LOG_TAG "DngCreator_JNI"
#include <inttypes.h>
#include <string.h>
#include <algorithm>
#include <array>
#include <memory>
#include <vector>
#include <cmath>
#include <camera/NdkCameraMetadata.h>
#include <img_utils/DngUtils.h>
#include <img_utils/TagDefinitions.h>
#include <img_utils/TiffIfd.h>
#include <img_utils/TiffWriter.h>
#include <img_utils/Output.h>
#include <img_utils/Input.h>
#include <img_utils/StripSource.h>
#include <sys/system_properties.h>
#include "DngCreator.h"
// #include "core_jni_helpers.h"
// #include "android_runtime/AndroidRuntime.h"
// #include "android_runtime/android_hardware_camera2_CameraMetadata.h"
#include <jni.h>
// #include <nativehelper/JNIHelp.h>
using namespace android;
using namespace img_utils;
// using android::base::GetProperty;
ByteVectorOutput::ByteVectorOutput(std::vector<uint8_t>& buf) : m_buf(buf)
{
}
ByteVectorOutput::~ByteVectorOutput()
{
}
status_t ByteVectorOutput::open()
{
return OK;
}
status_t ByteVectorOutput::close()
{
return OK;
}
status_t ByteVectorOutput::write(const uint8_t* buf, size_t offset, size_t count)
{
m_buf.insert(m_buf.end(), buf + offset, buf + offset + count);
return OK;
}
ByteVectorInput::ByteVectorInput(const std::vector<uint8_t>& buf) : m_buf(buf), m_offset(0)
{
}
ByteVectorInput::~ByteVectorInput()
{
}
status_t ByteVectorInput::open()
{
return OK;
}
ssize_t ByteVectorInput::read(uint8_t* buf, size_t offset, size_t count)
{
if (m_buf.empty() || m_offset >= m_buf.size())
{
return NOT_ENOUGH_DATA;
}
size_t left = m_buf.size() - m_offset;
if (left >= count)
{
memcpy(buf + offset, &m_buf[m_offset], count);
m_offset += count;
return count;
}
else
{
memcpy(buf + offset, &m_buf[m_offset], left);
m_offset += left;
return left;
}
}
/**
* Skips bytes in the input.
*
* Returns the number of bytes skipped, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
ssize_t ByteVectorInput::skip(size_t count)
{
size_t left = m_buf.size() - m_offset;
if (left == 0 && count > 0)
{
// At EOF; return NOT_ENOUGH_DATA per the documented contract above.
return NOT_ENOUGH_DATA;
}
if (left >= count)
{
m_offset += count;
return count;
}
else
{
m_offset += left;
return left;
}
}
/**
* Close the Input. It is not valid to call open on a previously closed Input.
*
* Returns OK on success, or a negative error code.
*/
status_t ByteVectorInput::close()
{
return OK;
}
ByteBufferInput::ByteBufferInput(const uint8_t* buf, size_t len) : m_buf(buf), m_len(len), m_offset(0)
{
}
ByteBufferInput::~ByteBufferInput()
{
}
status_t ByteBufferInput::open()
{
return OK;
}
ssize_t ByteBufferInput::read(uint8_t* buf, size_t offset, size_t count)
{
if (m_buf == nullptr || m_offset >= m_len)
{
return NOT_ENOUGH_DATA;
}
size_t left = m_len - m_offset;
if (left >= count)
{
memcpy(buf + offset, m_buf + m_offset, count);
m_offset += count;
return count;
}
else
{
memcpy(buf + offset, m_buf + m_offset, left);
m_offset += left;
return left;
}
}
/**
* Skips bytes in the input.
*
* Returns the number of bytes skipped, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
ssize_t ByteBufferInput::skip(size_t count)
{
size_t left = m_len - m_offset;
if (left == 0 && count > 0)
{
// At EOF; return NOT_ENOUGH_DATA per the documented contract above.
return NOT_ENOUGH_DATA;
}
if (left >= count)
{
m_offset += count;
return count;
}
else
{
m_offset += left;
return left;
}
}
status_t ByteBufferInput::close()
{
return OK;
}
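// Usage sketch (not compiled): these adapters bridge std::vector / raw buffers to the
// img_utils Input/Output interfaces so TiffWriter can consume plain memory. The names
// below are illustrative only.
#if 0
std::vector<uint8_t> dng;
ByteVectorOutput out(dng); // write() appends to `dng`
out.open();
const uint8_t header[4] = { 'I', 'I', 42, 0 };
out.write(header, /*offset*/0, /*count*/4);
out.close();
ByteBufferInput in(dng.data(), dng.size()); // read() walks the same bytes
in.open();
uint8_t back[4] = { 0 };
in.read(back, /*offset*/0, /*count*/4); // returns 4; NOT_ENOUGH_DATA once exhausted
in.close();
#endif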
/**
* Convert a single YUV pixel to RGB.
*/
static void yuvToRgb(const uint8_t yuvData[3], int outOffset, /*out*/uint8_t rgbOut[3]) {
const int COLOR_MAX = 255;
float y = yuvData[0] & 0xFF; // Y channel
float cb = yuvData[1] & 0xFF; // U channel
float cr = yuvData[2] & 0xFF; // V channel
// convert YUV -> RGB (from JFIF's "Conversion to and from RGB" section)
float r = y + 1.402f * (cr - 128);
float g = y - 0.34414f * (cb - 128) - 0.71414f * (cr - 128);
float b = y + 1.772f * (cb - 128);
// clamp to [0,255]
rgbOut[outOffset] = (uint8_t) std::max(0, std::min(COLOR_MAX, (int)r));
rgbOut[outOffset + 1] = (uint8_t) std::max(0, std::min(COLOR_MAX, (int)g));
rgbOut[outOffset + 2] = (uint8_t) std::max(0, std::min(COLOR_MAX, (int)b));
}
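// Worked example: the nominally pure-red JFIF pixel Y=81, Cb=90, Cr=240 maps to
// r = 81 + 1.402*(240-128) = 238.0 -> 238
// g = 81 - 0.34414*(90-128) - 0.71414*(240-128) = 14.1 -> 14
// b = 81 + 1.772*(90-128) = 13.7 -> 13
// i.e. approximately (255, 0, 0).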
/**
* Convert a single packed ARGB color int to RGB.
*/
static void colorToRgb(int color, int outOffset, /*out*/uint8_t rgbOut[3]) {
rgbOut[outOffset] = (uint8_t)((color >> 16) & 0xFF);
rgbOut[outOffset + 1] = (uint8_t)((color >> 8) & 0xFF);
rgbOut[outOffset + 2] = (uint8_t)(color & 0xFF);
// Discards Alpha
}
/**
* Generate a direct RGB {@link ByteBuffer} from a YUV420_888 {@link Image}.
*/
#if 0
static ByteBuffer convertToRGB(Image yuvImage) {
// TODO: Optimize this with renderscript intrinsic.
int width = yuvImage.getWidth();
int height = yuvImage.getHeight();
ByteBuffer buf = ByteBuffer.allocateDirect(BYTES_PER_RGB_PIX * width * height);
Image.Plane yPlane = yuvImage.getPlanes()[0];
Image.Plane uPlane = yuvImage.getPlanes()[1];
Image.Plane vPlane = yuvImage.getPlanes()[2];
ByteBuffer yBuf = yPlane.getBuffer();
ByteBuffer uBuf = uPlane.getBuffer();
ByteBuffer vBuf = vPlane.getBuffer();
yBuf.rewind();
uBuf.rewind();
vBuf.rewind();
int yRowStride = yPlane.getRowStride();
int vRowStride = vPlane.getRowStride();
int uRowStride = uPlane.getRowStride();
int yPixStride = yPlane.getPixelStride();
int vPixStride = vPlane.getPixelStride();
int uPixStride = uPlane.getPixelStride();
byte[] yuvPixel = { 0, 0, 0 };
byte[] yFullRow = new byte[yPixStride * (width - 1) + 1];
byte[] uFullRow = new byte[uPixStride * (width / 2 - 1) + 1];
byte[] vFullRow = new byte[vPixStride * (width / 2 - 1) + 1];
byte[] finalRow = new byte[BYTES_PER_RGB_PIX * width];
for (int i = 0; i < height; i++) {
int halfH = i / 2;
yBuf.position(yRowStride * i);
yBuf.get(yFullRow);
uBuf.position(uRowStride * halfH);
uBuf.get(uFullRow);
vBuf.position(vRowStride * halfH);
vBuf.get(vFullRow);
for (int j = 0; j < width; j++) {
int halfW = j / 2;
yuvPixel[0] = yFullRow[yPixStride * j];
yuvPixel[1] = uFullRow[uPixStride * halfW];
yuvPixel[2] = vFullRow[vPixStride * halfW];
yuvToRgb(yuvPixel, j * BYTES_PER_RGB_PIX, /*out*/finalRow);
}
buf.put(finalRow);
}
yBuf.rewind();
uBuf.rewind();
vBuf.rewind();
buf.rewind();
return buf;
}
#endif
DngCreator::DngCreator(ACameraMetadata* characteristics, ACameraMetadata* result) : NativeContext(characteristics, result)
{
// Find current time
time_t ts = time(NULL);
// Find boot time
// long bootTimeMillis = currentTime - SystemClock.elapsedRealtime();
// Find capture time (nanos since boot)
#if 0
Long timestamp = metadata.get(CaptureResult.SENSOR_TIMESTAMP);
long captureTime = currentTime;
if (timestamp != null) {
captureTime = timestamp / 1000000 + bootTimeMillis;
}
// Format for metadata
String formattedCaptureTime = sDateTimeStampFormat.format(captureTime);
#endif
// NOTE: capture-time formatting from the Java original has not been ported, so this is
// empty and init() will leave the capture time unset (see the DATETIME_COUNT check there).
std::string formattedCaptureTime;
init(characteristics, result, formattedCaptureTime);
}
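// Sketch (assumption, mirroring the disabled Java above): a capture-time string in the
// TIFF datetime format expected by init() could be produced inside the constructor from
// the time_t `ts` declared there, e.g. with strftime:
#if 0
char buf[NativeContext::DATETIME_COUNT]; // 20 bytes: 19 chars + NUL
strftime(buf, sizeof(buf), "%Y:%m:%d %H:%M:%S", localtime(&ts));
init(characteristics, result, std::string(buf));
#endif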
#if 0
void DngCreator::setLocation(Location location)
{
double latitude = location.getLatitude();
double longitude = location.getLongitude();
long time = location.getTime();
int[] latTag = toExifLatLong(latitude);
int[] longTag = toExifLatLong(longitude);
String latRef = latitude >= 0 ? GPS_LAT_REF_NORTH : GPS_LAT_REF_SOUTH;
String longRef = longitude >= 0 ? GPS_LONG_REF_EAST : GPS_LONG_REF_WEST;
String dateTag = sExifGPSDateStamp.format(time);
mGPSTimeStampCalendar.setTimeInMillis(time);
int[] timeTag = new int[] { mGPSTimeStampCalendar.get(Calendar.HOUR_OF_DAY), 1,
mGPSTimeStampCalendar.get(Calendar.MINUTE), 1,
mGPSTimeStampCalendar.get(Calendar.SECOND), 1 };
nativeSetGpsTags(latTag, latRef, longTag, longRef, dateTag, timeTag);
}
#endif
void DngCreator::writeInputStream(std::vector<uint8_t>& dngOutput, SIZE size, const std::vector<uint8_t>& pixels, long offset)
{
int width = size.width;
int height = size.height;
if (width <= 0 || height <= 0) {
ALOGE("%s: Size with invalid width, height: (%d,%d)", __FUNCTION__, width, height);
#if 0
throw new IllegalArgumentException("Size with invalid width, height: (" + width + "," +
height + ") passed to writeInputStream");
#endif
return;
}
writeInputStream(dngOutput, pixels, width, height, offset);
}
void DngCreator::writeByteBuffer(std::vector<uint8_t>& dngOutput, SIZE size, const std::vector<uint8_t>& pixels, long offset)
{
int width = size.width;
int height = size.height;
writeByteBuffer(width, height, pixels, dngOutput, DEFAULT_PIXEL_STRIDE,
width * DEFAULT_PIXEL_STRIDE, offset);
}
#if 0
void DngCreator::writeImage(OutputStream& dngOutput, AImage& pixels)
{
int format = pixels.getFormat();
if (format != ImageFormat.RAW_SENSOR) {
}
Image.Plane[] planes = pixels.getPlanes();
if (planes == null || planes.length <= 0) {
}
ByteBuffer buf = planes[0].getBuffer();
writeByteBuffer(pixels.getWidth(), pixels.getHeight(), buf, dngOutput,
planes[0].getPixelStride(), planes[0].getRowStride(), 0);
}
#endif
void DngCreator::close() {
}
// private static final DateFormat sExifGPSDateStamp = new SimpleDateFormat(GPS_DATE_FORMAT_STR);
// private static final DateFormat sDateTimeStampFormat = new SimpleDateFormat(TIFF_DATETIME_FORMAT);
#if 0
static {
sDateTimeStampFormat.setTimeZone(TimeZone.getDefault());
sExifGPSDateStamp.setTimeZone(TimeZone.getTimeZone("UTC"));
}
#endif
/**
* Offset, rowStride, and pixelStride are given in bytes. Height and width are given in pixels.
*/
void DngCreator::writeByteBuffer(int width, int height, const std::vector<uint8_t>& pixels, std::vector<uint8_t>& dngOutput, int pixelStride, int rowStride, long offset)
{
if (width <= 0 || height <= 0) {
ALOGE("%s: Image with invalid width, height: (%d,%d)", __FUNCTION__, width, height);
return;
}
long capacity = (long) pixels.size(); // ByteBuffer.capacity() in the Java original
long totalSize = ((long) rowStride) * height + offset;
if (capacity < totalSize) {
ALOGE("%s: Image size %ld is too small (must be larger than %ld)", __FUNCTION__,
capacity, totalSize);
#if 0
throw new IllegalArgumentException("Image size " + capacity +
" is too small (must be larger than " + totalSize + ")");
#endif
return;
}
int minRowStride = pixelStride * width;
if (minRowStride > rowStride) {
ALOGE("%s: Invalid image pixel stride, row byte width %d is too large, expecting %d",
__FUNCTION__, minRowStride, rowStride);
#if 0
throw new IllegalArgumentException("Invalid image pixel stride, row byte width " +
minRowStride + " is too large, expecting " + rowStride);
#endif
return;
}
// pixels.clear(); // Reset mark and limit
writeImage(dngOutput, width, height, pixels, rowStride, pixelStride, offset, true);
// pixels.clear();
}
/**
* Generate a direct RGB {@link ByteBuffer} from a {@link Bitmap}.
*/
#if 0
static ByteBuffer DngCreator::convertToRGB(Bitmap argbBitmap) {
// TODO: Optimize this.
int width = argbBitmap.getWidth();
int height = argbBitmap.getHeight();
ByteBuffer buf = ByteBuffer.allocateDirect(BYTES_PER_RGB_PIX * width * height);
int[] pixelRow = new int[width];
byte[] finalRow = new byte[BYTES_PER_RGB_PIX * width];
for (int i = 0; i < height; i++) {
argbBitmap.getPixels(pixelRow, /*offset*/0, /*stride*/width, /*x*/0, /*y*/i,
/*width*/width, /*height*/1);
for (int j = 0; j < width; j++) {
colorToRgb(pixelRow[j], j * BYTES_PER_RGB_PIX, /*out*/finalRow);
}
buf.put(finalRow);
}
buf.rewind();
return buf;
}
#endif
/**
* Convert coordinate to EXIF GPS tag format.
*/
void DngCreator::toExifLatLong(double value, int data[6])
{
// convert to the format dd/1 mm/1 ssss/100
value = std::abs(value);
data[0] = (int) value;
data[1] = 1;
value = (value - data[0]) * 60;
data[2] = (int) value;
data[3] = 1;
value = (value - data[2]) * 6000;
data[4] = (int) value;
data[5] = 100;
}
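// Worked example: 37.4220 degrees ->
// data = {37, 1, 25, 1, 1920, 100}, i.e. 37/1 deg, 25/1 min, 1920/100 = 19.20 sec
// (0.4220 * 60 = 25.32 -> 25 min; 0.32 * 6000 = 1920 hundredths of a second).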
NativeContext::NativeContext(ACameraMetadata* characteristics, ACameraMetadata* result) :
mCharacteristics(characteristics), mResult(result), mThumbnailWidth(0),
mThumbnailHeight(0), mOrientation(TAG_ORIENTATION_UNKNOWN), mThumbnailSet(false),
mGpsSet(false), mDescriptionSet(false), mCaptureTimeSet(false) {}
NativeContext::~NativeContext() {}
TiffWriter* NativeContext::getWriter() {
return &mWriter;
}
ACameraMetadata* NativeContext::getCharacteristics() const {
return mCharacteristics;
}
ACameraMetadata* NativeContext::getResult() const {
return mResult;
}
uint32_t NativeContext::getThumbnailWidth() const {
return mThumbnailWidth;
}
uint32_t NativeContext::getThumbnailHeight() const {
return mThumbnailHeight;
}
const uint8_t* NativeContext::getThumbnail() const {
return &mCurrentThumbnail[0];
}
bool NativeContext::hasThumbnail() const {
return mThumbnailSet;
}
bool NativeContext::setThumbnail(const std::vector<uint8_t>& buffer, uint32_t width, uint32_t height) {
mThumbnailWidth = width;
mThumbnailHeight = height;
size_t size = BYTES_PER_RGB_PIXEL * width * height;
mCurrentThumbnail.resize(size);
//if (mCurrentThumbnail.resize(size) < 0) {
// ALOGE("%s: Could not resize thumbnail buffer.", __FUNCTION__);
// return false;
//}
// uint8_t* thumb = mCurrentThumbnail.editArray();
memcpy(&mCurrentThumbnail[0], &buffer[0], size);
mThumbnailSet = true;
return true;
}
void NativeContext::setOrientation(uint16_t orientation) {
mOrientation = orientation;
}
uint16_t NativeContext::getOrientation() const {
return mOrientation;
}
void NativeContext::setDescription(const std::string& desc) {
mDescription = desc;
mDescriptionSet = true;
}
std::string NativeContext::getDescription() const {
return mDescription;
}
bool NativeContext::hasDescription() const {
return mDescriptionSet;
}
void NativeContext::setGpsData(const GpsData& data) {
mGpsData = data;
mGpsSet = true;
}
GpsData NativeContext::getGpsData() const {
return mGpsData;
}
bool NativeContext::hasGpsData() const {
return mGpsSet;
}
void NativeContext::setCaptureTime(const std::string& formattedCaptureTime) {
mFormattedCaptureTime = formattedCaptureTime;
mCaptureTimeSet = true;
}
std::string NativeContext::getCaptureTime() const {
return mFormattedCaptureTime;
}
bool NativeContext::hasCaptureTime() const {
return mCaptureTimeSet;
}
// End of NativeContext
// ----------------------------------------------------------------------------
/**
* StripSource subclass for Input types.
*
* This class is not intended to be used across JNI calls.
*/
class InputStripSource : public StripSource, public LightRefBase<InputStripSource> {
public:
InputStripSource(Input& input, uint32_t ifd, uint32_t width, uint32_t height,
uint32_t pixStride, uint32_t rowStride, uint64_t offset, uint32_t bytesPerSample,
uint32_t samplesPerPixel);
virtual ~InputStripSource();
virtual status_t writeToStream(Output& stream, uint32_t count);
virtual uint32_t getIfd() const;
protected:
uint32_t mIfd;
Input* mInput;
uint32_t mWidth;
uint32_t mHeight;
uint32_t mPixStride;
uint32_t mRowStride;
uint64_t mOffset;
uint32_t mBytesPerSample;
uint32_t mSamplesPerPixel;
};
InputStripSource::InputStripSource(Input& input, uint32_t ifd, uint32_t width,
uint32_t height, uint32_t pixStride, uint32_t rowStride, uint64_t offset,
uint32_t bytesPerSample, uint32_t samplesPerPixel) : mIfd(ifd), mInput(&input),
mWidth(width), mHeight(height), mPixStride(pixStride), mRowStride(rowStride),
mOffset(offset), mBytesPerSample(bytesPerSample),
mSamplesPerPixel(samplesPerPixel) {}
InputStripSource::~InputStripSource() {}
status_t InputStripSource::writeToStream(Output& stream, uint32_t count) {
uint32_t fullSize = mWidth * mHeight * mBytesPerSample * mSamplesPerPixel;
int64_t offset = static_cast<int64_t>(mOffset);
if (fullSize != count) {
ALOGE("%s: Amount to write %u doesn't match image size %u", __FUNCTION__, count,
fullSize);
// jniThrowException(mEnv, "java/lang/IllegalStateException", "Not enough data to write");
return BAD_VALUE;
}
// Skip offset
while (offset > 0) {
ssize_t skipped = mInput->skip(offset);
if (skipped <= 0) {
if (skipped == NOT_ENOUGH_DATA || skipped == 0) {
#if 0
jniThrowExceptionFmt(mEnv, "java/io/IOException",
"Early EOF encountered in skip, not enough pixel data for image of size %u",
fullSize);
#endif
skipped = NOT_ENOUGH_DATA;
} else {
#if 0
if (!mEnv->ExceptionCheck()) {
jniThrowException(mEnv, "java/io/IOException",
"Error encountered while skip bytes in input stream.");
}
#endif
}
return skipped;
}
offset -= skipped;
}
std::vector<uint8_t> row;
row.resize(mRowStride);
#if 0
if (row.resize(mRowStride) < 0) {
jniThrowException(mEnv, "java/lang/OutOfMemoryError", "Could not allocate row vector.");
return BAD_VALUE;
}
#endif
uint8_t* rowBytes = &row[0];
for (uint32_t i = 0; i < mHeight; ++i) {
size_t rowFillAmt = 0;
size_t rowSize = mRowStride;
while (rowFillAmt < mRowStride) {
ssize_t bytesRead = mInput->read(rowBytes, rowFillAmt, rowSize);
if (bytesRead <= 0) {
if (bytesRead == NOT_ENOUGH_DATA || bytesRead == 0) {
ALOGE("%s: Early EOF on row %" PRIu32 ", received bytesRead %zd",
__FUNCTION__, i, bytesRead);
#if 0
jniThrowExceptionFmt(mEnv, "java/io/IOException",
"Early EOF encountered, not enough pixel data for image of size %"
PRIu32, fullSize);
#endif
bytesRead = NOT_ENOUGH_DATA;
} else {
#if 0
if (!mEnv->ExceptionCheck()) {
jniThrowException(mEnv, "java/io/IOException",
"Error encountered while reading");
}
#endif
}
return bytesRead;
}
rowFillAmt += bytesRead;
rowSize -= bytesRead;
}
if (mPixStride == mBytesPerSample * mSamplesPerPixel) {
ALOGV("%s: Using stream per-row write for strip.", __FUNCTION__);
if (stream.write(rowBytes, 0, mBytesPerSample * mSamplesPerPixel * mWidth) != OK) {
#if 0
if (!mEnv->ExceptionCheck()) {
jniThrowException(mEnv, "java/io/IOException", "Failed to write pixel data");
}
#endif
return BAD_VALUE;
}
} else {
ALOGV("%s: Using stream per-pixel write for strip.", __FUNCTION__);
#if 0
jniThrowException(mEnv, "java/lang/IllegalStateException",
"Per-pixel strides are not supported for RAW16 -- pixels must be contiguous");
#endif
return BAD_VALUE;
// TODO: Add support for non-contiguous pixels if needed.
}
}
return OK;
}
uint32_t InputStripSource::getIfd() const {
return mIfd;
}
// End of InputStripSource
// ----------------------------------------------------------------------------
/**
* StripSource subclass for direct buffer types.
*
* This class is not intended to be used across JNI calls.
*/
class DirectStripSource : public StripSource, public LightRefBase<DirectStripSource> {
public:
DirectStripSource(const uint8_t* pixelBytes, uint32_t ifd, uint32_t width,
uint32_t height, uint32_t pixStride, uint32_t rowStride, uint64_t offset,
uint32_t bytesPerSample, uint32_t samplesPerPixel);
virtual ~DirectStripSource();
virtual status_t writeToStream(Output& stream, uint32_t count);
virtual uint32_t getIfd() const;
protected:
uint32_t mIfd;
const uint8_t* mPixelBytes;
uint32_t mWidth;
uint32_t mHeight;
uint32_t mPixStride;
uint32_t mRowStride;
uint64_t mOffset; // was uint16_t, which silently truncated the 64-bit ctor argument
uint32_t mBytesPerSample;
uint32_t mSamplesPerPixel;
};
DirectStripSource::DirectStripSource(const uint8_t* pixelBytes, uint32_t ifd,
uint32_t width, uint32_t height, uint32_t pixStride, uint32_t rowStride,
uint64_t offset, uint32_t bytesPerSample, uint32_t samplesPerPixel) : mIfd(ifd),
mPixelBytes(pixelBytes), mWidth(width), mHeight(height), mPixStride(pixStride),
mRowStride(rowStride), mOffset(offset), mBytesPerSample(bytesPerSample),
mSamplesPerPixel(samplesPerPixel) {}
DirectStripSource::~DirectStripSource() {}
status_t DirectStripSource::writeToStream(Output& stream, uint32_t count) {
uint32_t fullSize = mWidth * mHeight * mBytesPerSample * mSamplesPerPixel;
if (fullSize != count) {
ALOGE("%s: Amount to write %u doesn't match image size %u", __FUNCTION__, count,
fullSize);
#if 0
jniThrowException(mEnv, "java/lang/IllegalStateException", "Not enough data to write");
#endif
return BAD_VALUE;
}
if (mPixStride == mBytesPerSample * mSamplesPerPixel
&& mRowStride == mWidth * mBytesPerSample * mSamplesPerPixel) {
ALOGV("%s: Using direct single-pass write for strip.", __FUNCTION__);
if (stream.write(mPixelBytes, mOffset, fullSize) != OK) {
#if 0
if (!mEnv->ExceptionCheck()) {
jniThrowException(mEnv, "java/io/IOException", "Failed to write pixel data");
}
#endif
return BAD_VALUE;
}
} else if (mPixStride == mBytesPerSample * mSamplesPerPixel) {
ALOGV("%s: Using direct per-row write for strip.", __FUNCTION__);
for (size_t i = 0; i < mHeight; ++i) {
if (stream.write(mPixelBytes, mOffset + i * mRowStride, mPixStride * mWidth) != OK/* ||
mEnv->ExceptionCheck()*/) {
#if 0
if (!mEnv->ExceptionCheck()) {
jniThrowException(mEnv, "java/io/IOException", "Failed to write pixel data");
}
#endif
return BAD_VALUE;
}
}
} else {
ALOGV("%s: Using direct per-pixel write for strip.", __FUNCTION__);
#if 0
jniThrowException(mEnv, "java/lang/IllegalStateException",
"Per-pixel strides are not supported for RAW16 -- pixels must be contiguous");
#endif
return BAD_VALUE;
// TODO: Add support for non-contiguous pixels if needed.
}
return OK;
}
uint32_t DirectStripSource::getIfd() const {
return mIfd;
}
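// Usage sketch (not compiled): a DirectStripSource wraps an in-memory RAW16 buffer and is
// handed to TiffWriter::write() together with the IFD built in DngCreator::setup(). This
// assumes the img_utils TiffWriter::write(Output*, StripSource**, size_t) overload with
// its default (little-endian) byte order; the variable names are illustrative.
#if 0
DirectStripSource source(pixelBytes, TIFF_IFD_0, width, height,
/*pixStride*/2, /*rowStride*/width * 2, /*offset*/0,
/*bytesPerSample*/2, /*samplesPerPixel*/1);
StripSource* sources[] = { &source };
ByteVectorOutput out(dngBuffer);
writer->write(&out, sources, /*sourcesCount*/1);
#endif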
// End of DirectStripSource
// ----------------------------------------------------------------------------
/**
* Calculate the default crop relative to the "active area" of the image sensor (this active area
* will always be the pre-correction active area rectangle), and set this.
*/
static status_t calculateAndSetCrop(ACameraMetadata* characteristics,
sp<TiffWriter> writer) {
ACameraMetadata_const_entry entry = { 0 };
// ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
// ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
camera_status_t status = ACameraMetadata_getConstEntry(characteristics,
ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, &entry);
uint32_t width = static_cast<uint32_t>(entry.data.i32[2]);
uint32_t height = static_cast<uint32_t>(entry.data.i32[3]);
const uint32_t margin = 8; // Default margin recommended by Adobe for interpolation.
if (width < margin * 2 || height < margin * 2) {
ALOGE("%s: Cannot calculate default crop for image, pre-correction active area is too"
"small: h=%" PRIu32 ", w=%" PRIu32, __FUNCTION__, height, width);
#if 0
jniThrowException(env, "java/lang/IllegalStateException",
"Pre-correction active area is too small.");
#endif
return BAD_VALUE;
}
uint32_t defaultCropOrigin[] = {margin, margin};
uint32_t defaultCropSize[] = {width - defaultCropOrigin[0] - margin,
height - defaultCropOrigin[1] - margin};
BAIL_IF_INVALID_R(writer->addEntry(TAG_DEFAULTCROPORIGIN, 2, defaultCropOrigin,
TIFF_IFD_0), env, TAG_DEFAULTCROPORIGIN, writer);
BAIL_IF_INVALID_R(writer->addEntry(TAG_DEFAULTCROPSIZE, 2, defaultCropSize,
TIFF_IFD_0), env, TAG_DEFAULTCROPSIZE, writer);
return OK;
}
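// Example: a 4032x3024 pre-correction active area yields DefaultCropOrigin = (8, 8) and
// DefaultCropSize = (4016, 3008), leaving the 8-pixel interpolation margin on every side.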
static bool validateDngHeader(sp<TiffWriter> writer, ACameraMetadata* characteristics, uint32_t width, uint32_t height)
{
if (width == 0 || height == 0) {
#if 0
jniThrowExceptionFmt(env, "java/lang/IllegalArgumentException", \
"Image width %d is invalid", width);
#endif
return false;
}
ACameraMetadata_const_entry preCorrectionEntry = { 0 };
camera_status_t status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, &preCorrectionEntry);
ACameraMetadata_const_entry pixelArrayEntry = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE, &pixelArrayEntry);
int pWidth = static_cast<int>(pixelArrayEntry.data.i32[0]);
int pHeight = static_cast<int>(pixelArrayEntry.data.i32[1]);
int cWidth = static_cast<int>(preCorrectionEntry.data.i32[2]);
int cHeight = static_cast<int>(preCorrectionEntry.data.i32[3]);
bool matchesPixelArray = (pWidth == width && pHeight == height);
bool matchesPreCorrectionArray = (cWidth == width && cHeight == height);
if (!(matchesPixelArray || matchesPreCorrectionArray)) {
#if 0
jniThrowExceptionFmt(env, "java/lang/IllegalArgumentException", \
"Image dimensions (w=%d,h=%d) are invalid, must match either the pixel "
"array size (w=%d, h=%d) or the pre-correction array size (w=%d, h=%d)",
width, height, pWidth, pHeight, cWidth, cHeight);
#endif
return false;
}
return true;
}
static status_t moveEntries(sp<TiffWriter> writer, uint32_t ifdFrom, uint32_t ifdTo,
const std::vector<uint16_t>& entries) {
for (size_t i = 0; i < entries.size(); ++i) {
uint16_t tagId = entries[i];
sp<TiffEntry> entry = writer->getEntry(tagId, ifdFrom);
if (entry.get() == nullptr) {
ALOGE("%s: moveEntries failed, entry %u not found in IFD %u", __FUNCTION__, tagId,
ifdFrom);
return BAD_VALUE;
}
if (writer->addEntry(entry, ifdTo) != OK) {
ALOGE("%s: moveEntries failed, could not add entry %u to IFD %u", __FUNCTION__, tagId,
ifdFrom);
return BAD_VALUE;
}
writer->removeEntry(tagId, ifdFrom);
}
return OK;
}
/**
* Write CFA pattern for given CFA enum into cfaOut. cfaOut must have length >= 4.
* Returns OK on success, or a negative error code if the CFA enum was invalid.
*/
static status_t convertCFA(uint8_t cfaEnum, /*out*/uint8_t* cfaOut) {
acamera_metadata_enum_android_sensor_info_color_filter_arrangement_t cfa =
static_cast<acamera_metadata_enum_android_sensor_info_color_filter_arrangement_t>(
cfaEnum);
switch(cfa) {
case ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGGB: {
cfaOut[0] = 0;
cfaOut[1] = 1;
cfaOut[2] = 1;
cfaOut[3] = 2;
break;
}
case ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GRBG: {
cfaOut[0] = 1;
cfaOut[1] = 0;
cfaOut[2] = 2;
cfaOut[3] = 1;
break;
}
case ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GBRG: {
cfaOut[0] = 1;
cfaOut[1] = 2;
cfaOut[2] = 0;
cfaOut[3] = 1;
break;
}
case ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_BGGR: {
cfaOut[0] = 2;
cfaOut[1] = 1;
cfaOut[2] = 1;
cfaOut[3] = 0;
break;
}
// MONO and NIR are a degenerate case of the RGGB pattern: only the red channel
// is used.
case ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_MONO:
case ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_NIR: {
cfaOut[0] = 0;
break;
}
default: {
return BAD_VALUE;
}
}
return OK;
}
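// The output uses the DNG CFAPattern encoding 0 = red, 1 = green, 2 = blue; e.g. RGGB
// yields {0, 1, 1, 2} in row-major order for the 2x2 repeat pattern.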
/**
* Convert the CFA layout enum to an OpcodeListBuilder::CfaLayout enum, defaults to
* RGGB for an unknown enum.
*/
static OpcodeListBuilder::CfaLayout convertCFAEnumToOpcodeLayout(uint8_t cfaEnum) {
acamera_metadata_enum_android_sensor_info_color_filter_arrangement_t cfa =
static_cast<acamera_metadata_enum_android_sensor_info_color_filter_arrangement_t>(
cfaEnum);
switch(cfa) {
case ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGGB: {
return OpcodeListBuilder::CFA_RGGB;
}
case ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GRBG: {
return OpcodeListBuilder::CFA_GRBG;
}
case ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GBRG: {
return OpcodeListBuilder::CFA_GBRG;
}
case ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_BGGR: {
return OpcodeListBuilder::CFA_BGGR;
}
default: {
return OpcodeListBuilder::CFA_RGGB;
}
}
}
/**
* For each color plane, find the corresponding noise profile coefficients given in the
* per-channel noise profile. If multiple channels in the CFA correspond to a color in the color
* plane, this method takes the pair of noise profile coefficients with the higher S coefficient.
*
* perChannelNoiseProfile - numChannels * 2 noise profile coefficients.
* cfa - numChannels color channels corresponding to each of the per-channel noise profile
* coefficients.
* numChannels - the number of noise profile coefficient pairs and color channels given in
* the perChannelNoiseProfile and cfa arguments, respectively.
* planeColors - the color planes in the noise profile output.
* numPlanes - the number of planes in planeColors and pairs of coefficients in noiseProfile.
* noiseProfile - 2 * numPlanes doubles containing numPlanes pairs of noise profile coefficients.
*
* returns OK, or a negative error code on failure.
*/
static status_t generateNoiseProfile(const double* perChannelNoiseProfile, uint8_t* cfa,
size_t numChannels, const uint8_t* planeColors, size_t numPlanes,
/*out*/double* noiseProfile) {
for (size_t p = 0; p < numPlanes; ++p) {
size_t S = p * 2;
size_t O = p * 2 + 1;
noiseProfile[S] = 0;
noiseProfile[O] = 0;
bool uninitialized = true;
for (size_t c = 0; c < numChannels; ++c) {
if (cfa[c] == planeColors[p] && perChannelNoiseProfile[c * 2] > noiseProfile[S]) {
noiseProfile[S] = perChannelNoiseProfile[c * 2];
noiseProfile[O] = perChannelNoiseProfile[c * 2 + 1];
uninitialized = false;
}
}
if (uninitialized) {
ALOGE("%s: No valid NoiseProfile coefficients for color plane %zu",
__FUNCTION__, p);
return BAD_VALUE;
}
}
return OK;
}
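// Worked example: with an RGGB cfa = {0, 1, 1, 2} and four per-channel pairs
// (S0,O0)..(S3,O3), the green plane (color 1) matches channels 1 and 2 and takes
// whichever pair has the larger S coefficient; red and blue map one-to-one.
/**
* Map a pixel coordinate (x, y) through the radial/tangential distortion model
* {k0..k3, p1, p2} (see the opcode formula in normalizeLensDistortion below), where
* (cx, cy) is the optical center and f the focal length in pixels. Used to check where
* corrected output pixels sample from within the pre-correction array.
*/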
static void undistort(/*inout*/double& x, /*inout*/double& y,
const std::array<float, 6>& distortion,
const float cx, const float cy, const float f) {
double xp = (x - cx) / f;
double yp = (y - cy) / f;
double x2 = xp * xp;
double y2 = yp * yp;
double r2 = x2 + y2;
double xy2 = 2.0 * xp * yp;
const float k0 = distortion[0];
const float k1 = distortion[1];
const float k2 = distortion[2];
const float k3 = distortion[3];
const float p1 = distortion[4];
const float p2 = distortion[5];
double kr = k0 + ((k3 * r2 + k2) * r2 + k1) * r2;
double xpp = xp * kr + p1 * xy2 + p2 * (r2 + 2.0 * x2);
double ypp = yp * kr + p1 * (r2 + 2.0 * y2) + p2 * xy2;
x = xpp * f + cx;
y = ypp * f + cy;
return;
}
static inline bool unDistortWithinPreCorrArray(
double x, double y,
const std::array<float, 6>& distortion,
const float cx, const float cy, const float f,
const int preCorrW, const int preCorrH, const int xMin, const int yMin) {
undistort(x, y, distortion, cx, cy, f);
// xMin and yMin are inclusive, and xMax and yMax are exclusive.
int xMax = xMin + preCorrW;
int yMax = yMin + preCorrH;
if (x < xMin || y < yMin || x >= xMax || y >= yMax) {
return false;
}
return true;
}
static inline bool boxWithinPrecorrectionArray(
int left, int top, int right, int bottom,
const std::array<float, 6>& distortion,
const float cx, const float cy, const float f,
const int preCorrW, const int preCorrH, const int xMin, const int yMin){
// Top row
if (!unDistortWithinPreCorrArray(left, top,
distortion, cx, cy, f, preCorrW, preCorrH, xMin, yMin)) {
return false;
}
if (!unDistortWithinPreCorrArray(cx, top,
distortion, cx, cy, f, preCorrW, preCorrH, xMin, yMin)) {
return false;
}
if (!unDistortWithinPreCorrArray(right, top,
distortion, cx, cy, f, preCorrW, preCorrH, xMin, yMin)) {
return false;
}
// Middle row
if (!unDistortWithinPreCorrArray(left, cy,
distortion, cx, cy, f, preCorrW, preCorrH, xMin, yMin)) {
return false;
}
if (!unDistortWithinPreCorrArray(right, cy,
distortion, cx, cy, f, preCorrW, preCorrH, xMin, yMin)) {
return false;
}
// Bottom row
if (!unDistortWithinPreCorrArray(left, bottom,
distortion, cx, cy, f, preCorrW, preCorrH, xMin, yMin)) {
return false;
}
if (!unDistortWithinPreCorrArray(cx, bottom,
distortion, cx, cy, f, preCorrW, preCorrH, xMin, yMin)) {
return false;
}
if (!unDistortWithinPreCorrArray(right, bottom,
distortion, cx, cy, f, preCorrW, preCorrH, xMin, yMin)) {
return false;
}
return true;
}
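// Note: boxWithinPrecorrectionArray accepts the box only if eight probe points (the four
// corners plus the four edge points aligned with the optical center) all land inside the
// pre-correction array after distortion.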
static inline bool scaledBoxWithinPrecorrectionArray(
double scale/*must be <= 1.0*/,
const std::array<float, 6>& distortion,
const float cx, const float cy, const float f,
const int preCorrW, const int preCorrH,
const int xMin, const int yMin){
double left = cx * (1.0 - scale);
double right = (preCorrW - 1) * scale + cx * (1.0 - scale);
double top = cy * (1.0 - scale);
double bottom = (preCorrH - 1) * scale + cy * (1.0 - scale);
return boxWithinPrecorrectionArray(left, top, right, bottom,
distortion, cx, cy, f, preCorrW, preCorrH, xMin, yMin);
}
static status_t findPostCorrectionScale(
double stepSize, double minScale,
const std::array<float, 6>& distortion,
const float cx, const float cy, const float f,
const int preCorrW, const int preCorrH, const int xMin, const int yMin,
/*out*/ double* outScale) {
if (outScale == nullptr) {
ALOGE("%s: outScale must not be null", __FUNCTION__);
return BAD_VALUE;
}
for (double scale = 1.0; scale > minScale; scale -= stepSize) {
if (scaledBoxWithinPrecorrectionArray(
scale, distortion, cx, cy, f, preCorrW, preCorrH, xMin, yMin)) {
*outScale = scale;
return OK;
}
}
ALOGE("%s: cannot find cropping scale for lens distortion: stepSize %f, minScale %f",
__FUNCTION__, stepSize, minScale);
return BAD_VALUE;
}
// Apply a scale factor to distortion coefficients so that the image is zoomed out and all pixels
// are sampled within the precorrection array
static void normalizeLensDistortion(
/*inout*/std::array<float, 6>& distortion,
float cx, float cy, float f, int preCorrW, int preCorrH, int xMin = 0, int yMin = 0) {
ALOGV("%s: distortion [%f, %f, %f, %f, %f, %f], (cx,cy) (%f, %f), f %f, (W,H) (%d, %d)"
", (xmin, ymin, xmax, ymax) (%d, %d, %d, %d)",
__FUNCTION__, distortion[0], distortion[1], distortion[2],
distortion[3], distortion[4], distortion[5],
cx, cy, f, preCorrW, preCorrH,
xMin, yMin, xMin + preCorrW - 1, yMin + preCorrH - 1);
// Only update distortion coefficients if we can find a good bounding box
double scale = 1.0;
if (OK == findPostCorrectionScale(0.002, 0.5,
distortion, cx, cy, f, preCorrW, preCorrH, xMin, yMin,
/*out*/&scale)) {
ALOGV("%s: scaling distortion coefficients by %f", __FUNCTION__, scale);
// The formula:
// xc = xi * (k0 + k1*r^2 + k2*r^4 + k3*r^6) + k4 * (2*xi*yi) + k5 * (r^2 + 2*xi^2)
// To create effective zoom we want to replace xi by xi *m, yi by yi*m and r^2 by r^2*m^2
// Factor the extra m power terms into k0~k5
std::array<float, 6> scalePowers = {1, 3, 5, 7, 2, 2};
for (size_t i = 0; i < 6; i++) {
distortion[i] *= pow(scale, scalePowers[i]);
}
}
return;
}
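// Why scalePowers = {1, 3, 5, 7, 2, 2}: substituting xi -> m*xi, yi -> m*yi makes
// r^2 -> m^2*r^2, so the k0 term gains m, the k1*r^2 term gains m^3, k2*r^4 gains m^5,
// k3*r^6 gains m^7, and both tangential terms gain m^2.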
// ----------------------------------------------------------------------------
#if 0
static NativeContext* DngCreator_getNativeContext(JNIEnv* env, jobject thiz) {
ALOGV("%s:", __FUNCTION__);
return reinterpret_cast<NativeContext*>(env->GetLongField(thiz,
gDngCreatorClassInfo.mNativeContext));
}
static void DngCreator_setNativeContext(JNIEnv* env, jobject thiz, sp<NativeContext> context) {
ALOGV("%s:", __FUNCTION__);
NativeContext* current = DngCreator_getNativeContext(env, thiz);
if (context != nullptr) {
context->incStrong((void*) DngCreator_setNativeContext);
}
if (current) {
current->decStrong((void*) DngCreator_setNativeContext);
}
env->SetLongField(thiz, gDngCreatorClassInfo.mNativeContext,
reinterpret_cast<jlong>(context.get()));
}
static void DngCreator_nativeClassInit(JNIEnv* env, jclass clazz) {
ALOGV("%s:", __FUNCTION__);
gDngCreatorClassInfo.mNativeContext = GetFieldIDOrDie(env,
clazz, ANDROID_DNGCREATOR_CTX_JNI_ID, "J");
jclass outputStreamClazz = FindClassOrDie(env, "java/io/OutputStream");
gOutputStreamClassInfo.mWriteMethod = GetMethodIDOrDie(env,
outputStreamClazz, "write", "([BII)V");
jclass inputStreamClazz = FindClassOrDie(env, "java/io/InputStream");
gInputStreamClassInfo.mReadMethod = GetMethodIDOrDie(env, inputStreamClazz, "read", "([BII)I");
gInputStreamClassInfo.mSkipMethod = GetMethodIDOrDie(env, inputStreamClazz, "skip", "(J)J");
jclass inputBufferClazz = FindClassOrDie(env, "java/nio/ByteBuffer");
gInputByteBufferClassInfo.mGetMethod = GetMethodIDOrDie(env,
inputBufferClazz, "get", "([BII)Ljava/nio/ByteBuffer;");
}
#endif
void DngCreator::init(ACameraMetadata* characteristics,
ACameraMetadata* results, const std::string& captureTime) {
ALOGV("%s:", __FUNCTION__);
sp<NativeContext> nativeContext = new NativeContext(characteristics, results);
size_t len = captureTime.size() + 1;
if (len != NativeContext::DATETIME_COUNT) { // "YYYY:MM:DD HH:MM:SS" + NUL = 20 chars
#if 0
jniThrowException(env, "java/lang/IllegalArgumentException",
"Formatted capture time string length is not required 20 characters");
#endif
return;
}
nativeContext->setCaptureTime(captureTime);
// DngCreator_setNativeContext(env, thiz, nativeContext);
}
sp<TiffWriter> DngCreator::setup(uint32_t imageWidth, uint32_t imageHeight)
{
ACameraMetadata* characteristics = getCharacteristics();
ACameraMetadata* results = getResult();
sp<TiffWriter> writer = new TiffWriter();
uint32_t preXMin = 0;
uint32_t preYMin = 0;
uint32_t preWidth = 0;
uint32_t preHeight = 0;
uint8_t colorFilter = 0;
camera_status_t status;
bool isBayer = true;
{
// Check dimensions
ACameraMetadata_const_entry entry = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, &entry);
BAIL_IF_EMPTY_RET_NULL_SP(entry, env, TAG_IMAGEWIDTH, writer);
preXMin = static_cast<uint32_t>(entry.data.i32[0]);
preYMin = static_cast<uint32_t>(entry.data.i32[1]);
preWidth = static_cast<uint32_t>(entry.data.i32[2]);
preHeight = static_cast<uint32_t>(entry.data.i32[3]);
ACameraMetadata_const_entry pixelArrayEntry = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE, &pixelArrayEntry);
uint32_t pixWidth = static_cast<uint32_t>(pixelArrayEntry.data.i32[0]);
uint32_t pixHeight = static_cast<uint32_t>(pixelArrayEntry.data.i32[1]);
if (!((imageWidth == preWidth && imageHeight == preHeight) ||
(imageWidth == pixWidth && imageHeight == pixHeight))) {
#if 0
jniThrowException(env, "java/lang/AssertionError",
"Height and width of image buffer did not match height and width of"
"either the preCorrectionActiveArraySize or the pixelArraySize.");
#endif
return nullptr;
}
ACameraMetadata_const_entry colorFilterEntry = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT, &colorFilterEntry);
colorFilter = colorFilterEntry.data.u8[0];
ACameraMetadata_const_entry capabilitiesEntry = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_REQUEST_AVAILABLE_CAPABILITIES, & capabilitiesEntry);
size_t capsCount = capabilitiesEntry.count;
const uint8_t* caps = capabilitiesEntry.data.u8;
if (std::find(caps, caps+capsCount, ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME)
!= caps+capsCount) {
isBayer = false;
} else if (colorFilter == ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_MONO ||
colorFilter == ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_NIR) {
#if 0
jniThrowException(env, "java/lang/AssertionError",
"A camera device with MONO/NIR color filter must have MONOCHROME capability.");
#endif
return nullptr;
}
}
writer->addIfd(TIFF_IFD_0);
status_t err = OK;
const uint32_t samplesPerPixel = 1;
const uint32_t bitsPerSample = BITS_PER_SAMPLE;
OpcodeListBuilder::CfaLayout opcodeCfaLayout = OpcodeListBuilder::CFA_NONE;
uint8_t cfaPlaneColor[3] = {0, 1, 2};
ACameraMetadata_const_entry cfaEntry = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT, &cfaEntry);
BAIL_IF_EMPTY_RET_NULL_SP(cfaEntry, env, TAG_CFAPATTERN, writer);
uint8_t cfaEnum = cfaEntry.data.u8[0];
// TODO: Greensplit.
// TODO: Add remaining non-essential tags
// Setup main image tags
{
// Set orientation
uint16_t orientation = TAG_ORIENTATION_NORMAL;
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_ORIENTATION, 1, &orientation, TIFF_IFD_0),
env, TAG_ORIENTATION, writer);
}
{
// Set subfiletype
uint32_t subfileType = 0; // Main image
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_NEWSUBFILETYPE, 1, &subfileType,
TIFF_IFD_0), env, TAG_NEWSUBFILETYPE, writer);
}
{
// Set bits per sample
uint16_t bits = static_cast<uint16_t>(bitsPerSample);
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_BITSPERSAMPLE, 1, &bits, TIFF_IFD_0), env,
TAG_BITSPERSAMPLE, writer);
}
{
// Set compression
uint16_t compression = 1; // None
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_COMPRESSION, 1, &compression,
TIFF_IFD_0), env, TAG_COMPRESSION, writer);
}
{
// Set dimensions
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_IMAGEWIDTH, 1, &imageWidth, TIFF_IFD_0),
env, TAG_IMAGEWIDTH, writer);
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_IMAGELENGTH, 1, &imageHeight, TIFF_IFD_0),
env, TAG_IMAGELENGTH, writer);
}
{
// Set photometric interpretation
uint16_t interpretation = isBayer ? 32803 /* CFA */ : 34892 /* LinearRaw */;
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_PHOTOMETRICINTERPRETATION, 1,
&interpretation, TIFF_IFD_0), env, TAG_PHOTOMETRICINTERPRETATION, writer);
}
{
uint16_t repeatDim[2] = {2, 2};
if (!isBayer) {
repeatDim[0] = repeatDim[1] = 1;
}
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_BLACKLEVELREPEATDIM, 2, repeatDim,
TIFF_IFD_0), env, TAG_BLACKLEVELREPEATDIM, writer);
// Set blacklevel tags, using dynamic black level if available
ACameraMetadata_const_entry entry = { 0 };
camera_status_t status = ACameraMetadata_getConstEntry(results, ACAMERA_SENSOR_DYNAMIC_BLACK_LEVEL, &entry);
uint32_t blackLevelRational[8] = {0};
if (entry.count != 0) {
BAIL_IF_EXPR_RET_NULL_SP(entry.count != 4, env, TAG_BLACKLEVEL, writer);
for (size_t i = 0; i < entry.count; i++) {
blackLevelRational[i * 2] = static_cast<uint32_t>(entry.data.f[i] * 100);
blackLevelRational[i * 2 + 1] = 100;
}
} else {
// Fall back to static black level which is guaranteed
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_BLACK_LEVEL_PATTERN, &entry);
BAIL_IF_EXPR_RET_NULL_SP(entry.count != 4, env, TAG_BLACKLEVEL, writer);
for (size_t i = 0; i < entry.count; i++) {
blackLevelRational[i * 2] = static_cast<uint32_t>(entry.data.i32[i]);
blackLevelRational[i * 2 + 1] = 1;
}
}
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_BLACKLEVEL, repeatDim[0]*repeatDim[1],
blackLevelRational, TIFF_IFD_0), env, TAG_BLACKLEVEL, writer);
}
{
// Set samples per pixel
uint16_t samples = static_cast<uint16_t>(samplesPerPixel);
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_SAMPLESPERPIXEL, 1, &samples, TIFF_IFD_0),
env, TAG_SAMPLESPERPIXEL, writer);
}
{
// Set planar configuration
uint16_t config = 1; // Chunky
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_PLANARCONFIGURATION, 1, &config,
TIFF_IFD_0), env, TAG_PLANARCONFIGURATION, writer);
}
// All CFA pattern tags are not necessary for monochrome cameras.
if (isBayer) {
// Set CFA pattern dimensions
uint16_t repeatDim[2] = {2, 2};
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_CFAREPEATPATTERNDIM, 2, repeatDim,
TIFF_IFD_0), env, TAG_CFAREPEATPATTERNDIM, writer);
// Set CFA pattern
const int cfaLength = 4;
uint8_t cfa[cfaLength];
if ((err = convertCFA(cfaEnum, /*out*/cfa)) != OK) {
#if 0
jniThrowExceptionFmt(env, "java/lang/IllegalStateException",
"Invalid metadata for tag %d", TAG_CFAPATTERN);
#endif
}
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_CFAPATTERN, cfaLength, cfa, TIFF_IFD_0),
env, TAG_CFAPATTERN, writer);
opcodeCfaLayout = convertCFAEnumToOpcodeLayout(cfaEnum);
// Set CFA plane color
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_CFAPLANECOLOR, 3, cfaPlaneColor,
TIFF_IFD_0), env, TAG_CFAPLANECOLOR, writer);
// Set CFA layout
uint16_t cfaLayout = 1;
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_CFALAYOUT, 1, &cfaLayout, TIFF_IFD_0),
env, TAG_CFALAYOUT, writer);
}
{
// image description
uint8_t imageDescription = '\0'; // empty
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_IMAGEDESCRIPTION, 1, &imageDescription,
TIFF_IFD_0), env, TAG_IMAGEDESCRIPTION, writer);
}
{
// make
// Use "" to represent unknown make as suggested in TIFF/EP spec.
char manufacturer[PROP_VALUE_MAX] = { 0 };
__system_property_get("ro.product.manufacturer", manufacturer);
uint32_t count = static_cast<uint32_t>(strlen(manufacturer)) + 1;
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_MAKE, count,
reinterpret_cast<const uint8_t*>(manufacturer), TIFF_IFD_0), env, TAG_MAKE,
writer);
}
{
// model
// Use "" to represent unknown model as suggested in TIFF/EP spec.
char model[PROP_VALUE_MAX] = { 0 };
__system_property_get("ro.product.model", model);
uint32_t count = static_cast<uint32_t>(strlen(model)) + 1;
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_MODEL, count,
reinterpret_cast<const uint8_t*>(model), TIFF_IFD_0), env, TAG_MODEL,
writer);
}
{
// x resolution
uint32_t xres[] = { 72, 1 }; // default 72 ppi
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_XRESOLUTION, 1, xres, TIFF_IFD_0),
env, TAG_XRESOLUTION, writer);
// y resolution
uint32_t yres[] = { 72, 1 }; // default 72 ppi
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_YRESOLUTION, 1, yres, TIFF_IFD_0),
env, TAG_YRESOLUTION, writer);
uint16_t unit = 2; // inches
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_RESOLUTIONUNIT, 1, &unit, TIFF_IFD_0),
env, TAG_RESOLUTIONUNIT, writer);
}
{
// software
char software[PROP_VALUE_MAX] = { 0 };
__system_property_get("ro.build.fingerprint", software);
uint32_t count = static_cast<uint32_t>(strlen(software)) + 1;
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_SOFTWARE, count,
reinterpret_cast<const uint8_t*>(software), TIFF_IFD_0), env, TAG_SOFTWARE,
writer);
}
if (hasCaptureTime()) {
// datetime
std::string captureTime = getCaptureTime();
if (writer->addEntry(TAG_DATETIME, NativeContext::DATETIME_COUNT,
reinterpret_cast<const uint8_t*>(captureTime.c_str()), TIFF_IFD_0) != OK) {
#if 0
jniThrowExceptionFmt(env, "java/lang/IllegalArgumentException",
"Invalid metadata for tag %x", TAG_DATETIME);
#endif
return nullptr;
}
// datetime original
if (writer->addEntry(TAG_DATETIMEORIGINAL, NativeContext::DATETIME_COUNT,
reinterpret_cast<const uint8_t*>(captureTime.c_str()), TIFF_IFD_0) != OK) {
#if 0
jniThrowExceptionFmt(env, "java/lang/IllegalArgumentException",
"Invalid metadata for tag %x", TAG_DATETIMEORIGINAL);
#endif
return nullptr;
}
}
{
// TIFF/EP standard id
uint8_t standardId[] = { 1, 0, 0, 0 };
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_TIFFEPSTANDARDID, 4, standardId,
TIFF_IFD_0), env, TAG_TIFFEPSTANDARDID, writer);
}
{
// copyright
uint8_t copyright = '\0'; // empty
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_COPYRIGHT, 1, &copyright,
TIFF_IFD_0), env, TAG_COPYRIGHT, writer);
}
{
// exposure time
ACameraMetadata_const_entry entry = { 0 };
status = ACameraMetadata_getConstEntry(results, ACAMERA_SENSOR_EXPOSURE_TIME, &entry);
BAIL_IF_EMPTY_RET_NULL_SP(entry, env, TAG_EXPOSURETIME, writer);
int64_t exposureTime = *(entry.data.i64);
if (exposureTime < 0) {
// Should be unreachable
#if 0
jniThrowException(env, "java/lang/IllegalArgumentException",
"Negative exposure time in metadata");
#endif
return nullptr;
}
// Ensure exposure time doesn't overflow (for exposures > 4s)
uint32_t denominator = 1000000000;
while (exposureTime > UINT32_MAX) {
exposureTime >>= 1;
denominator >>= 1;
if (denominator == 0) {
// Should be unreachable
#if 0
jniThrowException(env, "java/lang/IllegalArgumentException",
"Exposure time too long");
#endif
return nullptr;
}
}
uint32_t exposure[] = { static_cast<uint32_t>(exposureTime), denominator };
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_EXPOSURETIME, 1, exposure,
TIFF_IFD_0), env, TAG_EXPOSURETIME, writer);
}
{
// ISO speed ratings
ACameraMetadata_const_entry entry = { 0 };
status = ACameraMetadata_getConstEntry(results, ACAMERA_SENSOR_SENSITIVITY, &entry);
BAIL_IF_EMPTY_RET_NULL_SP(entry, env, TAG_ISOSPEEDRATINGS, writer);
int32_t tempIso = *(entry.data.i32);
if (tempIso < 0) {
#if 0
jniThrowException(env, "java/lang/IllegalArgumentException",
"Negative ISO value");
#endif
return nullptr;
}
if (tempIso > UINT16_MAX) {
ALOGW("%s: ISO value overflows UINT16_MAX, clamping to max", __FUNCTION__);
tempIso = UINT16_MAX;
}
uint16_t iso = static_cast<uint16_t>(tempIso);
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_ISOSPEEDRATINGS, 1, &iso,
TIFF_IFD_0), env, TAG_ISOSPEEDRATINGS, writer);
}
{
// Baseline exposure
ACameraMetadata_const_entry entry = { 0 };
status = ACameraMetadata_getConstEntry(results, ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST, &entry);
BAIL_IF_EMPTY_RET_NULL_SP(entry, env, TAG_BASELINEEXPOSURE, writer);
// post RAW gain should be boostValue / 100
double postRAWGain = static_cast<double> (entry.data.i32[0]) / 100.f;
// Baseline exposure should be in EV units: log2(gain) = ln(gain) / ln(2)
double baselineExposure = std::log(postRAWGain) / std::log(2.0);
int32_t baseExposureSRat[] = { static_cast<int32_t> (baselineExposure * 100),
100 };
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_BASELINEEXPOSURE, 1,
baseExposureSRat, TIFF_IFD_0), env, TAG_BASELINEEXPOSURE, writer);
}
{
// focal length
ACameraMetadata_const_entry entry = { 0 };
status = ACameraMetadata_getConstEntry(results, ACAMERA_LENS_FOCAL_LENGTH, &entry);
BAIL_IF_EMPTY_RET_NULL_SP(entry, env, TAG_FOCALLENGTH, writer);
uint32_t focalLength[] = { static_cast<uint32_t>(*(entry.data.f) * 100), 100 };
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_FOCALLENGTH, 1, focalLength,
TIFF_IFD_0), env, TAG_FOCALLENGTH, writer);
}
{
// f number
ACameraMetadata_const_entry entry = { 0 };
status = ACameraMetadata_getConstEntry(results, ACAMERA_LENS_APERTURE, &entry);
BAIL_IF_EMPTY_RET_NULL_SP(entry, env, TAG_FNUMBER, writer);
uint32_t fnum[] = { static_cast<uint32_t>(*(entry.data.f) * 100), 100 };
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_FNUMBER, 1, fnum,
TIFF_IFD_0), env, TAG_FNUMBER, writer);
}
{
// Set DNG version information
uint8_t version[4] = {1, 4, 0, 0};
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_DNGVERSION, 4, version, TIFF_IFD_0),
env, TAG_DNGVERSION, writer);
uint8_t backwardVersion[4] = {1, 1, 0, 0};
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_DNGBACKWARDVERSION, 4, backwardVersion,
TIFF_IFD_0), env, TAG_DNGBACKWARDVERSION, writer);
}
{
// Set whitelevel
ACameraMetadata_const_entry entry = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_INFO_WHITE_LEVEL, &entry);
BAIL_IF_EMPTY_RET_NULL_SP(entry, env, TAG_WHITELEVEL, writer);
uint32_t whiteLevel = static_cast<uint32_t>(entry.data.i32[0]);
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_WHITELEVEL, 1, &whiteLevel, TIFF_IFD_0),
env, TAG_WHITELEVEL, writer);
}
{
// Set default scale
uint32_t defaultScale[4] = {1, 1, 1, 1};
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_DEFAULTSCALE, 2, defaultScale,
TIFF_IFD_0), env, TAG_DEFAULTSCALE, writer);
}
bool singleIlluminant = false;
if (isBayer) {
// Set calibration illuminants
ACameraMetadata_const_entry entry1 = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_REFERENCE_ILLUMINANT1, &entry1);
BAIL_IF_EMPTY_RET_NULL_SP(entry1, env, TAG_CALIBRATIONILLUMINANT1, writer);
ACameraMetadata_const_entry entry2 = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_REFERENCE_ILLUMINANT2, &entry2);
if (entry2.count == 0) {
singleIlluminant = true;
}
uint16_t ref1 = entry1.data.u8[0];
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_CALIBRATIONILLUMINANT1, 1, &ref1,
TIFF_IFD_0), env, TAG_CALIBRATIONILLUMINANT1, writer);
if (!singleIlluminant) {
uint16_t ref2 = entry2.data.u8[0];
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_CALIBRATIONILLUMINANT2, 1, &ref2,
TIFF_IFD_0), env, TAG_CALIBRATIONILLUMINANT2, writer);
}
}
if (isBayer) {
// Set color transforms
ACameraMetadata_const_entry entry1 = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_COLOR_TRANSFORM1, &entry1);
BAIL_IF_EMPTY_RET_NULL_SP(entry1, env, TAG_COLORMATRIX1, writer);
int32_t colorTransform1[entry1.count * 2];
size_t ctr = 0;
for(size_t i = 0; i < entry1.count; ++i) {
colorTransform1[ctr++] = entry1.data.r[i].numerator;
colorTransform1[ctr++] = entry1.data.r[i].denominator;
}
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_COLORMATRIX1, entry1.count,
colorTransform1, TIFF_IFD_0), env, TAG_COLORMATRIX1, writer);
if (!singleIlluminant) {
ACameraMetadata_const_entry entry2 = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_COLOR_TRANSFORM2, &entry2);
BAIL_IF_EMPTY_RET_NULL_SP(entry2, env, TAG_COLORMATRIX2, writer);
int32_t colorTransform2[entry2.count * 2];
ctr = 0;
for(size_t i = 0; i < entry2.count; ++i) {
colorTransform2[ctr++] = entry2.data.r[i].numerator;
colorTransform2[ctr++] = entry2.data.r[i].denominator;
}
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_COLORMATRIX2, entry2.count,
colorTransform2, TIFF_IFD_0), env, TAG_COLORMATRIX2, writer);
}
}
if (isBayer) {
// Set calibration transforms
ACameraMetadata_const_entry entry1 = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_CALIBRATION_TRANSFORM1, &entry1);
BAIL_IF_EMPTY_RET_NULL_SP(entry1, env, TAG_CAMERACALIBRATION1, writer);
int32_t calibrationTransform1[entry1.count * 2];
size_t ctr = 0;
for(size_t i = 0; i < entry1.count; ++i) {
calibrationTransform1[ctr++] = entry1.data.r[i].numerator;
calibrationTransform1[ctr++] = entry1.data.r[i].denominator;
}
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_CAMERACALIBRATION1, entry1.count,
calibrationTransform1, TIFF_IFD_0), env, TAG_CAMERACALIBRATION1, writer);
if (!singleIlluminant) {
ACameraMetadata_const_entry entry2 = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_CALIBRATION_TRANSFORM2, &entry2);
BAIL_IF_EMPTY_RET_NULL_SP(entry2, env, TAG_CAMERACALIBRATION2, writer);
int32_t calibrationTransform2[entry2.count * 2];
ctr = 0;
for(size_t i = 0; i < entry2.count; ++i) {
calibrationTransform2[ctr++] = entry2.data.r[i].numerator;
calibrationTransform2[ctr++] = entry2.data.r[i].denominator;
}
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_CAMERACALIBRATION2, entry2.count,
calibrationTransform2, TIFF_IFD_0), env, TAG_CAMERACALIBRATION2, writer);
}
}
if (isBayer) {
// Set forward transforms
ACameraMetadata_const_entry entry1 = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_FORWARD_MATRIX1, &entry1);
BAIL_IF_EMPTY_RET_NULL_SP(entry1, env, TAG_FORWARDMATRIX1, writer);
int32_t forwardTransform1[entry1.count * 2];
size_t ctr = 0;
for(size_t i = 0; i < entry1.count; ++i) {
forwardTransform1[ctr++] = entry1.data.r[i].numerator;
forwardTransform1[ctr++] = entry1.data.r[i].denominator;
}
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_FORWARDMATRIX1, entry1.count,
forwardTransform1, TIFF_IFD_0), env, TAG_FORWARDMATRIX1, writer);
if (!singleIlluminant) {
ACameraMetadata_const_entry entry2 = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_FORWARD_MATRIX2, &entry2);
BAIL_IF_EMPTY_RET_NULL_SP(entry2, env, TAG_FORWARDMATRIX2, writer);
int32_t forwardTransform2[entry2.count * 2];
ctr = 0;
for(size_t i = 0; i < entry2.count; ++i) {
forwardTransform2[ctr++] = entry2.data.r[i].numerator;
forwardTransform2[ctr++] = entry2.data.r[i].denominator;
}
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_FORWARDMATRIX2, entry2.count,
forwardTransform2, TIFF_IFD_0), env, TAG_FORWARDMATRIX2, writer);
}
}
if (isBayer) {
// Set camera neutral
ACameraMetadata_const_entry entry = { 0 };
status = ACameraMetadata_getConstEntry(results, ACAMERA_SENSOR_NEUTRAL_COLOR_POINT, &entry);
BAIL_IF_EMPTY_RET_NULL_SP(entry, env, TAG_ASSHOTNEUTRAL, writer);
uint32_t cameraNeutral[entry.count * 2];
size_t ctr = 0;
for(size_t i = 0; i < entry.count; ++i) {
cameraNeutral[ctr++] =
static_cast<uint32_t>(entry.data.r[i].numerator);
cameraNeutral[ctr++] =
static_cast<uint32_t>(entry.data.r[i].denominator);
}
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_ASSHOTNEUTRAL, entry.count, cameraNeutral,
TIFF_IFD_0), env, TAG_ASSHOTNEUTRAL, writer);
}
{
// Set dimensions
if (calculateAndSetCrop(characteristics, writer) != OK) {
return nullptr;
}
ACameraMetadata_const_entry entry = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, &entry);
BAIL_IF_EMPTY_RET_NULL_SP(entry, env, TAG_ACTIVEAREA, writer);
uint32_t xmin = static_cast<uint32_t>(entry.data.i32[0]);
uint32_t ymin = static_cast<uint32_t>(entry.data.i32[1]);
uint32_t width = static_cast<uint32_t>(entry.data.i32[2]);
uint32_t height = static_cast<uint32_t>(entry.data.i32[3]);
// If we only have a buffer containing the pre-correction rectangle, ignore the offset
// relative to the pixel array.
if (imageWidth == width && imageHeight == height) {
xmin = 0;
ymin = 0;
}
uint32_t activeArea[] = {ymin, xmin, ymin + height, xmin + width};
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_ACTIVEAREA, 4, activeArea, TIFF_IFD_0),
env, TAG_ACTIVEAREA, writer);
}
{
// Setup unique camera model tag
char model[PROP_VALUE_MAX] = { 0 };
__system_property_get("ro.product.model", model);
char manufacturer[PROP_VALUE_MAX] = { 0 };
__system_property_get("ro.product.manufacturer", manufacturer);
char brand[PROP_VALUE_MAX] = { 0 };
__system_property_get("ro.product.brand", brand);
std::string cameraModel = model;
cameraModel += "-";
cameraModel += manufacturer;
cameraModel += "-";
cameraModel += brand;
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_UNIQUECAMERAMODEL, cameraModel.size() + 1,
reinterpret_cast<const uint8_t*>(cameraModel.c_str()), TIFF_IFD_0), env,
TAG_UNIQUECAMERAMODEL, writer);
}
{
// Setup sensor noise model
ACameraMetadata_const_entry entry = { 0 };
status = ACameraMetadata_getConstEntry(results, ACAMERA_SENSOR_NOISE_PROFILE, &entry);
const size_t numPlaneColors = isBayer ? 3 : 1;
const size_t numCfaChannels = isBayer ? 4 : 1;
uint8_t cfaOut[numCfaChannels];
if ((err = convertCFA(cfaEnum, /*out*/cfaOut)) != OK) {
#if 0
jniThrowException(env, "java/lang/IllegalArgumentException",
"Invalid CFA from camera characteristics");
#endif
return nullptr;
}
double noiseProfile[numPlaneColors * 2];
if (entry.count > 0) {
if (entry.count != numCfaChannels * 2) {
ALOGW("%s: Invalid entry count %zu for noise profile returned "
"in characteristics, no noise profile tag written...",
__FUNCTION__, entry.count);
} else {
if ((err = generateNoiseProfile(entry.data.d, cfaOut, numCfaChannels,
cfaPlaneColor, numPlaneColors, /*out*/ noiseProfile)) == OK) {
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_NOISEPROFILE,
numPlaneColors * 2, noiseProfile, TIFF_IFD_0), env, TAG_NOISEPROFILE,
writer);
} else {
ALOGW("%s: Error converting coefficients for noise profile, no noise profile"
" tag written...", __FUNCTION__);
}
}
} else {
ALOGW("%s: No noise profile found in result metadata. Image quality may be reduced.",
__FUNCTION__);
}
}
{
// Set up opcode List 2
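// Per the DNG spec, OpcodeList2 is applied after the raw data has been
// mapped to linear reference values (post black-level), while OpcodeList3
// (set up below) is applied after demosaicing.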
OpcodeListBuilder builder;
status_t err = OK;
// Set up lens shading map
ACameraMetadata_const_entry entry1 = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_LENS_INFO_SHADING_MAP_SIZE, &entry1);
uint32_t lsmWidth = 0;
uint32_t lsmHeight = 0;
if (entry1.count != 0) {
lsmWidth = static_cast<uint32_t>(entry1.data.i32[0]);
lsmHeight = static_cast<uint32_t>(entry1.data.i32[1]);
}
ACameraMetadata_const_entry entry2 = { 0 };
status = ACameraMetadata_getConstEntry(results, ACAMERA_STATISTICS_LENS_SHADING_MAP, &entry2);
ACameraMetadata_const_entry entry = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, &entry);
BAIL_IF_EMPTY_RET_NULL_SP(entry, env, TAG_IMAGEWIDTH, writer);
uint32_t xmin = static_cast<uint32_t>(entry.data.i32[0]);
uint32_t ymin = static_cast<uint32_t>(entry.data.i32[1]);
uint32_t width = static_cast<uint32_t>(entry.data.i32[2]);
uint32_t height = static_cast<uint32_t>(entry.data.i32[3]);
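// The lens shading map carries one gain per CFA channel (4) for each map
// sample, so a valid entry holds lsmWidth * lsmHeight * 4 floats.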
if (entry2.count > 0 && entry2.count == lsmWidth * lsmHeight * 4) {
// GainMap rectangle is relative to the active area origin.
err = builder.addGainMapsForMetadata(lsmWidth,
lsmHeight,
0,
0,
height,
width,
opcodeCfaLayout,
entry2.data.f);
if (err != OK) {
ALOGE("%s: Could not add Lens shading map.", __FUNCTION__);
#if 0
jniThrowRuntimeException(env, "failed to add lens shading map.");
#endif
return nullptr;
}
}
// Hot pixel map is specific to bayer camera per DNG spec.
if (isBayer) {
// Set up bad pixel correction list
ACameraMetadata_const_entry entry3 = { 0 };
status = ACameraMetadata_getConstEntry(characteristics, ACAMERA_STATISTICS_HOT_PIXEL_MAP, &entry3);
if ((entry3.count % 2) != 0) {
ALOGE("%s: Hot pixel map contains odd number of values, cannot map to pairs!",
__FUNCTION__);
#if 0
jniThrowRuntimeException(env, "failed to add hotpixel map.");
#endif
return nullptr;
}
// Adjust the bad pixel coordinates to be relative to the origin of the active area DNG tag
std::vector<uint32_t> v;
for (size_t i = 0; i < entry3.count; i += 2) {
int32_t x = entry3.data.i32[i];
int32_t y = entry3.data.i32[i + 1];
x -= static_cast<int32_t>(xmin);
y -= static_cast<int32_t>(ymin);
if (x < 0 || y < 0 || static_cast<uint32_t>(x) >= width ||
static_cast<uint32_t>(y) >= height) {
continue;
}
v.push_back(x);
v.push_back(y);
}
const uint32_t* badPixels = v.data();
uint32_t badPixelCount = v.size();
if (badPixelCount > 0) {
err = builder.addBadPixelListForMetadata(badPixels, badPixelCount, opcodeCfaLayout);
if (err != OK) {
ALOGE("%s: Could not add hotpixel map.", __FUNCTION__);
#if 0
jniThrowRuntimeException(env, "failed to add hotpixel map.");
#endif
return nullptr;
}
}
}
if (builder.getCount() > 0) {
size_t listSize = builder.getSize();
uint8_t opcodeListBuf[listSize];
err = builder.buildOpList(opcodeListBuf);
if (err == OK) {
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_OPCODELIST2, listSize,
opcodeListBuf, TIFF_IFD_0), env, TAG_OPCODELIST2, writer);
} else {
ALOGE("%s: Could not build list of opcodes for lens shading map and bad pixel "
"correction.", __FUNCTION__);
#if 0
jniThrowRuntimeException(env, "failed to construct opcode list for lens shading "
"map and bad pixel correction");
#endif
return nullptr;
}
}
}
{
// Set up opcode List 3
OpcodeListBuilder builder;
status_t err = OK;
// Set up rectilinear distortion correction
std::array<float, 6> distortion = {1.f, 0.f, 0.f, 0.f, 0.f, 0.f};
bool gotDistortion = false;
ACameraMetadata_const_entry entry4 = { 0 };
status = ACameraMetadata_getConstEntry(results, ACAMERA_LENS_INTRINSIC_CALIBRATION, &entry4);
if (entry4.count == 5) {
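// Intrinsic calibration is packed as [f_x, f_y, c_x, c_y, s].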
float cx = entry4.data.f[/*c_x*/2];
float cy = entry4.data.f[/*c_y*/3];
// Assuming f_x = f_y, or at least close enough.
// Also assuming s = 0, or at least close enough.
float f = entry4.data.f[/*f_x*/0];
ACameraMetadata_const_entry entry3 = { 0 };
status = ACameraMetadata_getConstEntry(results, ACAMERA_LENS_DISTORTION, &entry3);
if (entry3.count == 5) {
gotDistortion = true;
// Scale the distortion coefficients to produce a zoomed-in warped image
// so that all output pixels sample from within the input image.
for (size_t i = 0; i < entry3.count; i++) {
distortion[i+1] = entry3.data.f[i];
}
if (preWidth == imageWidth && preHeight == imageHeight) {
normalizeLensDistortion(distortion, cx, cy, f, preWidth, preHeight);
} else {
// Image size == pixel array size (which contains optical black pixels).
// cx/cy are defined relative to the preCorrection array, so add its offset.
// Also shift the default xmin/ymin so that pixels are only sampled
// within the preCorrection array.
normalizeLensDistortion(
distortion, cx + preXMin, cy + preYMin, f, preWidth, preHeight,
preXMin, preYMin);
}
float m_x = std::fmaxf(preWidth - cx, cx);
float m_y = std::fmaxf(preHeight - cy, cy);
float m_sq = m_x*m_x + m_y*m_y;
float m = sqrtf(m_sq); // distance to farthest corner from optical center
float f_sq = f * f;
// Conversion factors from Camera2 K factors for new LENS_DISTORTION field
// to DNG spec.
//
// Camera2 / OpenCV assume distortion is applied in a space where focal length
// is factored out, while DNG assumes a normalized space where the distance
// from optical center to the farthest corner is 1.
// Scale from camera2 to DNG spec accordingly.
// distortion[0] is always 1 with the new LENS_DISTORTION field.
const double convCoeff[5] = {
m_sq / f_sq,
pow(m_sq, 2) / pow(f_sq, 2),
pow(m_sq, 3) / pow(f_sq, 3),
m / f,
m / f
};
for (size_t i = 0; i < entry3.count; i++) {
distortion[i+1] *= convCoeff[i];
}
} else {
status = ACameraMetadata_getConstEntry(results, ACAMERA_LENS_RADIAL_DISTORTION, &entry3);
if (entry3.count == 6) {
gotDistortion = true;
// Conversion factors from Camera2 K factors to DNG spec. K factors:
//
// Note: these are necessary because our unit system assumes a
// normalized max radius of sqrt(2), whereas the DNG spec's
// WarpRectilinear opcode assumes a normalized max radius of 1.
// Thus, each K coefficient must include the domain scaling
// factor (the DNG domain is scaled by sqrt(2) to emulate the
// domain used by the Camera2 specification).
const double convCoeff[6] = {
sqrt(2),
2 * sqrt(2),
4 * sqrt(2),
8 * sqrt(2),
2,
2
};
for (size_t i = 0; i < entry3.count; i++) {
distortion[i] = entry3.data.f[i] * convCoeff[i];
}
}
}
if (gotDistortion) {
err = builder.addWarpRectilinearForMetadata(
distortion.data(), preWidth, preHeight, cx, cy);
if (err != OK) {
ALOGE("%s: Could not add distortion correction.", __FUNCTION__);
#if 0
jniThrowRuntimeException(env, "failed to add distortion correction.");
#endif
return nullptr;
}
}
}
if (builder.getCount() > 0) {
size_t listSize = builder.getSize();
uint8_t opcodeListBuf[listSize];
err = builder.buildOpList(opcodeListBuf);
if (err == OK) {
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_OPCODELIST3, listSize,
opcodeListBuf, TIFF_IFD_0), env, TAG_OPCODELIST3, writer);
} else {
ALOGE("%s: Could not build list of opcodes for distortion correction.",
__FUNCTION__);
#if 0
jniThrowRuntimeException(env, "failed to construct opcode list for distortion"
" correction");
#endif
return nullptr;
}
}
}
{
// Set up orientation tags.
// Note: There's only one orientation field for the whole file, in IFD0
// The main image and any thumbnails therefore have the same orientation.
uint16_t orientation = getOrientation();
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_ORIENTATION, 1, &orientation, TIFF_IFD_0),
env, TAG_ORIENTATION, writer);
}
if (hasDescription()){
// Set Description
std::string description = getDescription();
size_t len = description.size() + 1;
if (writer->addEntry(TAG_IMAGEDESCRIPTION, len,
reinterpret_cast<const uint8_t*>(description.c_str()), TIFF_IFD_0) != OK) {
#if 0
jniThrowExceptionFmt(env, "java/lang/IllegalArgumentException",
"Invalid metadata for tag %x", TAG_IMAGEDESCRIPTION);
#endif
}
}
if (hasGpsData()) {
// Set GPS tags
GpsData gpsData = getGpsData();
if (!writer->hasIfd(TIFF_IFD_GPSINFO)) {
if (writer->addSubIfd(TIFF_IFD_0, TIFF_IFD_GPSINFO, TiffWriter::GPSINFO) != OK) {
ALOGE("%s: Failed to add GpsInfo IFD %u to IFD %u", __FUNCTION__, TIFF_IFD_GPSINFO,
TIFF_IFD_0);
#if 0
jniThrowException(env, "java/lang/IllegalStateException", "Failed to add GPSINFO");
#endif
return nullptr;
}
}
{
uint8_t version[] = {2, 3, 0, 0};
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_GPSVERSIONID, 4, version,
TIFF_IFD_GPSINFO), env, TAG_GPSVERSIONID, writer);
}
{
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_GPSLATITUDEREF,
GpsData::GPS_REF_LENGTH, gpsData.mLatitudeRef, TIFF_IFD_GPSINFO), env,
TAG_GPSLATITUDEREF, writer);
}
{
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_GPSLONGITUDEREF,
GpsData::GPS_REF_LENGTH, gpsData.mLongitudeRef, TIFF_IFD_GPSINFO), env,
TAG_GPSLONGITUDEREF, writer);
}
{
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_GPSLATITUDE, 3, gpsData.mLatitude,
TIFF_IFD_GPSINFO), env, TAG_GPSLATITUDE, writer);
}
{
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_GPSLONGITUDE, 3, gpsData.mLongitude,
TIFF_IFD_GPSINFO), env, TAG_GPSLONGITUDE, writer);
}
{
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_GPSTIMESTAMP, 3, gpsData.mTimestamp,
TIFF_IFD_GPSINFO), env, TAG_GPSTIMESTAMP, writer);
}
{
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_GPSDATESTAMP,
GpsData::GPS_DATE_LENGTH, gpsData.mDate, TIFF_IFD_GPSINFO), env,
TAG_GPSDATESTAMP, writer);
}
}
if (hasThumbnail()) {
if (!writer->hasIfd(TIFF_IFD_SUB1)) {
if (writer->addSubIfd(TIFF_IFD_0, TIFF_IFD_SUB1) != OK) {
ALOGE("%s: Failed to add SubIFD %u to IFD %u", __FUNCTION__, TIFF_IFD_SUB1,
TIFF_IFD_0);
#if 0
jniThrowException(env, "java/lang/IllegalStateException", "Failed to add SubIFD");
#endif
return nullptr;
}
}
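// With a thumbnail present, IFD0 stores the thumbnail and the
// full-resolution raw image moves to SubIFD1, so relocate the
// raw-specific tags first.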
std::vector<uint16_t> tagsToMove;
tagsToMove.push_back(TAG_NEWSUBFILETYPE);
tagsToMove.push_back(TAG_ACTIVEAREA);
tagsToMove.push_back(TAG_BITSPERSAMPLE);
tagsToMove.push_back(TAG_COMPRESSION);
tagsToMove.push_back(TAG_IMAGEWIDTH);
tagsToMove.push_back(TAG_IMAGELENGTH);
tagsToMove.push_back(TAG_PHOTOMETRICINTERPRETATION);
tagsToMove.push_back(TAG_BLACKLEVEL);
tagsToMove.push_back(TAG_BLACKLEVELREPEATDIM);
tagsToMove.push_back(TAG_SAMPLESPERPIXEL);
tagsToMove.push_back(TAG_PLANARCONFIGURATION);
if (isBayer) {
tagsToMove.push_back(TAG_CFAREPEATPATTERNDIM);
tagsToMove.push_back(TAG_CFAPATTERN);
tagsToMove.push_back(TAG_CFAPLANECOLOR);
tagsToMove.push_back(TAG_CFALAYOUT);
}
tagsToMove.push_back(TAG_XRESOLUTION);
tagsToMove.push_back(TAG_YRESOLUTION);
tagsToMove.push_back(TAG_RESOLUTIONUNIT);
tagsToMove.push_back(TAG_WHITELEVEL);
tagsToMove.push_back(TAG_DEFAULTSCALE);
tagsToMove.push_back(TAG_DEFAULTCROPORIGIN);
tagsToMove.push_back(TAG_DEFAULTCROPSIZE);
if (nullptr != writer->getEntry(TAG_OPCODELIST2, TIFF_IFD_0).get()) {
tagsToMove.push_back(TAG_OPCODELIST2);
}
if (nullptr != writer->getEntry(TAG_OPCODELIST3, TIFF_IFD_0).get()) {
tagsToMove.push_back(TAG_OPCODELIST3);
}
if (moveEntries(writer, TIFF_IFD_0, TIFF_IFD_SUB1, tagsToMove) != OK) {
#if 0
jniThrowException(env, "java/lang/IllegalStateException", "Failed to move entries");
#endif
return nullptr;
}
// Setup thumbnail tags
{
// Set photometric interpretation
uint16_t interpretation = 2; // RGB
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_PHOTOMETRICINTERPRETATION, 1,
&interpretation, TIFF_IFD_0), env, TAG_PHOTOMETRICINTERPRETATION, writer);
}
{
// Set planar configuration
uint16_t config = 1; // Chunky
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_PLANARCONFIGURATION, 1, &config,
TIFF_IFD_0), env, TAG_PLANARCONFIGURATION, writer);
}
{
// Set samples per pixel
uint16_t samples = SAMPLES_PER_RGB_PIXEL;
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_SAMPLESPERPIXEL, 1, &samples,
TIFF_IFD_0), env, TAG_SAMPLESPERPIXEL, writer);
}
{
// Set bits per sample
uint16_t bits[SAMPLES_PER_RGB_PIXEL];
for (int i = 0; i < SAMPLES_PER_RGB_PIXEL; i++) bits[i] = BITS_PER_RGB_SAMPLE;
BAIL_IF_INVALID_RET_NULL_SP(
writer->addEntry(TAG_BITSPERSAMPLE, SAMPLES_PER_RGB_PIXEL, bits, TIFF_IFD_0),
env, TAG_BITSPERSAMPLE, writer);
}
{
// Set subfiletype
uint32_t subfileType = 1; // Thumbnail image
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_NEWSUBFILETYPE, 1, &subfileType,
TIFF_IFD_0), env, TAG_NEWSUBFILETYPE, writer);
}
{
// Set compression
uint16_t compression = 1; // None
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_COMPRESSION, 1, &compression,
TIFF_IFD_0), env, TAG_COMPRESSION, writer);
}
{
// Set dimensions
uint32_t uWidth = getThumbnailWidth();
uint32_t uHeight = getThumbnailHeight();
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_IMAGEWIDTH, 1, &uWidth, TIFF_IFD_0),
env, TAG_IMAGEWIDTH, writer);
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_IMAGELENGTH, 1, &uHeight, TIFF_IFD_0),
env, TAG_IMAGELENGTH, writer);
}
{
// x resolution
uint32_t xres[] = { 72, 1 }; // default 72 ppi
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_XRESOLUTION, 1, xres, TIFF_IFD_0),
env, TAG_XRESOLUTION, writer);
// y resolution
uint32_t yres[] = { 72, 1 }; // default 72 ppi
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_YRESOLUTION, 1, yres, TIFF_IFD_0),
env, TAG_YRESOLUTION, writer);
uint16_t unit = 2; // inches
BAIL_IF_INVALID_RET_NULL_SP(writer->addEntry(TAG_RESOLUTIONUNIT, 1, &unit, TIFF_IFD_0),
env, TAG_RESOLUTIONUNIT, writer);
}
}
if (writer->addStrip(TIFF_IFD_0) != OK) {
ALOGE("%s: Could not setup thumbnail strip tags.", __FUNCTION__);
#if 0
jniThrowException(env, "java/lang/IllegalStateException",
"Failed to setup thumbnail strip tags.");
#endif
return nullptr;
}
if (writer->hasIfd(TIFF_IFD_SUB1)) {
if (writer->addStrip(TIFF_IFD_SUB1) != OK) {
ALOGE("%s: Could not main image strip tags.", __FUNCTION__);
#if 0
jniThrowException(env, "java/lang/IllegalStateException",
"Failed to setup main image strip tags.");
#endif
return nullptr;
}
}
return writer;
}
void DngCreator::setGpsTags(const std::vector<int>& latTag,
const std::string& latRef, const std::vector<int>& longTag,
const std::string& longRef, const std::string& dateTag,
const std::vector<int>& timeTag) {
ALOGV("%s:", __FUNCTION__);
GpsData data;
size_t latLen = latTag.size();
size_t longLen = longTag.size();
size_t timeLen = timeTag.size();
if (latLen != GpsData::GPS_VALUE_LENGTH) {
#if 0
jniThrowException(env, "java/lang/IllegalArgumentException",
"invalid latitude tag length");
#endif
return;
} else if (longLen != GpsData::GPS_VALUE_LENGTH) {
#if 0
jniThrowException(env, "java/lang/IllegalArgumentException",
"invalid longitude tag length");
#endif
return;
} else if (timeLen != GpsData::GPS_VALUE_LENGTH) {
#if 0
jniThrowException(env, "java/lang/IllegalArgumentException",
"invalid time tag length");
#endif
return;
}
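// Each GPS coordinate/timestamp is three RATIONALs flattened into
// numerator/denominator int pairs (GPS_VALUE_LENGTH values in total).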
memcpy(&data.mLatitude, &latTag[0], sizeof(int) * GpsData::GPS_VALUE_LENGTH);
memcpy(&data.mLongitude, &longTag[0], sizeof(int) * GpsData::GPS_VALUE_LENGTH);
memcpy(&data.mTimestamp, &timeTag[0], sizeof(int) * GpsData::GPS_VALUE_LENGTH);
memcpy(&data.mLatitudeRef, latRef.c_str(), 1);
data.mLatitudeRef[GpsData::GPS_REF_LENGTH - 1] = '\0';
memcpy(&data.mLongitudeRef, longRef.c_str(), 1);
data.mLongitudeRef[GpsData::GPS_REF_LENGTH - 1] = '\0';
memcpy(&data.mDate, dateTag.c_str(), GpsData::GPS_DATE_LENGTH - 1);
data.mDate[GpsData::GPS_DATE_LENGTH - 1] = '\0';
setGpsData(data);
}
// TODO: Refactor out common preamble for the two nativeWrite methods.
void DngCreator::writeImage(std::vector<uint8_t>& outStream, uint32_t uWidth,
uint32_t uHeight, const std::vector<uint8_t>& inBuffer, int rowStride, int pixStride, uint64_t uOffset, bool isDirect) {
ALOGV("%s:", __FUNCTION__);
ALOGV("%s: nativeWriteImage called with: width=%d, height=%d, "
"rowStride=%d, pixStride=%d, offset=%" PRId64, __FUNCTION__, uWidth,
uHeight, rowStride, pixStride, uOffset);
uint32_t rStride = static_cast<uint32_t>(rowStride);
uint32_t pStride = static_cast<uint32_t>(pixStride);
std::vector<uint8_t>& out = outStream;
// sp<JniOutputStream> out = new JniOutputStream(env, outStream);
// if(env->ExceptionCheck()) {
// ALOGE("%s: Could not allocate buffers for output stream", __FUNCTION__);
// return;
// }
sp<TiffWriter> writer = setup(uWidth, uHeight);
if (writer.get() == nullptr) {
return;
}
// Validate DNG size
if (!validateDngHeader(writer, getCharacteristics(), uWidth, uHeight)) {
return;
}
// sp<JniInputByteBuffer> inBuf;
std::vector<StripSource*> sources;
sp<DirectStripSource> thumbnailSource;
uint32_t targetIfd = TIFF_IFD_0;
bool hasThumbnail = writer->hasIfd(TIFF_IFD_SUB1);
if (hasThumbnail) {
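// Thumbnail strip sources are disabled in this port; the original
// JNI-based path is kept below for reference.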
#if 0
ALOGV("%s: Adding thumbnail strip sources.", __FUNCTION__);
uint32_t bytesPerPixel = SAMPLES_PER_RGB_PIXEL * BYTES_PER_RGB_SAMPLE;
uint32_t thumbWidth = getThumbnailWidth();
thumbnailSource = new DirectStripSource(getThumbnail(), TIFF_IFD_0,
thumbWidth, getThumbnailHeight(), bytesPerPixel,
bytesPerPixel * thumbWidth, /*offset*/0, BYTES_PER_RGB_SAMPLE,
SAMPLES_PER_RGB_PIXEL);
sources.push_back(thumbnailSource.get());
targetIfd = TIFF_IFD_SUB1;
#endif
}
if (isDirect) {
size_t fullSize = rStride * uHeight;
uint64_t capacity = inBuffer.size();
if (fullSize + uOffset > capacity) {
#if 0
jniThrowExceptionFmt(env, "java/lang/IllegalStateException",
"Invalid size %d for Image, size given in metadata is %d at current stride",
capacity, fullSize);
#endif
return;
}
uint8_t* pixelBytes = (uint8_t*)&inBuffer[0];
ALOGV("%s: Using direct-type strip source.", __FUNCTION__);
DirectStripSource stripSource(pixelBytes, targetIfd, uWidth, uHeight, pStride,
rStride, uOffset, BYTES_PER_SAMPLE, SAMPLES_PER_RAW_PIXEL);
sources.push_back(&stripSource);
status_t ret = OK;
ByteVectorOutput byteVectorOutput(outStream);
if ((ret = writer->write(&byteVectorOutput, &sources[0], sources.size())) != OK) {
ALOGE("%s: write failed with error %d.", __FUNCTION__, ret);
#if 0
if (!env->ExceptionCheck()) {
jniThrowExceptionFmt(env, "java/io/IOException",
"Encountered error %d while writing file.", ret);
}
#endif
return;
}
} else {
// The non-direct path is not supported in this port; the original JNI
// input-stream implementation is kept below for reference.
// inBuf = new JniInputByteBuffer(env, inBuffer);
#if 0
ALOGV("%s: Using input-type strip source.", __FUNCTION__);
InputStripSource stripSource(*inBuf, targetIfd, uWidth, uHeight, pStride,
rStride, uOffset, BYTES_PER_SAMPLE, SAMPLES_PER_RAW_PIXEL);
sources.push_back(&stripSource);
status_t ret = OK;
if ((ret = writer->write(out.get(), &sources[0], sources.size())) != OK) {
ALOGE("%s: write failed with error %d.", __FUNCTION__, ret);
#if 0
if (!env->ExceptionCheck()) {
jniThrowExceptionFmt(env, "java/io/IOException",
"Encountered error %d while writing file.", ret);
}
#endif
return;
}
#endif
}
}
void DngCreator::writeInputStream(std::vector<uint8_t>& outStream,
const std::vector<uint8_t>& inStream, uint32_t uWidth, uint32_t uHeight, long offset) {
ALOGV("%s:", __FUNCTION__);
uint32_t rowStride = uWidth * BYTES_PER_SAMPLE;
uint32_t pixStride = BYTES_PER_SAMPLE;
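// Assumes tightly packed RAW16 rows: one BYTES_PER_SAMPLE-wide sample per
// pixel, so the row stride is simply width * BYTES_PER_SAMPLE.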
uint64_t uOffset = static_cast<uint64_t>(offset);
ALOGV("%s: nativeWriteInputStream called with: width=%u, height=%u, "
"rowStride=%d, pixStride=%d, offset=%" PRId64, __FUNCTION__, uWidth,
uHeight, rowStride, pixStride, offset);
ByteVectorOutput out(outStream);
// std::vector<uint8_t>& out = outStream;
sp<TiffWriter> writer = setup(uWidth, uHeight);
if (writer.get() == nullptr) {
return;
}
// Validate DNG size
if (!validateDngHeader(writer, getCharacteristics(), uWidth, uHeight)) {
return;
}
sp<DirectStripSource> thumbnailSource;
uint32_t targetIfd = TIFF_IFD_0;
bool hasThumbnail = writer->hasIfd(TIFF_IFD_SUB1);
std::vector<StripSource*> sources;
if (hasThumbnail)
{
#if 0
ALOGV("%s: Adding thumbnail strip sources.", __FUNCTION__);
uint32_t bytesPerPixel = SAMPLES_PER_RGB_PIXEL * BYTES_PER_RGB_SAMPLE;
uint32_t width = getThumbnailWidth();
thumbnailSource = new DirectStripSource(getThumbnail(), TIFF_IFD_0,
width, getThumbnailHeight(), bytesPerPixel,
bytesPerPixel * width, /*offset*/0, BYTES_PER_RGB_SAMPLE,
SAMPLES_PER_RGB_PIXEL);
sources.push_back(thumbnailSource.get());
targetIfd = TIFF_IFD_SUB1;
#endif
}
// sp<JniInputStream> in = new JniInputStream(env, inStream);
ByteVectorInput in(inStream);
ALOGV("%s: Using input-type strip source.", __FUNCTION__);
InputStripSource stripSource(in, targetIfd, uWidth, uHeight, pixStride,
rowStride, uOffset, BYTES_PER_SAMPLE, SAMPLES_PER_RAW_PIXEL);
sources.push_back(&stripSource);
status_t ret = OK;
if ((ret = writer->write(&out, &sources[0], sources.size())) != OK) {
ALOGE("%s: write failed with error %d.", __FUNCTION__, ret);
#if 0
if (!env->ExceptionCheck()) {
jniThrowExceptionFmt(env, "java/io/IOException",
"Encountered error %d while writing file.", ret);
}
#endif
return;
}
}
void DngCreator::writeInputBuffer(std::vector<uint8_t>& outStream,
const uint8_t* inBuffer, size_t bufferLength, uint32_t uWidth, uint32_t uHeight, long offset) {
ALOGV("%s:", __FUNCTION__);
uint32_t rowStride = uWidth * BYTES_PER_SAMPLE;
uint32_t pixStride = BYTES_PER_SAMPLE;
uint64_t uOffset = static_cast<uint64_t>(offset);
ALOGV("%s: nativeWriteInputStream called with: width=%u, height=%u, "
"rowStride=%d, pixStride=%d, offset=%" PRId64, __FUNCTION__, uWidth,
uHeight, rowStride, pixStride, offset);
ByteVectorOutput out(outStream);
// std::vector<uint8_t>& out = outStream;
sp<TiffWriter> writer = setup(uWidth, uHeight);
if (writer.get() == nullptr) {
return;
}
// Validate DNG size
if (!validateDngHeader(writer, getCharacteristics(), uWidth, uHeight)) {
return;
}
sp<DirectStripSource> thumbnailSource;
uint32_t targetIfd = TIFF_IFD_0;
bool hasThumbnail = writer->hasIfd(TIFF_IFD_SUB1);
std::vector<StripSource*> sources;
if (hasThumbnail)
{
#if 0
ALOGV("%s: Adding thumbnail strip sources.", __FUNCTION__);
uint32_t bytesPerPixel = SAMPLES_PER_RGB_PIXEL * BYTES_PER_RGB_SAMPLE;
uint32_t width = getThumbnailWidth();
thumbnailSource = new DirectStripSource(getThumbnail(), TIFF_IFD_0,
width, getThumbnailHeight(), bytesPerPixel,
bytesPerPixel * width, /*offset*/0, BYTES_PER_RGB_SAMPLE,
SAMPLES_PER_RGB_PIXEL);
sources.push_back(thumbnailSource.get());
targetIfd = TIFF_IFD_SUB1;
#endif
}
// sp<JniInputStream> in = new JniInputStream(env, inStream);
ByteBufferInput in(inBuffer, bufferLength);
ALOGV("%s: Using input-type strip source.", __FUNCTION__);
InputStripSource stripSource(in, targetIfd, uWidth, uHeight, pixStride,
rowStride, uOffset, BYTES_PER_SAMPLE, SAMPLES_PER_RAW_PIXEL);
sources.push_back(&stripSource);
status_t ret = OK;
if ((ret = writer->write(&out, &sources[0], sources.size())) != OK) {
ALOGE("%s: write failed with error %d.", __FUNCTION__, ret);
#if 0
if (!env->ExceptionCheck()) {
jniThrowExceptionFmt(env, "java/io/IOException",
"Encountered error %d while writing file.", ret);
}
#endif
return;
}
}
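// Minimal usage sketch (hypothetical caller; `creator` is assumed to be a
// DngCreator already configured with characteristics and capture results):
//
//     std::vector<uint8_t> dng;
//     creator.writeInputBuffer(dng, rawPixels, rawLen, width, height, 0);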