Implement RawBurstCapture

TempBranch
Matthew 9 months ago
parent 9c9a122034
commit 72f595b7a3

@@ -28,6 +28,8 @@ add_definitions(-DASIO_STANDALONE)
add_definitions(-DUSING_XY_EXTENSION)
# add_definitions(-DUSING_BREAK_PAD)
add_definitions(-DSQLITE_THREADSAFE=1)
add_definitions(-DLIBRAW_NO_MEMPOOL_CHECK=1)
# add_definitions(-DHDRPLUS_NO_DETAILED_OUTPUT=1)
add_definitions(-DHAVE_STRING_H) # for memcpy in md5.c
add_definitions(-DUSING_NRSEC)
add_definitions(-DUSING_NRSEC_VPN)
@@ -38,6 +40,8 @@ add_definitions(-DUSING_NRSEC_VPN)
add_definitions(-DALIGN_HB_TIMER_TO_PHOTO)
add_definitions(-DENABLE_3V3_ALWAYS)
add_definitions(-DUSING_HDRPLUS)
#add_definitions(-DUSING_N938)
# include_directories(${OpenCV_DIR}/include)
@@ -70,10 +74,39 @@ find_package(ncnn REQUIRED)
# include(mars/src/CMakeUtils.txt)
message(WARNING "include_directories ${HDRPLUS_ROOT}/${ANDROID_ABI}/include")
include_directories(${HDRPLUS_ROOT}/${ANDROID_ABI}/include)
link_directories(${HDRPLUS_ROOT}/${ANDROID_ABI}/lib)
# message(WARNING "exiv2_DIR=${HDRPLUS_ROOT}/${ANDROID_ABI}/lib/cmake/exiv2")
# SET(exiv2_DIR ${HDRPLUS_ROOT}/${ANDROID_ABI}/lib/cmake/exiv2)
# list(APPEND CMAKE_PREFIX_PATH ${HDRPLUS_ROOT}/${ANDROID_ABI}/lib/cmake/exiv2)
# find_package(exiv2 REQUIRED CONFIG NAMES exiv2)
# message(STATUS "Found Exiv2 and linked")
# OpenMP
find_package(OpenMP REQUIRED)
# library
include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/hdrplus/include )
SET(HDRPLUS_LIBS raw exiv2 lcms2 OpenMP::OpenMP_CXX)
SET(HDRPLUS_SOURCES
hdrplus/src/align.cpp
hdrplus/src/bayer_image.cpp
hdrplus/src/burst.cpp
hdrplus/src/finish.cpp
hdrplus/src/hdrplus_pipeline.cpp
hdrplus/src/merge.cpp
hdrplus/src/params.cpp
)
SET(YAMC_INC_DIR ${CMAKE_SOURCE_DIR})
@@ -291,6 +324,7 @@ add_library( # Sets the name of the library.
# camera2/OpenCVFont.cpp
${HDRPLUS_SOURCES}
${CAMERA2_SOURCES}
${TERM_CORE_ROOT}/Factory.cpp
@@ -350,9 +384,8 @@ find_library( # Sets the name of the path variable.
# can link multiple libraries, such as libraries you define in this
# build script, prebuilt third-party libraries, or system libraries.
target_link_libraries( # Specifies the target library.
${PROJECT_NAME}
jsoncpp

@@ -13,6 +13,8 @@
#include <android/multinetwork.h>
#define NRSEC_PATH "/dev/spidev0.0"
#ifdef USING_BREAK_PAD
@@ -771,6 +773,39 @@ Java_com_xypower_mpapp_MicroPhotoService_captureFinished(
}
}
extern "C" JNIEXPORT void JNICALL
Java_com_xypower_mpapp_MicroPhotoService_burstCaptureFinished(
JNIEnv* env,
jobject pThis, jlong handler, jboolean result, jint numberOfCaptures, jstring pathsJoinedByTab, jlong photoId) {
CTerminal* pTerminal = reinterpret_cast<CTerminal *>(handler);
if (pTerminal == NULL)
{
return;
}
/// HDRPlus
#ifdef USING_HDRPLUS
#endif
IDevice* dev = pTerminal->GetDevice();
if (dev != NULL)
{
if (result == JNI_FALSE)
{
cv::Mat mat;
((CPhoneDevice *)dev)->OnCaptureReady(true, false, mat, (unsigned long)photoId);
return;
}
const char* pathsStr = env->GetStringUTFChars(pathsJoinedByTab, 0);
((CPhoneDevice *)dev)->ProcessRawCapture(result != JNI_FALSE, numberOfCaptures, MakeString(pathsStr), photoId);
env->ReleaseStringUTFChars(pathsJoinedByTab, pathsStr);
}
}
extern "C" JNIEXPORT void JNICALL extern "C" JNIEXPORT void JNICALL
Java_com_xypower_mpapp_MicroPhotoService_recordingFinished( Java_com_xypower_mpapp_MicroPhotoService_recordingFinished(
JNIEnv* env, JNIEnv* env,
@ -802,7 +837,7 @@ Java_com_xypower_mpapp_MicroPhotoService_recordingFinished(
} }
extern "C" JNIEXPORT void JNICALL extern "C" JNIEXPORT jboolean JNICALL
Java_com_xypower_mpapp_MicroPhotoService_reloadConfigs( Java_com_xypower_mpapp_MicroPhotoService_reloadConfigs(
JNIEnv* env, JNIEnv* env,
jobject pThis, jlong handler) { jobject pThis, jlong handler) {
@@ -810,10 +845,11 @@ Java_com_xypower_mpapp_MicroPhotoService_reloadConfigs(
CTerminal* pTerminal = reinterpret_cast<CTerminal *>(handler);
if (pTerminal == NULL)
{
return JNI_FALSE;
}
bool res = pTerminal->LoadAppConfigs();
return res ? JNI_TRUE : JNI_FALSE;
}

@@ -23,6 +23,10 @@
#include <sys/system_properties.h>
#include <mat.h>
#ifdef USING_HDRPLUS
#include <hdrplus/hdrplus_pipeline.h>
#endif
#include <fcntl.h>
#include <filesystem>
namespace fs = std::filesystem;
@@ -40,6 +44,32 @@ extern bool GetJniEnv(JavaVM *vm, JNIEnv **env, bool& didAttachThread);
static const int kMaxChannelValue = 262143;
// Scale a 16-bit image into the 8-bit range in place, then convert its type.
// The per-pixel multiply truncates to integer, which is acceptable here
// because the result is immediately quantized to 8 bits anyway.
cv::Mat convert16bit2_8bit_(cv::Mat ans){
if(ans.type()==CV_16UC3){
cv::MatIterator_<cv::Vec3w> it, end;
for( it = ans.begin<cv::Vec3w>(), end = ans.end<cv::Vec3w>(); it != end; ++it)
{
(*it)[0] *= (255.0/USHRT_MAX);
(*it)[1] *= (255.0/USHRT_MAX);
(*it)[2] *= (255.0/USHRT_MAX);
}
ans.convertTo(ans, CV_8UC3);
}else if(ans.type()==CV_16UC1){
u_int16_t* ptr = (u_int16_t*)ans.data;
int end = ans.rows*ans.cols;
for(int i=0;i<end;i++){
*(ptr+i) *= (255.0/USHRT_MAX);
}
ans.convertTo(ans, CV_8UC1);
}else{
// Unsupported data type: returned unchanged.
}
return ans;
}
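The same scaling can also be expressed through OpenCV's convertTo scale argument, which folds the loop and the type change into one saturating pass; a minimal sketch, given a 16-bit Mat ans:

// convertTo applies saturate_cast(alpha * value) per element,
// so the manual loop above is not strictly needed.
cv::Mat to8bit;
ans.convertTo(to8bit, CV_8U, 255.0 / USHRT_MAX);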
static long getFreeMemoryImpl(const char* const sums[], const size_t sumsLen[], size_t num)
{
int fd = open("/proc/meminfo", O_RDONLY | O_CLOEXEC);
@@ -1987,6 +2017,36 @@ void CPhoneDevice::UpdateSimcard(const std::string& simcard)
m_simcard = simcard;
}
bool CPhoneDevice::ProcessRawCapture(bool result, int numberOfCaptures, const std::string& pathsJoinedByTab, long photoId)
{
std::vector<std::string> paths = split(pathsJoinedByTab, "\t");
if (paths.empty())
{
cv::Mat mat;
OnCaptureReady(true, false, mat, (unsigned long)photoId);
return false;
}
XYLOG(XYLOG_SEVERITY_ERROR, "Start Processing Raw Capture CH=%u IMGID=%u", (uint32_t)mPhotoInfo.channel, (uint32_t)mPhotoInfo.photoId);
hdrplus::hdrplus_pipeline pipeline;
cv::Mat mat;
pipeline.run_pipeline(paths, 0, mat);
mat = convert16bit2_8bit_(mat.clone());
cv::cvtColor(mat, mat, cv::COLOR_RGB2BGR);
XYLOG(XYLOG_SEVERITY_ERROR, "Finish Processing Raw Capture CH=%u IMGID=%u", (uint32_t)mPhotoInfo.channel, (uint32_t)mPhotoInfo.photoId);
#ifdef _DEBUG
// cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite("/sdcard/com.xypower.mpapp/tmp/final.jpg", mat);
#endif
OnCaptureReady(true, result, mat, (unsigned long)photoId);
return true;
}
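ProcessRawCapture leans on a split helper that is not part of this diff; a minimal sketch of the assumed behavior (a hypothetical stand-in, splitting on the tab separator the Java side joins with):

// Hypothetical stand-in for the project's split() helper used above:
// breaks "a\tb\tc" into {"a", "b", "c"}; empty input yields an empty vector.
static std::vector<std::string> split(const std::string& s, const std::string& delim)
{
    std::vector<std::string> parts;
    size_t start = 0, pos;
    while ((pos = s.find(delim, start)) != std::string::npos)
    {
        parts.push_back(s.substr(start, pos - start));
        start = pos + delim.size();
    }
    if (start < s.size()) parts.push_back(s.substr(start)); // last piece
    return parts;
}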
int CPhoneDevice::GetIceData(IDevice::ICE_INFO *iceInfo, IDevice::ICE_TAIL *iceTail, SENSOR_PARAM *sensorParam)
{

@@ -221,6 +221,7 @@ public:
void UpdatePosition(double lon, double lat, double radius, time_t ts);
bool OnVideoReady(bool photoOrVideo, bool result, const char* path, unsigned int photoId);
bool OnCaptureReady(bool photoOrVideo, bool result, cv::Mat& mat, unsigned int photoId);
bool ProcessRawCapture(bool result, int numberOfCaptures, const std::string& pathsJoinedByTab, long photoId);
void UpdateSignalLevel(int signalLevel);
void UpdateTfCardPath(const std::string& tfCardPath)

@@ -80,7 +80,7 @@ void onCaptureFailed(void* context, ACameraCaptureSession* session, ACaptureRequ
void onCaptureSequenceCompleted(void* context, ACameraCaptureSession* session, int sequenceId, int64_t frameNumber)
{
ALOGD("onCaptureSequenceCompleted %p %d %lld", session, sequenceId, frameNumber);
}
void onCaptureSequenceAborted(void* context, ACameraCaptureSession* session, int sequenceId)

@@ -0,0 +1,38 @@
#pragma once
#include <vector>
#include <utility> // std::pair
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/burst.h"
namespace hdrplus
{
class align
{
public:
align() = default;
~align() = default;
/**
* @brief Run alignment on a burst of images
*
* @param burst_images collection of burst images
* @param alignments alignment as pixel-offset pairs.
*     The outermost vector is per alternative image;
*     the inner two vectors index vertical & horizontal tiles.
*/
void process( const hdrplus::burst& burst_images, \
std::vector<std::vector<std::vector<std::pair<int, int>>>>& alignments );
private:
// From original image to coarse image
const std::vector<int> inv_scale_factors = { 1, 2, 4, 4 };
const std::vector<int> distances = { 1, 2, 2, 2 }; // L1 / L2 distance
const std::vector<int> grayimg_search_radious = { 1, 4, 4, 4 };
const std::vector<int> grayimg_tile_sizes = { 16, 16, 16, 8 };
const int num_levels = 4;
};
} // namespace hdrplus
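For reference, a hedged sketch of how the nested container filled by align::process is indexed (variable names are illustrative):

// alignments[alt][tile_row][tile_col] holds the (row, col) pixel offset that
// maps a reference tile onto the matching tile of alternative image `alt`.
std::vector<std::vector<std::vector<std::pair<int, int>>>> alignments;
hdrplus::align align_module;
align_module.process(burst_images, alignments); // given an hdrplus::burst burst_images
std::pair<int, int> offset = alignments[0][3][5]; // alt image 0, tile (3, 5)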

@@ -0,0 +1,35 @@
#pragma once
#include <string>
#include <vector>
#include <utility> // std::pair
#include <memory> // std::shared_ptr
#include <opencv2/opencv.hpp> // all opencv header
#include <libraw/libraw.h>
namespace hdrplus
{
class bayer_image
{
public:
explicit bayer_image( const std::string& bayer_image_path );
~bayer_image() = default;
std::pair<double, double> get_noise_params() const;
std::shared_ptr<LibRaw> libraw_processor;
cv::Mat raw_image;
cv::Mat grayscale_image;
int width;
int height;
int white_level;
std::vector<int> black_level_per_channel;
float iso;
private:
float baseline_lambda_shot = 3.24 * pow( 10, -4 );
float baseline_lambda_read = 4.3 * pow( 10, -6 );
};
} // namespace hdrplus
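A short usage sketch for bayer_image; how the two baseline lambdas above are scaled into per-shot noise parameters lives in bayer_image.cpp, which this diff does not include, so the comments are assumptions:

// Load one raw frame via LibRaw and query its noise model (path illustrative).
hdrplus::bayer_image frame("/sdcard/DCIM/burst/frame0.dng");
std::pair<double, double> noise = frame.get_noise_params();
double lambda_shot = noise.first;  // signal-dependent (shot) component
double lambda_read = noise.second; // signal-independent (read) component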

@@ -0,0 +1,43 @@
#pragma once
#include <vector>
#include <string>
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/bayer_image.h"
namespace hdrplus
{
class burst
{
public:
explicit burst( const std::string& burst_path, const std::string& reference_image_path );
explicit burst(const std::vector<std::string>& burst_paths, int reference_image_index);
~burst() = default;
// Reference image index in the array
int reference_image_idx;
// Source bayer images & grayscale unpadded image
std::vector<hdrplus::bayer_image> bayer_images;
// Image padded to upper level tile size (16*2)
// Use for alignment, merging, and finishing
std::vector<cv::Mat> bayer_images_pad;
// Padding information
std::vector<int> padding_info_bayer;
// Image padded to upper level tile size (16)
// Use for alignment, merging, and finishing
std::vector<cv::Mat> grayscale_images_pad;
// number of image (including reference) in burst
int num_images;
// Bayer image after merging, stored as cv::Mat
cv::Mat merged_bayer_image;
};
} // namespace hdrplus
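The two constructors mirror the two pipeline entry points: the directory form for the CLI-style flow, and the explicit path-list form added for RawBurstCapture. A minimal sketch of the latter (paths illustrative):

// Build a burst from explicit file paths; index 0 is the reference frame.
std::vector<std::string> paths = { "/tmp/f0.dng", "/tmp/f1.dng", "/tmp/f2.dng" };
hdrplus::burst burst_images(paths, 0);
// After the merge stage runs, the result is a single bayer frame:
cv::Mat merged = burst_images.merged_bayer_image;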

@@ -0,0 +1,240 @@
#pragma once
#include <opencv2/opencv.hpp> // all opencv header
#include <string>
#include <fstream>
#include <sstream>
#include <iostream>
#include <unordered_map>
#include <hdrplus/bayer_image.h>
#include <dirent.h>
#include <hdrplus/params.h>
#include <hdrplus/burst.h>
namespace hdrplus
{
uint16_t uGammaCompress_1pix(float x, float threshold,float gainMin,float gainMax,float exponent);
uint16_t uGammaDecompress_1pix(float x, float threshold,float gainMin,float gainMax,float exponent);
cv::Mat uGammaCompress_(cv::Mat m,float threshold,float gainMin,float gainMax,float exponent);
cv::Mat uGammaDecompress_(cv::Mat m,float threshold,float gainMin,float gainMax,float exponent);
cv::Mat gammasRGB(cv::Mat img, bool mode);
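These helpers apply the piecewise sRGB transfer curve, with threshold/gainMin/gainMax/exponent parameterizing the linear and power segments. For orientation, a sketch of the standard sRGB encode these arguments presumably correspond to (an assumption; the definitions are in finish.cpp, not shown in this diff):

// Standard sRGB encode of a linear value x in [0,1] (uses std::pow from <cmath>):
// threshold ~ 0.0031308, gainMin = 12.92, gainMax = 1.055, exponent = 2.4
float srgb_encode(float x) {
    return x <= 0.0031308f ? 12.92f * x
                           : 1.055f * std::pow(x, 1.0f / 2.4f) - 0.055f;
}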
class finish
{
public:
cv::Mat mergedBayer; // merged image from Merge Module
std::string burstPath; // path to burst images
std::vector<std::string> rawPathList; // paths to all burst images under burstPath
int refIdx; // index of the reference img
Parameters params;
cv::Mat rawReference;
// LibRaw libraw_processor_finish;
bayer_image* refBayer;
std::string mergedImgPath;
finish() = default;
// use this constructor once the merge stage has finished
finish(std::string burstPath, cv::Mat mergedBayer,int refIdx){
this->refIdx = refIdx;
this->burstPath = burstPath;
this->mergedBayer = mergedBayer;
}
// for local testing only
finish(std::string burstPath, std::string mergedBayerPath,int refIdx){
this->refIdx = refIdx;
this->burstPath = burstPath;
this->mergedBayer = loadFromCSV(mergedBayerPath, CV_16UC1);
load_rawPathList(burstPath);
refBayer= new bayer_image(this->rawPathList[refIdx]);
this->rawReference = refBayer->raw_image;//;grayscale_image
// initialize parameters in libraw_processor_finish
setLibRawParams();
showParams();
std::cout<<"Finish init() finished!"<<std::endl;
}
~finish() = default;
// finish pipeline func
// void process(std::string burstPath, cv::Mat mergedBayer,int refIdx);
void process(const hdrplus::burst& burst_images, cv::Mat& finalOutputImage);
// copy the contents of Mat B into Mat A (CV_16U)
void copy_mat_16U(cv::Mat& A, cv::Mat B);
void copy_rawImg2libraw(std::shared_ptr<LibRaw>& libraw_ptr, cv::Mat B);
// postprocess
// cv::Mat postprocess(std::shared_ptr<LibRaw>& libraw_ptr);
void showImg(cv::Mat img)
{
int ch = CV_MAT_CN(CV_8UC1);
// cv::Mat tmp(4208,3120,CV_16UC1);
cv::Mat tmp(img);
// u_int16_t* ptr_tmp = (u_int16_t*)tmp.data;
// u_int16_t* ptr_img = (u_int16_t*)img.data;
// // col major to row major
// for(int r = 0; r < tmp.rows; r++) {
// for(int c = 0; c < tmp.cols; c++) {
// *(ptr_tmp+r*tmp.cols+c) = *(ptr_img+c*tmp.rows+r)/2048.0*255.0;
// }
// }
// std::cout<<"height="<<tmp.rows<<std::endl;
// std::cout<<"width="<<tmp.cols<<std::endl;
// cv::transpose(tmp, tmp);
u_int16_t* ptr = (u_int16_t*)tmp.data;
for(int r = 0; r < tmp.rows; r++) {
for(int c = 0; c < tmp.cols; c++) {
*(ptr+r*tmp.cols+c) = *(ptr+r*tmp.cols+c)/2048.0*255.0;
}
}
tmp = tmp.reshape(ch);
tmp.convertTo(tmp, CV_8UC1);
cv::imshow("test",tmp);
cv::imwrite("test2.jpg", tmp);
cv::waitKey(0);
std::cout<< this->mergedBayer.size()<<std::endl;
}
void showMat(cv::Mat img){
std::cout<<"size="<<img.size()<<std::endl;
std::cout<<"type="<<img.type()<<std::endl;
}
void showParams()
{
std::cout<<"Parameters:"<<std::endl;
std::cout<<"tuning_ltmGain = "<<this->params.tuning.ltmGain<<std::endl;
std::cout<<"tuning_gtmContrast = "<<this->params.tuning.gtmContrast<<std::endl;
for(auto key_val:this->params.flags){
std::cout<<key_val.first<<","<<key_val.second<<std::endl;
}
std::cout<<"demosaic_algorithm = "<<refBayer->libraw_processor->imgdata.params.user_qual<<std::endl;
std::cout<<"half_size = "<<refBayer->libraw_processor->imgdata.params.half_size<<std::endl;
std::cout<<"use_camera_wb = "<<refBayer->libraw_processor->imgdata.params.use_camera_wb<<std::endl;
std::cout<<"use_auto_wb = "<<refBayer->libraw_processor->imgdata.params.use_auto_wb<<std::endl;
std::cout<<"no_auto_bright = "<<refBayer->libraw_processor->imgdata.params.no_auto_bright<<std::endl;
std::cout<<"output_color = "<<refBayer->libraw_processor->imgdata.params.output_color <<std::endl;
std::cout<<"gamma[0] = "<<refBayer->libraw_processor->imgdata.params.gamm[0]<<std::endl;
std::cout<<"gamma[1] = "<<refBayer->libraw_processor->imgdata.params.gamm[1]<<std::endl;
std::cout<<"output_bps = "<<refBayer->libraw_processor->imgdata.params.output_bps<<std::endl;
// std::cout<<"demosaic_algorithm = "<<libraw_processor_finish.imgdata.params.user_qual<<std::endl;
// std::cout<<"half_size = "<<libraw_processor_finish.imgdata.params.half_size<<std::endl;
// std::cout<<"use_camera_wb = "<<libraw_processor_finish.imgdata.params.use_camera_wb<<std::endl;
// std::cout<<"use_auto_wb = "<<libraw_processor_finish.imgdata.params.use_auto_wb<<std::endl;
// std::cout<<"no_auto_bright = "<<libraw_processor_finish.imgdata.params.no_auto_bright<<std::endl;
// std::cout<<"output_color = "<<libraw_processor_finish.imgdata.params.output_color <<std::endl;
// std::cout<<"gamma[0] = "<<libraw_processor_finish.imgdata.params.gamm[0]<<std::endl;
// std::cout<<"gamma[1] = "<<libraw_processor_finish.imgdata.params.gamm[1]<<std::endl;
// std::cout<<"output_bps = "<<libraw_processor_finish.imgdata.params.output_bps<<std::endl;
std::cout<<"===================="<<std::endl;
}
void showRawPathList(){
std::cout<<"RawPathList:"<<std::endl;
for(auto pth:this->rawPathList){
std::cout<<pth<<std::endl;
}
std::cout<<"===================="<<std::endl;
}
private:
cv::Mat loadFromCSV(const std::string& path, int opencv_type)
{
cv::Mat m;
std::ifstream csvFile (path);
std::string line;
while (getline(csvFile, line))
{
std::vector<int> dvals;
std::stringstream ss(line);
std::string val;
// int count=0;
while (getline(ss, val, ','))
{
dvals.push_back(stod(val));//*255.0/2048.0
// count++;
}
// std::cout<<count<<std::endl;
cv::Mat mline(dvals, true);
cv::transpose(mline, mline);
m.push_back(mline);
}
int ch = CV_MAT_CN(opencv_type);
m = m.reshape(ch);
m.convertTo(m, opencv_type);
return m;
}
void load_rawPathList(std::string burstPath){
DIR *pDir; // pointer to root
struct dirent *ptr;
if (!(pDir = opendir(burstPath.c_str()))) {
std::cout<<"root dir not found!"<<std::endl;
return;
}
while ((ptr = readdir(pDir)) != nullptr) {
// ptr will move to the next file automatically
std::string sub_file = burstPath + "/" + ptr->d_name; // current filepath that ptr points to
if (ptr->d_type != DT_REG && ptr->d_type != DT_DIR) { // neither a regular file nor a directory
continue; // skip special entries instead of aborting the scan (and leaking pDir)
}
// only collect regular files
if (ptr->d_type == DT_REG) {
if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
if (strstr(ptr->d_name, ".dng")) {
rawPathList.emplace_back(sub_file);
}
}
}
}
// close root dir
closedir(pDir);
}
void setLibRawParams(){
refBayer->libraw_processor->imgdata.params.user_qual = params.rawpyArgs.demosaic_algorithm;
refBayer->libraw_processor->imgdata.params.half_size = params.rawpyArgs.half_size;
refBayer->libraw_processor->imgdata.params.use_camera_wb = params.rawpyArgs.use_camera_wb;
refBayer->libraw_processor->imgdata.params.use_auto_wb = params.rawpyArgs.use_auto_wb;
refBayer->libraw_processor->imgdata.params.no_auto_bright = params.rawpyArgs.no_auto_bright;
refBayer->libraw_processor->imgdata.params.output_color = params.rawpyArgs.output_color;
refBayer->libraw_processor->imgdata.params.gamm[0] = params.rawpyArgs.gamma[0];
refBayer->libraw_processor->imgdata.params.gamm[1] = params.rawpyArgs.gamma[1];
refBayer->libraw_processor->imgdata.params.output_bps = params.rawpyArgs.output_bps;
// libraw_processor_finish.imgdata.params.user_qual = params.rawpyArgs.demosaic_algorithm;
// libraw_processor_finish.imgdata.params.half_size = params.rawpyArgs.half_size;
// libraw_processor_finish.imgdata.params.use_camera_wb = params.rawpyArgs.use_camera_wb;
// libraw_processor_finish.imgdata.params.use_auto_wb = params.rawpyArgs.use_auto_wb;
// libraw_processor_finish.imgdata.params.no_auto_bright = params.rawpyArgs.no_auto_bright;
// libraw_processor_finish.imgdata.params.output_color = params.rawpyArgs.output_color;
// libraw_processor_finish.imgdata.params.gamm[0] = params.rawpyArgs.gamma[0];
// libraw_processor_finish.imgdata.params.gamm[1] = params.rawpyArgs.gamma[1];
// libraw_processor_finish.imgdata.params.output_bps = params.rawpyArgs.output_bps;
}
};
} // namespace hdrplus

@@ -0,0 +1,27 @@
#pragma once
#include <string>
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/burst.h"
#include "hdrplus/align.h"
#include "hdrplus/merge.h"
#include "hdrplus/finish.h"
namespace hdrplus
{
class hdrplus_pipeline
{
private:
hdrplus::align align_module;
hdrplus::merge merge_module;
hdrplus::finish finish_module;
public:
void run_pipeline( const std::string& burst_path, const std::string& reference_image_path );
bool run_pipeline( const std::vector<std::string>& burst_paths, int reference_image_index, cv::Mat& finalImg );
hdrplus_pipeline() = default;
~hdrplus_pipeline() = default;
};
} // namespace hdrplus
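Both overloads run the same align/merge/finish stages; the string overload walks a directory, while the new vector overload (the one ProcessRawCapture calls) takes explicit paths and writes the result into a cv::Mat. Usage sketch (paths illustrative):

hdrplus::hdrplus_pipeline pipeline;
// Directory form: process every raw under the folder, given a reference file.
pipeline.run_pipeline("/tmp/burst_dir", "/tmp/burst_dir/frame0.dng");
// Path-list form: returns success and fills finalImg.
cv::Mat finalImg;
bool ok = pipeline.run_pipeline({"/tmp/f0.dng", "/tmp/f1.dng"}, 0, finalImg);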

@@ -0,0 +1,184 @@
#pragma once
#include <vector>
#include <opencv2/opencv.hpp> // all opencv header
#include <cmath>
#include "hdrplus/burst.h"
#define TILE_SIZE 16
#define TEMPORAL_FACTOR 75
#define SPATIAL_FACTOR 0.1
namespace hdrplus
{
class merge
{
public:
int offset = TILE_SIZE / 2;
float baseline_lambda_shot = 3.24 * pow( 10, -4 );
float baseline_lambda_read = 4.3 * pow( 10, -6 );
merge() = default;
~merge() = default;
/**
* @brief Run merge on a burst of images
*
* @param burst_images collection of burst images
* @param alignments alignment as pixel-offset pairs.
*     The outermost vector is per alternative image;
*     the inner two vectors index vertical & horizontal tiles.
*/
void process( hdrplus::burst& burst_images, \
std::vector<std::vector<std::vector<std::pair<int, int>>>>& alignments);
/*
std::vector<cv::Mat> get_other_tiles(); //return the other tile list T_1 to T_n
std::vector<cv::Mat> vector_math(string operation, reference_tile, other_tile_list); //for loop allowing operations across single element and list
std::vector<cv::Mat> scalar_vector_math(string operation, scalar num, std::vector<cv::Mat> tile_list); //for loop allowing operations across single element and list
std::vector<cv::Mat> average_vector(std::vector<cv::Mat> tile_list); //take average of vector elements
*/
private:
float tileRMS(cv::Mat tile) {
cv::Mat squared;
cv::multiply(tile, tile, squared);
return sqrt(cv::mean(squared)[0]);
}
std::vector<float> getNoiseVariance(std::vector<cv::Mat> tiles, float lambda_shot, float lambda_read) {
std::vector<float> noise_variance;
for (auto tile : tiles) {
noise_variance.push_back(lambda_shot * tileRMS(tile) + lambda_read);
}
return noise_variance;
}
cv::Mat cosineWindow1D(cv::Mat input, int window_size = TILE_SIZE) {
cv::Mat output = input.clone();
for (int i = 0; i < input.cols; ++i) {
output.at<float>(0, i) = 1. / 2. - 1. / 2. * cos(2 * M_PI * (input.at<float>(0, i) + 1 / 2.) / window_size);
}
return output;
}
cv::Mat cosineWindow2D(cv::Mat tile) {
int window_size = tile.rows; // Assuming square tile
cv::Mat output_tile = tile.clone();
cv::Mat window = cv::Mat::zeros(1, window_size, CV_32F);
for(int i = 0; i < window_size; ++i) {
window.at<float>(i) = i;
}
cv::Mat window_x = cosineWindow1D(window, window_size);
window_x = cv::repeat(window_x, window_size, 1);
cv::Mat window_2d = window_x.mul(window_x.t());
cv::Mat window_applied;
cv::multiply(tile, window_2d, window_applied, 1, CV_32F);
return window_applied;
}
cv::Mat cat2Dtiles(std::vector<std::vector<cv::Mat>> tiles) {
std::vector<cv::Mat> rows;
for (auto row_tiles : tiles) {
cv::Mat row;
cv::hconcat(row_tiles, row);
rows.push_back(row);
}
cv::Mat img;
cv::vconcat(rows, img);
return img;
}
void circshift(cv::Mat &out, const cv::Point &delta)
{
cv::Size sz = out.size();
// error checking
assert(sz.height > 0 && sz.width > 0);
// no need to shift
if ((sz.height == 1 && sz.width == 1) || (delta.x == 0 && delta.y == 0))
return;
// delta transform
int x = delta.x;
int y = delta.y;
if (x > 0) x = x % sz.width;
if (y > 0) y = y % sz.height;
if (x < 0) x = x % sz.width + sz.width;
if (y < 0) y = y % sz.height + sz.height;
// in case of multiple dimensions
std::vector<cv::Mat> planes;
split(out, planes);
for (size_t i = 0; i < planes.size(); i++)
{
// vertical
cv::Mat tmp0, tmp1, tmp2, tmp3;
cv::Mat q0(planes[i], cv::Rect(0, 0, sz.width, sz.height - y));
cv::Mat q1(planes[i], cv::Rect(0, sz.height - y, sz.width, y));
q0.copyTo(tmp0);
q1.copyTo(tmp1);
tmp0.copyTo(planes[i](cv::Rect(0, y, sz.width, sz.height - y)));
tmp1.copyTo(planes[i](cv::Rect(0, 0, sz.width, y)));
// horizontal
cv::Mat q2(planes[i], cv::Rect(0, 0, sz.width - x, sz.height));
cv::Mat q3(planes[i], cv::Rect(sz.width - x, 0, x, sz.height));
q2.copyTo(tmp2);
q3.copyTo(tmp3);
tmp2.copyTo(planes[i](cv::Rect(x, 0, sz.width - x, sz.height)));
tmp3.copyTo(planes[i](cv::Rect(0, 0, x, sz.height)));
}
cv::merge(planes, out);
}
void fftshift(cv::Mat &out)
{
cv::Size sz = out.size();
cv::Point pt(0, 0);
pt.x = (int) floor(sz.width / 2.0);
pt.y = (int) floor(sz.height / 2.0);
circshift(out, pt);
}
void ifftshift(cv::Mat &out)
{
cv::Size sz = out.size();
cv::Point pt(0, 0);
pt.x = (int) ceil(sz.width / 2.0);
pt.y = (int) ceil(sz.height / 2.0);
circshift(out, pt);
}
std::vector<cv::Mat> getReferenceTiles(cv::Mat reference_image);
cv::Mat mergeTiles(std::vector<cv::Mat> tiles, int rows, int cols);
cv::Mat processChannel( hdrplus::burst& burst_images, \
std::vector<std::vector<std::vector<std::pair<int, int>>>>& alignments, \
cv::Mat channel_image, \
std::vector<cv::Mat> alternate_channel_i_list,\
float lambda_shot, \
float lambda_read);
//temporal denoise
std::vector<cv::Mat> temporal_denoise(std::vector<cv::Mat> tiles, std::vector<std::vector<cv::Mat>> alt_tiles, std::vector<float> noise_variance, float temporal_factor);
std::vector<cv::Mat> spatial_denoise(std::vector<cv::Mat> tiles, int num_alts, std::vector<float> noise_variance, float spatial_factor);
};
} // namespace hdrplus
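For context, the temporal merge in HDR+-style pipelines is a Wiener-style shrinkage computed per frequency bin from the tile noise variance above; a hedged sketch of that weight (not the literal code in merge.cpp, which this diff does not include):

// diff_sq is the squared magnitude of the reference/alternate difference in
// one frequency bin; the weight decides how far the alternate is pulled toward
// the reference: A -> 1 keeps the reference value, A -> 0 averages the pair fully.
float shrinkage(float diff_sq, float noise_variance, float temporal_factor) {
    return diff_sq / (diff_sq + temporal_factor * noise_variance);
}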

@@ -0,0 +1,69 @@
#pragma once
#include <string>
#include <unordered_map>
#include <memory> // std::shared_ptr
#include <opencv2/opencv.hpp> // all opencv header
#include <libraw/libraw.h>
namespace hdrplus
{
class RawpyArgs{
public:
int demosaic_algorithm = 3; // 3 = AHD interpolation (maps to LibRaw's int user_qual)
bool half_size = false;
bool use_camera_wb = true;
bool use_auto_wb = false;
bool no_auto_bright = true;
int output_color = LIBRAW_COLORSPACE_sRGB;
int gamma[2] = {1,1}; // leave gamma linear; gamma correction is not applied here (mirrors the rawpy reference)
int output_bps = 16;
};
class Options{
public:
std::string input = "";
std::string output = "";
std::string mode = "full"; //'full' 'align' 'merge' 'finish'
int reference = 0;
float temporalfactor=75.0;
float spatialfactor = 0.1;
int ltmGain=-1;
double gtmContrast=0.075;
int verbose=2; // (0, 1, 2, 3, 4, 5)
};
class Tuning{
public:
std::string ltmGain = "auto";
double gtmContrast = 0.075;
std::vector<float> sharpenAmount{1,0.5,0.5};
std::vector<float> sharpenSigma{1,2,4};
std::vector<float> sharpenThreshold{0.02,0.04,0.06};
};
class Parameters{
public:
std::unordered_map<std::string,bool> flags;
RawpyArgs rawpyArgs;
Options options;
Tuning tuning;
Parameters()
{
const char* keys[] = {"writeReferenceImage", "writeGammaReference", "writeMergedImage", "writeGammaMerged",
"writeShortExposure", "writeLongExposure", "writeFusedExposure", "writeLTMImage",
"writeLTMGamma", "writeGTMImage", "writeReferenceFinal", "writeFinalImage"};
for (int idx = 0; idx < sizeof(keys) / sizeof(const char*); idx++) {
flags[keys[idx]] = true;
}
}
};
cv::Mat postprocess(std::shared_ptr<LibRaw>& libraw_ptr, RawpyArgs rawpyArgs);
void setParams(std::shared_ptr<LibRaw>& libraw_ptr, RawpyArgs rawpyArgs);
} // namespace hdrplus
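Parameters defaults every debug-output flag to true; callers that only want the final image can flip them off. Sketch:

hdrplus::Parameters params;
// Keep only the final image; suppress the intermediate debug dumps.
for (auto& kv : params.flags) kv.second = false;
params.flags["writeFinalImage"] = true;
params.options.reference = 0; // align everything to the first frame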

@@ -0,0 +1,326 @@
#pragma once
#include <string>
#include <stdexcept> // std::runtime_error
#include <opencv2/opencv.hpp> // all opencv header
#include <omp.h>
// https://stackoverflow.com/questions/63404539/portable-loop-unrolling-with-template-parameter-in-c-with-gcc-icc
/// Helper macros for stringification
#define TO_STRING_HELPER(X) #X
#define TO_STRING(X) TO_STRING_HELPER(X)
// Define loop unrolling depending on the compiler
#if defined(__ICC) || defined(__ICL)
#define UNROLL_LOOP(n) _Pragma(TO_STRING(unroll (n)))
#elif defined(__clang__)
#define UNROLL_LOOP(n) _Pragma(TO_STRING(unroll (n)))
#elif defined(__GNUC__) && !defined(__clang__)
#define UNROLL_LOOP(n) _Pragma(TO_STRING(GCC unroll (n)))
#elif defined(_MSC_BUILD)
#pragma message ("Microsoft Visual C++ (MSVC) detected: Loop unrolling not supported!")
#define UNROLL_LOOP(n)
#else
#warning "Unknown compiler: Loop unrolling not supported!"
#define UNROLL_LOOP(n)
#endif
namespace hdrplus
{
template <typename T, int kernel>
cv::Mat box_filter_kxk( const cv::Mat& src_image )
{
const T* src_image_ptr = (T*)src_image.data;
int src_height = src_image.size().height;
int src_width = src_image.size().width;
int src_step = src_image.step1();
if ( kernel <= 0 )
{
#ifdef __ANDROID__
return cv::Mat();
#else
throw std::runtime_error(std::string( __FILE__ ) + "::" + __func__ + " box filter only support kernel size >= 1");
#endif
}
// int(src_height / kernel) = floor(src_height / kernel)
// When input size is not multiplier of kernel, take floor
cv::Mat dst_image( src_height / kernel, src_width / kernel, src_image.type() );
T* dst_image_ptr = (T*)dst_image.data;
int dst_height = dst_image.size().height;
int dst_width = dst_image.size().width;
int dst_step = dst_image.step1();
for ( int row_i = 0; row_i < dst_height; ++row_i )
{
for ( int col_i = 0; col_i < dst_width; col_i++ )
{
// Sum the kernel window
T box_sum = T( 0 );
UNROLL_LOOP( kernel )
for ( int kernel_row_i = 0; kernel_row_i < kernel; ++kernel_row_i )
{
UNROLL_LOOP( kernel )
for ( int kernel_col_i = 0; kernel_col_i < kernel; ++kernel_col_i )
{
box_sum += src_image_ptr[ ( row_i * kernel + kernel_row_i ) * src_step + ( col_i * kernel + kernel_col_i ) ];
}
}
// Average via integer division (truncates)
T box_avg = box_sum / T( kernel * kernel );
dst_image_ptr[ row_i * dst_step + col_i ] = box_avg;
}
}
return dst_image;
}
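Usage sketch for the templated box filter; the kernel size is a compile-time constant so UNROLL_LOOP can expand the inner loops:

// Downscale a CV_16UC1 image by 2x via 2x2 box averaging.
cv::Mat half = hdrplus::box_filter_kxk<uint16_t, 2>(full_res_gray);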
template <typename T, int kernel>
cv::Mat downsample_nearest_neighbour( const cv::Mat& src_image )
{
const T* src_image_ptr = (T*)src_image.data;
int src_height = src_image.size().height;
int src_width = src_image.size().width;
int src_step = src_image.step1();
// int(src_height / kernel) = floor(src_height / kernel)
// When input size is not multiplier of kernel, take floor
cv::Mat dst_image = cv::Mat( src_height / kernel, src_width / kernel, src_image.type() );
T* dst_image_ptr = (T*)dst_image.data;
int dst_height = dst_image.size().height;
int dst_width = dst_image.size().width;
int dst_step = dst_image.step1();
// -O3 should be enough to optimize the code below
for ( int row_i = 0; row_i < dst_height; row_i++ )
{
UNROLL_LOOP( 32 )
for ( int col_i = 0; col_i < dst_width; col_i++ )
{
dst_image_ptr[ row_i * dst_step + col_i ] = \
src_image_ptr[ (row_i * kernel) * src_step + (col_i * kernel) ];
}
}
return dst_image;
}
template< typename T >
void print_cvmat( cv::Mat image )
{
const T* img_ptr = (const T*)image.data;
int height = image.size().height;
int width = image.size().width;
int step = image.step1();
int chns = image.channels();
printf("print_cvmat()::Image of size height = %d, width = %d, step = %d\n", \
height, width, step );
if ( chns == 1 )
{
for ( int row_i = 0; row_i < height; ++row_i )
{
int row_i_offset = row_i * step;
for ( int col_i = 0; col_i < width; ++col_i )
{
printf("%3.d ", img_ptr[ row_i_offset + col_i ]);
//printf("%3.d ", int( image.at<T>( row_i, col_i ) ) );
}
printf("\n");
}
}
else if ( chns == 3 )
{
for ( int row_i = 0; row_i < height; ++row_i )
{
int row_i_offset = row_i * step;
for ( int col_i = 0; col_i < width; ++col_i )
{
printf("[%3.d, %3.d, %3.d] ", img_ptr[ row_i_offset + col_i * 3 + 0 ], \
img_ptr[ row_i_offset + col_i * 3 + 1 ], \
img_ptr[ row_i_offset + col_i * 3 + 2 ] );
}
printf("\n");
}
}
else
{
#ifdef __ANDROID__
#else
throw std::runtime_error("cv::Mat number of channel currently not support to print\n");
#endif
}
}
/**
* @brief Extract the four bayer channels separately from a bayer image
*
* @tparam T data type of the bayer image.
* @note The output Mats use OpenCV's internal reference counting,
*     so this step does not create deep-copy overhead.
*
* @example extract_rgb_from_bayer<uint16_t>( bayer_img, ch1, ch2, ch3, ch4 );
*/
template <typename T>
void extract_rgb_from_bayer( const cv::Mat& bayer_img, \
cv::Mat& img_ch1, cv::Mat& img_ch2, cv::Mat& img_ch3, cv::Mat& img_ch4 )
{
const T* bayer_img_ptr = (const T*)bayer_img.data;
int bayer_width = bayer_img.size().width;
int bayer_height = bayer_img.size().height;
int bayer_step = bayer_img.step1();
if ( bayer_width % 2 != 0 || bayer_height % 2 != 0 )
{
#ifdef __ANDROID__
#else
throw std::runtime_error("Bayer image data size incorrect, must be multiplier of 2\n");
#endif
}
// RGB image is half the size of bayer image
int rgb_width = bayer_width / 2;
int rgb_height = bayer_height / 2;
img_ch1.create( rgb_height, rgb_width, bayer_img.type() );
img_ch2.create( rgb_height, rgb_width, bayer_img.type() );
img_ch3.create( rgb_height, rgb_width, bayer_img.type() );
img_ch4.create( rgb_height, rgb_width, bayer_img.type() );
int rgb_step = img_ch1.step1();
T* img_ch1_ptr = (T*)img_ch1.data;
T* img_ch2_ptr = (T*)img_ch2.data;
T* img_ch3_ptr = (T*)img_ch3.data;
T* img_ch4_ptr = (T*)img_ch4.data;
#pragma omp parallel for
for ( int rgb_row_i = 0; rgb_row_i < rgb_height; rgb_row_i++ )
{
int rgb_row_i_offset = rgb_row_i * rgb_step;
// Every output row corresponds to two bayer image rows
int bayer_row_i_offset0 = ( rgb_row_i * 2 + 0 ) * bayer_step; // For RG
int bayer_row_i_offset1 = ( rgb_row_i * 2 + 1 ) * bayer_step; // For GB
for ( int rgb_col_j = 0; rgb_col_j < rgb_width; rgb_col_j++ )
{
// img_ch1/2/3/4 : (0,0), (1,0), (0,1), (1,1)
int bayer_col_i_offset0 = rgb_col_j * 2 + 0;
int bayer_col_i_offset1 = rgb_col_j * 2 + 1;
img_ch1_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset0 + bayer_col_i_offset0 ];
img_ch3_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset0 + bayer_col_i_offset1 ];
img_ch2_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset1 + bayer_col_i_offset0 ];
img_ch4_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset1 + bayer_col_i_offset1 ];
}
}
}
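Usage sketch; per the (0,0)/(1,0)/(0,1)/(1,1) sampling above, an RGGB mosaic lands as ch1 = R, ch2/ch3 = the two greens, ch4 = B:

cv::Mat ch1, ch2, ch3, ch4; // (0,0), (1,0), (0,1), (1,1) samples
hdrplus::extract_rgb_from_bayer<uint16_t>(bayer_img, ch1, ch2, ch3, ch4);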
/**
* @brief Convert an RGB image to gray through an equal-weight linear combination.
* Also support implicit data type conversion.
*
* @tparam RGB_DTYPE rgb image type (e.g. uint16_t)
* @tparam GRAY_DTYPE gray image type (e.g. uint16_t)
* @tparam GRAY_CVTYPE opencv gray image type
*/
template< typename RGB_DTYPE, typename GRAY_DTYPE, int GRAY_CVTYPE >
cv::Mat rgb_2_gray( const cv::Mat& rgb_img )
{
const RGB_DTYPE* rgb_img_ptr = (const RGB_DTYPE*)rgb_img.data;
int img_width = rgb_img.size().width;
int img_height = rgb_img.size().height;
int rgb_img_step = rgb_img.step1();
// Create output gray cv::Mat
cv::Mat gray_img( img_height, img_width, GRAY_CVTYPE );
GRAY_DTYPE* gray_img_ptr = (GRAY_DTYPE*)gray_img.data;
int gray_img_step = gray_img.step1();
#pragma omp parallel for
for ( int row_i = 0; row_i < img_height; row_i++ )
{
int rgb_row_i_offset = row_i * rgb_img_step;
int gray_row_i_offset = row_i * gray_img_step;
UNROLL_LOOP( 32 ) // multiplier of cache line size
for ( int col_j = 0; col_j < img_width; col_j++ )
{
GRAY_DTYPE avg_ij(0);
avg_ij += rgb_img_ptr[ rgb_row_i_offset + (col_j * 3 + 0) ];
avg_ij += rgb_img_ptr[ rgb_row_i_offset + (col_j * 3 + 1) ];
avg_ij += rgb_img_ptr[ rgb_row_i_offset + (col_j * 3 + 2) ];
avg_ij /= 3;
gray_img_ptr[ gray_row_i_offset + col_j ] = avg_ij;
}
}
// OpenCV use reference count. Thus return won't create deep copy
return gray_img;
}
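Usage sketch; note the accumulator is the gray type itself, so an extremely bright 16-bit input (all three channels near USHRT_MAX) could wrap during the three-way sum:

// Convert an interleaved 16-bit RGB Mat to a single-channel 16-bit gray Mat.
cv::Mat gray = hdrplus::rgb_2_gray<uint16_t, uint16_t, CV_16UC1>(rgb_img);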
template <typename T>
void print_tile( const cv::Mat& img, int tile_size, int start_idx_row, int start_idx_col )
{
const T* img_ptr = (T*)img.data;
int src_step = img.step1();
for ( int row = start_idx_row; row < tile_size + start_idx_row; ++row )
{
const T* img_ptr_row = img_ptr + row * src_step;
for ( int col = start_idx_col; col < tile_size + start_idx_col; ++col )
{
printf("%u ", img_ptr_row[ col ] );
}
printf("\n");
}
printf("\n");
}
template< typename T>
void print_img( const cv::Mat& img, int img_height = -1, int img_width = -1 )
{
const T* img_ptr = (T*)img.data;
if ( img_height == -1 && img_width == -1 )
{
img_height = img.size().height;
img_width = img.size().width;
}
else
{
img_height = std::min( img.size().height, img_height );
img_width = std::min( img.size().width, img_width );
}
printf("Image size (h=%d, w=%d), Print range (h=0-%d, w=0-%d)]\n", \
img.size().height, img.size().width, img_height, img_width );
int img_step = img.step1();
for ( int row = 0; row < img_height; ++row )
{
const T* img_ptr_row = img_ptr + row * img_step;
for ( int col = 0; col < img_width; ++col )
{
printf("%u ", img_ptr_row[ col ]);
}
printf("\n");
}
printf("\n");
}
} // namespace hdrplus

@@ -0,0 +1,990 @@
#include <vector>
#include <string>
#include <limits>
#include <cstdio>
#include <utility> // std::make_pair
#include <stdexcept> // std::runtime_error
#include <opencv2/opencv.hpp> // all opencv header
#include <omp.h>
#include "hdrplus/align.h"
#include "hdrplus/burst.h"
#include "hdrplus/utility.h"
namespace hdrplus
{
// Function declarations
static void build_per_grayimg_pyramid( \
std::vector<cv::Mat>& images_pyramid, \
const cv::Mat& src_image, \
const std::vector<int>& inv_scale_factors );
template< int pyramid_scale_factor_prev_curr, int tilesize_scale_factor_prev_curr, int tile_size >
static void build_upsampled_prev_aligement( \
const std::vector<std::vector<std::pair<int, int>>>& src_alignment, \
std::vector<std::vector<std::pair<int, int>>>& dst_alignment, \
int num_tiles_h, int num_tiles_w, \
const cv::Mat& ref_img, const cv::Mat& alt_img, \
bool consider_nbr = false );
template< typename data_type, typename return_type, int tile_size >
static unsigned long long l1_distance( const cv::Mat& img1, const cv::Mat& img2, \
int img1_tile_row_start_idx, int img1_tile_col_start_idx, \
int img2_tile_row_start_idx, int img2_tile_col_start_idx );
template< typename data_type, typename return_type, int tile_size >
static return_type l2_distance( const cv::Mat& img1, const cv::Mat& img2, \
int img1_tile_row_start_idx, int img1_tile_col_start_idx, \
int img2_tile_row_start_idx, int img2_tile_col_start_idx );
static void align_image_level( \
const cv::Mat& ref_img, \
const cv::Mat& alt_img, \
std::vector<std::vector<std::pair<int, int>>>& prev_aligement, \
std::vector<std::vector<std::pair<int, int>>>& curr_alignment, \
int scale_factor_prev_curr, \
int curr_tile_size, \
int prev_tile_size, \
int search_radiou, \
int distance_type );
// Function Implementations
// static function only visible within file
static void build_per_grayimg_pyramid( \
std::vector<cv::Mat>& images_pyramid, \
const cv::Mat& src_image, \
const std::vector<int>& inv_scale_factors )
{
// #ifndef NDEBUG
// printf("%s::%s build_per_grayimg_pyramid start with scale factor : ", __FILE__, __func__ );
// for ( int i = 0; i < inv_scale_factors.size(); ++i )
// {
// printf("%d ", inv_scale_factors.at( i ));
// }
// printf("\n");
// #endif
images_pyramid.resize( inv_scale_factors.size() );
for ( size_t i = 0; i < inv_scale_factors.size(); ++i )
{
cv::Mat blur_image;
cv::Mat downsample_image;
switch ( inv_scale_factors[ i ] )
{
case 1:
images_pyramid[ i ] = src_image.clone();
// cv::Mat use reference count, will not create deep copy
downsample_image = src_image;
break;
case 2:
// printf("(2) downsample with gaussian sigma %.2f", inv_scale_factors[ i ] * 0.5 );
// // Gaussian blur
cv::GaussianBlur( images_pyramid.at( i-1 ), blur_image, cv::Size(0, 0), inv_scale_factors[ i ] * 0.5 );
// // Downsample
downsample_image = downsample_nearest_neighbour<uint16_t, 2>( blur_image );
// downsample_image = downsample_nearest_neighbour<uint16_t, 2>( images_pyramid.at( i-1 ) );
// Add
images_pyramid.at( i ) = downsample_image.clone();
break;
case 4:
// printf("(4) downsample with gaussian sigma %.2f", inv_scale_factors[ i ] * 0.5 );
cv::GaussianBlur( images_pyramid.at( i-1 ), blur_image, cv::Size(0, 0), inv_scale_factors[ i ] * 0.5 );
downsample_image = downsample_nearest_neighbour<uint16_t, 4>( blur_image );
// downsample_image = downsample_nearest_neighbour<uint16_t, 4>( images_pyramid.at( i-1 ) );
images_pyramid.at( i ) = downsample_image.clone();
break;
default:
#ifdef __ANDROID__
#else
throw std::runtime_error("inv scale factor " + std::to_string( inv_scale_factors[ i ]) + "invalid" );
#endif
}
}
}
static bool operator!=( const std::pair<int, int>& lhs, const std::pair<int, int>& rhs )
{
return lhs.first != rhs.first || lhs.second != rhs.second;
}
template< int pyramid_scale_factor_prev_curr, int tilesize_scale_factor_prev_curr, int tile_size >
static void build_upsampled_prev_aligement( \
const std::vector<std::vector<std::pair<int, int>>>& src_alignment, \
std::vector<std::vector<std::pair<int, int>>>& dst_alignment, \
int num_tiles_h, int num_tiles_w, \
const cv::Mat& ref_img, const cv::Mat& alt_img, \
bool consider_nbr )
{
int src_num_tiles_h = src_alignment.size();
int src_num_tiles_w = src_alignment[ 0 ].size();
constexpr int repeat_factor = pyramid_scale_factor_prev_curr / tilesize_scale_factor_prev_curr;
// printf("build_upsampled_prev_aligement with scale factor %d, repeat factor %d, tile size factor %d\n", \
// pyramid_scale_factor_prev_curr, repeat_factor, tilesize_scale_factor_prev_curr );
int dst_num_tiles_main_h = src_num_tiles_h * repeat_factor;
int dst_num_tiles_main_w = src_num_tiles_w * repeat_factor;
if ( dst_num_tiles_main_h > num_tiles_h || dst_num_tiles_main_w > num_tiles_w )
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("current level number of tiles smaller than upsampled tiles\n");
#endif
}
// Allocate data for dst_alignment
// NOTE: number of tiles h, number of tiles w might be different from dst_num_tiles_main_h, dst_num_tiles_main_w
// For tiles between num_tile_h and dst_num_tiles_main_h, use (0,0)
dst_alignment.resize( num_tiles_h, std::vector<std::pair<int, int>>( num_tiles_w, std::pair<int, int>(0, 0) ) );
// Upsample alignment
#pragma omp parallel for collapse(2)
for ( int row_i = 0; row_i < src_num_tiles_h; row_i++ )
{
for ( int col_i = 0; col_i < src_num_tiles_w; col_i++ )
{
// Scale alignment
std::pair<int, int> align_i = src_alignment[ row_i ][ col_i ];
align_i.first *= pyramid_scale_factor_prev_curr;
align_i.second *= pyramid_scale_factor_prev_curr;
// repeat
UNROLL_LOOP( repeat_factor )
for ( int repeat_row_i = 0; repeat_row_i < repeat_factor; ++repeat_row_i )
{
int repeat_row_i_offset = row_i * repeat_factor + repeat_row_i;
UNROLL_LOOP( repeat_factor )
for ( int repeat_col_i = 0; repeat_col_i < repeat_factor; ++repeat_col_i )
{
int repeat_col_i_offset = col_i * repeat_factor + repeat_col_i;
dst_alignment[ repeat_row_i_offset ][ repeat_col_i_offset ] = align_i;
}
}
}
}
if ( consider_nbr )
{
// Copy constructor
std::vector<std::vector<std::pair<int, int>>> upsampled_alignment{ dst_alignment };
// Distance function
unsigned long long (*distance_func_ptr)(const cv::Mat&, const cv::Mat&, int, int, int, int) = \
&l1_distance<uint16_t, unsigned long long, tile_size>;
#pragma omp parallel for collapse(2)
for ( int tile_row_i = 0; tile_row_i < num_tiles_h; tile_row_i++ )
{
for ( int tile_col_i = 0; tile_col_i < num_tiles_w; tile_col_i++ )
{
const auto& curr_align_i = upsampled_alignment[ tile_row_i ][ tile_col_i ];
// Container for nbr alignment pair
std::vector<std::pair<int, int>> nbrs_align_i;
// Consider 4 neighbour's alignment
// Only compute distance if alignment is different
if ( tile_col_i > 0 )
{
const auto& nbr1_align_i = upsampled_alignment[ tile_row_i + 0 ][ tile_col_i - 1 ];
if ( curr_align_i != nbr1_align_i ) nbrs_align_i.emplace_back( nbr1_align_i );
}
if ( tile_col_i < num_tiles_w - 1 )
{
const auto& nbr2_align_i = upsampled_alignment[ tile_row_i + 0 ][ tile_col_i + 1 ];
if ( curr_align_i != nbr2_align_i ) nbrs_align_i.emplace_back( nbr2_align_i );
}
if ( tile_row_i > 0 )
{
const auto& nbr3_align_i = upsampled_alignment[ tile_row_i - 1 ][ tile_col_i + 0 ];
if ( curr_align_i != nbr3_align_i ) nbrs_align_i.emplace_back( nbr3_align_i );
}
if ( tile_row_i < num_tiles_h - 1 )
{
const auto& nbr4_align_i = upsampled_alignment[ tile_row_i + 1 ][ tile_col_i + 0 ];
if ( curr_align_i != nbr4_align_i ) nbrs_align_i.emplace_back( nbr4_align_i );
}
// If there are neighbour alignments to consider, compute distances
if ( ! nbrs_align_i.empty() )
{
int ref_tile_row_start_idx_i = tile_row_i * tile_size / 2;
int ref_tile_col_start_idx_i = tile_col_i * tile_size / 2;
// curr_align_i's distance
auto curr_align_i_distance = distance_func_ptr(
ref_img, alt_img, \
ref_tile_row_start_idx_i, \
ref_tile_col_start_idx_i, \
ref_tile_row_start_idx_i + curr_align_i.first, \
ref_tile_col_start_idx_i + curr_align_i.second );
for ( const auto& nbr_align_i : nbrs_align_i )
{
auto nbr_align_i_distance = distance_func_ptr(
ref_img, alt_img, \
ref_tile_row_start_idx_i, \
ref_tile_col_start_idx_i, \
ref_tile_row_start_idx_i + nbr_align_i.first, \
ref_tile_col_start_idx_i + nbr_align_i.second );
if ( nbr_align_i_distance < curr_align_i_distance )
{
#ifndef NDEBUG
printf("tile [%d, %d] update align, prev align (%d, %d) curr align (%d, %d), prev distance %d curr distance %d\n", \
tile_row_i, tile_col_i, \
curr_align_i.first, curr_align_i.second, \
nbr_align_i.first, nbr_align_i.second, \
int(curr_align_i_distance), int(nbr_align_i_distance) );
#endif
dst_alignment[ tile_row_i ][ tile_col_i ] = nbr_align_i;
curr_align_i_distance = nbr_align_i_distance;
}
}
}
}
}
}
}
// Set tilesize as template argument for better compiler optimization result.
template< typename data_type, typename return_type, int tile_size >
static unsigned long long l1_distance( const cv::Mat& img1, const cv::Mat& img2, \
int img1_tile_row_start_idx, int img1_tile_col_start_idx, \
int img2_tile_row_start_idx, int img2_tile_col_start_idx )
{
#define CUSTOME_ABS( x ) ( x ) > 0 ? ( x ) : - ( x )
const data_type* img1_ptr = (const data_type*)img1.data;
const data_type* img2_ptr = (const data_type*)img2.data;
int img1_step = img1.step1();
int img2_step = img2.step1();
int img1_width = img1.size().width;
int img1_height = img1.size().height;
int img2_width = img2.size().width;
int img2_height = img2.size().height;
// Range check for safety
if ( img1_tile_row_start_idx < 0 || img1_tile_row_start_idx > img1_height - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l1 distance img1_tile_row_start_idx" + std::to_string( img1_tile_row_start_idx ) + \
" out of valid range (0, " + std::to_string( img1_height - tile_size ) + ")\n" );
#endif
}
if ( img1_tile_col_start_idx < 0 || img1_tile_col_start_idx > img1_width - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l1 distance img1_tile_col_start_idx" + std::to_string( img1_tile_col_start_idx ) + \
" out of valid range (0, " + std::to_string( img1_width - tile_size ) + ")\n" );
#endif
}
if ( img2_tile_row_start_idx < 0 || img2_tile_row_start_idx > img2_height - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l1 distance img2_tile_row_start_idx out of valid range\n");
#endif
}
if ( img2_tile_col_start_idx < 0 || img2_tile_col_start_idx > img2_width - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l1 distance img2_tile_col_start_idx out of valid range\n");
#endif
}
return_type sum(0);
UNROLL_LOOP( tile_size )
for ( int row_i = 0; row_i < tile_size; ++row_i )
{
const data_type* img1_ptr_row_i = img1_ptr + (img1_tile_row_start_idx + row_i) * img1_step + img1_tile_col_start_idx;
const data_type* img2_ptr_row_i = img2_ptr + (img2_tile_row_start_idx + row_i) * img2_step + img2_tile_col_start_idx;
UNROLL_LOOP( tile_size )
for ( int col_i = 0; col_i < tile_size; ++col_i )
{
data_type l1 = CUSTOME_ABS( img1_ptr_row_i[ col_i ] - img2_ptr_row_i[ col_i ] );
sum += l1;
}
}
#undef CUSTOME_ABS
return sum;
}
template< typename data_type, typename return_type, int tile_size >
static return_type l2_distance( const cv::Mat& img1, const cv::Mat& img2, \
int img1_tile_row_start_idx, int img1_tile_col_start_idx, \
int img2_tile_row_start_idx, int img2_tile_col_start_idx )
{
#define CUSTOME_ABS( x ) ( x ) > 0 ? ( x ) : - ( x )
const data_type* img1_ptr = (const data_type*)img1.data;
const data_type* img2_ptr = (const data_type*)img2.data;
int img1_step = img1.step1();
int img2_step = img2.step1();
int img1_width = img1.size().width;
int img1_height = img1.size().height;
int img2_width = img2.size().width;
int img2_height = img2.size().height;
// Range check for safety
if ( img1_tile_row_start_idx < 0 || img1_tile_row_start_idx > img1_height - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l2 distance img1_tile_row_start_idx" + std::to_string( img1_tile_row_start_idx ) + \
" out of valid range (0, " + std::to_string( img1_height - tile_size ) + ")\n" );
#endif
}
if ( img1_tile_col_start_idx < 0 || img1_tile_col_start_idx > img1_width - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l2 distance img1_tile_col_start_idx" + std::to_string( img1_tile_col_start_idx ) + \
" out of valid range (0, " + std::to_string( img1_width - tile_size ) + ")\n" );
#endif
}
if ( img2_tile_row_start_idx < 0 || img2_tile_row_start_idx > img2_height - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l2 distance img2_tile_row_start_idx out of valid range\n");
#endif
}
if ( img2_tile_col_start_idx < 0 || img2_tile_col_start_idx > img2_width - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l2 distance img2_tile_col_start_idx out of valid range\n");
#endif
}
// printf("Search two tile with ref : \n");
// print_tile<data_type>( img1, tile_size, img1_tile_row_start_idx, img1_tile_col_start_idx );
// printf("Search two tile with alt :\n");
// print_tile<data_type>( img2, tile_size, img2_tile_row_start_idx, img2_tile_col_start_idx );
return_type sum(0);
UNROLL_LOOP( tile_size )
for ( int row_i = 0; row_i < tile_size; ++row_i )
{
const data_type* img1_ptr_row_i = img1_ptr + (img1_tile_row_start_idx + row_i) * img1_step + img1_tile_col_start_idx;
const data_type* img2_ptr_row_i = img2_ptr + (img2_tile_row_start_idx + row_i) * img2_step + img2_tile_col_start_idx;
UNROLL_LOOP( tile_size )
for ( int col_i = 0; col_i < tile_size; ++col_i )
{
data_type l1 = CUSTOME_ABS( img1_ptr_row_i[ col_i ] - img2_ptr_row_i[ col_i ] );
sum += ( l1 * l1 );
}
}
#undef CUSTOME_ABS
return sum;
}
template<typename T, int tile_size>
static cv::Mat extract_img_tile( const cv::Mat& img, int img_tile_row_start_idx, int img_tile_col_start_idx )
{
const T* img_ptr = (const T*)img.data;
int img_width = img.size().width;
int img_height = img.size().height;
int img_step = img.step1();
if ( img_tile_row_start_idx < 0 || img_tile_row_start_idx > img_height - tile_size )
{
#ifdef __ANDROID__
return cv::Mat();
#else
throw std::runtime_error("extract_img_tile img_tile_row_start_idx " + std::to_string( img_tile_row_start_idx ) + \
" out of valid range (0, " + std::to_string( img_height - tile_size ) + ")\n" );
#endif
}
if ( img_tile_col_start_idx < 0 || img_tile_col_start_idx > img_width - tile_size )
{
#ifdef __ANDROID__
return cv::Mat();
#else
throw std::runtime_error("extract_img_tile img_tile_col_start_idx " + std::to_string( img_tile_col_start_idx ) + \
" out of valid range (0, " + std::to_string( img_width - tile_size ) + ")\n" );
#endif
}
cv::Mat img_tile( tile_size, tile_size, img.type() );
T* img_tile_ptr = (T*)img_tile.data;
int img_tile_step = img_tile.step1();
UNROLL_LOOP( tile_size )
for ( int row_i = 0; row_i < tile_size; ++row_i )
{
const T* img_ptr_row_i = img_ptr + img_step * ( img_tile_row_start_idx + row_i );
T* img_tile_ptr_row_i = img_tile_ptr + img_tile_step * row_i;
UNROLL_LOOP( tile_size )
for ( int col_i = 0; col_i < tile_size; ++col_i )
{
img_tile_ptr_row_i[ col_i ] = img_ptr_row_i[ img_tile_col_start_idx + col_i ];
}
}
return img_tile;
}
void align_image_level( \
const cv::Mat& ref_img, \
const cv::Mat& alt_img, \
std::vector<std::vector<std::pair<int, int>>>& prev_aligement, \
std::vector<std::vector<std::pair<int, int>>>& curr_alignment, \
int scale_factor_prev_curr, \
int curr_tile_size, \
int prev_tile_size, \
int search_radiou, \
int distance_type )
{
// Every align image level share the same distance function.
// Use function ptr to reduce if else overhead inside for loop
unsigned long long (*distance_func_ptr)(const cv::Mat&, const cv::Mat&, int, int, int, int) = nullptr;
if ( distance_type == 1 ) // l1 distance
{
if ( curr_tile_size == 8 )
{
distance_func_ptr = &l1_distance<uint16_t, unsigned long long, 8>;
}
else if ( curr_tile_size == 16 )
{
distance_func_ptr = &l1_distance<uint16_t, unsigned long long, 16>;
}
}
else if ( distance_type == 2 ) // l2 distance
{
if ( curr_tile_size == 8 )
{
distance_func_ptr = &l2_distance<uint16_t, unsigned long long, 8>;
}
else if ( curr_tile_size == 16 )
{
distance_func_ptr = &l2_distance<uint16_t, unsigned long long, 16>;
}
}
// Every level share the same upsample function
void (*upsample_alignment_func_ptr)(const std::vector<std::vector<std::pair<int, int>>>&, \
std::vector<std::vector<std::pair<int, int>>>&, \
int, int, const cv::Mat&, const cv::Mat&, bool) = nullptr;
if ( scale_factor_prev_curr == 2 )
{
if ( curr_tile_size / prev_tile_size == 2 )
{
if ( curr_tile_size == 8 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<2, 2, 8>;
}
else if ( curr_tile_size == 16 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<2, 2, 16>;
}
else
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Something wrong with upsampling function setting\n");
#endif
}
}
else if ( curr_tile_size / prev_tile_size == 1 )
{
if ( curr_tile_size == 8 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<2, 1, 8>;
}
else if ( curr_tile_size == 16 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<2, 1, 16>;
}
else
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Something wrong with upsampling function setting\n");
#endif
}
}
else
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Something wrong with upsampling function setting\n");
#endif
}
}
else if ( scale_factor_prev_curr == 4 )
{
if ( curr_tile_size / prev_tile_size == 2 )
{
if ( curr_tile_size == 8 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<4, 2, 8>;
}
else if ( curr_tile_size == 16 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<4, 2, 16>;
}
else
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Something wrong with upsampling function setting\n");
#endif
}
}
else if ( curr_tile_size / prev_tile_size == 1 )
{
if ( curr_tile_size == 8 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<4, 1, 8>;
}
else if ( curr_tile_size == 16 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<4, 1, 16>;
}
else
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Something wrong with upsampling function setting\n");
#endif
}
}
else
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Something wrong with upsampling function setting\n");
#endif
}
}
// Function to extract reference image tile for memory cache
cv::Mat (*extract_ref_img_tile)(const cv::Mat&, int, int) = nullptr;
if ( curr_tile_size == 8 )
{
extract_ref_img_tile = &extract_img_tile<uint16_t, 8>;
}
else if ( curr_tile_size == 16 )
{
extract_ref_img_tile = &extract_img_tile<uint16_t, 16>;
}
// Function to extract search image tile for memory cache
cv::Mat (*extract_alt_img_search)(const cv::Mat&, int, int) = nullptr;
if ( curr_tile_size == 8 )
{
if ( search_radiou == 1 )
{
extract_alt_img_search = &extract_img_tile<uint16_t, 8+1*2>;
}
else if ( search_radiou == 4 )
{
extract_alt_img_search = &extract_img_tile<uint16_t, 8+4*2>;
}
}
else if ( curr_tile_size == 16 )
{
if ( search_radiou == 1 )
{
extract_alt_img_search = &extract_img_tile<uint16_t, 16+1*2>;
}
else if ( search_radiou == 4 )
{
extract_alt_img_search = &extract_img_tile<uint16_t, 16+4*2>;
}
}
int num_tiles_h = ref_img.size().height / (curr_tile_size / 2) - 1;
int num_tiles_w = ref_img.size().width / (curr_tile_size / 2 ) - 1;
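// Half-overlapping tiles: a new tile starts every curr_tile_size/2 pixels, so
// a dimension of H pixels holds H / (curr_tile_size / 2) - 1 full tiles.
// E.g. H = 64 with tile size 16: 64/8 - 1 = 7 tiles starting at 0, 8, ..., 48.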
/* Upsample previous layer alignment */
std::vector<std::vector<std::pair<int, int>>> upsampled_prev_aligement;
// Coarsest level
// prev_alignment is invalid / empty, construct alignment as (0,0)
if ( prev_tile_size == -1 )
{
upsampled_prev_aligement.resize( num_tiles_h, \
std::vector<std::pair<int, int>>( num_tiles_w, std::pair<int, int>(0, 0) ) );
}
// Upsample previous level alignment
else
{
upsample_alignment_func_ptr( prev_aligement, upsampled_prev_aligement, \
num_tiles_h, num_tiles_w, ref_img, alt_img, false );
// printf("\n!!!!!Upsampled previous alignment\n");
// for ( int tile_row = 0; tile_row < int(upsampled_prev_aligement.size()); tile_row++ )
// {
// for ( int tile_col = 0; tile_col < int(upsampled_prev_aligement.at(0).size()); tile_col++ )
// {
// const auto tile_start = upsampled_prev_aligement.at( tile_row ).at( tile_col );
// printf("up tile (%d, %d) -> start idx (%d, %d)\n", \
// tile_row, tile_col, tile_start.first, tile_start.second);
// }
// }
}
#ifndef NDEBUG
printf("%s::%s start: \n", __FILE__, __func__ );
printf(" scale_factor_prev_curr %d, tile_size %d, prev_tile_size %d, search_radiou %d, distance L%d, \n", \
scale_factor_prev_curr, curr_tile_size, prev_tile_size, search_radiou, distance_type );
printf(" ref img size h=%d w=%d, alt img size h=%d w=%d, \n", \
ref_img.size().height, ref_img.size().width, alt_img.size().height, alt_img.size().width );
printf(" num tile h (upsampled) %d, num tile w (upsampled) %d\n", num_tiles_h, num_tiles_w);
#endif
// allocate memory for current alignment
curr_alignment.resize( num_tiles_h, std::vector<std::pair<int, int>>( num_tiles_w, std::pair<int, int>(0, 0) ) );
/* Pad alternative image */
cv::Mat alt_img_pad;
cv::copyMakeBorder( alt_img, \
alt_img_pad, \
search_radiou, search_radiou, search_radiou, search_radiou, \
cv::BORDER_CONSTANT, cv::Scalar( UINT_LEAST16_MAX ) );
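// The pad value UINT_LEAST16_MAX is deliberate: padded pixels hold the largest
// 16-bit value, so any candidate tile that overlaps the border yields a large
// L1/L2 distance and is effectively never chosen. Illustrative geometry (sizes
// are hypothetical): with search_radiou == 4, a 1000 x 1500 alt_img becomes a
// 1008 x 1508 alt_img_pad, and a tile that keeps the reference start index is
// implicitly centered in its (2 * 4 + 1)^2 search grid.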
// printf("Reference image h=%d, w=%d: \n", ref_img.size().height, ref_img.size().width );
// print_img<uint16_t>( ref_img );
// printf("Alter image pad h=%d, w=%d: \n", alt_img_pad.size().height, alt_img_pad.size().width );
// print_img<uint16_t>( alt_img_pad );
// printf("!! enlarged tile size %d\n", curr_tile_size + 2 * search_radiou );
int alt_tile_row_idx_max = alt_img_pad.size().height - ( curr_tile_size + 2 * search_radiou );
int alt_tile_col_idx_max = alt_img_pad.size().width - ( curr_tile_size + 2 * search_radiou );
// TODO: delete the distances vector below; it is for debugging only
std::vector<std::vector<uint16_t>> distances( num_tiles_h, std::vector<uint16_t>( num_tiles_w, 0 ));
/* Iterate through all reference tile & compute distance */
#pragma omp parallel for collapse(2)
for ( int ref_tile_row_i = 0; ref_tile_row_i < num_tiles_h; ref_tile_row_i++ )
{
for ( int ref_tile_col_i = 0; ref_tile_col_i < num_tiles_w; ref_tile_col_i++ )
{
// Upper left index of reference tile
int ref_tile_row_start_idx_i = ref_tile_row_i * curr_tile_size / 2;
int ref_tile_col_start_idx_i = ref_tile_col_i * curr_tile_size / 2;
// printf("\nRef img tile [%d, %d] -> start idx [%d, %d] (row, col)\n", \
// ref_tile_row_i, ref_tile_col_i, ref_tile_row_start_idx_i, ref_tile_col_start_idx_i );
// printf("\nRef img tile [%d, %d]\n", ref_tile_row_i, ref_tile_col_i );
// print_tile<uint16_t>( ref_img, curr_tile_size, ref_tile_row_start_idx_i, ref_tile_col_start_idx_i );
// Upsampled alignment at this tile
// Alignment are relative displacement in pixel value
int prev_alignment_row_i = upsampled_prev_aligement.at( ref_tile_row_i ).at( ref_tile_col_i ).first;
int prev_alignment_col_i = upsampled_prev_aligement.at( ref_tile_row_i ).at( ref_tile_col_i ).second;
// Alternative image tile start idx
int alt_tile_row_start_idx_i = ref_tile_row_start_idx_i + prev_alignment_row_i;
int alt_tile_col_start_idx_i = ref_tile_col_start_idx_i + prev_alignment_col_i;
// Ensure alternative image tile within range
if ( alt_tile_row_start_idx_i < 0 )
alt_tile_row_start_idx_i = 0;
if ( alt_tile_col_start_idx_i < 0 )
alt_tile_col_start_idx_i = 0;
if ( alt_tile_row_start_idx_i > alt_tile_row_idx_max )
{
// int before = alt_tile_row_start_idx_i;
alt_tile_row_start_idx_i = alt_tile_row_idx_max;
// printf("@@ change start x from %d to %d\n", before, alt_tile_row_idx_max);
}
if ( alt_tile_col_start_idx_i > alt_tile_col_idx_max )
{
// int before = alt_tile_col_start_idx_i;
alt_tile_col_start_idx_i = alt_tile_col_idx_max;
// printf("@@ change start y from %d to %d\n", before, alt_tile_col_idx_max );
}
// Explicitly caching reference image tile
cv::Mat ref_img_tile_i = extract_ref_img_tile( ref_img, ref_tile_row_start_idx_i, ref_tile_col_start_idx_i );
cv::Mat alt_img_search_i = extract_alt_img_search( alt_img_pad, alt_tile_row_start_idx_i, alt_tile_col_start_idx_i );
// Because the alternative image is padded by the search radius on every side,
// using the reference tile's start index automatically covers the full
// (tile size + 2 * search radius) search window.
// printf("Alt image tile [%d, %d]-> start idx [%d, %d]\n", \
// ref_tile_row_i, ref_tile_col_i, alt_tile_row_start_idx_i, alt_tile_col_start_idx_i );
// printf("\nAlt image tile [%d, %d]\n", ref_tile_row_i, ref_tile_col_i );
// print_tile<uint16_t>( alt_img_pad, curr_tile_size + 2 * search_radiou, alt_tile_row_start_idx_i, alt_tile_col_start_idx_i );
// Search based on L1/L2 distance
unsigned long long min_distance_i = ULLONG_MAX;
int min_distance_row_i = -1;
int min_distance_col_i = -1;
for ( int search_row_j = 0; search_row_j < ( search_radiou * 2 + 1 ); search_row_j++ )
{
for ( int search_col_j = 0; search_col_j < ( search_radiou * 2 + 1 ); search_col_j++ )
{
// printf("\n--->tile at [%d, %d] search (%d, %d)\n", \
// ref_tile_row_i, ref_tile_col_i, search_row_j - search_radiou, search_col_j - search_radiou );
// unsigned long long distance_j = distance_func_ptr( ref_img, alt_img_pad, \
// ref_tile_row_start_idx_i, ref_tile_col_start_idx_i, \
// alt_tile_row_start_idx_i + search_row_j, alt_tile_col_start_idx_i + search_col_j );
// unsigned long long distance_j = distance_func_ptr( ref_img_tile_i, alt_img_pad, \
// 0, 0, \
// alt_tile_row_start_idx_i + search_row_j, alt_tile_col_start_idx_i + search_col_j );
unsigned long long distance_j = distance_func_ptr( ref_img_tile_i, alt_img_search_i, \
0, 0, \
search_row_j, search_col_j );
// printf("<---tile at [%d, %d] search (%d, %d), new dis %llu, old dis %llu\n", \
// ref_tile_row_i, ref_tile_col_i, search_row_j - search_radiou, search_col_j - search_radiou, distance_j, min_distance_i );
// If this is smaller distance
if ( distance_j < min_distance_i )
{
min_distance_i = distance_j;
min_distance_col_i = search_col_j;
min_distance_row_i = search_row_j;
}
// If same value, choose the one closer to the original tile location
else if ( distance_j == min_distance_i && min_distance_row_i != -1 && min_distance_col_i != -1 )
{
int prev_distance_row_2_ref = min_distance_row_i - search_radiou;
int prev_distance_col_2_ref = min_distance_col_i - search_radiou;
int curr_distance_row_2_ref = search_row_j - search_radiou;
int curr_distance_col_2_ref = search_col_j - search_radiou;
int prev_distance_2_ref_sqr = prev_distance_row_2_ref * prev_distance_row_2_ref + prev_distance_col_2_ref * prev_distance_col_2_ref;
int curr_distance_2_ref_sqr = curr_distance_row_2_ref * curr_distance_row_2_ref + curr_distance_col_2_ref * curr_distance_col_2_ref;
// previous min distance idx is farther away from ref tile start location
if ( prev_distance_2_ref_sqr > curr_distance_2_ref_sqr )
{
// printf("@@@ Same distance %d, choose closer one (%d, %d) instead of (%d, %d)\n", \
// distance_j, search_row_j, search_col_j, min_distance_row_i, min_distance_col_i);
min_distance_col_i = search_col_j;
min_distance_row_i = search_row_j;
}
}
}
}
// printf("tile at (%d, %d) alignment (%d, %d)\n", \
// ref_tile_row_i, ref_tile_col_i, min_distance_row_i, min_distance_col_i );
int alignment_row_i = prev_alignment_row_i + min_distance_row_i - search_radiou;
int alignment_col_i = prev_alignment_col_i + min_distance_col_i - search_radiou;
std::pair<int, int> alignment_i( alignment_row_i, alignment_col_i );
// Record min_distance_i's corresponding idx as the alignment
curr_alignment.at( ref_tile_row_i ).at( ref_tile_col_i ) = alignment_i;
distances.at( ref_tile_row_i ).at( ref_tile_col_i ) = min_distance_i;
}
}
// printf("\n!!!!!Min distance for each tile \n");
// for ( int tile_row = 0; tile_row < num_tiles_h; tile_row++ )
// {
// for ( int tile_col = 0; tile_col < num_tiles_w; ++tile_col )
// {
// printf("tile (%d, %d) distance %u\n", \
// tile_row, tile_col, distances.at( tile_row).at(tile_col ) );
// }
// }
// printf("\n!!!!!Alignment at current level\n");
// for ( int tile_row = 0; tile_row < num_tiles_h; tile_row++ )
// {
// for ( int tile_col = 0; tile_col < num_tiles_w; tile_col++ )
// {
// const auto tile_start = curr_alignment.at( tile_row ).at( tile_col );
// printf("tile (%d, %d) -> start idx (%d, %d)\n", \
// tile_row, tile_col, tile_start.first, tile_start.second);
// }
// }
}
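// A minimal sketch of driving align_image_level at the coarsest pyramid level
// (values are illustrative; the real call site is in align::process below):
//
// std::vector<std::vector<std::pair<int, int>>> prev_alignment, curr_alignment;
// // empty prev_alignment + prev_tile_size == -1 => alignment initialized to (0, 0)
// align_image_level( coarsest_ref, coarsest_alt, prev_alignment, curr_alignment,
//     /*scale_factor_prev_curr=*/-1, /*curr_tile_size=*/8,
//     /*prev_tile_size=*/-1, /*search_radiou=*/4, /*distance_type=*/1 );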
void align::process( const hdrplus::burst& burst_images, \
std::vector<std::vector<std::vector<std::pair<int, int>>>>& images_alignment )
{
#ifndef NDEBUG
printf("%s::%s align::process start\n", __FILE__, __func__ ); fflush(stdout);
#endif
images_alignment.clear();
images_alignment.resize( burst_images.num_images );
// image pyramid per image, per pyramid level
std::vector<std::vector<cv::Mat>> per_grayimg_pyramid;
// printf("!!!!! ref bayer padded\n");
// print_img<uint16_t>( burst_images.bayer_images_pad.at( burst_images.reference_image_idx) );
// exit(1);
// printf("!!!!! ref gray padded\n");
// print_img<uint16_t>( burst_images.grayscale_images_pad.at( burst_images.reference_image_idx) );
// exit(1);
per_grayimg_pyramid.resize( burst_images.num_images );
#pragma omp parallel for
for ( int img_idx = 0; img_idx < burst_images.num_images; ++img_idx )
{
// per_grayimg_pyramid[ img_idx ][ 0 ] is the original image
// per_grayimg_pyramid[ img_idx ][ 3 ] is the coarsest image
build_per_grayimg_pyramid( per_grayimg_pyramid.at( img_idx ), \
burst_images.grayscale_images_pad.at( img_idx ), \
this->inv_scale_factors );
}
// #ifndef NDEBUG
// printf("%s::%s build image pyramid of size : ", __FILE__, __func__ );
// for ( int level_i = 0; level_i < num_levels; ++level_i )
// {
// printf("(%d, %d) ", per_grayimg_pyramid[ 0 ][ level_i ].size().height,
// per_grayimg_pyramid[ 0 ][ level_i ].size().width );
// }
// printf("\n"); fflush(stdout);
// #endif
// print image pyramid
// for ( int level_i; level_i < num_levels; ++level_i )
// {
// printf("\n\n!!!!! ref gray pyramid level %d img : \n" , level_i );
// print_img<uint16_t>( per_grayimg_pyramid[ burst_images.reference_image_idx ][ level_i ] );
// }
// exit(-1);
// Align every image
const std::vector<cv::Mat>& ref_grayimg_pyramid = per_grayimg_pyramid[ burst_images.reference_image_idx ];
for ( int img_idx = 0; img_idx < burst_images.num_images; ++img_idx )
{
// Do not align with reference image
if ( img_idx == burst_images.reference_image_idx )
continue;
const std::vector<cv::Mat>& alt_grayimg_pyramid = per_grayimg_pyramid[ img_idx ];
// Align every level from coarse to fine
// level 0 : finest level, the original image
// level 3 : coarsest level
std::vector<std::vector<std::pair<int, int>>> curr_alignment;
std::vector<std::vector<std::pair<int, int>>> prev_alignment;
for ( int level_i = num_levels - 1; level_i >= 0; level_i-- ) // 3,2,1,0
{
// move the current alignment into the previous alignment slot
prev_alignment.swap( curr_alignment );
curr_alignment.clear();
// printf("\n\n########################align level %d\n", level_i );
align_image_level(
ref_grayimg_pyramid[ level_i ], // reference image at current level
alt_grayimg_pyramid[ level_i ], // alternative image at current level
prev_alignment, // previous layer alignment
curr_alignment, // current layer alignment
( level_i == ( num_levels - 1 ) ? -1 : inv_scale_factors[ level_i + 1 ] ), // scale factor between previous layer and current layer. -1 if current layer is the coarsest layer, [-1, 4, 4, 2]
grayimg_tile_sizes[ level_i ], // current level tile size
( level_i == ( num_levels - 1 ) ? -1 : grayimg_tile_sizes[ level_i + 1 ] ), // previous level tile size
grayimg_search_radious[ level_i ], // search radius
distances[ level_i ] ); // L1/L2 distance
// printf("@@@Alignment at level %d is h=%d, w=%d", level_i, curr_alignment.size(), curr_alignment.at(0).size() );
} // for pyramid level
// Alignment at grayscale image
images_alignment.at( img_idx ).swap( curr_alignment );
// printf("\n!!!!!Alternative Image Alignment\n");
// for ( int tile_row = 0; tile_row < images_alignment.at( img_idx ).size(); tile_row++ )
// {
// for ( int tile_col = 0; tile_col < images_alignment.at( img_idx ).at(0).size(); tile_col++ )
// {
// const auto tile_start = images_alignment.at( img_idx ).at( tile_row ).at( tile_col );
// printf("tile (%d, %d) -> start idx (%d, %d)\n", \
// tile_row, tile_col, tile_start.first, tile_start.second);
// }
// }
} // for alternative image
}
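// Shape of the result consumed by merge: images_alignment[ img_idx ] is left
// empty for the reference image; for every alternative image it holds
// [ tile_row ][ tile_col ] -> (row, col) pixel displacement on the padded
// grayscale image. A hedged usage sketch:
//
// hdrplus::align align_module;
// std::vector<std::vector<std::vector<std::pair<int, int>>>> alignments;
// align_module.process( burst_images, alignments );
// // alignments.at( img ).at( y ).at( x ) -> displacement of grayscale tile (y, x)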
} // namespace hdrplus

@ -0,0 +1,102 @@
#include <string>
#include <cstdio>
#include <iostream>
#include <utility> // std::pair, std::make_pair
#include <memory> // std::shared_ptr
#include <stdexcept> // std::runtime_error
#include <opencv2/opencv.hpp> // all opencv header
#include <libraw/libraw.h>
#include <exiv2/exiv2.hpp> // exiv2
#include <cassert> // assert
#include "hdrplus/bayer_image.h"
#include "hdrplus/utility.h" // box_filter_kxk
namespace hdrplus
{
bayer_image::bayer_image( const std::string& bayer_image_path )
{
libraw_processor = std::make_shared<LibRaw>();
// Open RAW image file
int return_code;
if ( ( return_code = libraw_processor->open_file( bayer_image_path.c_str() ) ) != LIBRAW_SUCCESS )
{
libraw_processor->recycle();
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Error opening file " + bayer_image_path + " " + libraw_strerror( return_code ));
#endif
}
// Unpack the raw image
if ( ( return_code = libraw_processor->unpack() ) != LIBRAW_SUCCESS )
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Error unpack file " + bayer_image_path + " " + libraw_strerror( return_code ));
#endif
}
// Get image basic info
width = int( libraw_processor->imgdata.rawdata.sizes.raw_width );
height = int( libraw_processor->imgdata.rawdata.sizes.raw_height );
// Read exif tags
Exiv2::Image::AutoPtr image = Exiv2::ImageFactory::open(bayer_image_path);
assert(image.get() != 0);
image->readMetadata();
Exiv2::ExifData &exifData = image->exifData();
if (exifData.empty()) {
std::string error(bayer_image_path);
error += ": No Exif data found in the file";
std::cout << error << std::endl;
}
white_level = exifData["Exif.Image.WhiteLevel"].toLong();
black_level_per_channel.resize( 4 );
black_level_per_channel.at(0) = exifData["Exif.Image.BlackLevel"].toLong(0);
black_level_per_channel.at(1) = exifData["Exif.Image.BlackLevel"].toLong(1);
black_level_per_channel.at(2) = exifData["Exif.Image.BlackLevel"].toLong(2);
black_level_per_channel.at(3) = exifData["Exif.Image.BlackLevel"].toLong(3);
iso = exifData["Exif.Image.ISOSpeedRatings"].toLong();
// Create CV mat
// https://answers.opencv.org/question/105972/de-bayering-a-cr2-image/
// https://www.libraw.org/node/2141
raw_image = cv::Mat( height, width, CV_16U, libraw_processor->imgdata.rawdata.raw_image ).clone(); // changed the order of width and height
// 2x2 box filter
grayscale_image = box_filter_kxk<uint16_t, 2>( raw_image );
#ifndef NDEBUG
printf("%s::%s read bayer image %s with\n width %zu\n height %zu\n iso %.3f\n white level %d\n black level %d %d %d %d\n", \
__FILE__, __func__, bayer_image_path.c_str(), width, height, iso, white_level, \
black_level_per_channel[0], black_level_per_channel[1], black_level_per_channel[2], black_level_per_channel[3] );
fflush( stdout );
#endif
}
std::pair<double, double> bayer_image::get_noise_params() const
{
// Set ISO to 100 if not positive
double iso_ = iso <= 0 ? 100 : iso;
// Calculate shot noise and read noise parameters w.r.t ISO 100
double lambda_shot_p = iso_ / 100.0f * baseline_lambda_shot;
double lambda_read_p = (iso_ / 100.0f) * (iso_ / 100.0f) * baseline_lambda_read;
double black_level = (black_level_per_channel[0] + \
black_level_per_channel[1] + \
black_level_per_channel[2] + \
black_level_per_channel[3]) / 4.0;
// Rescale shot and read noise to normal range
double lambda_shot = lambda_shot_p * (white_level - black_level);
double lambda_read = lambda_read_p * (white_level - black_level) * (white_level - black_level);
// return pair
return std::make_pair(lambda_shot, lambda_read);
}
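// Worked example of the scaling above (numbers are illustrative): at ISO 400,
// lambda_shot_p is 400 / 100 = 4x the baseline and lambda_read_p is
// (400 / 100)^2 = 16x the baseline. With white_level = 1023 and a mean black
// level of 64, the usable range is 959, giving
// lambda_shot = 4 * baseline_lambda_shot * 959 and
// lambda_read = 16 * baseline_lambda_read * 959 * 959.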
}

@ -0,0 +1,181 @@
#include <cstdio>
#include <string>
#include <omp.h>
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/burst.h"
#include "hdrplus/utility.h"
namespace hdrplus
{
burst::burst( const std::string& burst_path, const std::string& reference_image_path )
{
std::vector<cv::String> bayer_image_paths;
// Search through the input path directory to get all input image path
if ( burst_path.at( burst_path.size() - 1) == '/')
cv::glob( burst_path + "*.dng", bayer_image_paths, false );
else
cv::glob( burst_path + "/*.dng", bayer_image_paths, false );
#ifndef NDEBUG
for ( const auto& bayer_img_path_i : bayer_image_paths )
{
printf("img i path %s\n", bayer_img_path_i.c_str()); fflush(stdout);
}
printf("ref img path %s\n", reference_image_path.c_str()); fflush(stdout);
#endif
// Number of images
num_images = bayer_image_paths.size();
// Find reference image path in input directory
// reference image path needs to be an absolute path
reference_image_idx = -1;
for ( size_t i = 0; i < bayer_image_paths.size(); ++i )
{
if ( bayer_image_paths[ i ] == reference_image_path )
{
reference_image_idx = i;
}
}
if ( reference_image_idx == -1 )
{
return;
// throw std::runtime_error("Error unable to locate reference image " + reference_image_path );
}
#ifndef NDEBUG
for ( const auto& bayer_image_path_i : bayer_image_paths )
{
printf("%s::%s Find image %s\n", \
__FILE__, __func__, bayer_image_path_i.c_str());
}
printf("%s::%s reference image idx %d\n", \
__FILE__, __func__, reference_image_idx );
#endif
// Get source bayer image
// Downsample original bayer image by 2x2 box filter
for ( const auto& bayer_image_path_i : bayer_image_paths )
{
bayer_images.emplace_back( bayer_image_path_i );
}
// Pad information
int tile_size_bayer = 32;
int padding_top = tile_size_bayer / 2;
int padding_bottom = tile_size_bayer / 2 + \
( (bayer_images[ 0 ].height % tile_size_bayer) == 0 ? \
0 : tile_size_bayer - bayer_images[ 0 ].height % tile_size_bayer );
int padding_left = tile_size_bayer / 2;
int padding_right = tile_size_bayer / 2 + \
( (bayer_images[ 0 ].width % tile_size_bayer) == 0 ? \
0 : tile_size_bayer - bayer_images[ 0 ].width % tile_size_bayer );
padding_info_bayer = std::vector<int>{ padding_top, padding_bottom, padding_left, padding_right };
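// Worked example of the padding arithmetic (illustrative size): with
// tile_size_bayer = 32 and a 3024-row image, 3024 % 32 == 16, so
// padding_top = 16 and padding_bottom = 16 + (32 - 16) = 32; the padded
// height 16 + 3024 + 32 = 3072 is an exact multiple of the tile size.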
// Pad bayer image
for ( const auto& bayer_image_i : bayer_images )
{
cv::Mat bayer_image_pad_i;
cv::copyMakeBorder( bayer_image_i.raw_image, \
bayer_image_pad_i, \
padding_top, padding_bottom, padding_left, padding_right, \
cv::BORDER_REFLECT );
// cv::Mat uses internal reference counting
bayer_images_pad.emplace_back( bayer_image_pad_i );
grayscale_images_pad.emplace_back( box_filter_kxk<uint16_t, 2>( bayer_image_pad_i ) );
}
#ifndef NDEBUG
printf("%s::%s Pad bayer image from (%d, %d) -> (%d, %d)\n", \
__FILE__, __func__, \
bayer_images[ 0 ].height, \
bayer_images[ 0 ].width, \
bayer_images_pad[ 0 ].size().height, \
bayer_images_pad[ 0 ].size().width );
printf("%s::%s pad top %d, buttom %d, left %d, right %d\n", \
__FILE__, __func__, \
padding_top, padding_bottom, padding_left, padding_right );
#endif
}
burst::burst( const std::vector<std::string>& bayer_image_paths, int reference_image_index )
{
// Number of images
num_images = bayer_image_paths.size();
// Find reference image path in input directory
// reference image path needs to be an absolute path
reference_image_idx = -1;
if ( reference_image_index >= 0 && reference_image_index < (int)bayer_image_paths.size() )
{
reference_image_idx = reference_image_index;
}
if ( reference_image_idx == -1 )
{
return;
// throw std::runtime_error("Error reference image index is out of range " );
}
#ifndef NDEBUG
for ( const auto& bayer_image_path_i : bayer_image_paths )
{
printf("%s::%s Find image %s\n", \
__FILE__, __func__, bayer_image_path_i.c_str());
}
printf("%s::%s reference image idx %d\n", \
__FILE__, __func__, reference_image_idx );
#endif
// Get source bayer image
// Downsample original bayer image by 2x2 box filter
for ( const auto& bayer_image_path_i : bayer_image_paths )
{
bayer_images.emplace_back( bayer_image_path_i );
}
// Pad information
int tile_size_bayer = 32;
int padding_top = tile_size_bayer / 2;
int padding_bottom = tile_size_bayer / 2 + \
( (bayer_images[ 0 ].height % tile_size_bayer) == 0 ? \
0 : tile_size_bayer - bayer_images[ 0 ].height % tile_size_bayer );
int padding_left = tile_size_bayer / 2;
int padding_right = tile_size_bayer / 2 + \
( (bayer_images[ 0 ].width % tile_size_bayer) == 0 ? \
0 : tile_size_bayer - bayer_images[ 0 ].width % tile_size_bayer );
padding_info_bayer = std::vector<int>{ padding_top, padding_bottom, padding_left, padding_right };
// Pad bayer image
for ( const auto& bayer_image_i : bayer_images )
{
cv::Mat bayer_image_pad_i;
cv::copyMakeBorder( bayer_image_i.raw_image, \
bayer_image_pad_i, \
padding_top, padding_bottom, padding_left, padding_right, \
cv::BORDER_REFLECT );
// cv::Mat uses internal reference counting
bayer_images_pad.emplace_back( bayer_image_pad_i );
grayscale_images_pad.emplace_back( box_filter_kxk<uint16_t, 2>( bayer_image_pad_i ) );
}
#ifndef NDEBUG
printf("%s::%s Pad bayer image from (%d, %d) -> (%d, %d)\n", \
__FILE__, __func__, \
bayer_images[ 0 ].height, \
bayer_images[ 0 ].width, \
bayer_images_pad[ 0 ].size().height, \
bayer_images_pad[ 0 ].size().width );
printf("%s::%s pad top %d, buttom %d, left %d, right %d\n", \
__FILE__, __func__, \
padding_top, padding_bottom, padding_left, padding_right );
#endif
}
} // namespace hdrplus

@ -0,0 +1,784 @@
#include <iostream>
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/finish.h"
#include "hdrplus/utility.h"
#include <cmath>
#include <fstream> // std::ofstream in writeCSV
#ifdef __ANDROID__
#define DBG_OUTPUT_ROOT "/sdcard/com.xypower.mpapp/tmp/"
#else
#define DBG_OUTPUT_ROOT ""
#endif
// #include <type_traits>
namespace hdrplus
{
cv::Mat convert16bit2_8bit_(cv::Mat ans){
if(ans.type()==CV_16UC3){
cv::MatIterator_<cv::Vec3w> it, end;
for( it = ans.begin<cv::Vec3w>(), end = ans.end<cv::Vec3w>(); it != end; ++it)
{
// std::cout<<sizeof (*it)[0] <<std::endl;
(*it)[0] *=(255.0/USHRT_MAX);
(*it)[1] *=(255.0/USHRT_MAX);
(*it)[2] *=(255.0/USHRT_MAX);
}
ans.convertTo(ans, CV_8UC3);
}else if(ans.type()==CV_16UC1){
u_int16_t* ptr = (u_int16_t*)ans.data;
int end = ans.rows*ans.cols;
for(int i=0;i<end;i++){
*(ptr+i) *=(255.0/USHRT_MAX);
}
ans.convertTo(ans, CV_8UC1);
}else{
std::cout<<"Unsupported Data Type"<<std::endl;
}
return ans;
}
cv::Mat convert8bit2_16bit_(cv::Mat ans){
if(ans.type()==CV_8UC3){
ans.convertTo(ans, CV_16UC3);
cv::MatIterator_<cv::Vec3w> it, end;
for( it = ans.begin<cv::Vec3w>(), end = ans.end<cv::Vec3w>(); it != end; ++it)
{
// std::cout<<sizeof (*it)[0] <<std::endl;
(*it)[0] *=(65535.0/255.0);
(*it)[1] *=(65535.0/255.0);
(*it)[2] *=(65535.0/255.0);
}
}else if(ans.type()==CV_8UC1){
ans.convertTo(ans, CV_16UC1);
u_int16_t* ptr = (u_int16_t*)ans.data;
int end = ans.rows*ans.cols;
for(int i=0;i<end;i++){
*(ptr+i) *=(65535.0/255.0);
}
}else{
std::cout<<"Unsupported Data Type"<<std::endl;
}
return ans;
}
cv::Mat convert8bit2_12bit_(cv::Mat ans){
// convert to 16 bit first so the iterator below reads the buffer correctly;
// iterating an 8-bit Mat as cv::Vec3w would misinterpret the data
ans.convertTo(ans, CV_16UC3);
cv::MatIterator_<cv::Vec3w> it, end;
for( it = ans.begin<cv::Vec3w>(), end = ans.end<cv::Vec3w>(); it != end; ++it)
{
(*it)[0] *=(2048.0/255.0);
(*it)[1] *=(2048.0/255.0);
(*it)[2] *=(2048.0/255.0);
}
return ans;
}
uint16_t uGammaCompress_1pix(float x, float threshold,float gainMin,float gainMax,float exponent){
// Normalize pixel val
x/=USHRT_MAX;
// check the val against the threshold
if(x<=threshold){
x =gainMin*x;
}else{
x = gainMax* pow(x,exponent)-gainMax+1;
}
// clip
if(x<0){
x=0;
}else{
if(x>1){
x = 1;
}
}
x*=USHRT_MAX;
return (uint16_t)x;
}
uint16_t uGammaDecompress_1pix(float x, float threshold,float gainMin,float gainMax,float exponent){
// Normalize pixel val
x/=65535.0;
// check the val against the threshold
if(x<=threshold){
x = x/gainMin;
}else{
x = pow((x+gainMax-1)/gainMax,exponent);
}
// clip
if(x<0){
x=0;
}else{
if(x>1){
x = 1;
}
}
x*=65535;
return (uint16_t)x;
}
cv::Mat uGammaCompress_(cv::Mat m,float threshold,float gainMin,float gainMax,float exponent){
if(m.type()==CV_16UC3){
cv::MatIterator_<cv::Vec3w> it, end;
for( it = m.begin<cv::Vec3w>(), end = m.end<cv::Vec3w>(); it != end; ++it)
{
(*it)[0] =uGammaCompress_1pix((*it)[0],threshold,gainMin,gainMax,exponent);
(*it)[1] =uGammaCompress_1pix((*it)[1],threshold,gainMin,gainMax,exponent);
(*it)[2] =uGammaCompress_1pix((*it)[2],threshold,gainMin,gainMax,exponent);
}
}else if(m.type()==CV_16UC1){
u_int16_t* ptr = (u_int16_t*)m.data;
int end = m.rows*m.cols;
for(int i=0;i<end;i++){
*(ptr+i) = uGammaCompress_1pix(*(ptr+i),threshold,gainMin,gainMax,exponent);
}
}else{
std::cout<<"Unsupported Data Type"<<std::endl;
}
return m;
}
cv::Mat uGammaDecompress_(cv::Mat m,float threshold,float gainMin,float gainMax,float exponent){
if(m.type()==CV_16UC3){
cv::MatIterator_<cv::Vec3w> it, end;
for( it = m.begin<cv::Vec3w>(), end = m.end<cv::Vec3w>(); it != end; ++it)
{
(*it)[0] =uGammaDecompress_1pix((*it)[0],threshold,gainMin,gainMax,exponent);
(*it)[1] =uGammaDecompress_1pix((*it)[1],threshold,gainMin,gainMax,exponent);
(*it)[2] =uGammaDecompress_1pix((*it)[2],threshold,gainMin,gainMax,exponent);
}
}else if(m.type()==CV_16UC1){
u_int16_t* ptr = (u_int16_t*)m.data;
int end = m.rows*m.cols;
for(int i=0;i<end;i++){
*(ptr+i) = uGammaDecompress_1pix(*(ptr+i),threshold,gainMin,gainMax,exponent);
}
}else{
std::cout<<"Unsupported Data Type"<<std::endl;
}
return m;
}
cv::Mat gammasRGB(cv::Mat img, bool mode){
if(mode){// compress
return uGammaCompress_(img,0.0031308, 12.92, 1.055, 1. / 2.4);
}else{ // decompress
return uGammaDecompress_(img, 0.04045, 12.92, 1.055, 2.4);
}
}
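// For reference, gammasRGB implements the standard sRGB transfer pair on
// values normalized to [0, 1] (the constants passed above are the usual
// sRGB ones):
//
// compress:   y = 12.92 * x                     if x <= 0.0031308
//             y = 1.055 * x^(1/2.4) - 0.055     otherwise
// decompress: x = y / 12.92                     if y <= 0.04045
//             x = ((y + 0.055) / 1.055)^2.4     otherwise
//
// Note that gainMax * pow(x, exponent) - gainMax + 1 in uGammaCompress_1pix
// equals 1.055 * x^(1/2.4) - 0.055 for gainMax = 1.055.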
void copy_mat_16U_2(u_int16_t* ptr_A, cv::Mat B){
// u_int16_t* ptr_A = (u_int16_t*)A.data;
u_int16_t* ptr_B = (u_int16_t*)B.data;
for(int r = 0; r < B.rows; r++) {
for(int c = 0; c < B.cols; c++) {
*(ptr_A+r*B.cols+c) = *(ptr_B+r*B.cols+c);
}
}
}
cv::Mat mean_(cv::Mat img){
// initialize processedImg
int H = img.rows;
int W = img.cols;
cv::Mat processedImg = cv::Mat(H,W,CV_16UC1);
u_int16_t* ptr = (u_int16_t*)processedImg.data;
// traverse img
int idx = 0;
cv::MatIterator_<cv::Vec3w> it, end;
for( it = img.begin<cv::Vec3w>(), end = img.end<cv::Vec3w>(); it != end; ++it)
{
uint32_t tmp = (*it)[0]+(*it)[1]+(*it)[2];
uint16_t avg_val = tmp/3;
*(ptr+idx) = avg_val;
idx++;
}
return processedImg;
}
double getMean(cv::Mat img){
u_int16_t* ptr = (u_int16_t*)img.data;
int max_idx = img.rows*img.cols*img.channels();
double sum=0;
for(int i=0;i<max_idx;i++){
sum += *(ptr+i);
}
sum/=max_idx;
sum/=USHRT_MAX;
return sum;
}
cv::Mat matMultiply_scalar(cv::Mat img,float gain){
u_int16_t* ptr = (u_int16_t*)img.data;
int max_idx = img.rows*img.cols*img.channels();
for(int i=0;i<max_idx;i++){
double tmp = *(ptr+i)*gain;
if(tmp<0){
*(ptr+i)=0;
}else if(tmp>USHRT_MAX){
*(ptr+i) = USHRT_MAX;
}else{
*(ptr+i)=(u_int16_t)tmp;
}
}
return img;
}
double getSaturated(cv::Mat img, double threshold){
threshold *= USHRT_MAX;
double count=0;
u_int16_t* ptr = (u_int16_t*)img.data;
int max_idx = img.rows*img.cols*img.channels();
for(int i=0;i<max_idx;i++){
if(*(ptr+i)>threshold){
count++;
}
}
return count/(double)max_idx;
}
cv::Mat meanGain_(cv::Mat img,int gain){
if(img.channels()!=3){
std::cout<<"unsupport img type in meanGain_()"<<std::endl;
return cv::Mat();
}else{ // RGB img
int H = img.rows;
int W = img.cols;
cv::Mat processedImg = cv::Mat(H,W,CV_16UC1);
u_int16_t* ptr = (u_int16_t*)processedImg.data;
int idx=0;
cv::MatIterator_<cv::Vec3w> it, end;
for( it = img.begin<cv::Vec3w>(), end = img.end<cv::Vec3w>(); it != end; ++it)
{
double sum = 0;
// R
double tmp = (*it)[0]*gain;
if(tmp<0) tmp=0;
if(tmp>USHRT_MAX) tmp = USHRT_MAX;
sum+=tmp;
// G
tmp = (*it)[1]*gain;
if(tmp<0) tmp=0;
if(tmp>USHRT_MAX) tmp = USHRT_MAX;
sum+=tmp;
// B
tmp = (*it)[2]*gain;
if(tmp<0) tmp=0;
if(tmp>USHRT_MAX) tmp = USHRT_MAX;
sum+=tmp;
// put into processedImg
uint16_t avg_val = sum/3;
*(ptr+idx) = avg_val;
idx++;
}
return processedImg;
}
}
cv::Mat applyScaling_(cv::Mat mergedImage, cv::Mat shortGray, cv::Mat fusedGray){
cv::Mat result = mergedImage.clone();
u_int16_t* ptr_shortg = (u_int16_t*)shortGray.data;
u_int16_t* ptr_fusedg = (u_int16_t*)fusedGray.data;
int count = 0;
cv::MatIterator_<cv::Vec3w> it, end;
for( it = result.begin<cv::Vec3w>(), end = result.end<cv::Vec3w>(); it != end; ++it)
{
double s = 1;
if(*(ptr_shortg+count)!=0){
s = *(ptr_fusedg+count);
s/=*(ptr_shortg+count);
}
for(int c=0;c<mergedImage.channels();c++){
double tmp = (*it)[c]*s;
if(tmp<0){
(*it)[c] = 0;
}else if(tmp>USHRT_MAX){
(*it)[c] = USHRT_MAX;
}else{
(*it)[c] = tmp;
}
}
}
return result;
}
void localToneMap(cv::Mat& mergedImage, Options options, cv::Mat& shortg,
cv::Mat& longg, cv::Mat& fusedg, int& gain){
std::cout<<"HDR Tone Mapping..."<<std::endl;
// # Work with grayscale images
cv::Mat shortGray = rgb_2_gray<uint16_t, uint16_t, CV_16U>(mergedImage); //mean_(mergedImage);
std::cout<<"--- Compute grayscale image"<<std::endl;
// compute gain
gain = 0;
if(options.ltmGain==-1){
double dsFactor = 25;
int down_height = round(shortGray.rows/dsFactor);
int down_width = round(shortGray.cols/dsFactor);
cv::Mat shortS;
// cv::Size is (width, height); interpolation is the named 6th argument
cv::resize(shortGray,shortS,cv::Size(down_width,down_height),0,0,cv::INTER_LINEAR);
shortS = shortS.reshape(1,1);
bool bestGain = false;
double compression = 1.0;
double saturated = 0.0;
cv::Mat shortSg = gammasRGB(shortS.clone(), true);
double sSMean = getMean(shortSg);
while((compression < 1.9 && saturated < .95)||((!bestGain) && (compression < 6) && (gain < 30) && (saturated < 0.33))){
gain += 2;
cv::Mat longSg = gammasRGB(shortS.clone()*gain, true);
double lSMean = getMean(longSg);
compression = lSMean / sSMean;
bestGain = lSMean > (1 - sSMean) / 2; // only works if burst underexposed
saturated = getSaturated(longSg,0.95);
if(options.verbose==4){
}
}
}else{
if(options.ltmGain>0){
gain = options.ltmGain;
}
}
std::cout<<"--- Compute gain"<<std::endl;
// create a synthetic long exposure
cv::Mat longGray = meanGain_(mergedImage.clone(),gain);
std::cout<<"--- Synthetic long expo"<<std::endl;
// apply gamma correction to both
longg = gammasRGB(longGray.clone(), true);
shortg = gammasRGB(shortGray.clone(),true);
std::cout<<"--- Apply Gamma correction"<<std::endl;
// perform tone mapping by exposure fusion in grayscale
cv::Ptr<cv::MergeMertens> mergeMertens = cv::createMergeMertens();
std::cout<<"--- Create Mertens"<<std::endl;
// hack: cv2 mergeMertens expects inputs between 0 and 255
// but the result is scaled between 0 and 1 (some values can actually be greater than 1!)
std::vector<cv::Mat> src_expos;
src_expos.push_back(convert16bit2_8bit_(shortg.clone()));
src_expos.push_back(convert16bit2_8bit_(longg.clone()));
mergeMertens->process(src_expos, fusedg);
fusedg = fusedg*USHRT_MAX;
fusedg.convertTo(fusedg, CV_16UC1);
std::cout<<"--- Apply Mertens"<<std::endl;
// undo gamma correction
cv::Mat fusedGray = gammasRGB(fusedg.clone(), false);
// cv::imwrite("fusedg_degamma.png", fusedGray);
std::cout<<"--- Un-apply Gamma correction"<<std::endl;
// scale each RGB channel of the short exposure accordingly
mergedImage = applyScaling_(mergedImage, shortGray, fusedGray);
std::cout<<"--- Scale channels"<<std::endl;
}
u_int16_t enhanceContrast_1pix(u_int16_t pix_val,double gain){
double x = pix_val;
x/=USHRT_MAX;
x = x - gain*sin(2*M_PI*x);
if(x<0){
x = 0;
}else if(x>1){
x = 1;
}
u_int16_t result = x*USHRT_MAX;
return result;
}
cv::Mat enhanceContrast(cv::Mat image, Options options){
if(options.gtmContrast>=0 && options.gtmContrast<=1){
u_int16_t* ptr = (u_int16_t*)image.data;
int end = image.rows*image.cols*image.channels();
for(int idx = 0;idx<end;idx++){
*(ptr+idx) = enhanceContrast_1pix(*(ptr+idx),options.gtmContrast);
}
}else{
std::cout<<"GTM ignored, expected a contrast enhancement ratio between 0 and 1"<<std::endl;
}
return image;
}
cv::Mat distL1_(cv::Mat X, cv::Mat Y){
int end_x = X.rows*X.cols*X.channels();
int end_y = Y.rows*Y.cols*Y.channels();
cv::Mat result = cv::Mat(X.rows,X.cols,X.type());
if(end_x==end_y){
u_int16_t* ptr_x = (u_int16_t*)X.data;
u_int16_t* ptr_y = (u_int16_t*)Y.data;
u_int16_t* ptr_r = (u_int16_t*)result.data;
for(int i=0;i<end_x;i++){
if(*(ptr_x+i)<*(ptr_y+i)){
*(ptr_r+i) = *(ptr_y+i) - *(ptr_x+i);
}else{
*(ptr_r+i) = *(ptr_x+i) - *(ptr_y+i);
}
}
}else{
std::cout<<"Mat size not match. distL1_ failed!"<<std::endl;
}
return result;
}
cv::Mat sharpenTriple_(cv::Mat image,
cv::Mat blur0, cv::Mat low0, float th0, float k0,
cv::Mat blur1, cv::Mat low1, float th1, float k1,
cv::Mat blur2, cv::Mat low2, float th2, float k2){
// create result mat
cv::Mat result = cv::Mat(image.rows,image.cols,image.type());
// initialize raw data pointers
u_int16_t* ptr_r = (u_int16_t*)result.data;
u_int16_t* ptr_img = (u_int16_t*)image.data;
u_int16_t* ptr_blur0 = (u_int16_t*)blur0.data;
u_int16_t* ptr_low0 = (u_int16_t*)low0.data;
u_int16_t* ptr_blur1 = (u_int16_t*)blur1.data;
u_int16_t* ptr_low1 = (u_int16_t*)low1.data;
u_int16_t* ptr_blur2 = (u_int16_t*)blur2.data;
u_int16_t* ptr_low2 = (u_int16_t*)low2.data;
int n_channels = image.channels();
int end = image.rows*image.cols*n_channels;
// traverse Image
for(int idx = 0;idx<end;idx++){
double r, r0, r1, r2;
double x = *(ptr_img+idx);
double l0 = *(ptr_low0+idx)/(double)USHRT_MAX;
double l1 = *(ptr_low1+idx)/(double)USHRT_MAX;
double l2 = *(ptr_low2+idx)/(double)USHRT_MAX;
double b0 = *(ptr_blur0+idx);
double b1 = *(ptr_blur1+idx);
double b2 = *(ptr_blur2+idx);
r0 = l0<th0? x:x+k0*(x-b0);
r1 = l1<th1? x:x+k1*(x-b1);
r2 = l2<th2? x:x+k2*(x-b2);
r = (r0+r1+r2)/3.0;
if(r<0) r=0;
if(r>USHRT_MAX) r = USHRT_MAX;
*(ptr_r+idx) = (u_int16_t)r;
}
return result;
}
cv::Mat sharpenTriple(cv::Mat image, Tuning tuning, Options options){
// sharpen the image using unsharp masking
std::vector<float> amounts = tuning.sharpenAmount;
std::vector<float> sigmas = tuning.sharpenSigma;
std::vector<float> thresholds = tuning.sharpenThreshold;
// Compute all Gaussian blur
cv::Mat blur0,blur1,blur2;
cv::GaussianBlur(image,blur0,cv::Size(0,0),sigmas[0]);
cv::GaussianBlur(image,blur1,cv::Size(0,0),sigmas[1]);
cv::GaussianBlur(image,blur2,cv::Size(0,0),sigmas[2]);
std::cout<<" --- gaussian blur"<<std::endl;
// cv::imwrite("blur2.png", blur2);
// Compute all low contrast images
cv::Mat low0 = distL1_(blur0, image);
cv::Mat low1 = distL1_(blur1, image);
cv::Mat low2 = distL1_(blur2, image);
std::cout<<" --- low contrast"<<std::endl;
// cv::imwrite("low2.png", low2);
// Compute the triple sharpen
cv::Mat sharpImage = sharpenTriple_(image,
blur0, low0, thresholds[0], amounts[0],
blur1, low1, thresholds[1], amounts[1],
blur2, low2, thresholds[2], amounts[2]);
std::cout<<" --- sharpen"<<std::endl;
return sharpImage;
}
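// The per-pixel rule inside sharpenTriple_ is unsharp masking gated by local
// contrast: for each blur scale s,
//
// r_s = x                        if low_s < threshold_s  (flat region, keep)
// r_s = x + k_s * (x - blur_s)   otherwise               (edge, amplify)
//
// and the output is the mean of the three r_s values clipped to [0, USHRT_MAX].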
void copy_mat_16U_3(u_int16_t* ptr_A, cv::Mat B){
// u_int16_t* ptr_A = (u_int16_t*)A.data;
u_int16_t* ptr_B = (u_int16_t*)B.data;
int H = B.rows;
int W = B.cols;
int end = H*W;
for(int i=0;i<end;i++){
*(ptr_A+i) = *(ptr_B+i);
}
}
// void copy_mat_16U_3(u_int16_t* ptr_A, cv::Mat B){
// // u_int16_t* ptr_A = (u_int16_t*)A.data;
// u_int16_t* ptr_B = (u_int16_t*)B.data;
// for(int r = 0; r < B.rows; r++) {
// for(int c = 0; c < B.cols; c++) {
// *(ptr_A+r*B.cols+c) = *(ptr_B+r*B.cols+c);
// }
// }
// }
cv::Mat processMergedMat(cv::Mat mergedImg, int opencv_type){
cv::Mat m;
uint16_t* ptr = (uint16_t*)mergedImg.data;
for(int r = 0; r < mergedImg.rows; r++) {
std::vector<int> dvals;
for(int c = 0; c < mergedImg.cols; c++) {
dvals.push_back(*(ptr+r*mergedImg.cols+c));
}
cv::Mat mline(dvals, true);
cv::transpose(mline, mline);
m.push_back(mline);
}
int ch = CV_MAT_CN(opencv_type);
m = m.reshape(ch);
m.convertTo(m, opencv_type);
return m;
}
void show20_20(cv::Mat m){
u_int16_t* ptr = (u_int16_t*)m.data;
for(int i=0;i<20;i++){
for(int j=0;j<20;j++){
std::cout<<*(ptr+i*m.cols+j)<<", ";
}
std::cout<<std::endl;
}
}
void writeCSV(std::string filename, cv::Mat m)
{
std::ofstream myfile;
myfile.open(filename.c_str());
myfile<< cv::format(m, cv::Formatter::FMT_CSV) << std::endl;
myfile.close();
}
void finish::process(const hdrplus::burst& burst_images, cv::Mat& finalOutputImage){
// copy mergedBayer to rawReference
std::cout<<"finish pipeline start ..."<<std::endl;
// save merged Image value
// #ifndef HDRPLUS_NO_DETAILED_OUTPUT
writeCSV(DBG_OUTPUT_ROOT "merged.csv",burst_images.merged_bayer_image);
// #endif
this->refIdx = burst_images.reference_image_idx;
// this->burstPath = burstPath;
// std::cout<<"processMerged:"<<std::endl;
// show20_20(mergedB);
this->mergedBayer = loadFromCSV(DBG_OUTPUT_ROOT "merged.csv", CV_16UC1);
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
// this->mergedBayer = processMergedMat(mergedB,CV_16UC1);//loadFromCSV("merged.csv", CV_16UC1);
// std::cout<<"processMerged:"<<std::endl;
// show20_20(this->mergedBayer);
// this->mergedBayer = loadFromCSV(DBG_OUTPUT_ROOT "merged.csv", CV_16UC1);
// this->mergedBayer = processMergedMat(burst_images.merged_bayer_image, CV_16UC1);
#else
// this->mergedBayer = loadFromCSV(DBG_OUTPUT_ROOT "merged.csv", CV_16UC1);
// this->mergedBayer = processMergedMat(burst_images.merged_bayer_image, CV_16UC1);
// std::cout<<"processMerged:"<<std::endl;
#endif
// std::cout<<"csv:"<<std::endl;
// show20_20(this->mergedBayer);
// load_rawPathList(burstPath);
// read in ref img
// bayer_image* ref = new bayer_image(rawPathList[refIdx]);
bayer_image* ref = new bayer_image(burst_images.bayer_images[burst_images.reference_image_idx]);
cv::Mat processedRefImage = postprocess(ref->libraw_processor,params.rawpyArgs);
std::cout<<"size ref: "<<processedRefImage.rows<<"*"<<processedRefImage.cols<<std::endl;
// write reference image
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeReferenceImage"]){
std::cout<<"writing reference img ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(processedRefImage.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
// cv::imshow("test",processedImage);
cv::imwrite(DBG_OUTPUT_ROOT "processedRef.jpg", outputImg);
// cv::waitKey(0);
}
#endif
// write gamma reference
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeGammaReference"]){
std::cout<<"writing Gamma reference img ..."<<std::endl;
cv::Mat outputImg = gammasRGB(processedRefImage.clone(),true);
outputImg = convert16bit2_8bit_(outputImg);
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "processedRefGamma.jpg", outputImg);
}
#endif
// get the bayer_image of the merged image
// bayer_image* mergedImg = new bayer_image(rawPathList[refIdx]);
bayer_image* mergedImg = new bayer_image(burst_images.bayer_images[this->refIdx]);
mergedImg->libraw_processor->imgdata.rawdata.raw_image = (uint16_t*)this->mergedBayer.data;
// copy_mat_16U_3(mergedImg->libraw_processor->imgdata.rawdata.raw_image,this->mergedBayer);
cv::Mat processedMerge = postprocess(mergedImg->libraw_processor,params.rawpyArgs);
// write merged image
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeMergedImage"]){
std::cout<<"writing Merged img ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(processedMerge.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "mergedImg.jpg", outputImg);
}
#endif
// write gamma merged image
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeMergedImage"]){
std::cout<<"writing Gamma Merged img ..."<<std::endl;
cv::Mat outputImg = gammasRGB(processedMerge.clone(),true);
outputImg = convert16bit2_8bit_(outputImg);
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "mergedImgGamma.jpg", outputImg);
}
#endif
// step 5. HDR tone mapping
// processedImage, gain, shortExposure, longExposure, fusedExposure = localToneMap(burstPath, processedImage, options)
int gain;
if(params.options.ltmGain){
cv::Mat shortExposure, longExposure, fusedExposure;
localToneMap(processedMerge, params.options,shortExposure,longExposure,fusedExposure,gain);
std::cout<<"gain="<< gain<<std::endl;
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeShortExposure"]){
std::cout<<"writing ShortExposure img ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(shortExposure);
cv::imwrite(DBG_OUTPUT_ROOT "shortg.jpg", outputImg);
}
#endif
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeLongExposure"]){
std::cout<<"writing LongExposure img ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(longExposure);
cv::imwrite(DBG_OUTPUT_ROOT "longg.jpg", outputImg);
}
#endif
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeFusedExposure"]){
std::cout<<"writing FusedExposure img ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(fusedExposure);
cv::imwrite(DBG_OUTPUT_ROOT "fusedg.jpg", outputImg);
}
#endif
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeLTMImage"]){
std::cout<<"writing LTMImage ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(processedMerge.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "ltmGain.jpg", outputImg);
}
#endif
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeLTMGamma"]){
std::cout<<"writing LTMImage Gamma ..."<<std::endl;
cv::Mat outputImg = gammasRGB(processedMerge.clone(),true);
outputImg = convert16bit2_8bit_(outputImg);
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "ltmGain_gamma.jpg", outputImg);
}
#endif
}
// step 6 GTM: contrast enhancement / global tone mapping
if(params.options.gtmContrast){
processedMerge = enhanceContrast(processedMerge, params.options);
std::cout<<"STEP 6 -- Apply GTM"<<std::endl;
}
// apply the final sRGB gamma curve
processedMerge = gammasRGB(processedMerge.clone(),true);
std::cout<<"-- Apply Gamma"<<std::endl;
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeGTMImage"]) {
std::cout<<"writing GTMImage ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(processedMerge.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "GTM_gamma.jpg", outputImg);
}
#endif
// Step 7: sharpen
finalOutputImage = sharpenTriple(processedMerge.clone(), params.tuning, params.options);
cv::Mat& processedImage = finalOutputImage;
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeFinalImage"]){
std::cout<<"writing FinalImage ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(processedImage.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "FinalImage.jpg", outputImg);
}
#endif
// write final ref
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeReferenceFinal"]){
std::cout<<"writing Final Ref Image ..."<<std::endl;
if(params.options.ltmGain){
params.options.ltmGain = gain;
}
cv::Mat shortExposureRef, longExposureRef, fusedExposureRef;
localToneMap(processedRefImage, params.options,shortExposureRef,longExposureRef,fusedExposureRef,gain);
if(params.options.gtmContrast){ // contrast enhancement / global tone mapping
processedRefImage = enhanceContrast(processedRefImage, params.options);
}
processedRefImage = gammasRGB(processedRefImage.clone(),true);
// sharpen
processedRefImage = sharpenTriple(processedRefImage.clone(), params.tuning, params.options);
cv::Mat outputImg = convert16bit2_8bit_(processedRefImage.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "FinalReference.jpg", outputImg);
}
#endif
// End of finishing
}
void finish::copy_mat_16U(cv::Mat& A, cv::Mat B){
u_int16_t* ptr_A = (u_int16_t*)A.data;
u_int16_t* ptr_B = (u_int16_t*)B.data;
for(int r = 0; r < A.rows; r++) {
for(int c = 0; c < A.cols; c++) {
*(ptr_A+r*A.cols+c) = *(ptr_B+r*B.cols+c);
}
}
}
void finish::copy_rawImg2libraw(std::shared_ptr<LibRaw>& libraw_ptr, cv::Mat B){
u_int16_t* ptr_A = (u_int16_t*)libraw_ptr->imgdata.rawdata.raw_image;
u_int16_t* ptr_B = (u_int16_t*)B.data;
for(int r = 0; r < B.rows; r++) {
for(int c = 0; c < B.cols; c++) {
*(ptr_A+r*B.cols+c) = *(ptr_B+r*B.cols+c);
}
}
}
} // namespace hdrplus

@ -0,0 +1,55 @@
#include <cstdio>
#include <string>
#include <vector>
#include <utility> // std::pair
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/hdrplus_pipeline.h"
#include "hdrplus/burst.h"
#include "hdrplus/align.h"
#include "hdrplus/merge.h"
#include "hdrplus/finish.h"
#include <fstream>
namespace hdrplus
{
void hdrplus_pipeline::run_pipeline( \
const std::string& burst_path, \
const std::string& reference_image_path )
{
// Create burst of images
burst burst_images( burst_path, reference_image_path );
std::vector<std::vector<std::vector<std::pair<int, int>>>> alignments;
// Run align
align_module.process( burst_images, alignments );
// Run merging
merge_module.process( burst_images, alignments );
// Run finishing
cv::Mat finalImg;
finish_module.process( burst_images, finalImg);
}
bool hdrplus_pipeline::run_pipeline( \
const std::vector<std::string>& burst_paths, \
int reference_image_index, cv::Mat& finalImg )
{
// Create burst of images
burst burst_images( burst_paths, reference_image_index );
std::vector<std::vector<std::vector<std::pair<int, int>>>> alignments;
// Run align
align_module.process( burst_images, alignments );
// Run merging
merge_module.process( burst_images, alignments );
// Run finishing
finish_module.process( burst_images, finalImg);
return true;
}
} // namespace hdrplus
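// A hedged end-to-end usage sketch (paths and index are illustrative):
//
// hdrplus::hdrplus_pipeline pipeline;
// std::vector<std::string> paths = { "/tmp/burst/0.dng", "/tmp/burst/1.dng", "/tmp/burst/2.dng" };
// cv::Mat final_img;
// pipeline.run_pipeline( paths, /*reference_image_index=*/0, final_img );
// // final_img now holds the 16-bit finished image produced by finish::process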

@ -0,0 +1,333 @@
#include <opencv2/opencv.hpp> // all opencv header
#include <vector>
#include <utility>
#include "hdrplus/merge.h"
#include "hdrplus/burst.h"
#include "hdrplus/utility.h"
namespace hdrplus
{
void merge::process(hdrplus::burst& burst_images, \
std::vector<std::vector<std::vector<std::pair<int, int>>>>& alignments)
{
// 4.1 Noise Parameters and RMS
// Noise parameters calculated from baseline ISO noise parameters
double lambda_shot, lambda_read;
std::tie(lambda_shot, lambda_read) = burst_images.bayer_images[burst_images.reference_image_idx].get_noise_params();
// 4.2-4.4 Denoising and Merging
// Get padded bayer image
cv::Mat reference_image = burst_images.bayer_images_pad[burst_images.reference_image_idx];
cv::imwrite("ref.jpg", reference_image);
// Get raw channels
std::vector<cv::Mat> channels(4);
hdrplus::extract_rgb_from_bayer<uint16_t>(reference_image, channels[0], channels[1], channels[2], channels[3]);
std::vector<cv::Mat> processed_channels(4);
// For each channel, perform denoising and merge
for (int i = 0; i < 4; ++i) {
// Get channel mat
cv::Mat channel_i = channels[i];
// cv::imwrite("ref" + std::to_string(i) + ".jpg", channel_i);
// We gather channel i of every alternate image here so that a single
// processChannel call can merge this channel across the whole burst,
// with the reference channel passed alongside.
// Create list of channel_i of alternate images:
std::vector<cv::Mat> alternate_channel_i_list;
for (int j = 0; j < burst_images.num_images; j++) {
if (j != burst_images.reference_image_idx) {
//get alternate image
cv::Mat alt_image = burst_images.bayer_images_pad[j];
std::vector<cv::Mat> alt_channels(4);
hdrplus::extract_rgb_from_bayer<uint16_t>(alt_image, alt_channels[0], alt_channels[1], alt_channels[2], alt_channels[3]);
alternate_channel_i_list.push_back(alt_channels[i]);
}
}
// Apply merging on the channel
cv::Mat merged_channel = processChannel(burst_images, alignments, channel_i, alternate_channel_i_list, lambda_shot, lambda_read);
// cv::imwrite("merged" + std::to_string(i) + ".jpg", merged_channel);
// Put channel raw data back to channels
merged_channel.convertTo(processed_channels[i], CV_16U);
}
// Write all channels back to a bayer mat
cv::Mat merged(reference_image.rows, reference_image.cols, CV_16U);
int x, y;
for (y = 0; y < reference_image.rows; ++y){
uint16_t* row = merged.ptr<uint16_t>(y);
if (y % 2 == 0){
uint16_t* i0 = processed_channels[0].ptr<uint16_t>(y / 2);
uint16_t* i1 = processed_channels[1].ptr<uint16_t>(y / 2);
for (x = 0; x < reference_image.cols;){
//R
row[x] = i0[x / 2];
x++;
//G1
row[x] = i1[x / 2];
x++;
}
}
else {
uint16_t* i2 = processed_channels[2].ptr<uint16_t>(y / 2);
uint16_t* i3 = processed_channels[3].ptr<uint16_t>(y / 2);
for(x = 0; x < reference_image.cols;){
//G2
row[x] = i2[x / 2];
x++;
//B
row[x] = i3[x / 2];
x++;
}
}
}
// Remove padding
std::vector<int> padding = burst_images.padding_info_bayer;
cv::Range horizontal = cv::Range(padding[2], reference_image.cols - padding[3]);
cv::Range vertical = cv::Range(padding[0], reference_image.rows - padding[1]);
burst_images.merged_bayer_image = merged(vertical, horizontal);
cv::imwrite("merged.jpg", burst_images.merged_bayer_image);
}
std::vector<cv::Mat> merge::getReferenceTiles(cv::Mat reference_image) {
std::vector<cv::Mat> reference_tiles;
for (int y = 0; y < reference_image.rows - offset; y += offset) {
for (int x = 0; x < reference_image.cols - offset; x += offset) {
cv::Mat tile = reference_image(cv::Rect(x, y, TILE_SIZE, TILE_SIZE));
reference_tiles.push_back(tile);
}
}
return reference_tiles;
}
cv::Mat merge::mergeTiles(std::vector<cv::Mat> tiles, int num_rows, int num_cols) {
// 1. get all four subsets: original (evenly split), horizontal overlapped,
// vertical overlapped, 2D overlapped
std::vector<std::vector<cv::Mat>> tiles_original;
for (int y = 0; y < num_rows / offset - 1; y += 2) {
std::vector<cv::Mat> row;
for (int x = 0; x < num_cols / offset - 1; x += 2) {
row.push_back(tiles[y * (num_cols / offset - 1) + x]);
}
tiles_original.push_back(row);
}
std::vector<std::vector<cv::Mat>> tiles_horizontal;
for (int y = 0; y < num_rows / offset - 1; y += 2) {
std::vector<cv::Mat> row;
for (int x = 1; x < num_cols / offset - 1; x += 2) {
row.push_back(tiles[y * (num_cols / offset - 1) + x]);
}
tiles_horizontal.push_back(row);
}
std::vector<std::vector<cv::Mat>> tiles_vertical;
for (int y = 1; y < num_rows / offset - 1; y += 2) {
std::vector<cv::Mat> row;
for (int x = 0; x < num_cols / offset - 1; x += 2) {
row.push_back(tiles[y * (num_cols / offset - 1) + x]);
}
tiles_vertical.push_back(row);
}
std::vector<std::vector<cv::Mat>> tiles_2d;
for (int y = 1; y < num_rows / offset - 1; y += 2) {
std::vector<cv::Mat> row;
for (int x = 1; x < num_cols / offset - 1; x += 2) {
row.push_back(tiles[y * (num_cols / offset - 1) + x]);
}
tiles_2d.push_back(row);
}
// 2. Concatenate the four subsets
cv::Mat img_original = cat2Dtiles(tiles_original);
cv::Mat img_horizontal = cat2Dtiles(tiles_horizontal);
cv::Mat img_vertical = cat2Dtiles(tiles_vertical);
cv::Mat img_2d = cat2Dtiles(tiles_2d);
// 3. Add the four subsets together
img_original(cv::Rect(offset, 0, num_cols - TILE_SIZE, num_rows)) += img_horizontal;
img_original(cv::Rect(0, offset, num_cols, num_rows - TILE_SIZE)) += img_vertical;
img_original(cv::Rect(offset, offset, num_cols - TILE_SIZE, num_rows - TILE_SIZE)) += img_2d;
return img_original;
}
cv::Mat merge::processChannel(hdrplus::burst& burst_images, \
std::vector<std::vector<std::vector<std::pair<int, int>>>>& alignments, \
cv::Mat channel_image, \
std::vector<cv::Mat> alternate_channel_i_list,\
float lambda_shot, \
float lambda_read) {
// Get tiles of the reference image
std::vector<cv::Mat> reference_tiles = getReferenceTiles(channel_image);
// Get noise variance (sigma**2 = lambda_shot * tileRMS + lambda_read)
std::vector<float> noise_variance = getNoiseVariance(reference_tiles, lambda_shot, lambda_read);
// Apply FFT on reference tiles (spatial to frequency)
std::vector<cv::Mat> reference_tiles_DFT;
for (auto ref_tile : reference_tiles) {
cv::Mat ref_tile_DFT;
ref_tile.convertTo(ref_tile_DFT, CV_32F);
cv::dft(ref_tile_DFT, ref_tile_DFT, cv::DFT_COMPLEX_OUTPUT);
reference_tiles_DFT.push_back(ref_tile_DFT);
}
// Acquire alternate tiles and apply FFT on them as well
std::vector<std::vector<cv::Mat>> alt_tiles_list(reference_tiles.size());
int num_tiles_row = alternate_channel_i_list[0].rows / offset - 1;
int num_tiles_col = alternate_channel_i_list[0].cols / offset - 1;
for (int y = 0; y < num_tiles_row; ++y) {
for (int x = 0; x < num_tiles_col; ++x) {
std::vector<cv::Mat> alt_tiles;
// Get reference tile location
int top_left_y = y * offset;
int top_left_x = x * offset;
for (int i = 0; i < alternate_channel_i_list.size(); ++i) {
// Get alignment displacement
int displacement_y, displacement_x;
std::tie(displacement_y, displacement_x) = alignments[i + 1][y][x];
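// NOTE: indexing alignments[i + 1] assumes the reference image sits at burst
// index 0, so alternate channel i maps to burst image i + 1; with a different
// reference index this mapping would need to skip the reference entry instead.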
// Get tile
cv::Mat alt_tile = alternate_channel_i_list[i](cv::Rect(top_left_x + displacement_x, top_left_y + displacement_y, TILE_SIZE, TILE_SIZE));
// Apply FFT
cv::Mat alt_tile_DFT;
alt_tile.convertTo(alt_tile_DFT, CV_32F);
cv::dft(alt_tile_DFT, alt_tile_DFT, cv::DFT_COMPLEX_OUTPUT);
alt_tiles.push_back(alt_tile_DFT);
}
alt_tiles_list[y * num_tiles_col + x] = alt_tiles;
}
}
// 4.2 Temporal Denoising
reference_tiles_DFT = temporal_denoise(reference_tiles_DFT, alt_tiles_list, noise_variance, TEMPORAL_FACTOR);
// 4.3 Spatial Denoising
reference_tiles_DFT = spatial_denoise(reference_tiles_DFT, alternate_channel_i_list.size(), noise_variance, SPATIAL_FACTOR);
//now reference tiles are temporally and spatially denoised
// Apply IFFT on reference tiles (frequency to spatial)
std::vector<cv::Mat> denoised_tiles;
for (auto dft_tile : reference_tiles_DFT) {
cv::Mat denoised_tile;
cv::divide(dft_tile, TILE_SIZE * TILE_SIZE, dft_tile);
cv::dft(dft_tile, denoised_tile, cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT);
denoised_tiles.push_back(denoised_tile);
}
reference_tiles = denoised_tiles;
// 4.4 Cosine Window Merging
// Process tiles through 2D cosine window
std::vector<cv::Mat> windowed_tiles;
for (auto tile : reference_tiles) {
windowed_tiles.push_back(cosineWindow2D(tile));
}
// Merge tiles
return mergeTiles(windowed_tiles, channel_image.rows, channel_image.cols);
}
std::vector<cv::Mat> merge::temporal_denoise(std::vector<cv::Mat> tiles, std::vector<std::vector<cv::Mat>> alt_tiles, std::vector<float> noise_variance, float temporal_factor) {
// goal: temporally denoise using the Wiener filter
// input:
// 1. array of 2D dft tiles of the reference image
// 2. array of 2D dft tiles of the aligned alternate image
// 3. estimated noise variance
// 4. temporal factor
// return: merged image patches dft
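// Concretely, for each alternate tile the pairwise merge below computes
//
// D = T_ref - T_alt                 (per-frequency difference)
// A = |D|^2 / (|D|^2 + c)           (shrinkage, c = noise scaling * variance)
// T_pair = T_alt + A * D
//
// so well-aligned content (small |D|, A -> 0) averages in the alternate frame,
// while misaligned content (large |D|, A -> 1) falls back to the reference.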
// calculate noise scaling
double temporal_noise_scaling = (TILE_SIZE * TILE_SIZE * (2.0 / 16)) * temporal_factor;
// loop across tiles
std::vector<cv::Mat> denoised;
for (int i = 0; i < tiles.size(); ++i) {
// sum of pairwise denoising
cv::Mat tile_sum = tiles[i].clone();
double coeff = temporal_noise_scaling * noise_variance[i];
// Ref tile
cv::Mat tile = tiles[i];
// Alt tiles
std::vector<cv::Mat> alt_tiles_i = alt_tiles[i];
for (int j = 0; j < alt_tiles_i.size(); ++j) {
// Alt tile
cv::Mat alt_tile = alt_tiles_i[j];
// Tile difference
cv::Mat diff = tile - alt_tile;
// Calculate absolute difference
cv::Mat complexMats[2];
cv::split(diff, complexMats); // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
cv::magnitude(complexMats[0], complexMats[1], complexMats[0]); // planes[0] = magnitude
cv::Mat absolute_diff = complexMats[0].mul(complexMats[0]);
// find shrinkage operator A
cv::Mat shrinkage;
cv::divide(absolute_diff, absolute_diff + coeff, shrinkage);
cv::merge(std::vector<cv::Mat>{shrinkage, shrinkage}, shrinkage);
// Interpolation
tile_sum += alt_tile + diff.mul(shrinkage);
}
// Average by num of frames
cv::divide(tile_sum, alt_tiles_i.size() + 1, tile_sum);
denoised.push_back(tile_sum);
}
return denoised;
}
std::vector<cv::Mat> merge::spatial_denoise(std::vector<cv::Mat> tiles, int num_alts, std::vector<float> noise_variance, float spatial_factor) {
double spatial_noise_scaling = (TILE_SIZE * TILE_SIZE * (1.0 / 16)) * spatial_factor;
// Calculate |w| using ifftshift
cv::Mat row_distances = cv::Mat::zeros(1, TILE_SIZE, CV_32F);
for(int i = 0; i < TILE_SIZE; ++i) {
row_distances.at<float>(i) = i - offset;
}
row_distances = cv::repeat(row_distances.t(), 1, TILE_SIZE);
cv::Mat col_distances = row_distances.t();
cv::Mat distances;
cv::sqrt(row_distances.mul(row_distances) + col_distances.mul(col_distances), distances);
ifftshift(distances);
std::vector<cv::Mat> denoised;
// Loop through all tiles
for (int i = 0; i < tiles.size(); ++i) {
cv::Mat tile = tiles[i];
float coeff = noise_variance[i] / (num_alts + 1) * spatial_noise_scaling;
// Calculate absolute difference
cv::Mat complexMats[2];
cv::split(tile, complexMats); // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
cv::magnitude(complexMats[0], complexMats[1], complexMats[0]); // planes[0] = magnitude
cv::Mat absolute_diff = complexMats[0].mul(complexMats[0]);
// Division
cv::Mat scale;
cv::divide(absolute_diff, absolute_diff + distances * coeff, scale);
cv::merge(std::vector<cv::Mat>{scale, scale}, scale);
denoised.push_back(tile.mul(scale));
}
return denoised;
}
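// spatial_denoise applies the same shrinkage idea within a single tile in the
// frequency domain: each coefficient is scaled by
//
// A(w) = |T(w)|^2 / ( |T(w)|^2 + |w| * c )
//
// where |w| is the ifftshifted distance from the DC component, so high
// frequencies, where noise dominates, are attenuated more aggressively; c is
// divided by (num_alts + 1) because that many frames were already averaged.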
} // namespace hdrplus

@ -0,0 +1,53 @@
#include <iostream>
#include <opencv2/opencv.hpp> // all opencv header
#include <hdrplus/params.h>
namespace hdrplus
{
void setParams(std::shared_ptr<LibRaw>& libraw_ptr, RawpyArgs rawpyArgs){
libraw_ptr->imgdata.params.user_qual = rawpyArgs.demosaic_algorithm;
libraw_ptr->imgdata.params.half_size = rawpyArgs.half_size;
libraw_ptr->imgdata.params.use_camera_wb = rawpyArgs.use_camera_wb;
libraw_ptr->imgdata.params.use_auto_wb = rawpyArgs.use_auto_wb;
libraw_ptr->imgdata.params.no_auto_bright = rawpyArgs.no_auto_bright;
libraw_ptr->imgdata.params.output_color = rawpyArgs.output_color;
libraw_ptr->imgdata.params.gamm[0] = rawpyArgs.gamma[0];
libraw_ptr->imgdata.params.gamm[1] = rawpyArgs.gamma[1];
libraw_ptr->imgdata.params.output_bps = rawpyArgs.output_bps;
}
cv::Mat postprocess(std::shared_ptr<LibRaw>& libraw_ptr, RawpyArgs rawpyArgs){
std::cout << "postprocessing..." << std::endl;
// set parameters
setParams(libraw_ptr, rawpyArgs);
std::cout << "conversion to 16 bit using black and white levels, demosaicking, white balance, color correction..." << std::endl;
libraw_ptr->dcraw_process();
int errorcode = 0;
libraw_processed_image_t* ret_img = libraw_ptr->dcraw_make_mem_image(&errorcode);
if (ret_img == nullptr) {
std::cout << "dcraw_make_mem_image failed: " << libraw_strerror(errorcode) << std::endl;
return cv::Mat();
}
int opencv_type = CV_16UC3; // default: 16-bit RGB
if (ret_img->colors == 1) { // grayscale
opencv_type = (ret_img->bits == 8) ? CV_8UC1 : CV_16UC1;
} else { // RGB
opencv_type = (ret_img->bits == 8) ? CV_8UC3 : CV_16UC3;
}
// Wrap the LibRaw buffer, then clone so the Mat owns its pixels
// before the LibRaw image is released
cv::Mat processedImg = cv::Mat(ret_img->height, ret_img->width, opencv_type, ret_img->data).clone();
LibRaw::dcraw_clear_mem(ret_img);
std::cout << "postprocess finished!" << std::endl;
return processedImg;
}
} // namespace hdrplus
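A hypothetical call site for the function above (not part of this commit): decode one raw file with LibRaw and run postprocess on it. It assumes RawpyArgs default-constructs with sensible values, which this hunk does not show.

#include <memory>
#include <libraw/libraw.h>
#include <opencv2/opencv.hpp>
#include <hdrplus/params.h>

int main(int argc, char** argv) {
    if (argc < 3) return 1; // usage: <input.dng> <output.png>
    auto libraw_ptr = std::make_shared<LibRaw>();
    if (libraw_ptr->open_file(argv[1]) != LIBRAW_SUCCESS) return 1;
    if (libraw_ptr->unpack() != LIBRAW_SUCCESS) return 1;
    hdrplus::RawpyArgs args; // assumed defaults
    cv::Mat img = hdrplus::postprocess(libraw_ptr, args);
    if (!img.empty()) cv::imwrite(argv[2], img);
    return 0;
}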

@ -404,12 +404,46 @@ public class MicroPhotoService extends Service {
final long videoId = intent.getLongExtra("videoId", 0);
final int orientation = intent.getIntExtra("orientation", 0);
final boolean frontCamera = intent.getBooleanExtra("frontCamera", false);
final int numberOfCaptures = intent.getIntExtra("captures", 1);
final List<String> paths = new ArrayList<>();
if (numberOfCaptures > 1) {
for (int idx = 0; idx < numberOfCaptures; idx++) {
String p = intent.getStringExtra("path" + Integer.toString(idx));
if (!TextUtils.isEmpty(p)) {
paths.add(p);
}
}
}
Log.i(TAG, "Recording received(" + Long.toString(videoId) + "):" + path);
if (photoOrVideo) {
Thread thread = new Thread(new Runnable() {
@Override
public void run() {
if (numberOfCaptures == 1) {
processCapture();
} else {
processCaptures();
}
}
private void processCaptures() {
// Join the capture paths with a tab so the native side can split them
// (TextUtils.join works on all API levels, unlike String.join which needs API 26)
String pathsStr = TextUtils.join("\t", paths);
mService.burstCaptureFinished(mService.mNativeHandle, result, numberOfCaptures, pathsStr, videoId);
// The native side has consumed the raw frames; remove the temporary files
for (String p : paths) {
try {
File f = new File(p);
f.delete();
} catch (Exception ex) {
ex.printStackTrace();
}
}
}
private void processCapture() {
Bitmap bm = null;
File rawFile = new File(path);
try {
@ -600,6 +634,10 @@ public class MicroPhotoService extends Service {
intent.putExtra("rightBottomOsd", rightBottomOsd); intent.putExtra("rightBottomOsd", rightBottomOsd);
intent.putExtra("leftBottomOsd", leftBottomOsd); intent.putExtra("leftBottomOsd", leftBottomOsd);
if (photoOrVideo) {
// Request an 8-frame raw burst for the HDR+ merge pipeline
intent.putExtra("burstCaptures", 8);
}
// intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
return intent;
@ -1269,6 +1307,7 @@ cellSignalStrengthGsm.getDbm();
protected native boolean uninit(long handler);
protected native void recordingFinished(long handler, boolean photoOrVideo, boolean result, String path, long videoId);
protected native void captureFinished(long handler, boolean photoOrVideo, boolean result, Bitmap bm, long videoId);
protected native void burstCaptureFinished(long handler, boolean result, int numberOfCaptures, String pathsJoinedByTab, long photoId);
public static native long takePhoto(int channel, int preset, boolean photoOrVideo, String configFilePath, String path);
public static native void releaseDeviceHandle(long deviceHandle);
public static native boolean sendExternalPhoto(long deviceHandle, String path);
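The service joins the burst file paths with a tab before crossing JNI. A minimal sketch (assumed helper, not part of this commit) of how the native side of burstCaptureFinished might split pathsJoinedByTab back into individual paths:

#include <string>
#include <vector>

// Illustrative helper: split the tab-joined path list received from Java.
static std::vector<std::string> SplitPaths(const std::string& pathsJoinedByTab) {
    std::vector<std::string> paths;
    std::string::size_type start = 0;
    while (start <= pathsJoinedByTab.size()) {
        std::string::size_type tab = pathsJoinedByTab.find('\t', start);
        if (tab == std::string::npos) {
            if (start < pathsJoinedByTab.size())
                paths.push_back(pathsJoinedByTab.substr(start)); // trailing segment
            break;
        }
        if (tab > start)
            paths.push_back(pathsJoinedByTab.substr(start, tab - start));
        start = tab + 1; // skip the delimiter
    }
    return paths;
}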

@ -273,6 +273,9 @@ public class RawActivity extends AppCompatActivity {
private boolean mFrontCamera = false;
private int mBurstCaptures = 1;
// Guarded by synchronized (mPathsOfCapture); collects the saved burst frame paths
private final List<String> mPathsOfCapture = new ArrayList<>();
/**
 * A {@link CameraCaptureSession } for camera preview.
 */
@ -672,6 +675,12 @@ public class RawActivity extends AppCompatActivity {
mPhotoId = intent.getLongExtra("videoId", 0);
mBurstCaptures = intent.getIntExtra("burstCaptures", 1);
if (mBurstCaptures < 1) {
mBurstCaptures = 1;
}
MicroPhotoService.infoLog("RawActivity created PHOTOID=" + Long.toString(mPhotoId));
mTextureView.setAspectRatio(width, height);
@ -1205,33 +1214,41 @@ public class RawActivity extends AppCompatActivity {
final CaptureRequest.Builder captureBuilder =
mCameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_STILL_CAPTURE);
// captureBuilder.addTarget(mJpegImageReader.get().getSurface());
captureBuilder.addTarget(mRawImageReader.get().getSurface());
// Use the same AE and AF modes as the preview.
setup3AControlsLocked(captureBuilder);
// Set orientation.
int rotation = getDeviceRotation();
captureBuilder.set(CaptureRequest.JPEG_ORIENTATION,
sensorToDeviceRotation(mCharacteristics, rotation));
int numberOfCaptures = mBurstCaptures > 1 ? mBurstCaptures : 1;
List<CaptureRequest> requests = new ArrayList<>();
for (int idx = 0; idx < numberOfCaptures; idx++) {
// Set request tag to easily track results in callbacks.
captureBuilder.setTag(mRequestCounter.getAndIncrement());
CaptureRequest request = captureBuilder.build();
requests.add(request);
// Create an ImageSaverBuilder in which to collect results, and add it to the queue
// of active requests.
// ImageSaver.ImageSaverBuilder jpegBuilder = new ImageSaver.ImageSaverBuilder(this)
//         .setCharacteristics(mCharacteristics);
ImageSaver.ImageSaverBuilder rawBuilder = new ImageSaver.ImageSaverBuilder(this)
.setCharacteristics(mCharacteristics);
// mJpegResultQueue.put((int) request.getTag(), jpegBuilder);
mRawResultQueue.put((int) request.getTag(), rawBuilder);
}
mCaptureSession.captureBurst(requests, mCaptureCallback, mBackgroundHandler);
} catch (CameraAccessException e) {
e.printStackTrace();
@ -1751,6 +1768,16 @@ public class RawActivity extends AppCompatActivity {
@Override
public void run() {
saver.run();
int numberOfCaptured = 0;
synchronized (mPathsOfCapture) {
mPathsOfCapture.add(saver.getPath());
numberOfCaptured = mPathsOfCapture.size();
}
if (numberOfCaptured >= mBurstCaptures) {
if (saver.getFormat() == ImageFormat.RAW_SENSOR) {
runOnUiThread(new Runnable() {
@Override
@ -1769,6 +1796,8 @@ public class RawActivity extends AppCompatActivity {
}
}
}
});
}
}
@ -1817,13 +1846,16 @@ public class RawActivity extends AppCompatActivity {
intent.putExtra("result", result); intent.putExtra("result", result);
intent.putExtra("orientation", mOrientation); intent.putExtra("orientation", mOrientation);
intent.putExtra("frontCamera", mFrontCamera); intent.putExtra("frontCamera", mFrontCamera);
if (!TextUtils.isEmpty(path)) {
intent.putExtra("path", path); intent.putExtra("path", path);
int numberOfCaptures = mPathsOfCapture.size();
intent.putExtra("captures", numberOfCaptures);
for (int idx = 0; idx < numberOfCaptures; idx++) {
intent.putExtra("path" + Integer.toString(idx), mPathsOfCapture.get(idx));
} }
intent.putExtra("videoId", mPhotoId);
// intent.setComponent(new ComponentName(packageName, receiverName)); intent.putExtra("videoId", mPhotoId);
if (mActivityResult) { if (mActivityResult) {
setResult(RESULT_OK, intent); setResult(RESULT_OK, intent);

@ -25,7 +25,7 @@ COMPILE_MIN_SDK_VERSION=25
opencvsdk=D:/Workspace/deps/opencv-mobile-4.9.0-android
coreroot=D:/Workspace/Github/xymp/xymp/Core
hdrplusroot=D:/Workspace/deps/hdrplus_libs
ncnnroot=D:/Workspace/deps/ncnn-20240410-android-vulkan
# ncnnroot=D:/Workspace/deps/ncnn-20230517-android-vulkan
libzipsdkroot=D:/Workspace/deps/libzip-android-sdk
