增加云台机测试代码 (Add PTZ/gimbal camera test code)

hdrplus
jxjajs 7 months ago
commit 494b26d92d

@ -4,8 +4,8 @@ plugins {
// 10,00,000 major-minor-build
def AppMajorVersion = 1
def AppMinorVersion = 0
def AppBuildNumber = 190
def AppMinorVersion = 1
def AppBuildNumber = 1
def AppVersionName = AppMajorVersion + "." + AppMinorVersion + "." + AppBuildNumber
def AppVersionCode = AppMajorVersion * 100000 + AppMinorVersion * 1000 + AppBuildNumber
@ -26,7 +26,7 @@ android {
applicationId "com.xypower.mpapp"
minSdk COMPILE_MIN_SDK_VERSION as int
//noinspection ExpiredTargetSdkVersion
targetSdk 28
targetSdk TARGET_SDK_VERSION as int
versionCode AppVersionCode
versionName AppVersionName
@ -39,8 +39,8 @@ android {
cppFlags '-std=c++17 -fexceptions -Wno-error=format-security'
// cppFlags '-std=c++17 -Wno-error=format-security'
// arguments "-DANDROID_STL=c++_shared"
arguments "-DNCNN_DISABLE_EXCEPTION=OFF", "-DTERM_CORE_ROOT=" + coreroot, "-DOpenCV_DIR=" + opencvsdk + "/sdk/native/jni", "-DASIO_ROOT=" + asioroot, "-DEVPP_ROOT=" + evpproot, "-DNCNN_ROOT=" + ncnnroot
abiFilters 'arm64-v8a'
arguments "-DNCNN_DISABLE_EXCEPTION=OFF", "-DTERM_CORE_ROOT=" + coreroot, "-DOpenCV_DIR=" + opencvsdk + "/sdk/native/jni", "-DHDRPLUS_ROOT=" + hdrplusroot, "-DNCNN_ROOT=" + ncnnroot
abiFilters 'arm64-v8a', 'armeabi-v7a'
// setAbiFilters(['arm64-v8a'])
}
}
@ -52,6 +52,7 @@ android {
proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro'
}
debug {
minifyEnabled false
jniDebuggable true
testCoverageEnabled false
}
@ -74,6 +75,17 @@ android {
}
}
splits {
boolean isReleaseTask = gradle.startParameter.taskNames.any { it.contains("Release") }
// enabled on release build
abi {
enable isReleaseTask
reset()
include "armeabi-v7a", "arm64-v8a"
universalApk false
}
}
android.applicationVariants.all { variant ->
variant.outputs.all { output ->
if (outputFileName.endsWith('.apk')) {
@ -81,7 +93,9 @@ android {
if(variant.buildType.name.equals('release')) {
buildTypeFlag = "rel"
}
def fileName = "mpapp_v${defaultConfig.versionName}_${buildTypeFlag}_${new Date(System.currentTimeMillis()).format("yyyyMMdd")}.apk"
def abi = output.getFilter(com.android.build.OutputFile.ABI)
if (abi == null) abi = "all"
def fileName = "mpapp_v${defaultConfig.versionName}_${buildTypeFlag}_${new Date(System.currentTimeMillis()).format("yyyyMMdd")}_${abi}.apk"
outputFileName = fileName
}
}
@ -97,6 +111,13 @@ android {
exclude 'META-INF/INDEX.LIST'
exclude 'META-INF/io.netty.versions.properties'
exclude 'META-INF/DEPENDENCIES'
exclude 'META-INF/LICENSE-notice.md'
exclude 'META-INF/LICENSE.md'
jniLibs {
useLegacyPackaging true
}
}
}
@ -119,9 +140,13 @@ dependencies {
// implementation 'com.tencent:mmkv-static:1.3.0'
// implementation project(path: ':opencv')
implementation files('libs/devapi.aar')
debugImplementation files('libs/rtmp-client-debug.aar')
releaseImplementation files('libs/rtmp-client.aar')
implementation project(':gpuv')
// debugImplementation files('libs/rtmp-client-debug.aar')
implementation files('libs/android-openGL-canvas-1.5.4.0.aar')
implementation files('libs/rtmp-client.aar')
api project(':gpuv')
implementation project(':stream')
implementation 'dev.mobile:dadb:1.2.7'
// implementation group: 'io.netty', name: 'netty-all', version: '4.1.96.Final'
// implementation 'io.netty:netty-all:4.1.23.Final'

@ -72,6 +72,15 @@
<uses-permission android:name="android.hardware.usb.accessory" />
<queries>
<provider
android:name=".BridgeProvider"
android:authorities="com.xypower.mpapp.provider"
android:enabled="true"
android:exported="false"
android:grantUriPermissions="true" />
</queries>
<application
android:dataExtractionRules="@xml/data_extraction_rules"
android:extractNativeLibs="true"
@ -83,14 +92,28 @@
android:supportsRtl="true"
android:theme="@style/Theme.MicroPhoto"
tools:targetApi="28">
<service
android:name=".BridgeService"
<activity
android:name=".video.RawActivity"
android:exported="false"
android:screenOrientation="landscape" />
<activity
android:name=".StreamActivity"
android:exported="false"
android:screenOrientation="landscape" />
<provider
android:name=".BridgeProvider"
android:authorities="com.xypower.mpapp.provider"
android:enabled="true"
android:exported="true"></service>
android:exported="true"
android:grantUriPermissions="true" />
<activity
android:name=".BridgeActivity"
android:exported="true" />
android:exported="true"
android:process=":bridge_proc"
android:screenOrientation="landscape" />
<activity
android:name=".video.VideoActivity"
android:exported="false"
@ -177,16 +200,6 @@
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
<provider
android:name="androidx.core.content.FileProvider"
android:authorities="${applicationId}.fileProvider"
android:exported="false"
android:grantUriPermissions="true">
<meta-data
android:name="android.support.FILE_PROVIDER_PATHS"
android:resource="@xml/file_provider_paths" />
</provider>
</application>
</manifest>

@ -28,15 +28,21 @@ add_definitions(-DASIO_STANDALONE)
add_definitions(-DUSING_XY_EXTENSION)
# add_definitions(-DUSING_BREAK_PAD)
add_definitions(-DSQLITE_THREADSAFE=1)
add_definitions(-DLIBRAW_NO_MEMPOOL_CHECK=1)
# add_definitions(-DHDRPLUS_NO_DETAILED_OUTPUT=1)
add_definitions(-DHAVE_STRING_H) # for memcpy in md5.c
add_definitions(-DUSING_NRSEC)
add_definitions(-DUSING_NRSEC_VPN)
# add_definitions(-DUSING_CERT)
# add_definitions(-DUSING_DOWSE)
# OUTPUT_CAMERA_DBG_INFO: CARERA
add_definitions(-DOUTPUT_CAMERA_DBG_INFO)
# add_definitions(-DOUTPUT_CAMERA_DBG_INFO)
add_definitions(-DALIGN_HB_TIMER_TO_PHOTO)
add_definitions(-DENABLE_3V3_ALWAYS)
# set(OpenCV_DIR D:/Workspace/deps/OpenCV-android-sdk/sdk/native/jni/)
set(OPENCV_EXTRA_MODULES_PATH D:/Workspace/Github/opencv_contrib/modules)
add_definitions(-DUSING_HDRPLUS)
add_definitions(-DUSING_N938)
# include_directories(${OpenCV_DIR}/include)
# add_library( lib_opencv SHARED IMPORTED )
@ -46,10 +52,7 @@ set(OPENCV_EXTRA_MODULES_PATH D:/Workspace/Github/opencv_contrib/modules)
project("microphoto")
# message(FATAL_ERROR "OpenCV ${OpenCV_DIR}")
find_package(OpenCV REQUIRED core imgproc highgui)
find_package(OpenCV REQUIRED core imgproc highgui photo)
# find_package(OpenCV REQUIRED core imgproc)
if(OpenCV_FOUND)
include_directories(${OpenCV_INCLUDE_DIRS})
@ -69,7 +72,67 @@ endif(OpenCV_FOUND)
set(ncnn_DIR ${NCNN_ROOT}/${ANDROID_ABI}/lib/cmake/ncnn)
find_package(ncnn REQUIRED)
# include(mars/src/CMakeUtils.txt)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/libcutils/include)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/libutils/include)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/img_utils/include)
SET( IMG_UTILS_SRCS
"img_utils/src/EndianUtils.cpp"
#"img_utils/src/FileInput.cpp"
#"img_utils/src/FileOutput.cpp"
#"img_utils/src/SortedEntryVector.cpp"
"img_utils/src/Input.cpp"
"img_utils/src/Output.cpp"
"img_utils/src/Orderable.cpp"
"img_utils/src/TiffIfd.cpp"
"img_utils/src/TiffWritable.cpp"
"img_utils/src/TiffWriter.cpp"
"img_utils/src/TiffEntry.cpp"
"img_utils/src/TiffEntryImpl.cpp"
"img_utils/src/ByteArrayOutput.cpp"
"img_utils/src/DngUtils.cpp"
"img_utils/src/StripSource.cpp"
libutils/SharedBuffer.cpp
libutils/StrongPointer.cpp
DngCreator.cpp
)
message(WARNING "include_directories ${HDRPLUS_ROOT}/${ANDROID_ABI}/include")
include_directories(${HDRPLUS_ROOT}/${ANDROID_ABI}/include)
link_directories(${HDRPLUS_ROOT}/${ANDROID_ABI}/lib)
# message(WARNING "exiv2_DIR=${HDRPLUS_ROOT}/${ANDROID_ABI}/lib/cmake/exiv2")
# SET(exiv2_DIR ${HDRPLUS_ROOT}/${ANDROID_ABI}/lib/cmake/exiv2)
# list(APPEND CMAKE_PREFIX_PATH ${HDRPLUS_ROOT}/${ANDROID_ABI}/lib/cmake/exiv2)
# find_package(exiv2 REQUIRED CONFIG NAMES exiv2)
# message(STATUS "Found Exiv2 and linked")
# OpenMP
find_package(OpenMP REQUIRED)
# library
include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/hdrplus/include )
SET(HDRPLUS_LIBS raw exiv2 exiv2-xmp expat lcms2 OpenMP::OpenMP_CXX)
SET(HDRPLUS_SOURCES
hdrplus/src/align.cpp
hdrplus/src/bayer_image.cpp
hdrplus/src/burst.cpp
hdrplus/src/finish.cpp
hdrplus/src/hdrplus_pipeline.cpp
hdrplus/src/merge.cpp
hdrplus/src/params.cpp
)
SET(YAMC_INC_DIR ${CMAKE_SOURCE_DIR})
@ -236,31 +299,6 @@ include_directories(${FREETYPE_ROOT}/include)
include_directories(${TERM_CORE_ROOT})
# include_directories(${PROJECT_SOURCE_DIR}/../../../../../libs/inc/)
#[[
add_library( # Sets the name of the library.
evpp_lite
# Sets the library as a shared library.
STATIC
# Provides a relative path to your source file(s).
${EVPP_SOURCES} )
]]
IF (CMAKE_HOST_SYSTEM_NAME MATCHES "Windows")
# set(BOOST_ROOT C:/ProgramData/boost_1_82_0/)
# set(BOOST_INCLUDEDIR C:/ProgramData/boost_1_82_0/)
include_directories(C:/ProgramData/boost_1_82_0/)
ELSE()
# find_package(Boost 1.58.0 COMPONENTS)
find_package(Boost 1.58.0)
if(Boost_FOUND)
include_directories(${Boost_INCLUDE_DIRS})
else()
message(FATAL_ERROR "Boost Not Found")
endif()
endif()
# Creates and names a library, sets it as either STATIC
# or SHARED, and provides the relative paths to its source code.
# You can define multiple libraries, and CMake builds them for you.
@ -304,7 +342,7 @@ add_library( # Sets the name of the library.
SerialPort.cpp
#WeatherComm.cpp
SensorsProtocol.cpp
serialComm.cpp
SerialComm.cpp
ncnn/yolov5ncnn.cpp
@ -312,8 +350,11 @@ add_library( # Sets the name of the library.
# camera2/OpenCVFont.cpp
${HDRPLUS_SOURCES}
${CAMERA2_SOURCES}
${IMG_UTILS_SRCS}
${TERM_CORE_ROOT}/Factory.cpp
${TERM_CORE_ROOT}/FilePoster.cpp
${TERM_CORE_ROOT}/LogThread.cpp
@ -350,6 +391,7 @@ add_library( # Sets the name of the library.
${TERM_CORE_ROOT}/Client/Terminal_NW.cpp
${TERM_CORE_ROOT}/Client/UpgradeReceiver.cpp
${TERM_CORE_ROOT}/Client/Database.cpp
${TERM_CORE_ROOT}/Client/SimulatorDevice.cpp
)
@ -370,9 +412,8 @@ find_library( # Sets the name of the path variable.
# can link multiple libraries, such as libraries you define in this
# build script, prebuilt third-party libraries, or system libraries.
target_link_libraries( # Specifies the target library.
microphoto
${PROJECT_NAME}
jsoncpp
@ -385,7 +426,7 @@ target_link_libraries( # Specifies the target library.
android camera2ndk mediandk z
ncnn ${OpenCV_LIBS} sqlite3
ncnn ${OpenCV_LIBS} sqlite3 ${HDRPLUS_LIBS}
)

@ -354,7 +354,18 @@ namespace cv {
mvFn(NULL, (void*)userData);
// Update current position ( in FreeType coordinates )
#if defined(USING_HB)
currentPos.x += mFace->glyph->advance.x;
#else
if (wstr[i] == ' ')
{
currentPos.x += mFace->glyph->advance.x << 1;
}
else
{
currentPos.x += mFace->glyph->advance.x;
}
#endif
currentPos.y += mFace->glyph->advance.y;
}
delete userData;
@ -1036,7 +1047,19 @@ namespace cv {
(this->*putPixel)(dst, gPos.y + row, gPos.x + col, _colorUC8n, cl);
}
}
_org.x += (mFace->glyph->advance.x) >> 6;
#if defined(USING_HB)
_org.x += (mFace->glyph->advance.x) >> 6;
#else
if (wstr[i] == ' ')
{
_org.x += ((mFace->glyph->advance.x) >> 6) << 1;
}
else
{
_org.x += (mFace->glyph->advance.x) >> 6;
}
#endif
_org.y += (mFace->glyph->advance.y) >> 6;
}
@ -1045,7 +1068,6 @@ namespace cv {
#endif
}
int FreeType2Impl::mvFn(const FT_Vector *to, void * user)
{
if (user == NULL) { return 1; }

File diff suppressed because it is too large Load Diff

@ -0,0 +1,332 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "DngCreator_JNI"
#include <inttypes.h>
#include <string.h>
#include <algorithm>
#include <array>
#include <memory>
#include <vector>
#include <cmath>
#include <algorithm>
#include <camera/NdkCameraMetadata.h>
#include <img_utils/DngUtils.h>
#include <img_utils/TagDefinitions.h>
#include <img_utils/TiffIfd.h>
#include <img_utils/TiffWriter.h>
#include <img_utils/Output.h>
#include <img_utils/Input.h>
#include <img_utils/StripSource.h>
#include <sys/system_properties.h>
// #include "core_jni_helpers.h"
// #include "android_runtime/AndroidRuntime.h"
// #include "android_runtime/android_hardware_camera2_CameraMetadata.h"
#include <jni.h>
// #include <nativehelper/JNIHelp.h>
using namespace android;
using namespace img_utils;
// using android::base::GetProperty;
/**
* Max width or height dimension for thumbnails.
*/
// max pixel dimension for TIFF/EP
#define MAX_THUMBNAIL_DIMENSION 256
// bytes per sample
#define DEFAULT_PIXEL_STRIDE 2
// byts per pixel
#define BYTES_PER_RGB_PIX 3
#define GPS_LAT_REF_NORTH "N"
#define GPS_LAT_REF_SOUTH "S"
#define GPS_LONG_REF_EAST "E"
#define GPS_LONG_REF_WEST "W"
#define GPS_DATE_FORMAT_STR "yyyy:MM:dd"
#define TIFF_DATETIME_FORMAT "yyyy:MM:dd kk:mm:ss"
/**
 * Output sink that writes into a caller-owned std::vector<uint8_t>.
 * Used to capture the TIFF/DNG byte stream produced by TiffWriter in memory.
 */
class ByteVectorOutput : public Output {
public:
// The referenced buffer must outlive this object; writes append to it.
ByteVectorOutput(std::vector<uint8_t>& buf);
virtual ~ByteVectorOutput();
virtual status_t open();
// Writes `count` bytes starting at buf+offset; returns OK or a negative error code.
virtual status_t write(const uint8_t* buf, size_t offset, size_t count);
virtual status_t close();
protected:
// Destination buffer (held by reference — not owned).
std::vector<uint8_t>& m_buf;
};
/**
 * Input source that reads from a caller-owned std::vector<uint8_t>,
 * tracking the current read position internally.
 */
class ByteVectorInput : public Input {
public:
// The referenced buffer must outlive this object.
ByteVectorInput(const std::vector<uint8_t>& buf);
virtual ~ByteVectorInput();
/**
* Open this Input.
*
* Returns OK on success, or a negative error code.
*/
status_t open();
/**
* Read bytes into the given buffer. At most, the number of bytes given in the
* count argument will be read. Bytes will be written into the given buffer starting
* at the index given in the offset argument.
*
* Returns the number of bytes read, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
ssize_t read(uint8_t* buf, size_t offset, size_t count);
/**
* Skips bytes in the input.
*
* Returns the number of bytes skipped, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
ssize_t skip(size_t count);
/**
* Close the Input. It is not valid to call open on a previously closed Input.
*
* Returns OK on success, or a negative error code.
*/
status_t close();
protected:
// Source buffer (held by reference — not owned).
const std::vector<uint8_t>& m_buf;
// Current read offset within m_buf.
size_t m_offset;
};
/**
 * Input source that reads from a raw caller-owned byte buffer of known
 * length, tracking the current read position internally.
 */
class ByteBufferInput : public Input {
public:
// buf must remain valid for the lifetime of this object; len is its size in bytes.
ByteBufferInput(const uint8_t* buf, size_t len);
virtual ~ByteBufferInput();
/**
* Open this Input.
*
* Returns OK on success, or a negative error code.
*/
status_t open();
/**
* Read bytes into the given buffer. At most, the number of bytes given in the
* count argument will be read. Bytes will be written into the given buffer starting
* at the index given in the offset argument.
*
* Returns the number of bytes read, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
ssize_t read(uint8_t* buf, size_t offset, size_t count);
/**
* Skips bytes in the input.
*
* Returns the number of bytes skipped, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
ssize_t skip(size_t count);
/**
* Close the Input. It is not valid to call open on a previously closed Input.
*
* Returns OK on success, or a negative error code.
*/
status_t close();
protected:
// Source buffer pointer (not owned) and its total length in bytes.
const uint8_t* m_buf;
size_t m_len;
// Current read offset within m_buf.
size_t m_offset;
};
// Simple POD holding image dimensions in pixels (stand-in for the
// android.util.Size type used by the original Java DngCreator API).
struct SIZE
{
int width;
int height;
};
#define BAIL_IF_INVALID_RET_BOOL(expr, jnienv, tagId, writer) \
if ((expr) != OK) { \
return false; \
}
#define BAIL_IF_INVALID_RET_NULL_SP(expr, jnienv, tagId, writer) \
if ((expr) != OK) { \
return nullptr; \
}
#define BAIL_IF_INVALID_R(expr, jnienv, tagId, writer) \
if ((expr) != OK) { \
return -1; \
}
#define BAIL_IF_EMPTY_RET_NULL_SP(entry, jnienv, tagId, writer) \
if ((entry).count == 0) { \
return nullptr; \
}
#define BAIL_IF_EXPR_RET_NULL_SP(expr, jnienv, tagId, writer) \
if (expr) { \
return nullptr; \
}
#define ANDROID_DNGCREATOR_CTX_JNI_ID "mNativeContext"
// Pixel-layout constants and TIFF IFD indices used when emitting the DNG:
// raw data is 16-bit single-sample; thumbnails are 8-bit RGB.
enum {
BITS_PER_SAMPLE = 16,
BYTES_PER_SAMPLE = 2,
BYTES_PER_RGB_PIXEL = 3,
BITS_PER_RGB_SAMPLE = 8,
BYTES_PER_RGB_SAMPLE = 1,
SAMPLES_PER_RGB_PIXEL = 3,
SAMPLES_PER_RAW_PIXEL = 1,
// IFD ordinals: main image, sub-IFD, and the GPS info IFD.
TIFF_IFD_0 = 0,
TIFF_IFD_SUB1 = 1,
TIFF_IFD_GPSINFO = 2,
};
/**
* POD container class for GPS tag data.
*/
class GpsData {
public:
enum {
// 6 uint32 values per coordinate — presumably 3 EXIF rationals
// (numerator/denominator pairs) for deg/min/sec; TODO confirm against writer.
GPS_VALUE_LENGTH = 6,
GPS_REF_LENGTH = 2,
// Date string buffer, e.g. "yyyy:MM:dd" plus NUL (see GPS_DATE_FORMAT_STR).
GPS_DATE_LENGTH = 11,
};
uint32_t mLatitude[GPS_VALUE_LENGTH];
uint32_t mLongitude[GPS_VALUE_LENGTH];
uint32_t mTimestamp[GPS_VALUE_LENGTH];
// Reference strings, e.g. "N"/"S" and "E"/"W" (see GPS_LAT_REF_* macros).
uint8_t mLatitudeRef[GPS_REF_LENGTH];
uint8_t mLongitudeRef[GPS_REF_LENGTH];
uint8_t mDate[GPS_DATE_LENGTH];
};
// ----------------------------------------------------------------------------
/**
* Container class for the persistent native context.
*/
class NativeContext : public LightRefBase<NativeContext> {
public:
enum {
// Buffer length for the formatted capture-time string (TIFF_DATETIME_FORMAT + NUL).
DATETIME_COUNT = 20,
};
// Holds the camera characteristics and capture-result metadata used to
// populate DNG tags. Ownership of the ACameraMetadata pointers is not
// shown here — TODO confirm whether this class releases them.
NativeContext(ACameraMetadata* characteristics, ACameraMetadata* result);
virtual ~NativeContext();
// Accessor for the embedded TiffWriter used to build the DNG structure.
TiffWriter* getWriter();
ACameraMetadata* getCharacteristics() const;
ACameraMetadata* getResult() const;
// Thumbnail state: dimensions, pixel data, and whether one has been set.
uint32_t getThumbnailWidth() const;
uint32_t getThumbnailHeight() const;
const uint8_t* getThumbnail() const;
bool hasThumbnail() const;
bool setThumbnail(const std::vector<uint8_t>& buffer, uint32_t width, uint32_t height);
// EXIF orientation value for the output image.
void setOrientation(uint16_t orientation);
uint16_t getOrientation() const;
// Optional image description tag.
void setDescription(const std::string& desc);
std::string getDescription() const;
bool hasDescription() const;
// Optional GPS tag data (see GpsData).
void setGpsData(const GpsData& data);
GpsData getGpsData() const;
bool hasGpsData() const;
// Formatted capture time written into the TIFF DateTime tags.
void setCaptureTime(const std::string& formattedCaptureTime);
std::string getCaptureTime() const;
bool hasCaptureTime() const;
protected:
std::vector<uint8_t> mCurrentThumbnail;
TiffWriter mWriter;
ACameraMetadata* mCharacteristics;
ACameraMetadata* mResult;
uint32_t mThumbnailWidth;
uint32_t mThumbnailHeight;
uint16_t mOrientation;
// Flags tracking which optional fields have been provided.
bool mThumbnailSet;
bool mGpsSet;
bool mDescriptionSet;
bool mCaptureTimeSet;
std::string mDescription;
GpsData mGpsData;
std::string mFormattedCaptureTime;
};
// Native port of android.hardware.camera2.DngCreator: builds a DNG file
// (in-memory byte vector) from raw sensor data plus camera metadata.
class DngCreator : public NativeContext
{
public:
DngCreator(ACameraMetadata* characteristics, ACameraMetadata* result);
#if 0
void setLocation(Location location);
#endif
// Writes a complete DNG for `pixels` (raw stream, `offset` bytes skipped)
// into dngOutput. `size` gives the image dimensions in pixels.
void writeInputStream(std::vector<uint8_t>& dngOutput, SIZE size, const std::vector<uint8_t>& pixels, long offset);
void writeByteBuffer(std::vector<uint8_t>& dngOutput, SIZE size, const std::vector<uint8_t>& pixels, long offset);
#if 0
void writeImage(OutputStream& dngOutput, AImage& pixels);
#endif
void close();
// private static final DateFormat sExifGPSDateStamp = new SimpleDateFormat(GPS_DATE_FORMAT_STR);
// private static final DateFormat sDateTimeStampFormat = new SimpleDateFormat(TIFF_DATETIME_FORMAT);
#if 0
static {
sDateTimeStampFormat.setTimeZone(TimeZone.getDefault());
sExifGPSDateStamp.setTimeZone(TimeZone.getTimeZone("UTC"));
}
#endif
/**
* Offset, rowStride, and pixelStride are given in bytes. Height and width are given in pixels.
*/
void writeByteBuffer(int width, int height, const std::vector<uint8_t>& pixels, std::vector<uint8_t>& dngOutput, int pixelStride, int rowStride, long offset);
/**
* Generate a direct RGB {@link ByteBuffer} from a {@link Bitmap}.
*/
/**
* Convert coordinate to EXIF GPS tag format.
*/
// Converts a decimal lat/long into 6 ints (presumably 3 EXIF rational
// pairs — TODO confirm against implementation).
void toExifLatLong(double value, int data[6]);
// Lifecycle: init() stores metadata/capture time, setup() prepares a
// TiffWriter for the given dimensions, destroy() releases state.
void init(ACameraMetadata* characteristics, ACameraMetadata* result, const std::string& captureTime);
sp<TiffWriter> setup(uint32_t imageWidth, uint32_t imageHeight);
void destroy();
void setGpsTags(const std::vector<int>& latTag, const std::string& latRef, const std::vector<int>& longTag, const std::string& longRef, const std::string& dateTag, const std::vector<int>& timeTag);
// Low-level raw writers; `isDirect` selects the buffer-copy strategy.
void writeImage(std::vector<uint8_t>& out, uint32_t width, uint32_t height, const std::vector<uint8_t>& rawBuffer, int rowStride, int pixStride, uint64_t offset, bool isDirect);
void writeInputStream(std::vector<uint8_t>& out, const std::vector<uint8_t>& rawStream, uint32_t width, uint32_t height, long offset);
void writeInputBuffer(std::vector<uint8_t>& out, const uint8_t* rawBuffer, size_t bufferLen, uint32_t width, uint32_t height, long offset);
};

@ -31,19 +31,9 @@ typedef struct
char str[MAX_STRING_LEN];
}IOT_PARAM;
// Aggregated weather-station sample; raw sensor readings (units not shown here).
typedef struct{
float airtemp; /* air temperature */
float RH; /* relative humidity */
float atmos; /* atmospheric pressure */
float windspeed; /* wind speed */
float winddirection; /* wind direction */
float rainfall; /* rainfall */
float sunshine; /* sunshine (duration or intensity — TODO confirm) */
}Weather;
void GpioControl::setInt(int cmd, int value)
{
int fd = open("/dev/mtkgpioctrl", O_RDONLY);
int fd = open(GPIO_NODE_MP, O_RDONLY);
IOT_PARAM param;
param.cmd = cmd;
param.value = value;
@ -59,7 +49,7 @@ void GpioControl::setInt(int cmd, int value)
int GpioControl::getInt(int cmd)
{
int fd = open("/dev/mtkgpioctrl", O_RDONLY);
int fd = open(GPIO_NODE_MP, O_RDONLY);
// LOGE("get_int fd=%d,cmd=%d\r\n",fd, cmd);
if( fd > 0 )
{
@ -77,7 +67,7 @@ int GpioControl::getInt(int cmd)
void GpioControl::setLong(int cmd, long value)
{
int fd = open("/dev/mtkgpioctrl", O_RDONLY);
int fd = open(GPIO_NODE_MP, O_RDONLY);
IOT_PARAM param;
param.cmd = cmd;
param.value2 = value;
@ -93,7 +83,7 @@ void GpioControl::setLong(int cmd, long value)
long GpioControl::getLong(int cmd)
{
int fd = open("/dev/mtkgpioctrl", O_RDONLY);
int fd = open(GPIO_NODE_MP, O_RDONLY);
// LOGE("get_long fd=%d,cmd=%d\r\n",fd, cmd);
if( fd > 0 )
{
@ -110,8 +100,7 @@ long GpioControl::getLong(int cmd)
void GpioControl::setString(int cmd, const std::string& value)
{
IOT_PARAM param;
// char *pval = jstringToChars(env, value);
int fd = open("/dev/mtkgpioctrl", O_RDONLY);
int fd = open(GPIO_NODE_MP, O_RDONLY);
int len = MAX_STRING_LEN < value.size() ? MAX_STRING_LEN : value.size();
param.cmd = cmd;
@ -129,7 +118,7 @@ void GpioControl::setString(int cmd, const std::string& value)
std::string GpioControl::getString(int cmd)
{
int fd = open("/dev/mtkgpioctrl", O_RDONLY);
int fd = open(GPIO_NODE_MP, O_RDONLY);
// LOGE("get_string fd=%d,cmd=%d\r\n",fd, cmd);
if( fd > 0 )
{
@ -142,3 +131,65 @@ std::string GpioControl::getString(int cmd)
}
return "";
}
#ifdef USING_N938
#if 0
// Sends an "out <cmd> <val>" text command to the GPIO driver node.
// Returns true if the device node could be opened and the ioctl was issued.
// NOTE: this function is currently compiled out (#if 0); the original body
// would not compile if enabled: snprintf was called without its size
// argument, the copy source was an undefined identifier `value`, and a bare
// `return;` appeared in a bool-returning function. All three are fixed here.
bool GpioControl::SetN938Cmd(int cmd, int val)
{
    char buf[32] = { 0 };
    snprintf(buf, sizeof(buf), "out %d %d", cmd, val);

    IOT_PARAM param;
    size_t len = strlen(buf);
    if (len > MAX_STRING_LEN)
    {
        len = MAX_STRING_LEN; // clamp to the driver's string capacity
    }
    param.cmd = cmd;
    memset(param.str, 0, MAX_STRING_LEN);
    memcpy(param.str, buf, len); // copy the formatted command, not the raw value

    int fd = open(GPIO_NODE_MP, O_RDONLY);
    if (fd > 0)
    {
        ioctl(fd, IOT_PARAM_WRITE, &param);
        close(fd);
        return true;
    }
    return false; // device node unavailable
}
#endif
// Powers up the external sensor rails and RS485 transceivers.
// Declared as `static bool OpenSensors()` in GpioControl.h; the original
// body ended with `return 0;`, i.e. it reported failure (false) on the
// success path — fixed to return true. The GPIO write helpers report no
// errors through this path, so there is nothing else to propagate.
bool GpioControl::OpenSensors()
{
    // Board-level 3V3 and RS485 enable first.
    GpioControl::setCam3V3Enable(true);
    GpioControl::setInt(CMD_SET_485_EN_STATE, 1);
    int igpio;

    // Power on each attached sensor group.
    GpioControl::setInt(CMD_SET_WTH_POWER, 1);
    GpioControl::setInt(CMD_SET_PULL_POWER, 1);
    GpioControl::setInt(CMD_SET_ANGLE_POWER, 1);
    GpioControl::setInt(CMD_SET_OTHER_POWER, 1);
    GpioControl::setInt(CMD_SET_PIC1_POWER, 1);

    // Read the states back. The results are intentionally unchecked —
    // presumably the read-back latches/settles the driver state; TODO
    // confirm whether these reads are required or can be dropped.
    igpio = GpioControl::getInt(CMD_SET_WTH_POWER);
    igpio = GpioControl::getInt(CMD_SET_PULL_POWER);
    igpio = GpioControl::getInt(CMD_SET_ANGLE_POWER);
    igpio = GpioControl::getInt(CMD_SET_OTHER_POWER);
    igpio = GpioControl::getInt(CMD_SET_PIC1_POWER);

    // SPI-to-serial bridge power plus the five RS485 enable lines.
    GpioControl::setInt(CMD_SET_SPI_POWER, 1);
    GpioControl::setInt(CMD_SET_485_en0, 1);
    GpioControl::setInt(CMD_SET_485_en1, 1);
    GpioControl::setInt(CMD_SET_485_en2, 1);
    GpioControl::setInt(CMD_SET_485_en3, 1);
    GpioControl::setInt(CMD_SET_485_en4, 1);

    igpio = GpioControl::getInt(CMD_SET_SPI_POWER);
    igpio = GpioControl::getInt(CMD_SET_485_en0);
    igpio = GpioControl::getInt(CMD_SET_485_en1);
    igpio = GpioControl::getInt(CMD_SET_485_en2);
    igpio = GpioControl::getInt(CMD_SET_485_en3);
    igpio = GpioControl::getInt(CMD_SET_485_en4);
    (void)igpio; // silence unused-value warning

    return true; // was `return 0;` — false — despite successful completion
}
#endif

@ -6,6 +6,8 @@
#define MICROPHOTO_GPIOCONTROL_H
#include <string>
#include <chrono>
#include <thread>
#define CMD_GET_LIGHT_ADC 101
#define CMD_SET_LIGHT_ADC 102
@ -36,16 +38,86 @@
#define CMD_SET_CAM_3V3_EN_STATE 132
#define CMD_SET_12V_EN_STATE 133
#define CMD_SET_SYSTEM_RESET 202
#define CMD_SET_WTH_POWER 490
#define CMD_SET_PULL_POWER 491
#define CMD_SET_ANGLE_POWER 492
#define CMD_SET_OTHER_POWER 493
#define CMD_SET_PIC1_POWER 494
#define CMD_SET_485_en0 301
#define CMD_SET_485_en1 302
#define CMD_SET_485_en2 303
#define CMD_SET_485_en3 304
#define CMD_SET_485_en4 305
#ifdef USING_N938
#define CMD_SET_485_EN_STATE 131
#define CMD_SET_CAM_3V3_EN_STATE 132
#define CMD_SET_12V_EN_STATE 133
#define CMD_SET_485_STATE 121
#define CMD_SET_SPI_MODE 123
#define CMD_SET_SPI_BITS_PER_WORD 124
#define CMD_SET_SPI_MAXSPEEDHZ 125
#define CMD_SET_SPI_POWER 129
#define CMD_SET_WTH_POWER 490
#define CMD_SET_PULL_POWER 491
#define CMD_SET_ANGLE_POWER 492
#define CMD_SET_OTHER_POWER 493
#define CMD_SET_PIC1_POWER 494
#define CMD_SET_GPIO157_POWER 510
#define CMD_SET_GPIO5_POWER 511
#define CMD_SET_PWM_BEE_STATE 126
#define CMD_SET_ALM_MODE 128
#define CMD_SET_485_en0 301
#define CMD_SET_485_en1 302
#define CMD_SET_485_en2 303
#define CMD_SET_485_en3 304
#define CMD_SET_485_en4 305
#define CMD_SET_OTG_STATE 107
#define CMD_GET_OTG_STATE 108
#if 0
#define CMD_485_0_DE 156 // 485_0 DE信号
#define CMD_485_0_PWR_EN 157 // 485_0 电源使能
#define CMD_485_0_1_DE_EN 171 // 485_0&1DE电平转换芯片使能信号
#define CMD_485_1_DE 172 //
#define CMD_SET_CAM_3V3_EN_STATE 72 // 整板3V3上电使能
#define CMD_3V3_SWITCH_EN 45 // 整板485_3V3信号电平转换电源使能
#define CMD_UART0_EN 73 // 预留UART0电平转换芯片使能
#define CMD_485_1_PWR_EN 5 // 485_1 电源使能
#define CMD_485_3_DE 6 // 485_3 DE信号
#define CMD_485_2_DE 7 // 485_2 DE信号
#define CMD_485_4_DE 13 // 485_4 DE信号
#define CMD_NETWORK_PWR_EN 94 // 100M网络电源使能
#define CMD_485_2_PWR_EN 92 // 485_2 电源使能
#define CMD_485_3_PWR_EN 91 // 485_3 电源使能
#define CMD_485_4_PWR_EN 90 // 485_4 电源使能
#define CMD_SEC_EN 27 // 加密芯片上电使能
#define CMD_485_2_3_DE_EN 26 // 485_2&3 DE电平转换芯片使能信号
#define CMD_5V_PWR_EN 14 // 整板5V0上电使能
#define CMD_SD_CARD_DECT 15 // SD CARD DECT
#define CMD_PIC1_EN 16
#define CMD_OTHER_EN 21
#define CMD_ANGLE_EN 22
#define CMD_PULL_EN 23
#define CMD_WEATHER_EN 24
#define CMD_LED_CTRL 46
#define CMD_BD_EN 47
#define CMD_ADC_EN 44
#define CMD_SPI_PWR_EN 43 // SPI转串口电源使能
#endif
#endif // USING_N938
#ifndef USING_N938
#define GPIO_NODE_N938 "/sys/devices/platform/1000b000.pinctrl/mt_gpio"
#else
#define GPIO_NODE_MP "/dev/mtkgpioctrl"
#endif // USING_N938
class GpioControl
@ -71,7 +143,11 @@ public:
static void setCam3V3Enable(bool enabled)
{
#ifdef ENABLE_3V3_ALWAYS
setInt(CMD_SET_CAM_3V3_EN_STATE, 1);
#else
setInt(CMD_SET_CAM_3V3_EN_STATE, enabled ? 1 : 0);
#endif
}
static void reboot()
@ -161,6 +237,10 @@ public:
static void setSpiPower(bool on) {
setInt(CMD_SET_SPI_POWER, on ? 1 : 0);
if (on)
{
std::this_thread::sleep_for(std::chrono::milliseconds(40));
}
}
static void setRS485Enable(bool z) {
@ -172,6 +252,12 @@ public:
setInt(CMD_SET_12V_EN_STATE, z ? 1 : 0);
}
#ifdef USING_N938
static bool SetN938Cmd(int cmd, int val);
static bool OpenSensors();
static bool CloseSensors();
#endif
};

@ -10,9 +10,12 @@
#include <sys/system_properties.h>
#include <AndroidHelper.h>
#include <linux/spi/spidev.h>
#include "ncnn/yolov5ncnn.h"
#include <android/multinetwork.h>
#define NRSEC_PATH "/dev/spidev0.0"
#ifdef USING_BREAK_PAD
@ -326,7 +329,7 @@ Java_com_xypower_mpapp_MicroPhotoService_init(
pTerminal->InitServerInfo(MakeString(appPathStr), MakeString(cmdidStr), MakeString(ipStr), port, udpOrTcp, encryptData);
// pTerminal->SetPacketSize(1 * 1024); // 1K
#ifdef USING_NRSEC
#if defined(USING_NRSEC) && !defined(USING_NRSEC_VPN)
pTerminal->InitEncryptionInfo(simcardStr, "/dev/spidev0.0", "");
#endif
bool res = pTerminal->Startup(device);
@ -394,9 +397,9 @@ Java_com_xypower_mpapp_MicroPhotoService_takePhoto(
if (photoInfo.usbCamera)
{
device->TurnOnOtg(NULL);
CPhoneDevice::TurnOnOtg(NULL);
}
device->TurnOnCameraPower(NULL);
CPhoneDevice::TurnOnCameraPower(NULL);
std::vector<IDevice::OSD_INFO> osds;
osds.resize(4);
@ -415,11 +418,11 @@ Java_com_xypower_mpapp_MicroPhotoService_takePhoto(
env->ReleaseStringUTFChars(path, pathStr);
device->TurnOffCameraPower(NULL);
if (photoInfo.usbCamera)
{
device->TurnOffOtg(NULL);
}
// device->TurnOffCameraPower(NULL);
// if (photoInfo.usbCamera)
//{
// device->TurnOffOtg(NULL);
//}
return reinterpret_cast<jlong>(device);
}
@ -497,6 +500,10 @@ Java_com_xypower_mpapp_MicroPhotoService_uninit(
}
pTerminal->SignalExit();
pTerminal->Shutdown();
if (dev != NULL)
{
delete dev;
}
delete pTerminal;
@ -636,6 +643,90 @@ Java_com_xypower_mpapp_MicroPhotoService_getPhotoTimeData(
return data;
}
// JNI entry point: runs YOLOv5 (ncnn) detection on the image at picPath.
// Returns an int array of 6 values per detection — x, y, w, h, label,
// prob*100 — or NULL when the image cannot be read, nothing is detected,
// or the array cannot be allocated. (The JNI symbol name, including the
// original "recoganize" spelling, must match the Java declaration.)
extern "C" JNIEXPORT jintArray JNICALL
Java_com_xypower_mpapp_MicroPhotoService_recoganizePicture(
        JNIEnv* env,
        jclass cls, jstring paramPath, jstring binPath, jstring blobName8, jstring blobName16, jstring blobName32, jstring picPath) {

    // Copy a jstring into a std::string and release the JNI chars immediately.
    auto toStdString = [env](jstring js) -> std::string {
        const char* chars = env->GetStringUTFChars(js, 0);
        std::string copied = MakeString(chars);
        env->ReleaseStringUTFChars(js, chars);
        return copied;
    };

    std::string paramFile = toStdString(paramPath);
    std::string binFile = toStdString(binPath);
    std::string blob8 = toStdString(blobName8);
    std::string blob16 = toStdString(blobName16);
    std::string blob32 = toStdString(blobName32);
    std::string picture = toStdString(picPath);

    cv::Mat image = cv::imread(picture);
    if (image.empty())
    {
        return NULL;
    }

    std::vector<int> flattened;
    ncnn_init();
    ncnn::Net net;
    if (YoloV5Ncnn_Init(net, paramFile, binFile))
    {
        std::vector<IDevice::RECOG_OBJECT> objects;
        bool detected = YoloV5NcnnDetect(net, image, true, blob8, blob16, blob32, objects);
        if (detected && !objects.empty())
        {
            // Flatten each detection into 6 ints: x, y, w, h, label, prob*100.
            for (std::vector<IDevice::RECOG_OBJECT>::const_iterator it = objects.cbegin(); it != objects.cend(); ++it)
            {
                flattened.push_back(it->x);
                flattened.push_back(it->y);
                flattened.push_back(it->w);
                flattened.push_back(it->h);
                flattened.push_back(it->label);
                flattened.push_back((int)(it->prob * 100.0f));
            }
        }
    }
    // ncnn_uninit();

    if (flattened.empty())
    {
        return NULL;
    }

    jintArray data = env->NewIntArray(flattened.size());
    if (data == NULL) {
        return NULL;
    }
    env->SetIntArrayRegion(data, 0, flattened.size(), &flattened[0]);
    return data;
}
/*
extern "C" JNIEXPORT jlongArray JNICALL
Java_com_xypower_mpapp_MicroPhotoService_getNextScheduleItem(
@ -704,10 +795,106 @@ Java_com_xypower_mpapp_MicroPhotoService_getNextScheduleItem(
*/
// JNI callback invoked from Java when a single capture completes.
// `handler` is the CTerminal pointer returned by init(); on success the
// bitmap is converted to a BGR cv::Mat, denoised, and handed to the device.
extern "C" JNIEXPORT void JNICALL
Java_com_xypower_mpapp_MicroPhotoService_captureFinished(
JNIEnv* env,
jobject pThis, jlong handler, jboolean photoOrVideo, jboolean result, jobject bitmap, jlong photoId) {
CTerminal* pTerminal = reinterpret_cast<CTerminal *>(handler);
if (pTerminal == NULL)
{
return;
}
IDevice* dev = pTerminal->GetDevice();
if (dev != NULL)
{
// Failure or missing bitmap: finalize the pending photo with an empty frame.
if (result == JNI_FALSE || bitmap == NULL)
{
cv::Mat mat;
((CPhoneDevice *)dev)->OnCaptureReady(photoOrVideo != JNI_FALSE, result != JNI_FALSE, mat, (unsigned long)photoId);
return;
}
AndroidBitmapInfo info = { 0 };
int res = AndroidBitmap_getInfo(env, bitmap, &info);
// NOTE(review): this error/format check is empty — a non-RGBA_8888 bitmap
// falls through and is still treated as CV_8UC4 below; confirm whether an
// early return (or failure callback) was intended here.
if (res < 0 || info.format != ANDROID_BITMAP_FORMAT_RGBA_8888)
{
}
bool hardwareBitmap = (info.flags & ANDROID_BITMAP_FLAGS_IS_HARDWARE) != 0;
void* pixels = NULL;
AHardwareBuffer* hardwareBuffer = NULL;
if (hardwareBitmap)
{
// NOTE(review): the hardware-bitmap path is compiled out, so a hardware
// bitmap currently produces NO OnCaptureReady callback — confirm whether
// the pending photo can get stuck in this case.
#if 0
res = AndroidBitmap_getHardwareBuffer(env, bitmap, &hardwareBuffer);
int32_t fence = -1;
res = AHardwareBuffer_lock(hardwareBuffer, AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN, fence, NULL, &pixels);
cv::Mat mat(info.height, info.width, CV_8UC4, pixels);
AHardwareBuffer_unlock(hardwareBuffer, &fence);
cv::cvtColor(mat, mat, cv::COLOR_RGB2BGR);
((CPhoneDevice *)dev)->OnCaptureReady(photoOrVideo != JNI_FALSE, result != JNI_FALSE, mat, (unsigned long)photoId);
#endif // 0
} else
{
// Software bitmap: lock pixels, convert RGBA->BGR (cvtColor reallocates
// `raw` to 3 channels), denoise, then release the bitmap before callback.
res = AndroidBitmap_lockPixels(env, bitmap, &pixels);
cv::Mat tmp(info.height, info.width, CV_8UC4, pixels);
cv::Mat raw(info.height, info.width, CV_8UC4);
// tmp.copyTo(mat);
cv::cvtColor(tmp, raw, cv::COLOR_RGBA2BGR);
cv::Mat mat;
// Denoising parameters (h=13, hColor=13, template=7, search=21) are
// hard-coded; this call is comparatively expensive.
cv::fastNlMeansDenoisingColored(raw, mat, 13, 13, 7, 21);
AndroidBitmap_unlockPixels(env, bitmap);
((CPhoneDevice *)dev)->OnCaptureReady(photoOrVideo != JNI_FALSE, result != JNI_FALSE, mat, (unsigned long)photoId);
}
}
}
extern "C" JNIEXPORT void JNICALL
Java_com_xypower_mpapp_MicroPhotoService_burstCaptureFinished(
        JNIEnv* env,
        jobject pThis, jlong handler, jboolean result, jint numberOfCaptures,
        jstring pathsJoinedByTab, jboolean frontCamera, jint rotation, jlong photoId) {
    // Hands a completed burst (raw) capture over to the native device for
    // processing. pathsJoinedByTab lists the captured files joined by '\t'.
    CTerminal* pTerminal = reinterpret_cast<CTerminal *>(handler);
    if (pTerminal == NULL)
    {
        return;
    }

    /// HDRPlus
#ifdef USING_HDRPLUS
#endif
    IDevice* dev = pTerminal->GetDevice();
    if (dev != NULL)
    {
        // BUG FIX: GetStringUTFChars on a null jstring is undefined behavior
        // (crashes on ART); treat a null/unobtainable path list as a failure.
        if (result == JNI_FALSE || pathsJoinedByTab == NULL)
        {
            cv::Mat mat;
            ((CPhoneDevice *)dev)->OnCaptureReady(true, false, mat, (unsigned long)photoId);
            return;
        }
        const char* pathsStr = env->GetStringUTFChars(pathsJoinedByTab, 0);
        if (pathsStr == NULL)
        {
            // OOM while converting the string to UTF-8.
            cv::Mat mat;
            ((CPhoneDevice *)dev)->OnCaptureReady(true, false, mat, (unsigned long)photoId);
            return;
        }
        ((CPhoneDevice *)dev)->ProcessRawCapture(result != JNI_FALSE, numberOfCaptures, MakeString(pathsStr), frontCamera != JNI_FALSE, rotation, photoId);
        env->ReleaseStringUTFChars(pathsJoinedByTab, pathsStr);
    }
}
extern "C" JNIEXPORT void JNICALL
Java_com_xypower_mpapp_MicroPhotoService_recordingFinished(
JNIEnv* env,
jobject pThis, jlong handler, jboolean result, jstring path, jlong videoId) {
jobject pThis, jlong handler, jboolean photoOrVideo, jboolean result, jstring path, jlong videoId) {
CTerminal* pTerminal = reinterpret_cast<CTerminal *>(handler);
if (pTerminal == NULL)
@ -726,7 +913,7 @@ Java_com_xypower_mpapp_MicroPhotoService_recordingFinished(
// camera->Open(pathStr, fileNameStr);
unsigned long photoId = videoId;
((CPhoneDevice *)dev)->OnVideoReady(result != JNI_FALSE, pathStr, photoId);
((CPhoneDevice *)dev)->OnVideoReady(photoOrVideo != JNI_FALSE, result != JNI_FALSE, pathStr, photoId);
if (path != NULL)
{
env->ReleaseStringUTFChars(path, pathStr);
@ -735,7 +922,7 @@ Java_com_xypower_mpapp_MicroPhotoService_recordingFinished(
}
extern "C" JNIEXPORT void JNICALL
extern "C" JNIEXPORT jboolean JNICALL
Java_com_xypower_mpapp_MicroPhotoService_reloadConfigs(
JNIEnv* env,
jobject pThis, jlong handler) {
@ -743,13 +930,36 @@ Java_com_xypower_mpapp_MicroPhotoService_reloadConfigs(
CTerminal* pTerminal = reinterpret_cast<CTerminal *>(handler);
if (pTerminal == NULL)
{
return;
return JNI_FALSE;
}
pTerminal->LoadAppConfigs();
bool res = pTerminal->LoadAppConfigs();
return res ? JNI_TRUE : JNI_FALSE;
}
extern "C" JNIEXPORT jboolean JNICALL
Java_com_xypower_mpapp_MicroPhotoService_sendExternalPhoto(
        JNIEnv* env, jclass cls, jlong handler, jstring path) {
    // Forwards an externally produced photo file to the terminal.
    // Returns JNI_TRUE on success, JNI_FALSE for an invalid handler/path or
    // when the terminal rejects the file.
    CTerminal* pTerminal = reinterpret_cast<CTerminal *>(handler);
    if (pTerminal == NULL)
    {
        return JNI_FALSE;
    }
    // BUG FIX: guard against a null jstring before touching it — the original
    // called GetStringUTFLength(path) unconditionally.
    if (path == NULL || env->GetStringUTFLength(path) <= 0)
    {
        return JNI_FALSE;
    }
    const char *pathStr = env->GetStringUTFChars(path, 0);
    if (pathStr == NULL)
    {
        return JNI_FALSE; // OOM during UTF-8 conversion
    }
    bool res = pTerminal->SendExternalPhoto(pathStr);
    env->ReleaseStringUTFChars(path, pathStr);
    return res ? JNI_TRUE : JNI_FALSE;
}
extern "C" JNIEXPORT void JNICALL
Java_com_xypower_mpapp_MicroPhotoService_infoLog(
JNIEnv* env, jclass cls, jstring msg) {
@ -814,7 +1024,8 @@ Java_com_xypower_mpapp_MicroPhotoService_importPublicKeyFile(
const char *md5Str = env->GetStringUTFChars(md5, 0);
GpioControl::setCam3V3Enable(true);
GpioControl::setSpiPower(false);
CPhoneDevice::TurnOnCameraPower(NULL);
GpioControl::setSpiPower(true);
NrsecPort nrsec;
@ -827,7 +1038,7 @@ Java_com_xypower_mpapp_MicroPhotoService_importPublicKeyFile(
}
GpioControl::setSpiPower(false);
GpioControl::setCam3V3Enable(false);
CPhoneDevice::TurnOffCameraPower(NULL);
env->ReleaseStringUTFChars(md5, md5Str);
@ -849,7 +1060,8 @@ Java_com_xypower_mpapp_MicroPhotoService_importPublicKey(
return JNI_FALSE;
}
GpioControl::setCam3V3Enable(true);
GpioControl::setSpiPower(false);
CPhoneDevice::TurnOnCameraPower(NULL);
GpioControl::setSpiPower(true);
NrsecPort nrsec;
@ -864,7 +1076,44 @@ Java_com_xypower_mpapp_MicroPhotoService_importPublicKey(
}
GpioControl::setSpiPower(false);
GpioControl::setCam3V3Enable(false);
CPhoneDevice::TurnOffCameraPower(NULL);
return res ? JNI_TRUE : JNI_FALSE;
#else
return JNI_FALSE;
#endif
}
extern "C" JNIEXPORT jboolean JNICALL
Java_com_xypower_mpapp_MicroPhotoService_importPrivateKey(
JNIEnv* env, jclass cls, jint index, jbyteArray cert) {
#ifdef USING_NRSEC
int byteCertLen = env->GetArrayLength(cert);
if (byteCertLen <= 0)
{
return JNI_FALSE;
}
GpioControl::setSpiPower(false);
CPhoneDevice::TurnOnCameraPower(NULL);
GpioControl::setSpiPower(true);
NrsecPort nrsec;
const char *path = NRSEC_PATH;
bool res = nrsec.Open(path);
if (res)
{
jbyte* byteCert = env->GetByteArrayElements(cert, 0);
res = nrsec.SM2ImportPrivateKey(index, (const uint8_t*)byteCert) == 0;
nrsec.Close();
env->ReleaseByteArrayElements(cert, byteCert, JNI_ABORT);
}
GpioControl::setSpiPower(false);
CPhoneDevice::TurnOffCameraPower(NULL);
return res ? JNI_TRUE : JNI_FALSE;
#else
@ -886,7 +1135,9 @@ Java_com_xypower_mpapp_MicroPhotoService_genKeys(
jclass cls, jint index) {
#ifdef USING_NRSEC
GpioControl::setCam3V3Enable(true);
GpioControl::setSpiPower(false);
CPhoneDevice::TurnOnCameraPower(NULL);
GpioControl::setSpiPower(true);
const char *path = NRSEC_PATH;
@ -900,7 +1151,7 @@ Java_com_xypower_mpapp_MicroPhotoService_genKeys(
}
GpioControl::setSpiPower(false);
GpioControl::setCam3V3Enable(false);
CPhoneDevice::TurnOffCameraPower(NULL);
return res ? JNI_TRUE : JNI_FALSE;
#else
@ -915,7 +1166,8 @@ Java_com_xypower_mpapp_MicroPhotoService_querySecVersion(
std::string version;
#ifdef USING_NRSEC
GpioControl::setCam3V3Enable(true);
GpioControl::setSpiPower(false);
CPhoneDevice::TurnOnCameraPower(NULL);
GpioControl::setSpiPower(true);
const char *path = NRSEC_PATH;
@ -929,7 +1181,7 @@ Java_com_xypower_mpapp_MicroPhotoService_querySecVersion(
}
GpioControl::setSpiPower(false);
GpioControl::setCam3V3Enable(false);
CPhoneDevice::TurnOffCameraPower(NULL);
#endif
return env->NewStringUTF(version.c_str());
}
@ -945,7 +1197,8 @@ Java_com_xypower_mpapp_MicroPhotoService_genCertRequest(
}
const char *path = NRSEC_PATH;
GpioControl::setCam3V3Enable(true);
GpioControl::setSpiPower(false);
CPhoneDevice::TurnOnCameraPower(NULL);
GpioControl::setSpiPower(true);
uint8_t output[1024] = { 0 };
@ -962,6 +1215,9 @@ Java_com_xypower_mpapp_MicroPhotoService_genCertRequest(
env->ReleaseStringUTFChars(subject, subjectStr);
}
GpioControl::setSpiPower(false);
CPhoneDevice::TurnOffCameraPower(NULL);
if (res)
{
const char* outputPathStr = env->GetStringUTFChars(outputPath, 0);
@ -999,7 +1255,8 @@ Java_com_xypower_mpapp_MicroPhotoService_importPrivateKeyFile(
const char *path = NRSEC_PATH;
GpioControl::setCam3V3Enable(true);
GpioControl::setSpiPower(false);
CPhoneDevice::TurnOnCameraPower(NULL);
GpioControl::setSpiPower(true);
NrsecPort nrsec;
@ -1011,7 +1268,7 @@ Java_com_xypower_mpapp_MicroPhotoService_importPrivateKeyFile(
}
GpioControl::setSpiPower(false);
GpioControl::setCam3V3Enable(false);
CPhoneDevice::TurnOffCameraPower(NULL);
// const char *md5Str = env->GetStringUTFChars(md5, 0);
// env->ReleaseStringUTFChars(md5, md5Str);
@ -1038,7 +1295,8 @@ Java_com_xypower_mpapp_MicroPhotoService_exportPublicKeyFile(
uint8_t len = 0;
std::vector<unsigned char> data(64, 0);
GpioControl::setCam3V3Enable(true);
GpioControl::setSpiPower(false);
CPhoneDevice::TurnOnCameraPower(NULL);
GpioControl::setSpiPower(true);
NrsecPort nrsec;
@ -1050,7 +1308,7 @@ Java_com_xypower_mpapp_MicroPhotoService_exportPublicKeyFile(
}
GpioControl::setSpiPower(false);
GpioControl::setCam3V3Enable(false);
CPhoneDevice::TurnOffCameraPower(NULL);
if (res)
{
@ -1077,7 +1335,8 @@ Java_com_xypower_mpapp_MicroPhotoService_exportPrivateFile(
const char *path = NRSEC_PATH;
GpioControl::setCam3V3Enable(true);
GpioControl::setSpiPower(false);
CPhoneDevice::TurnOnCameraPower(NULL);
GpioControl::setSpiPower(true);
NrsecPort nrsec;
@ -1093,7 +1352,7 @@ Java_com_xypower_mpapp_MicroPhotoService_exportPrivateFile(
nrsec.Close();
GpioControl::setSpiPower(false);
GpioControl::setCam3V3Enable(false);
CPhoneDevice::TurnOffCameraPower(NULL);
if (res)
{

File diff suppressed because it is too large Load Diff

@ -153,18 +153,33 @@ class CPhoneDevice : public IDevice
{
public:
class CPhoneCamera : public NdkCamera {
class CPhoneCamera : public NdkCamera
{
public:
CPhoneCamera(CPhoneDevice* dev, int32_t width, int32_t height, const NdkCamera::CAMERA_PARAMS& params);
virtual ~CPhoneCamera();
virtual bool on_image(cv::Mat& rgb);
virtual void on_error(const std::string& msg);
virtual void onDisconnected(ACameraDevice* device);
virtual bool onBurstCapture(std::shared_ptr<ACameraMetadata> characteristics, std::vector<std::shared_ptr<ACameraMetadata> >& results, uint32_t ldr, std::vector<std::shared_ptr<AImage> >& frames);
protected:
CPhoneDevice* m_dev;
};
class CJpegCamera : public CPhoneCamera
{
public:
CJpegCamera(CPhoneDevice* dev, int32_t width, int32_t height, const std::string& path, const NdkCamera::CAMERA_PARAMS& params);
virtual void onImageAvailable(AImageReader* reader);
virtual int32_t getOutputFormat() const;
virtual bool onBurstCapture(std::shared_ptr<ACameraMetadata> characteristics, std::vector<std::shared_ptr<ACameraMetadata> >& results, uint32_t ldr, std::vector<std::shared_ptr<AImage> >& frames);
protected:
std::string m_path;
};
struct TIMER_CONTEXT
{
CPhoneDevice* device;
@ -198,10 +213,18 @@ public:
virtual unsigned long RequestWakelock(unsigned long timeout);
virtual bool ReleaseWakelock(unsigned long wakelock);
virtual int GetWData(WEATHER_INFO *weatherInfo);
virtual int GetIceData(ICE_INFO *iceInfo, ICE_TAIL *icetail, SENSOR_PARAM *sensorParam);
virtual bool OpenSensors();
virtual bool CloseSensors();
bool GetNextScheduleItem(uint32_t tsBasedZero, uint32_t scheduleTime, vector<uint32_t>& items);
void UpdatePosition(double lon, double lat, double radius, time_t ts);
bool OnVideoReady(bool result, const char* path, unsigned int photoId);
bool OnVideoReady(bool photoOrVideo, bool result, const char* path, unsigned int photoId);
bool OnCaptureReady(bool photoOrVideo, bool result, cv::Mat& mat, unsigned int photoId);
bool ProcessRawCapture(bool result, int numberOfCaptures, const std::string& pathsJoinedByTab, bool frontCamera, int rotation, long photoId);
void UpdateSignalLevel(int signalLevel);
void UpdateTfCardPath(const std::string& tfCardPath)
{
@ -213,11 +236,11 @@ public:
}
void UpdateSimcard(const std::string& simcard);
void TurnOnCameraPower(JNIEnv* env);
void TurnOffCameraPower(JNIEnv* env);
static void TurnOnCameraPower(JNIEnv* env);
static void TurnOffCameraPower(JNIEnv* env);
void TurnOnOtg(JNIEnv* env);
void TurnOffOtg(JNIEnv* env);
static void TurnOnOtg(JNIEnv* env);
static void TurnOffOtg(JNIEnv* env);
protected:
@ -227,7 +250,8 @@ protected:
bool SendBroadcastMessage(std::string action, int value);
// bool MatchCaptureSizeRequest(ACameraManager *cameraManager, const char *selectedCameraId, unsigned int width, unsigned int height, uint32_t cameraOrientation_,
inline bool TakePhotoCb(bool res, const IDevice::PHOTO_INFO& photoInfo, const string& path, time_t photoTime, const std::vector<IDevice::RECOG_OBJECT>& objects) const
bool PostProcessPhoto(const PHOTO_INFO& photoInfo, const vector<IDevice::OSD_INFO>& osds, const std::string& path, const std::string& cameraInfo, cv::Mat& mat);
inline bool TakePhotoCb(int res, const IDevice::PHOTO_INFO& photoInfo, const string& path, time_t photoTime, const std::vector<IDevice::RECOG_OBJECT>& objects) const
{
if (m_listener != NULL)
{
@ -236,13 +260,12 @@ protected:
return false;
}
inline bool TakePhotoCb(bool res, const IDevice::PHOTO_INFO& photoInfo, const string& path, time_t photoTime) const
inline bool TakePhotoCb(int result, const IDevice::PHOTO_INFO& photoInfo, const string& path, time_t photoTime) const
{
if (m_listener != NULL)
{
std::vector<IDevice::RECOG_OBJECT> objects;
return m_listener->OnPhotoTaken(res, photoInfo, path, photoTime, objects);
return m_listener->OnPhotoTaken(result, photoInfo, path, photoTime, objects);
}
return false;
@ -252,6 +275,7 @@ protected:
std::string QueryCpuTemperature();
bool OnImageReady(cv::Mat& mat);
bool onBurstCapture(std::shared_ptr<ACameraMetadata> characteristics, std::vector<std::shared_ptr<ACameraMetadata> >& results, uint32_t ldr, std::vector<std::shared_ptr<AImage> >& frames);
void onError(const std::string& msg);
void onDisconnected(ACameraDevice* device);
@ -310,8 +334,9 @@ protected:
time_t mHeartbeatStartTime;
unsigned int mHeartbeatDuration;
long mCameraPowerCount;
long mOtgCount;
static std::mutex m_powerLocker;
static long mCameraPowerCount;
static long mOtgCount;
std::thread m_threadClose;
int m_signalLevel;

@ -0,0 +1,57 @@
#ifndef __POSITION_HELPER_H__
#define __POSITION_HELPER_H__
#include <cmath>
#define _USE_MATH_DEFINES
// Latitude perturbation term of the WGS-84 -> GCJ-02 ("Mars coordinates")
// transform. x/y are the longitude/latitude offsets from the reference
// point (105E, 35N). The three sine groups add progressively lower-frequency
// distortion; evaluation order is kept identical to preserve bit-exact results.
inline double transformLat(double x, double y)
{
    const double base     = -100.0 + 2.0 * x + 3.0 * y + 0.2 * y * y + 0.1 * x * y + 0.2 * std::sqrt(std::abs(x));
    const double highFreq = (20.0 * std::sin(6.0 * x * M_PI) + 20.0 * std::sin(2.0 * x * M_PI)) * 2.0 / 3.0;
    const double midFreq  = (20.0 * std::sin(y * M_PI) + 40.0 * std::sin(y / 3.0 * M_PI)) * 2.0 / 3.0;
    const double lowFreq  = (160.0 * std::sin(y / 12.0 * M_PI) + 320 * std::sin(y * M_PI / 30.0)) * 2.0 / 3.0;
    return ((base + highFreq) + midFreq) + lowFreq;
}
// Longitude perturbation term of the WGS-84 -> GCJ-02 transform; companion of
// transformLat (note this one depends only on x inside the sine groups).
// Evaluation order is kept identical to preserve bit-exact results.
inline double transformLng(double x, double y)
{
    const double base     = 300.0 + x + 2.0 * y + 0.1 * x * x + 0.1 * x * y + 0.1 * std::sqrt(std::abs(x));
    const double highFreq = (20.0 * std::sin(6.0 * x * M_PI) + 20.0 * std::sin(2.0 * x * M_PI)) * 2.0 / 3.0;
    const double midFreq  = (20.0 * std::sin(x * M_PI) + 40.0 * std::sin(x / 3.0 * M_PI)) * 2.0 / 3.0;
    const double lowFreq  = (150.0 * std::sin(x / 12.0 * M_PI) + 300.0 * std::sin(x / 30.0 * M_PI)) * 2.0 / 3.0;
    return ((base + highFreq) + midFreq) + lowFreq;
}
// Converts a WGS-84 position to GCJ-02 in place. The deltas from
// transformLat/transformLng are rescaled from the projected plane back to
// degrees using the ellipsoid parameters below, then added to the inputs.
inline void transformPosition(double& lat, double& lng)
{
// Semi-major axis of the reference ellipsoid used by the projection (meters).
#define AXIS 6378245.0
// Squared eccentricity of the ellipsoid: (a^2 - b^2) / a^2.
#define OFFSET 0.00669342162296594323
    double latDelta = transformLat(lng - 105.0, lat - 35.0);
    double lngDelta = transformLng(lng - 105.0, lat - 35.0);
    const double radLat = lat / 180.0 * M_PI;
    double magic = std::sin(radLat);
    magic = 1 - OFFSET * magic * magic;
    const double sqrtMagic = std::sqrt(magic);
    latDelta = (latDelta * 180.0) / ((AXIS * (1 - OFFSET)) / (magic * sqrtMagic) * M_PI);
    lngDelta = (lngDelta * 180.0) / (AXIS / sqrtMagic * std::cos(radLat) * M_PI);
    lat += latDelta;
    lng += lngDelta;
}
// Returns true when (lat, lon) lies inside the rough bounding box of mainland
// China (lon in [72.004, 137.8347], lat in [0.8293, 55.8271]), i.e. when the
// GCJ-02 conversion should be applied. Written as the exact negation of the
// original out-of-range guards so behavior (including NaN handling) is
// unchanged.
inline bool shouldConvertPosition(double lat, double lon)
{
    return !(lon < 72.004 || lon > 137.8347 || lat < 0.8293 || lat > 55.8271);
}
#endif // __POSITION_HELPER_H__

File diff suppressed because it is too large Load Diff

@ -1,467 +0,0 @@
//
// Created by hyz on 2024/6/5.
//
#ifndef WEATHERCOMM_H
#define WEATHERCOMM_H
#include <string>
#include "GPIOControl.h"
#include "termios.h"
#ifndef DWORD
typedef unsigned int DWORD;
#endif
#ifndef WORD
typedef unsigned short WORD;
#endif
#ifndef BYTE
typedef unsigned char BYTE;
#endif
#ifndef LONG
typedef long long LONG;
#endif
#ifndef FALSE
#define FALSE 0
#endif
#ifndef TRUE
#define TRUE 1
#endif
#define LOBYTE(w) ((unsigned char)(w))
#define HIBYTE(w) ((unsigned char)(((unsigned short)(w) >> 8) & 0xFF))
#define LOWORD(l) ((WORD)(l))
#define HIWORD(l) ((WORD)((DWORD)(l) >> 16))
#define min(a, b) ((a) < (b) ? (a) : (b))
#define MAX_STRING_LEN 32
#define IOT_PARAM_WRITE 0xAE
#define IOT_PARAM_READ 0xAF
#define MAX_SERIAL_DEV_NUM 25 /* 最大接串口传感器数量*/
#define MAX_SERIAL_PORT_NUM 5
#define MAX_DEV_VALUE_NUM 12 /* 一台装置最大的采样值数量*/
#define WEATHER_PROTOCOL 1 /* 温湿度协议序号*/
#define WIND_PROTOCOL 2 /* 风速风向协议序号*/
#define SLANT_PROTOCOL 3 /* 倾斜角协议序号*/
#define RALLY_PROTOCOL 4 /* 拉力协议序号*/
#define PELCO_P_PROTOCOL 5 /* 摄像机Pelco_P协议序号*/
#define PELCO_D_PROTOCOL 6 /* 摄像机Pelco_D协议序号*/
#define SERIALCAMERA_PROTOCOL 8 /* 串口摄像机协议序号*/
#define RESERVE2_PROTOCOL 17 /* 备用2协议序号*/
#define RESERVE4_PROTOCOL 19 /* 备用4协议序号*/
#define RESERVE5_PROTOCOL 20 /* 备用5协议序号*/
#define INVALID_PROTOCOL 21 /* 无效协议序号*/
#define AirTempNo 0 /* 空气温度数据存储序号*/
#define HumidityNo 1 /* 相对湿度数据存储序号*/
#define WindSpeedNo 2 /* 风速数据存储序号*/
#define WindDirectionNo 3 /* 风向数据存储序号*/
#define RainfallNo 4 /* 雨量数据存储序号*/
#define AtmosNo 5 /* 大气压数据存储序号*/
#define OpticalRadiationNo 6 /* 日照(光辐射)数据存储序号*/
#define SER_IDLE 0 /* 传感器处于空闲状态,未启动采样*/
#define SER_SAMPLE 1 /* 正在采样过程中*/
#define SAMPLINGSUCCESS 2 /* 采样结束,正常读取到数据*/
#define SER_STARTSAMPLE 3 /* 启动采样*/
#define SER_SAMPLEFAIL -1 /* 采样失败,未采集到数据,传感器故障或未接*/
#define PHOTO_SAVE_SUCC 5 /* 图片保存成功*/
#define WEATHER_DATA_NUM 8 /* 气象数据最大数量(一般最多是6要素)*/
#define RALLY_DATA_NUM 2 /* 拉力数据最大数量(一般是1个)*/
#define SLANTANGLE_DATA_NUM 3 /* 倾角数据最大数量(一般只有X轴和Y轴值)*/
#define PTZ_MOVETIME 1 // 云台移动等待时间为1秒
#define MAX_CHANNEL_NUM 2 /* 视频通道最大通道*/
#define MAX_PHOTO_FRAME_LEN 1024 /* 图片数据一包最大长度*/
#define MAX_PHOTO_PACKET_NUM 1024 /* 图片最大包数图片最大定为1MB*/
#define RECVDATA_MAXLENTH 2048 /* 接收数据缓冲区最大值*/
#define TIMER_CNT 50 // Poll命令定时器时间 5 ms
#define SENDDATA_MAXLENTH RECVDATA_MAXLENTH /* 正常发送数据缓冲区最大值*/
// 摄像机控制命令宏定义
#define Cmd_Cancel 0x00000000 // 关闭功能
#define SET_PRESETNO 0x00030000 // 设置预置点
#define MOVE_TO_PRESETNO 0x00070000 // 调用预置点
/* 摄像机PELCO-P控制命令宏定义*/
#define P_Auto_Scan 0x20000000 /* 自动扫描功能控制(1/0 打开/关闭该功能)*/
#define P_IRIS_CLOSE 0x08000000 /* 光圈缩小(1 有效)*/
#define P_IRIS_OPEN 0x04000000 /* 光圈放大(1 有效)*/
#define P_FOCUS_NEAR 0x02000000 /* 近距离聚焦(1 有效)*/
#define P_FOCUS_FAR 0x01000000 /* 远距离聚焦(1 有效)*/
#define P_ZOOM_WIDE 0x00400000 /* 远离物体(1 有效)*/
#define P_ZOOM_TELE 0x00200000 /* 接近物体(1 有效)*/
#define P_MOVE_DOWN 0x0010001f /* 向下移动镜头(1 有效)*/
#define P_MOVE_UP 0x0008001f /* 向上移动镜头(1 有效)*/
#define P_MOVE_LEFT 0x00041f00 /* 向左移动镜头(1 有效)*/
#define P_MOVE_RIGHT 0x00021f00 /* 向右移动镜头(1 有效)*/
// 摄像机PELCO-D控制命令宏定义
#define D_Auto_Scan 0x10000000 /* 自动扫描功能控制(1/0 打开/关闭该功能)*/
#define D_IRIS_CLOSE 0x04000000 /* 光圈缩小(1 有效)*/
#define D_IRIS_OPEN 0x02000000 /* 光圈放大(1 有效)*/
#define D_FOCUS_NEAR 0x01000000 /* 近距离聚焦(1 有效)*/
#define D_FOCUS_FAR 0x00800000 /* 远距离聚焦(1 有效)*/
#define D_ZOOM_WIDE 0x00400000 /* 远离物体(1 有效)*/
#define D_ZOOM_TELE 0x00200000 /* 接近物体(1 有效)*/
#define D_MOVE_DOWN 0x0010002d /* 向下移动镜头(1 有效)*/
#define D_MOVE_UP 0x0008002d /* 向上移动镜头(1 有效)*/
#define D_MOVE_LEFT 0x00042d00 /* 向左移动镜头(1 有效)*/
#define D_MOVE_RIGHT 0x00022d00 /* 向右移动镜头(1 有效)*/
/* 摄像机下发命令宏定义*/
#define Take_Photo 0 /* 拍照*/
#define Stop_Baud 10000 /* 设置球机波特率*/
#define Stop_Cmd 10005 /* 取消或停止指令*/
#define Auto_Scan 10006 /* 自动扫描功能控制(1/0 打开/关闭该功能)*/
#define IRIS_CLOSE 10007 /* 光圈缩小(1 有效)*/
#define IRIS_OPEN 10008 /* 光圈放大(1 有效)*/
#define FOCUS_NEAR 10009 /* 近距离聚焦(1 有效)*/
#define FOCUS_FAR 10010 /* 远距离聚焦(1 有效)*/
#define ZOOM_WIDE 10011 /* 远离物体(1 有效)*/
#define ZOOM_TELE 10012 /* 接近物体(1 有效)*/
#define MOVE_DOWN 10013 /* 向下移动镜头(1 有效)*/
#define MOVE_UP 10014 /* 向上移动镜头(1 有效)*/
#define MOVE_LEFT 10015 /* 向左移动镜头(1 有效)*/
#define MOVE_RIGHT 10016 /* 向右移动镜头(1 有效)*/
#define MOVE_PRESETNO 10017 // 调用预置点
#define SAVE_PRESETNO 10018 // 设置预置点
#define SPEED_DOME_CAMERA 0 /* 球机摄像机*/
#define SERIAL_CAMERA 2 /* 串口摄像机a*/
#define COLLECT_DATA 0 /* 调试使用*/
#define LOGE(fmt, args...) __android_log_print(ANDROID_LOG_ERROR, "serial_port_comm", fmt, ##args) /* 红色*/
#define LOGI(fmt, args...) __android_log_print(ANDROID_LOG_INFO, "Sensors_Protocol", fmt, ##args) /* 草绿色*/
#define LOGV(fmt, args...) __android_log_print(ANDROID_LOG_VERBOSE, "serial_port_comm", fmt, ##args)/* 白色*/
#define LOGW(fmt, args...) __android_log_print(ANDROID_LOG_WARN, "Sensors_Protocol", fmt, ##args) /* 黄色*/
#define LOGD(fmt, args...) __android_log_print(ANDROID_LOG_DEBUG, "Sensors_Protocol", fmt, ##args) /* 蓝色*/
#define HexCharToInt( c ) (((c) >= '0') && ((c) <= '9') ? (c) - '0' : ((c) >= 'a') && ((c) <= 'f') ? (c) - 'a' + 10 :((c) >= 'A') && ((c) <= 'F') ? (c) - 'A' + 10 : 0 )
// Parameter block exchanged with the IoT driver — presumably via the
// IOT_PARAM_WRITE / IOT_PARAM_READ codes defined above; confirm against the
// GPIOControl implementation.
typedef struct
{
    int cmd;                  // command code
    int value;                // primary input value
    int result;               // result/status filled in by the peer
    LONG value2;              // secondary 64-bit value (LONG == long long here)
    char str[MAX_STRING_LEN]; // auxiliary string payload
}IOT_PARAM;
// SDS packet structure
typedef struct
{
    BYTE PortIdx; // port index (original comment said "message type" — likely copy/paste; verify)
    WORD MsgType; // message type
    int MsgLen; // message length
    u_char MsgData[RECVDATA_MAXLENTH]; // raw payload
} RTUMSG;
typedef struct
{
    float fFactor;      // scaling factor applied to the raw sample
    float EuValueDelta; // engineering-value offset
} AI_PARAM;
typedef struct
{
    AI_PARAM AiParam; // per-point configuration parameters
    int AiState;      // sample state (-1: failed; 0: not sampled; 1: sampling; 2: done; 3: start sampling)
    float EuValue;    // engineering value
} AI_DEF;
typedef struct
{
    BYTE AiState;  // sample state (-1: failed; 0: not sampled; 1: sampling; 2: done; 3: start sampling)
                   // NOTE(review): BYTE is unsigned, so the documented -1 state cannot be represented — verify
    float EuValue; // engineering value
} Data_DEF;
typedef struct
{
    int imagelen;        // total image size in bytes
    int phototime;       // capture timestamp
    u_char presetno;     // preset position used for the shot
    char photoname[512]; // storage name and path of the image
    int state;// state (-1: capture failed; 0: idle; 1: fetching; 2: success; 3: start capture)
} IMAGE_DEF;
typedef struct
{
    int imagelen;        // total image size in bytes
    int imagenum;        // total number of packets for the image
    int phototime;       // capture timestamp
    u_char presetno;     // preset position used for the shot
    char photoname[512]; // storage name and path of the image
    u_char buf[MAX_PHOTO_PACKET_NUM][MAX_PHOTO_FRAME_LEN]; // image data cache, one row per packet
    int ilen[MAX_PHOTO_PACKET_NUM]; // length of each corresponding packet
    int state;// state (-1: capture failed; 0: idle; 1: fetching; 2: success; 3: start capture)
} PHOTO_DEF;
// Sensor acquisition parameters passed down by the upper layer
typedef struct
{
    unsigned int baudrate; /* baud rate */
    int databit;           /* data bits */
    float stopbit;         /* stop bits */
    char parity;           /* parity */
    char pathname[64];     /* serial device file name and path */
    //int commNo;          /* agreed serial port number, e.g. COM1 as shown on a PC */
    u_char SensorsType;    /* sensor type index, greater than 0 */
    int devaddr;           /* address used by the device (sensor) */
    u_char IsNoInsta;      /* device absent or broken (1: OK, 0: invalid/broken/not installed) */
    u_char CameraChannel;  /* camera channel number */
    u_char Phase;          /* mounting phase for tension/tilt sensors; 11 means A1... */
} SENSOR_PARAM;
// Serial device parameters that need configuration
typedef struct
{
    unsigned int baudrate; /* baud rate */
    int databit;           /* data bits */
    int stopbit;           /* stop bits */
    char parity;           /* parity */
    char pathname[64];     /* serial device file name and path */
    int commid;            /* serial port index, NOTE: 0-based */
    u_char ProtocolIdx;    /* protocol index, greater than 0 */
    int devaddr;           /* address used by the device */
    u_char IsNoInsta;      /* device absent or broken (1: OK, 0: invalid/broken/not installed) */
    u_char CameraChannel;  /* camera channel number */
    u_char Phase;          /* mounting phase for tension/tilt sensors; 11 means A1... */
} SERIAL_PARAM;
// Per-port serial I/O state: receive buffer/state machine plus the poll-command
// retry/pacing bookkeeping.
typedef struct
{
    int m_iRevStatus;    /* receive state-machine status — original comment empty; verify semantics */
    int m_iRecvLen;      /* bytes received so far — original comment empty; verify */
    int m_iNeedRevLength;/* bytes still expected — original comment empty; verify */
    int iRecvTime;       /* receive timing counter — original comment empty; verify */
    u_char m_au8RecvBuf[RECVDATA_MAXLENTH];/* receive buffer */
    int fd; /* file descriptor of the opened serial port */
    u_char PollCmd[SENDDATA_MAXLENTH]; // pending poll command bytes
    int cmdlen; // length of the command in the send buffer
    //******************** Poll Cmd ****************************
    u_char Retry;        /* command retry limit */
    u_char RetryCnt;     /* command retry counter */
    LONG RetryTime;      /* command retry interval */
    LONG RetryTimeCnt;   /* command retry interval counter */
    LONG WaitTime;       /* inter-command gap */
    LONG WaitTimeCnt;    /* inter-command gap counter */
    u_char ForceWaitFlag;/* forced-wait flag */
    u_short ForceWaitCnt;/* forced-wait counter */
    u_char ReSendCmdFlag;/* resend-command flag */
    u_char SendCmdFlag;  /* command-sent flag */
    u_char RevCmdFlag;   /* command-received-OK flag */
    //**********************************************************
    LONG lsendtime;      /* absolute send time of the command (milliseconds) */
} SIO_PARAM_SERIAL_DEF;
// Consolidated per-device state for a serial-attached sensor/device
typedef struct
{
    //******************** basic port information ************************
    u_char IsNeedSerial;   /* whether serial communication is required */
    int CmdWaitTime;       /* unused */
    u_char UseSerialidx;   /* index of the serial port in use */
    int SerialCmdidx;      /* index of the command currently being sent on the port
                              (-1: no command being sent) */
    int enrecvtime;        /* time since a reply arrived after sending the encrypted command */
    LONG FirstCmdTimeCnt;  /* start time of reading data from the serial port */
    u_char nextcmd;        /* second issue of the weather/rainfall read command */
    u_char SameTypeDevIdx; /* sequence number among devices of the same type (0-based) */
    u_char uOpenPowerFlag; /* sensor power-on flag (0: no need to power on; 1: power on) */
    int recvdatacnt;       /* count of valid data received */
    PHOTO_DEF image;       /* temporary storage for image data */
    AI_DEF aiValue[MAX_DEV_VALUE_NUM]; /* sensor sample values */
} SERIAL_DEV_DEF;
// Global serial/camera subsystem state: port allocation, PTZ control,
// sampling progress and serial-camera photo transfer bookkeeping.
typedef struct
{
    u_char clcyesampling;   /* sampling in progress (0: not sampling; 1: sampling) */
    u_char camerauseserial; /* which serial port the camera uses */
    DWORD PtzCmdType;       /* PTZ (pan/tilt) command type */
    int usecameradevidx;    /* a camera command is pending */
    /* device index executing the command (-1: nothing pending) */
    int SendStopPtzCmdTimeCnt; /* timer for sending the PTZ stop command */
    u_char serialstatus[MAX_SERIAL_PORT_NUM]; /* usability of serial ports 1, 2, 3 */
    SERIAL_DEV_DEF ms_dev[MAX_SERIAL_DEV_NUM]; /* sensors attached to the device */
    int UseingSerialdev[MAX_SERIAL_PORT_NUM]; /* device index currently using each port (-1: port idle) */
    int curdevidx[MAX_SERIAL_PORT_NUM]; /* device index currently communicating (-1: none) */
    u_char IsReadWireTem; /* reading wire-temperature data (0: no; 1: yes) */
    //int proruntime;     /* program run time */
    int IsSleep;          /* put the program to sleep (1: do not sleep; 2: sleep) */
    int tempsamplingstartime; /* interval from temperature-measurement start to sampling start */
    int tempsamplingsucctime; /* interval from temperature-measurement start to sampling success */
    int samplingtimeSec;  /* second-level timing control for high-speed sampling */
    int SectimesamplingCnt[3]; /* per-second sample counts for high-speed sampling */
    int SunshineSensorsFault; /* controls reporting of sunshine-sensor faults */
    int TempSensorsFault; /* controls reporting of temperature-sensor faults */
    int FirstSensorsFault;/* first-pass sensor fault detection/report */
    int SensorsIsUse;     /* whether the sensor is enabled; matches the self-check position */
    int sequsampling;     /* sequential-sampling control index (-1: none; otherwise device index) */
    int imagepacketnum;   /* total packet count of a serial-camera photo */
    int historyimagenum[MAX_CHANNEL_NUM]; /* number of history images kept by the dome camera */
#if 1
    //int sendflag;       /* temporary flag for uploading leakage-current values */
    int sendphototime;    /* statistics for uploaded image data */
    int sendphotocmdcnt;  /* number of capture commands sent during one capture */
    int photographtime;   /* time the picture was taken */
    int iLastGetPhotoNo;  /* capture command index saved while configuring the serial camera */
    u_char bImageSize;    /* temporary storage for the image size received from the upper layer */
    u_char presetno;      /* temporary storage for the preset number received from the upper layer */
    char filedir[512];    /* temporary directory for photos taken by the camera */
#endif
    u_char errorPhotoNoCnt; /* count of wrong packet numbers in replies (e.g. asked for 6, got 3) */
    u_char RephotographCnt; /* retake count (only when reading photo data fails) */
} SRDT_DEF;
static void PortDataProcess( void );
static LONG get_msec();
int serial_port_comm();
static int weather_comm(SERIAL_PARAM weatherport);
static void setRS485Enable(bool z);
static void set485WriteMode();
static void set485ReadMode();
static void set12VEnable(bool z);
static void setCam3V3Enable(bool enabled);
// 串口相关的所有函数定义
/* 打开串口电源*/
void Gm_OpenSerialPower();
// 打开传感器电源
void Gm_OpenSensorsPower();
// 关闭传感器电源
void Gm_CloseSensorsPower(int port);
// 打开串口通讯
void Gm_OpenSerialPort(int devidx);
// 关闭串口通讯
void Gm_CloseSerialPort();
void DebugLog(int commid, char *szbuf, char flag);
int SaveLogTofile(int commid, char *szbuf);
// 功能说明:串口发送数据 返回实际发送的字节数
int GM_SerialComSend(const unsigned char * cSendBuf, LONG nSendLen, int commid);
void Gm_InitSerialComm(SENSOR_PARAM *sensorParam, char *filedir);
// 启动串口通讯
void GM_StartSerialComm();
// 启动使用串口拍照
void GM_StartSerialCameraPhoto(BYTE channel, int cmdidx);
void delete_old_files(const char *path, int days);
// 串口轮询通讯定时器
int GM_SerialTimer();
//轮询所有串口和传感器是否需要生成下发命令
void Gm_FindAllSensorsCommand();
//检查所有传感器是否采集完毕,采集完毕的关闭传感器电源
void GM_IsCloseSensors();
//检查所有串口是否有数据接收,有则启动接收
void GM_AllSerialComRecv();
//判断是否需要关闭定时器
int GM_CloseTimer();
void testComm();
void Gm_InitSerialComm_Test();
// 串口接收数据处理
void SerialDataProcess(int devidx, u_char *buf, int len);
void CameraRecvData(int commid, u_char *buf, int len);
// 串口摄像机数据处理
void CameraPhotoPortDataProcess( int port);
// 发送命令
void SendCmdFormPollCmdBuf( int port );
// 清除发送命令的所有标识
void ClearCmdAllFlag(int commid);
// 下发串口拍照指令控制
int FindNextCameraPhotoCommand(int devidx);
// 生成 CameraPhoto命令
void MakeCameraPhotoCommand( int portno, BYTE cmdidx, int OneParam, WORD TwoParam, BYTE Threep, int phototime);
// 清除命令缓冲区
void ClearCmdFormPollCmdBuf(int port);
// 准备发送云台指令
int Gm_CtrlPtzCmd(u_char channel, DWORD ptzcmd);
// 发送转动摄像机云台命令定时器
int Gm_Camera_Timer();
// 生成 PELCO_P 命令 *
void Gm_SendPelco_pCommand( DWORD cmdtype);
// 计算Pelco_p校验
BYTE Gm_Pelco_pXORCheck( BYTE *msg, int len );
// 生成 PELCO_D 命令 *
void Gm_SendPelco_DCommand( DWORD cmdtype);
// 计算Pelco_D校验
BYTE Gm_Pelco_DCheck( BYTE *msg, int len );
// 查询传感器电源状态
char Gm_GetSensorsPowerState(int port);
// 通过传感器使用的航空头查找传感器使用的串口序号
void FindDevUseSerialCommNo();
// 寻找并生成下一条倾角命令
int FindNextShxyProtocolCommand( int devidx );
// 倾角命令校验码计算
unsigned char CalLpc(unsigned char *msg, int len);
// 读上海欣影传感器协议数据
void ShxyProtocolRecvData(int commid, u_char *buf, int len);
// 检查检验和是否正确
int CheckShxyProtocolLpcError( u_char* msg, int len );
// 把16进制和10进制ASCII字符串转换成int整数
int ATOI(char *buf);
//生成倾角命令
void MakeShxyProtocolPollCommand(int portno, BYTE cmdidx);
// 上海欣影传感器协议数据处理
void ShxyProtocolDataProcess( int commid);
// 控制关闭传感器电源
//void Gm_CtrlCloseSensorsPower(int devidx);
// 检查传感器电源是否应该关闭或打开
//void Gm_CheckSensorsPower(void);
int SaveImageDataTofile(int devno);
void Collect_sensor_data();
void CameraPhotoCmd(int phototime, u_char channel, int cmdidx, u_char bImageSize, u_char presetno);
/* 数据和图片采集数据返回函数 开始*/
int GetWeatherData(Data_DEF *data, int datano);
int GetAirTempData(Data_DEF *airt);
int GetHumidityData(Data_DEF *airt);
int GetWindSpeedData(Data_DEF *airt);
int GetWindDirectionData(Data_DEF *airt);
int GetRainfallData(Data_DEF *airt);
int GetAtmosData(Data_DEF *airt);
int GetOpticalRadiationData(Data_DEF *airt);
int GetPullValue(int devno, Data_DEF *data);
int GetAngleValue(int devno, Data_DEF *data, int Xy);
int GetImage(int devno, IMAGE_DEF *photo);
/* 数据和图片采集数据返回函数 结束*/
// 生成一个随机整数
int GeneratingRandomNumber();
#endif //WEATHERCOMM_H

@ -15,7 +15,7 @@
#include <termios.h>
#include <time.h>
#include "GPIOControl.h"
#include "serialComm.h"
#include "SerialComm.h"
static void set_baudrate (struct termios *opt, unsigned int baudrate)

@ -17,6 +17,11 @@
#ifndef __CAMERA2_HELPER_H__
#define __CAMERA2_HELPER_H__
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui.hpp>
#include "mat.h"
template <typename T>
class RangeValue {
@ -103,4 +108,107 @@ private:
};
// Converts a YUV420sp buffer from the camera into a 3-channel cv::Mat of size
// (orgWidth x orgHeight), first rotating it according to the device rotation
// and sensor orientation, then color-converting via ncnn::yuv420sp2rgb_nv12,
// and finally resizing or center-cropping to the requested output size.
// Params:
//   nv21                 - input YUV420sp pixel data
//   nv21_width/height    - dimensions of the input buffer
//   orgWidth/orgHeight   - requested output dimensions (swapped internally for
//                          90/270-degree rotations)
//   sensorOrientation    - camera sensor orientation in degrees
//   front                - true for the front-facing camera (mirrored handling)
//   rotation             - device rotation step; 0 means no rotation pass
//   rgb                  - output Mat (CV_8UC3)
// NOTE(review): despite the name "rgb", the channel order depends on
// ncnn::yuv420sp2rgb_nv12's output convention — confirm against callers.
inline void ConvertYUV21ToMat(const uint8_t* nv21, int nv21_width, int nv21_height, int orgWidth, int orgHeight,
        int sensorOrientation, bool front, int rotation, cv::Mat& rgb)
{
    int w = 0;
    int h = 0;
    int rotate_type = 0;
    cv::Mat nv21_rotated;
    const unsigned char* yuv420data = nv21;
    if (rotation != 0)
    {
        // Combine sensor orientation with the device rotation; the front camera
        // direction is inverted (mirrored).
        int co = 0;
        if (front)
        {
            co = (sensorOrientation + (rotation - 1) * 90) % 360;
            co = (360 - co) % 360;
        }
        else
        {
            co = (sensorOrientation - (rotation - 1) * 90 + 360) % 360;
        }
        // XYLOG(XYLOG_SEVERITY_DEBUG, "Orientation=%d Facing=%d", co, camera_facing);
        // int co = 0;
        // Map the combined angle to an ncnn kanna_rotate type; for 90/270 the
        // width/height (and the requested output size) are swapped.
        // NOTE(review): rotate_type values follow ncnn's kanna_rotate convention
        // (1-8, including mirrored variants) — verify against ncnn docs.
        if (co == 0)
        {
            w = nv21_width;
            h = nv21_height;
            rotate_type = front ? 2 : 1;
        }
        else if (co == 90)
        {
            w = nv21_height;
            h = nv21_width;
            int tmp = orgWidth;
            orgWidth = orgHeight;
            orgHeight = tmp;
            rotate_type = front ? 5 : 6;
        }
        else if (co == 180)
        {
            w = nv21_width;
            h = nv21_height;
            rotate_type = front ? 4 : 3;
        }
        else if (co == 270)
        {
            w = nv21_height;
            h = nv21_width;
            int tmp = orgWidth;
            orgWidth = orgHeight;
            orgHeight = tmp;
            rotate_type = front ? 7 : 8;
        }
        // YUV420sp needs h + h/2 rows of w bytes.
        nv21_rotated.create(h + h / 2, w, CV_8UC1);
        ncnn::kanna_rotate_yuv420sp(nv21, nv21_width, nv21_height, nv21_rotated.data, w, h, rotate_type);
        yuv420data = nv21_rotated.data;
    }
    else
    {
        w = nv21_width;
        h = nv21_height;
    }

    // nv21_rotated to rgb
    if (w == orgWidth && h == orgHeight)
    {
        // Exact size match: convert directly into the output.
        rgb.create(h, w, CV_8UC3);
        // ncnn::yuv420sp2rgb(nv21_rotated.data, w, h, rgb.data);
        ncnn::yuv420sp2rgb_nv12(yuv420data, w, h, rgb.data);
    }
    else
    {
        cv::Mat org(h, w, CV_8UC3);
        ncnn::yuv420sp2rgb_nv12(yuv420data, w, h, org.data);
        if (w * orgHeight == h * orgWidth) // Same Ratio
        {
            cv::resize(org, rgb, cv::Size(orgWidth, orgHeight));
        }
        else
        {
            // Crop image
            if (w > orgWidth && h >= orgHeight)
            {
                // Center-crop to the requested size (rgb is a view into org).
                int left = (w - orgWidth) / 2;
                int top = (h - orgHeight) / 2;
                rgb = org(cv::Range(top, top + orgHeight), cv::Range(left, left + orgWidth));
            }
            else
            {
                // Source smaller than requested in at least one dimension:
                // return the converted frame as-is.
                rgb = org;
            }
        }
    }
}
#endif /* __CAMERA2_HELPER_H__ */

@ -9,6 +9,7 @@
using namespace std;
using namespace cv;
// https://zhuanlan.zhihu.com/p/38176640
void Debevec(vector<Mat>exposureImages, vector<float>exposureTimes, Mat& output);
void Robertson(vector<Mat>exposureImages, vector<float>exposureTimes, Mat& output);

@ -1,3 +1,4 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*

File diff suppressed because it is too large Load Diff

@ -38,6 +38,8 @@ static const uint64_t kMaxExposureTime = static_cast<uint64_t>(250000000);
#define WAIT_AWB_LOCKED 2
#define WAIT_AF_LOCKED 4
#define PREVIEW_REQUEST_IDX 0
class CameraManager
{
public:
@ -79,6 +81,7 @@ public:
unsigned int orientation:3;
unsigned int zoom : 1;
unsigned int wait3ALocked : 3;
unsigned int burstRawCapture : 1;
unsigned int reserved : 18;
int64_t exposureTime;
unsigned int sensitivity;
@ -86,6 +89,7 @@ public:
float zoomRatio;
uint8_t requestTemplate;
uint8_t awbMode;
uint8_t burstCaptures;
unsigned short focusTimeout; // milli-seconds 65535
};
@ -102,8 +106,8 @@ public:
int32_t compensation;
uint8_t sceneMode;
uint8_t awbMode;
uint16_t avgY;
float zoomRatio;
uint8_t avgY;
uint64_t duration;
int64_t frameDuration;
@ -112,6 +116,28 @@ public:
uint8_t afLockSetted : 1;
};
struct CaptureRequest
{
/* For image capture */
NdkCamera* pThis;
AImageReader* imageReader;
ANativeWindow* imageWindow;
ACameraOutputTarget* imageTarget;
ACaptureSessionOutput* sessionOutput;
ACaptureRequest* request;
ACameraDevice_request_template templateId;
int sessionSequenceId;
};
struct CaptureResult
{
ACameraMetadata* result;
AImage* image;
int sequenceId;
};
NdkCamera(int32_t width, int32_t height, const CAMERA_PARAMS& params);
virtual ~NdkCamera();
@ -120,35 +146,56 @@ public:
void close();
int selfTest(const std::string& cameraId, int32_t& maxResolutionX, int32_t& maxResolutionY);
static void writeJpegFile(AImage *image, const char* path);
static void writeRawFile(AImage *image, ACameraMetadata* characteristics, ACameraMetadata* result, const char* path);
void onAvailabilityCallback(const char* cameraId);
void onUnavailabilityCallback(const char* cameraId);
void onImageAvailable(AImageReader* reader);
virtual void onImageAvailable(AImageReader* reader);
virtual int32_t getOutputFormat() const;
virtual int32_t getBurstCaptures() const;
void CreateSession(ANativeWindow* previewWindow, ANativeWindow* jpgWindow, bool manaulPreview, int32_t imageRotation, int32_t width, int32_t height);
void CreateSession(ANativeWindow* previewWindow);
CaptureRequest* CreateRequest(bool isPreviewRequest);
void DestroySession();
virtual bool on_image(cv::Mat& rgb);
virtual void on_error(const std::string& msg);
virtual void on_image(const unsigned char* nv21, int nv21_width, int nv21_height);
virtual void onDisconnected(ACameraDevice* device);
virtual bool onBurstCapture(std::shared_ptr<ACameraMetadata> characteristics, std::vector<std::shared_ptr<ACameraMetadata> >& results, uint32_t ldr, std::vector<std::shared_ptr<AImage> >& frames);
void onCaptureProgressed(ACameraCaptureSession* session, ACaptureRequest* request, const ACameraMetadata* result);
void onCaptureCompleted(ACameraCaptureSession* session, ACaptureRequest* request, const ACameraMetadata* result);
void onCaptureFailed(ACameraCaptureSession* session, ACaptureRequest* request, ACameraCaptureFailure* failure);
void onSessionReady(ACameraCaptureSession *session);
void onError(ACameraDevice* device, int error);
void CopyPreviewRequest(ACaptureRequest* request, const ACameraMetadata* previewResult);
const CAPTURE_RESULT& getCaptureResult() const
uint32_t GetLdr() const
{
return mFinalResult;
return mLdr;
}
bool IsCameraAvailable(const std::string& cameraId);
static bool convertAImageToNv21(AImage* image, uint8_t** nv21, int32_t& width, int32_t& height);
static void EnumCameraResult(ACameraMetadata* result, CAPTURE_RESULT& captureResult);
protected:
std::mutex m_locker;
std::set<std::string> m_availableCameras;
protected:
CAMERA_PARAMS m_params;
DisplayDimension foundRes;
int camera_facing;
int camera_orientation;
bool m_firstFrame;
bool m_photoTaken;
int32_t mWidth;
int32_t mHeight;
std::string mCameraId;
@ -173,10 +220,9 @@ protected:
int32_t activeArraySize[2];
int32_t maxRegions[3];
unsigned int m_imagesCaptured;
bool mCaptureTriggered;
CAPTURE_RESULT mResult;
CAPTURE_RESULT mFinalResult;
unsigned long long m_startTime;
protected:
@ -185,15 +231,35 @@ protected:
CameraManager camera_manager;
ACameraDevice* camera_device;
AImageReader* image_reader;
ANativeWindow* image_reader_surface;
ACameraOutputTarget* image_reader_target;
ACaptureRequest* capture_request;
ACaptureSessionOutputContainer* capture_session_output_container;
ACaptureSessionOutput* capture_session_output;
AImageReader* mPreviewImageReader;
ANativeWindow* mPreviewImageWindow;
ACameraOutputTarget* mPreviewOutputTarget;
ACaptureSessionOutput* mPreviewSessionOutput;
AImageReader* mImageReader;
ANativeWindow* mImageWindow;
ACameraOutputTarget* mOutputTarget;
ACaptureSessionOutput* mSessionOutput;
std::shared_ptr<ACameraMetadata> mCharacteristics;
std::vector<CaptureRequest*> mCaptureRequests;
std::shared_ptr<ACameraMetadata> mPreviewResults;
std::vector<std::shared_ptr<ACameraMetadata> > mCaptureResults;
uint32_t mLdr;
std::vector<std::shared_ptr<AImage> > mCaptureFrames;
ACameraCaptureSession* capture_session;
int captureSequenceId;
// AImageReader* image_reader;
// ANativeWindow* image_reader_surface;
// ACameraOutputTarget* image_reader_target;
// ACaptureRequest* capture_request;
// ACaptureSessionOutput* capture_session_output;
};
#endif // NDKCAMERA_H

@ -0,0 +1,38 @@
#pragma once
#include <vector>
#include <utility> // std::pair
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/burst.h"
namespace hdrplus
{

// Hierarchical tile-based alignment stage of the HDR+ pipeline.
// Declarations only; the implementation lives in align.cpp.
class align
{
    public:
        align() = default;
        ~align() = default;

        /**
         * @brief Run alignment on burst of images
         *
         * @param burst_images collection of burst images
         * @param aligements alignment in pixel value pair.
         *      Outer most vector is per alternative image.
         *      Middle vector is per tile row, inner vector per tile column
         *      (horizontal & vertical tiles).
         */
        void process( const hdrplus::burst& burst_images, \
                      std::vector<std::vector<std::vector<std::pair<int, int>>>>& aligements );

    private:
        // Per-pyramid-level settings, coarsest-to-finest handling is defined
        // by the implementation in align.cpp.
        // From original image to coarse image
        const std::vector<int> inv_scale_factors = { 1, 2, 4, 4 };
        const std::vector<int> distances = { 1, 2, 2, 2 }; // L1 / L2 distance
        const std::vector<int> grayimg_search_radious = { 1, 4, 4, 4 };
        const std::vector<int> grayimg_tile_sizes = { 16, 16, 16, 8 };
        const int num_levels = 4;
};

} // namespace hdrplus

@ -0,0 +1,36 @@
#pragma once
#include <string>
#include <vector>
#include <utility> // std::pair
#include <memory> // std::shared_ptr
#include <opencv2/opencv.hpp> // all opencv header
#include <libraw/libraw.h>
namespace hdrplus
{

// A single RAW (Bayer mosaic) frame decoded through LibRaw.
class bayer_image
{
    public:
        // Decode a RAW file from a filesystem path.
        explicit bayer_image( const std::string& bayer_image_path );
        // Decode a RAW file whose bytes are already in memory.
        explicit bayer_image( const std::vector<uint8_t>& bayer_image_content );
        ~bayer_image() = default;

        // Shot/read noise model parameters for this frame.
        // NOTE(review): presumably derived from the baseline lambdas below and
        // the frame's ISO -- confirm in the implementation.
        std::pair<double, double> get_noise_params() const;

        std::shared_ptr<LibRaw> libraw_processor;
        cv::Mat raw_image;        // Bayer mosaic data
        cv::Mat grayscale_image;  // grayscale companion used by alignment
        int width;
        int height;
        int white_level;
        std::vector<int> black_level_per_channel;
        float iso;

    private:
        // Baseline noise-model coefficients.
        float baseline_lambda_shot = 3.24 * pow( 10, -4 );
        float baseline_lambda_read = 4.3 * pow( 10, -6 );
};

} // namespace hdrplus

@ -0,0 +1,44 @@
#pragma once
#include <vector>
#include <string>
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/bayer_image.h"
namespace hdrplus
{

// A burst of RAW frames plus the padded working copies consumed by the
// align / merge / finish stages.
class burst
{
    public:
        // Construct from a directory of RAW files; the reference frame is
        // identified by its path.
        explicit burst( const std::string& burst_path, const std::string& reference_image_path );
        // Construct from an explicit list of RAW file paths; the reference
        // frame is identified by its index into that list.
        explicit burst(const std::vector<std::string>& burst_paths, int reference_image_index);
        // Construct from in-memory RAW file contents (one byte vector per
        // frame); the reference frame is identified by index.
        explicit burst( const std::vector<std::vector<uint8_t> >& bayer_image_contents, int reference_image_index );
        ~burst() = default;

        // Reference image index in the array
        int reference_image_idx;

        // Source bayer images & grayscale unpadded image
        std::vector<hdrplus::bayer_image> bayer_images;

        // Image padded to upper level tile size (16*2)
        // Use for alignment, merging, and finishing
        std::vector<cv::Mat> bayer_images_pad;

        // Padding information
        std::vector<int> padding_info_bayer;

        // Image padded to upper level tile size (16)
        // Use for alignment, merging, and finishing
        std::vector<cv::Mat> grayscale_images_pad;

        // number of image (including reference) in burst
        int num_images;

        // Bayer image after merging, stored as cv::Mat
        cv::Mat merged_bayer_image;
};

} // namespace hdrplus

@ -0,0 +1,240 @@
#pragma once
#include <opencv2/opencv.hpp> // all opencv header
#include <string>
#include <fstream>
#include <sstream>
#include <iostream>
#include <unordered_map>
#include <hdrplus/bayer_image.h>
#include <dirent.h>
#include <hdrplus/params.h>
#include <hdrplus/burst.h>
namespace hdrplus
{

// Gamma / tone-mapping helpers implemented in the corresponding .cpp.
uint16_t uGammaCompress_1pix(float x, float threshold,float gainMin,float gainMax,float exponent);
uint16_t uGammaDecompress_1pix(float x, float threshold,float gainMin,float gainMax,float exponent);
cv::Mat uGammaCompress_(cv::Mat m,float threshold,float gainMin,float gainMax,float exponent);
cv::Mat uGammaDecompress_(cv::Mat m,float threshold,float gainMin,float gainMax,float exponent);
cv::Mat gammasRGB(cv::Mat img, bool mode);

// Final stage of the HDR+ pipeline: turns the merged bayer image into the
// finished output image.
class finish
{
    public:
        cv::Mat mergedBayer;                  // merged image from Merge Module
        std::string burstPath;                // path to burst images
        std::vector<std::string> rawPathList; // paths of all burst .dng files under burstPath
        int refIdx = 0;                       // index of the reference img
        Parameters params;
        cv::Mat rawReference;
        // Owned raw pointer; only allocated by the local-testing constructor.
        // BUGFIX: previously left uninitialized by the other constructors.
        bayer_image* refBayer = nullptr;
        std::string mergedImgPath;

        finish() = default;

        // please use this initialization after merging part finish
        finish(std::string burstPath, cv::Mat mergedBayer,int refIdx){
            this->refIdx = refIdx;
            this->burstPath = burstPath;
            this->mergedBayer = mergedBayer;
        }

        // for local testing only
        finish(std::string burstPath, std::string mergedBayerPath,int refIdx){
            this->refIdx = refIdx;
            this->burstPath = burstPath;
            this->mergedBayer = loadFromCSV(mergedBayerPath, CV_16UC1);
            load_rawPathList(burstPath);
            refBayer = new bayer_image(this->rawPathList[refIdx]);
            this->rawReference = refBayer->raw_image;

            // initialize parameters in the reference image's LibRaw processor
            setLibRawParams();
            showParams();

            std::cout<<"Finish init() finished!"<<std::endl;
        }

        // BUGFIX: the defaulted destructor leaked refBayer.
        // NOTE(review): finish is still copyable; copying an instance that
        // owns refBayer would double-delete -- confirm no copies are made.
        ~finish(){ delete refBayer; }

        // finish pipeline func
        void process(const hdrplus::burst& burst_images, cv::Mat& finalOutputImage);

        // replace Mat A with Mat B
        void copy_mat_16U(cv::Mat& A, cv::Mat B);
        void copy_rawImg2libraw(std::shared_ptr<LibRaw>& libraw_ptr, cv::Mat B);

        // Debug helper: scale a 16-bit image down to 8 bits, display it and
        // dump it to "test2.jpg".
        void showImg(cv::Mat img)
        {
            int ch = CV_MAT_CN(CV_8UC1);
            // BUGFIX: cv::Mat tmp(img) shares the pixel buffer with the
            // caller's matrix, so the in-place scaling below destroyed the
            // input data. Work on a deep copy instead.
            cv::Mat tmp = img.clone();
            u_int16_t* ptr = (u_int16_t*)tmp.data;
            for(int r = 0; r < tmp.rows; r++) {
                for(int c = 0; c < tmp.cols; c++) {
                    // map [0, 2048) to [0, 255)
                    *(ptr+r*tmp.cols+c) = *(ptr+r*tmp.cols+c)/2048.0*255.0;
                }
            }
            tmp = tmp.reshape(ch);
            tmp.convertTo(tmp, CV_8UC1);
            cv::imshow("test",tmp);
            cv::imwrite("test2.jpg", tmp);
            cv::waitKey(0);
            std::cout<< this->mergedBayer.size()<<std::endl;
        }

        // Debug helper: print size and type of a matrix.
        void showMat(cv::Mat img){
            std::cout<<"size="<<img.size()<<std::endl;
            std::cout<<"type="<<img.type()<<std::endl;
        }

        // Dump the current tuning parameters and (if available) the LibRaw
        // postprocessing parameters of the reference image.
        void showParams()
        {
            std::cout<<"Parameters:"<<std::endl;
            std::cout<<"tuning_ltmGain = "<<this->params.tuning.ltmGain<<std::endl;
            std::cout<<"tuning_gtmContrast = "<<this->params.tuning.gtmContrast<<std::endl;
            for(auto key_val:this->params.flags){
                std::cout<<key_val.first<<","<<key_val.second<<std::endl;
            }
            // BUGFIX: guard against a default-constructed instance where
            // refBayer was never allocated (previously a null dereference).
            if (refBayer != nullptr) {
                std::cout<<"demosaic_algorithm = "<<refBayer->libraw_processor->imgdata.params.user_qual<<std::endl;
                std::cout<<"half_size = "<<refBayer->libraw_processor->imgdata.params.half_size<<std::endl;
                std::cout<<"use_camera_wb = "<<refBayer->libraw_processor->imgdata.params.use_camera_wb<<std::endl;
                std::cout<<"use_auto_wb = "<<refBayer->libraw_processor->imgdata.params.use_auto_wb<<std::endl;
                std::cout<<"no_auto_bright = "<<refBayer->libraw_processor->imgdata.params.no_auto_bright<<std::endl;
                std::cout<<"output_color = "<<refBayer->libraw_processor->imgdata.params.output_color <<std::endl;
                std::cout<<"gamma[0] = "<<refBayer->libraw_processor->imgdata.params.gamm[0]<<std::endl;
                std::cout<<"gamma[1] = "<<refBayer->libraw_processor->imgdata.params.gamm[1]<<std::endl;
                std::cout<<"output_bps = "<<refBayer->libraw_processor->imgdata.params.output_bps<<std::endl;
            }
            std::cout<<"===================="<<std::endl;
        }

        // Dump the discovered burst file paths.
        void showRawPathList(){
            std::cout<<"RawPathList:"<<std::endl;
            for(auto pth:this->rawPathList){
                std::cout<<pth<<std::endl;
            }
            std::cout<<"===================="<<std::endl;
        }

    private:
        // Load a comma-separated matrix dump; each CSV line becomes one row
        // of the resulting matrix, which is then converted to opencv_type.
        cv::Mat loadFromCSV(const std::string& path, int opencv_type)
        {
            cv::Mat m;
            std::ifstream csvFile (path);
            std::string line;
            while (getline(csvFile, line))
            {
                // BUGFIX: collect values as double -- the original stored
                // stod() results in a vector<int>, silently truncating any
                // fractional CSV values.
                std::vector<double> dvals;
                std::stringstream ss(line);
                std::string val;
                while (getline(ss, val, ','))
                {
                    dvals.push_back(stod(val));
                }
                cv::Mat mline(dvals, true);
                cv::transpose(mline, mline);
                m.push_back(mline);
            }
            int ch = CV_MAT_CN(opencv_type);
            m = m.reshape(ch);
            m.convertTo(m, opencv_type);
            return m;
        }

        // Collect all *.dng files directly under burstPath into rawPathList.
        void load_rawPathList(std::string burstPath){
            DIR *pDir; // pointer to root
            struct dirent *ptr;
            if (!(pDir = opendir(burstPath.c_str()))) {
                std::cout<<"root dir not found!"<<std::endl;
                return;
            }
            while ((ptr = readdir(pDir)) != nullptr) {
                // BUGFIX: the original `return`ed (aborting the whole scan
                // and leaking the DIR handle) on the first entry that was
                // neither a regular file nor a directory; skip such entries
                // instead. Only regular files can be RAW images, and "." /
                // ".." are directories, so a DT_REG filter suffices.
                if (ptr->d_type != DT_REG) {
                    continue;
                }
                if (strstr(ptr->d_name, ".dng")) {
                    rawPathList.emplace_back(burstPath + "/" + ptr->d_name);
                }
            }
            // close root dir
            closedir(pDir);
        }

        // Copy params.rawpyArgs into the reference image's LibRaw processor.
        void setLibRawParams(){
            if (refBayer == nullptr) {
                return; // nothing to configure yet
            }
            refBayer->libraw_processor->imgdata.params.user_qual = params.rawpyArgs.demosaic_algorithm;
            refBayer->libraw_processor->imgdata.params.half_size = params.rawpyArgs.half_size;
            refBayer->libraw_processor->imgdata.params.use_camera_wb = params.rawpyArgs.use_camera_wb;
            refBayer->libraw_processor->imgdata.params.use_auto_wb = params.rawpyArgs.use_auto_wb;
            refBayer->libraw_processor->imgdata.params.no_auto_bright = params.rawpyArgs.no_auto_bright;
            refBayer->libraw_processor->imgdata.params.output_color = params.rawpyArgs.output_color;
            refBayer->libraw_processor->imgdata.params.gamm[0] = params.rawpyArgs.gamma[0];
            refBayer->libraw_processor->imgdata.params.gamm[1] = params.rawpyArgs.gamma[1];
            refBayer->libraw_processor->imgdata.params.output_bps = params.rawpyArgs.output_bps;
        }
};

} // namespace hdrplus

@ -0,0 +1,29 @@
#pragma once
#include <string>
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/burst.h"
#include "hdrplus/align.h"
#include "hdrplus/merge.h"
#include "hdrplus/finish.h"
namespace hdrplus
{

// Top-level driver wiring the align -> merge -> finish stages together.
class hdrplus_pipeline
{
    private:
        hdrplus::align align_module;
        hdrplus::merge merge_module;
        hdrplus::finish finish_module;

    public:
        // Run on a directory of RAW files; the reference frame is given by path.
        void run_pipeline( const std::string& burst_path, const std::string& reference_image_path );
        // Run on an explicit list of RAW file paths; the result is written to
        // finalImg. NOTE(review): the bool presumably reports success --
        // confirm against the implementation.
        bool run_pipeline( const std::vector<std::string>& burst_paths, int reference_image_index, cv::Mat& finalImg );
        // Same as above, but the RAW file contents are already in memory.
        bool run_pipeline( const std::vector<std::vector<uint8_t> >& burst_contents, int reference_image_index, cv::Mat& finalImg );

        hdrplus_pipeline() = default;
        ~hdrplus_pipeline() = default;
};

} // namespace hdrplus

@ -0,0 +1,184 @@
#pragma once
#include <vector>
#include <opencv2/opencv.hpp> // all opencv header
#include <cmath>
#include "hdrplus/burst.h"
#define TILE_SIZE 16
#define TEMPORAL_FACTOR 75
#define SPATIAL_FACTOR 0.1
namespace hdrplus
{

// Tile-based temporal + spatial merge stage of the HDR+ pipeline.
class merge
{
    public:
        int offset = TILE_SIZE / 2;
        // Baseline noise-model coefficients (same values as bayer_image).
        float baseline_lambda_shot = 3.24 * pow( 10, -4 );
        float baseline_lambda_read = 4.3 * pow( 10, -6 );

        merge() = default;
        ~merge() = default;

        /**
         * @brief Run merge on burst of images
         *
         * @param burst_images collection of burst images
         * @param alignments alignment in pixel value pair.
         *      Outer most vector is per alternative image.
         *      Inner most two vector is for horizontal & vertical tiles
         */
        void process( hdrplus::burst& burst_images, \
            std::vector<std::vector<std::vector<std::pair<int, int>>>>& alignments);

        /*
        std::vector<cv::Mat> get_other_tiles(); //return the other tile list T_1 to T_n
        std::vector<cv::Mat> vector_math(string operation, reference_tile, other_tile_list); //for loop allowing operations across single element and list
        std::vector<cv::Mat> scalar_vector_math(string operation, scalar num, std::vector<cv::Mat> tile_list); //for loop allowing operations across single element and list
        std::vector<cv::Mat> average_vector(std::vector<cv::Mat> tile_list); //take average of vector elements
        */

    private:
        // Root-mean-square of a single-channel tile.
        float tileRMS(cv::Mat tile) {
            cv::Mat squared;
            cv::multiply(tile, tile, squared);
            return sqrt(cv::mean(squared)[0]);
        }

        // Per-tile noise variance: lambda_shot * RMS(tile) + lambda_read.
        std::vector<float> getNoiseVariance(std::vector<cv::Mat> tiles, float lambda_shot, float lambda_read) {
            std::vector<float> noise_variance;
            for (auto tile : tiles) {
                noise_variance.push_back(lambda_shot * tileRMS(tile) + lambda_read);
            }
            return noise_variance;
        }

        // Raised-cosine window evaluated at the sample positions stored in
        // `input` (a 1 x window_size row vector of float indices).
        cv::Mat cosineWindow1D(cv::Mat input, int window_size = TILE_SIZE) {
            cv::Mat output = input.clone();
            for (int i = 0; i < input.cols; ++i) {
                output.at<float>(0, i) = 1. / 2. - 1. / 2. * cos(2 * M_PI * (input.at<float>(0, i) + 1 / 2.) / window_size);
            }
            return output;
        }

        // Separable 2-D raised-cosine window applied to a square tile
        // (CV_32F result).
        cv::Mat cosineWindow2D(cv::Mat tile) {
            int window_size = tile.rows; // Assuming square tile
            cv::Mat output_tile = tile.clone();

            cv::Mat window = cv::Mat::zeros(1, window_size, CV_32F);
            for(int i = 0; i < window_size; ++i) {
                window.at<float>(i) = i;
            }

            cv::Mat window_x = cosineWindow1D(window, window_size);
            window_x = cv::repeat(window_x, window_size, 1);
            cv::Mat window_2d = window_x.mul(window_x.t());

            cv::Mat window_applied;
            cv::multiply(tile, window_2d, window_applied, 1, CV_32F);
            return window_applied;
        }

        // Concatenate a 2-D grid of equally sized tiles into one image.
        cv::Mat cat2Dtiles(std::vector<std::vector<cv::Mat>> tiles) {
            std::vector<cv::Mat> rows;
            for (auto row_tiles : tiles) {
                cv::Mat row;
                cv::hconcat(row_tiles, row);
                rows.push_back(row);
            }
            cv::Mat img;
            cv::vconcat(rows, img);
            return img;
        }

        // Circularly shift `out` by delta (positive = down / right), handling
        // each channel separately.
        void circshift(cv::Mat &out, const cv::Point &delta)
        {
            cv::Size sz = out.size();

            // error checking
            assert(sz.height > 0 && sz.width > 0);

            // no need to shift
            if ((sz.height == 1 && sz.width == 1) || (delta.x == 0 && delta.y == 0))
                return;

            // delta transform: normalize the shift into [0, size)
            int x = delta.x;
            int y = delta.y;
            if (x > 0) x = x % sz.width;
            if (y > 0) y = y % sz.height;
            if (x < 0) x = x % sz.width + sz.width;
            if (y < 0) y = y % sz.height + sz.height;
            // NOTE(review): if exactly one of x / y normalizes to 0, the ROIs
            // below become zero-sized; OpenCV tolerates empty ROIs, but
            // confirm this path behaves as intended.

            // in case of multiple dimensions
            std::vector<cv::Mat> planes;
            split(out, planes);

            for (size_t i = 0; i < planes.size(); i++)
            {
                // vertical: swap the top (height - y) rows with the bottom y rows
                cv::Mat tmp0, tmp1, tmp2, tmp3;
                cv::Mat q0(planes[i], cv::Rect(0, 0, sz.width, sz.height - y));
                cv::Mat q1(planes[i], cv::Rect(0, sz.height - y, sz.width, y));
                q0.copyTo(tmp0);
                q1.copyTo(tmp1);
                tmp0.copyTo(planes[i](cv::Rect(0, y, sz.width, sz.height - y)));
                tmp1.copyTo(planes[i](cv::Rect(0, 0, sz.width, y)));

                // horizontal: swap the left (width - x) cols with the right x cols
                cv::Mat q2(planes[i], cv::Rect(0, 0, sz.width - x, sz.height));
                cv::Mat q3(planes[i], cv::Rect(sz.width - x, 0, x, sz.height));
                q2.copyTo(tmp2);
                q3.copyTo(tmp3);
                tmp2.copyTo(planes[i](cv::Rect(x, 0, sz.width - x, sz.height)));
                tmp3.copyTo(planes[i](cv::Rect(0, 0, x, sz.height)));
            }

            cv::merge(planes, out);
        }

        // Move the zero-frequency component to the center (floor for odd sizes).
        void fftshift(cv::Mat &out)
        {
            cv::Size sz = out.size();
            cv::Point pt(0, 0);
            pt.x = (int) floor(sz.width / 2.0);
            pt.y = (int) floor(sz.height / 2.0);
            circshift(out, pt);
        }

        // Inverse of fftshift (uses ceil instead of floor for odd sizes).
        void ifftshift(cv::Mat &out)
        {
            cv::Size sz = out.size();
            cv::Point pt(0, 0);
            pt.x = (int) ceil(sz.width / 2.0);
            pt.y = (int) ceil(sz.height / 2.0);
            circshift(out, pt);
        }

        std::vector<cv::Mat> getReferenceTiles(cv::Mat reference_image);

        cv::Mat mergeTiles(std::vector<cv::Mat> tiles, int rows, int cols);

        cv::Mat processChannel( hdrplus::burst& burst_images, \
            std::vector<std::vector<std::vector<std::pair<int, int>>>>& alignments, \
            cv::Mat channel_image, \
            std::vector<cv::Mat> alternate_channel_i_list,\
            float lambda_shot, \
            float lambda_read);

        //temporal denoise
        std::vector<cv::Mat> temporal_denoise(std::vector<cv::Mat> tiles, std::vector<std::vector<cv::Mat>> alt_tiles, std::vector<float> noise_variance, float temporal_factor);
        std::vector<cv::Mat> spatial_denoise(std::vector<cv::Mat> tiles, int num_alts, std::vector<float> noise_variance, float spatial_factor);

};

} // namespace hdrplus

@ -0,0 +1,69 @@
#pragma once
#include <string>
#include <unordered_map>
#include <memory> // std::shared_ptr
#include <opencv2/opencv.hpp> // all opencv header
#include <libraw/libraw.h>
namespace hdrplus
{

// Mirrors rawpy's postprocess arguments; these values are copied into
// LibRaw's imgdata.params before demosaicing (see finish::setLibRawParams).
class RawpyArgs{
    public:
        int demosaic_algorithm = 3;// 3 - AHD interpolation <->int user_qual
        bool half_size = false;
        bool use_camera_wb = true;
        bool use_auto_wb = false;
        bool no_auto_bright = true;
        int output_color = LIBRAW_COLORSPACE_sRGB;
        int gamma[2] = {1,1}; //# gamma correction not applied by rawpy (not quite understand)
        int output_bps = 16;
};

// Command-line style options for a pipeline run.
class Options{
    public:
        std::string input = "";
        std::string output = "";
        std::string mode = "full"; //'full' 'align' 'merge' 'finish'
        int reference = 0;
        float temporalfactor=75.0;
        float spatialfactor = 0.1;
        int ltmGain=-1;
        double gtmContrast=0.075;
        int verbose=2; // (0, 1, 2, 3, 4, 5)
};

// Tuning knobs for the finishing stage (tone mapping / sharpening).
class Tuning{
    public:
        std::string ltmGain = "auto";
        double gtmContrast = 0.075;
        std::vector<float> sharpenAmount{1,0.5,0.5};
        std::vector<float> sharpenSigma{1,2,4};
        std::vector<float> sharpenThreshold{0.02,0.04,0.06};
};

// Aggregate of all pipeline parameters. The constructor enables every
// intermediate-output flag by default.
class Parameters{
    public:
        std::unordered_map<std::string,bool> flags;

        RawpyArgs rawpyArgs;
        Options options;
        Tuning tuning;

        Parameters()
        {
            const char* keys[] = {"writeReferenceImage", "writeGammaReference", "writeMergedImage", "writeGammaMerged",
                                  "writeShortExposure", "writeLongExposure", "writeFusedExposure", "writeLTMImage",
                                  "writeLTMGamma", "writeGTMImage", "writeReferenceFinal", "writeFinalImage"};
            for (int idx = 0; idx < sizeof(keys) / sizeof(const char*); idx++) {
                flags[keys[idx]] = true;
            }
        }
};

// LibRaw-based demosaic/postprocess helpers implemented in the .cpp.
cv::Mat postprocess(std::shared_ptr<LibRaw>& libraw_ptr, RawpyArgs rawpyArgs);
void setParams(std::shared_ptr<LibRaw>& libraw_ptr, RawpyArgs rawpyArgs);

} // namespace hdrplus

@ -0,0 +1,326 @@
#pragma once
#include <string>
#include <stdexcept> // std::runtime_error
#include <opencv2/opencv.hpp> // all opencv header
#include <omp.h>
// https://stackoverflow.com/questions/63404539/portable-loop-unrolling-with-template-parameter-in-c-with-gcc-icc
/// Helper macros for stringification
#define TO_STRING_HELPER(X) #X
#define TO_STRING(X) TO_STRING_HELPER(X)
// Define loop unrolling depending on the compiler
#if defined(__ICC) || defined(__ICL)
#define UNROLL_LOOP(n) _Pragma(TO_STRING(unroll (n)))
#elif defined(__clang__)
#define UNROLL_LOOP(n) _Pragma(TO_STRING(unroll (n)))
#elif defined(__GNUC__) && !defined(__clang__)
#define UNROLL_LOOP(n) _Pragma(TO_STRING(GCC unroll (16)))
#elif defined(_MSC_BUILD)
#pragma message ("Microsoft Visual C++ (MSVC) detected: Loop unrolling not supported!")
#define UNROLL_LOOP(n)
#else
#warning "Unknown compiler: Loop unrolling not supported!"
#define UNROLL_LOOP(n)
#endif
namespace hdrplus
{
// Non-overlapping kernel x kernel box filter (average-pool with stride ==
// kernel). Output size is floor(src / kernel) in each dimension.
template <typename T, int kernel>
cv::Mat box_filter_kxk( const cv::Mat& src_image )
{
    const T* src_image_ptr = (T*)src_image.data;
    int src_height = src_image.size().height;
    int src_width = src_image.size().width;
    int src_step = src_image.step1();

    if ( kernel <= 0 )
    {
#ifdef __ANDROID__
        // On Android an invalid kernel yields an empty matrix instead of throwing.
        return cv::Mat();
#else
        throw std::runtime_error(std::string( __FILE__ ) + "::" + __func__ + " box filter only support kernel size >= 1");
#endif
    }

    // int(src_height / kernel) = floor(src_height / kernel)
    // When input size is not multiplier of kernel, take floor
    cv::Mat dst_image( src_height / kernel, src_width / kernel, src_image.type() );
    T* dst_image_ptr = (T*)dst_image.data;
    int dst_height = dst_image.size().height;
    int dst_width = dst_image.size().width;
    int dst_step = dst_image.step1();

    for ( int row_i = 0; row_i < dst_height; ++row_i )
    {
        for ( int col_i = 0; col_i < dst_width; col_i++ )
        {
            // Sum the kernel x kernel window.
            // NOTE(review): box_sum has type T, so for integer T the sum can
            // overflow with large kernels / bright pixels -- confirm callers
            // only use small kernels.
            T box_sum = T( 0 );

            UNROLL_LOOP( kernel )
            for ( int kernel_row_i = 0; kernel_row_i < kernel; ++kernel_row_i )
            {
                UNROLL_LOOP( kernel )
                for ( int kernel_col_i = 0; kernel_col_i < kernel; ++kernel_col_i )
                {
                    box_sum += src_image_ptr[ ( row_i * kernel + kernel_row_i ) * src_step + ( col_i * kernel + kernel_col_i ) ];
                }
            }

            // Average. For integer T this truncates (the original comment
            // claimed "ceiling", which the code does not do).
            T box_avg = box_sum / T( kernel * kernel );
            dst_image_ptr[ row_i * dst_step + col_i ] = box_avg;
        }
    }

    return dst_image;
}
// Nearest-neighbour decimation: keep the top-left sample of every
// kernel x kernel cell. Output is floor(h/kernel) x floor(w/kernel),
// same element type as the input.
template <typename T, int kernel>
cv::Mat downsample_nearest_neighbour( const cv::Mat& src_image )
{
    const T* src = (T*)src_image.data;
    const int src_stride = src_image.step1();

    // int(src / kernel) == floor(src / kernel) for non-negative sizes
    cv::Mat dst_image = cv::Mat( src_image.size().height / kernel,
                                 src_image.size().width / kernel,
                                 src_image.type() );
    T* dst = (T*)dst_image.data;
    const int dst_stride = dst_image.step1();
    const int dst_h = dst_image.size().height;
    const int dst_w = dst_image.size().width;

    // -O3 should be enough to optimize below code
    for ( int y = 0; y < dst_h; ++y )
    {
        const T* src_row = src + ( y * kernel ) * src_stride;
        T* dst_row = dst + y * dst_stride;

        UNROLL_LOOP( 32 )
        for ( int x = 0; x < dst_w; ++x )
        {
            dst_row[ x ] = src_row[ x * kernel ];
        }
    }

    return dst_image;
}
// Debug helper: print every element of a 1- or 3-channel matrix to stdout.
// NOTE(review): "%3.d" uses precision 0, so a value of exactly 0 prints as
// blank -- confirm that is intentional.
template< typename T >
void print_cvmat( cv::Mat image )
{
    const T* img_ptr = (const T*)image.data;
    int height = image.size().height;
    int width = image.size().width;
    int step = image.step1();
    int chns = image.channels();

    printf("print_cvmat()::Image of size height = %d, width = %d, step = %d\n", \
        height, width, step );

    if ( chns == 1 )
    {
        // single channel: one value per element
        for ( int row_i = 0; row_i < height; ++row_i )
        {
            int row_i_offset = row_i * step;
            for ( int col_i = 0; col_i < width; ++col_i )
            {
                printf("%3.d ", img_ptr[ row_i_offset + col_i ]);
                //printf("%3.d ", int( image.at<T>( row_i, col_i ) ) );
            }
            printf("\n");
        }
    }
    else if ( chns == 3 )
    {
        // interleaved 3-channel: print each pixel as a triple
        for ( int row_i = 0; row_i < height; ++row_i )
        {
            int row_i_offset = row_i * step;
            for ( int col_i = 0; col_i < width; ++col_i )
            {
                printf("[%3.d, %3.d, %3.d] ", img_ptr[ row_i_offset + col_i * 3 + 0 ], \
                                              img_ptr[ row_i_offset + col_i * 3 + 1 ], \
                                              img_ptr[ row_i_offset + col_i * 3 + 2 ] );
            }
            printf("\n");
        }
    }
    else
    {
#ifdef __ANDROID__
        // On Android unsupported channel counts are silently ignored.
#else
        throw std::runtime_error("cv::Mat number of channel currently not support to print\n");
#endif
    }
}
/**
 * @brief Extract the four Bayer sub-channels separately from a bayer image.
 *
 * @tparam T data type of bayer image.
 * @param bayer_img input mosaic; both dimensions must be even.
 * @param img_ch1..img_ch4 output channels at half resolution, taken from
 *      positions (0,0), (1,0), (0,1), (1,1) of each 2x2 cell respectively.
 *      OpenCV internally maintains reference counts, so this step won't
 *      create deep copy overhead.
 *
 * @example extract_rgb_from_bayer<uint16_t>( bayer_img, ch1, ch2, ch3, ch4 );
 */
template <typename T>
void extract_rgb_from_bayer( const cv::Mat& bayer_img, \
    cv::Mat& img_ch1, cv::Mat& img_ch2, cv::Mat& img_ch3, cv::Mat& img_ch4 )
{
    const T* bayer_img_ptr = (const T*)bayer_img.data;
    int bayer_width = bayer_img.size().width;
    int bayer_height = bayer_img.size().height;
    int bayer_step = bayer_img.step1();

    if ( bayer_width % 2 != 0 || bayer_height % 2 != 0 )
    {
#ifdef __ANDROID__
        // NOTE(review): on Android this error branch is empty and execution
        // falls through with odd dimensions (the trailing row/column is then
        // ignored by the floor division below) -- confirm that is intended.
#else
        throw std::runtime_error("Bayer image data size incorrect, must be multiplier of 2\n");
#endif
    }

    // Each output channel is half the size of the bayer image
    int rgb_width = bayer_width / 2;
    int rgb_height = bayer_height / 2;

    img_ch1.create( rgb_height, rgb_width, bayer_img.type() );
    img_ch2.create( rgb_height, rgb_width, bayer_img.type() );
    img_ch3.create( rgb_height, rgb_width, bayer_img.type() );
    img_ch4.create( rgb_height, rgb_width, bayer_img.type() );

    int rgb_step = img_ch1.step1();

    T* img_ch1_ptr = (T*)img_ch1.data;
    T* img_ch2_ptr = (T*)img_ch2.data;
    T* img_ch3_ptr = (T*)img_ch3.data;
    T* img_ch4_ptr = (T*)img_ch4.data;

    #pragma omp parallel for
    for ( int rgb_row_i = 0; rgb_row_i < rgb_height; rgb_row_i++ )
    {
        int rgb_row_i_offset = rgb_row_i * rgb_step;

        // Every RGB row corresbonding to two Bayer image row
        int bayer_row_i_offset0 = ( rgb_row_i * 2 + 0 ) * bayer_step; // For RG
        int bayer_row_i_offset1 = ( rgb_row_i * 2 + 1 ) * bayer_step; // For GB

        for ( int rgb_col_j = 0; rgb_col_j < rgb_width; rgb_col_j++ )
        {
            // img_ch1/2/3/4 : (0,0), (1,0), (0,1), (1,1)
            int bayer_col_i_offset0 = rgb_col_j * 2 + 0;
            int bayer_col_i_offset1 = rgb_col_j * 2 + 1;

            img_ch1_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset0 + bayer_col_i_offset0 ];
            img_ch3_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset0 + bayer_col_i_offset1 ];
            img_ch2_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset1 + bayer_col_i_offset0 ];
            img_ch4_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset1 + bayer_col_i_offset1 ];
        }
    }
}
/**
 * @brief Convert RGB image to gray image through same weight linear combination.
 *      Also support implicit data type conversion.
 *
 * @tparam RGB_DTYPE rgb image type (e.g. uint16_t)
 * @tparam GRAY_DTYPE gray image type (e.g. uint16_t)
 * @tparam GRAY_CVTYPE opencv gray image type
 */
template< typename RGB_DTYPE, typename GRAY_DTYPE, int GRAY_CVTYPE >
cv::Mat rgb_2_gray( const cv::Mat& rgb_img )
{
    const RGB_DTYPE* rgb_img_ptr = (const RGB_DTYPE*)rgb_img.data;
    int img_width = rgb_img.size().width;
    int img_height = rgb_img.size().height;
    int rgb_img_step = rgb_img.step1();

    // Create output gray cv::Mat
    cv::Mat gray_img( img_height, img_width, GRAY_CVTYPE );
    GRAY_DTYPE* gray_img_ptr = (GRAY_DTYPE*)gray_img.data;
    int gray_img_step = gray_img.step1();

    #pragma omp parallel for
    for ( int row_i = 0; row_i < img_height; row_i++ )
    {
        int rgb_row_i_offset = row_i * rgb_img_step;
        int gray_row_i_offset = row_i * gray_img_step;

        UNROLL_LOOP( 32 ) // multiplier of cache line size
        for ( int col_j = 0; col_j < img_width; col_j++ )
        {
            // BUGFIX: accumulate in a wide type. The original summed the
            // three samples in GRAY_DTYPE itself, so e.g. three bright
            // uint16_t samples wrapped around and corrupted the gray value.
            // double is exact for all integer pixel values, and the final
            // cast truncates, matching the previous integer division.
            double sum_ij = 0.0;
            sum_ij += rgb_img_ptr[ rgb_row_i_offset + (col_j * 3 + 0) ];
            sum_ij += rgb_img_ptr[ rgb_row_i_offset + (col_j * 3 + 1) ];
            sum_ij += rgb_img_ptr[ rgb_row_i_offset + (col_j * 3 + 2) ];
            gray_img_ptr[ gray_row_i_offset + col_j ] = GRAY_DTYPE( sum_ij / 3 );
        }
    }

    // OpenCV use reference count. Thus return won't create deep copy
    return gray_img;
}
// Debug helper: dump a tile_size x tile_size window of `img`, starting at
// (start_idx_row, start_idx_col), one matrix row per output line.
template <typename T>
void print_tile( const cv::Mat& img, int tile_size, int start_idx_row, int start_idx_col )
{
    const T* base = (T*)img.data;
    const int stride = img.step1();
    const int row_end = tile_size + start_idx_row;
    const int col_end = tile_size + start_idx_col;

    for ( int r = start_idx_row; r < row_end; ++r )
    {
        const T* row_ptr = base + r * stride;
        for ( int c = start_idx_col; c < col_end; ++c )
        {
            printf( "%u ", row_ptr[ c ] );
        }
        printf( "\n" );
    }
    printf( "\n" );
}
// Debug helper: print the top-left img_height x img_width region of `img`
// (pass -1, -1 -- the defaults -- to print the whole image). Requested sizes
// are clamped to the actual image size.
template< typename T>
void print_img( const cv::Mat& img, int img_height = -1, int img_width = -1 )
{
    const T* img_ptr = (T*)img.data;
    if ( img_height == -1 && img_width == -1 )
    {
        img_height = img.size().height;
        img_width = img.size().width;
    }
    else
    {
        img_height = std::min( img.size().height, img_height );
        img_width = std::min( img.size().width, img_width );
    }

    printf("Image size (h=%d, w=%d), Print range (h=0-%d, w=0-%d)]\n", \
        img.size().height, img.size().width, img_height, img_width );

    int img_step = img.step1();

    for ( int row = 0; row < img_height; ++row )
    {
        const T* img_ptr_row = img_ptr + row * img_step;
        for ( int col = 0; col < img_width; ++col )
        {
            printf("%u ", img_ptr_row[ col ]);
        }
        printf("\n");
    }
    printf("\n");
}
} // namespace hdrplus

@ -0,0 +1,992 @@
#include <vector>
#include <string>
#include <limits>
#include <cstdio>
#include <utility> // std::make_pair
#include <stdexcept> // std::runtime_error
#include <opencv2/opencv.hpp> // all opencv header
#include <omp.h>
#include "hdrplus/align.h"
#include "hdrplus/burst.h"
#include "hdrplus/utility.h"
namespace hdrplus
{
// Function declarations (file-local helpers defined further below).

// Build a grayscale image pyramid for one image (see definition below).
static void build_per_grayimg_pyramid( \
    std::vector<cv::Mat>& images_pyramid, \
    const cv::Mat& src_image, \
    const std::vector<int>& inv_scale_factors );

// Upsample a coarser level's tile alignment to the current level.
template< int pyramid_scale_factor_prev_curr, int tilesize_scale_factor_prev_curr, int tile_size >
static void build_upsampled_prev_aligement( \
    const std::vector<std::vector<std::pair<int, int>>>& src_alignment, \
    std::vector<std::vector<std::pair<int, int>>>& dst_alignment, \
    int num_tiles_h, int num_tiles_w, \
    const cv::Mat& ref_img, const cv::Mat& alt_img, \
    bool consider_nbr = false );

// Sum of absolute differences between two tiles.
template< typename data_type, typename return_type, int tile_size >
static unsigned long long l1_distance( const cv::Mat& img1, const cv::Mat& img2, \
    int img1_tile_row_start_idx, int img1_tile_col_start_idx, \
    int img2_tile_row_start_idx, int img2_tile_col_start_idx );

// Sum of squared differences between two tiles.
template< typename data_type, typename return_type, int tile_size >
static return_type l2_distance( const cv::Mat& img1, const cv::Mat& img2, \
    int img1_tile_row_start_idx, int img1_tile_col_start_idx, \
    int img2_tile_row_start_idx, int img2_tile_col_start_idx );

// Align one pyramid level of an alternative image against the reference.
static void align_image_level( \
    const cv::Mat& ref_img, \
    const cv::Mat& alt_img, \
    std::vector<std::vector<std::pair<int, int>>>& prev_aligement, \
    std::vector<std::vector<std::pair<int, int>>>& curr_alignment, \
    int scale_factor_prev_curr, \
    int curr_tile_size, \
    int prev_tile_size, \
    int search_radiou, \
    int distance_type );
// Function Implementations

// Build a grayscale Gaussian pyramid for a single image.
//
// images_pyramid[0] holds the original resolution; each later level i is
// produced by Gaussian-blurring the PREVIOUS level (i-1) and then
// nearest-neighbour downsampling it by inv_scale_factors[i]. Only factors
// 1, 2 and 4 are supported. inv_scale_factors[0] is expected to be 1 so
// that level i-1 exists when a factor of 2/4 is processed — TODO confirm
// this invariant against the caller (align::process).
//
// static: only visible within this translation unit.
static void build_per_grayimg_pyramid( \
    std::vector<cv::Mat>& images_pyramid, \
    const cv::Mat& src_image, \
    const std::vector<int>& inv_scale_factors )
{
    // #ifndef NDEBUG
    // printf("%s::%s build_per_grayimg_pyramid start with scale factor : ", __FILE__, __func__ );
    // for ( int i = 0; i < inv_scale_factors.size(); ++i )
    // {
    // printf("%d ", inv_scale_factors.at( i ));
    // }
    // printf("\n");
    // #endif
    images_pyramid.resize( inv_scale_factors.size() );
    for ( size_t i = 0; i < inv_scale_factors.size(); ++i )
    {
        cv::Mat blur_image;
        cv::Mat downsample_image;
        switch ( inv_scale_factors[ i ] )
        {
            case 1:
                images_pyramid[ i ] = src_image.clone();
                // cv::Mat use reference count, will not create deep copy
                downsample_image = src_image;
                break;
            case 2:
                // printf("(2) downsample with gaussian sigma %.2f", inv_scale_factors[ i ] * 0.5 );
                // // Gaussian blur with sigma proportional to the scale factor
                cv::GaussianBlur( images_pyramid.at( i-1 ), blur_image, cv::Size(0, 0), inv_scale_factors[ i ] * 0.5 );
                // // Downsample
                downsample_image = downsample_nearest_neighbour<uint16_t, 2>( blur_image );
                // downsample_image = downsample_nearest_neighbour<uint16_t, 2>( images_pyramid.at( i-1 ) );
                // Add
                images_pyramid.at( i ) = downsample_image.clone();
                break;
            case 4:
                // printf("(4) downsample with gaussian sigma %.2f", inv_scale_factors[ i ] * 0.5 );
                cv::GaussianBlur( images_pyramid.at( i-1 ), blur_image, cv::Size(0, 0), inv_scale_factors[ i ] * 0.5 );
                downsample_image = downsample_nearest_neighbour<uint16_t, 4>( blur_image );
                // downsample_image = downsample_nearest_neighbour<uint16_t, 4>( images_pyramid.at( i-1 ) );
                images_pyramid.at( i ) = downsample_image.clone();
                break;
            default:
                // NOTE(review): on Android an unsupported factor is silently
                // ignored and images_pyramid[ i ] stays empty — callers should
                // never pass factors other than 1/2/4.
                #ifdef __ANDROID__
                #else
                throw std::runtime_error("inv scale factor " + std::to_string( inv_scale_factors[ i ]) + "invalid" );
                #endif
        }
    }
}
// Inequality for tile alignment offsets: two (row, col) displacement
// pairs are unequal when either component differs.
static bool operator!=( const std::pair<int, int>& lhs, const std::pair<int, int>& rhs )
{
    return !( lhs.first == rhs.first && lhs.second == rhs.second );
}
// Upsample the previous (coarser) pyramid level's tile alignment into the
// current level's tile grid.
//
// Template parameters:
//   pyramid_scale_factor_prev_curr  - image scale between prev and curr level
//   tilesize_scale_factor_prev_curr - tile-size ratio between prev and curr
//   tile_size                       - current level tile size (for distance)
//
// Each coarse tile's displacement is multiplied by the pyramid scale factor
// and replicated over repeat_factor x repeat_factor fine tiles. Tiles beyond
// the replicated region keep the default (0, 0) alignment. When consider_nbr
// is true, each tile additionally compares its alignment with its 4-connected
// neighbours' alignments via L1 distance and keeps whichever matches best.
template< int pyramid_scale_factor_prev_curr, int tilesize_scale_factor_prev_curr, int tile_size >
static void build_upsampled_prev_aligement( \
    const std::vector<std::vector<std::pair<int, int>>>& src_alignment, \
    std::vector<std::vector<std::pair<int, int>>>& dst_alignment, \
    int num_tiles_h, int num_tiles_w, \
    const cv::Mat& ref_img, const cv::Mat& alt_img, \
    bool consider_nbr )
{
    int src_num_tiles_h = src_alignment.size();
    int src_num_tiles_w = src_alignment[ 0 ].size();
    // How many fine tiles each coarse tile expands into (per axis)
    constexpr int repeat_factor = pyramid_scale_factor_prev_curr / tilesize_scale_factor_prev_curr;
    // printf("build_upsampled_prev_aligement with scale factor %d, repeat factor %d, tile size factor %d\n", \
    // pyramid_scale_factor_prev_curr, repeat_factor, tilesize_scale_factor_prev_curr );
    int dst_num_tiles_main_h = src_num_tiles_h * repeat_factor;
    int dst_num_tiles_main_w = src_num_tiles_w * repeat_factor;
    if ( dst_num_tiles_main_h > num_tiles_h || dst_num_tiles_main_w > num_tiles_w )
    {
        // NOTE(review): on Android this returns silently, leaving
        // dst_alignment unmodified.
        #ifdef __ANDROID__
        return;
        #else
        throw std::runtime_error("current level number of tiles smaller than upsampled tiles\n");
        #endif
    }
    // Allocate data for dst_alignment
    // NOTE: number of tiles h, number of tiles w might be different from dst_num_tiles_main_h, dst_num_tiles_main_w
    // For tiles between num_tile_h and dst_num_tiles_main_h, use (0,0)
    dst_alignment.resize( num_tiles_h, std::vector<std::pair<int, int>>( num_tiles_w, std::pair<int, int>(0, 0) ) );
    // Upsample alignment: scale each coarse displacement and replicate it
    #pragma omp parallel for collapse(2)
    for ( int row_i = 0; row_i < src_num_tiles_h; row_i++ )
    {
        for ( int col_i = 0; col_i < src_num_tiles_w; col_i++ )
        {
            // Scale alignment (displacements are in pixels, so scale by the
            // pyramid scale factor between the two levels)
            std::pair<int, int> align_i = src_alignment[ row_i ][ col_i ];
            align_i.first *= pyramid_scale_factor_prev_curr;
            align_i.second *= pyramid_scale_factor_prev_curr;
            // repeat the scaled alignment over the corresponding fine tiles
            UNROLL_LOOP( repeat_factor )
            for ( int repeat_row_i = 0; repeat_row_i < repeat_factor; ++repeat_row_i )
            {
                int repeat_row_i_offset = row_i * repeat_factor + repeat_row_i;
                UNROLL_LOOP( repeat_factor )
                for ( int repeat_col_i = 0; repeat_col_i < repeat_factor; ++repeat_col_i )
                {
                    int repeat_col_i_offset = col_i * repeat_factor + repeat_col_i;
                    dst_alignment[ repeat_row_i_offset ][ repeat_col_i_offset ] = align_i;
                }
            }
        }
    }
    if ( consider_nbr )
    {
        // Copy constructor: snapshot of the replicated alignment, so that
        // neighbour lookups below are not affected by in-place updates
        std::vector<std::vector<std::pair<int, int>>> upsampled_alignment{ dst_alignment };
        // Distance function (L1 over uint16_t tiles of the current size)
        unsigned long long (*distance_func_ptr)(const cv::Mat&, const cv::Mat&, int, int, int, int) = \
            &l1_distance<uint16_t, unsigned long long, tile_size>;
        #pragma omp parallel for collapse(2)
        for ( int tile_row_i = 0; tile_row_i < num_tiles_h; tile_row_i++ )
        {
            for ( int tile_col_i = 0; tile_col_i < num_tiles_w; tile_col_i++ )
            {
                const auto& curr_align_i = upsampled_alignment[ tile_row_i ][ tile_col_i ];
                // Container for nbr alignment pair
                std::vector<std::pair<int, int>> nbrs_align_i;
                // Consider 4 neighbour's alignment
                // Only compute distance if alignment is different
                if ( tile_col_i > 0 )
                {
                    const auto& nbr1_align_i = upsampled_alignment[ tile_row_i + 0 ][ tile_col_i - 1 ];
                    if ( curr_align_i != nbr1_align_i ) nbrs_align_i.emplace_back( nbr1_align_i );
                }
                if ( tile_col_i < num_tiles_w - 1 )
                {
                    const auto& nbr2_align_i = upsampled_alignment[ tile_row_i + 0 ][ tile_col_i + 1 ];
                    if ( curr_align_i != nbr2_align_i ) nbrs_align_i.emplace_back( nbr2_align_i );
                }
                if ( tile_row_i > 0 )
                {
                    const auto& nbr3_align_i = upsampled_alignment[ tile_row_i - 1 ][ tile_col_i + 0 ];
                    if ( curr_align_i != nbr3_align_i ) nbrs_align_i.emplace_back( nbr3_align_i );
                }
                if ( tile_row_i < num_tiles_h - 1 )
                {
                    const auto& nbr4_align_i = upsampled_alignment[ tile_row_i + 1 ][ tile_col_i + 0 ];
                    if ( curr_align_i != nbr4_align_i ) nbrs_align_i.emplace_back( nbr4_align_i );
                }
                // If there is a nbr alignment that need to be considered. Compute distance
                if ( ! nbrs_align_i.empty() )
                {
                    // Tiles overlap by half a tile, hence the /2 in the start index
                    int ref_tile_row_start_idx_i = tile_row_i * tile_size / 2;
                    int ref_tile_col_start_idx_i = tile_col_i * tile_size / 2;
                    // curr_align_i's distance
                    auto curr_align_i_distance = distance_func_ptr(
                        ref_img, alt_img, \
                        ref_tile_row_start_idx_i, \
                        ref_tile_col_start_idx_i, \
                        ref_tile_row_start_idx_i + curr_align_i.first, \
                        ref_tile_col_start_idx_i + curr_align_i.second );
                    // Keep whichever candidate alignment yields the smallest distance
                    for ( const auto& nbr_align_i : nbrs_align_i )
                    {
                        auto nbr_align_i_distance = distance_func_ptr(
                            ref_img, alt_img, \
                            ref_tile_row_start_idx_i, \
                            ref_tile_col_start_idx_i, \
                            ref_tile_row_start_idx_i + nbr_align_i.first, \
                            ref_tile_col_start_idx_i + nbr_align_i.second );
                        if ( nbr_align_i_distance < curr_align_i_distance )
                        {
                            // NOTE(review): this debug print is enabled under
                            // NDEBUG (release builds) — likely intended to be
                            // #ifndef NDEBUG.
                            #ifdef NDEBUG
                            printf("tile [%d, %d] update align, prev align (%d, %d) curr align (%d, %d), prev distance %d curr distance %d\n", \
                                tile_row_i, tile_col_i, \
                                curr_align_i.first, curr_align_i.second, \
                                nbr_align_i.first, nbr_align_i.second, \
                                int(curr_align_i_distance), int(nbr_align_i_distance) );
                            #endif
                            dst_alignment[ tile_row_i ][ tile_col_i ] = nbr_align_i;
                            curr_align_i_distance = nbr_align_i_distance;
                        }
                    }
                }
            }
        }
    }
}
// Set tile size as a template argument for better compiler optimization.
//
// Compute the L1 distance (sum of absolute differences) between the
// tile_size x tile_size tile of img1 starting at
// (img1_tile_row_start_idx, img1_tile_col_start_idx) and the tile of img2
// starting at (img2_tile_row_start_idx, img2_tile_col_start_idx).
//
// data_type is the element type stored in the images (uint16_t here);
// return_type is the accumulator type. Returns 0 on Android when a tile
// start index is out of range; throws std::runtime_error otherwise.
//
// Fix: replaced the unparenthesized CUSTOME_ABS macro
// (`(x) > 0 ? (x) : -(x)` without outer parentheses — a ternary precedence
// hazard in any larger expression) with an explicit abs computed in int.
// Result is identical: uint16_t operands are promoted to int before the
// subtraction either way.
template< typename data_type, typename return_type, int tile_size >
static unsigned long long l1_distance( const cv::Mat& img1, const cv::Mat& img2, \
    int img1_tile_row_start_idx, int img1_tile_col_start_idx, \
    int img2_tile_row_start_idx, int img2_tile_col_start_idx )
{
    const data_type* img1_ptr = (const data_type*)img1.data;
    const data_type* img2_ptr = (const data_type*)img2.data;
    int img1_step = img1.step1();
    int img2_step = img2.step1();
    int img1_width = img1.size().width;
    int img1_height = img1.size().height;
    int img2_width = img2.size().width;
    int img2_height = img2.size().height;
    // Range check for safety
    if ( img1_tile_row_start_idx < 0 || img1_tile_row_start_idx > img1_height - tile_size )
    {
        #ifdef __ANDROID__
        return 0;
        #else
        throw std::runtime_error("l1 distance img1_tile_row_start_idx" + std::to_string( img1_tile_row_start_idx ) + \
            " out of valid range (0, " + std::to_string( img1_height - tile_size ) + ")\n" );
        #endif
    }
    if ( img1_tile_col_start_idx < 0 || img1_tile_col_start_idx > img1_width - tile_size )
    {
        #ifdef __ANDROID__
        return 0;
        #else
        throw std::runtime_error("l1 distance img1_tile_col_start_idx" + std::to_string( img1_tile_col_start_idx ) + \
            " out of valid range (0, " + std::to_string( img1_width - tile_size ) + ")\n" );
        #endif
    }
    if ( img2_tile_row_start_idx < 0 || img2_tile_row_start_idx > img2_height - tile_size )
    {
        #ifdef __ANDROID__
        return 0;
        #else
        throw std::runtime_error("l1 distance img2_tile_row_start_idx out of valid range\n");
        #endif
    }
    if ( img2_tile_col_start_idx < 0 || img2_tile_col_start_idx > img2_width - tile_size )
    {
        #ifdef __ANDROID__
        return 0;
        #else
        throw std::runtime_error("l1 distance img2_tile_col_start_idx out of valid range\n");
        #endif
    }
    return_type sum(0);
    UNROLL_LOOP( tile_size )
    for ( int row_i = 0; row_i < tile_size; ++row_i )
    {
        const data_type* img1_ptr_row_i = img1_ptr + (img1_tile_row_start_idx + row_i) * img1_step + img1_tile_col_start_idx;
        const data_type* img2_ptr_row_i = img2_ptr + (img2_tile_row_start_idx + row_i) * img2_step + img2_tile_col_start_idx;
        UNROLL_LOOP( tile_size )
        for ( int col_i = 0; col_i < tile_size; ++col_i )
        {
            // Absolute pixel difference, computed in int (promotion-safe)
            int diff = int( img1_ptr_row_i[ col_i ] ) - int( img2_ptr_row_i[ col_i ] );
            sum += ( diff >= 0 ? diff : -diff );
        }
    }
    return sum;
}
// Compute the L2 distance (sum of squared differences) between the
// tile_size x tile_size tile of img1 starting at
// (img1_tile_row_start_idx, img1_tile_col_start_idx) and the tile of img2
// starting at (img2_tile_row_start_idx, img2_tile_col_start_idx).
//
// data_type is the element type stored in the images (uint16_t here);
// return_type is the accumulator type. Returns 0 on Android when a tile
// start index is out of range; throws std::runtime_error otherwise.
//
// Bug fix: the old code stored the absolute difference in data_type
// (uint16_t) and computed `l1 * l1` — both operands promote to int, and
// for differences above 46340 the square exceeds INT_MAX, which is signed
// overflow (undefined behaviour). The square is now computed in long long
// before accumulation. The CUSTOME_ABS macro (unparenthesized ternary,
// a precedence hazard) is also gone; squaring makes abs unnecessary.
template< typename data_type, typename return_type, int tile_size >
static return_type l2_distance( const cv::Mat& img1, const cv::Mat& img2, \
    int img1_tile_row_start_idx, int img1_tile_col_start_idx, \
    int img2_tile_row_start_idx, int img2_tile_col_start_idx )
{
    const data_type* img1_ptr = (const data_type*)img1.data;
    const data_type* img2_ptr = (const data_type*)img2.data;
    int img1_step = img1.step1();
    int img2_step = img2.step1();
    int img1_width = img1.size().width;
    int img1_height = img1.size().height;
    int img2_width = img2.size().width;
    int img2_height = img2.size().height;
    // Range check for safety
    if ( img1_tile_row_start_idx < 0 || img1_tile_row_start_idx > img1_height - tile_size )
    {
        #ifdef __ANDROID__
        return 0;
        #else
        throw std::runtime_error("l2 distance img1_tile_row_start_idx" + std::to_string( img1_tile_row_start_idx ) + \
            " out of valid range (0, " + std::to_string( img1_height - tile_size ) + ")\n" );
        #endif
    }
    if ( img1_tile_col_start_idx < 0 || img1_tile_col_start_idx > img1_width - tile_size )
    {
        #ifdef __ANDROID__
        return 0;
        #else
        throw std::runtime_error("l2 distance img1_tile_col_start_idx" + std::to_string( img1_tile_col_start_idx ) + \
            " out of valid range (0, " + std::to_string( img1_width - tile_size ) + ")\n" );
        #endif
    }
    if ( img2_tile_row_start_idx < 0 || img2_tile_row_start_idx > img2_height - tile_size )
    {
        #ifdef __ANDROID__
        return 0;
        #else
        throw std::runtime_error("l2 distance img2_tile_row_start_idx out of valid range\n");
        #endif
    }
    if ( img2_tile_col_start_idx < 0 || img2_tile_col_start_idx > img2_width - tile_size )
    {
        #ifdef __ANDROID__
        return 0;
        #else
        throw std::runtime_error("l2 distance img2_tile_col_start_idx out of valid range\n");
        #endif
    }
    // printf("Search two tile with ref : \n");
    // print_tile<data_type>( img1, tile_size, img1_tile_row_start_idx, img1_tile_col_start_idx );
    // printf("Search two tile with alt :\n");
    // print_tile<data_type>( img2, tile_size, img2_tile_row_start_idx, img2_tile_col_start_idx );
    return_type sum(0);
    UNROLL_LOOP( tile_size )
    for ( int row_i = 0; row_i < tile_size; ++row_i )
    {
        const data_type* img1_ptr_row_i = img1_ptr + (img1_tile_row_start_idx + row_i) * img1_step + img1_tile_col_start_idx;
        const data_type* img2_ptr_row_i = img2_ptr + (img2_tile_row_start_idx + row_i) * img2_step + img2_tile_col_start_idx;
        UNROLL_LOOP( tile_size )
        for ( int col_i = 0; col_i < tile_size; ++col_i )
        {
            // Squared difference in long long: (2^16-1)^2 overflows int
            long long diff = (long long)img1_ptr_row_i[ col_i ] - (long long)img2_ptr_row_i[ col_i ];
            sum += return_type( diff * diff );
        }
    }
    return sum;
}
// Copy a tile_size x tile_size tile out of `img`, starting at
// (img_tile_row_start_idx, img_tile_col_start_idx), into a fresh cv::Mat.
// Used to cache the reference tile / search window contiguously in memory.
// Returns an empty cv::Mat on Android when the start index is out of
// range; throws std::runtime_error otherwise.
template<typename T, int tile_size>
static cv::Mat extract_img_tile( const cv::Mat& img, int img_tile_row_start_idx, int img_tile_col_start_idx )
{
    const T* src_base = (const T*)img.data;
    int src_height = img.size().height;
    int src_width = img.size().width;
    int src_step = img.step1();
    // Bounds check: the whole tile must lie inside the image
    bool row_out_of_range = \
        ( img_tile_row_start_idx < 0 || img_tile_row_start_idx > src_height - tile_size );
    if ( row_out_of_range )
    {
        #ifdef __ANDROID__
        return cv::Mat();
        #else
        throw std::runtime_error("extract_img_tile img_tile_row_start_idx " + std::to_string( img_tile_row_start_idx ) + \
            " out of valid range (0, " + std::to_string( src_height - tile_size ) + ")\n" );
        #endif
    }
    bool col_out_of_range = \
        ( img_tile_col_start_idx < 0 || img_tile_col_start_idx > src_width - tile_size );
    if ( col_out_of_range )
    {
        #ifdef __ANDROID__
        return cv::Mat();
        #else
        throw std::runtime_error("extract_img_tile img_tile_col_start_idx " + std::to_string( img_tile_col_start_idx ) + \
            " out of valid range (0, " + std::to_string( src_width - tile_size ) + ")\n" );
        #endif
    }
    // Allocate the destination tile with the same element type as the source
    cv::Mat tile( tile_size, tile_size, img.type() );
    T* dst_base = (T*)tile.data;
    int dst_step = tile.step1();
    // Row-by-row element copy from the source window into the tile
    UNROLL_LOOP( tile_size )
    for ( int r = 0; r < tile_size; ++r )
    {
        const T* src_row = src_base + src_step * ( img_tile_row_start_idx + r ) + img_tile_col_start_idx;
        T* dst_row = dst_base + dst_step * r;
        UNROLL_LOOP( tile_size )
        for ( int c = 0; c < tile_size; ++c )
        {
            dst_row[ c ] = src_row[ c ];
        }
    }
    return tile;
}
// Align one pyramid level of an alternative image against the reference.
//
// Parameters:
//   ref_img / alt_img       - reference / alternative grayscale image at this level
//   prev_aligement          - alignment from the previous (coarser) level;
//                             ignored when prev_tile_size == -1 (coarsest level)
//   curr_alignment          - output: per-tile (row, col) pixel displacement
//   scale_factor_prev_curr  - image scale between previous and current level
//   curr_tile_size          - tile size at this level (must be 8 or 16)
//   prev_tile_size          - tile size at the previous level, or -1 at the
//                             coarsest level
//   search_radiou           - search radius in pixels (must be 1 or 4)
//   distance_type           - 1 for L1, 2 for L2
//
// Tiles overlap by half a tile in each direction. For every reference tile,
// the upsampled coarse alignment gives a starting displacement; a full
// (2*radius+1)^2 search around it picks the displacement with the smallest
// tile distance.
void align_image_level( \
    const cv::Mat& ref_img, \
    const cv::Mat& alt_img, \
    std::vector<std::vector<std::pair<int, int>>>& prev_aligement, \
    std::vector<std::vector<std::pair<int, int>>>& curr_alignment, \
    int scale_factor_prev_curr, \
    int curr_tile_size, \
    int prev_tile_size, \
    int search_radiou, \
    int distance_type )
{
    // Every align image level share the same distance function.
    // Use function ptr to reduce if else overhead inside for loop
    // NOTE(review): if distance_type/curr_tile_size fall outside the cases
    // below, distance_func_ptr stays nullptr and the search loop would crash.
    unsigned long long (*distance_func_ptr)(const cv::Mat&, const cv::Mat&, int, int, int, int) = nullptr;
    if ( distance_type == 1 ) // l1 distance
    {
        if ( curr_tile_size == 8 )
        {
            distance_func_ptr = &l1_distance<uint16_t, unsigned long long, 8>;
        }
        else if ( curr_tile_size == 16 )
        {
            distance_func_ptr = &l1_distance<uint16_t, unsigned long long, 16>;
        }
    }
    else if ( distance_type == 2 ) // l2 distance
    {
        if ( curr_tile_size == 8 )
        {
            distance_func_ptr = &l2_distance<uint16_t, unsigned long long, 8>;
        }
        else if ( curr_tile_size == 16 )
        {
            distance_func_ptr = &l2_distance<uint16_t, unsigned long long, 16>;
        }
    }
    // Every level share the same upsample function.
    // Select the build_upsampled_prev_aligement instantiation matching the
    // (pyramid scale, tile-size ratio, tile size) combination.
    void (*upsample_alignment_func_ptr)(const std::vector<std::vector<std::pair<int, int>>>&, \
        std::vector<std::vector<std::pair<int, int>>>&, \
        int, int, const cv::Mat&, const cv::Mat&, bool) = nullptr;
    if ( scale_factor_prev_curr == 2 )
    {
        if ( curr_tile_size / prev_tile_size == 2 )
        {
            if ( curr_tile_size == 8 )
            {
                upsample_alignment_func_ptr = &build_upsampled_prev_aligement<2, 2, 8>;
            }
            else if ( curr_tile_size == 16 )
            {
                upsample_alignment_func_ptr = &build_upsampled_prev_aligement<2, 2, 16>;
            }
            else
            {
                #ifdef __ANDROID__
                return;
                #else
                throw std::runtime_error("Something wrong with upsampling function setting\n");
                #endif
            }
        }
        else if ( curr_tile_size / prev_tile_size == 1 )
        {
            if ( curr_tile_size == 8 )
            {
                upsample_alignment_func_ptr = &build_upsampled_prev_aligement<2, 1, 8>;
            }
            else if ( curr_tile_size == 16 )
            {
                upsample_alignment_func_ptr = &build_upsampled_prev_aligement<2, 1, 16>;
            }
            else
            {
                #ifdef __ANDROID__
                return;
                #else
                throw std::runtime_error("Something wrong with upsampling function setting\n");
                #endif
            }
        }
        else
        {
            #ifdef __ANDROID__
            return;
            #else
            throw std::runtime_error("Something wrong with upsampling function setting\n");
            #endif
        }
    }
    else if ( scale_factor_prev_curr == 4 )
    {
        if ( curr_tile_size / prev_tile_size == 2 )
        {
            if ( curr_tile_size == 8 )
            {
                upsample_alignment_func_ptr = &build_upsampled_prev_aligement<4, 2, 8>;
            }
            else if ( curr_tile_size == 16 )
            {
                upsample_alignment_func_ptr = &build_upsampled_prev_aligement<4, 2, 16>;
            }
            else
            {
                #ifdef __ANDROID__
                return;
                #else
                throw std::runtime_error("Something wrong with upsampling function setting\n");
                #endif
            }
        }
        else if ( curr_tile_size / prev_tile_size == 1 )
        {
            if ( curr_tile_size == 8 )
            {
                upsample_alignment_func_ptr = &build_upsampled_prev_aligement<4, 1, 8>;
            }
            else if ( curr_tile_size == 16 )
            {
                upsample_alignment_func_ptr = &build_upsampled_prev_aligement<4, 1, 16>;
            }
            else
            {
                #ifdef __ANDROID__
                return;
                #else
                throw std::runtime_error("Something wrong with upsampling function setting\n");
                #endif
            }
        }
        else
        {
            #ifdef __ANDROID__
            return;
            #else
            throw std::runtime_error("Something wrong with upsampling function setting\n");
            #endif
        }
    }
    // Function to extract reference image tile for memory cache
    cv::Mat (*extract_ref_img_tile)(const cv::Mat&, int, int) = nullptr;
    if ( curr_tile_size == 8 )
    {
        extract_ref_img_tile = &extract_img_tile<uint16_t, 8>;
    }
    else if ( curr_tile_size == 16 )
    {
        extract_ref_img_tile = &extract_img_tile<uint16_t, 16>;
    }
    // Function to extract search image tile for memory cache
    // (search window is the tile enlarged by the search radius on each side)
    cv::Mat (*extract_alt_img_search)(const cv::Mat&, int, int) = nullptr;
    if ( curr_tile_size == 8 )
    {
        if ( search_radiou == 1 )
        {
            extract_alt_img_search = &extract_img_tile<uint16_t, 8+1*2>;
        }
        else if ( search_radiou == 4 )
        {
            extract_alt_img_search = &extract_img_tile<uint16_t, 8+4*2>;
        }
    }
    else if ( curr_tile_size == 16 )
    {
        if ( search_radiou == 1 )
        {
            extract_alt_img_search = &extract_img_tile<uint16_t, 16+1*2>;
        }
        else if ( search_radiou == 4 )
        {
            extract_alt_img_search = &extract_img_tile<uint16_t, 16+4*2>;
        }
    }
    // Tiles overlap by half a tile, hence the tile_size/2 stride
    int num_tiles_h = ref_img.size().height / (curr_tile_size / 2) - 1;
    int num_tiles_w = ref_img.size().width / (curr_tile_size / 2 ) - 1;
    /* Upsample pervious layer alignment */
    std::vector<std::vector<std::pair<int, int>>> upsampled_prev_aligement;
    // Coarsest level
    // prev_alignment is invalid / empty, construct alignment as (0,0)
    if ( prev_tile_size == -1 )
    {
        upsampled_prev_aligement.resize( num_tiles_h, \
            std::vector<std::pair<int, int>>( num_tiles_w, std::pair<int, int>(0, 0) ) );
    }
    // Upsample previous level alignment
    else
    {
        upsample_alignment_func_ptr( prev_aligement, upsampled_prev_aligement, \
            num_tiles_h, num_tiles_w, ref_img, alt_img, false );
        // printf("\n!!!!!Upsampled previous alignment\n");
        // for ( int tile_row = 0; tile_row < int(upsampled_prev_aligement.size()); tile_row++ )
        // {
        // for ( int tile_col = 0; tile_col < int(upsampled_prev_aligement.at(0).size()); tile_col++ )
        // {
        // const auto tile_start = upsampled_prev_aligement.at( tile_row ).at( tile_col );
        // printf("up tile (%d, %d) -> start idx (%d, %d)\n", \
        // tile_row, tile_col, tile_start.first, tile_start.second);
        // }
        // }
    }
    #ifndef NDEBUG
    printf("%s::%s start: \n", __FILE__, __func__ );
    printf("    scale_factor_prev_curr %d, tile_size %d, prev_tile_size %d, search_radiou %d, distance L%d, \n", \
        scale_factor_prev_curr, curr_tile_size, prev_tile_size, search_radiou, distance_type );
    printf("    ref img size h=%d w=%d, alt img size h=%d w=%d, \n", \
        ref_img.size().height, ref_img.size().width, alt_img.size().height, alt_img.size().width );
    printf("    num tile h (upsampled) %d, num tile w (upsampled) %d\n", num_tiles_h, num_tiles_w);
    #endif
    // allocate memory for current alignment
    curr_alignment.resize( num_tiles_h, std::vector<std::pair<int, int>>( num_tiles_w, std::pair<int, int>(0, 0) ) );
    /* Pad alternative image with the search radius on every side so that
       out-of-bound search positions read the max pixel value (worst match) */
    cv::Mat alt_img_pad;
    cv::copyMakeBorder( alt_img, \
        alt_img_pad, \
        search_radiou, search_radiou, search_radiou, search_radiou, \
        cv::BORDER_CONSTANT, cv::Scalar( UINT_LEAST16_MAX ) );
    // printf("Reference image h=%d, w=%d: \n", ref_img.size().height, ref_img.size().width );
    // print_img<uint16_t>( ref_img );
    // printf("Alter image pad h=%d, w=%d: \n", alt_img_pad.size().height, alt_img_pad.size().width );
    // print_img<uint16_t>( alt_img_pad );
    // printf("!! enlarged tile size %d\n", curr_tile_size + 2 * search_radiou );
    // Largest valid start index for the enlarged search window in the padded image
    int alt_tile_row_idx_max = alt_img_pad.size().height - ( curr_tile_size + 2 * search_radiou );
    int alt_tile_col_idx_max = alt_img_pad.size().width - ( curr_tile_size + 2 * search_radiou );
    // Delete below distance vector, this is for debug only
    // NOTE(review): min_distance_i (unsigned long long) is truncated when
    // stored into this uint16_t debug vector.
    std::vector<std::vector<uint16_t>> distances( num_tiles_h, std::vector<uint16_t>( num_tiles_w, 0 ));
    /* Iterate through all reference tile & compute distance */
    #pragma omp parallel for collapse(2)
    for ( int ref_tile_row_i = 0; ref_tile_row_i < num_tiles_h; ref_tile_row_i++ )
    {
        for ( int ref_tile_col_i = 0; ref_tile_col_i < num_tiles_w; ref_tile_col_i++ )
        {
            // Upper left index of reference tile (tiles overlap by half)
            int ref_tile_row_start_idx_i = ref_tile_row_i * curr_tile_size / 2;
            int ref_tile_col_start_idx_i = ref_tile_col_i * curr_tile_size / 2;
            // printf("\nRef img tile [%d, %d] -> start idx [%d, %d] (row, col)\n", \
            // ref_tile_row_i, ref_tile_col_i, ref_tile_row_start_idx_i, ref_tile_col_start_idx_i );
            // printf("\nRef img tile [%d, %d]\n", ref_tile_row_i, ref_tile_col_i );
            // print_tile<uint16_t>( ref_img, curr_tile_size, ref_tile_row_start_idx_i, ref_tile_col_start_idx_i );
            // Upsampled alignment at this tile
            // Alignment are relative displacement in pixel value
            int prev_alignment_row_i = upsampled_prev_aligement.at( ref_tile_row_i ).at( ref_tile_col_i ).first;
            int prev_alignment_col_i = upsampled_prev_aligement.at( ref_tile_row_i ).at( ref_tile_col_i ).second;
            // Alternative image tile start idx
            int alt_tile_row_start_idx_i = ref_tile_row_start_idx_i + prev_alignment_row_i;
            int alt_tile_col_start_idx_i = ref_tile_col_start_idx_i + prev_alignment_col_i;
            // Ensure alternative image tile within range (clamp to the padded image)
            if ( alt_tile_row_start_idx_i < 0 )
                alt_tile_row_start_idx_i = 0;
            if ( alt_tile_col_start_idx_i < 0 )
                alt_tile_col_start_idx_i = 0;
            if ( alt_tile_row_start_idx_i > alt_tile_row_idx_max )
            {
                // int before = alt_tile_row_start_idx_i;
                alt_tile_row_start_idx_i = alt_tile_row_idx_max;
                // printf("@@ change start x from %d to %d\n", before, alt_tile_row_idx_max);
            }
            if ( alt_tile_col_start_idx_i > alt_tile_col_idx_max )
            {
                // int before = alt_tile_col_start_idx_i;
                alt_tile_col_start_idx_i = alt_tile_col_idx_max;
                // printf("@@ change start y from %d to %d\n", before, alt_tile_col_idx_max );
            }
            // Explicitly caching reference image tile and search window
            // (contiguous copies are cache-friendlier than strided reads)
            cv::Mat ref_img_tile_i = extract_ref_img_tile( ref_img, ref_tile_row_start_idx_i, ref_tile_col_start_idx_i );
            cv::Mat alt_img_search_i = extract_alt_img_search( alt_img_pad, alt_tile_row_start_idx_i, alt_tile_col_start_idx_i );
            // Because alternative image is padded with search radious.
            // Using same coordinate with reference image will automatically considered search radious * 2
            // printf("Alt image tile [%d, %d]-> start idx [%d, %d]\n", \
            // ref_tile_row_i, ref_tile_col_i, alt_tile_row_start_idx_i, alt_tile_col_start_idx_i );
            // printf("\nAlt image tile [%d, %d]\n", ref_tile_row_i, ref_tile_col_i );
            // print_tile<uint16_t>( alt_img_pad, curr_tile_size + 2 * search_radiou, alt_tile_row_start_idx_i, alt_tile_col_start_idx_i );
            // Search based on L1/L2 distance over the (2r+1)x(2r+1) window
            // NOTE(review): ULONG_LONG_MAX is a GNU extension; the standard
            // macro is ULLONG_MAX (<climits>).
            unsigned long long min_distance_i = ULONG_LONG_MAX;
            int min_distance_row_i = -1;
            int min_distance_col_i = -1;
            for ( int search_row_j = 0; search_row_j < ( search_radiou * 2 + 1 ); search_row_j++ )
            {
                for ( int search_col_j = 0; search_col_j < ( search_radiou * 2 + 1 ); search_col_j++ )
                {
                    // printf("\n--->tile at [%d, %d] search (%d, %d)\n", \
                    // ref_tile_row_i, ref_tile_col_i, search_row_j - search_radiou, search_col_j - search_radiou );
                    // unsigned long long distance_j = distance_func_ptr( ref_img, alt_img_pad, \
                    // ref_tile_row_start_idx_i, ref_tile_col_start_idx_i, \
                    // alt_tile_row_start_idx_i + search_row_j, alt_tile_col_start_idx_i + search_col_j );
                    // unsigned long long distance_j = distance_func_ptr( ref_img_tile_i, alt_img_pad, \
                    // 0, 0, \
                    // alt_tile_row_start_idx_i + search_row_j, alt_tile_col_start_idx_i + search_col_j );
                    unsigned long long distance_j = distance_func_ptr( ref_img_tile_i, alt_img_search_i, \
                        0, 0, \
                        search_row_j, search_col_j );
                    // printf("<---tile at [%d, %d] search (%d, %d), new dis %llu, old dis %llu\n", \
                    // ref_tile_row_i, ref_tile_col_i, search_row_j - search_radiou, search_col_j - search_radiou, distance_j, min_distance_i );
                    // If this is smaller distance
                    if ( distance_j < min_distance_i )
                    {
                        min_distance_i = distance_j;
                        min_distance_col_i = search_col_j;
                        min_distance_row_i = search_row_j;
                    }
                    // If same value, choose the one closer to the original tile location
                    if ( distance_j == min_distance_i && min_distance_row_i != -1 && min_distance_col_i != -1 )
                    {
                        int prev_distance_row_2_ref = min_distance_row_i - search_radiou;
                        int prev_distance_col_2_ref = min_distance_col_i - search_radiou;
                        int curr_distance_row_2_ref = search_row_j - search_radiou;
                        int curr_distance_col_2_ref = search_col_j - search_radiou;
                        int prev_distance_2_ref_sqr = prev_distance_row_2_ref * prev_distance_row_2_ref + prev_distance_col_2_ref * prev_distance_col_2_ref;
                        int curr_distance_2_ref_sqr = curr_distance_row_2_ref * curr_distance_row_2_ref + curr_distance_col_2_ref * curr_distance_col_2_ref;
                        // previous min distance idx is farther away from ref tile start location
                        if ( prev_distance_2_ref_sqr > curr_distance_2_ref_sqr )
                        {
                            // printf("@@@ Same distance %d, choose closer one (%d, %d) instead of (%d, %d)\n", \
                            // distance_j, search_row_j, search_col_j, min_distance_row_i, min_distance_col_i);
                            min_distance_col_i = search_col_j;
                            min_distance_row_i = search_row_j;
                        }
                    }
                }
            }
            // printf("tile at (%d, %d) alignment (%d, %d)\n", \
            // ref_tile_row_i, ref_tile_col_i, min_distance_row_i, min_distance_col_i );
            // Convert window-relative best position back to an absolute displacement
            int alignment_row_i = prev_alignment_row_i + min_distance_row_i - search_radiou;
            int alignment_col_i = prev_alignment_col_i + min_distance_col_i - search_radiou;
            std::pair<int, int> alignment_i( alignment_row_i, alignment_col_i );
            // Add min_distance_i's corresponding idx as min
            curr_alignment.at( ref_tile_row_i ).at( ref_tile_col_i ) = alignment_i;
            distances.at( ref_tile_row_i ).at( ref_tile_col_i ) = min_distance_i;
        }
    }
    // printf("\n!!!!!Min distance for each tile \n");
    // for ( int tile_row = 0; tile_row < num_tiles_h; tile_row++ )
    // {
    // for ( int tile_col = 0; tile_col < num_tiles_w; ++tile_col )
    // {
    // printf("tile (%d, %d) distance %u\n", \
    // tile_row, tile_col, distances.at( tile_row).at(tile_col ) );
    // }
    // }
    // printf("\n!!!!!Alignment at current level\n");
    // for ( int tile_row = 0; tile_row < num_tiles_h; tile_row++ )
    // {
    // for ( int tile_col = 0; tile_col < num_tiles_w; tile_col++ )
    // {
    // const auto tile_start = curr_alignment.at( tile_row ).at( tile_col );
    // printf("tile (%d, %d) -> start idx (%d, %d)\n", \
    // tile_row, tile_col, tile_start.first, tile_start.second);
    // }
    // }
}
// Align every image in the burst against the reference image.
//
// For each image a grayscale pyramid is built; alignment then proceeds
// coarse-to-fine, each level seeded by the upsampled alignment of the level
// below it. On return, images_alignment[img_idx] holds the per-tile
// (row, col) pixel displacements at the finest level; the entry for the
// reference image itself is left empty.
//
// Relies on align members: num_levels, inv_scale_factors,
// grayimg_tile_sizes, grayimg_search_radious, distances — presumably set
// by the align constructor; TODO confirm against align.h.
void align::process( const hdrplus::burst& burst_images, \
    std::vector<std::vector<std::vector<std::pair<int, int>>>>& images_alignment )
{
    #ifndef NDEBUG
    printf("%s::%s align::process start\n", __FILE__, __func__ ); fflush(stdout);
    #endif
    images_alignment.clear();
    images_alignment.resize( burst_images.num_images );
    // image pyramid per image, per pyramid level
    std::vector<std::vector<cv::Mat>> per_grayimg_pyramid;
    // printf("!!!!! ref bayer padded\n");
    // print_img<uint16_t>( burst_images.bayer_images_pad.at( burst_images.reference_image_idx) );
    // exit(1);
    // printf("!!!!! ref gray padded\n");
    // print_img<uint16_t>( burst_images.grayscale_images_pad.at( burst_images.reference_image_idx) );
    // exit(1);
    per_grayimg_pyramid.resize( burst_images.num_images );
    // Build one pyramid per image; images are independent, so parallelize
    #pragma omp parallel for
    for ( int img_idx = 0; img_idx < burst_images.num_images; ++img_idx )
    {
        // per_grayimg_pyramid[ img_idx ][ 0 ] is the original image
        // per_grayimg_pyramid[ img_idx ][ 3 ] is the coarsest image
        build_per_grayimg_pyramid( per_grayimg_pyramid.at( img_idx ), \
            burst_images.grayscale_images_pad.at( img_idx ), \
            this->inv_scale_factors );
    }
    // #ifndef NDEBUG
    // printf("%s::%s build image pyramid of size : ", __FILE__, __func__ );
    // for ( int level_i = 0; level_i < num_levels; ++level_i )
    // {
    // printf("(%d, %d) ", per_grayimg_pyramid[ 0 ][ level_i ].size().height,
    // per_grayimg_pyramid[ 0 ][ level_i ].size().width );
    // }
    // printf("\n"); fflush(stdout);
    // #endif
    // print image pyramid
    // for ( int level_i; level_i < num_levels; ++level_i )
    // {
    // printf("\n\n!!!!! ref gray pyramid level %d img : \n" , level_i );
    // print_img<uint16_t>( per_grayimg_pyramid[ burst_images.reference_image_idx ][ level_i ] );
    // }
    // exit(-1);
    // Align every image
    const std::vector<cv::Mat>& ref_grayimg_pyramid = per_grayimg_pyramid[ burst_images.reference_image_idx ];
    std::vector<std::vector<std::pair<int, int>>> curr_alignment;
    std::vector<std::vector<std::pair<int, int>>> prev_alignment;
    for ( int img_idx = 0; img_idx < burst_images.num_images; ++img_idx )
    {
        // Do not align with reference image
        if ( img_idx == burst_images.reference_image_idx )
            continue;
        const std::vector<cv::Mat>& alt_grayimg_pyramid = per_grayimg_pyramid[ img_idx ];
        // Align every level from coarse to grain
        // level 0 : finest level, the original image
        // level 3 : coarsest level
        curr_alignment.clear();
        prev_alignment.clear();
        for ( int level_i = num_levels - 1; level_i >= 0; level_i-- ) // 3,2,1,0
        {
            // make curr alignment as previous alignment
            prev_alignment.swap( curr_alignment );
            curr_alignment.clear();
            // printf("\n\n########################align level %d\n", level_i );
            align_image_level(
                ref_grayimg_pyramid[ level_i ], // reference image at current level
                alt_grayimg_pyramid[ level_i ], // alternative image at current level
                prev_alignment, // previous layer alignment
                curr_alignment, // current layer alignment
                ( level_i == ( num_levels - 1 ) ? -1 : inv_scale_factors[ level_i + 1 ] ), // scale factor between previous layer and current layer. -1 if current layer is the coarsest layer, [-1, 4, 4, 2]
                grayimg_tile_sizes[ level_i ], // current level tile size
                ( level_i == ( num_levels - 1 ) ? -1 : grayimg_tile_sizes[ level_i + 1 ] ), // previous level tile size
                grayimg_search_radious[ level_i ], // search radius
                distances[ level_i ] ); // L1/L2 distance
            // printf("@@@Alignment at level %d is h=%d, w=%d", level_i, curr_alignment.size(), curr_alignment.at(0).size() );
        } // for pyramid level
        // Alignment at grayscale image
        images_alignment.at( img_idx ).swap( curr_alignment );
        // printf("\n!!!!!Alternative Image Alignment\n");
        // for ( int tile_row = 0; tile_row < images_alignment.at( img_idx ).size(); tile_row++ )
        // {
        // for ( int tile_col = 0; tile_col < images_alignment.at( img_idx ).at(0).size(); tile_col++ )
        // {
        // const auto tile_start = images_alignment.at( img_idx ).at( tile_row ).at( tile_col );
        // printf("tile (%d, %d) -> start idx (%d, %d)\n", \
        // tile_row, tile_col, tile_start.first, tile_start.second);
        // }
        // }
    } // for alternative image
}
} // namespace hdrplus

@ -0,0 +1,166 @@
#include <string>
#include <cstdio>
#include <iostream>
#include <utility> // std::pair, std::makr_pair
#include <memory> // std::shared_ptr
#include <stdexcept> // std::runtime_error
#include <opencv2/opencv.hpp> // all opencv header
#include <libraw/libraw.h>
#include <exiv2/exiv2.hpp> // exiv2
#include "hdrplus/bayer_image.h"
#include "hdrplus/utility.h" // box_filter_kxk
namespace hdrplus
{
// Load a RAW (DNG) file from disk: decode it with LibRaw, read the EXIF
// calibration tags with Exiv2, and build the raw / 2x2-downsampled
// grayscale CV mats used by the rest of the pipeline.
bayer_image::bayer_image( const std::string& bayer_image_path )
{
    libraw_processor = std::make_shared<LibRaw>();

    // Open RAW image file
    int return_code;
    if ( ( return_code = libraw_processor->open_file( bayer_image_path.c_str() ) ) != LIBRAW_SUCCESS )
    {
        libraw_processor->recycle();
#ifdef __ANDROID__
        // NOTE(review): on Android the constructor returns silently here,
        // leaving width/height/white_level etc. uninitialized — callers
        // must have some other validity check. TODO confirm intended.
        return;
#else
        throw std::runtime_error("Error opening file " + bayer_image_path + " " + libraw_strerror( return_code ));
#endif
    }

    // Unpack the raw image
    if ( ( return_code = libraw_processor->unpack() ) != LIBRAW_SUCCESS )
    {
#ifdef __ANDROID__
        return;
#else
        throw std::runtime_error("Error unpack file " + bayer_image_path + " " + libraw_strerror( return_code ));
#endif
    }

    // Get image basic info
    width = int( libraw_processor->imgdata.rawdata.sizes.raw_width );
    height = int( libraw_processor->imgdata.rawdata.sizes.raw_height );

    // Read exif tags (sensor calibration values used later for noise
    // modeling and black-level subtraction).
    Exiv2::Image::AutoPtr image = Exiv2::ImageFactory::open(bayer_image_path);
    assert(image.get() != 0);
    image->readMetadata();
    Exiv2::ExifData &exifData = image->exifData();
    if (exifData.empty()) {
        // Missing EXIF is reported but not fatal; the lookups below would
        // then insert default-valued entries.
        std::string error(bayer_image_path);
        error += ": No Exif data found in the file";
        std::cout << error << std::endl;
    }
    white_level = exifData["Exif.Image.WhiteLevel"].toLong();
    black_level_per_channel.resize( 4 );
    black_level_per_channel.at(0) = exifData["Exif.Image.BlackLevel"].toLong(0);
    black_level_per_channel.at(1) = exifData["Exif.Image.BlackLevel"].toLong(1);
    black_level_per_channel.at(2) = exifData["Exif.Image.BlackLevel"].toLong(2);
    black_level_per_channel.at(3) = exifData["Exif.Image.BlackLevel"].toLong(3);
    iso = exifData["Exif.Image.ISOSpeedRatings"].toLong();

    // Create CV mat
    // https://answers.opencv.org/question/105972/de-bayering-a-cr2-image/
    // https://www.libraw.org/node/2141
    raw_image = cv::Mat( height, width, CV_16U, libraw_processor->imgdata.rawdata.raw_image ).clone(); // changed the order of width and height

    // 2x2 box filter: one grayscale sample per bayer quad
    grayscale_image = box_filter_kxk<uint16_t, 2>( raw_image );

#ifndef NDEBUG
    // NOTE(review): %zu expects size_t — confirm the declared types of
    // width/height (and %.3f for iso) in the header match these formats.
    printf("%s::%s read bayer image %s with\n width %zu\n height %zu\n iso %.3f\n white level %d\n black level %d %d %d %d\n", \
        __FILE__, __func__, bayer_image_path.c_str(), width, height, iso, white_level, \
        black_level_per_channel[0], black_level_per_channel[1], black_level_per_channel[2], black_level_per_channel[3] );
    fflush( stdout );
#endif
}
// Load a RAW (DNG) image from an in-memory buffer: decode it with
// LibRaw, read the EXIF calibration tags with Exiv2, and build the
// raw / 2x2-downsampled grayscale CV mats.
bayer_image::bayer_image( const std::vector<uint8_t>& bayer_image_content )
{
    libraw_processor = std::make_shared<LibRaw>();

    // Open RAW image from the in-memory buffer
    int return_code;
    if ( ( return_code = libraw_processor->open_buffer( (void *)(&bayer_image_content[0]), bayer_image_content.size() ) ) != LIBRAW_SUCCESS )
    {
        libraw_processor->recycle();
#ifdef __ANDROID__
        return;
#else
        // Bug fix: the previous message concatenated `bayer_image_path`,
        // which does not exist in this overload (copy-paste from the
        // file-based constructor) and broke non-Android builds.
        throw std::runtime_error(std::string("Error opening RAW buffer: ") + libraw_strerror( return_code ));
#endif
    }
    // Unpack the raw image
    if ( ( return_code = libraw_processor->unpack() ) != LIBRAW_SUCCESS )
    {
#ifdef __ANDROID__
        return;
#else
        // Bug fix: same out-of-scope `bayer_image_path` reference removed.
        throw std::runtime_error(std::string("Error unpacking RAW buffer: ") + libraw_strerror( return_code ));
#endif
    }
    // Get image basic info
    width = int( libraw_processor->imgdata.rawdata.sizes.raw_width );
    height = int( libraw_processor->imgdata.rawdata.sizes.raw_height );
    // Read exif tags (sensor calibration values used later for noise
    // modeling and black-level subtraction).
    Exiv2::Image::AutoPtr image = Exiv2::ImageFactory::open(&bayer_image_content[0], bayer_image_content.size());
    assert(image.get() != 0);
    image->readMetadata();
    Exiv2::ExifData &exifData = image->exifData();
    if (exifData.empty()) {
        // Missing EXIF is reported but not fatal.
        std::string error = "No Exif data found in the file";
        std::cout << error << std::endl;
    }
    white_level = exifData["Exif.Image.WhiteLevel"].toLong();
    black_level_per_channel.resize( 4 );
    black_level_per_channel.at(0) = exifData["Exif.Image.BlackLevel"].toLong(0);
    black_level_per_channel.at(1) = exifData["Exif.Image.BlackLevel"].toLong(1);
    black_level_per_channel.at(2) = exifData["Exif.Image.BlackLevel"].toLong(2);
    black_level_per_channel.at(3) = exifData["Exif.Image.BlackLevel"].toLong(3);
    iso = exifData["Exif.Image.ISOSpeedRatings"].toLong();
    // Create CV mat
    // https://answers.opencv.org/question/105972/de-bayering-a-cr2-image/
    // https://www.libraw.org/node/2141
    raw_image = cv::Mat( height, width, CV_16U, libraw_processor->imgdata.rawdata.raw_image ).clone(); // changed the order of width and height
    // 2x2 box filter: one grayscale sample per bayer quad
    grayscale_image = box_filter_kxk<uint16_t, 2>( raw_image );
#ifndef NDEBUG
    // NOTE(review): %zu expects size_t — confirm the declared types of
    // width/height (and %.3f for iso) in the header match these formats.
    printf("%s::%s read bayer image with\n width %zu\n height %zu\n iso %.3f\n white level %d\n black level %d %d %d %d\n", \
        __FILE__, __func__, width, height, iso, white_level, \
        black_level_per_channel[0], black_level_per_channel[1], black_level_per_channel[2], black_level_per_channel[3] );
    fflush( stdout );
#endif
}
std::pair<double, double> bayer_image::get_noise_params() const
{
// Set ISO to 100 if not positive
double iso_ = iso <= 0 ? 100 : iso;
// Calculate shot noise and read noise parameters w.r.t ISO 100
double lambda_shot_p = iso_ / 100.0f * baseline_lambda_shot;
double lambda_read_p = (iso_ / 100.0f) * (iso_ / 100.0f) * baseline_lambda_read;
double black_level = (black_level_per_channel[0] + \
black_level_per_channel[1] + \
black_level_per_channel[2] + \
black_level_per_channel[3]) / 4.0;
// Rescale shot and read noise to normal range
double lambda_shot = lambda_shot_p * (white_level - black_level);
double lambda_read = lambda_read_p * (white_level - black_level) * (white_level - black_level);
// return pair
return std::make_pair(lambda_shot, lambda_read);
}
}

@ -0,0 +1,251 @@
#include <cstdio>
#include <string>
#include <omp.h>
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/burst.h"
#include "hdrplus/utility.h"
namespace hdrplus
{
// Build a burst from every *.dng file inside burst_path. The reference
// frame is located by exact string match against the globbed paths, so
// reference_image_path must be spelled exactly as cv::glob produces it.
burst::burst( const std::string& burst_path, const std::string& reference_image_path )
{
    std::vector<cv::String> bayer_image_paths;
    // Search through the input path directory to get all input image path
    if ( burst_path.at( burst_path.size() - 1) == '/')
        cv::glob( burst_path + "*.dng", bayer_image_paths, false );
    else
        cv::glob( burst_path + "/*.dng", bayer_image_paths, false );
#ifndef NDEBUG
    for ( const auto& bayer_img_path_i : bayer_image_paths )
    {
        printf("img i path %s\n", bayer_img_path_i.c_str()); fflush(stdout);
    }
    printf("ref img path %s\n", reference_image_path.c_str()); fflush(stdout);
#endif
    // Number of images
    num_images = bayer_image_paths.size();
    // Find reference image path in input directory
    // reference image path need to be absolute path
    reference_image_idx = -1;
    for ( size_t i = 0; i < bayer_image_paths.size(); ++i )
    {
        if ( bayer_image_paths[ i ] == reference_image_path )
        {
            reference_image_idx = i;
        }
    }
    // Bail out (leaving the burst empty) when the reference image is not
    // one of the globbed files.
    if ( reference_image_idx == -1 )
    {
        return;
        // throw std::runtime_error("Error unable to locate reference image " + reference_image_path );
    }
#ifndef NDEBUG
    for ( const auto& bayer_image_path_i : bayer_image_paths )
    {
        printf("%s::%s Find image %s\n", \
            __FILE__, __func__, bayer_image_path_i.c_str());
    }
    printf("%s::%s reference image idx %d\n", \
        __FILE__, __func__, reference_image_idx );
#endif
    // Get source bayer image
    // Downsample original bayer image by 2x2 box filter
    for ( const auto& bayer_image_path_i : bayer_image_paths )
    {
        bayer_images.emplace_back( bayer_image_path_i );
    }
    // Pad information: half a tile on every side, plus whatever is needed
    // to round the image up to a whole number of 32px bayer tiles.
    int tile_size_bayer = 32;
    int padding_top = tile_size_bayer / 2;
    int padding_bottom = tile_size_bayer / 2 + \
        ( (bayer_images[ 0 ].height % tile_size_bayer) == 0 ? \
        0 : tile_size_bayer - bayer_images[ 0 ].height % tile_size_bayer );
    int padding_left = tile_size_bayer / 2;
    int padding_right = tile_size_bayer / 2 + \
        ( (bayer_images[ 0 ].width % tile_size_bayer) == 0 ? \
        0 : tile_size_bayer - bayer_images[ 0 ].width % tile_size_bayer );
    padding_info_bayer = std::vector<int>{ padding_top, padding_bottom, padding_left, padding_right };
    // Pad bayer image with mirrored borders; also keep a 2x2-downsampled
    // grayscale version of each padded frame for alignment.
    for ( const auto& bayer_image_i : bayer_images )
    {
        cv::Mat bayer_image_pad_i;
        cv::copyMakeBorder( bayer_image_i.raw_image, \
            bayer_image_pad_i, \
            padding_top, padding_bottom, padding_left, padding_right, \
            cv::BORDER_REFLECT );
        // cv::Mat use internal reference count
        bayer_images_pad.emplace_back( bayer_image_pad_i );
        grayscale_images_pad.emplace_back( box_filter_kxk<uint16_t, 2>( bayer_image_pad_i ) );
    }
#ifndef NDEBUG
    printf("%s::%s Pad bayer image from (%d, %d) -> (%d, %d)\n", \
        __FILE__, __func__, \
        bayer_images[ 0 ].height, \
        bayer_images[ 0 ].width, \
        bayer_images_pad[ 0 ].size().height, \
        bayer_images_pad[ 0 ].size().width );
    printf("%s::%s pad top %d, buttom %d, left %d, right %d\n", \
        __FILE__, __func__, \
        padding_top, padding_bottom, padding_left, padding_right );
#endif
}
// Build a burst from an explicit list of DNG file paths; the reference
// frame is chosen by index into that list.
burst::burst( const std::vector<std::string>& bayer_image_paths, int reference_image_index )
{
    // Number of images
    num_images = bayer_image_paths.size();
    // Validate the reference index; -1 marks "invalid / empty burst".
    reference_image_idx = -1;
    if ( reference_image_index >= 0 && reference_image_index < bayer_image_paths.size() )
    {
        reference_image_idx = reference_image_index;
    }
    if ( reference_image_idx == -1 )
    {
        return;
        // throw std::runtime_error("Error reference image index is out of range " );
    }
#ifndef NDEBUG
    for ( const auto& bayer_image_path_i : bayer_image_paths )
    {
        printf("%s::%s Find image %s\n", \
            __FILE__, __func__, bayer_image_path_i.c_str());
    }
    printf("%s::%s reference image idx %d\n", \
        __FILE__, __func__, reference_image_idx );
#endif
    // Get source bayer image
    // Downsample original bayer image by 2x2 box filter
    for ( const auto& bayer_image_path_i : bayer_image_paths )
    {
        bayer_images.emplace_back( bayer_image_path_i );
    }
    // Pad information: half a tile on every side, plus whatever is needed
    // to round the image up to a whole number of 32px bayer tiles.
    int tile_size_bayer = 32;
    int padding_top = tile_size_bayer / 2;
    int padding_bottom = tile_size_bayer / 2 + \
        ( (bayer_images[ 0 ].height % tile_size_bayer) == 0 ? \
        0 : tile_size_bayer - bayer_images[ 0 ].height % tile_size_bayer );
    int padding_left = tile_size_bayer / 2;
    int padding_right = tile_size_bayer / 2 + \
        ( (bayer_images[ 0 ].width % tile_size_bayer) == 0 ? \
        0 : tile_size_bayer - bayer_images[ 0 ].width % tile_size_bayer );
    padding_info_bayer = std::vector<int>{ padding_top, padding_bottom, padding_left, padding_right };
    // Pad bayer image with mirrored borders; also keep a 2x2-downsampled
    // grayscale version of each padded frame for alignment.
    for ( const auto& bayer_image_i : bayer_images )
    {
        cv::Mat bayer_image_pad_i;
        cv::copyMakeBorder( bayer_image_i.raw_image, \
            bayer_image_pad_i, \
            padding_top, padding_bottom, padding_left, padding_right, \
            cv::BORDER_REFLECT );
        // cv::Mat use internal reference count
        bayer_images_pad.emplace_back( bayer_image_pad_i );
        grayscale_images_pad.emplace_back( box_filter_kxk<uint16_t, 2>( bayer_image_pad_i ) );
    }
#ifndef NDEBUG
    printf("%s::%s Pad bayer image from (%d, %d) -> (%d, %d)\n", \
        __FILE__, __func__, \
        bayer_images[ 0 ].height, \
        bayer_images[ 0 ].width, \
        bayer_images_pad[ 0 ].size().height, \
        bayer_images_pad[ 0 ].size().width );
    printf("%s::%s pad top %d, buttom %d, left %d, right %d\n", \
        __FILE__, __func__, \
        padding_top, padding_bottom, padding_left, padding_right );
#endif
}
// Build a burst from in-memory DNG file contents (one byte buffer per
// frame); the reference frame is chosen by index into the list.
burst::burst( const std::vector<std::vector<uint8_t> >& bayer_image_contents, int reference_image_index )
{
    // Number of images
    num_images = bayer_image_contents.size();
    // Validate the reference index; -1 marks "invalid / empty burst".
    reference_image_idx = -1;
    if ( reference_image_index >= 0 && reference_image_index < bayer_image_contents.size() )
    {
        reference_image_idx = reference_image_index;
    }
    if ( reference_image_idx == -1 )
    {
        return;
        // throw std::runtime_error("Error reference image index is out of range " );
    }
#ifndef NDEBUG
    printf("%s::%s reference image idx %d\n", \
        __FILE__, __func__, reference_image_idx );
#endif
    // Get source bayer image
    // Downsample original bayer image by 2x2 box filter
    for ( const auto& bayer_image_content : bayer_image_contents )
    {
        bayer_images.emplace_back( bayer_image_content );
    }
    // Pad information: half a tile on every side, plus whatever is needed
    // to round the image up to a whole number of 32px bayer tiles.
    int tile_size_bayer = 32;
    int padding_top = tile_size_bayer / 2;
    int padding_bottom = tile_size_bayer / 2 + \
        ( (bayer_images[ 0 ].height % tile_size_bayer) == 0 ? \
        0 : tile_size_bayer - bayer_images[ 0 ].height % tile_size_bayer );
    int padding_left = tile_size_bayer / 2;
    int padding_right = tile_size_bayer / 2 + \
        ( (bayer_images[ 0 ].width % tile_size_bayer) == 0 ? \
        0 : tile_size_bayer - bayer_images[ 0 ].width % tile_size_bayer );
    padding_info_bayer = std::vector<int>{ padding_top, padding_bottom, padding_left, padding_right };
    // Pad bayer image with mirrored borders; also keep a 2x2-downsampled
    // grayscale version of each padded frame for alignment.
    for ( const auto& bayer_image_i : bayer_images )
    {
        cv::Mat bayer_image_pad_i;
        cv::copyMakeBorder( bayer_image_i.raw_image, \
            bayer_image_pad_i, \
            padding_top, padding_bottom, padding_left, padding_right, \
            cv::BORDER_REFLECT );
        // cv::Mat use internal reference count
        bayer_images_pad.emplace_back( bayer_image_pad_i );
        grayscale_images_pad.emplace_back( box_filter_kxk<uint16_t, 2>( bayer_image_pad_i ) );
    }
#ifndef NDEBUG
    printf("%s::%s Pad bayer image from (%d, %d) -> (%d, %d)\n", \
        __FILE__, __func__, \
        bayer_images[ 0 ].height, \
        bayer_images[ 0 ].width, \
        bayer_images_pad[ 0 ].size().height, \
        bayer_images_pad[ 0 ].size().width );
    printf("%s::%s pad top %d, buttom %d, left %d, right %d\n", \
        __FILE__, __func__, \
        padding_top, padding_bottom, padding_left, padding_right );
#endif
}
} // namespace hdrplus

@ -0,0 +1,784 @@
#include <iostream>
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/finish.h"
#include "hdrplus/utility.h"
#include <cmath>
#ifdef __ANDROID__
#define DBG_OUTPUT_ROOT "/sdcard/com.xypower.mpapp/tmp/"
#else
#define DBG_OUTPUT_ROOT ""
#endif
// #include <type_traits>
namespace hdrplus
{
// Rescale a 16-bit mat (CV_16UC1 / CV_16UC3) into 8-bit range in place
// (truncating per sample), then convert it to the matching 8-bit type.
// Unsupported mat types are returned unchanged after a console warning.
cv::Mat convert16bit2_8bit_(cv::Mat ans){
    const double scale = 255.0 / USHRT_MAX;
    if(ans.type()==CV_16UC3){
        for(auto px = ans.begin<cv::Vec3w>(), last = ans.end<cv::Vec3w>(); px != last; ++px){
            for(int c = 0; c < 3; ++c){
                (*px)[c] *= scale;
            }
        }
        ans.convertTo(ans, CV_8UC3);
    }else if(ans.type()==CV_16UC1){
        u_int16_t* px = (u_int16_t*)ans.data;
        const int total = ans.rows * ans.cols;
        for(int i = 0; i < total; ++i){
            px[i] *= scale;
        }
        ans.convertTo(ans, CV_8UC1);
    }else{
        std::cout<<"Unsupported Data Type"<<std::endl;
    }
    return ans;
}
// Convert an 8-bit mat (CV_8UC1 / CV_8UC3) to 16-bit in place, then
// rescale each sample into the full 16-bit range (factor 65535/255).
// Unsupported mat types are returned unchanged after a console warning.
cv::Mat convert8bit2_16bit_(cv::Mat ans){
    const double scale = 65535.0 / 255.0;
    if(ans.type()==CV_8UC3){
        // Widen the element type first so the scaled values fit.
        ans.convertTo(ans, CV_16UC3);
        for(auto px = ans.begin<cv::Vec3w>(), last = ans.end<cv::Vec3w>(); px != last; ++px){
            for(int c = 0; c < 3; ++c){
                (*px)[c] *= scale;
            }
        }
    }else if(ans.type()==CV_8UC1){
        ans.convertTo(ans, CV_16UC1);
        u_int16_t* px = (u_int16_t*)ans.data;
        const int total = ans.rows * ans.cols;
        for(int i = 0; i < total; ++i){
            px[i] *= scale;
        }
    }else{
        std::cout<<"Unsupported Data Type"<<std::endl;
    }
    return ans;
}
// Scale samples by 2048/255 and convert the mat to CV_16UC3.
// NOTE(review): the 16-bit Vec3w iterator runs BEFORE convertTo, so an
// actual CV_8UC3 input would be misread — compare convert8bit2_16bit_,
// which converts first. Also 2048 is 2^11 (11-bit ceiling), not the
// 4095 a 12-bit range suggests. Confirm both are intended.
cv::Mat convert8bit2_12bit_(cv::Mat ans){
    // cv::Mat ans(I);
    cv::MatIterator_<cv::Vec3w> it, end;
    for( it = ans.begin<cv::Vec3w>(), end = ans.end<cv::Vec3w>(); it != end; ++it)
    {
        // std::cout<<sizeof (*it)[0] <<std::endl;
        (*it)[0] *=(2048.0/255.0);
        (*it)[1] *=(2048.0/255.0);
        (*it)[2] *=(2048.0/255.0);
    }
    ans.convertTo(ans, CV_16UC3);
    return ans;
}
// Piecewise sRGB-style gamma compression of one 16-bit sample:
// linear segment (gainMin * v) below `threshold`, power segment
// (gainMax * v^exponent - gainMax + 1) above it, clamped to [0, 1].
uint16_t uGammaCompress_1pix(float x, float threshold,float gainMin,float gainMax,float exponent){
    // Normalize to [0, 1].
    float v = x / USHRT_MAX;
    // Pick the linear or power segment.
    v = (v <= threshold) ? gainMin * v
                         : gainMax * pow(v, exponent) - gainMax + 1;
    // Clamp before denormalizing back to 16-bit range.
    if(v < 0){
        v = 0;
    }else if(v > 1){
        v = 1;
    }
    return (uint16_t)(v * USHRT_MAX);
}
uint16_t uGammaDecompress_1pix(float x, float threshold,float gainMin,float gainMax,float exponent){
// Normalize pixel val
x/=65535.0;
// check the val against the threshold
if(x<=threshold){
x = x/gainMin;
}else{
x = pow((x+gainMax-1)/gainMax,exponent);
}
// clip
if(x<0){
x=0;
}else{
if(x>1){
x = 1;
}
}
x*=65535;
return (uint16_t)x;
}
// Apply uGammaCompress_1pix to every sample of a CV_16UC1 / CV_16UC3
// mat in place; other types only trigger a console warning.
cv::Mat uGammaCompress_(cv::Mat m,float threshold,float gainMin,float gainMax,float exponent){
    if(m.type()==CV_16UC3){
        for(auto px = m.begin<cv::Vec3w>(), last = m.end<cv::Vec3w>(); px != last; ++px){
            for(int c = 0; c < 3; ++c){
                (*px)[c] = uGammaCompress_1pix((*px)[c],threshold,gainMin,gainMax,exponent);
            }
        }
    }else if(m.type()==CV_16UC1){
        u_int16_t* px = (u_int16_t*)m.data;
        const int total = m.rows*m.cols;
        for(int i = 0; i < total; ++i){
            px[i] = uGammaCompress_1pix(px[i],threshold,gainMin,gainMax,exponent);
        }
    }else{
        std::cout<<"Unsupported Data Type"<<std::endl;
    }
    return m;
}
// Apply uGammaDecompress_1pix to every sample of a CV_16UC1 / CV_16UC3
// mat in place; other types only trigger a console warning.
cv::Mat uGammaDecompress_(cv::Mat m,float threshold,float gainMin,float gainMax,float exponent){
    if(m.type()==CV_16UC3){
        for(auto px = m.begin<cv::Vec3w>(), last = m.end<cv::Vec3w>(); px != last; ++px){
            for(int c = 0; c < 3; ++c){
                (*px)[c] = uGammaDecompress_1pix((*px)[c],threshold,gainMin,gainMax,exponent);
            }
        }
    }else if(m.type()==CV_16UC1){
        u_int16_t* px = (u_int16_t*)m.data;
        const int total = m.rows*m.cols;
        for(int i = 0; i < total; ++i){
            px[i] = uGammaDecompress_1pix(px[i],threshold,gainMin,gainMax,exponent);
        }
    }else{
        std::cout<<"Unsupported Data Type"<<std::endl;
    }
    return m;
}
// Dispatch to sRGB gamma compression (mode == true) or decompression
// (mode == false) with the standard sRGB curve constants.
cv::Mat gammasRGB(cv::Mat img, bool mode){
    return mode ? uGammaCompress_(img, 0.0031308, 12.92, 1.055, 1. / 2.4)
                : uGammaDecompress_(img, 0.04045, 12.92, 1.055, 2.4);
}
// Copy the 16-bit samples of B into the raw destination buffer ptr_A.
// NOTE(review): only rows*cols elements are copied and B.data is read
// linearly — assumes a continuous, single-channel mat; confirm at the
// call sites.
void copy_mat_16U_2(u_int16_t* ptr_A, cv::Mat B){
    const u_int16_t* src = (u_int16_t*)B.data;
    const int total = B.rows * B.cols;
    for(int i = 0; i < total; ++i){
        ptr_A[i] = src[i];
    }
}
// Collapse a 3-channel 16-bit image into a single-channel CV_16UC1
// image whose pixels are the integer mean of the three channel values.
cv::Mat mean_(cv::Mat img){
    cv::Mat processedImg = cv::Mat(img.rows, img.cols, CV_16UC1);
    u_int16_t* dst = (u_int16_t*)processedImg.data;
    for(auto px = img.begin<cv::Vec3w>(), last = img.end<cv::Vec3w>(); px != last; ++px){
        // Sum in 32 bits so three 16-bit samples cannot overflow.
        uint32_t channel_sum = (*px)[0] + (*px)[1] + (*px)[2];
        *dst++ = (uint16_t)(channel_sum / 3);
    }
    return processedImg;
}
// Mean sample value of a 16-bit mat, normalized to [0, 1].
double getMean(cv::Mat img){
    const u_int16_t* px = (u_int16_t*)img.data;
    const int total = img.rows * img.cols * img.channels();
    double acc = 0;
    for(int i = 0; i < total; ++i){
        acc += px[i];
    }
    // Average, then normalize by the 16-bit maximum.
    return acc / total / USHRT_MAX;
}
// Multiply every 16-bit sample by `gain` in place, clamping the result
// to [0, USHRT_MAX].
cv::Mat matMultiply_scalar(cv::Mat img,float gain){
    u_int16_t* px = (u_int16_t*)img.data;
    const int total = img.rows * img.cols * img.channels();
    for(int i = 0; i < total; ++i){
        const double scaled = px[i] * gain;
        if(scaled < 0){
            px[i] = 0;
        }else if(scaled > USHRT_MAX){
            px[i] = USHRT_MAX;
        }else{
            px[i] = (u_int16_t)scaled;
        }
    }
    return img;
}
// Fraction of 16-bit samples strictly above `threshold` (threshold is
// given as a ratio in [0, 1] of the 16-bit maximum).
double getSaturated(cv::Mat img, double threshold){
    const double cutoff = threshold * USHRT_MAX;
    const u_int16_t* px = (u_int16_t*)img.data;
    const int total = img.rows * img.cols * img.channels();
    double saturated = 0;
    for(int i = 0; i < total; ++i){
        if(px[i] > cutoff){
            saturated++;
        }
    }
    return saturated / (double)total;
}
// Synthesize a single-channel "long exposure" grayscale: each output
// pixel is the mean of the three input channels after multiplying by
// `gain` and clamping each channel to [0, USHRT_MAX].
// Returns an empty mat for non-3-channel inputs.
cv::Mat meanGain_(cv::Mat img,int gain){
    if(img.channels()!=3){
        std::cout<<"unsupport img type in meanGain_()"<<std::endl;
        return cv::Mat();
    }
    // RGB input: amplify per channel with clamping, then average.
    cv::Mat processedImg = cv::Mat(img.rows, img.cols, CV_16UC1);
    u_int16_t* dst = (u_int16_t*)processedImg.data;
    for(auto px = img.begin<cv::Vec3w>(), last = img.end<cv::Vec3w>(); px != last; ++px){
        double channel_sum = 0;
        for(int c = 0; c < 3; ++c){
            double amplified = (*px)[c] * gain;
            if(amplified < 0) amplified = 0;
            if(amplified > USHRT_MAX) amplified = USHRT_MAX;
            channel_sum += amplified;
        }
        *dst++ = (uint16_t)(channel_sum / 3);
    }
    return processedImg;
}
// Scale every channel of mergedImage by the per-pixel tone-mapping
// ratio fusedGray/shortGray, clamping to 16-bit range. Pixels whose
// short-exposure gray value is 0 keep a ratio of 1 (division guard).
// Returns a scaled clone; the input mat is left untouched.
cv::Mat applyScaling_(cv::Mat mergedImage, cv::Mat shortGray, cv::Mat fusedGray){
    cv::Mat result = mergedImage.clone();
    u_int16_t* ptr_shortg = (u_int16_t*)shortGray.data;
    u_int16_t* ptr_fusedg = (u_int16_t*)fusedGray.data;
    int count = 0;
    cv::MatIterator_<cv::Vec3w> it, end;
    for( it = result.begin<cv::Vec3w>(), end = result.end<cv::Vec3w>(); it != end; ++it)
    {
        double s = 1;
        if(*(ptr_shortg+count)!=0){
            s = *(ptr_fusedg+count);
            s/=*(ptr_shortg+count);
        }
        for(int c=0;c<mergedImage.channels();c++){
            double tmp = (*it)[c]*s;
            if(tmp<0){
                (*it)[c] = 0;
            }else if(tmp>USHRT_MAX){
                (*it)[c] = USHRT_MAX;
            }else{
                (*it)[c] = tmp;
            }
        }
        // Bug fix: advance the grayscale index alongside the pixel
        // iterator — previously `count` was never incremented, so every
        // pixel reused the ratio computed for pixel (0, 0).
        count++;
    }
    return result;
}
// HDR local tone mapping via exposure fusion (Mertens):
//  1. compute a grayscale of the merged image,
//  2. pick a synthetic-long-exposure gain (auto-estimated on a
//     downsampled copy when options.ltmGain == -1),
//  3. fuse gamma-compressed short/long grayscales with MergeMertens,
//  4. rescale mergedImage's RGB channels by fused/short in place.
// Outputs: shortg / longg / fusedg (gamma-domain grayscales) and gain.
void localToneMap(cv::Mat& mergedImage, Options options, cv::Mat& shortg,
    cv::Mat& longg, cv::Mat& fusedg, int& gain){
    std::cout<<"HDR Tone Mapping..."<<std::endl;
    // # Work with grayscale images
    cv::Mat shortGray = rgb_2_gray<uint16_t, uint16_t, CV_16U>(mergedImage); //mean_(mergedImage);
    std::cout<<"--- Compute grayscale image"<<std::endl;
    // compute gain
    gain = 0;
    if(options.ltmGain==-1){
        // Auto gain: estimate exposure statistics on a ~25x downsampled copy.
        double dsFactor = 25;
        int down_height = round(shortGray.rows/dsFactor);
        int down_width = round(shortGray.cols/dsFactor);
        cv::Mat shortS;
        // Bug fix: cv::Size takes (width, height) — the arguments were
        // swapped — and the interpolation flag was being passed in the
        // fx parameter slot instead of the interpolation slot.
        cv::resize(shortGray,shortS,cv::Size(down_width,down_height),0,0,cv::INTER_LINEAR);
        shortS = shortS.reshape(1,1);
        bool bestGain = false;
        double compression = 1.0;
        double saturated = 0.0;
        cv::Mat shortSg = gammasRGB(shortS.clone(), true);
        double sSMean = getMean(shortSg);
        // Raise the gain in steps of 2 until compression / saturation
        // limits are reached.
        while((compression < 1.9 && saturated < .95)||((!bestGain) && (compression < 6) && (gain < 30) && (saturated < 0.33))){
            gain += 2;
            cv::Mat longSg = gammasRGB(shortS.clone()*gain, true);
            double lSMean = getMean(longSg);
            compression = lSMean / sSMean;
            bestGain = lSMean > (1 - sSMean) / 2; // only works if burst underexposed
            saturated = getSaturated(longSg,0.95);
            if(options.verbose==4){
                // (verbose==4 diagnostics were never implemented)
            }
        }
    }else{
        if(options.ltmGain>0){
            gain = options.ltmGain;
        }
    }
    std::cout<<"--- Compute gain"<<std::endl;
    // create a synthetic long exposure
    cv::Mat longGray = meanGain_(mergedImage.clone(),gain);
    std::cout<<"--- Synthetic long expo"<<std::endl;
    // apply gamma correction to both
    longg = gammasRGB(longGray.clone(), true);
    shortg = gammasRGB(shortGray.clone(),true);
    std::cout<<"--- Apply Gamma correction"<<std::endl;
    // perform tone mapping by exposure fusion in grayscale
    cv::Ptr<cv::MergeMertens> mergeMertens = cv::createMergeMertens();
    std::cout<<"--- Create Mertens"<<std::endl;
    // hack: cv2 mergeMertens expects inputs between 0 and 255
    // but the result is scaled between 0 and 1 (some values can actually be greater than 1!)
    std::vector<cv::Mat> src_expos;
    src_expos.push_back(convert16bit2_8bit_(shortg.clone()));
    src_expos.push_back(convert16bit2_8bit_(longg.clone()));
    mergeMertens->process(src_expos, fusedg);
    fusedg = fusedg*USHRT_MAX;
    fusedg.convertTo(fusedg, CV_16UC1);
    std::cout<<"--- Apply Mertens"<<std::endl;
    // undo gamma correction
    cv::Mat fusedGray = gammasRGB(fusedg.clone(), false);
    // cv::imwrite("fusedg_degamma.png", fusedGray);
    std::cout<<"--- Un-apply Gamma correction"<<std::endl;
    // scale each RGB channel of the short exposure accordingly
    mergedImage = applyScaling_(mergedImage, shortGray, fusedGray);
    std::cout<<"--- Scale channels"<<std::endl;
}
// Sine-based global contrast S-curve on one 16-bit sample:
// v' = v - gain*sin(2*pi*v) in normalized [0, 1] coordinates, clamped.
// Darkens values below mid-gray and brightens values above it.
u_int16_t enhanceContrast_1pix(u_int16_t pix_val,double gain){
    double v = pix_val / (double)USHRT_MAX;
    v -= gain * sin(2 * M_PI * v);
    // Clamp to the valid normalized range.
    if(v < 0){
        v = 0;
    }else if(v > 1){
        v = 1;
    }
    return (u_int16_t)(v * USHRT_MAX);
}
// Apply the sine contrast curve to every sample in place when
// options.gtmContrast is a valid ratio in [0, 1]; otherwise warn and
// leave the image untouched.
cv::Mat enhanceContrast(cv::Mat image, Options options){
    if(options.gtmContrast<0 || options.gtmContrast>1){
        std::cout<<"GTM ignored, expected a contrast enhancement ratio between 0 and 1"<<std::endl;
        return image;
    }
    u_int16_t* px = (u_int16_t*)image.data;
    const int total = image.rows * image.cols * image.channels();
    for(int i = 0; i < total; ++i){
        px[i] = enhanceContrast_1pix(px[i],options.gtmContrast);
    }
    return image;
}
// Per-sample absolute difference |X - Y| of two equally-sized 16-bit
// mats. On a size mismatch a warning is printed and the (uninitialized)
// result mat is returned as-is.
cv::Mat distL1_(cv::Mat X, cv::Mat Y){
    cv::Mat result = cv::Mat(X.rows,X.cols,X.type());
    const int total_x = X.rows * X.cols * X.channels();
    const int total_y = Y.rows * Y.cols * Y.channels();
    if(total_x != total_y){
        std::cout<<"Mat size not match. distL1_ failed!"<<std::endl;
        return result;
    }
    const u_int16_t* px = (u_int16_t*)X.data;
    const u_int16_t* py = (u_int16_t*)Y.data;
    u_int16_t* pr = (u_int16_t*)result.data;
    for(int i = 0; i < total_x; ++i){
        // Unsigned-safe |a - b|: always subtract the smaller value.
        pr[i] = (px[i] < py[i]) ? (py[i] - px[i]) : (px[i] - py[i]);
    }
    return result;
}
// Unsharp masking at three scales: for each scale i, sharpen the pixel
// as x + k_i*(x - blur_i) but only where the local contrast low_i
// (normalized to [0, 1]) exceeds th_i; otherwise keep x. The final
// output averages the three per-scale results, clamped to 16-bit range.
// NOTE(review): low* is normalized by USHRT_MAX before the threshold
// compare while x and blur* stay in raw 16-bit units — so th* are
// normalized-contrast thresholds. Confirm this matches the tuning data.
cv::Mat sharpenTriple_(cv::Mat image,
    cv::Mat blur0, cv::Mat low0, float th0, float k0,
    cv::Mat blur1, cv::Mat low1, float th1, float k1,
    cv::Mat blur2, cv::Mat low2, float th2, float k2){
    // create result mat
    cv::Mat result = cv::Mat(image.rows,image.cols,image.type());
    // initialize iteraters
    u_int16_t* ptr_r = (u_int16_t*)result.data;
    u_int16_t* ptr_img = (u_int16_t*)image.data;
    u_int16_t* ptr_blur0 = (u_int16_t*)blur0.data;
    u_int16_t* ptr_low0 = (u_int16_t*)low0.data;
    u_int16_t* ptr_blur1 = (u_int16_t*)blur1.data;
    u_int16_t* ptr_low1 = (u_int16_t*)low1.data;
    u_int16_t* ptr_blur2 = (u_int16_t*)blur2.data;
    u_int16_t* ptr_low2 = (u_int16_t*)low2.data;
    int n_channels = image.channels();
    int end = image.rows*image.cols*n_channels;
    // traverse Image
    for(int idx = 0;idx<end;idx++){
        double r, r0, r1, r2;
        double x = *(ptr_img+idx);
        // Local contrast per scale, normalized to [0, 1].
        double l0 = *(ptr_low0+idx)/(double)USHRT_MAX;
        double l1 = *(ptr_low1+idx)/(double)USHRT_MAX;
        double l2 = *(ptr_low2+idx)/(double)USHRT_MAX;
        // Blurred values stay in raw 16-bit units.
        double b0 = *(ptr_blur0+idx);
        double b1 = *(ptr_blur1+idx);
        double b2 = *(ptr_blur2+idx);
        // Sharpen only where local contrast clears the threshold.
        r0 = l0<th0? x:x+k0*(x-b0);
        r1 = l1<th1? x:x+k1*(x-b1);
        r2 = l2<th2? x:x+k2*(x-b2);
        // Average the three scales and clamp to 16-bit range.
        r = (r0+r1+r2)/3.0;
        if(r<0) r=0;
        if(r>USHRT_MAX) r = USHRT_MAX;
        *(ptr_r+idx) = (u_int16_t)r;
    }
    return result;
}
// Sharpen the image by triple-scale unsharp masking: one Gaussian blur
// per tuned sigma, an L1 local-contrast map per scale, then the
// per-pixel blend in sharpenTriple_.
cv::Mat sharpenTriple(cv::Mat image, Tuning tuning, Options options){
    const std::vector<float>& amounts = tuning.sharpenAmount;
    const std::vector<float>& sigmas = tuning.sharpenSigma;
    const std::vector<float>& thresholds = tuning.sharpenThreshold;
    // Gaussian blur at each scale (kernel size derived from sigma).
    cv::Mat blur0,blur1,blur2;
    cv::GaussianBlur(image,blur0,cv::Size(0,0),sigmas[0]);
    cv::GaussianBlur(image,blur1,cv::Size(0,0),sigmas[1]);
    cv::GaussianBlur(image,blur2,cv::Size(0,0),sigmas[2]);
    std::cout<<"    --- gaussian blur"<<std::endl;
    // cv::imwrite("blur2.png", blur2);
    // Local contrast per scale: |blur - image|.
    cv::Mat low0 = distL1_(blur0, image);
    cv::Mat low1 = distL1_(blur1, image);
    cv::Mat low2 = distL1_(blur2, image);
    std::cout<<"    --- low contrast"<<std::endl;
    // cv::imwrite("low2.png", low2);
    // Blend the three sharpened scales per pixel.
    cv::Mat sharpImage = sharpenTriple_(image,
        blur0, low0, thresholds[0], amounts[0],
        blur1, low1, thresholds[1], amounts[1],
        blur2, low2, thresholds[2], amounts[2]);
    std::cout<<"    --- sharpen"<<std::endl;
    return sharpImage;
}
// Copy the first rows*cols 16-bit samples of B into the raw destination
// buffer ptr_A.
// NOTE(review): channels are not accounted for — assumes a continuous
// single-channel mat; confirm at the call sites.
void copy_mat_16U_3(u_int16_t* ptr_A, cv::Mat B){
    const u_int16_t* src = (u_int16_t*)B.data;
    const int total = B.rows * B.cols;
    for(int i = 0; i < total; ++i){
        ptr_A[i] = src[i];
    }
}
// void copy_mat_16U_3(u_int16_t* ptr_A, cv::Mat B){
// // u_int16_t* ptr_A = (u_int16_t*)A.data;
// u_int16_t* ptr_B = (u_int16_t*)B.data;
// for(int r = 0; r < B.rows; r++) {
// for(int c = 0; c < B.cols; c++) {
// *(ptr_A+r*B.cols+c) = *(ptr_B+r*B.cols+c);
// }
// }
// }
// Rebuild a cv::Mat of the requested type from a merged image by
// round-tripping every row through an int vector, then reshaping to the
// channel count of opencv_type and converting the element type.
// NOTE(review): mergedImg.data is read as uint16 and indexed by
// r*cols+c, so this assumes a continuous, single-channel 16-bit input —
// confirm for any multi-channel caller.
cv::Mat processMergedMat(cv::Mat mergedImg, int opencv_type){
    cv::Mat m;
    uint16_t* ptr = (uint16_t*)mergedImg.data;
    for(int r = 0; r < mergedImg.rows; r++) {
        std::vector<int> dvals;
        for(int c = 0; c < mergedImg.cols; c++) {
            dvals.push_back(*(ptr+r*mergedImg.cols+c));
        }
        // One row at a time: column vector -> row vector -> append.
        cv::Mat mline(dvals, true);
        cv::transpose(mline, mline);
        m.push_back(mline);
    }
    // Reinterpret with the requested channel count and element type.
    int ch = CV_MAT_CN(opencv_type);
    m = m.reshape(ch);
    m.convertTo(m, opencv_type);
    return m;
}
// Debug helper: print the top-left 20x20 corner of a 16-bit mat.
void show20_20(cv::Mat m){
    const u_int16_t* px = (u_int16_t*)m.data;
    for(int row = 0; row < 20; ++row){
        for(int col = 0; col < 20; ++col){
            std::cout<<px[row*m.cols+col]<<", ";
        }
        std::cout<<std::endl;
    }
}
// Dump a cv::Mat as CSV text into the given file (overwriting it).
void writeCSV(std::string filename, cv::Mat m)
{
    // The ofstream destructor closes the file when `out` leaves scope.
    std::ofstream out(filename.c_str());
    out << cv::format(m, cv::Formatter::FMT_CSV) << std::endl;
}
void finish::process(const hdrplus::burst& burst_images, cv::Mat& finalOutputImage){
// copy mergedBayer to rawReference
std::cout<<"finish pipeline start ..."<<std::endl;
// save merged Image value
// #ifndef HDRPLUS_NO_DETAILED_OUTPUT
writeCSV(DBG_OUTPUT_ROOT "merged.csv",burst_images.merged_bayer_image);
// #endif
this->refIdx = burst_images.reference_image_idx;
// this->burstPath = burstPath;
// std::cout<<"processMerged:"<<std::endl;
// show20_20(mergedB);
this->mergedBayer = loadFromCSV(DBG_OUTPUT_ROOT "merged.csv", CV_16UC1);
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
// this->mergedBayer = processMergedMat(mergedB,CV_16UC1);//loadFromCSV("merged.csv", CV_16UC1);
// std::cout<<"processMerged:"<<std::endl;
// show20_20(this->mergedBayer);
// this->mergedBayer = loadFromCSV(DBG_OUTPUT_ROOT "merged.csv", CV_16UC1);
// this->mergedBayer = processMergedMat(burst_images.merged_bayer_image, CV_16UC1);
#else
// this->mergedBayer = loadFromCSV(DBG_OUTPUT_ROOT "merged.csv", CV_16UC1);
// this->mergedBayer = processMergedMat(burst_images.merged_bayer_image, CV_16UC1);
// std::cout<<"processMerged:"<<std::endl;
#endif
// std::cout<<"csv:"<<std::endl;
// show20_20(this->mergedBayer);
// load_rawPathList(burstPath);
// read in ref img
// bayer_image* ref = new bayer_image(rawPathList[refIdx]);
bayer_image* ref = new bayer_image(burst_images.bayer_images[burst_images.reference_image_idx]);
cv::Mat processedRefImage = postprocess(ref->libraw_processor,params.rawpyArgs);
std::cout<<"size ref: "<<processedRefImage.rows<<"*"<<processedRefImage.cols<<std::endl;
// write reference image
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeReferenceImage"]){
std::cout<<"writing reference img ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(processedRefImage.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
// cv::imshow("test",processedImage);
cv::imwrite(DBG_OUTPUT_ROOT "processedRef.jpg", outputImg);
// cv::waitKey(0);
}
#endif
// write gamma reference
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeGammaReference"]){
std::cout<<"writing Gamma reference img ..."<<std::endl;
cv::Mat outputImg = gammasRGB(processedRefImage.clone(),true);
outputImg = convert16bit2_8bit_(outputImg);
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "processedRefGamma.jpg", outputImg);
}
#endif
// get the bayer_image of the merged image
// bayer_image* mergedImg = new bayer_image(rawPathList[refIdx]);
bayer_image* mergedImg = new bayer_image(burst_images.bayer_images[this->refIdx]);
mergedImg->libraw_processor->imgdata.rawdata.raw_image = (uint16_t*)this->mergedBayer.data;
// copy_mat_16U_3(mergedImg->libraw_processor->imgdata.rawdata.raw_image,this->mergedBayer);
cv::Mat processedMerge = postprocess(mergedImg->libraw_processor,params.rawpyArgs);
// write merged image
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeMergedImage"]){
std::cout<<"writing Merged img ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(processedMerge.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "mergedImg.jpg", outputImg);
}
#endif
// write gamma merged image
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeMergedImage"]){
std::cout<<"writing Gamma Merged img ..."<<std::endl;
cv::Mat outputImg = gammasRGB(processedMerge.clone(),true);
outputImg = convert16bit2_8bit_(outputImg);
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "mergedImgGamma.jpg", outputImg);
}
#endif
// step 5. HDR tone mapping
// processedImage, gain, shortExposure, longExposure, fusedExposure = localToneMap(burstPath, processedImage, options)
int gain;
if(params.options.ltmGain){
cv::Mat shortExposure, longExposure, fusedExposure;
localToneMap(processedMerge, params.options,shortExposure,longExposure,fusedExposure,gain);
std::cout<<"gain="<< gain<<std::endl;
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeShortExposure"]){
std::cout<<"writing ShortExposure img ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(shortExposure);
cv::imwrite(DBG_OUTPUT_ROOT "shortg.jpg", outputImg);
}
#endif
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeLongExposure"]){
std::cout<<"writing LongExposure img ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(longExposure);
cv::imwrite(DBG_OUTPUT_ROOT "longg.jpg", outputImg);
}
#endif
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeFusedExposure"]){
std::cout<<"writing FusedExposure img ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(fusedExposure);
cv::imwrite(DBG_OUTPUT_ROOT "fusedg.jpg", outputImg);
}
#endif
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeLTMImage"]){
std::cout<<"writing LTMImage ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(processedMerge.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "ltmGain.jpg", outputImg);
}
#endif
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeLTMGamma"]){
std::cout<<"writing LTMImage Gamma ..."<<std::endl;
cv::Mat outputImg = gammasRGB(processedMerge.clone(),true);
outputImg = convert16bit2_8bit_(outputImg);
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "ltmGain_gamma.jpg", outputImg);
}
#endif
}
// step 6 GTM: contrast enhancement / global tone mapping
if(params.options.gtmContrast){
processedMerge = enhanceContrast(processedMerge, params.options);
std::cout<<"STEP 6 -- Apply GTM"<<std::endl;
}
// apply the final sRGB gamma curve
processedMerge = gammasRGB(processedMerge.clone(),true);
std::cout<<"-- Apply Gamma"<<std::endl;
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeGTMImage"]) {
std::cout<<"writing GTMImage ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(processedMerge.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "GTM_gamma.jpg", outputImg);
}
#endif
// Step 7: sharpen
finalOutputImage = sharpenTriple(processedMerge.clone(), params.tuning, params.options);
cv::Mat& processedImage = finalOutputImage;
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeFinalImage"]){
std::cout<<"writing FinalImage ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(processedImage.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "FinalImage.jpg", outputImg);
}
#endif
// write final ref
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeReferenceFinal"]){
std::cout<<"writing Final Ref Image ..."<<std::endl;
if(params.options.ltmGain){
params.options.ltmGain = gain;
}
cv::Mat shortExposureRef, longExposureRef, fusedExposureRef;
localToneMap(processedRefImage, params.options,shortExposureRef,longExposureRef,fusedExposureRef,gain);
if(params.options.gtmContrast){ // contrast enhancement / global tone mapping
processedRefImage = enhanceContrast(processedRefImage, params.options);
}
processedRefImage = gammasRGB(processedRefImage.clone(),true);
// sharpen
processedRefImage = sharpenTriple(processedRefImage.clone(), params.tuning, params.options);
cv::Mat outputImg = convert16bit2_8bit_(processedRefImage.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "FinalReference.jpg", outputImg);
}
#endif
// End of finishing
}
void finish::copy_mat_16U(cv::Mat& A, cv::Mat B){
    // Copy the 16-bit pixel data of B into A, element by element.
    //
    // BUGFIX: the previous implementation indexed raw `Mat::data` with
    // r * cols + c, which is only valid for continuous matrices. ROI views
    // (e.g. a Mat produced by `merged(vertical, horizontal)`) carry row
    // padding, so per-row `ptr<>()` access is required to honor `Mat::step`.
    //
    // NOTE(review): assumes A and B have the same dimensions and CV_16U
    // element type -- not checked here, matching the original contract.
    for (int r = 0; r < A.rows; r++) {
        uint16_t* dst = A.ptr<uint16_t>(r);
        const uint16_t* src = B.ptr<uint16_t>(r);
        for (int c = 0; c < A.cols; c++) {
            dst[c] = src[c];
        }
    }
}
void finish::copy_rawImg2libraw(std::shared_ptr<LibRaw>& libraw_ptr, cv::Mat B){
u_int16_t* ptr_A = (u_int16_t*)libraw_ptr->imgdata.rawdata.raw_image;
u_int16_t* ptr_B = (u_int16_t*)B.data;
for(int r = 0; r < B.rows; r++) {
for(int c = 0; c < B.cols; c++) {
*(ptr_A+r*B.cols+c) = *(ptr_B+r*B.cols+c);
}
}
}
} // namespace hdrplus

@ -0,0 +1,105 @@
#include <cstdio>
#include <string>
#include <vector>
#include <utility> // std::pair
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/hdrplus_pipeline.h"
#include "hdrplus/burst.h"
#include "hdrplus/align.h"
#include "hdrplus/merge.h"
#include "hdrplus/finish.h"
#include <fstream>
#ifdef __ANDROID__
#include <AndroidHelper.h>
#endif
namespace hdrplus
{
void hdrplus_pipeline::run_pipeline( \
    const std::string& burst_path, \
    const std::string& reference_image_path )
{
    // Load the burst from disk, then run the three pipeline stages in
    // order: align -> merge -> finish. This overload produces the final
    // image internally but does not expose it to the caller.
    burst burst_images( burst_path, reference_image_path );

    // Stage 1: tile-based alignment of every alternate frame to the reference.
    std::vector<std::vector<std::vector<std::pair<int, int>>>> tile_alignments;
    align_module.process( burst_images, tile_alignments );

    // Stage 2: merge the aligned frames into a single denoised bayer image.
    merge_module.process( burst_images, tile_alignments );

    // Stage 3: demosaic, tone-map and sharpen into the output image.
    cv::Mat finalImg;
    finish_module.process( burst_images, finalImg );
}
bool hdrplus_pipeline::run_pipeline( \
    const std::vector<std::string>& burst_paths, \
    int reference_image_index, cv::Mat& finalImg )
{
    // Pipeline entry point for a burst given as a list of file paths.
    // Stages run in order (align -> merge -> finish) and progress is
    // logged on Android between each stage. Always reports success.
    burst burst_images( burst_paths, reference_image_index );
#ifdef __ANDROID__
    ALOGI("Finish loading images");
#endif

    // Stage 1: tile-based alignment against the reference frame.
    std::vector<std::vector<std::vector<std::pair<int, int>>>> tile_alignments;
    align_module.process( burst_images, tile_alignments );
#ifdef __ANDROID__
    ALOGI("Finish align");
#endif

    // Stage 2: merge the aligned frames (temporal/spatial denoise).
    merge_module.process( burst_images, tile_alignments );
#ifdef __ANDROID__
    ALOGI("Finish merging");
#endif

    // Stage 3: finishing -- demosaic, tone map, sharpen into finalImg.
    finish_module.process( burst_images, finalImg );
#ifdef __ANDROID__
    ALOGI("Finish process");
#endif
    return true;
}
bool hdrplus_pipeline::run_pipeline( \
    const std::vector<std::vector<uint8_t> >& burst_contents, \
    int reference_image_index, cv::Mat& finalImg )
{
    // Pipeline entry point for a burst already loaded into memory
    // (one byte buffer per frame). Mirrors the path-based overload:
    // align -> merge -> finish, with per-stage Android logging.
    burst burst_images( burst_contents, reference_image_index );
#ifdef __ANDROID__
    ALOGI("Finish loading images");
#endif

    // Stage 1: tile-based alignment against the reference frame.
    std::vector<std::vector<std::vector<std::pair<int, int>>>> tile_alignments;
    align_module.process( burst_images, tile_alignments );
#ifdef __ANDROID__
    ALOGI("Finish align");
#endif

    // Stage 2: merge the aligned frames (temporal/spatial denoise).
    merge_module.process( burst_images, tile_alignments );
#ifdef __ANDROID__
    ALOGI("Finish merging");
#endif

    // Stage 3: finishing -- demosaic, tone map, sharpen into finalImg.
    finish_module.process( burst_images, finalImg );
#ifdef __ANDROID__
    ALOGI("Finish process");
#endif
    return true;
}
} // namespace hdrplus

@ -0,0 +1,338 @@
#include <opencv2/opencv.hpp> // all opencv header
#include <vector>
#include <utility>
#include "hdrplus/merge.h"
#include "hdrplus/burst.h"
#include "hdrplus/utility.h"
namespace hdrplus
{
void merge::process(hdrplus::burst& burst_images, \
    std::vector<std::vector<std::vector<std::pair<int, int>>>>& alignments)
{
    // Merge the aligned burst into a single denoised bayer image
    // (HDR+ paper sections 4.1-4.4). The result is stored in
    // burst_images.merged_bayer_image with padding removed.

    // 4.1 Noise parameters, derived from baseline ISO noise parameters
    // of the reference frame.
    double lambda_shot, lambda_read;
    std::tie(lambda_shot, lambda_read) = burst_images.bayer_images[burst_images.reference_image_idx].get_noise_params();

    // 4.2-4.4 Denoising and merging on the padded reference bayer image.
    cv::Mat reference_image = burst_images.bayer_images_pad[burst_images.reference_image_idx];
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
    // Debug output only -- guarded for consistency with the rest of the
    // pipeline (the unguarded write also fails on Android's read-only cwd).
    cv::imwrite("ref.jpg", reference_image);
#endif

    // Split the reference bayer mosaic into its four CFA planes.
    std::vector<cv::Mat> channels(4);
    hdrplus::extract_rgb_from_bayer<uint16_t>(reference_image, channels[0], channels[1], channels[2], channels[3]);

    // Pre-extract the four CFA planes of every alternate frame ONCE.
    // (Previously this extraction ran inside the per-channel loop below,
    // redoing the identical 4-plane split four times per alternate image.)
    std::vector<std::vector<cv::Mat>> alternate_channels; // [alt image][plane]
    for (int j = 0; j < burst_images.num_images; j++) {
        if (j == burst_images.reference_image_idx) {
            continue;
        }
        cv::Mat alt_image = burst_images.bayer_images_pad[j];
        std::vector<cv::Mat> alt_planes(4);
        hdrplus::extract_rgb_from_bayer<uint16_t>(alt_image, alt_planes[0], alt_planes[1], alt_planes[2], alt_planes[3]);
        alternate_channels.push_back(alt_planes);
    }

    // For each CFA plane, denoise and merge across the burst.
    std::vector<cv::Mat> processed_channels(4);
    for (int i = 0; i < 4; ++i) {
        // Gather plane i of every alternate image.
        std::vector<cv::Mat> alternate_channel_i_list;
        for (const auto& alt_planes : alternate_channels) {
            alternate_channel_i_list.push_back(alt_planes[i]);
        }
        cv::Mat merged_channel = processChannel(burst_images, alignments, channels[i], alternate_channel_i_list, lambda_shot, lambda_read);
        merged_channel.convertTo(processed_channels[i], CV_16U);
    }

    // Interleave the four merged planes back into an RGGB bayer mosaic.
    cv::Mat merged(reference_image.rows, reference_image.cols, CV_16U);
    for (int y = 0; y < reference_image.rows; ++y) {
        uint16_t* row = merged.ptr<uint16_t>(y);
        if (y % 2 == 0) {
            // Even rows carry R and G1.
            const uint16_t* i0 = processed_channels[0].ptr<uint16_t>(y / 2);
            const uint16_t* i1 = processed_channels[1].ptr<uint16_t>(y / 2);
            for (int x = 0; x < reference_image.cols; x += 2) {
                row[x] = i0[x / 2];     // R
                row[x + 1] = i1[x / 2]; // G1
            }
        } else {
            // Odd rows carry G2 and B.
            const uint16_t* i2 = processed_channels[2].ptr<uint16_t>(y / 2);
            const uint16_t* i3 = processed_channels[3].ptr<uint16_t>(y / 2);
            for (int x = 0; x < reference_image.cols; x += 2) {
                row[x] = i2[x / 2];     // G2
                row[x + 1] = i3[x / 2]; // B
            }
        }
    }

    // Crop away the padding added before alignment:
    // padding = {top, bottom, left, right}.
    std::vector<int> padding = burst_images.padding_info_bayer;
    cv::Range horizontal = cv::Range(padding[2], reference_image.cols - padding[3]);
    cv::Range vertical = cv::Range(padding[0], reference_image.rows - padding[1]);
    burst_images.merged_bayer_image = merged(vertical, horizontal);
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
    cv::imwrite("merged.jpg", burst_images.merged_bayer_image);
#endif
}
std::vector<cv::Mat> merge::getReferenceTiles(cv::Mat reference_image) {
    // Slice the image into TILE_SIZE x TILE_SIZE tiles in row-major order.
    // Tiles advance by `offset` pixels, so consecutive tiles overlap.
    // Each pushed Mat is a view into reference_image (no pixel copy).
    std::vector<cv::Mat> tiles;
    const int y_limit = reference_image.rows - offset;
    const int x_limit = reference_image.cols - offset;
    for (int y = 0; y < y_limit; y += offset) {
        for (int x = 0; x < x_limit; x += offset) {
            tiles.push_back(reference_image(cv::Rect(x, y, TILE_SIZE, TILE_SIZE)));
        }
    }
    return tiles;
}
cv::Mat merge::mergeTiles(std::vector<cv::Mat> tiles, int num_rows, int num_cols) {
    // Recombine overlapping tiles (row-major, stride = offset) into one
    // num_rows x num_cols image by summing four interleaved tile subsets.
    // Tiles are expected to be pre-weighted (see cosineWindow2D in the
    // caller) so overlapping contributions blend correctly.
    const int tiles_per_row = num_rows / offset - 1;
    const int tiles_per_col = num_cols / offset - 1;

    // Collect every second tile starting at grid position (y_start, x_start)
    // into a 2D grid of Mats.
    auto gatherSubset = [&](int y_start, int x_start) {
        std::vector<std::vector<cv::Mat>> subset;
        for (int y = y_start; y < tiles_per_row; y += 2) {
            std::vector<cv::Mat> row;
            for (int x = x_start; x < tiles_per_col; x += 2) {
                row.push_back(tiles[y * tiles_per_col + x]);
            }
            subset.push_back(row);
        }
        return subset;
    };

    // 1.+2. Build and concatenate the four subsets: evenly split,
    // horizontally overlapped, vertically overlapped, and 2D overlapped.
    cv::Mat img_original = cat2Dtiles(gatherSubset(0, 0));
    cv::Mat img_horizontal = cat2Dtiles(gatherSubset(0, 1));
    cv::Mat img_vertical = cat2Dtiles(gatherSubset(1, 0));
    cv::Mat img_2d = cat2Dtiles(gatherSubset(1, 1));

    // 3. Sum the shifted subsets into the base image.
    img_original(cv::Rect(offset, 0, num_cols - TILE_SIZE, num_rows)) += img_horizontal;
    img_original(cv::Rect(0, offset, num_cols, num_rows - TILE_SIZE)) += img_vertical;
    img_original(cv::Rect(offset, offset, num_cols - TILE_SIZE, num_rows - TILE_SIZE)) += img_2d;
    return img_original;
}
cv::Mat merge::processChannel(hdrplus::burst& burst_images, \
    std::vector<std::vector<std::vector<std::pair<int, int>>>>& alignments, \
    cv::Mat channel_image, \
    std::vector<cv::Mat> alternate_channel_i_list,\
    float lambda_shot, \
    float lambda_read) {
    // Merge one CFA plane across the burst (HDR+ paper sections 4.2-4.4):
    // tile the reference plane, Wiener-denoise each tile against the aligned
    // alternate-frame tiles in the frequency domain, spatially denoise, then
    // blend the tiles back together through a cosine window.
    // Get tiles of the reference image
    std::vector<cv::Mat> reference_tiles = getReferenceTiles(channel_image);
    // Get noise variance (sigma**2 = lambda_shot * tileRMS + lambda_read)
    std::vector<float> noise_variance = getNoiseVariance(reference_tiles, lambda_shot, lambda_read);
    // Apply FFT on reference tiles (spatial to frequency)
    std::vector<cv::Mat> reference_tiles_DFT;
    for (auto ref_tile : reference_tiles) {
        cv::Mat ref_tile_DFT;
        ref_tile.convertTo(ref_tile_DFT, CV_32F);
        cv::dft(ref_tile_DFT, ref_tile_DFT, cv::DFT_COMPLEX_OUTPUT);
        reference_tiles_DFT.push_back(ref_tile_DFT);
    }
    // Acquire alternate tiles and apply FFT on them as well
    std::vector<std::vector<cv::Mat>> alt_tiles_list(reference_tiles.size());
    // NOTE(review): tile grid dimensions are derived from the first alternate
    // plane; assumes all alternate planes match the reference plane's size.
    int num_tiles_row = alternate_channel_i_list[0].rows / offset - 1;
    int num_tiles_col = alternate_channel_i_list[0].cols / offset - 1;
    std::vector<cv::Mat> alt_tiles;
    for (int y = 0; y < num_tiles_row; ++y) {
        for (int x = 0; x < num_tiles_col; ++x) {
            alt_tiles.clear();
            // Get reference tile location
            int top_left_y = y * offset;
            int top_left_x = x * offset;
            for (int i = 0; i < alternate_channel_i_list.size(); ++i) {
                // Get alignment displacement
                int displacement_y, displacement_x;
                // NOTE(review): the i + 1 index assumes the reference frame
                // occupies slot 0 of `alignments` -- confirm this holds when
                // reference_image_idx != 0.
                std::tie(displacement_y, displacement_x) = alignments[i + 1][y][x];
                // Get tile (shifted by the per-tile alignment displacement)
                cv::Mat alt_tile = alternate_channel_i_list[i](cv::Rect(top_left_x + displacement_x, top_left_y + displacement_y, TILE_SIZE, TILE_SIZE));
                // Apply FFT
                cv::Mat alt_tile_DFT;
                alt_tile.convertTo(alt_tile_DFT, CV_32F);
                cv::dft(alt_tile_DFT, alt_tile_DFT, cv::DFT_COMPLEX_OUTPUT);
                alt_tiles.push_back(alt_tile_DFT);
            }
            alt_tiles_list[y * num_tiles_col + x] = alt_tiles;
        }
    }
    // 4.2 Temporal Denoising
    reference_tiles_DFT = temporal_denoise(reference_tiles_DFT, alt_tiles_list, noise_variance, TEMPORAL_FACTOR);
    // 4.3 Spatial Denoising
    reference_tiles_DFT = spatial_denoise(reference_tiles_DFT, alternate_channel_i_list.size(), noise_variance, SPATIAL_FACTOR);
    //now reference tiles are temporally and spatially denoised
    // Apply IFFT on reference tiles (frequency to spatial)
    std::vector<cv::Mat> denoised_tiles;
    for (auto dft_tile : reference_tiles_DFT) {
        cv::Mat denoised_tile;
        // Pre-divide by tile area: the inverse cv::dft below is called
        // without DFT_SCALE, which would otherwise leave the result scaled
        // by TILE_SIZE * TILE_SIZE.
        cv::divide(dft_tile, TILE_SIZE * TILE_SIZE, dft_tile);
        cv::dft(dft_tile, denoised_tile, cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT);
        denoised_tiles.push_back(denoised_tile);
    }
    reference_tiles = denoised_tiles;
    // 4.4 Cosine Window Merging
    // Process tiles through 2D cosine window
    std::vector<cv::Mat> windowed_tiles;
    for (auto tile : reference_tiles) {
        windowed_tiles.push_back(cosineWindow2D(tile));
    }
    // Merge tiles
    return mergeTiles(windowed_tiles, channel_image.rows, channel_image.cols);
}
std::vector<cv::Mat> merge::temporal_denoise(std::vector<cv::Mat> tiles, std::vector<std::vector<cv::Mat>> alt_tiles, std::vector<float> noise_variance, float temporal_factor) {
    // Temporal pairwise Wiener denoising (HDR+ paper section 4.2).
    //
    // Inputs:
    //   tiles           - 2D DFT tiles of the reference image
    //   alt_tiles       - per-tile lists of 2D DFT tiles of the aligned alternates
    //   noise_variance  - per-tile estimated noise variance
    //   temporal_factor - strength of the temporal shrinkage
    // Returns: merged (averaged) tile DFTs.
    //
    // BUGFIX: the noise scaling previously multiplied by the TEMPORAL_FACTOR
    // macro, silently ignoring the temporal_factor parameter. The current
    // caller passes TEMPORAL_FACTOR so behavior is unchanged for it, but the
    // parameter is now honored.
    double temporal_noise_scaling = (TILE_SIZE * TILE_SIZE * (2.0 / 16)) * temporal_factor;
    std::vector<cv::Mat> denoised;
    for (size_t i = 0; i < tiles.size(); ++i) {
        // Running sum of pairwise merges, seeded with the reference tile.
        cv::Mat tile_sum = tiles[i].clone();
        double coeff = temporal_noise_scaling * noise_variance[i];
        // Ref tile
        cv::Mat tile = tiles[i];
        // Alt tiles
        std::vector<cv::Mat> alt_tiles_i = alt_tiles[i];
        for (size_t j = 0; j < alt_tiles_i.size(); ++j) {
            cv::Mat alt_tile = alt_tiles_i[j];
            // Complex difference between reference and alternate tile.
            cv::Mat diff = tile - alt_tile;
            // Squared magnitude of the complex difference.
            cv::Mat complexMats[2];
            cv::split(diff, complexMats); // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
            cv::magnitude(complexMats[0], complexMats[1], complexMats[0]); // planes[0] = magnitude
            cv::Mat absolute_diff = complexMats[0].mul(complexMats[0]);
            // Shrinkage operator A = |D|^2 / (|D|^2 + c): close to 1 where the
            // frames disagree strongly (keep the reference), close to 0 where
            // the difference is within noise (average in the alternate).
            cv::Mat shrinkage;
            cv::divide(absolute_diff, absolute_diff + coeff, shrinkage);
            cv::merge(std::vector<cv::Mat>{shrinkage, shrinkage}, shrinkage);
            // Interpolation: alt + A * (ref - alt).
            tile_sum += alt_tile + diff.mul(shrinkage);
        }
        // Average by number of frames (reference + alternates).
        cv::divide(tile_sum, alt_tiles_i.size() + 1, tile_sum);
        denoised.push_back(tile_sum);
    }
    return denoised;
}
std::vector<cv::Mat> merge::spatial_denoise(std::vector<cv::Mat> tiles, int num_alts, std::vector<float> noise_variance, float spatial_factor) {
    // Spatial Wiener shrinkage of each DFT tile (HDR+ paper section 4.3).
    const double noise_scaling = (TILE_SIZE * TILE_SIZE * (1.0 / 16)) * spatial_factor;

    // Build |w|: per-bin frequency distance, laid out to match cv::dft
    // output via ifftshift.
    cv::Mat axis = cv::Mat::zeros(1, TILE_SIZE, CV_32F);
    for (int i = 0; i < TILE_SIZE; ++i) {
        axis.at<float>(i) = i - offset;
    }
    cv::Mat row_dist = cv::repeat(axis.t(), 1, TILE_SIZE);
    cv::Mat col_dist = row_dist.t();
    cv::Mat distances;
    cv::sqrt(row_dist.mul(row_dist) + col_dist.mul(col_dist), distances);
    ifftshift(distances);

    std::vector<cv::Mat> denoised;
    denoised.reserve(tiles.size());
    for (size_t i = 0; i < tiles.size(); ++i) {
        cv::Mat tile = tiles[i];
        // Temporal averaging already reduced variance by (num_alts + 1).
        const float coeff = noise_variance[i] / (num_alts + 1) * noise_scaling;

        // Squared magnitude of each complex DFT coefficient.
        cv::Mat planes[2];
        cv::split(tile, planes); // planes[0] = Re, planes[1] = Im
        cv::magnitude(planes[0], planes[1], planes[0]);
        cv::Mat power = planes[0].mul(planes[0]);

        // Shrinkage scale = |T|^2 / (|T|^2 + |w| * c), duplicated so it
        // applies to both the real and imaginary planes.
        cv::Mat scale;
        cv::divide(power, power + distances * coeff, scale);
        cv::merge(std::vector<cv::Mat>{scale, scale}, scale);
        denoised.push_back(tile.mul(scale));
    }
    return denoised;
}
} // namespace hdrplus

@ -0,0 +1,53 @@
#include <iostream>
#include <opencv2/opencv.hpp> // all opencv header
#include <hdrplus/params.h>
namespace hdrplus
{
void setParams(std::shared_ptr<LibRaw>& libraw_ptr, RawpyArgs rawpyArgs){
    // Transfer rawpy-style postprocessing options onto the LibRaw output
    // parameters (field names mirror rawpy's postprocess() keyword args).
    libraw_ptr->imgdata.params.user_qual = rawpyArgs.demosaic_algorithm; // demosaic interpolation quality
    libraw_ptr->imgdata.params.half_size = rawpyArgs.half_size;
    libraw_ptr->imgdata.params.use_camera_wb = rawpyArgs.use_camera_wb;
    libraw_ptr->imgdata.params.use_auto_wb = rawpyArgs.use_auto_wb;
    libraw_ptr->imgdata.params.no_auto_bright = rawpyArgs.no_auto_bright;
    libraw_ptr->imgdata.params.output_color = rawpyArgs.output_color; // output color space id
    // gamm[0]/gamm[1]: LibRaw gamma-curve parameters (two-element curve spec).
    libraw_ptr->imgdata.params.gamm[0] = rawpyArgs.gamma[0];
    libraw_ptr->imgdata.params.gamm[1] = rawpyArgs.gamma[1];
    libraw_ptr->imgdata.params.output_bps = rawpyArgs.output_bps; // bits per sample (8 or 16)
}
cv::Mat postprocess(std::shared_ptr<LibRaw>& libraw_ptr, RawpyArgs rawpyArgs){
    // Run the LibRaw development pipeline (demosaic, white balance, color
    // correction) with rawpy-style parameters and return the result as a
    // cv::Mat that OWNS its pixel data.
    //
    // BUGFIX: the buffer returned by dcraw_make_mem_image() was previously
    // never freed, and the returned Mat aliased that buffer (a leak, and a
    // dangling pointer if anyone ever freed it). The Mat is now cloned and
    // the LibRaw buffer released. The error code is also checked.
    std::cout<<"postprocessing..."<<std::endl;
    // set parameters
    setParams(libraw_ptr,rawpyArgs);
    std::cout<<"conversion to 16 bit using black and white levels, demosaicking, white balance, color correction..."<<std::endl;
    libraw_ptr->dcraw_process();
    int errorcode;
    libraw_processed_image_t *ret_img = libraw_ptr->dcraw_make_mem_image(&errorcode);
    if (ret_img == nullptr) {
        // Surface the failure instead of dereferencing a null pointer.
        std::cout << "dcraw_make_mem_image failed, error " << errorcode << std::endl;
        return cv::Mat();
    }
    // Map LibRaw's (colors, bits) description to an OpenCV pixel type.
    int opencv_type = CV_16UC3; // 16bit RGB default
    if (ret_img->colors == 1) { // grayscale
        opencv_type = (ret_img->bits == 8) ? CV_8UC1 : CV_16UC1;
    } else { // RGB
        opencv_type = (ret_img->bits == 8) ? CV_8UC3 : CV_16UC3;
    }
    // Wrap the LibRaw buffer (no copy), then clone so the returned Mat owns
    // its pixels and the LibRaw buffer can be released immediately.
    cv::Mat processedImg = cv::Mat(ret_img->height, ret_img->width, opencv_type, ret_img->data).clone();
    LibRaw::dcraw_clear_mem(ret_img);
    std::cout<<"postprocess finished!"<<std::endl;
    return processedImg;
}
}

@ -0,0 +1,60 @@
// Copyright 2014 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
cc_library_shared {
name: "libimg_utils",
srcs: [
"src/EndianUtils.cpp",
"src/FileInput.cpp",
"src/FileOutput.cpp",
"src/SortedEntryVector.cpp",
"src/Input.cpp",
"src/Output.cpp",
"src/Orderable.cpp",
"src/TiffIfd.cpp",
"src/TiffWritable.cpp",
"src/TiffWriter.cpp",
"src/TiffEntry.cpp",
"src/TiffEntryImpl.cpp",
"src/ByteArrayOutput.cpp",
"src/DngUtils.cpp",
"src/StripSource.cpp",
],
shared_libs: [
"liblog",
"libutils",
"libcutils",
],
cflags: [
"-Wall",
"-Wextra",
"-Werror",
"-fvisibility=hidden",
],
product_variables: {
debuggable: {
// Enable assert() in eng builds
cflags: [
"-UNDEBUG",
"-DLOG_NDEBUG=1",
],
},
},
export_include_dirs: ["include"],
}

@ -0,0 +1,83 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_BYTE_ARRAY_OUTPUT_H
#define IMG_UTILS_BYTE_ARRAY_OUTPUT_H
#include <img_utils/Output.h>
#include <utils/Errors.h>
// #include <utils/Vector.h>
#include <cutils/compiler.h>
#include <stdint.h>
#include <vector>
namespace android {
namespace img_utils {
/**
* Utility class that accumulates written bytes into a buffer.
*/
class ANDROID_API ByteArrayOutput : public Output {
public:
ByteArrayOutput();
virtual ~ByteArrayOutput();
/**
* Open this ByteArrayOutput.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t open();
/**
* Write bytes from the given buffer. The number of bytes given in the count
* argument will be written. Bytes will be written from the given buffer starting
* at the index given in the offset argument.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t write(const uint8_t* buf, size_t offset, size_t count);
/**
* Close this ByteArrayOutput.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t close();
/**
* Get current size of the array of bytes written.
*/
virtual size_t getSize() const;
/**
* Get pointer to array of bytes written. It is not valid to use this pointer if
* open, write, or close is called after this method.
*/
virtual const uint8_t* getArray() const;
protected:
std::vector<uint8_t> mByteArray;
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_BYTE_ARRAY_OUTPUT_H*/

@ -0,0 +1,232 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_DNG_UTILS_H
#define IMG_UTILS_DNG_UTILS_H
#include <img_utils/ByteArrayOutput.h>
#include <img_utils/EndianUtils.h>
#include <utils/Errors.h>
#include <utils/Log.h>
#include <utils/RefBase.h>
#include <cutils/compiler.h>
#include <stdint.h>
namespace android {
namespace img_utils {
#define NELEMS(x) ((int) (sizeof(x) / sizeof((x)[0])))
#define CLAMP(x, low, high) (((x) > (high)) ? (high) : (((x) < (low)) ? (low) : (x)))
/**
* Utility class for building values for the OpcodeList tags specified
* in the Adobe DNG 1.4 spec.
*/
class ANDROID_API OpcodeListBuilder : public LightRefBase<OpcodeListBuilder> {
public:
// Note that the Adobe DNG 1.4 spec for Bayer phase (defined for the
// FixBadPixelsConstant and FixBadPixelsList opcodes) is incorrect. It's
// inconsistent with the DNG SDK (cf. dng_negative::SetBayerMosaic and
// dng_opcode_FixBadPixelsList::IsGreen), and Adobe confirms that the
// spec should be updated to match the SDK.
enum CfaLayout {
CFA_GRBG = 0,
CFA_RGGB,
CFA_BGGR,
CFA_GBRG,
CFA_NONE,
};
OpcodeListBuilder();
virtual ~OpcodeListBuilder();
/**
* Get the total size of this opcode list in bytes.
*/
virtual size_t getSize() const;
/**
* Get the number of opcodes defined in this list.
*/
virtual uint32_t getCount() const;
/**
* Write the opcode list into the given buffer. This buffer
* must be able to hold at least as many elements as returned
* by calling the getSize() method.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t buildOpList(/*out*/ uint8_t* buf) const;
/**
* Add GainMap opcode(s) for the given metadata parameters. The given
* CFA layout must match the layout of the shading map passed into the
* lensShadingMap parameter.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t addGainMapsForMetadata(uint32_t lsmWidth,
uint32_t lsmHeight,
uint32_t activeAreaTop,
uint32_t activeAreaLeft,
uint32_t activeAreaBottom,
uint32_t activeAreaRight,
CfaLayout cfa,
const float* lensShadingMap);
/**
* Add a GainMap opcode with the given fields. The mapGains array
* must have mapPointsV * mapPointsH * mapPlanes elements.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t addGainMap(uint32_t top,
uint32_t left,
uint32_t bottom,
uint32_t right,
uint32_t plane,
uint32_t planes,
uint32_t rowPitch,
uint32_t colPitch,
uint32_t mapPointsV,
uint32_t mapPointsH,
double mapSpacingV,
double mapSpacingH,
double mapOriginV,
double mapOriginH,
uint32_t mapPlanes,
const float* mapGains);
/**
* Add WarpRectilinear opcode for the given metadata parameters.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t addWarpRectilinearForMetadata(const float* kCoeffs,
uint32_t activeArrayWidth,
uint32_t activeArrayHeight,
float opticalCenterX,
float opticalCenterY);
/**
* Add a WarpRectilinear opcode.
*
* numPlanes - Number of planes included in this opcode.
* opticalCenterX, opticalCenterY - Normalized x,y coordinates of the sensor optical
* center relative to the top,left pixel of the produced images (e.g. [0.5, 0.5]
* gives a sensor optical center in the image center.
* kCoeffs - A list of coefficients for the polynomial equation representing the distortion
* correction. For each plane, 6 coefficients must be included:
* {k_r0, k_r1, k_r2, k_r3, k_t0, k_t1}. See the DNG 1.4 specification for an
* outline of the polynomial used here.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t addWarpRectilinear(uint32_t numPlanes,
double opticalCenterX,
double opticalCenterY,
const double* kCoeffs);
/**
* Add FixBadPixelsList opcode for the given metadata parameters.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t addBadPixelListForMetadata(const uint32_t* hotPixels,
uint32_t xyPairCount,
uint32_t colorFilterArrangement);
/**
* Add FixBadPixelsList opcode.
*
* bayerPhase - 0=top-left of image is red, 1=top-left of image is green pixel in red row,
* 2=top-left of image is green pixel in blue row, 3=top-left of image is
* blue.
* badPointCount - number of (x,y) pairs of bad pixels are given in badPointRowColPairs.
* badRectCount - number of (top, left, bottom, right) tuples are given in
* badRectTopLeftBottomRightTuples
*
* Returns OK on success, or a negative error code.
*/
virtual status_t addBadPixelList(uint32_t bayerPhase,
uint32_t badPointCount,
uint32_t badRectCount,
const uint32_t* badPointRowColPairs,
const uint32_t* badRectTopLeftBottomRightTuples);
// TODO: Add other Opcode methods
protected:
static const uint32_t FLAG_OPTIONAL = 0x1u;
static const uint32_t FLAG_OPTIONAL_FOR_PREVIEW = 0x2u;
// Opcode IDs
enum {
WARP_RECTILINEAR_ID = 1,
FIX_BAD_PIXELS_LIST = 5,
GAIN_MAP_ID = 9,
};
// LSM mosaic indices
enum {
LSM_R_IND = 0,
LSM_GE_IND = 1,
LSM_GO_IND = 2,
LSM_B_IND = 3,
};
uint32_t mCount;
ByteArrayOutput mOpList;
EndianOutput mEndianOut;
status_t addOpcodePreamble(uint32_t opcodeId);
private:
/**
* Add Bayer GainMap opcode(s) for the given metadata parameters.
* CFA layout must match the layout of the shading map passed into the
* lensShadingMap parameter.
*
* Returns OK on success, or a negative error code.
*/
status_t addBayerGainMapsForMetadata(uint32_t lsmWidth,
uint32_t lsmHeight,
uint32_t activeAreaWidth,
uint32_t activeAreaHeight,
CfaLayout cfa,
const float* lensShadingMap);
/**
* Add Bayer GainMap opcode(s) for the given metadata parameters.
* CFA layout must match the layout of the shading map passed into the
* lensShadingMap parameter.
*
* Returns OK on success, or a negative error code.
*/
status_t addMonochromeGainMapsForMetadata(uint32_t lsmWidth,
uint32_t lsmHeight,
uint32_t activeAreaWidth,
uint32_t activeAreaHeight,
const float* lensShadingMap);
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_DNG_UTILS_H*/

@ -0,0 +1,250 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_ENDIAN_UTILS
#define IMG_UTILS_ENDIAN_UTILS
#include <img_utils/Output.h>
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <stdint.h>
#include <endian.h>
#include <assert.h>
namespace android {
namespace img_utils {
/**
* Endianness types supported.
*/
enum ANDROID_API Endianness {
UNDEFINED_ENDIAN, // Default endianness will be used.
BIG,
LITTLE
};
/**
* Convert from the native device endianness to big endian.
*/
template<typename T>
T convertToBigEndian(T in);
/**
* Convert from the native device endianness to little endian.
*/
template<typename T>
T convertToLittleEndian(T in);
/**
* A utility class for writing to an Output with the given endianness.
*/
class ANDROID_API EndianOutput : public Output {
public:
/**
* Wrap the given Output. Calling write methods will result in
* writes to this output.
*/
explicit EndianOutput(Output* out, Endianness end=LITTLE);
virtual ~EndianOutput();
/**
* Call open on the wrapped output.
*/
virtual status_t open();
/**
* Call close on the wrapped output.
*/
virtual status_t close();
/**
* Set the endianness to use when writing.
*/
virtual void setEndianness(Endianness end);
/**
* Get the currently configured endianness.
*/
virtual Endianness getEndianness() const;
/**
* Get the current number of bytes written by this EndianOutput.
*/
virtual uint32_t getCurrentOffset() const;
// TODO: switch write methods to uint32_t instead of size_t,
// the max size of a TIFF files is bounded
/**
* The following methods will write elements from given input buffer to the output.
* Count elements in the buffer will be written with the endianness set for this
* EndianOutput. If the given offset is greater than zero, that many elements will
* be skipped in the buffer before writing.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t write(const uint8_t* buf, size_t offset, size_t count);
virtual status_t write(const int8_t* buf, size_t offset, size_t count);
virtual status_t write(const uint16_t* buf, size_t offset, size_t count);
virtual status_t write(const int16_t* buf, size_t offset, size_t count);
virtual status_t write(const uint32_t* buf, size_t offset, size_t count);
virtual status_t write(const int32_t* buf, size_t offset, size_t count);
virtual status_t write(const uint64_t* buf, size_t offset, size_t count);
virtual status_t write(const int64_t* buf, size_t offset, size_t count);
virtual status_t write(const float* buf, size_t offset, size_t count);
virtual status_t write(const double* buf, size_t offset, size_t count);
protected:
/**
* Shared implementation behind the typed write() overloads: converts
* each element to the configured endianness before forwarding the raw
* bytes to mOutput.
*/
template<typename T>
inline status_t writeHelper(const T* buf, size_t offset, size_t count);
// Running count of bytes written through this EndianOutput.
uint32_t mOffset;
// Wrapped output stream; lifetime managed by the caller -- TODO confirm ownership.
Output* mOutput;
// Byte order applied to values as they are written.
Endianness mEndian;
};
/**
 * Write elements from buf to the wrapped output, converting each element to
 * the endianness configured for this EndianOutput.
 *
 * Per the contract documented on the write() overloads, the first offset
 * elements of buf are skipped and count elements are then written.
 *
 * Returns OK on success, or the first error returned by the wrapped output.
 */
template<typename T>
inline status_t EndianOutput::writeHelper(const T* buf, size_t offset, size_t count) {
    // NOTE(review): the previous version looped i from offset to count while
    // also indexing buf[offset + i], which skipped `offset` twice and wrote
    // count - offset elements. This contradicted the documented contract
    // ("count elements ... offset elements skipped"), so the loop now starts
    // at zero. The old assert(offset <= count) guarded the old indexing and
    // is no longer meaningful, so it has been dropped.
    status_t res = OK;
    const size_t size = sizeof(T);
    switch (mEndian) {
        case BIG: {
            for (size_t i = 0; i < count; ++i) {
                // Convert one element at a time so the wrapped Output only
                // ever sees bytes in the requested byte order.
                T tmp = convertToBigEndian<T>(buf[offset + i]);
                if ((res = mOutput->write(reinterpret_cast<uint8_t*>(&tmp), 0, size))
                        != OK) {
                    return res;
                }
                mOffset += size;
            }
            break;
        }
        case LITTLE: {
            for (size_t i = 0; i < count; ++i) {
                T tmp = convertToLittleEndian<T>(buf[offset + i]);
                if ((res = mOutput->write(reinterpret_cast<uint8_t*>(&tmp), 0, size))
                        != OK) {
                    return res;
                }
                mOffset += size;
            }
            break;
        }
        default: {
            // Unrecognized endianness value.
            return BAD_VALUE;
        }
    }
    return res;
}
/**
 * Specializations of convertToBigEndian. Single-byte types are returned
 * unchanged; wider integer types are converted via the htobe* functions.
 */
template<>
inline uint8_t convertToBigEndian(uint8_t in) {
return in;
}
template<>
inline int8_t convertToBigEndian(int8_t in) {
return in;
}
template<>
inline uint16_t convertToBigEndian(uint16_t in) {
return htobe16(in);
}
template<>
inline int16_t convertToBigEndian(int16_t in) {
return htobe16(in);
}
template<>
inline uint32_t convertToBigEndian(uint32_t in) {
return htobe32(in);
}
template<>
inline int32_t convertToBigEndian(int32_t in) {
return htobe32(in);
}
template<>
inline uint64_t convertToBigEndian(uint64_t in) {
return htobe64(in);
}
template<>
inline int64_t convertToBigEndian(int64_t in) {
return htobe64(in);
}
/**
 * Specializations of convertToLittleEndian. Single-byte types are returned
 * unchanged; wider integer types are converted via the htole* functions.
 */
template<>
inline uint8_t convertToLittleEndian(uint8_t in) {
return in;
}
template<>
inline int8_t convertToLittleEndian(int8_t in) {
return in;
}
template<>
inline uint16_t convertToLittleEndian(uint16_t in) {
return htole16(in);
}
template<>
inline int16_t convertToLittleEndian(int16_t in) {
return htole16(in);
}
template<>
inline uint32_t convertToLittleEndian(uint32_t in) {
return htole32(in);
}
template<>
inline int32_t convertToLittleEndian(int32_t in) {
return htole32(in);
}
template<>
inline uint64_t convertToLittleEndian(uint64_t in) {
return htole64(in);
}
template<>
inline int64_t convertToLittleEndian(int64_t in) {
return htole64(in);
}
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_ENDIAN_UTILS*/

@ -0,0 +1,76 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_FILE_INPUT_H
#define IMG_UTILS_FILE_INPUT_H
#include <img_utils/Input.h>
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <utils/String8.h>
#include <stdio.h>
#include <stdint.h>
namespace android {
namespace img_utils {
/**
* Utility class for reading from a file.
*/
class ANDROID_API FileInput : public Input {
public:
/**
* Create a file input for the given path.
*/
explicit FileInput(String8 path);
virtual ~FileInput();
/**
* Open a file descriptor to the path given in the constructor.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t open();
/**
* Read bytes from the file into the given buffer. At most, the number
* of bytes given in the count argument will be read. Bytes will be written
* into the given buffer starting at the index given in the offset argument.
*
* Returns the number of bytes read, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
virtual ssize_t read(uint8_t* buf, size_t offset, size_t count);
/**
* Close the file descriptor to the path given in the constructor.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t close();
private:
// Handle to the underlying stdio stream.
FILE *mFp;
// Path supplied at construction time; opened lazily by open().
String8 mPath;
// True while the stream is open.
bool mOpen;
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_FILE_INPUT_H*/

@ -0,0 +1,46 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_FILE_OUTPUT_H
#define IMG_UTILS_FILE_OUTPUT_H
#include <img_utils/Output.h>
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <utils/String8.h>
#include <stdio.h>
#include <stdint.h>
namespace android {
namespace img_utils {
/**
 * Utility class for writing bytes to a file.
 */
class ANDROID_API FileOutput : public Output {
public:
/**
* Create a file output for the given path.
*/
explicit FileOutput(String8 path);
virtual ~FileOutput();
/**
* Open the file at the path given in the constructor.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t open();
/**
* Write count bytes from the given buffer, starting at index offset.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t write(const uint8_t* buf, size_t offset, size_t count);
/**
* Close the file.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t close();
private:
// Handle to the underlying stdio stream.
FILE *mFp;
// Path supplied at construction time; opened lazily by open().
String8 mPath;
// True while the stream is open.
bool mOpen;
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_FILE_OUTPUT_H*/

@ -0,0 +1,71 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_INPUT_H
#define IMG_UTILS_INPUT_H
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <stdint.h>
namespace android {
namespace img_utils {
/**
* Utility class used as a source of bytes.
*/
class ANDROID_API Input {
public:
// Virtual destructor so concrete inputs are destroyed correctly through
// Input pointers.
virtual ~Input();
/**
* Open this Input.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t open();
/**
* Read bytes into the given buffer. At most, the number of bytes given in the
* count argument will be read. Bytes will be written into the given buffer starting
* at the index given in the offset argument.
*
* Returns the number of bytes read, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
virtual ssize_t read(uint8_t* buf, size_t offset, size_t count) = 0;
/**
* Skips bytes in the input.
*
* Returns the number of bytes skipped, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
virtual ssize_t skip(size_t count);
/**
* Close the Input. It is not valid to call open on a previously closed Input.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t close();
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_INPUT_H*/

@ -0,0 +1,57 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_ORDERABLE
#define IMG_UTILS_ORDERABLE
#include <cutils/compiler.h>
#include <stdint.h>
namespace android {
namespace img_utils {
#define COMPARE_DEF(op) \
inline bool operator op (const Orderable& orderable) const;
/**
* Subclasses of Orderable can be compared and sorted. This is
* intended to be used to create sorted arrays of TIFF entries
* and IFDs.
*/
class ANDROID_API Orderable {
public:
virtual ~Orderable();
/**
* Comparison operators are based on the value returned
* from this method.
*/
virtual uint32_t getComparableValue() const = 0;
// Declares the six inline comparison operators in terms of
// getComparableValue() (see the COMPARE_DEF macro above).
COMPARE_DEF(>)
COMPARE_DEF(<)
COMPARE_DEF(>=)
COMPARE_DEF(<=)
COMPARE_DEF(==)
COMPARE_DEF(!=)
};
#undef COMPARE_DEF
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_ORDERABLE*/

@ -0,0 +1,61 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_OUTPUT_H
#define IMG_UTILS_OUTPUT_H
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <stdint.h>
namespace android {
namespace img_utils {
/**
* Utility class used to output bytes.
*/
class ANDROID_API Output {
public:
// Virtual destructor so concrete outputs are destroyed correctly through
// Output pointers.
virtual ~Output();
/**
* Open this Output.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t open();
/**
* Write bytes from the given buffer. The number of bytes given in the count
* argument will be written. Bytes will be written from the given buffer starting
* at the index given in the offset argument.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t write(const uint8_t* buf, size_t offset, size_t count) = 0;
/**
* Close this Output. It is not valid to call open on a previously closed Output.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t close();
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_OUTPUT_H*/

@ -0,0 +1,44 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_PAIR_H
#define IMG_UTILS_PAIR_H
#include <cutils/compiler.h>
namespace android {
namespace img_utils {
/**
* Generic pair utility class. Nothing special here.
*/
template<typename F, typename S>
class ANDROID_API Pair {
public:
// Both members are public by design; Pair is a plain value aggregate.
F first;
S second;
// Default-constructs both members (primitive members are left uninitialized).
Pair() {}
// Copy constructor.
Pair(const Pair& o) : first(o.first), second(o.second) {}
// Construct from explicit first/second values.
Pair(const F& f, const S& s) : first(f), second(s) {}
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_PAIR_H*/

@ -0,0 +1,53 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_SORTED_ENTRY_VECTOR_H
#define IMG_UTILS_SORTED_ENTRY_VECTOR_H
#include <img_utils/TiffEntry.h>
#include <utils/StrongPointer.h>
#include <utils/SortedVector.h>
namespace android {
namespace img_utils {
/**
* Subclass of SortedVector that has been extended to
* do comparisons/lookups based on the tag ID of the entries.
*/
class SortedEntryVector : public SortedVector<sp<TiffEntry> > {
public:
virtual ~SortedEntryVector();
/**
* Returns the index of the entry with the given tag ID, or
* -1 if none exists.
*/
ssize_t indexOfTag(uint16_t tag) const;
protected:
/**
* Compare tag ID. lhs/rhs presumably point at sp<TiffEntry> elements of
* the vector -- TODO confirm against the SortedVector do_compare contract.
*/
virtual int do_compare(const void* lhs, const void* rhs) const;
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_SORTED_ENTRY_VECTOR_H*/

@ -0,0 +1,53 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_STRIP_SOURCE_H
#define IMG_UTILS_STRIP_SOURCE_H
#include <img_utils/Output.h>
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <stdint.h>
namespace android {
namespace img_utils {
/**
* This class acts as a data source for strips set in a TiffIfd.
*/
class ANDROID_API StripSource {
public:
virtual ~StripSource();
/**
* Write count bytes to the stream.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t writeToStream(Output& stream, uint32_t count) = 0;
/**
* Return the source IFD.
*/
virtual uint32_t getIfd() const = 0;
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_STRIP_SOURCE_H*/

@ -0,0 +1,130 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_TIFF_ENTRY
#define IMG_UTILS_TIFF_ENTRY
#include <img_utils/TiffWritable.h>
#include <img_utils/TiffHelpers.h>
#include <img_utils/EndianUtils.h>
#include <cutils/compiler.h>
// #include <utils/String8.h>
#include <utils/Errors.h>
#include <stdint.h>
namespace android {
namespace img_utils {
#define COMPARE_DEF(op) \
inline bool operator op (const TiffEntry& entry) const;
/**
* This class holds a single TIFF IFD entry.
*
* Subclasses are expected to support assignment and copying operations.
*/
class ANDROID_API TiffEntry : public TiffWritable {
public:
virtual ~TiffEntry();
/**
* Write the 12-byte IFD entry to the output. The given offset will be
* set as the tag value if the size of the tag value exceeds the max
* size for the TIFF Value field (4 bytes), and should be word aligned.
*
* Returns OK on success, or a negative error code on failure.
*/
virtual status_t writeTagInfo(uint32_t offset, /*out*/EndianOutput* out) const = 0;
/**
* Get the count set for this entry. This corresponds to the TIFF Count
* field.
*/
virtual uint32_t getCount() const = 0;
/**
* Get the tag id set for this entry. This corresponds to the TIFF Tag
* field.
*/
virtual uint16_t getTag() const = 0;
/**
* Get the type set for this entry. This corresponds to the TIFF Type
* field.
*/
virtual TagType getType() const = 0;
/**
* Get the defined endianness for this entry. If this is defined,
* the tag value will be written with the given byte order.
*/
virtual Endianness getEndianness() const = 0;
/**
* Get the value for this entry. This corresponds to the TIFF Value
* field.
*
* Returns NULL if the value is NULL, or if the type used does not
* match the type of this tag.
*/
template<typename T>
const T* getData() const;
// Get a formatted string representation of this entry.
virtual std::string toString() const;
/**
* Force the type used here to be a valid TIFF type.
*
* Returns NULL if the given value is NULL, or if the type given does
* not match the type of the value given.
*/
template<typename T>
static const T* forceValidType(TagType type, const T* value);
// Raw pointer to the underlying value storage; consumed by getData().
virtual const void* getDataHelper() const = 0;
// Declares inline operator> / operator< based on getComparableValue()
// (see the COMPARE_DEF macro above; definitions follow the class).
COMPARE_DEF(>)
COMPARE_DEF(<)
protected:
enum {
// Buffer size used when formatting entries -- presumably for
// toString(); TODO confirm.
MAX_PRINT_STRING_LENGTH = 256
};
};
// Out-of-class definitions for the comparison operators declared via
// COMPARE_DEF inside TiffEntry. The in-class declarations are marked
// inline, so defining them in this header does not violate the ODR.
#define COMPARE(op) \
bool TiffEntry::operator op (const TiffEntry& entry) const { \
return getComparableValue() op entry.getComparableValue(); \
}
COMPARE(>)
COMPARE(<)
/**
 * Return the entry value as a T*, or NULL if this entry's TIFF type does
 * not correspond to T (checked by forceValidType).
 */
template<typename T>
const T* TiffEntry::getData() const {
const T* value = reinterpret_cast<const T*>(getDataHelper());
return forceValidType<T>(getType(), value);
}
// Keep the helper macros local to this header.
#undef COMPARE
#undef COMPARE_DEF
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_TIFF_ENTRY*/

@ -0,0 +1,219 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_TIFF_ENTRY_IMPL
#define IMG_UTILS_TIFF_ENTRY_IMPL
#include <img_utils/TiffIfd.h>
#include <img_utils/TiffEntry.h>
#include <img_utils/TiffHelpers.h>
#include <img_utils/Output.h>
#include <img_utils/EndianUtils.h>
#include <utils/Log.h>
#include <utils/Errors.h>
// #include <utils/Vector.h>
#include <utils/StrongPointer.h>
#include <stdint.h>
#include <vector>
namespace android {
namespace img_utils {
/**
 * Concrete TiffEntry holding a copy of the tag value as an array of T.
 */
template<typename T>
class TiffEntryImpl : public TiffEntry {
public:
// Copies count values (2 * count array elements for RATIONAL/SRATIONAL
// types) out of data; see the constructor definition below.
TiffEntryImpl(uint16_t tag, TagType type, uint32_t count, Endianness end, const T* data);
virtual ~TiffEntryImpl();
status_t writeData(uint32_t offset, /*out*/EndianOutput* out) const;
status_t writeTagInfo(uint32_t offset, /*out*/EndianOutput* out) const;
uint32_t getCount() const;
uint16_t getTag() const;
TagType getType() const;
Endianness getEndianness() const;
size_t getSize() const;
uint32_t getComparableValue() const;
protected:
const void* getDataHelper() const;
// Unpadded size in bytes of the value data.
uint32_t getActualSize() const;
// TIFF Tag field.
uint16_t mTag;
// TIFF Type field, stored as its 2-byte wire representation.
uint16_t mType;
// TIFF Count field (logical value count, not array-element count).
uint32_t mCount;
// Forced byte order for the value, or UNDEFINED_ENDIAN for the default.
Endianness mEnd;
// Copied value data; RATIONAL/SRATIONAL use two elements per value.
std::vector<T> mData;
};
/**
 * Construct an entry for the given tag/type/count, copying the tag value out
 * of data. For RATIONAL/SRATIONAL entries each logical value occupies two
 * array elements, so twice count elements are copied.
 */
template<typename T>
TiffEntryImpl<T>::TiffEntryImpl(uint16_t tag, TagType type, uint32_t count, Endianness end,
        const T* data)
        : mTag(tag), mType(static_cast<uint16_t>(type)), mCount(count), mEnd(end) {
    count = (type == RATIONAL || type == SRATIONAL) ? count * 2 : count;
    // The returned iterator was previously stored in an unused local
    // (leftover from a Vector-based implementation); drop it.
    mData.insert(mData.end(), data, data + count);
}
template<typename T>
TiffEntryImpl<T>::~TiffEntryImpl() {}
// TIFF Count field.
template<typename T>
uint32_t TiffEntryImpl<T>::getCount() const {
return mCount;
}
// TIFF Tag field.
template<typename T>
uint16_t TiffEntryImpl<T>::getTag() const {
return mTag;
}
// TIFF Type field.
template<typename T>
TagType TiffEntryImpl<T>::getType() const {
return static_cast<TagType>(mType);
}
// Raw pointer to the copied value data, used by TiffEntry::getData().
template<typename T>
const void* TiffEntryImpl<T>::getDataHelper() const {
return reinterpret_cast<const void*>(&mData[0]);
}
// Word-aligned size of the out-of-line value data, or 0 when the value fits
// inline in the entry's 4-byte Value field.
template<typename T>
size_t TiffEntryImpl<T>::getSize() const {
uint32_t total = getActualSize();
WORD_ALIGN(total)
return (total <= OFFSET_SIZE) ? 0 : total;
}
// Unpadded byte size of the value data.
template<typename T>
uint32_t TiffEntryImpl<T>::getActualSize() const {
uint32_t total = sizeof(T) * mCount;
if (getType() == RATIONAL || getType() == SRATIONAL) {
// 2 ints stored for each rational, multiply by 2
total <<= 1;
}
return total;
}
// Forced byte order for the value, or UNDEFINED_ENDIAN for the default.
template<typename T>
Endianness TiffEntryImpl<T>::getEndianness() const {
return mEnd;
}
// Entries sort by tag ID (see Orderable).
template<typename T>
uint32_t TiffEntryImpl<T>::getComparableValue() const {
return mTag;
}
/**
 * Write the 12-byte IFD entry: 2-byte tag, 2-byte type, 4-byte count, then
 * 4 bytes of either inline value data (padded to the word) or the offset at
 * which the value data will be written.
 */
template<typename T>
status_t TiffEntryImpl<T>::writeTagInfo(uint32_t offset, /*out*/EndianOutput* out) const {
// Entries must start on a word boundary.
assert((offset % TIFF_WORD_SIZE) == 0);
status_t ret = OK;
BAIL_ON_FAIL(out->write(&mTag, 0, 1), ret);
BAIL_ON_FAIL(out->write(&mType, 0, 1), ret);
BAIL_ON_FAIL(out->write(&mCount, 0, 1), ret);
uint32_t dataSize = getActualSize();
if (dataSize > OFFSET_SIZE) {
// Value does not fit in the 4-byte Value field; write the offset
// where the value data will live instead.
BAIL_ON_FAIL(out->write(&offset, 0, 1), ret);
} else {
uint32_t count = mCount;
if (getType() == RATIONAL || getType() == SRATIONAL) {
/**
* Rationals are stored as an array of ints. Each
* rational is represented by 2 ints. To recover the
* size of the array here, multiply the count by 2.
*/
count <<= 1;
}
BAIL_ON_FAIL(out->write(&mData[0], 0, count), ret);
// Pad the Value field out to the full word.
ZERO_TILL_WORD(out, dataSize, ret);
}
return ret;
}
/**
 * Write the out-of-line value data for this entry, honoring a forced byte
 * order if one was set, and pad the output to the next word boundary.
 */
template<typename T>
status_t TiffEntryImpl<T>::writeData(uint32_t /*offset*/, EndianOutput* out) const {
status_t ret = OK;
// Some tags have fixed-endian value output
Endianness tmp = UNDEFINED_ENDIAN;
if (mEnd != UNDEFINED_ENDIAN) {
// Save the output's byte order so it can be restored below.
tmp = out->getEndianness();
out->setEndianness(mEnd);
}
uint32_t count = mCount;
if (getType() == RATIONAL || getType() == SRATIONAL) {
/**
* Rationals are stored as an array of ints. Each
* rational is represented by 2 ints. To recover the
* size of the array here, multiply the count by 2.
*/
count <<= 1;
}
BAIL_ON_FAIL(out->write(&mData[0], 0, count), ret);
if (mEnd != UNDEFINED_ENDIAN) {
// Restore the previous byte order.
out->setEndianness(tmp);
}
// Write to next word alignment
ZERO_TILL_WORD(out, sizeof(T) * count, ret);
return ret;
}
// Specialization for sub-IFD entries: the value is always written as an
// offset, never inline in the 4-byte Value field.
template<>
inline status_t TiffEntryImpl<sp<TiffIfd> >::writeTagInfo(uint32_t offset,
/*out*/EndianOutput* out) const {
assert((offset % TIFF_WORD_SIZE) == 0);
status_t ret = OK;
BAIL_ON_FAIL(out->write(&mTag, 0, 1), ret);
BAIL_ON_FAIL(out->write(&mType, 0, 1), ret);
BAIL_ON_FAIL(out->write(&mCount, 0, 1), ret);
BAIL_ON_FAIL(out->write(&offset, 0, 1), ret);
return ret;
}
// Specialization for sub-IFD entries: the value size is the sum of the sizes
// of the contained IFDs.
template<>
inline uint32_t TiffEntryImpl<sp<TiffIfd> >::getActualSize() const {
uint32_t total = 0;
for (size_t i = 0; i < mData.size(); ++i) {
total += mData[i]->getSize();
}
return total;
}
// Specialization for sub-IFD entries: write each IFD in turn, advancing the
// offset by the size of each IFD written.
template<>
inline status_t TiffEntryImpl<sp<TiffIfd> >::writeData(uint32_t offset, EndianOutput* out) const {
status_t ret = OK;
for (uint32_t i = 0; i < mCount; ++i) {
BAIL_ON_FAIL(mData[i]->writeData(offset, out), ret);
offset += mData[i]->getSize();
}
return ret;
}
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_TIFF_ENTRY_IMPL*/

@ -0,0 +1,132 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_TIFF_HELPERS_H
#define IMG_UTILS_TIFF_HELPERS_H
#include <stdint.h>
namespace android {
namespace img_utils {
// Four zero bytes used to pad output up to a word boundary.
const uint8_t ZERO_WORD[] = {0, 0, 0, 0};
// Assign the result of expression x to flag; return flag on failure.
#define BAIL_ON_FAIL(x, flag) \
if (((flag) = (x)) != OK) return flag;
// Number of padding bytes needed to advance index to the next word boundary.
#define BYTES_TILL_WORD(index) \
((TIFF_WORD_SIZE - ((index) % TIFF_WORD_SIZE)) % TIFF_WORD_SIZE)
// Round count up to the next word boundary, in place.
#define WORD_ALIGN(count) \
count += BYTES_TILL_WORD(count);
// Write zero bytes to output until index is word aligned; on write failure,
// bails out of the enclosing function through ret.
#define ZERO_TILL_WORD(output, index, ret) \
{ \
size_t remaining = BYTES_TILL_WORD(index); \
if (remaining > 0) { \
BAIL_ON_FAIL((output)->write(ZERO_WORD, 0, remaining), ret); \
} \
}
/**
 * Basic TIFF header constants.
 */
enum {
    BAD_OFFSET = 0,
    TIFF_WORD_SIZE = 4, // Size in bytes
    IFD_HEADER_SIZE = 2, // Size in bytes
    IFD_FOOTER_SIZE = 4, // Size in bytes
    TIFF_ENTRY_SIZE = 12, // Size in bytes
    MAX_IFD_ENTRIES = UINT16_MAX,
    FILE_HEADER_SIZE = 8, // Size in bytes
    ENDIAN_MARKER_SIZE = 2, // Size in bytes
    TIFF_MARKER_SIZE = 2, // Size in bytes
    OFFSET_MARKER_SIZE = 4, // Size in bytes
    TIFF_FILE_MARKER = 42,
    BIG_ENDIAN_MARKER = 0x4D4Du,
    LITTLE_ENDIAN_MARKER = 0x4949u
};
/**
 * Constants for the TIFF tag types (TIFF "Type" field values).
 */
enum TagType {
    UNKNOWN_TAGTYPE = 0,
    BYTE=1,
    ASCII,
    SHORT,
    LONG,
    RATIONAL,
    SBYTE,
    UNDEFINED,
    SSHORT,
    SLONG,
    SRATIONAL,
    FLOAT,
    DOUBLE
};
/**
 * Sizes of the TIFF entry fields (in bytes).
 */
enum {
    TAG_SIZE = 2,
    TYPE_SIZE = 2,
    COUNT_SIZE = 4,
    OFFSET_SIZE = 4
};
/**
 * Convenience IFD id constants.
 */
enum {
    IFD_0 = 0,
    RAW_IFD,
    PROFILE_IFD,
    PREVIEW_IFD
};
/**
 * Return the per-element size in bytes of the given TIFF tag type, or 0 for
 * an unrecognized type.
 */
inline size_t getTypeSize(TagType type) {
    // Single-byte types.
    if (type == UNDEFINED || type == ASCII || type == BYTE || type == SBYTE) {
        return 1;
    }
    // Two-byte types.
    if (type == SHORT || type == SSHORT) {
        return 2;
    }
    // Four-byte types.
    if (type == LONG || type == SLONG || type == FLOAT) {
        return 4;
    }
    // Eight-byte types (rationals are a pair of 4-byte ints).
    if (type == RATIONAL || type == SRATIONAL || type == DOUBLE) {
        return 8;
    }
    return 0;
}
/**
 * Total on-disk size of an IFD with the given number of entries: a 2-byte
 * entry count, 12 bytes per entry, and a 4-byte next-IFD offset.
 */
inline uint32_t calculateIfdSize(size_t numberOfEntries) {
    return IFD_HEADER_SIZE + IFD_FOOTER_SIZE + TIFF_ENTRY_SIZE * numberOfEntries;
}
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_TIFF_HELPERS_H*/

@ -0,0 +1,164 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_TIFF_IFD_H
#define IMG_UTILS_TIFF_IFD_H
#include <img_utils/TiffWritable.h>
#include <img_utils/TiffEntry.h>
#include <img_utils/Output.h>
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <utils/StrongPointer.h>
#include <stdint.h>
#include <map>
namespace android {
namespace img_utils {
/**
* This class holds a single TIFF Image File Directory (IFD) structure.
*
* This maps to the TIFF IFD structure that is logically composed of:
* - A 2-byte field listing the number of entries.
* - A list of 12-byte TIFF entries.
* - A 4-byte offset to the next IFD.
*/
class ANDROID_API TiffIfd : public TiffWritable {
public:
// Construct an empty IFD with the given convenience id (e.g. IFD_0, RAW_IFD).
explicit TiffIfd(uint32_t ifdId);
virtual ~TiffIfd();
/**
* Add a TiffEntry to this IFD or replace an existing entry with the
* same tag ID. No validation is done.
*
* Returns OK on success, or a negative error code on failure.
*/
virtual status_t addEntry(const sp<TiffEntry>& entry);
/**
* Set the pointer to the next IFD. This is used to create a linked
* list of IFDs as defined by the TIFF 6.0 spec., and is not included
* when calculating the size of IFD and entries for the getSize()
* method (unlike SubIFDs).
*/
virtual void setNextIfd(const sp<TiffIfd>& ifd);
/**
* Get the pointer to the next IFD, or NULL if none exists.
*/
virtual sp<TiffIfd> getNextIfd() const;
/**
* Write the IFD data. This includes the IFD header, entries, footer,
* and the corresponding values for each entry (recursively including
* sub-IFDs). The written amount should end on a word boundary, and
* the given offset should be word aligned.
*
* Returns OK on success, or a negative error code on failure.
*/
virtual status_t writeData(uint32_t offset, /*out*/EndianOutput* out) const;
/**
* Get the size of the IFD. This includes the IFD header, entries, footer,
* and the corresponding values for each entry (recursively including
* any sub-IFDs).
*/
virtual size_t getSize() const;
/**
* Get the id of this IFD.
*/
virtual uint32_t getId() const;
/**
* Get an entry with the given tag ID.
*
* Returns a strong pointer to the entry if it exists, or an empty strong
* pointer.
*/
virtual sp<TiffEntry> getEntry(uint16_t tag) const;
/**
* Remove the entry with the given tag ID if it exists.
*/
virtual void removeEntry(uint16_t tag);
/**
* Convenience method to validate and set strip-related image tags.
*
* This sets all strip related tags, but leaves offset values uninitialized.
* setStripOffsets must be called with the desired offset before writing.
* The strip tag values are calculated from the existing tags for image
* dimensions and pixel type set in the IFD.
*
* Does not handle planar image configurations (PlanarConfiguration != 1).
*
* Returns OK on success, or a negative error code.
*/
virtual status_t validateAndSetStripTags();
/**
* Returns true if validateAndSetStripTags has been called, but not setStripOffsets.
*/
virtual bool uninitializedOffsets() const;
/**
* Convenience method to set beginning offset for strips.
*
* Call this to update the strip offsets before calling writeData.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t setStripOffset(uint32_t offset);
/**
* Get the total size of the strips in bytes.
*
* This sums the byte count at each strip offset, and returns
* the total count of bytes stored in strips for this IFD.
*/
virtual uint32_t getStripSize() const;
/**
* Get a formatted string representing this IFD.
*/
virtual std::string toString() const;
/**
* Print a formatted string representing this IFD to logcat.
*/
void log() const;
/**
* Get value used to determine sort order.
*/
virtual uint32_t getComparableValue() const;
protected:
// Validate the given write offset -- presumably returns a checked/adjusted
// offset or BAD_OFFSET on failure; TODO confirm against TiffIfd.cpp.
virtual uint32_t checkAndGetOffset(uint32_t offset) const;
// Entries keyed (and therefore ordered) by tag ID.
std::map<uint16_t, sp<TiffEntry> > mEntries;
// Next IFD in the linked list, or NULL.
sp<TiffIfd> mNextIfd;
// Convenience id supplied at construction.
uint32_t mIfdId;
// True between validateAndSetStripTags() and setStripOffset().
bool mStripOffsetsInitialized;
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_TIFF_IFD_H*/

@ -0,0 +1,60 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_TIFF_WRITABLE
#define IMG_UTILS_TIFF_WRITABLE
#include <img_utils/Orderable.h>
#include <img_utils/EndianUtils.h>
#include <img_utils/Output.h>
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
#include <stdint.h>
namespace android {
namespace img_utils {
/**
* TiffWritable subclasses represent TIFF metadata objects that can be written
* to an EndianOutput object. This is used for TIFF entries and IFDs.
*/
class ANDROID_API TiffWritable : public Orderable, public LightRefBase<TiffWritable> {
public:
TiffWritable();
virtual ~TiffWritable();
/**
* Write the data to the output. The given offset is used to calculate
* the header offset for values written. The offset is defined
* relative to the beginning of the TIFF header, and is word aligned.
*
* Returns OK on success, or a negative error code on failure.
*/
virtual status_t writeData(uint32_t offset, /*out*/EndianOutput* out) const = 0;
/**
* Get the size of the data to write.
*/
virtual size_t getSize() const = 0;
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_TIFF_WRITABLE*/

@ -0,0 +1,328 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_TIFF_WRITER_H
#define IMG_UTILS_TIFF_WRITER_H
#include <img_utils/EndianUtils.h>
#include <img_utils/StripSource.h>
#include <img_utils/TiffEntryImpl.h>
#include <img_utils/TagDefinitions.h>
#include <img_utils/TiffIfd.h>
#include <utils/Log.h>
#include <utils/Errors.h>
#include <utils/StrongPointer.h>
#include <cutils/compiler.h>
#include <stdint.h>
#include <vector>
#include <map>
namespace android {
namespace img_utils {
class TiffEntry;
class TiffIfd;
class Output;
/**
* This class holds a collection of TIFF IFDs that can be written as a
* complete DNG file header.
*
* This maps to the TIFF header structure that is logically composed of:
* - An 8-byte file header containing an endianness indicator, the TIFF
* file marker, and the offset to the first IFD.
* - A list of TIFF IFD structures.
*/
class ANDROID_API TiffWriter : public LightRefBase<TiffWriter> {
public:
// Kinds of sub-IFD links an IFD may carry (regular SubIFDs tag vs. GPS IFD tag).
enum SubIfdType {
SUBIFD = 0,
GPSINFO
};
/**
 * Constructs a TiffWriter with the default tag mappings. This enables
 * all of the tags defined in TagDefinitions.h, and uses the following
 * mapping precedence to resolve collisions:
 * (highest precedence) TIFF/EP > DNG > EXIF 2.3 > TIFF 6.0
 */
TiffWriter();
/**
 * Constructs a TiffWriter with the given tag mappings. The mapping
 * precedence will be in the order that the definition maps are given,
 * where the lower index map gets precedence.
 *
 * This can be used with user-defined definitions, or definitions from
 * TagDefinitions.h
 *
 * The enabledDefinitions mapping object is owned by the caller, and must
 * stay alive for the lifespan of the constructed TiffWriter object.
 */
TiffWriter(std::map<uint16_t, const TagDefinition_t*>* enabledDefinitions,
size_t length);
virtual ~TiffWriter();
/**
 * Write a TIFF header containing each IFD set. This will recursively
 * write all SubIFDs and tags.
 *
 * Any StripSources passed in will be written to the output as image strips
 * at the appropriate offsets. The StripByteCounts, RowsPerStrip, and
 * StripOffsets tags must be set to use this. To set these tags in a
 * given IFD, use the addStrip method.
 *
 * Returns OK on success, or a negative error code on failure.
 */
virtual status_t write(Output* out, StripSource** sources, size_t sourcesCount,
Endianness end = LITTLE);
/**
 * Write a TIFF header containing each IFD set. This will recursively
 * write all SubIFDs and tags.
 *
 * Image data for strips or tiles must be written separately at the
 * appropriate offsets. These offsets must not fall within the file
 * header written this way. The size of the header written is given
 * by the getTotalSize() method.
 *
 * Returns OK on success, or a negative error code on failure.
 */
virtual status_t write(Output* out, Endianness end = LITTLE);
/**
 * Get the total size in bytes of the TIFF header. This includes all
 * IFDs, tags, and values set for this TiffWriter.
 */
virtual uint32_t getTotalSize() const;
/**
 * Add an entry to the IFD with the given ID.
 *
 * Returns OK on success, or a negative error code on failure. Valid
 * error codes for this method are:
 * - BAD_INDEX - The given tag doesn't exist.
 * - BAD_VALUE - The given count doesn't match the required count for
 * this tag.
 * - BAD_TYPE - The type of the given data isn't compatible with the
 * type required for this tag.
 * - NAME_NOT_FOUND - No ifd exists with the given ID.
 */
virtual status_t addEntry(const sp<TiffEntry>& entry, uint32_t ifd);
/**
 * Build an entry for a known tag and add it to the IFD with the given ID.
 * This tag must be defined in one of the definition vectors this TIFF writer
 * was constructed with. The count and type are validated.
 *
 * Returns OK on success, or a negative error code on failure. Valid
 * error codes for this method are:
 * - BAD_INDEX - The given tag doesn't exist.
 * - BAD_VALUE - The given count doesn't match the required count for
 * this tag.
 * - BAD_TYPE - The type of the given data isn't compatible with the
 * type required for this tag.
 * - NAME_NOT_FOUND - No ifd exists with the given ID.
 */
template<typename T>
status_t addEntry(uint16_t tag, uint32_t count, const T* data, uint32_t ifd);
/**
 * Build an entry for a known tag. This tag must be one of the tags
 * defined in one of the definition vectors this TIFF writer was constructed
 * with. The count and type are validated. If this succeeds, the resulting
 * entry will be placed in the outEntry pointer.
 *
 * Returns OK on success, or a negative error code on failure. Valid
 * error codes for this method are:
 * - BAD_INDEX - The given tag doesn't exist.
 * - BAD_VALUE - The given count doesn't match the required count for
 * this tag.
 * - BAD_TYPE - The type of the given data isn't compatible with the
 * type required for this tag.
 */
template<typename T>
status_t buildEntry(uint16_t tag, uint32_t count, const T* data,
/*out*/sp<TiffEntry>* outEntry) const;
/**
 * Convenience function to set the strip related tags for a given IFD.
 *
 * Call this before using a StripSource as an input to write.
 * The following tags must be set before calling this method:
 * - ImageWidth
 * - ImageLength
 * - SamplesPerPixel
 * - BitsPerSample
 *
 * Returns OK on success, or a negative error code.
 */
virtual status_t addStrip(uint32_t ifd);
/**
 * Return the TIFF entry with the given tag ID in the IFD with the given ID,
 * or an empty pointer if none exists.
 */
virtual sp<TiffEntry> getEntry(uint16_t tag, uint32_t ifd) const;
/**
 * Remove the TIFF entry with the given tag ID in the given IFD if it exists.
 */
virtual void removeEntry(uint16_t tag, uint32_t ifd);
/**
 * Create an empty IFD with the given ID and add it to the end of the
 * list of IFDs.
 */
virtual status_t addIfd(uint32_t ifd);
/**
 * Create an empty IFD with the given ID and add it as a SubIfd of the
 * parent IFD.
 */
virtual status_t addSubIfd(uint32_t parentIfd, uint32_t ifd, SubIfdType type = SUBIFD);
/**
 * Returns the default type for the given tag ID.
 */
virtual TagType getDefaultType(uint16_t tag) const;
/**
 * Returns the default count for a given tag ID, or 0 if this
 * tag normally has a variable count.
 */
virtual uint32_t getDefaultCount(uint16_t tag) const;
/**
 * Returns true if an IFD with the given ID exists.
 */
virtual bool hasIfd(uint32_t ifd) const;
/**
 * Returns true if a definition exists for the given tag ID.
 */
virtual bool checkIfDefined(uint16_t tag) const;
/**
 * Returns the name of the tag if a definition exists for the given tag
 * ID, or null if no definition exists.
 */
virtual const char* getTagName(uint16_t tag) const;
/**
 * Print the currently configured IFDs and entries to logcat.
 */
virtual void log() const;
/**
 * Build an entry. No validation is done.
 *
 * WARNING: Using this method can result in creating poorly formatted
 * TIFF files.
 *
 * Returns a TiffEntry with the given tag, type, count, endianness,
 * and data.
 */
template<typename T>
static sp<TiffEntry> uncheckedBuildEntry(uint16_t tag, TagType type,
uint32_t count, Endianness end, const T* data);
/**
 * Utility function to build a tag-to-definition mapping from a given
 * array of tag definitions.
 */
#if 0
static KeyedVector<uint16_t, const TagDefinition_t*> buildTagMap(
const TagDefinition_t* definitions, size_t length);
#endif
protected:
enum {
DEFAULT_NUM_TAG_MAPS = 4,
};
// Returns the last IFD in the linked list starting at mIfd.
sp<TiffIfd> findLastIfd();
// Writes the 8-byte TIFF file header (endianness marker, magic, first-IFD offset).
status_t writeFileHeader(EndianOutput& out);
// Looks up a tag definition across mTagMaps in precedence order.
const TagDefinition_t* lookupDefinition(uint16_t tag) const;
// Recomputes value/strip offsets across all IFDs before writing.
status_t calculateOffsets();
// Head of the linked list of top-level IFDs.
sp<TiffIfd> mIfd;
// IFDs addressable by caller-supplied ID (includes sub-IFDs).
std::map<uint32_t, sp<TiffIfd> > mNamedIfds;
// Enabled tag definition maps, ordered by precedence (lower index wins).
std::vector<std::map<uint16_t, const TagDefinition_t*> > mTagMaps;
size_t mNumTagMaps;
#if 0
static KeyedVector<uint16_t, const TagDefinition_t*> sTagMaps[];
#endif
};
/**
 * Validate the tag against its definition (existence, fixed count, and type
 * compatibility) and, on success, store a newly built entry in *outEntry.
 *
 * Returns OK, or BAD_INDEX / BAD_VALUE / BAD_TYPE as documented in the class
 * declaration.
 */
template<typename T>
status_t TiffWriter::buildEntry(uint16_t tag, uint32_t count, const T* data,
        /*out*/sp<TiffEntry>* outEntry) const {
    const TagDefinition_t* definition = lookupDefinition(tag);
    if (definition == NULL) {
        ALOGE("%s: No such tag exists for id %x.", __FUNCTION__, tag);
        return BAD_INDEX;
    }
    uint32_t fixedCount = definition->fixedCount;
    if (fixedCount > 0 && fixedCount != count) {
        // "%u" matches the unsigned count arguments; the old "%d" would
        // misreport values above INT32_MAX.
        ALOGE("%s: Invalid count %u for tag %x (expects %u).", __FUNCTION__, count, tag,
                fixedCount);
        return BAD_VALUE;
    }
    TagType fixedType = definition->defaultType;
    // forceValidType returns NULL when T cannot legally hold this tag's type.
    if (TiffEntry::forceValidType(fixedType, data) == NULL) {
        ALOGE("%s: Invalid type used for tag value for tag %x.", __FUNCTION__, tag);
        return BAD_TYPE;
    }
    *outEntry = new TiffEntryImpl<T>(tag, fixedType, count,
            definition->fixedEndian, data);
    return OK;
}
/**
 * Build a validated entry for the given tag and insert it into the IFD with
 * the given ID. Returns the first error encountered, or OK.
 */
template<typename T>
status_t TiffWriter::addEntry(uint16_t tag, uint32_t count, const T* data, uint32_t ifd) {
    sp<TiffEntry> entry;
    status_t res = buildEntry<T>(tag, count, data, &entry);
    if (res != OK) {
        ALOGE("%s: Could not build entry for tag %x.", __FUNCTION__, tag);
        return res;
    }
    return addEntry(entry, ifd);
}
/**
 * Construct an entry directly from the given fields with no validation.
 * See the class declaration for the formatting caveats.
 */
template<typename T>
sp<TiffEntry> TiffWriter::uncheckedBuildEntry(uint16_t tag, TagType type, uint32_t count,
        Endianness end, const T* data) {
    return sp<TiffEntry>(new TiffEntryImpl<T>(tag, type, count, end, data));
}
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_TIFF_WRITER_H*/

@ -0,0 +1,54 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/ByteArrayOutput.h>
#include <utils/Log.h>
namespace android {
namespace img_utils {
ByteArrayOutput::ByteArrayOutput() {
}

ByteArrayOutput::~ByteArrayOutput() {
}

// An in-memory buffer needs no setup; opening always succeeds.
status_t ByteArrayOutput::open() {
    return OK;
}
/**
 * Append count bytes starting at buf + offset to the in-memory buffer.
 *
 * Always returns OK: std::vector::insert reports failure only by throwing.
 * The previous check compared insert()'s return value against end(), which
 * is exactly what insert returns for an empty range — so a legitimate
 * zero-byte write was misreported as BAD_VALUE.
 */
status_t ByteArrayOutput::write(const uint8_t* buf, size_t offset, size_t count) {
    if (count > 0) {
        mByteArray.insert(mByteArray.end(), buf + offset, buf + offset + count);
    }
    return OK;
}
// Closing discards all buffered bytes; the output may be reused afterwards.
status_t ByteArrayOutput::close() {
    mByteArray.clear();
    return OK;
}

// Number of bytes accumulated since open()/the last close().
size_t ByteArrayOutput::getSize() const {
    return mByteArray.size();
}
/**
 * Pointer to the accumulated bytes (valid for getSize() bytes).
 *
 * data() is well-defined even for an empty vector, whereas the previous
 * &mByteArray[0] is undefined behavior when size() == 0.
 */
const uint8_t* ByteArrayOutput::getArray() const {
    return mByteArray.data();
}
} /*namespace img_utils*/
} /*namespace android*/

@ -0,0 +1,496 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/DngUtils.h>
#include <inttypes.h>
#include <algorithm>
#include <vector>
#include <math.h>
namespace android {
namespace img_utils {
OpcodeListBuilder::OpcodeListBuilder() : mCount(0), mOpList(), mEndianOut(&mOpList, BIG) {
    if (mEndianOut.open() != OK) {
        ALOGE("%s: Open failed.", __FUNCTION__);
    }
}

OpcodeListBuilder::~OpcodeListBuilder() {
    if (mEndianOut.close() != OK) {
        ALOGE("%s: Close failed.", __FUNCTION__);
    }
}

// Serialized size: the 4-byte opcode count header plus the opcode byte stream.
size_t OpcodeListBuilder::getSize() const {
    return mOpList.getSize() + sizeof(mCount);
}

uint32_t OpcodeListBuilder::getCount() const {
    return mCount;
}

// Copies the big-endian opcode count followed by the accumulated opcode bytes
// into buf, which must hold at least getSize() bytes.
status_t OpcodeListBuilder::buildOpList(uint8_t* buf) const {
    uint32_t countBE = convertToBigEndian(mCount);
    memcpy(buf, &countBE, sizeof(countBE));
    memcpy(buf + sizeof(countBE), mOpList.getArray(), mOpList.getSize());
    return OK;
}
/**
 * Add lens-shading gain maps for the given active-area bounds, dispatching
 * on the CFA layout: Bayer layouts get four per-channel maps, CFA_NONE gets
 * a single monochrome map. Returns BAD_VALUE for an unknown layout.
 */
status_t OpcodeListBuilder::addGainMapsForMetadata(uint32_t lsmWidth,
                                                   uint32_t lsmHeight,
                                                   uint32_t activeAreaTop,
                                                   uint32_t activeAreaLeft,
                                                   uint32_t activeAreaBottom,
                                                   uint32_t activeAreaRight,
                                                   CfaLayout cfa,
                                                   const float* lensShadingMap) {
    const uint32_t activeAreaWidth = activeAreaRight - activeAreaLeft;
    const uint32_t activeAreaHeight = activeAreaBottom - activeAreaTop;
    switch (cfa) {
        case CFA_RGGB:
        case CFA_GRBG:
        case CFA_GBRG:
        case CFA_BGGR:
            return addBayerGainMapsForMetadata(lsmWidth, lsmHeight, activeAreaWidth,
                    activeAreaHeight, cfa, lensShadingMap);
        case CFA_NONE:
            return addMonochromeGainMapsForMetadata(lsmWidth, lsmHeight, activeAreaWidth,
                    activeAreaHeight, lensShadingMap);
        default:
            ALOGE("%s: Unknown CFA layout %d", __FUNCTION__, cfa);
            return BAD_VALUE;
    }
}
/**
 * Add four GainMap opcodes (R, Geven, Godd, B) deinterleaved from the packed
 * 4-channel lens shading map, positioned according to the CFA layout.
 *
 * Returns BAD_VALUE for an unknown layout, the first addGainMap error, or OK.
 *
 * The original emitted four near-identical addGainMap call sites differing
 * only in tile offset and gain data; they are collapsed into one
 * data-driven loop with identical call order and arguments.
 */
status_t OpcodeListBuilder::addBayerGainMapsForMetadata(uint32_t lsmWidth,
                                                        uint32_t lsmHeight,
                                                        uint32_t activeAreaWidth,
                                                        uint32_t activeAreaHeight,
                                                        CfaLayout cfa,
                                                        const float* lensShadingMap) {
    // (row, col) offset of each channel's first sample within a 2x2 CFA tile.
    uint32_t redTop = 0;
    uint32_t redLeft = 0;
    uint32_t greenEvenTop = 0;
    uint32_t greenEvenLeft = 1;
    uint32_t greenOddTop = 1;
    uint32_t greenOddLeft = 0;
    uint32_t blueTop = 1;
    uint32_t blueLeft = 1;
    switch (cfa) {
        case CFA_RGGB:
            redTop = 0;
            redLeft = 0;
            greenEvenTop = 0;
            greenEvenLeft = 1;
            greenOddTop = 1;
            greenOddLeft = 0;
            blueTop = 1;
            blueLeft = 1;
            break;
        case CFA_GRBG:
            redTop = 0;
            redLeft = 1;
            greenEvenTop = 0;
            greenEvenLeft = 0;
            greenOddTop = 1;
            greenOddLeft = 1;
            blueTop = 1;
            blueLeft = 0;
            break;
        case CFA_GBRG:
            redTop = 1;
            redLeft = 0;
            greenEvenTop = 0;
            greenEvenLeft = 0;
            greenOddTop = 1;
            greenOddLeft = 1;
            blueTop = 0;
            blueLeft = 1;
            break;
        case CFA_BGGR:
            redTop = 1;
            redLeft = 1;
            greenEvenTop = 0;
            greenEvenLeft = 1;
            greenOddTop = 1;
            greenOddLeft = 0;
            blueTop = 0;
            blueLeft = 0;
            break;
        default:
            ALOGE("%s: Unknown CFA layout %d", __FUNCTION__, cfa);
            return BAD_VALUE;
    }
    // One gain plane per Bayer channel.
    std::vector<float> redMap(lsmWidth * lsmHeight);
    std::vector<float> greenEvenMap(lsmWidth * lsmHeight);
    std::vector<float> greenOddMap(lsmWidth * lsmHeight);
    std::vector<float> blueMap(lsmWidth * lsmHeight);
    // Normalized grid spacing; std::max guards single-point maps against
    // division by zero.
    double spacingV = 1.0 / std::max(1u, lsmHeight - 1);
    double spacingH = 1.0 / std::max(1u, lsmWidth - 1);
    // The metadata map packs 4 floats (R, Geven, Godd, B) per grid point.
    size_t lsmMapSize = lsmWidth * lsmHeight * 4;
    // Split lens shading map channels into separate arrays
    size_t j = 0;
    for (size_t i = 0; i < lsmMapSize; i += 4, ++j) {
        redMap[j] = lensShadingMap[i + LSM_R_IND];
        greenEvenMap[j] = lensShadingMap[i + LSM_GE_IND];
        greenOddMap[j] = lensShadingMap[i + LSM_GO_IND];
        blueMap[j] = lensShadingMap[i + LSM_B_IND];
    }
    // Emit the four per-channel maps in the original R, Geven, Godd, B order.
    struct ChannelMap {
        uint32_t top;
        uint32_t left;
        const float* gains;
    };
    const ChannelMap channels[] = {
        { redTop, redLeft, redMap.data() },
        { greenEvenTop, greenEvenLeft, greenEvenMap.data() },
        { greenOddTop, greenOddLeft, greenOddMap.data() },
        { blueTop, blueLeft, blueMap.data() },
    };
    status_t err = OK;
    for (const ChannelMap& channel : channels) {
        err = addGainMap(/*top*/channel.top,
                         /*left*/channel.left,
                         /*bottom*/activeAreaHeight,
                         /*right*/activeAreaWidth,
                         /*plane*/0,
                         /*planes*/1,
                         /*rowPitch*/2,
                         /*colPitch*/2,
                         /*mapPointsV*/lsmHeight,
                         /*mapPointsH*/lsmWidth,
                         /*mapSpacingV*/spacingV,
                         /*mapSpacingH*/spacingH,
                         /*mapOriginV*/0,
                         /*mapOriginH*/0,
                         /*mapPlanes*/1,
                         /*mapGains*/channel.gains);
        if (err != OK) return err;
    }
    return err;
}
/**
 * Add a single GainMap opcode for a monochrome sensor, using only the first
 * channel of the packed 4-channel lens shading map.
 *
 * Returns the addGainMap result. (The original's redundant
 * "if (err != OK) return err; return err;" tail is collapsed to one return.)
 */
status_t OpcodeListBuilder::addMonochromeGainMapsForMetadata(uint32_t lsmWidth,
                                                             uint32_t lsmHeight,
                                                             uint32_t activeAreaWidth,
                                                             uint32_t activeAreaHeight,
                                                             const float* lensShadingMap) {
    std::vector<float> mapVector(lsmWidth * lsmHeight);
    float* map = mapVector.data();
    // Normalized grid spacing; std::max guards single-point maps against
    // division by zero.
    double spacingV = 1.0 / std::max(1u, lsmHeight - 1);
    double spacingH = 1.0 / std::max(1u, lsmWidth - 1);
    size_t lsmMapSize = lsmWidth * lsmHeight * 4;
    // The metadata map packs 4 floats per grid point; monochrome uses only
    // the first channel of each group.
    size_t j = 0;
    for (size_t i = 0; i < lsmMapSize; i += 4, ++j) {
        map[j] = lensShadingMap[i];
    }
    // Single full-resolution plane (row/col pitch 1) covering the active area.
    return addGainMap(/*top*/0,
                      /*left*/0,
                      /*bottom*/activeAreaHeight,
                      /*right*/activeAreaWidth,
                      /*plane*/0,
                      /*planes*/1,
                      /*rowPitch*/1,
                      /*colPitch*/1,
                      /*mapPointsV*/lsmHeight,
                      /*mapPointsH*/lsmWidth,
                      /*mapSpacingV*/spacingV,
                      /*mapSpacingH*/spacingH,
                      /*mapOriginV*/0,
                      /*mapOriginH*/0,
                      /*mapPlanes*/1,
                      /*mapGains*/map);
}
// Serializes one GainMap opcode into mEndianOut. Byte layout written here,
// in order: opcode preamble (id + 4-byte version), flags, variable-data byte
// size, 10 batched uint32 args, 4 double args, mapPlanes, then
// mapPointsV * mapPointsH * mapPlanes float gains. The statement order below
// IS the wire format — do not reorder.
status_t OpcodeListBuilder::addGainMap(uint32_t top,
        uint32_t left,
        uint32_t bottom,
        uint32_t right,
        uint32_t plane,
        uint32_t planes,
        uint32_t rowPitch,
        uint32_t colPitch,
        uint32_t mapPointsV,
        uint32_t mapPointsH,
        double mapSpacingV,
        double mapSpacingH,
        double mapOriginV,
        double mapOriginH,
        uint32_t mapPlanes,
        const float* mapGains) {
    status_t err = addOpcodePreamble(GAIN_MAP_ID);
    if (err != OK) return err;
    // Allow this opcode to be skipped if not supported
    uint32_t flags = FLAG_OPTIONAL;
    err = mEndianOut.write(&flags, 0, 1);
    if (err != OK) return err;
    // 11 uint32 args = the 10 batched in settings1 plus mapPlanes.
    const uint32_t NUMBER_INT_ARGS = 11;
    const uint32_t NUMBER_DOUBLE_ARGS = 4;
    uint32_t totalSize = NUMBER_INT_ARGS * sizeof(uint32_t) + NUMBER_DOUBLE_ARGS * sizeof(double) +
            mapPointsV * mapPointsH * mapPlanes * sizeof(float);
    err = mEndianOut.write(&totalSize, 0, 1);
    if (err != OK) return err;
    // Batch writes as much as possible
    uint32_t settings1[] = { top,
            left,
            bottom,
            right,
            plane,
            planes,
            rowPitch,
            colPitch,
            mapPointsV,
            mapPointsH };
    err = mEndianOut.write(settings1, 0, NELEMS(settings1));
    if (err != OK) return err;
    double settings2[] = { mapSpacingV,
            mapSpacingH,
            mapOriginV,
            mapOriginH };
    err = mEndianOut.write(settings2, 0, NELEMS(settings2));
    if (err != OK) return err;
    err = mEndianOut.write(&mapPlanes, 0, 1);
    if (err != OK) return err;
    err = mEndianOut.write(mapGains, 0, mapPointsV * mapPointsH * mapPlanes);
    if (err != OK) return err;
    mCount++;
    return OK;
}
/**
 * Build a single-plane WarpRectilinear opcode from camera metadata: the
 * optical center is normalized to a [0, 1] fraction of the active array and
 * the six float distortion coefficients are widened to double.
 */
status_t OpcodeListBuilder::addWarpRectilinearForMetadata(const float* kCoeffs,
                                                          uint32_t activeArrayWidth,
                                                          uint32_t activeArrayHeight,
                                                          float opticalCenterX,
                                                          float opticalCenterY) {
    if (activeArrayWidth <= 1 || activeArrayHeight <= 1) {
        ALOGE("%s: Cannot add opcode for active array with dimensions w=%" PRIu32 ", h=%" PRIu32,
                __FUNCTION__, activeArrayWidth, activeArrayHeight);
        return BAD_VALUE;
    }
    double normalizedOCX = CLAMP(opticalCenterX / static_cast<double>(activeArrayWidth), 0, 1);
    double normalizedOCY = CLAMP(opticalCenterY / static_cast<double>(activeArrayHeight), 0, 1);
    double coeffs[6];
    for (size_t i = 0; i < 6; ++i) {
        coeffs[i] = kCoeffs[i];
    }
    return addWarpRectilinear(/*numPlanes*/1,
                              /*opticalCenterX*/normalizedOCX,
                              /*opticalCenterY*/normalizedOCY,
                              coeffs);
}
// Serializes one WarpRectilinear opcode into mEndianOut. Byte layout written
// here, in order: opcode preamble, flags, variable-data byte size, plane
// count, numPlanes * 6 double coefficients, then the optical center X and Y.
// The statement order below IS the wire format — do not reorder.
status_t OpcodeListBuilder::addWarpRectilinear(uint32_t numPlanes,
        double opticalCenterX,
        double opticalCenterY,
        const double* kCoeffs) {
    status_t err = addOpcodePreamble(WARP_RECTILINEAR_ID);
    if (err != OK) return err;
    // Allow this opcode to be skipped if not supported
    uint32_t flags = FLAG_OPTIONAL;
    err = mEndianOut.write(&flags, 0, 1);
    if (err != OK) return err;
    // Variable data = numPlanes field + coefficients + the 2 center doubles.
    const uint32_t NUMBER_CENTER_ARGS = 2;
    const uint32_t NUMBER_COEFFS = numPlanes * 6;
    uint32_t totalSize = (NUMBER_CENTER_ARGS + NUMBER_COEFFS) * sizeof(double) + sizeof(uint32_t);
    err = mEndianOut.write(&totalSize, 0, 1);
    if (err != OK) return err;
    err = mEndianOut.write(&numPlanes, 0, 1);
    if (err != OK) return err;
    err = mEndianOut.write(kCoeffs, 0, NUMBER_COEFFS);
    if (err != OK) return err;
    err = mEndianOut.write(&opticalCenterX, 0, 1);
    if (err != OK) return err;
    err = mEndianOut.write(&opticalCenterY, 0, 1);
    if (err != OK) return err;
    mCount++;
    return OK;
}
/**
 * Build a FixBadPixelsList opcode from camera metadata. Hot pixels are
 * encoded as bad points (row/col pairs); no bad rectangles are emitted.
 * Valid colorFilterArrangement values are 0-3.
 */
status_t OpcodeListBuilder::addBadPixelListForMetadata(const uint32_t* hotPixels,
                                                       uint32_t xyPairCount,
                                                       uint32_t colorFilterArrangement) {
    if (colorFilterArrangement > 3) {
        ALOGE("%s: Unknown color filter arrangement %" PRIu32, __FUNCTION__,
                colorFilterArrangement);
        return BAD_VALUE;
    }
    return addBadPixelList(colorFilterArrangement, xyPairCount, /*badRectCount*/0,
            hotPixels, nullptr);
}
// Serializes one FixBadPixelsList opcode into mEndianOut. Byte layout written
// here, in order: opcode preamble, flags, variable-data byte size, bayerPhase,
// badPointCount, badRectCount, then the row/col point pairs and the
// top/left/bottom/right rectangle tuples (each list only if non-empty).
// The statement order below IS the wire format — do not reorder.
status_t OpcodeListBuilder::addBadPixelList(uint32_t bayerPhase,
        uint32_t badPointCount,
        uint32_t badRectCount,
        const uint32_t* badPointRowColPairs,
        const uint32_t* badRectTopLeftBottomRightTuples) {
    status_t err = addOpcodePreamble(FIX_BAD_PIXELS_LIST);
    if (err != OK) return err;
    // Allow this opcode to be skipped if not supported
    uint32_t flags = FLAG_OPTIONAL;
    err = mEndianOut.write(&flags, 0, 1);
    if (err != OK) return err;
    // 3 fixed fields (bayerPhase + the two counts), 2 words per point,
    // 4 words per rectangle.
    const uint32_t NUM_NON_VARLEN_FIELDS = 3;
    const uint32_t SIZE_OF_POINT = 2;
    const uint32_t SIZE_OF_RECT = 4;
    uint32_t totalSize = (NUM_NON_VARLEN_FIELDS + badPointCount * SIZE_OF_POINT +
            badRectCount * SIZE_OF_RECT) * sizeof(uint32_t);
    err = mEndianOut.write(&totalSize, 0, 1);
    if (err != OK) return err;
    err = mEndianOut.write(&bayerPhase, 0, 1);
    if (err != OK) return err;
    err = mEndianOut.write(&badPointCount, 0, 1);
    if (err != OK) return err;
    err = mEndianOut.write(&badRectCount, 0, 1);
    if (err != OK) return err;
    if (badPointCount > 0) {
        err = mEndianOut.write(badPointRowColPairs, 0, SIZE_OF_POINT * badPointCount);
        if (err != OK) return err;
    }
    if (badRectCount > 0) {
        err = mEndianOut.write(badRectTopLeftBottomRightTuples, 0, SIZE_OF_RECT * badRectCount);
        if (err != OK) return err;
    }
    mCount++;
    return OK;
}
// Writes the fields every opcode starts with: the 32-bit opcode id followed
// by the 4-byte version {1, 3, 0, 0}.
status_t OpcodeListBuilder::addOpcodePreamble(uint32_t opcodeId) {
    status_t err = mEndianOut.write(&opcodeId, 0, 1);
    if (err != OK) {
        return err;
    }
    const uint8_t version[] = {1, 3, 0, 0};
    err = mEndianOut.write(version, 0, NELEMS(version));
    if (err != OK) {
        return err;
    }
    return OK;
}
} /*namespace img_utils*/
} /*namespace android*/

@ -0,0 +1,83 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/EndianUtils.h>
namespace android {
namespace img_utils {
EndianOutput::EndianOutput(Output* out, Endianness end)
        : mOffset(0), mOutput(out), mEndian(end) {
}

EndianOutput::~EndianOutput() {
}

// Resets the byte counter and opens the wrapped output.
status_t EndianOutput::open() {
    mOffset = 0;
    return mOutput->open();
}

status_t EndianOutput::close() {
    return mOutput->close();
}

// Switches the byte order applied to subsequent multi-byte writes.
void EndianOutput::setEndianness(Endianness end) {
    mEndian = end;
}

// Total bytes written since open().
uint32_t EndianOutput::getCurrentOffset() const {
    return mOffset;
}

Endianness EndianOutput::getEndianness() const {
    return mEndian;
}
// Single-byte values need no byte swapping: forward to the wrapped output
// and advance the offset on success.
status_t EndianOutput::write(const uint8_t* buf, size_t offset, size_t count) {
    status_t res = mOutput->write(buf, offset, count);
    if (res == OK) {
        mOffset += count;
    }
    return res;
}

status_t EndianOutput::write(const int8_t* buf, size_t offset, size_t count) {
    return write(reinterpret_cast<const uint8_t*>(buf), offset, count);
}
// Stamps out a byte-order-aware write() overload for each multi-byte integer
// type; writeHelper<T> applies the configured endianness conversion.
#define DEFINE_WRITE(_type_) \
status_t EndianOutput::write(const _type_* buf, size_t offset, size_t count) { \
    return writeHelper<_type_>(buf, offset, count); \
}
DEFINE_WRITE(uint16_t)
DEFINE_WRITE(int16_t)
DEFINE_WRITE(uint32_t)
DEFINE_WRITE(int32_t)
DEFINE_WRITE(uint64_t)
DEFINE_WRITE(int64_t)
/**
 * Floating-point values are byte-swapped via the same-width unsigned integer
 * type. The size equality is a compile-time fact, so enforce it with
 * static_assert instead of a runtime assert (which is compiled out in
 * NDEBUG builds and would silently admit a mismatched platform).
 */
status_t EndianOutput::write(const float* buf, size_t offset, size_t count) {
    static_assert(sizeof(float) == sizeof(uint32_t), "float must be 32 bits wide");
    return writeHelper<uint32_t>(reinterpret_cast<const uint32_t*>(buf), offset, count);
}

status_t EndianOutput::write(const double* buf, size_t offset, size_t count) {
    static_assert(sizeof(double) == sizeof(uint64_t), "double must be 64 bits wide");
    return writeHelper<uint64_t>(reinterpret_cast<const uint64_t*>(buf), offset, count);
}
} /*namespace img_utils*/
} /*namespace android*/

@ -0,0 +1,85 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/FileInput.h>
#include <utils/Log.h>
namespace android {
namespace img_utils {
FileInput::FileInput(String8 path) : mFp(NULL), mPath(path), mOpen(false) {
}

FileInput::~FileInput() {
    // Reaching the destructor with the file still open is a caller bug;
    // close anyway so the FILE handle is not leaked.
    if (mOpen) {
        ALOGE("%s: FileInput destroyed without calling close!", __FUNCTION__);
        close();
    }
}
/**
 * Open mPath for binary reading. Idempotent: a second open() on an already
 * open file logs a warning and returns OK.
 *
 * Uses mPath.string() explicitly, matching every other use of mPath in this
 * file, instead of relying on an implicit String8 conversion.
 */
status_t FileInput::open() {
    if (mOpen) {
        ALOGW("%s: Open called when file %s already open.", __FUNCTION__, mPath.string());
        return OK;
    }
    mFp = ::fopen(mPath.string(), "rb");
    if (!mFp) {
        ALOGE("%s: Could not open file %s", __FUNCTION__, mPath.string());
        return BAD_VALUE;
    }
    mOpen = true;
    return OK;
}
/**
 * Read up to count bytes into buf + offset. Returns the number of bytes
 * read, NOT_ENOUGH_DATA at end of file with nothing read, or BAD_VALUE on
 * a stream error or if the file is not open.
 */
ssize_t FileInput::read(uint8_t* buf, size_t offset, size_t count) {
    if (!mOpen) {
        ALOGE("%s: Could not read file %s, file not open.", __FUNCTION__, mPath.string());
        return BAD_VALUE;
    }
    size_t readCount = ::fread(buf + offset, sizeof(uint8_t), count, mFp);
    // Check the error indicator before the EOF indicator: a failed read may
    // set both.
    int error = ::ferror(mFp);
    if (error != 0) {
        ALOGE("%s: Error %d occurred while reading file %s.", __FUNCTION__, error, mPath.string());
        return BAD_VALUE;
    }
    // End of file reached
    if (::feof(mFp) != 0 && readCount == 0) {
        return NOT_ENOUGH_DATA;
    }
    return readCount;
}
/**
 * Close the underlying file. Idempotent: closing an already-closed input
 * logs a warning and returns OK. The object is marked closed even if
 * fclose reports an error.
 */
status_t FileInput::close() {
    if (!mOpen) {
        ALOGW("%s: Close called when file %s already close.", __FUNCTION__, mPath.string());
        return OK;
    }
    const bool closedCleanly = (::fclose(mFp) == 0);
    if (!closedCleanly) {
        ALOGE("%s: Failed to close file %s.", __FUNCTION__, mPath.string());
    }
    mOpen = false;
    return closedCleanly ? OK : BAD_VALUE;
}
} /*namespace img_utils*/
} /*namespace android*/

@ -0,0 +1,79 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/FileOutput.h>
#include <utils/Log.h>
namespace android {
namespace img_utils {
FileOutput::FileOutput(String8 path) : mFp(NULL), mPath(path), mOpen(false) {
}

FileOutput::~FileOutput() {
    // Reaching the destructor with the file still open is a caller bug;
    // close anyway so the FILE handle is not leaked.
    if (mOpen) {
        ALOGW("%s: Destructor called with %s still open.", __FUNCTION__, mPath.string());
        close();
    }
}
/**
 * Open mPath for binary writing (truncating). Idempotent: a second open()
 * on an already open file logs a warning and returns OK.
 *
 * Uses mPath.string() explicitly, matching every other use of mPath in this
 * file, instead of relying on an implicit String8 conversion.
 */
status_t FileOutput::open() {
    if (mOpen) {
        ALOGW("%s: Open called when file %s already open.", __FUNCTION__, mPath.string());
        return OK;
    }
    mFp = ::fopen(mPath.string(), "wb");
    if (!mFp) {
        ALOGE("%s: Could not open file %s", __FUNCTION__, mPath.string());
        return BAD_VALUE;
    }
    mOpen = true;
    return OK;
}
/**
 * Write count bytes starting at buf + offset. Returns BAD_VALUE if the file
 * is not open, if fwrite writes fewer than count items (short write), or if
 * the stream error indicator is set.
 *
 * The original discarded fwrite's return value and relied solely on
 * ferror(); checking the written count directly catches short writes even
 * if the error flag is not observed.
 */
status_t FileOutput::write(const uint8_t* buf, size_t offset, size_t count) {
    if (!mOpen) {
        ALOGE("%s: Could not write file %s, file not open.", __FUNCTION__, mPath.string());
        return BAD_VALUE;
    }
    size_t written = ::fwrite(buf + offset, sizeof(uint8_t), count, mFp);
    int error = ::ferror(mFp);
    if (error != 0 || written != count) {
        ALOGE("%s: Error %d occurred while writing file %s.", __FUNCTION__, error, mPath.string());
        return BAD_VALUE;
    }
    return OK;
}
/**
 * Close the underlying file. Idempotent: closing an already-closed output
 * logs a warning and returns OK. The object is marked closed even if
 * fclose reports an error.
 */
status_t FileOutput::close() {
    if (!mOpen) {
        ALOGW("%s: Close called when file %s already close.", __FUNCTION__, mPath.string());
        return OK;
    }
    const bool closedCleanly = (::fclose(mFp) == 0);
    if (!closedCleanly) {
        ALOGE("%s: Failed to close file %s.", __FUNCTION__, mPath.string());
    }
    mOpen = false;
    return closedCleanly ? OK : BAD_VALUE;
}
} /*namespace img_utils*/
} /*namespace android*/

@ -0,0 +1,57 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/Input.h>
namespace android {
namespace img_utils {
Input::~Input() {
}

// Default implementations: inputs with no setup or teardown report success.
status_t Input::open() {
    return OK;
}

status_t Input::close() {
    return OK;
}
/**
 * Discard up to count bytes by reading them into a scratch buffer.
 *
 * Returns count if fully skipped, the number of bytes skipped if EOF (or a
 * stalled read) was hit partway, NOT_ENOUGH_DATA if nothing could be
 * skipped, or a negative error code from read().
 *
 * Added guard: the original looped forever if read() returned 0 without
 * reporting an error or NOT_ENOUGH_DATA; a zero-progress read is now
 * treated like hitting EOF.
 */
ssize_t Input::skip(size_t count) {
    const size_t SKIP_BUF_SIZE = 1024;
    uint8_t skipBuf[SKIP_BUF_SIZE];
    size_t remaining = count;
    while (remaining > 0) {
        size_t amt = (SKIP_BUF_SIZE > remaining) ? remaining : SKIP_BUF_SIZE;
        ssize_t ret = read(skipBuf, 0, amt);
        if (ret < 0) {
            if (ret == NOT_ENOUGH_DATA) {
                // End of file encountered
                if (remaining == count) {
                    // Read no bytes, return EOF
                    return NOT_ENOUGH_DATA;
                }
                // Return num bytes read
                return count - remaining;
            }
            // Return error code.
            return ret;
        }
        if (ret == 0) {
            // No progress and no error: bail out instead of spinning forever.
            return (remaining == count) ? NOT_ENOUGH_DATA
                                        : static_cast<ssize_t>(count - remaining);
        }
        remaining -= ret;
    }
    return count;
}
} /*namespace img_utils*/
} /*namespace android*/

@ -0,0 +1,39 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/Orderable.h>
#include <utils/Log.h>
namespace android {
namespace img_utils {
// Generates each relational operator by delegating to getComparableValue(),
// keeping all six comparisons mutually consistent by construction.
#define COMPARE(op) \
bool Orderable::operator op (const Orderable& orderable) const { \
    return getComparableValue() op orderable.getComparableValue(); \
}
COMPARE(>)
COMPARE(<)
COMPARE(>=)
COMPARE(<=)
COMPARE(==)
COMPARE(!=)
Orderable::~Orderable() {}
} /*namespace img_utils*/
} /*namespace android*/

@ -0,0 +1,28 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/Output.h>

namespace android {
namespace img_utils {

Output::~Output() {}

// Default no-op lifecycle hooks: an Output with no underlying resource to
// set up or tear down simply reports success. Subclasses that wrap a real
// sink (file descriptor, stream, ...) override these.
status_t Output::open() { return OK; }

status_t Output::close() { return OK; }

} /*namespace img_utils*/
} /*namespace android*/

@ -0,0 +1,44 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/SortedEntryVector.h>

#include <utils/TypeHelpers.h>
#include <utils/Log.h>

namespace android {
namespace img_utils {

SortedEntryVector::~SortedEntryVector() {}

// Returns the index of the entry whose TIFF tag ID equals `tag`, or -1
// when no such entry exists. Linear scan over the vector.
ssize_t SortedEntryVector::indexOfTag(uint16_t tag) const {
    // TODO: Use binary search here.
    // NOTE(review): entries are ordered by do_compare() below (i.e. by the
    // entries' comparable values). A binary search on the tag is only valid
    // if that ordering coincides with tag order — confirm before converting.
    for (size_t i = 0; i < size(); ++i) {
        if (itemAt(i)->getTag() == tag) {
            return i;
        }
    }
    return -1;
}

// Ordering hook used by the base SortedVector: compares the pointed-to
// TiffEntry objects (via compare_type on the dereferenced strong pointers),
// not the sp<> wrappers themselves.
int SortedEntryVector::do_compare(const void* lhs, const void* rhs) const {
    const sp<TiffEntry>* lEntry = reinterpret_cast<const sp<TiffEntry>*>(lhs);
    const sp<TiffEntry>* rEntry = reinterpret_cast<const sp<TiffEntry>*>(rhs);
    return compare_type(**lEntry, **rEntry);
}

} /*namespace img_utils*/
} /*namespace android*/

@ -0,0 +1,25 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/StripSource.h>
namespace android {
namespace img_utils {
StripSource::~StripSource() {}
} /*namespace img_utils*/
} /*namespace android*/

@ -0,0 +1,251 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/TiffIfd.h>
#include <img_utils/TiffHelpers.h>
#include <img_utils/TiffEntry.h>
#include <utils/Errors.h>
#include <utils/StrongPointer.h>
#include <utils/Log.h>
namespace android {
namespace img_utils {
TiffEntry::~TiffEntry() {}

/**
 * Specialize for each valid type, including sub-IFDs.
 *
 * Values with types other than the ones given here should not compile.
 *
 * Each specialization validates that the declared TIFF TagType is legal for
 * the C++ element type being stored: on success the value pointer is passed
 * through unchanged, on a mismatch an error is logged and NULL is returned.
 */
template<>
const sp<TiffIfd>* TiffEntry::forceValidType<sp<TiffIfd> >(TagType type, const sp<TiffIfd>* value) {
    // Sub-IFD entries are serialized as LONG (4-byte offset) values.
    if (type == LONG) {
        return value;
    }
    ALOGE("%s: Value of type 'ifd' is not valid for tag with TIFF type %d.",
            __FUNCTION__, type);
    return NULL;
}

template<>
const uint8_t* TiffEntry::forceValidType<uint8_t>(TagType type, const uint8_t* value) {
    // Raw bytes also back ASCII strings and UNDEFINED blobs.
    if (type == BYTE || type == ASCII || type == UNDEFINED) {
        return value;
    }
    ALOGE("%s: Value of type 'uint8_t' is not valid for tag with TIFF type %d.",
            __FUNCTION__, type);
    return NULL;
}

template<>
const int8_t* TiffEntry::forceValidType<int8_t>(TagType type, const int8_t* value) {
    if (type == SBYTE || type == ASCII || type == UNDEFINED) {
        return value;
    }
    ALOGE("%s: Value of type 'int8_t' is not valid for tag with TIFF type %d.",
            __FUNCTION__, type);
    return NULL;
}

template<>
const uint16_t* TiffEntry::forceValidType<uint16_t>(TagType type, const uint16_t* value) {
    if (type == SHORT) {
        return value;
    }
    ALOGE("%s: Value of type 'uint16_t' is not valid for tag with TIFF type %d.",
            __FUNCTION__, type);
    return NULL;
}

template<>
const int16_t* TiffEntry::forceValidType<int16_t>(TagType type, const int16_t* value) {
    if (type == SSHORT) {
        return value;
    }
    ALOGE("%s: Value of type 'int16_t' is not valid for tag with TIFF type %d.",
            __FUNCTION__, type);
    return NULL;
}

template<>
const uint32_t* TiffEntry::forceValidType<uint32_t>(TagType type, const uint32_t* value) {
    // RATIONAL values are stored as pairs of unsigned 32-bit integers.
    if (type == LONG || type == RATIONAL) {
        return value;
    }
    ALOGE("%s: Value of type 'uint32_t' is not valid for tag with TIFF type %d.",
            __FUNCTION__, type);
    return NULL;
}

template<>
const int32_t* TiffEntry::forceValidType<int32_t>(TagType type, const int32_t* value) {
    // SRATIONAL values are stored as pairs of signed 32-bit integers.
    if (type == SLONG || type == SRATIONAL) {
        return value;
    }
    ALOGE("%s: Value of type 'int32_t' is not valid for tag with TIFF type %d.",
            __FUNCTION__, type);
    return NULL;
}

template<>
const double* TiffEntry::forceValidType<double>(TagType type, const double* value) {
    if (type == DOUBLE) {
        return value;
    }
    ALOGE("%s: Value of type 'double' is not valid for tag with TIFF type %d.",
            __FUNCTION__, type);
    return NULL;
}

template<>
const float* TiffEntry::forceValidType<float>(TagType type, const float* value) {
    if (type == FLOAT) {
        return value;
    }
    ALOGE("%s: Value of type 'float' is not valid for tag with TIFF type %d.",
            __FUNCTION__, type);
    return NULL;
}
// Human-readable dump of this entry: "[id: <tag>, type: <n>, count: <n>,
// value: '<values>']". Value output is capped at MAX_PRINT_STRING_LENGTH
// elements (with a trailing "..." when truncated).
std::string TiffEntry::toString() const {
    std::string output;
    uint32_t count = getCount();
    char buf[256] = { 0 };
    snprintf(buf, sizeof(buf), "[id: %x, type: %d, count: %u, value: '", getTag(), getType(), count);
    output.append(buf);
    // Cap the number of printed elements so huge arrays don't flood the log.
    size_t cappedCount = count;
    if (count > MAX_PRINT_STRING_LENGTH) {
        cappedCount = MAX_PRINT_STRING_LENGTH;
    }
    TagType type = getType();
    switch (type) {
        case UNDEFINED:
        case BYTE: {
            const uint8_t* typed_data = getData<uint8_t>();
            for (size_t i = 0; i < cappedCount; ++i) {
                output.append(std::to_string(typed_data[i]));
                output.append(" ");
            }
            break;
        }
        case ASCII: {
            // ASCII data is printed as a string, truncated to the cap.
            const char* typed_data = reinterpret_cast<const char*>(getData<uint8_t>());
            size_t len = count;
            if (count > MAX_PRINT_STRING_LENGTH) {
                len = MAX_PRINT_STRING_LENGTH;
            }
            output.append(typed_data, len);
            break;
        }
        case SHORT: {
            const uint16_t* typed_data = getData<uint16_t>();
            for (size_t i = 0; i < cappedCount; ++i) {
                output.append(std::to_string(typed_data[i]));
                output.append(" ");
            }
            break;
        }
        case LONG: {
            const uint32_t* typed_data = getData<uint32_t>();
            for (size_t i = 0; i < cappedCount; ++i) {
                output.append(std::to_string(typed_data[i]));
                output.append(" ");
            }
            break;
        }
        case RATIONAL: {
            // Each rational is a numerator/denominator pair, so the raw
            // uint32_t array holds 2*count values; print them as "num/den".
            const uint32_t* typed_data = getData<uint32_t>();
            cappedCount <<= 1;
            for (size_t i = 0; i < cappedCount; i+=2) {
                output.append(std::to_string(typed_data[i]));
                output.append("/");
                output.append(std::to_string(typed_data[i + 1]));
                output.append(" ");
            }
            break;
        }
        case SBYTE: {
            const int8_t* typed_data = getData<int8_t>();
            for (size_t i = 0; i < cappedCount; ++i) {
                output.append(std::to_string(typed_data[i]));
                output.append(" ");
            }
            break;
        }
        case SSHORT: {
            const int16_t* typed_data = getData<int16_t>();
            for (size_t i = 0; i < cappedCount; ++i) {
                output.append(std::to_string(typed_data[i]));
                output.append(" ");
            }
            break;
        }
        case SLONG: {
            const int32_t* typed_data = getData<int32_t>();
            for (size_t i = 0; i < cappedCount; ++i) {
                output.append(std::to_string(typed_data[i]));
                output.append(" ");
            }
            break;
        }
        case SRATIONAL: {
            // Signed rationals are likewise stored as int32_t pairs.
            const int32_t* typed_data = getData<int32_t>();
            cappedCount <<= 1;
            for (size_t i = 0; i < cappedCount; i+=2) {
                output.append(std::to_string(typed_data[i]));
                output.append("/");
                output.append(std::to_string(typed_data[i + 1]));
                output.append(" ");
            }
            break;
        }
        case FLOAT: {
            const float* typed_data = getData<float>();
            for (size_t i = 0; i < cappedCount; ++i) {
                output.append(std::to_string(typed_data[i]));
                output.append(" ");
            }
            break;
        }
        case DOUBLE: {
            const double* typed_data = getData<double>();
            for (size_t i = 0; i < cappedCount; ++i) {
                output.append(std::to_string(typed_data[i]));
                output.append(" ");
            }
            break;
        }
        default: {
            output.append("unknown type ");
            break;
        }
    }
    if (count > MAX_PRINT_STRING_LENGTH) {
        output.append("...");
    }
    output.append("']");
    return output;
}
} /*namespace img_utils*/
} /*namespace android*/

@ -0,0 +1,25 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/TiffEntryImpl.h>
// #include <utils/Vector.h>
namespace android {
namespace img_utils {
} /*namespace img_utils*/
} /*namespace android*/

@ -0,0 +1,386 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "TiffIfd"
#include <img_utils/TagDefinitions.h>
#include <img_utils/TiffHelpers.h>
#include <img_utils/TiffIfd.h>
#include <img_utils/TiffWriter.h>
#include <utils/Log.h>
namespace android {
namespace img_utils {
TiffIfd::TiffIfd(uint32_t ifdId)
        : mNextIfd(), mIfdId(ifdId), mStripOffsetsInitialized(false) {}

TiffIfd::~TiffIfd() {}

// Inserts (or replaces) the entry keyed by its tag. Fails only when this
// IFD is already at the TIFF per-IFD entry limit.
status_t TiffIfd::addEntry(const sp<TiffEntry>& entry) {
    if (mEntries.size() >= MAX_IFD_ENTRIES) {
        ALOGW("%s: Failed to add entry for tag 0x%x to IFD %u, too many entries in IFD!",
                __FUNCTION__, entry->getTag(), mIfdId);
        return BAD_INDEX;
    }
    mEntries[entry->getTag()] = entry;
    return OK;
}

// Looks up the entry for the given tag; returns NULL (with a warning) when
// the tag is not present.
sp<TiffEntry> TiffIfd::getEntry(uint16_t tag) const {
    const auto entry = mEntries.find(tag);
    if (entry == mEntries.cend()) {
        ALOGW("%s: No entry for tag 0x%x in ifd %u.", __FUNCTION__, tag, mIfdId);
        return NULL;
    }
    return entry->second;
}

// Removes the entry for the given tag; no-op when the tag is absent.
void TiffIfd::removeEntry(uint16_t tag) {
    mEntries.erase(tag);
}

void TiffIfd::setNextIfd(const sp<TiffIfd>& ifd) {
    mNextIfd = ifd;
}

sp<TiffIfd> TiffIfd::getNextIfd() const {
    return mNextIfd;
}
// Validates this IFD and returns the byte offset just past its (word-
// aligned) entry table — i.e. where the first out-of-line entry value will
// land. Returns BAD_OFFSET when the IFD is empty, has too many entries, or
// the incoming offset is itself BAD_OFFSET.
uint32_t TiffIfd::checkAndGetOffset(uint32_t offset) const {
    size_t size = mEntries.size();
    if (size > MAX_IFD_ENTRIES) {
        ALOGW("%s: Could not calculate IFD offsets, IFD %u contains too many entries.",
                __FUNCTION__, mIfdId);
        return BAD_OFFSET;
    }
    if (size <= 0) {
        ALOGW("%s: Could not calculate IFD offsets, IFD %u contains no entries.", __FUNCTION__,
                mIfdId);
        return BAD_OFFSET;
    }
    if (offset == BAD_OFFSET) {
        ALOGW("%s: Could not calculate IFD offsets, IFD %u had a bad initial offset.",
                __FUNCTION__, mIfdId);
        return BAD_OFFSET;
    }
    // Entry-table size for `size` entries, rounded up to word alignment.
    uint32_t ifdSize = calculateIfdSize(size);
    WORD_ALIGN(ifdSize);
    return offset + ifdSize;
}
// Serializes this IFD to `out`, assuming the stream is positioned at
// `offset`: entry count, then one 12-byte tag record per entry (with
// offsets into the value area), the next-IFD pointer, padding to word
// alignment, and finally each entry's out-of-line value data.
status_t TiffIfd::writeData(uint32_t offset, /*out*/EndianOutput* out) const {
    assert((offset % TIFF_WORD_SIZE) == 0);
    status_t ret = OK;
    ALOGV("%s: IFD %u written to offset %u", __FUNCTION__, mIfdId, offset );
    // First byte offset where out-of-line entry values will be written.
    uint32_t valueOffset = checkAndGetOffset(offset);
    // NOTE(review): this assumes BAD_OFFSET == 0 — confirm against
    // TiffHelpers.h; comparing against BAD_OFFSET directly would be clearer.
    if (valueOffset == 0) {
        return BAD_VALUE;
    }
    size_t size = mEntries.size();
    // Writer IFD header (2 bytes, number of entries).
    uint16_t header = static_cast<uint16_t>(size);
    BAIL_ON_FAIL(out->write(&header, 0, 1), ret);
    // Write tag entries
    for (auto it = mEntries.cbegin(); it != mEntries.cend(); ++it) {
        BAIL_ON_FAIL(it->second->writeTagInfo(valueOffset, out), ret);
        valueOffset += it->second->getSize();
    }
    // Writer IFD footer (4 bytes, offset to next IFD).
    uint32_t footer = (mNextIfd != NULL) ? offset + getSize() : 0;
    BAIL_ON_FAIL(out->write(&footer, 0, 1), ret);
    assert(out->getCurrentOffset() == offset + calculateIfdSize(size));
    // Write zeroes till word aligned
    ZERO_TILL_WORD(out, calculateIfdSize(size), ret);
    // Write values for each tag entry
    for (auto it = mEntries.cbegin(); it != mEntries.cend(); ++it) {
        size_t last = out->getCurrentOffset();
        // Only write values that are too large to fit in the 12-byte TIFF entry
        if (it->second->getSize() > OFFSET_SIZE) {
            BAIL_ON_FAIL(it->second->writeData(out->getCurrentOffset(), out), ret);
        }
        // Sanity-check that the entry wrote exactly the bytes it claimed.
        size_t next = out->getCurrentOffset();
        size_t diff = (next - last);
        size_t actual = it->second->getSize();
        if (diff != actual) {
            ALOGW("Sizes do not match for tag %x. Expected %zu, received %zu",
                    it->first, actual, diff);
        }
    }
    assert(out->getCurrentOffset() == offset + getSize());
    return ret;
}
// Total on-disk footprint of this IFD: the word-aligned entry table plus
// the out-of-line value data of every entry.
size_t TiffIfd::getSize() const {
    uint32_t total = calculateIfdSize(mEntries.size());
    WORD_ALIGN(total);
    for (const auto& entry : mEntries) {
        total += entry.second->getSize();
    }
    return total;
}

uint32_t TiffIfd::getId() const {
    return mIfdId;
}

// Orderable hook: IFDs sort by their numeric ID.
uint32_t TiffIfd::getComparableValue() const {
    return mIfdId;
}
// Validates that this IFD has the image-dimension tags required for strip
// layout (ImageWidth, ImageLength, SamplesPerPixel, BitsPerSample), then
// installs RowsPerStrip, StripByteCounts, and a placeholder StripOffsets
// entry (offsets are fixed up later by setStripOffset()). Strips are sized
// near 8KB without splitting rows.
status_t TiffIfd::validateAndSetStripTags() {
    sp<TiffEntry> widthEntry = getEntry(TAG_IMAGEWIDTH);
    if (widthEntry == NULL) {
        ALOGE("%s: IFD %u doesn't have a ImageWidth tag set", __FUNCTION__, mIfdId);
        return BAD_VALUE;
    }
    sp<TiffEntry> heightEntry = getEntry(TAG_IMAGELENGTH);
    if (heightEntry == NULL) {
        ALOGE("%s: IFD %u doesn't have a ImageLength tag set", __FUNCTION__, mIfdId);
        return BAD_VALUE;
    }
    sp<TiffEntry> samplesEntry = getEntry(TAG_SAMPLESPERPIXEL);
    if (samplesEntry == NULL) {
        ALOGE("%s: IFD %u doesn't have a SamplesPerPixel tag set", __FUNCTION__, mIfdId);
        return BAD_VALUE;
    }
    sp<TiffEntry> bitsEntry = getEntry(TAG_BITSPERSAMPLE);
    if (bitsEntry == NULL) {
        ALOGE("%s: IFD %u doesn't have a BitsPerSample tag set", __FUNCTION__, mIfdId);
        return BAD_VALUE;
    }
    uint32_t width = *(widthEntry->getData<uint32_t>());
    uint32_t height = *(heightEntry->getData<uint32_t>());
    uint16_t bitsPerSample = *(bitsEntry->getData<uint16_t>());
    uint16_t samplesPerPixel = *(samplesEntry->getData<uint16_t>());
    // Strip layout works in whole bytes, so reject fractional-byte samples.
    if ((bitsPerSample % 8) != 0) {
        ALOGE("%s: BitsPerSample %d in IFD %u is not byte-aligned.", __FUNCTION__,
                bitsPerSample, mIfdId);
        return BAD_VALUE;
    }
    uint32_t bytesPerSample = bitsPerSample / 8;
    // Choose strip size as close to 8kb as possible without splitting rows.
    // If the row length is >8kb, each strip will only contain a single row.
    const uint32_t rowLengthBytes = bytesPerSample * samplesPerPixel * width;
    const uint32_t idealChunkSize = (1 << 13); // 8kb
    uint32_t rowsPerChunk = idealChunkSize / rowLengthBytes;
    rowsPerChunk = (rowsPerChunk == 0) ? 1 : rowsPerChunk;
    const uint32_t actualChunkSize = rowLengthBytes * rowsPerChunk;
    // The final strip may hold fewer rows when height isn't a multiple.
    const uint32_t lastChunkRows = height % rowsPerChunk;
    const uint32_t lastChunkSize = lastChunkRows * rowLengthBytes;
    if (actualChunkSize > /*max strip size for TIFF/EP*/65536) {
        ALOGE("%s: Strip length too long.", __FUNCTION__);
        return BAD_VALUE;
    }
    size_t numStrips = height / rowsPerChunk;
    // Add another strip for the incomplete chunk.
    if (lastChunkRows > 0) {
        numStrips += 1;
    }
    // RowsPerStrip: number of rows in each full strip.
    uint32_t rowsPerStripVal = rowsPerChunk;
    sp<TiffEntry> rowsPerStrip = TiffWriter::uncheckedBuildEntry(TAG_ROWSPERSTRIP, LONG, 1,
            UNDEFINED_ENDIAN, &rowsPerStripVal);
    if (rowsPerStrip == NULL) {
        ALOGE("%s: Could not build entry for RowsPerStrip tag.", __FUNCTION__);
        return BAD_VALUE;
    }
    std::vector<uint32_t> byteCounts;
    byteCounts.reserve(numStrips);
    for (size_t i = 0; i < numStrips; ++i) {
        if (lastChunkRows > 0 && i == (numStrips - 1)) {
            byteCounts.push_back(lastChunkSize);
        } else {
            byteCounts.push_back(actualChunkSize);
        }
    }
    // Set byte counts for each strip
    sp<TiffEntry> stripByteCounts = TiffWriter::uncheckedBuildEntry(TAG_STRIPBYTECOUNTS, LONG,
            static_cast<uint32_t>(numStrips), UNDEFINED_ENDIAN, &byteCounts[0]);
    if (stripByteCounts == NULL) {
        ALOGE("%s: Could not build entry for StripByteCounts tag.", __FUNCTION__);
        return BAD_VALUE;
    }
    std::vector<uint32_t> stripOffsetsVector;
    stripOffsetsVector.resize(numStrips);
    // Set uninitialized offsets
    sp<TiffEntry> stripOffsets = TiffWriter::uncheckedBuildEntry(TAG_STRIPOFFSETS, LONG,
            static_cast<uint32_t>(numStrips), UNDEFINED_ENDIAN, &stripOffsetsVector[0]);
    if (stripOffsets == NULL) {
        ALOGE("%s: Could not build entry for StripOffsets tag.", __FUNCTION__);
        return BAD_VALUE;
    }
    // NOTE(review): the next two error messages say "StripByteCounts" but
    // actually concern RowsPerStrip / StripOffsets — copy-paste artifact.
    if(addEntry(stripByteCounts) != OK) {
        ALOGE("%s: Could not add entry for StripByteCounts to IFD %u", __FUNCTION__, mIfdId);
        return BAD_VALUE;
    }
    if(addEntry(rowsPerStrip) != OK) {
        ALOGE("%s: Could not add entry for StripByteCounts to IFD %u", __FUNCTION__, mIfdId);
        return BAD_VALUE;
    }
    if(addEntry(stripOffsets) != OK) {
        ALOGE("%s: Could not add entry for StripByteCounts to IFD %u", __FUNCTION__, mIfdId);
        return BAD_VALUE;
    }
    // Flag that strip tags exist but offsets still need setStripOffset().
    mStripOffsetsInitialized = true;
    return OK;
}
// Returns true when validateAndSetStripTags() has installed strip tags
// whose StripOffsets values are still placeholders awaiting
// setStripOffset(). NOTE(review): despite the name, this returns
// mStripOffsetsInitialized directly — the flag means "strip tags set,
// offsets pending"; confirm against the caller (TiffWriter::write) before
// renaming either side.
bool TiffIfd::uninitializedOffsets() const {
    return mStripOffsetsInitialized;
}
/**
 * Rewrites the StripOffsets entry so that strips are laid out contiguously
 * starting at `offset`, using the sizes recorded in StripByteCounts. Both
 * entries must already exist (see validateAndSetStripTags()) and have
 * matching counts.
 */
status_t TiffIfd::setStripOffset(uint32_t offset) {
    // Get old offsets and bytecounts
    sp<TiffEntry> oldOffsets = getEntry(TAG_STRIPOFFSETS);
    if (oldOffsets == NULL) {
        ALOGE("%s: IFD %u does not contain StripOffsets entry.", __FUNCTION__, mIfdId);
        return BAD_VALUE;
    }
    sp<TiffEntry> stripByteCounts = getEntry(TAG_STRIPBYTECOUNTS);
    if (stripByteCounts == NULL) {
        ALOGE("%s: IFD %u does not contain StripByteCounts entry.", __FUNCTION__, mIfdId);
        return BAD_VALUE;
    }
    uint32_t offsetsCount = oldOffsets->getCount();
    uint32_t byteCount = stripByteCounts->getCount();
    if (offsetsCount != byteCount) {
        ALOGE("%s: StripOffsets count (%u) doesn't match StripByteCounts count (%u) in IFD %u",
                __FUNCTION__, offsetsCount, byteCount, mIfdId);
        return BAD_VALUE;
    }
    const uint32_t* stripByteCountsArray = stripByteCounts->getData<uint32_t>();
    size_t numStrips = offsetsCount;
    std::vector<uint32_t> stripOffsets;
    stripOffsets.reserve(numStrips);
    // Calculate updated byte offsets: each strip starts where the previous
    // one ends.
    for (size_t i = 0; i < numStrips; ++i) {
        stripOffsets.push_back(offset);
        offset += stripByteCountsArray[i];
    }
    sp<TiffEntry> newOffsets = TiffWriter::uncheckedBuildEntry(TAG_STRIPOFFSETS, LONG,
            static_cast<uint32_t>(numStrips), UNDEFINED_ENDIAN, &stripOffsets[0]);
    if (newOffsets == NULL) {
        // Fixed typo in log message ("Coult" -> "Could").
        ALOGE("%s: Could not build updated offsets entry in IFD %u", __FUNCTION__, mIfdId);
        return BAD_VALUE;
    }
    // addEntry() replaces the existing StripOffsets entry (same tag key).
    if (addEntry(newOffsets) != OK) {
        ALOGE("%s: Failed to add updated offsets entry in IFD %u", __FUNCTION__, mIfdId);
        return BAD_VALUE;
    }
    return OK;
}
// Sums the StripByteCounts values to give the total strip data size in
// bytes for this IFD.
// NOTE(review): when StripByteCounts is missing this returns BAD_VALUE
// through a uint32_t return type, which callers cannot distinguish from a
// (huge) legitimate size — confirm callers always run addStrip() first.
uint32_t TiffIfd::getStripSize() const {
    sp<TiffEntry> stripByteCounts = getEntry(TAG_STRIPBYTECOUNTS);
    if (stripByteCounts == NULL) {
        ALOGE("%s: IFD %u does not contain StripByteCounts entry.", __FUNCTION__, mIfdId);
        return BAD_VALUE;
    }
    uint32_t count = stripByteCounts->getCount();
    const uint32_t* byteCounts = stripByteCounts->getData<uint32_t>();
    uint32_t total = 0;
    for (size_t i = 0; i < static_cast<size_t>(count); ++i) {
        total += byteCounts[i];
    }
    return total;
}
// Human-readable dump of this IFD: header with ID and entry count, one
// indented line per entry, then the ID of the next IFD in the chain
// (0 when there is none).
std::string TiffIfd::toString() const {
    size_t s = mEntries.size();
    std::string output;
    char buf[1024] = { 0 };
    snprintf(buf, sizeof(buf), "[ifd: %x, num_entries: %zu, entries:\n", getId(), s);
    output.append(buf);
    for(auto it = mEntries.cbegin(); it != mEntries.cend(); ++it) {
        output.append("\t");
        output.append(it->second->toString());
        output.append("\n");
    }
    // BUG FIX: std::string::append() is not printf-style. The old code
    // called append(", next_ifd: %x]", id), which resolves to
    // append(const char*, size_t) and treats the IFD id as a character
    // count, producing a truncated garbage suffix. Format explicitly.
    snprintf(buf, sizeof(buf), ", next_ifd: %x]",
            ((mNextIfd != NULL) ? mNextIfd->getId() : 0));
    output.append(buf);
    return output;
}
// Logs this IFD at INFO level: header with ID and entry count, one line
// per entry, then the next-IFD ID (0 when none).
void TiffIfd::log() const {
    size_t s = mEntries.size();
    ALOGI("[ifd: %x, num_entries: %zu, entries:\n", getId(), s);
    for(auto it = mEntries.cbegin(); it != mEntries.cend(); ++it) {
        ALOGI("\t%s", it->second->toString().c_str());
    }
    ALOGI(", next_ifd: %x]", ((mNextIfd != NULL) ? mNextIfd->getId() : 0));
}
} /*namespace img_utils*/
} /*namespace android*/

@ -0,0 +1,31 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/TiffWritable.h>
#include <img_utils/TiffHelpers.h>

#include <assert.h>

namespace android {
namespace img_utils {

// TiffWritable is an abstract interface; only the (empty) constructor and
// destructor need out-of-line definitions here.
TiffWritable::TiffWritable() {}

TiffWritable::~TiffWritable() {}

} /*namespace img_utils*/
} /*namespace android*/

@ -0,0 +1,425 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "TiffWriter"
#include <img_utils/TiffHelpers.h>
#include <img_utils/TiffWriter.h>
#include <img_utils/TagDefinitions.h>
#include <assert.h>
namespace android {
namespace img_utils {
// NOTE: The KeyedVector-based buildTagMap()/sTagMaps helpers from the
// original AOSP implementation were dead code (#if 0) in this port and have
// been removed; the constructors below build the std::map-based tag maps
// directly. A stray, never-invoked duplicate of Orderable's COMPARE macro
// was removed as well.

// Number of elements in a statically-sized C array.
#define ARRAY_SIZE(array) \
    (sizeof(array) / sizeof((array)[0]))
// Default writer: enables the four standard tag dictionaries (TIFF/EP,
// DNG, EXIF 2.3, TIFF 6.0), each loaded into its own lookup map in the
// same precedence order lookupDefinition() searches them.
TiffWriter::TiffWriter() : mNumTagMaps(DEFAULT_NUM_TAG_MAPS)
{
    mTagMaps.reserve(DEFAULT_NUM_TAG_MAPS);

    mTagMaps.emplace_back();
    std::map<uint16_t, const TagDefinition_t*>& epMap = mTagMaps.back();
    for (size_t i = 0; i < ARRAY_SIZE(TIFF_EP_TAG_DEFINITIONS); ++i) {
        epMap[TIFF_EP_TAG_DEFINITIONS[i].tagId] = &TIFF_EP_TAG_DEFINITIONS[i];
    }

    mTagMaps.emplace_back();
    std::map<uint16_t, const TagDefinition_t*>& dngMap = mTagMaps.back();
    for (size_t i = 0; i < ARRAY_SIZE(DNG_TAG_DEFINITIONS); ++i) {
        dngMap[DNG_TAG_DEFINITIONS[i].tagId] = &DNG_TAG_DEFINITIONS[i];
    }

    mTagMaps.emplace_back();
    std::map<uint16_t, const TagDefinition_t*>& exifMap = mTagMaps.back();
    for (size_t i = 0; i < ARRAY_SIZE(EXIF_2_3_TAG_DEFINITIONS); ++i) {
        exifMap[EXIF_2_3_TAG_DEFINITIONS[i].tagId] = &EXIF_2_3_TAG_DEFINITIONS[i];
    }

    mTagMaps.emplace_back();
    std::map<uint16_t, const TagDefinition_t*>& tiff6Map = mTagMaps.back();
    for (size_t i = 0; i < ARRAY_SIZE(TIFF_6_TAG_DEFINITIONS); ++i) {
        tiff6Map[TIFF_6_TAG_DEFINITIONS[i].tagId] = &TIFF_6_TAG_DEFINITIONS[i];
    }
}
/**
 * Writer restricted to a caller-supplied array of `length` tag-definition
 * maps; only tags present in those maps can be added.
 *
 * Fixes a signed/unsigned mismatch (the loop used `int i` against the
 * size_t `length`) and copies each map directly instead of inserting an
 * empty map and repopulating it entry by entry.
 */
TiffWriter::TiffWriter(std::map<uint16_t, const TagDefinition_t*>* enabledDefinitions,
        size_t length) : mNumTagMaps(length)
{
    mTagMaps.reserve(length);
    for (size_t i = 0; i < length; ++i) {
        mTagMaps.push_back(enabledDefinitions[i]);
    }
}

TiffWriter::~TiffWriter() {}
/**
 * Serializes the full TIFF file to `out` in byte order `end`: header, every
 * linked IFD, then the strip (image) data for each IFD whose offsets were
 * pending, pulled from the StripSource whose getIfd() matches that IFD.
 * `sourcesCount` must equal the number of IFDs with uninitialized strips.
 */
status_t TiffWriter::write(Output* out, StripSource** sources, size_t sourcesCount,
        Endianness end) {
    status_t ret = OK;
    EndianOutput endOut(out, end);
    if (mIfd == NULL) {
        ALOGE("%s: Tiff header is empty.", __FUNCTION__);
        return BAD_VALUE;
    }
    uint32_t totalSize = getTotalSize();
    std::map<uint32_t, uint32_t> offsetVector;
    // Lay out strip data after all IFD tables: assign each pending IFD a
    // word-aligned start offset and remember where its strip block ends.
    for (std::map<uint32_t, sp<TiffIfd> >::iterator it = mNamedIfds.begin(); it != mNamedIfds.end(); ++it) {
        if (it->second->uninitializedOffsets()) {
            uint32_t stripSize = it->second->getStripSize();
            if (it->second->setStripOffset(totalSize) != OK) {
                ALOGE("%s: Could not set strip offsets.", __FUNCTION__);
                return BAD_VALUE;
            }
            totalSize += stripSize;
            WORD_ALIGN(totalSize);
            offsetVector[it->first] = totalSize;
        }
    }
    size_t offVecSize = offsetVector.size();
    if (offVecSize != sourcesCount) {
        ALOGE("%s: Mismatch between number of IFDs with uninitialized strips (%zu) and"
                " sources (%zu).", __FUNCTION__, offVecSize, sourcesCount);
        return BAD_VALUE;
    }
    BAIL_ON_FAIL(writeFileHeader(endOut), ret);
    uint32_t offset = FILE_HEADER_SIZE;
    sp<TiffIfd> ifd = mIfd;
    while(ifd != NULL) {
        BAIL_ON_FAIL(ifd->writeData(offset, &endOut), ret);
        offset += ifd->getSize();
        ifd = ifd->getNextIfd();
    }
#ifndef NDEBUG
    log();
#endif
    // Write each IFD's strip data from the source that declares that IFD.
    for (auto it = offsetVector.begin(); it != offsetVector.end(); ++it) {
        uint32_t ifdKey = it->first;
        uint32_t sizeToWrite = mNamedIfds[ifdKey]->getStripSize();
        bool found = false;
        for (size_t j = 0; j < sourcesCount; ++j) {
            if (sources[j]->getIfd() == ifdKey) {
                // BUG FIX: write from the matching source (index j). The
                // previous code indexed sources[] with this IFD's position
                // in offsetVector, pairing strips with the wrong source
                // whenever the sources array is not in IFD-key order.
                if ((ret = sources[j]->writeToStream(endOut, sizeToWrite)) != OK) {
                    ALOGE("%s: Could not write to stream, received %d.", __FUNCTION__, ret);
                    return ret;
                }
                ZERO_TILL_WORD(&endOut, sizeToWrite, ret);
                found = true;
                break;
            }
        }
        if (!found) {
            ALOGE("%s: No stream for byte strips for IFD %u", __FUNCTION__, ifdKey);
            return BAD_VALUE;
        }
        assert(it->second == endOut.getCurrentOffset());
    }
    return ret;
}
// Serializes the TIFF header and the chain of linked IFDs (without any
// strip data) to `out` in the requested byte order.
status_t TiffWriter::write(Output* out, Endianness end) {
    status_t ret = OK;
    EndianOutput endOut(out, end);

    if (mIfd == NULL) {
        ALOGE("%s: Tiff header is empty.", __FUNCTION__);
        return BAD_VALUE;
    }

    BAIL_ON_FAIL(writeFileHeader(endOut), ret);

    uint32_t offset = FILE_HEADER_SIZE;
    for (sp<TiffIfd> ifd = mIfd; ifd != NULL; ifd = ifd->getNextIfd()) {
        BAIL_ON_FAIL(ifd->writeData(offset, &endOut), ret);
        offset += ifd->getSize();
    }
    return ret;
}
// Searches every enabled tag map, in order, for the definition of the
// given tag ID; returns NULL (and logs) when no map defines it.
const TagDefinition_t* TiffWriter::lookupDefinition(uint16_t tag) const {
    for (size_t i = 0; i < mNumTagMaps; ++i) {
        auto it = mTagMaps[i].find(tag);
        if (it != mTagMaps[i].cend()) {
            return it->second;
        }
    }
    ALOGE("%s: No definition exists for tag with id %x.", __FUNCTION__, tag);
    return NULL;
}
// Returns the entry for `tag` in the named IFD, or NULL when the IFD is
// unknown (the IFD's own lookup logs when the tag itself is missing).
sp<TiffEntry> TiffWriter::getEntry(uint16_t tag, uint32_t ifd) const {
    auto it = mNamedIfds.find(ifd);
    if (it == mNamedIfds.cend()) {
        ALOGE("%s: No IFD %d set for this writer.", __FUNCTION__, ifd);
        return NULL;
    }
    return it->second->getEntry(tag);
}

// Removes `tag`'s entry from the named IFD; silently ignores unknown IFDs.
void TiffWriter::removeEntry(uint16_t tag, uint32_t ifd) {
    auto it = mNamedIfds.find(ifd);
    if (it != mNamedIfds.end()) {
        it->second->removeEntry(tag);
    }
}
// Adds an entry to the named IFD. The entry's tag must have a definition
// in one of the enabled tag maps, and the IFD must already exist.
status_t TiffWriter::addEntry(const sp<TiffEntry>& entry, uint32_t ifd) {
    const uint16_t tag = entry->getTag();
    if (lookupDefinition(tag) == NULL) {
        ALOGE("%s: No definition exists for tag 0x%x.", __FUNCTION__, tag);
        return BAD_INDEX;
    }
    auto it = mNamedIfds.find(ifd);
    if (it == mNamedIfds.end()) {
        ALOGE("%s: No IFD %u exists.", __FUNCTION__, ifd);
        return NAME_NOT_FOUND;
    }
    return it->second->addEntry(entry);
}
// Validates the named IFD's image-dimension tags and installs its
// strip-related tags (RowsPerStrip, StripByteCounts, placeholder
// StripOffsets) via TiffIfd::validateAndSetStripTags().
status_t TiffWriter::addStrip(uint32_t ifd) {
    std::map<uint32_t, sp<TiffIfd> >::iterator it = mNamedIfds.find(ifd);
    if (it == mNamedIfds.end()) {
        ALOGE("%s: Ifd %u doesn't exist, cannot add strip entries.", __FUNCTION__, ifd);
        return BAD_VALUE;
    }
    sp<TiffIfd> selected = it->second;
    return selected->validateAndSetStripTags();
}

// Creates a new top-level IFD with the given ID, links it to the end of
// the IFD chain, and registers it by name. Fails when the ID is taken.
status_t TiffWriter::addIfd(uint32_t ifd) {
    std::map<uint32_t, sp<TiffIfd> >::iterator it = mNamedIfds.find(ifd);
    if (it != mNamedIfds.end()) {
        ALOGE("%s: Ifd with ID 0x%x already exists.", __FUNCTION__, ifd);
        return BAD_VALUE;
    }
    sp<TiffIfd> newIfd = new TiffIfd(ifd);
    // First IFD becomes the chain head; otherwise append after the last.
    if (mIfd == NULL) {
        mIfd = newIfd;
    } else {
        sp<TiffIfd> last = findLastIfd();
        last->setNextIfd(newIfd);
    }
    mNamedIfds[ifd] = newIfd;
    return OK;
}
/**
 * Creates a new sub-IFD under `parentIfd` and registers it under the key
 * `ifd`. For SUBIFD, the new IFD is appended to the parent's TAG_SUBIFDS
 * list; for GPSINFO, at most one sub-IFD may exist per parent.
 *
 * Cleanup: removed leftover `#if 0` blocks and stray comment fragments
 * (`// < 0) {` etc.) from the middle of the rebuild path; logic unchanged.
 */
status_t TiffWriter::addSubIfd(uint32_t parentIfd, uint32_t ifd, SubIfdType type) {
    std::map<uint32_t, sp<TiffIfd> >::iterator it = mNamedIfds.find(ifd);
    if (it != mNamedIfds.end()) {
        ALOGE("%s: Ifd with ID 0x%x already exists.", __FUNCTION__, ifd);
        return BAD_VALUE;
    }
    std::map<uint32_t, sp<TiffIfd> >::iterator parentIt = mNamedIfds.find(parentIfd);
    if (parentIt == mNamedIfds.end()) {
        ALOGE("%s: Parent IFD with ID 0x%x does not exist.", __FUNCTION__, parentIfd);
        return BAD_VALUE;
    }
    sp<TiffIfd> parent = parentIt->second;
    sp<TiffIfd> newIfd = new TiffIfd(ifd);
    uint16_t subIfdTag;
    if (type == SUBIFD) {
        subIfdTag = TAG_SUBIFDS;
    } else if (type == GPSINFO) {
        subIfdTag = TAG_GPSINFO;
    } else {
        ALOGE("%s: Unknown SubIFD type %d.", __FUNCTION__, type);
        return BAD_VALUE;
    }
    sp<TiffEntry> subIfds = parent->getEntry(subIfdTag);
    if (subIfds == NULL) {
        // First sub-IFD of this kind: build the entry with a single element.
        if (buildEntry(subIfdTag, 1, &newIfd, &subIfds) < 0) {
            ALOGE("%s: Failed to build SubIfd entry in IFD 0x%x.", __FUNCTION__, parentIfd);
            return BAD_VALUE;
        }
    } else {
        if (type == GPSINFO) {
            ALOGE("%s: Cannot add GPSInfo SubIFD to IFD %u, one already exists.", __FUNCTION__,
                    ifd);
            return BAD_VALUE;
        }
        // Rebuild the SubIFDs entry from the existing list plus the new IFD.
        std::vector<sp<TiffIfd> > subIfdList;
        const sp<TiffIfd>* oldIfdArray = subIfds->getData<sp<TiffIfd> >();
        subIfdList.insert(subIfdList.end(), oldIfdArray, oldIfdArray + subIfds->getCount());
        subIfdList.push_back(newIfd);
        uint32_t count = subIfdList.size();
        if (buildEntry(subIfdTag, count, &subIfdList[0], &subIfds) < 0) {
            ALOGE("%s: Failed to build SubIfd entry in IFD 0x%x.", __FUNCTION__, parentIfd);
            return BAD_VALUE;
        }
    }
    if (parent->addEntry(subIfds) < 0) {
        ALOGE("%s: Failed to add SubIfd entry in IFD 0x%x.", __FUNCTION__, parentIfd);
        return BAD_VALUE;
    }
    mNamedIfds[ifd] = newIfd;
    return OK;
}
// Default TIFF type for the tag, or UNKNOWN_TAGTYPE when the tag has no
// definition in any enabled map.
TagType TiffWriter::getDefaultType(uint16_t tag) const {
    const TagDefinition_t* definition = lookupDefinition(tag);
    if (definition != NULL) {
        return definition->defaultType;
    }
    ALOGE("%s: Could not find definition for tag %x", __FUNCTION__, tag);
    return UNKNOWN_TAGTYPE;
}

// Fixed element count for the tag, or 0 when the tag has no definition.
uint32_t TiffWriter::getDefaultCount(uint16_t tag) const {
    const TagDefinition_t* definition = lookupDefinition(tag);
    if (definition != NULL) {
        return definition->fixedCount;
    }
    ALOGE("%s: Could not find definition for tag %x", __FUNCTION__, tag);
    return 0;
}

// True when an IFD with the given ID has been registered with this writer.
bool TiffWriter::hasIfd(uint32_t ifd) const {
    return mNamedIfds.count(ifd) != 0;
}

// True when some enabled tag map defines the given tag.
bool TiffWriter::checkIfDefined(uint16_t tag) const {
    return lookupDefinition(tag) != NULL;
}

// Name of the tag per its definition, or NULL when undefined.
const char* TiffWriter::getTagName(uint16_t tag) const {
    const TagDefinition_t* definition = lookupDefinition(tag);
    return (definition != NULL) ? definition->tagName : NULL;
}
// Walks the singly linked IFD chain starting at mIfd and returns its last
// element; returns NULL when the chain is empty.
sp<TiffIfd> TiffWriter::findLastIfd() {
    sp<TiffIfd> current = mIfd;
    if (current == NULL) {
        return current;
    }
    for (sp<TiffIfd> next = current->getNextIfd(); next != NULL;
            next = current->getNextIfd()) {
        current = std::move(next);
    }
    return current;
}
/*
 * Writes the 8-byte TIFF file header to `out`: endianness marker, TIFF magic,
 * and the offset of the first IFD. BAIL_ON_FAIL returns early with the first
 * failing write's status, so statement order matters here.
 */
status_t TiffWriter::writeFileHeader(EndianOutput& out) {
    status_t ret = OK;
    uint16_t endMarker = (out.getEndianness() == BIG) ? BIG_ENDIAN_MARKER : LITTLE_ENDIAN_MARKER;
    BAIL_ON_FAIL(out.write(&endMarker, 0, 1), ret);
    uint16_t tiffMarker = TIFF_FILE_MARKER;
    BAIL_ON_FAIL(out.write(&tiffMarker, 0, 1), ret);
    // The first IFD is laid out immediately after the fixed-size header.
    uint32_t offsetMarker = FILE_HEADER_SIZE;
    BAIL_ON_FAIL(out.write(&offsetMarker, 0, 1), ret);
    return ret;
}
// Total serialized size in bytes: fixed header plus the size of every IFD in
// the chain.
uint32_t TiffWriter::getTotalSize() const {
    uint32_t totalSize = FILE_HEADER_SIZE;
    for (sp<TiffIfd> ifd = mIfd; ifd != NULL; ifd = ifd->getNextIfd()) {
        totalSize += ifd->getSize();
    }
    return totalSize;
}
// Logs this writer's contents: one log entry per named IFD.
void TiffWriter::log() const {
    ALOGI("%s: TiffWriter:", __FUNCTION__);
    // Removed an unused local (`size_t length = mNamedIfds.size();`); the map
    // is iterated directly.
    for (auto it = mNamedIfds.begin(); it != mNamedIfds.end(); ++it) {
        it->second->log();
    }
}
} /*namespace img_utils*/
} /*namespace android*/

@ -0,0 +1,53 @@
/*
* Copyright 2011, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cutils/android_reboot.h>
#include <stdio.h>
#include <stdlib.h>
#include <cutils/properties.h>
#define TAG "android_reboot"
/*
 * Requests a reboot/shutdown by writing the restart command into the
 * ANDROID_RB_PROPERTY system property (init performs the actual action).
 * `arg` is an optional sub-reason appended as ",<arg>". `flags` is unused.
 * Returns the property_set() result, or -1 on bad command / OOM.
 */
int android_reboot(unsigned cmd, int /*flags*/, const char* arg) {
    const char* restart_cmd = NULL;
    switch (cmd) {
        case ANDROID_RB_RESTART: // deprecated
        case ANDROID_RB_RESTART2:
            restart_cmd = "reboot";
            break;
        case ANDROID_RB_POWEROFF:
            restart_cmd = "shutdown";
            break;
        case ANDROID_RB_THERMOFF:
            restart_cmd = "shutdown,thermal";
            break;
    }
    if (!restart_cmd) return -1;

    /* Property value is "<cmd>" or "<cmd>,<arg>" when an argument is given. */
    char* prop_value;
    int ret = (arg && arg[0]) ? asprintf(&prop_value, "%s,%s", restart_cmd, arg)
                              : asprintf(&prop_value, "%s", restart_cmd);
    if (ret < 0) return -1;

    ret = property_set(ANDROID_RB_PROPERTY, prop_value);
    free(prop_value);
    return ret;
}

@ -0,0 +1,28 @@
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
 * Cache-geometry tuning constants consumed by the optimized string/memory
 * routines to choose between cached and non-temporal code paths.
 */
#if defined(__slm__)
/* Values are optimized for Silvermont */
#define SHARED_CACHE_SIZE (1024*1024) /* Silvermont L2 Cache */
#define DATA_CACHE_SIZE (24*1024) /* Silvermont L1 Data Cache */
#else
/* Values are optimized for Atom */
#define SHARED_CACHE_SIZE (512*1024) /* Atom L2 Cache */
#define DATA_CACHE_SIZE (24*1024) /* Atom L1 Data Cache */
#endif
/* Half-size thresholds commonly used as crossover points. */
#define SHARED_CACHE_SIZE_HALF (SHARED_CACHE_SIZE / 2)
#define DATA_CACHE_SIZE_HALF (DATA_CACHE_SIZE / 2)

@ -0,0 +1,22 @@
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Cache-geometry constants for the 64-bit build (Silvermont only). */
/* Values are optimized for Silvermont */
#define SHARED_CACHE_SIZE (1024*1024) /* Silvermont L2 Cache */
#define DATA_CACHE_SIZE (24*1024) /* Silvermont L1 Data Cache */
/* Half-size thresholds commonly used as crossover points. */
#define SHARED_CACHE_SIZE_HALF (SHARED_CACHE_SIZE / 2)
#define DATA_CACHE_SIZE_HALF (DATA_CACHE_SIZE / 2)

@ -0,0 +1,468 @@
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cutils/ashmem.h>
/*
* Implementation of the user-space ashmem API for devices, which have our
* ashmem-enabled kernel. See ashmem-sim.c for the "fake" tmp-based version,
* used by the simulator.
*/
#define LOG_TAG "ashmem"
#include <errno.h>
#include <fcntl.h>
#include <linux/ashmem.h>
#include <linux/memfd.h>
#include <log/log.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <unistd.h>
#include <android-base/file.h>
#include <android-base/properties.h>
#include <android-base/strings.h>
#include <android-base/unique_fd.h>
/* Will be added to UAPI once upstream change is merged */
#define F_SEAL_FUTURE_WRITE 0x0010
/*
* The minimum vendor API level at and after which it is safe to use memfd.
* This is to facilitate deprecation of ashmem.
*/
#define MIN_MEMFD_VENDOR_API_LEVEL 29
#define MIN_MEMFD_VENDOR_API_LEVEL_CHAR 'Q'
/* ashmem identity */
static dev_t __ashmem_rdev;
/*
* If we trigger a signal handler in the middle of locked activity and the
* signal handler calls ashmem, we could get into a deadlock state.
*/
static pthread_mutex_t __ashmem_lock = PTHREAD_MUTEX_INITIALIZER;
/*
* has_memfd_support() determines if the device can use memfd. memfd support
* has been there for long time, but certain things in it may be missing. We
* check for needed support in it. Also we check if the VNDK version of
* libcutils being used is new enough, if its not, then we cannot use memfd
* since the older copies may be using ashmem so we just use ashmem. Once all
* Android devices that are getting updates are new enough (ex, they were
* originally shipped with Android release > P), then we can just use memfd and
* delete all ashmem code from libcutils (while preserving the interface).
*
* NOTE:
* The sys.use_memfd property is set by default to false in Android
* to temporarily disable memfd, till vendor and apps are ready for it.
* The main issue: either apps or vendor processes can directly make ashmem
* IOCTLs on FDs they receive by assuming they are ashmem, without going
* through libcutils. Such fds could have very well be originally created with
* libcutils hence they could be memfd. Thus the IOCTLs will break.
*
* Set default value of sys.use_memfd property to true once the issue is
* resolved, so that the code can then self-detect if kernel support is present
* on the device. The property can also set to true from adb shell, for
* debugging.
*/
static bool debug_log = false; /* set to true for verbose logging and other debug */
static bool pin_deprecation_warn = true; /* Log the pin deprecation warning only once */
/* Determine if vendor processes would be ok with memfd in the system:
*
* If VNDK is using older libcutils, don't use memfd. This is so that the
* same shared memory mechanism is used across binder transactions between
* vendor partition processes and system partition processes.
*/
/* Returns true when vendor processes can safely interoperate with memfd,
 * based on ro.vndk.version: numeric versions must be >= 29, letter releases
 * must be >= 'Q' (case-insensitive). Missing/invalid versions disable memfd. */
static bool check_vendor_memfd_allowed() {
    std::string vndk_version = android::base::GetProperty("ro.vndk.version", "");
    if (vndk_version == "") {
        ALOGE("memfd: ro.vndk.version not defined or invalid (%s), this is mandated since P.\n",
              vndk_version.c_str());
        return false;
    }
    /* No issues if vendor is targetting current Dessert */
    if (vndk_version == "current") {
        return false;
    }
    /* Check if VNDK version is a number and act on it */
    char* p;
    long int vers = strtol(vndk_version.c_str(), &p, 10);
    if (*p == 0) {
        if (vers < MIN_MEMFD_VENDOR_API_LEVEL) {
            ALOGI("memfd: device VNDK version (%s) is < Q so using ashmem.\n",
                  vndk_version.c_str());
            return false;
        }
        return true;
    }
    /* If its not a number, assume string, but check if its a sane string */
    if (tolower(vndk_version[0]) < 'a' || tolower(vndk_version[0]) > 'z') {
        ALOGE("memfd: ro.vndk.version not defined or invalid (%s), this is mandated since P.\n",
              vndk_version.c_str());
        return false;
    }
    /* Letter release: compare the first character against 'Q'. */
    if (tolower(vndk_version[0]) < tolower(MIN_MEMFD_VENDOR_API_LEVEL_CHAR)) {
        ALOGI("memfd: device is using VNDK version (%s) which is less than Q. Use ashmem only.\n",
              vndk_version.c_str());
        return false;
    }
    return true;
}
/* Determine if memfd can be supported. This is just one-time hardwork
* which will be cached by the caller.
*/
static bool __has_memfd_support() {
    /* Vendor and system must agree on the shared-memory mechanism. */
    if (check_vendor_memfd_allowed() == false) {
        return false;
    }
    /* Used to turn on/off the detection at runtime, in the future this
     * property will be removed once we switch everything over to ashmem.
     * Currently it is used only for debugging to switch the system over.
     */
    if (!android::base::GetBoolProperty("sys.use_memfd", false)) {
        if (debug_log) {
            ALOGD("sys.use_memfd=false so memfd disabled\n");
        }
        return false;
    }
    /* Check if kernel support exists, otherwise fall back to ashmem */
    android::base::unique_fd fd(
            syscall(__NR_memfd_create, "test_android_memfd", MFD_ALLOW_SEALING));
    if (fd == -1) {
        ALOGE("memfd_create failed: %s, no memfd support.\n", strerror(errno));
        return false;
    }
    /* F_SEAL_FUTURE_WRITE support is also required (see memfd_set_prot_region). */
    if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) == -1) {
        ALOGE("fcntl(F_ADD_SEALS) failed: %s, no memfd support.\n", strerror(errno));
        return false;
    }
    if (debug_log) {
        ALOGD("memfd: device has memfd support, using it\n");
    }
    return true;
}
/*
 * Cached, process-wide answer to "can we use memfd?". The expensive probe in
 * __has_memfd_support() runs exactly once via C++ static-local initialization.
 */
static bool has_memfd_support() {
    static const bool memfd_supported = __has_memfd_support();
    return memfd_supported;
}
/* Per-boot ashmem device node name: "/dev/ashmem<boot_id>"; "" on failure. */
static std::string get_ashmem_device_path() {
    static const std::string boot_id_path = "/proc/sys/kernel/random/boot_id";
    std::string boot_id;
    if (!android::base::ReadFileToString(boot_id_path, &boot_id)) {
        ALOGE("Failed to read %s: %s.\n", boot_id_path.c_str(), strerror(errno));
        return "";
    }
    return "/dev/ashmem" + android::base::Trim(boot_id);
}
/* logistics of getting file descriptor for ashmem */
/* Opens the per-boot ashmem device (falling back to /dev/ashmem) and records
 * its device identity in __ashmem_rdev. Caller must hold __ashmem_lock. */
static int __ashmem_open_locked()
{
    static const std::string ashmem_device_path = get_ashmem_device_path();
    if (ashmem_device_path.empty()) {
        return -1;
    }
    int fd = TEMP_FAILURE_RETRY(open(ashmem_device_path.c_str(), O_RDWR | O_CLOEXEC));
    // fallback for APEX w/ use_vendor on Q, which would have still used /dev/ashmem
    if (fd < 0) {
        fd = TEMP_FAILURE_RETRY(open("/dev/ashmem", O_RDWR | O_CLOEXEC));
    }
    if (fd < 0) {
        return fd;
    }
    struct stat st;
    int ret = TEMP_FAILURE_RETRY(fstat(fd, &st));
    if (ret < 0) {
        /* Preserve fstat's errno across close(). */
        int save_errno = errno;
        close(fd);
        errno = save_errno;
        return ret;
    }
    if (!S_ISCHR(st.st_mode) || !st.st_rdev) {
        /* Not the character device we expected; report ENOTTY. */
        close(fd);
        errno = ENOTTY;
        return -1;
    }
    /* Remember the device identity so fds can later be verified as ashmem. */
    __ashmem_rdev = st.st_rdev;
    return fd;
}
/* Lock-acquiring wrapper around __ashmem_open_locked(). */
static int __ashmem_open()
{
    pthread_mutex_lock(&__ashmem_lock);
    const int fd = __ashmem_open_locked();
    pthread_mutex_unlock(&__ashmem_lock);
    return fd;
}
/* Make sure file descriptor references ashmem, negative number means false */
/* Verifies `fd` by comparing its char-device identity against the cached
 * ashmem rdev (populating the cache on first use). Returns 0 when it is
 * ashmem; otherwise -1 with errno=ENOTTY, or aborts when `fatal` is set. */
static int __ashmem_is_ashmem(int fd, int fatal)
{
    dev_t rdev;
    struct stat st;
    if (fstat(fd, &st) < 0) {
        return -1;
    }
    rdev = 0; /* Too much complexity to sniff __ashmem_rdev */
    if (S_ISCHR(st.st_mode) && st.st_rdev) {
        pthread_mutex_lock(&__ashmem_lock);
        rdev = __ashmem_rdev;
        if (rdev) {
            pthread_mutex_unlock(&__ashmem_lock);
        } else {
            /* Cache is cold: open the device once to learn its identity.
             * Renamed from `fd` — the original shadowed the fd under test. */
            int probe_fd = __ashmem_open_locked();
            if (probe_fd < 0) {
                pthread_mutex_unlock(&__ashmem_lock);
                return -1;
            }
            rdev = __ashmem_rdev;
            pthread_mutex_unlock(&__ashmem_lock);
            close(probe_fd);
        }
        if (st.st_rdev == rdev) {
            return 0;
        }
    }
    if (fatal) {
        if (rdev) {
            LOG_ALWAYS_FATAL("illegal fd=%d mode=0%o rdev=%d:%d expected 0%o %d:%d",
              fd, st.st_mode, major(st.st_rdev), minor(st.st_rdev),
              S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IRGRP,
              major(rdev), minor(rdev));
        } else {
            LOG_ALWAYS_FATAL("illegal fd=%d mode=0%o rdev=%d:%d expected 0%o",
              fd, st.st_mode, major(st.st_rdev), minor(st.st_rdev),
              S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IRGRP);
        }
        /* NOTREACHED */
    }
    errno = ENOTTY;
    return -1;
}
/* After a failed ashmem ioctl: if it failed with ENOTTY, re-run the identity
 * check fatally (aborting on a non-ashmem fd). Passes `result` through. */
static int __ashmem_check_failure(int fd, int result)
{
    if (result != -1 || errno != ENOTTY) {
        return result;
    }
    __ashmem_is_ashmem(fd, 1);
    return result;
}
/* Detects fds that are really ashmem in a memfd-enabled process; logs a
 * one-time warning when that mismatch is seen. */
static bool memfd_is_ashmem(int fd) {
    static bool fd_check_error_once = false;
    if (__ashmem_is_ashmem(fd, 0) != 0) {
        return false;
    }
    if (!fd_check_error_once) {
        ALOGE("memfd: memfd expected but ashmem fd used - please use libcutils.\n");
        fd_check_error_once = true;
    }
    return true;
}
/* Returns nonzero when `fd` is a valid shared-memory region (memfd or ashmem). */
int ashmem_valid(int fd)
{
    const bool treat_as_memfd = has_memfd_support() && !memfd_is_ashmem(fd);
    if (treat_as_memfd) {
        return 1;
    }
    return __ashmem_is_ashmem(fd, 0) >= 0;
}
static int memfd_create_region(const char* name, size_t size) {
android::base::unique_fd fd(syscall(__NR_memfd_create, name, MFD_ALLOW_SEALING));
if (fd == -1) {
ALOGE("memfd_create(%s, %zd) failed: %s\n", name, size, strerror(errno));
return -1;
}
if (ftruncate(fd, size) == -1) {
ALOGE("ftruncate(%s, %zd) failed for memfd creation: %s\n", name, size, strerror(errno));
return -1;
}
if (debug_log) {
ALOGE("memfd_create(%s, %zd) success. fd=%d\n", name, size, fd.get());
}
return fd.release();
}
/*
 * ashmem_create_region - creates a new ashmem region and returns the file
 * descriptor, or <0 on error
 *
 * `name' is an optional label to give the region (visible in /proc/pid/maps)
 * `size' is the size of the region, in page-aligned bytes
 */
int ashmem_create_region(const char *name, size_t size)
{
    int ret, save_errno;
    /* Prefer memfd when the device supports it. */
    if (has_memfd_support()) {
        return memfd_create_region(name ? name : "none", size);
    }
    int fd = __ashmem_open();
    if (fd < 0) {
        return fd;
    }
    if (name) {
        /* Name is truncated to ASHMEM_NAME_LEN - 1 bytes by strlcpy. */
        char buf[ASHMEM_NAME_LEN] = {0};
        strlcpy(buf, name, sizeof(buf));
        ret = TEMP_FAILURE_RETRY(ioctl(fd, ASHMEM_SET_NAME, buf));
        if (ret < 0) {
            goto error;
        }
    }
    ret = TEMP_FAILURE_RETRY(ioctl(fd, ASHMEM_SET_SIZE, size));
    if (ret < 0) {
        goto error;
    }
    return fd;
error:
    /* Preserve the failing ioctl's errno across close(). */
    save_errno = errno;
    close(fd);
    errno = save_errno;
    return ret;
}
/* memfd equivalent of ASHMEM_SET_PROT_MASK: removing PROT_WRITE is done by
 * sealing future writes; any other mask is a no-op. */
static int memfd_set_prot_region(int fd, int prot) {
    /* Writable regions need no seal. */
    if ((prot & PROT_WRITE) != 0) {
        return 0;
    }
    if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) != -1) {
        return 0;
    }
    ALOGE("memfd_set_prot_region(%d, %d): F_SEAL_FUTURE_WRITE seal failed: %s\n", fd, prot,
          strerror(errno));
    return -1;
}
/* Restricts the allowed mmap protection of the region to `prot`. */
int ashmem_set_prot_region(int fd, int prot)
{
    if (has_memfd_support() && !memfd_is_ashmem(fd)) {
        return memfd_set_prot_region(fd, prot);
    }
    int result = TEMP_FAILURE_RETRY(ioctl(fd, ASHMEM_SET_PROT_MASK, prot));
    return __ashmem_check_failure(fd, result);
}
/* Pins [offset, offset+len) so the kernel may not reclaim it. Deprecated;
 * memfd regions are never reclaimed, so this is a no-op for them. */
int ashmem_pin_region(int fd, size_t offset, size_t len)
{
    /* Warn once per process (always when debug_log is set). The previous
     * condition `!pin_deprecation_warn || debug_log` could never fire because
     * pin_deprecation_warn is initialized to true; restore warn-once logic. */
    if (pin_deprecation_warn || debug_log) {
        ALOGE("Pinning is deprecated since Android Q. Please use trim or other methods.\n");
        pin_deprecation_warn = false;
    }
    if (has_memfd_support() && !memfd_is_ashmem(fd)) {
        return 0;
    }
    // TODO: should LP64 reject too-large offset/len?
    ashmem_pin pin = { static_cast<uint32_t>(offset), static_cast<uint32_t>(len) };
    return __ashmem_check_failure(fd, TEMP_FAILURE_RETRY(ioctl(fd, ASHMEM_PIN, &pin)));
}
/* Unpins [offset, offset+len), allowing the kernel to reclaim it. Deprecated;
 * no-op for memfd-backed regions. */
int ashmem_unpin_region(int fd, size_t offset, size_t len)
{
    /* Warn once per process (always when debug_log is set). The previous
     * condition `!pin_deprecation_warn || debug_log` could never fire because
     * pin_deprecation_warn is initialized to true; restore warn-once logic. */
    if (pin_deprecation_warn || debug_log) {
        ALOGE("Pinning is deprecated since Android Q. Please use trim or other methods.\n");
        pin_deprecation_warn = false;
    }
    if (has_memfd_support() && !memfd_is_ashmem(fd)) {
        return 0;
    }
    // TODO: should LP64 reject too-large offset/len?
    ashmem_pin pin = { static_cast<uint32_t>(offset), static_cast<uint32_t>(len) };
    return __ashmem_check_failure(fd, TEMP_FAILURE_RETRY(ioctl(fd, ASHMEM_UNPIN, &pin)));
}
/* Returns the region size in bytes, or -1 (with errno set) on failure. */
int ashmem_get_size_region(int fd)
{
    if (has_memfd_support() && !memfd_is_ashmem(fd)) {
        /* For memfd the region size is simply the file size. */
        struct stat sb;
        if (fstat(fd, &sb) == -1) {
            ALOGE("ashmem_get_size_region(%d): fstat failed: %s\n", fd, strerror(errno));
            return -1;
        }
        if (debug_log) {
            ALOGD("ashmem_get_size_region(%d): %d\n", fd, static_cast<int>(sb.st_size));
        }
        return sb.st_size;
    }
    return __ashmem_check_failure(fd, TEMP_FAILURE_RETRY(ioctl(fd, ASHMEM_GET_SIZE, NULL)));
}

@ -0,0 +1,96 @@
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cutils/ashmem.h>
/*
* Implementation of the user-space ashmem API for the simulator, which lacks
* an ashmem-enabled kernel. See ashmem-dev.c for the real ashmem-based version.
*/
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <utils/Compat.h>
/* fstat() `fd` into *buf and verify it looks like a simulated ashmem region
 * (an unlinked regular file). Sets errno=ENOTTY when the shape is wrong. */
static bool ashmem_validate_stat(int fd, struct stat* buf) {
    if (fstat(fd, buf) == -1) {
        return false;
    }
    /*
     * Check if this is an "ashmem" region.
     * TODO: This is very hacky, and can easily break.
     * We need some reliable indicator.
     */
    const bool looks_like_ashmem = (buf->st_nlink == 0) && S_ISREG(buf->st_mode);
    if (!looks_like_ashmem) {
        errno = ENOTTY;
        return false;
    }
    return true;
}
/* Nonzero when `fd` passes the simulated-ashmem shape check. */
int ashmem_valid(int fd) {
    struct stat buf;
    return ashmem_validate_stat(fd, &buf) ? 1 : 0;
}
/* Simulated ashmem region: an anonymous (immediately unlinked) temp file of
 * `size` bytes. The name argument is ignored. Returns fd or -1. */
int ashmem_create_region(const char* /*ignored*/, size_t size) {
    char pattern[PATH_MAX];
    snprintf(pattern, sizeof(pattern), "/tmp/android-ashmem-%d-XXXXXXXXX", getpid());
    int fd = mkstemp(pattern);
    if (fd == -1) {
        return -1;
    }
    /* Unlink right away so the file vanishes when the fd is closed. */
    unlink(pattern);
    if (TEMP_FAILURE_RETRY(ftruncate(fd, size)) == -1) {
        close(fd);
        return -1;
    }
    return fd;
}
/* Protection masks are not enforced in the simulator; always succeeds. */
int ashmem_set_prot_region(int /*fd*/, int /*prot*/) {
    return 0;
}
/* The simulator never purges regions; report "not purged". */
int ashmem_pin_region(int /*fd*/, size_t /*offset*/, size_t /*len*/) {
    return 0 /*ASHMEM_NOT_PURGED*/;
}
/* Unpinning is a no-op in the simulator; report "is unpinned". */
int ashmem_unpin_region(int /*fd*/, size_t /*offset*/, size_t /*len*/) {
    return 0 /*ASHMEM_IS_UNPINNED*/;
}
/* Size of a simulated region (its file size), or -1 for a non-ashmem fd. */
int ashmem_get_size_region(int fd)
{
    struct stat buf;
    if (!ashmem_validate_stat(fd, &buf)) {
        return -1;
    }
    return (int) buf.st_size;
}

@ -0,0 +1,128 @@
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <private/android_filesystem_config.h>
#include <private/canned_fs_config.h>
#include <private/fs_config.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
typedef struct {
const char* path;
unsigned uid;
unsigned gid;
unsigned mode;
uint64_t capabilities;
} Path;
static Path* canned_data = NULL;
static int canned_alloc = 0;
static int canned_used = 0;
static int path_compare(const void* a, const void* b) {
return strcmp(((Path*)a)->path, ((Path*)b)->path);
}
int load_canned_fs_config(const char* fn) {
char buf[PATH_MAX + 200];
FILE* f;
f = fopen(fn, "r");
if (f == NULL) {
fprintf(stderr, "failed to open %s: %s\n", fn, strerror(errno));
return -1;
}
while (fgets(buf, sizeof(buf), f)) {
Path* p;
char* token;
char* line = buf;
bool rootdir;
while (canned_used >= canned_alloc) {
canned_alloc = (canned_alloc+1) * 2;
canned_data = (Path*) realloc(canned_data, canned_alloc * sizeof(Path));
}
p = canned_data + canned_used;
if (line[0] == '/') line++;
rootdir = line[0] == ' ';
p->path = strdup(rootdir ? "" : strtok(line, " "));
p->uid = atoi(strtok(rootdir ? line : NULL, " "));
p->gid = atoi(strtok(NULL, " "));
p->mode = strtol(strtok(NULL, " "), NULL, 8); // mode is in octal
p->capabilities = 0;
do {
token = strtok(NULL, " ");
if (token && strncmp(token, "capabilities=", 13) == 0) {
p->capabilities = strtoll(token+13, NULL, 0);
break;
}
} while (token);
canned_used++;
}
fclose(f);
qsort(canned_data, canned_used, sizeof(Path), path_compare);
printf("loaded %d fs_config entries\n", canned_used);
return 0;
}
/* Set nonzero to cross-check lookups against the built-in fs_config(). */
static const int kDebugCannedFsConfig = 0;
/*
 * Looks up `path` in the table built by load_canned_fs_config() and returns
 * its uid/gid/mode/capabilities through the out parameters. The table is
 * authoritative: a missing path terminates the process with exit(1).
 * `dir` and `target_out_path` are only used by the debug cross-check.
 */
void canned_fs_config(const char* path, int dir, const char* target_out_path,
                      unsigned* uid, unsigned* gid, unsigned* mode, uint64_t* capabilities) {
    Path key, *p;
    key.path = path;
    if (path[0] == '/') key.path++; // canned paths lack the leading '/'
    p = (Path*) bsearch(&key, canned_data, canned_used, sizeof(Path), path_compare);
    if (p == NULL) {
        fprintf(stderr, "failed to find [%s] in canned fs_config\n", path);
        exit(1);
    }
    *uid = p->uid;
    *gid = p->gid;
    *mode = p->mode;
    *capabilities = p->capabilities;
    if (kDebugCannedFsConfig) {
        // for debugging, run the built-in fs_config and compare the results.
        unsigned c_uid, c_gid, c_mode;
        uint64_t c_capabilities;
        fs_config(path, dir, target_out_path, &c_uid, &c_gid, &c_mode, &c_capabilities);
        if (c_uid != *uid) printf("%s uid %d %d\n", path, *uid, c_uid);
        if (c_gid != *gid) printf("%s gid %d %d\n", path, *gid, c_gid);
        if (c_mode != *mode) printf("%s mode 0%o 0%o\n", path, *mode, c_mode);
        if (c_capabilities != *capabilities) {
            printf("%s capabilities %" PRIx64 " %" PRIx64 "\n",
                   path,
                   *capabilities,
                   c_capabilities);
        }
    }
}

@ -0,0 +1,328 @@
/*
* Copyright (C) 2007 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cutils/config_utils.h>
#include <string.h>
#include <ctype.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <cutils/misc.h>
/* Allocates a zeroed cnode; NULL name/value default to "". Returns NULL on
 * allocation failure. The strings are referenced, not copied. */
cnode* config_node(const char *name, const char *value)
{
    cnode* node = static_cast<cnode*>(calloc(sizeof(cnode), 1));
    if (node != NULL) {
        node->name = (name != NULL) ? name : "";
        node->value = (value != NULL) ? value : "";
    }
    return node;
}
/* Finds the child of `root` named `name`, or NULL if absent. */
cnode* config_find(cnode *root, const char *name)
{
    /* we walk the whole list, as we need to return the last (newest) entry */
    cnode* match = NULL;
    for (cnode* node = root->first_child; node != NULL; node = node->next) {
        if (strcmp(node->name, name) == 0) {
            match = node;
        }
    }
    return match;
}
/* Appends a fresh child node named `name` to root's child list and returns it. */
static cnode* _config_create(cnode *root, const char *name)
{
    cnode* node = config_node(name, NULL);
    if (root->last_child != NULL) {
        root->last_child->next = node;
    } else {
        root->first_child = node;
    }
    root->last_child = node;
    return node;
}
/* Boolean lookup: values starting with 'y', 'Y' or '1' are true; a missing
 * entry yields `_default`. */
int config_bool(cnode *root, const char *name, int _default)
{
    cnode* node = config_find(root, name);
    if (node == NULL) {
        return _default;
    }
    const char first = node->value[0];
    return (first == 'y' || first == 'Y' || first == '1') ? 1 : 0;
}
/* String lookup: returns the node's value, or `_default` when absent. */
const char* config_str(cnode *root, const char *name, const char *_default)
{
    cnode* node = config_find(root, name);
    return (node != NULL) ? node->value : _default;
}
/* Sets `name` to `value` under root, creating the node if needed. The value
 * string is referenced, not copied. */
void config_set(cnode *root, const char *name, const char *value)
{
    cnode* node = config_find(root, name);
    if (node == NULL) {
        node = _config_create(root, name);
    }
    node->value = value;
}
/* Token kinds produced by the lexer. */
#define T_EOF 0
#define T_TEXT 1
#define T_DOT 2
#define T_OBRACE 3
#define T_CBRACE 4

/* Lexer state over a mutable, NUL-terminated input buffer. */
typedef struct
{
    char *data;   /* read cursor; tokens are NUL-terminated in place */
    char *text;   /* start of the most recent T_TEXT token */
    int len;      /* NOTE(review): appears unused here -- confirm before removing */
    char next;    /* single pushed-back character, 0 when empty */
} cstate;
/*
 * Tokenizer for config_load(). Destructive: it NUL-terminates tokens in the
 * input buffer in place. When `value` is nonzero, the rest of the line (minus
 * trailing whitespace) is one T_TEXT token; otherwise text stops at
 * whitespace or a '.', '{' or '}' (which is pushed back via cs->next).
 */
static int _lex(cstate *cs, int value)
{
    char c;
    char *s;
    char *data;
    data = cs->data;
    /* Consume a previously pushed-back character first. */
    if(cs->next != 0) {
        c = cs->next;
        cs->next = 0;
        goto got_c;
    }
restart:
    for(;;) {
        c = *data++;
    got_c:
        if(isspace(c))
            continue;
        switch(c) {
        case 0:
            return T_EOF;
        case '#':
            /* '#' comment: skip to end of line, then restart. */
            for(;;) {
                switch(*data) {
                case 0:
                    cs->data = data;
                    return T_EOF;
                case '\n':
                    cs->data = data + 1;
                    goto restart;
                default:
                    data++;
                }
            }
            break;
        case '.':
            cs->data = data;
            return T_DOT;
        case '{':
            cs->data = data;
            return T_OBRACE;
        case '}':
            cs->data = data;
            return T_CBRACE;
        default:
            s = data - 1;
            if(value) {
                /* Value mode: take everything up to newline/EOF... */
                for(;;) {
                    if(*data == 0) {
                        cs->data = data;
                        break;
                    }
                    if(*data == '\n') {
                        cs->data = data + 1;
                        *data-- = 0;
                        break;
                    }
                    data++;
                }
                /* strip trailing whitespace */
                while(data > s){
                    if(!isspace(*data)) break;
                    *data-- = 0;
                }
                goto got_text;
            } else {
                /* Name mode: stop at whitespace or a structural character
                 * (pushed back through cs->next for the next call). */
                for(;;) {
                    if(isspace(*data)) {
                        *data = 0;
                        cs->data = data + 1;
                        goto got_text;
                    }
                    switch(*data) {
                    case 0:
                        cs->data = data;
                        goto got_text;
                    case '.':
                    case '{':
                    case '}':
                        cs->next = *data;
                        *data = 0;
                        cs->data = data + 1;
                        goto got_text;
                    default:
                        data++;
                    }
                }
            }
        }
    }
got_text:
    cs->text = s;
    return T_TEXT;
}
/* Token-tracing wrapper around _lex(); enable by flipping `#if 0` to 1. */
#if 0
char *TOKENNAMES[] = { "EOF", "TEXT", "DOT", "OBRACE", "CBRACE" };
static int lex(cstate *cs, int value)
{
    int tok = _lex(cs, value);
    printf("TOKEN(%d) %s %s\n", value, TOKENNAMES[tok],
           tok == T_TEXT ? cs->text : "");
    return tok;
}
#else
#define lex(cs,v) _lex(cs,v)
#endif
static int parse_expr(cstate *cs, cnode *node);
/* Parses "name value"/"name { ... }" entries until the matching '}'.
 * Returns 0 on success, -1 on any unexpected token or nested failure. */
static int parse_block(cstate *cs, cnode *node)
{
    for (;;) {
        int tok = lex(cs, 0);
        if (tok == T_CBRACE) {
            return 0;
        }
        if (tok != T_TEXT) {
            return -1;
        }
        if (parse_expr(cs, node)) {
            return -1;
        }
    }
}
/*
 * Parses one expression whose leading T_TEXT token is already in cs->text.
 * Reuses an existing valueless child (so repeated blocks merge); otherwise
 * appends a new child under root. Handles "a.b.c value", "name value" and
 * "name { ... }" forms. Returns 0 on success, -1 on parse error.
 */
static int parse_expr(cstate *cs, cnode *root)
{
    cnode *node;
    /* last token was T_TEXT */
    node = config_find(root, cs->text);
    if(!node || *node->value)
        node = _config_create(root, cs->text);
    for(;;) {
        switch(lex(cs, 1)) {
        case T_DOT:
            /* Dotted path: descend one level per ".name" component. */
            if(lex(cs, 0) != T_TEXT)
                return -1;
            node = _config_create(node, cs->text);
            continue;
        case T_TEXT:
            /* Leaf assignment: the rest of the line is the value. */
            node->value = cs->text;
            return 0;
        case T_OBRACE:
            /* Nested block under this node. */
            return parse_block(cs, node);
        default:
            return -1;
        }
    }
}
/* Parses the NUL-terminated buffer `data` (modified in place) into children
 * of `root`. Stops silently at EOF or on the first parse error. */
void config_load(cnode *root, char *data)
{
    if (data == 0) {
        return;
    }
    cstate cs;
    cs.data = data;
    cs.next = 0;
    while (lex(&cs, 0) == T_TEXT) {
        if (parse_expr(&cs, root)) {
            return;
        }
    }
}
/* Loads and parses the config file `fn` into `root`; missing files are a
 * silent no-op (load_file returns NULL). */
void config_load_file(cnode *root, const char *fn)
{
    // TODO: deliberate leak :-/
    config_load(root, static_cast<char*>(load_file(fn, nullptr)));
}
/* Recursively frees all descendants of `root`; root itself is not freed
 * (it may be caller-owned or stack-allocated). */
void config_free(cnode *root)
{
    cnode *child = root->first_child;
    while (child != NULL) {
        cnode *next = child->next;   /* read before freeing child */
        config_free(child);
        free(child);
        child = next;
    }
}

@ -0,0 +1,44 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_CUTILS_COMPILER_H
#define ANDROID_CUTILS_COMPILER_H
/*
 * helps the compiler's optimizer predicting branches
 */
/* C++ gets bool literals; C uses 1/0 for __builtin_expect's long argument. */
#ifdef __cplusplus
#   define CC_LIKELY( exp )    (__builtin_expect( !!(exp), true ))
#   define CC_UNLIKELY( exp )  (__builtin_expect( !!(exp), false ))
#else
#   define CC_LIKELY( exp )    (__builtin_expect( !!(exp), 1 ))
#   define CC_UNLIKELY( exp )  (__builtin_expect( !!(exp), 0 ))
#endif
/**
 * exports marked symbols
 *
 * if used on a C++ class declaration, this macro must be inserted
 * after the "class" keyword. For instance:
 *
 * template <typename TYPE>
 * class ANDROID_API Singleton { }
 */
#define ANDROID_API __attribute__((visibility("default")))
#endif // ANDROID_CUTILS_COMPILER_H

@ -0,0 +1,65 @@
// DO NOT INCLUDE ANYTHING NEW IN THIS FILE.
// <log/log.h> has replaced this file and all changes should go there instead.
// This path remains strictly to include that header as there are thousands of
// references to <utils/Log.h> in the tree.
// #include <log/log.h>
#include <jni.h>
#include <android/log.h>
#ifdef NDEBUG
#define LOG_NDEBUG 1
#else
#define LOG_NDEBUG 0
#endif
#define LOG_TAG "MPLOG"
// NOTE(review): ALOGV is mapped to ANDROID_LOG_INFO rather than VERBOSE here,
// so verbose messages are always emitted -- confirm this is intentional.
#define ALOGV(...) __android_log_print(ANDROID_LOG_INFO, LOG_TAG,__VA_ARGS__)
#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, LOG_TAG,__VA_ARGS__)
#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__)
#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, LOG_TAG, __VA_ARGS__)
#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG,__VA_ARGS__)
/* Fatal assertion plumbing (mirrors log/log.h). */
#define android_printAssert(cond, tag, ...) \
    __android_log_assert(cond, tag, \
                         __android_second(0, ##__VA_ARGS__, NULL) \
                             __android_rest(__VA_ARGS__))
/* Swallows the varargs so unused-argument warnings don't fire. */
#define __FAKE_USE_VA_ARGS(...) ((void)(0))
#ifndef LOG_ALWAYS_FATAL_IF
#define LOG_ALWAYS_FATAL_IF(cond, ...)                                                    \
    ((__predict_false(cond)) ? (__FAKE_USE_VA_ARGS(__VA_ARGS__),                          \
                                ((void)android_printAssert(#cond, LOG_TAG, ##__VA_ARGS__))) \
                             : ((void)0))
#endif
#ifndef LOG_ALWAYS_FATAL
#define LOG_ALWAYS_FATAL(...) \
    (((void)android_printAssert(NULL, LOG_TAG, ##__VA_ARGS__)))
#endif
/* LOG_FATAL* compile away in NDEBUG builds. */
#if NDEBUG
#ifndef LOG_FATAL_IF
#define LOG_FATAL_IF(cond, ...) __FAKE_USE_VA_ARGS(__VA_ARGS__)
#endif
#ifndef LOG_FATAL
#define LOG_FATAL(...) __FAKE_USE_VA_ARGS(__VA_ARGS__)
#endif
#else
#ifndef LOG_FATAL_IF
#define LOG_FATAL_IF(cond, ...) LOG_ALWAYS_FATAL_IF(cond, ##__VA_ARGS__)
#endif
#ifndef LOG_FATAL
#define LOG_FATAL(...) LOG_ALWAYS_FATAL(__VA_ARGS__)
#endif
#endif

@ -0,0 +1,144 @@
/*
* Copyright (C) 2005 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "sharedbuffer"
#include "SharedBuffer.h"
#include <stdlib.h>
#include <string.h>
#include <log/log.h>
// ---------------------------------------------------------------------------
namespace android {
/* Allocates a SharedBuffer with `size` usable bytes after the header and a
 * reference count of 1. Returns nullptr on allocation failure. */
SharedBuffer* SharedBuffer::alloc(size_t size)
{
    // Don't overflow if the combined size of the buffer / header is larger than
    // size_max.
    // NOTE(review): this overflow guard is compiled out (#if 0); a huge `size`
    // can wrap sizeof(SharedBuffer) + size -- confirm that is intentional.
#if 0
    LOG_ALWAYS_FATAL_IF((size >= (SIZE_MAX - sizeof(SharedBuffer))),
                        "Invalid buffer size %zu", size);
#endif
    SharedBuffer* sb = static_cast<SharedBuffer *>(malloc(sizeof(SharedBuffer) + size));
    if (sb) {
        // Should be std::atomic_init(&sb->mRefs, 1);
        // But that generates a warning with some compilers.
        // The following is OK on Android-supported platforms.
        sb->mRefs.store(1, std::memory_order_relaxed);
        sb->mSize = size;
        sb->mClientMetadata = 0;
    }
    return sb;
}
/**
 * Free the memory backing a SharedBuffer. The caller must guarantee no
 * remaining users; release() invokes this when the count reaches zero.
 */
void SharedBuffer::dealloc(const SharedBuffer* released)
{
free(const_cast<SharedBuffer*>(released));
}
/**
 * Copy-on-write access: return a buffer safe for writing.
 * If we are the sole owner the buffer itself is returned; otherwise a new
 * buffer is allocated, the payload copied, and our reference to the shared
 * one released. Returns nullptr if the copy could not be allocated.
 */
SharedBuffer* SharedBuffer::edit() const
{
if (onlyOwner()) {
return const_cast<SharedBuffer*>(this);
}
SharedBuffer* sb = alloc(mSize);
if (sb) {
memcpy(sb->data(), data(), size());
release();
}
return sb;
}
/**
 * Copy-on-write resize. When we are the sole owner, attempt an in-place
 * realloc; otherwise (or if realloc fails) allocate a new buffer, copy as
 * much of the payload as fits, and release our reference to the old one.
 * Returns nullptr on allocation failure or unrepresentable size.
 */
SharedBuffer* SharedBuffer::editResize(size_t newSize) const
{
    // Don't overflow if the combined size of the new buffer / header is
    // larger than SIZE_MAX. The upstream fatal-assert was compiled out
    // (#if 0), which would otherwise let realloc()/malloc() run with a
    // wrapped-around size.
    if (newSize >= (SIZE_MAX - sizeof(SharedBuffer))) {
        return nullptr;
    }
    if (onlyOwner()) {
        SharedBuffer* buf = const_cast<SharedBuffer*>(this);
        if (buf->mSize == newSize) return buf;
        buf = (SharedBuffer*)realloc(buf, sizeof(SharedBuffer) + newSize);
        if (buf != nullptr) {
            buf->mSize = newSize;
            return buf;
        }
        // realloc failed: the original block is still valid, fall through
        // to the allocate-and-copy path below.
    }
    SharedBuffer* sb = alloc(newSize);
    if (sb) {
        const size_t mySize = mSize;
        memcpy(sb->data(), data(), newSize < mySize ? newSize : mySize);
        release();
    }
    return sb;
}
/**
 * Like edit(), but never copies: writable access is granted only when no
 * other client holds a reference; otherwise nullptr is returned.
 */
SharedBuffer* SharedBuffer::attemptEdit() const
{
    return onlyOwner() ? const_cast<SharedBuffer*>(this) : nullptr;
}
/**
 * Discard current contents and return a freshly allocated buffer of
 * new_size. No payload is copied; our reference to the old buffer is
 * released only if the new allocation succeeded.
 */
SharedBuffer* SharedBuffer::reset(size_t new_size) const
{
// cheap-o-reset.
SharedBuffer* sb = alloc(new_size);
if (sb) {
release();
}
return sb;
}
// Take an additional reference. Relaxed ordering suffices for an increment:
// no decision is made based on the resulting value.
void SharedBuffer::acquire() const {
mRefs.fetch_add(1, std::memory_order_relaxed);
}
/**
 * Drop one reference, freeing the buffer when the last one goes away unless
 * eKeepStorage is set in flags. Returns the previous reference count.
 */
int32_t SharedBuffer::release(uint32_t flags) const
{
const bool useDealloc = ((flags & eKeepStorage) == 0);
if (onlyOwner()) {
// Since we're the only owner, our reference count goes to zero.
mRefs.store(0, std::memory_order_relaxed);
if (useDealloc) {
dealloc(this);
}
// As the only owner, our previous reference count was 1.
return 1;
}
// There's multiple owners, we need to use an atomic decrement.
int32_t prevRefCount = mRefs.fetch_sub(1, std::memory_order_release);
if (prevRefCount == 1) {
// We're the last reference, we need the acquire fence.
// The release decrement above paired with this acquire fence makes
// all prior writes by other owners visible before the memory is freed.
atomic_thread_fence(std::memory_order_acquire);
if (useDealloc) {
dealloc(this);
}
}
return prevRefCount;
}
}; // namespace android

@ -0,0 +1,151 @@
/*
* Copyright (C) 2005 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* DEPRECATED. DO NOT USE FOR NEW CODE.
*/
#ifndef ANDROID_SHARED_BUFFER_H
#define ANDROID_SHARED_BUFFER_H
#include <atomic>
#include <stdint.h>
#include <sys/types.h>
// ---------------------------------------------------------------------------
namespace android {
// A reference-counted, copy-on-write heap buffer. The payload is allocated
// immediately after this header (see data()). DEPRECATED for new code.
class SharedBuffer
{
public:
/* flags to use with release() */
enum {
eKeepStorage = 0x00000001
};
/*! allocate a buffer of size 'size' and acquire() it.
* call release() to free it.
*/
static SharedBuffer* alloc(size_t size);
/*! free the memory associated with the SharedBuffer.
* Fails if there are any users associated with this SharedBuffer.
* In other words, the buffer must have been release by all its
* users.
*/
static void dealloc(const SharedBuffer* released);
//! access the data for read
inline const void* data() const;
//! access the data for read/write
inline void* data();
//! get size of the buffer
inline size_t size() const;
//! get back a SharedBuffer object from its data
static inline SharedBuffer* bufferFromData(void* data);
//! get back a SharedBuffer object from its data
static inline const SharedBuffer* bufferFromData(const void* data);
//! get the size of a SharedBuffer object from its data
static inline size_t sizeFromData(const void* data);
//! edit the buffer (get a writable, or non-const, version of it)
SharedBuffer* edit() const;
//! edit the buffer, resizing if needed
SharedBuffer* editResize(size_t size) const;
//! like edit() but fails if a copy is required
SharedBuffer* attemptEdit() const;
//! resize and edit the buffer, losing its content.
SharedBuffer* reset(size_t size) const;
//! acquire/release a reference on this buffer
void acquire() const;
/*! release a reference on this buffer, with the option of not
* freeing the memory associated with it if it was the last reference
* returns the previous reference count
*/
int32_t release(uint32_t flags = 0) const;
//! returns whether or not we're the only owner
inline bool onlyOwner() const;
private:
inline SharedBuffer() { }
inline ~SharedBuffer() { }
// Non-copyable: declared but never defined (pre-C++11 "= delete" idiom).
SharedBuffer(const SharedBuffer&);
SharedBuffer& operator = (const SharedBuffer&);
// Must be sized to preserve correct alignment.
mutable std::atomic<int32_t> mRefs;
size_t mSize;
uint32_t mReserved;
public:
// mClientMetadata is reserved for client use. It is initialized to 0
// and the clients can do whatever they want with it. Note that this is
// placed last so that it is adjacent to the buffer allocated.
uint32_t mClientMetadata;
};
// Layout guard: the header must keep 8-byte alignment for the payload that
// follows it (and be exactly 16 bytes on 32-bit size_t platforms).
static_assert(sizeof(SharedBuffer) % 8 == 0
&& (sizeof(size_t) > 4 || sizeof(SharedBuffer) == 16),
"SharedBuffer has unexpected size");
// ---------------------------------------------------------------------------
// Payload starts immediately after the header, hence the `this + 1`
// arithmetic here and the inverse `- 1` in bufferFromData().
const void* SharedBuffer::data() const {
return this + 1;
}
void* SharedBuffer::data() {
return this + 1;
}
size_t SharedBuffer::size() const {
return mSize;
}
// Recover the SharedBuffer header from a payload pointer (inverse of data()).
SharedBuffer* SharedBuffer::bufferFromData(void* data) {
return data ? static_cast<SharedBuffer *>(data)-1 : nullptr;
}
const SharedBuffer* SharedBuffer::bufferFromData(const void* data) {
return data ? static_cast<const SharedBuffer *>(data)-1 : nullptr;
}
size_t SharedBuffer::sizeFromData(const void* data) {
return data ? bufferFromData(data)->mSize : 0;
}
// A count of exactly 1 means no other client can observe the buffer; the
// acquire load pairs with the release decrement in release().
bool SharedBuffer::onlyOwner() const {
return (mRefs.load(std::memory_order_acquire) == 1);
}
} // namespace android
// ---------------------------------------------------------------------------
#endif // ANDROID_SHARED_BUFFER_H

@ -0,0 +1,37 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "sp"
#include <log/log.h>
namespace android {
// Called by sp<> when a data race on assignment is detected.
// NOTE(review): the upstream fatal abort was deliberately compiled out
// (#if 0) in this copy, so detection is currently a silent no-op.
void sp_report_race()
{
#if 0
LOG_ALWAYS_FATAL("sp<> assignment detected data race");
#endif
}
// Called by sp<> when constructed from a pointer into the stack.
// NOTE(review): the upstream fatal abort was deliberately compiled out
// (#if 0) in this copy, so the misuse goes unreported at runtime.
void sp_report_stack_pointer()
{
#if 0
LOG_ALWAYS_FATAL("sp<> constructed with stack pointer argument");
#endif
}
}

@ -0,0 +1,85 @@
/*
* Copyright (C) 2007 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <errno.h>
#include <stdint.h>
#include <sys/types.h>
#include <string>
namespace android {
/**
* The type used to return success/failure from frameworks APIs.
* See the anonymous enum below for valid values.
*/
typedef int32_t status_t;
/*
* Error codes.
* All error codes are negative values.
*/
// Win32 #defines NO_ERROR as well. It has the same value, so there's no
// real conflict, though it's a bit awkward.
#ifdef _WIN32
# undef NO_ERROR
#endif
// Framework status codes. All error values are negative: most are negated
// errno values; conditions errno cannot express are synthesized relative to
// UNKNOWN_ERROR (INT32_MIN), keeping them far from any -errno value.
enum {
OK = 0, // Preferred constant for checking success.
NO_ERROR = OK, // Deprecated synonym for `OK`. Prefer `OK` because it doesn't conflict with Windows.
UNKNOWN_ERROR = (-2147483647-1), // INT32_MIN value
NO_MEMORY = -ENOMEM,
INVALID_OPERATION = -ENOSYS,
BAD_VALUE = -EINVAL,
BAD_TYPE = (UNKNOWN_ERROR + 1),
NAME_NOT_FOUND = -ENOENT,
PERMISSION_DENIED = -EPERM,
NO_INIT = -ENODEV,
ALREADY_EXISTS = -EEXIST,
DEAD_OBJECT = -EPIPE,
FAILED_TRANSACTION = (UNKNOWN_ERROR + 2),
// Windows lacks some of these errno values, so synthetic codes are used there.
#if !defined(_WIN32)
BAD_INDEX = -EOVERFLOW,
NOT_ENOUGH_DATA = -ENODATA,
WOULD_BLOCK = -EWOULDBLOCK,
TIMED_OUT = -ETIMEDOUT,
UNKNOWN_TRANSACTION = -EBADMSG,
#else
BAD_INDEX = -E2BIG,
NOT_ENOUGH_DATA = (UNKNOWN_ERROR + 3),
WOULD_BLOCK = (UNKNOWN_ERROR + 4),
TIMED_OUT = (UNKNOWN_ERROR + 5),
UNKNOWN_TRANSACTION = (UNKNOWN_ERROR + 6),
#endif
FDS_NOT_ALLOWED = (UNKNOWN_ERROR + 7),
UNEXPECTED_NULL = (UNKNOWN_ERROR + 8),
};
// Human readable name of error
std::string statusToString(status_t status);
// Restore define; enumeration is in "android" namespace, so the value defined
// there won't work for Win32 code in a different namespace.
#ifdef _WIN32
# define NO_ERROR 0L
#endif
} // namespace android

@ -0,0 +1,70 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/*
* See documentation in RefBase.h
*/
#include <atomic>
#include <sys/types.h>
namespace android {
class ReferenceRenamer;
// Minimal intrusive strong reference count (no weak references, no
// onFirstRef/onLastStrongRef hooks). T must be the deriving class (CRTP) so
// decStrong() deletes the most-derived object.
template <class T>
class LightRefBase
{
public:
inline LightRefBase() : mCount(0) { }
// Relaxed suffices for the increment: nothing is decided from the result.
inline void incStrong(__attribute__((unused)) const void* id) const {
mCount.fetch_add(1, std::memory_order_relaxed);
}
// Release on the decrement plus an acquire fence before delete makes all
// prior writes by other owners visible to the destructor.
inline void decStrong(__attribute__((unused)) const void* id) const {
if (mCount.fetch_sub(1, std::memory_order_release) == 1) {
std::atomic_thread_fence(std::memory_order_acquire);
delete static_cast<const T*>(this);
}
}
//! DEBUGGING ONLY: Get current strong ref count.
inline int32_t getStrongCount() const {
return mCount.load(std::memory_order_relaxed);
}
protected:
inline ~LightRefBase() { }
private:
friend class ReferenceMover;
// No-op hooks so containers can relocate arrays of references to
// LightRefBase objects the same way they do for RefBase objects.
inline static void renameRefs(size_t /*n*/, const ReferenceRenamer& /*renamer*/) { }
inline static void renameRefId(T* /*ref*/, const void* /*old_id*/ , const void* /*new_id*/) { }
private:
mutable std::atomic<int32_t> mCount;
};
// This is a wrapper around LightRefBase that simply enforces a virtual
// destructor to eliminate the template requirement of LightRefBase
class VirtualLightRefBase : public LightRefBase<VirtualLightRefBase> {
public:
// A virtual destructor lets heterogeneous objects be held and destroyed
// through a VirtualLightRefBase pointer without templating on the type.
virtual ~VirtualLightRefBase() = default;
};
} // namespace android

@ -0,0 +1,9 @@
// DO NOT INCLUDE ANYTHING NEW IN THIS FILE.
// <log/log.h> has replaced this file and all changes should go there instead.
// This path remains strictly to include that header as there are thousands of
// references to <utils/Log.h> in the tree.
// #include <log/log.h>
#include <android/log.h>
#include <AndroidHelper.h>

@ -0,0 +1,713 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// SOME COMMENTS ABOUT USAGE:
// This provides primarily wp<> weak pointer types and RefBase, which work
// together with sp<> from <StrongPointer.h>.
// sp<> (and wp<>) are a type of smart pointer that use a well defined protocol
// to operate. As long as the object they are templated with implements that
// protocol, these smart pointers work. In several places the platform
// instantiates sp<> with non-RefBase objects; the two are not tied to each
// other.
// RefBase is such an implementation and it supports strong pointers, weak
// pointers and some magic features for the binder.
// So, when using RefBase objects, you have the ability to use strong and weak
// pointers through sp<> and wp<>.
// Normally, when the last strong pointer goes away, the object is destroyed,
// i.e. its destructor is called. HOWEVER, parts of its associated memory are not
// freed until the last weak pointer is released.
// Weak pointers are essentially "safe" pointers. They are always safe to
// access through promote(). They may return nullptr if the object was
// destroyed because it ran out of strong pointers. This makes them good candidates
// for keys in a cache for instance.
// Weak pointers remain valid for comparison purposes even after the underlying
// object has been destroyed. Even if object A is destroyed and its memory reused
// for B, a remaining weak pointer to A will not compare equal to one to B.
// This again makes them attractive for use as keys.
// How is this supposed / intended to be used?
// Our recommendation is to use strong references (sp<>) when there is an
// ownership relation. e.g. when an object "owns" another one, use a strong
// ref. And of course use strong refs as arguments of functions (it's extremely
// rare that a function will take a wp<>).
// Typically a newly allocated object will immediately be used to initialize
// a strong pointer, which may then be used to construct or assign to other
// strong and weak pointers.
// Use weak references when there is no ownership relation. e.g. the keys in a
// cache (you cannot use plain pointers because there is no safe way to acquire
// a strong reference from a vanilla pointer).
// This implies that two objects should never (or very rarely) have sp<> on
// each other, because they can't both own each other.
// Caveats with reference counting
// Obviously, circular strong references are a big problem; this creates leaks
// and it's hard to debug -- except it's in fact really easy because RefBase has
// tons of debugging code for that. It can basically tell you exactly where the
// leak is.
// Another problem has to do with destructors with side effects. You must
// assume that the destructor of reference counted objects can be called AT ANY
// TIME. For instance code as simple as this:
// void setStuff(const sp<Stuff>& stuff) {
// std::lock_guard<std::mutex> lock(mMutex);
// mStuff = stuff;
// }
// is very dangerous. This code WILL deadlock one day or another.
// What isn't obvious is that ~Stuff() can be called as a result of the
// assignment. And it gets called with the lock held. First of all, the lock is
// protecting mStuff, not ~Stuff(). Secondly, if ~Stuff() uses its own internal
// mutex, now you have mutex ordering issues. Even worse, if ~Stuff() is
// virtual, now you're calling into "user" code (potentially), by that, I mean,
// code you didn't even write.
// A correct way to write this code is something like:
// void setStuff(const sp<Stuff>& stuff) {
// std::unique_lock<std::mutex> lock(mMutex);
// sp<Stuff> hold = mStuff;
// mStuff = stuff;
// lock.unlock();
// }
// More importantly, reference counted objects should do as little work as
// possible in their destructor, or at least be mindful that their destructor
// could be called from very weird and unintended places.
// Other more specific restrictions for wp<> and sp<>:
// Do not construct a strong pointer to "this" in an object's constructor.
// The onFirstRef() callback would be made on an incompletely constructed
// object.
// Construction of a weak pointer to "this" in an object's constructor is also
// discouraged. But the implementation was recently changed so that, in the
// absence of extendObjectLifetime() calls, weak pointers no longer impact
// object lifetime, and hence this no longer risks premature deallocation,
// and hence usually works correctly.
// Such strong or weak pointers can be safely created in the RefBase onFirstRef()
// callback.
// Use of wp::unsafe_get() for any purpose other than debugging is almost
// always wrong. Unless you somehow know that there is a longer-lived sp<> to
// the same object, it may well return a pointer to a deallocated object that
// has since been reallocated for a different purpose. (And if you know there
// is a longer-lived sp<>, why not use an sp<> directly?) A wp<> should only be
// dereferenced by using promote().
// Any object inheriting from RefBase should always be destroyed as the result
// of a reference count decrement, not via any other means. Such objects
// should never be stack allocated, or appear directly as data members in other
// objects. Objects inheriting from RefBase should have their strong reference
// count incremented as soon as possible after construction. Usually this
// will be done via construction of an sp<> to the object, but may instead
// involve other means of calling RefBase::incStrong().
// Explicitly deleting or otherwise destroying a RefBase object with outstanding
// wp<> or sp<> pointers to it will result in an abort or heap corruption.
// It is particularly important not to mix sp<> and direct storage management
// since the sp from raw pointer constructor is implicit. Thus if a RefBase-
// -derived object of type T is managed without ever incrementing its strong
// count, and accidentally passed to f(sp<T>), a strong pointer to the object
// will be temporarily constructed and destroyed, prematurely deallocating the
// object, and resulting in heap corruption. None of this would be easily
// visible in the source.
// Extra Features:
// RefBase::extendObjectLifetime() can be used to prevent destruction of the
// object while there are still weak references. This is really special purpose
// functionality to support Binder.
// Wp::promote(), implemented via the attemptIncStrong() member function, is
// used to try to convert a weak pointer back to a strong pointer. It's the
// normal way to try to access the fields of an object referenced only through
// a wp<>. Binder code also sometimes uses attemptIncStrong() directly.
// RefBase provides a number of additional callbacks for certain reference count
// events, as well as some debugging facilities.
// Debugging support can be enabled by turning on DEBUG_REFS in RefBase.cpp.
// Otherwise little checking is provided.
// Thread safety:
// Like std::shared_ptr, sp<> and wp<> allow concurrent accesses to DIFFERENT
// sp<> and wp<> instances that happen to refer to the same underlying object.
// They do NOT support concurrent access (where at least one access is a write)
// to THE SAME sp<> or wp<>. In effect, their thread-safety properties are
// exactly like those of T*, NOT atomic<T*>.
#ifndef ANDROID_REF_BASE_H
#define ANDROID_REF_BASE_H
#include <atomic>
#include <functional>
#include <type_traits> // for common_type.
#include <stdint.h>
#include <sys/types.h>
#include <stdlib.h>
#include <string.h>
// LightRefBase used to be declared in this header, so we have to include it
#include <utils/LightRefBase.h>
#include <utils/StrongPointer.h>
#include <utils/TypeHelpers.h>
// ---------------------------------------------------------------------------
namespace android {
// ---------------------------------------------------------------------------
// Generates raw-pointer comparison operators for wp<T>. These compare the
// stored m_ptr directly; see the invariants documented further below for why
// comparisons against raw pointers of destroyed objects can be misleading.
#define COMPARE_WEAK(_op_) \
template<typename U> \
inline bool operator _op_ (const U* o) const { \
return m_ptr _op_ o; \
} \
/* Needed to handle type inference for nullptr: */ \
inline bool operator _op_ (const T* o) const { \
return m_ptr _op_ o; \
}
// Compare two raw pointers through the given standard comparator, first
// promoting both operands to their common pointer type so the comparison is
// well-defined even for pointers to different (related) types.
template<template<typename C> class comparator, typename T, typename U>
static inline bool _wp_compare_(T* a, U* b) {
    using common_t = typename std::common_type<T*, U*>::type;
    comparator<common_t> cmp;
    return cmp(a, b);
}
// Use std::less and friends to avoid undefined behavior when ordering pointers
// to different objects.
// (Relational comparison of unrelated raw pointers is unspecified; the
// standard function objects are defined to yield a total order.)
#define COMPARE_WEAK_FUNCTIONAL(_op_, _compare_) \
template<typename U> \
inline bool operator _op_ (const U* o) const { \
return _wp_compare_<_compare_>(m_ptr, o); \
}
// ---------------------------------------------------------------------------
// ReferenceRenamer is pure abstract, there is no virtual method
// implementation to put in a translation unit in order to silence the
// weak vtables warning.
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wweak-vtables"
#endif
// Type-erased callback used by ReferenceMover to re-associate reference ids
// after relocating an array of sp<>/wp<> (the ids are the smart pointers'
// own addresses, which change when the array moves).
class ReferenceRenamer {
protected:
// destructor is purposely not virtual so we avoid code overhead from
// subclasses; we have to make it protected to guarantee that it
// cannot be called from this base class (and to make strict compilers
// happy).
~ReferenceRenamer() { }
public:
// Invoked once per moved reference; i is the index into the moved array.
virtual void operator()(size_t i) const = 0;
};
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
// ---------------------------------------------------------------------------
// Base class providing intrusive strong + weak reference counting for use
// with sp<> and wp<>. The real bookkeeping lives in the private
// weakref_impl (defined in RefBase.cpp, not visible here).
class RefBase
{
public:
// 'id' is a debugging cookie identifying the reference holder; the
// convention throughout this header is the smart pointer's own address.
void incStrong(const void* id) const;
void decStrong(const void* id) const;
void forceIncStrong(const void* id) const;
//! DEBUGGING ONLY: Get current strong ref count.
int32_t getStrongCount() const;
// Per-object weak-reference record. It outlives the RefBase object itself
// while any wp<> still points at it (see the invariants documented below).
class weakref_type
{
public:
RefBase* refBase() const;
void incWeak(const void* id);
void decWeak(const void* id);
// acquires a strong reference if there is already one.
bool attemptIncStrong(const void* id);
// acquires a weak reference if there is already one.
// This is not always safe. see ProcessState.cpp and BpBinder.cpp
// for proper use.
bool attemptIncWeak(const void* id);
//! DEBUGGING ONLY: Get current weak ref count.
int32_t getWeakCount() const;
//! DEBUGGING ONLY: Print references held on object.
void printRefs() const;
//! DEBUGGING ONLY: Enable tracking for this object.
// enable -- enable/disable tracking
// retain -- when tracking is enable, if true, then we save a stack trace
// for each reference and dereference; when retain == false, we
// match up references and dereferences and keep only the
// outstanding ones.
void trackMe(bool enable, bool retain);
};
// Register a new weak reference held by 'id' and return the record.
weakref_type* createWeak(const void* id) const;
weakref_type* getWeakRefs() const;
//! DEBUGGING ONLY: Print references held on object.
inline void printRefs() const { getWeakRefs()->printRefs(); }
//! DEBUGGING ONLY: Enable tracking of object.
inline void trackMe(bool enable, bool retain)
{
getWeakRefs()->trackMe(enable, retain);
}
protected:
RefBase();
virtual ~RefBase();
//! Flags for extendObjectLifetime()
enum {
OBJECT_LIFETIME_STRONG = 0x0000,
OBJECT_LIFETIME_WEAK = 0x0001,
OBJECT_LIFETIME_MASK = 0x0001
};
void extendObjectLifetime(int32_t mode);
//! Flags for onIncStrongAttempted()
enum {
FIRST_INC_STRONG = 0x0001
};
// Invoked after creation of initial strong pointer/reference.
virtual void onFirstRef();
// Invoked when either the last strong reference goes away, or we need to undo
// the effect of an unnecessary onIncStrongAttempted.
virtual void onLastStrongRef(const void* id);
// Only called in OBJECT_LIFETIME_WEAK case. Returns true if OK to promote to
// strong reference. May have side effects if it returns true.
// The first flags argument is always FIRST_INC_STRONG.
// TODO: Remove initial flag argument.
virtual bool onIncStrongAttempted(uint32_t flags, const void* id);
// Invoked in the OBJECT_LIFETIME_WEAK case when the last reference of either
// kind goes away. Unused.
// TODO: Remove.
virtual void onLastWeakRef(const void* id);
private:
friend class weakref_type;
class weakref_impl;
// Non-copyable: declared but never defined.
RefBase(const RefBase& o);
RefBase& operator=(const RefBase& o);
private:
friend class ReferenceMover;
static void renameRefs(size_t n, const ReferenceRenamer& renamer);
static void renameRefId(weakref_type* ref,
const void* old_id, const void* new_id);
static void renameRefId(RefBase* ref,
const void* old_id, const void* new_id);
// Allocated on construction, unique to this object, never changes.
weakref_impl* const mRefs;
};
// ---------------------------------------------------------------------------
// Weak pointer to a RefBase-derived object. Holds (m_ptr, m_refs) where
// m_refs is the object's weakref record; see the implementation invariants
// documented after the class. Always access the object via promote().
template <typename T>
class wp
{
public:
typedef typename RefBase::weakref_type weakref_type;
inline wp() : m_ptr(nullptr), m_refs(nullptr) { }
wp(T* other); // NOLINT(implicit)
wp(const wp<T>& other);
explicit wp(const sp<T>& other);
template<typename U> wp(U* other); // NOLINT(implicit)
template<typename U> wp(const sp<U>& other); // NOLINT(implicit)
template<typename U> wp(const wp<U>& other); // NOLINT(implicit)
~wp();
// Assignment
wp& operator = (T* other);
wp& operator = (const wp<T>& other);
wp& operator = (const sp<T>& other);
template<typename U> wp& operator = (U* other);
template<typename U> wp& operator = (const wp<U>& other);
template<typename U> wp& operator = (const sp<U>& other);
void set_object_and_refs(T* other, weakref_type* refs);
// promotion to sp
sp<T> promote() const;
// Reset
void clear();
// Accessors
inline weakref_type* get_refs() const { return m_refs; }
// Debugging only: may return a pointer to an already-destroyed object.
inline T* unsafe_get() const { return m_ptr; }
// Operators
COMPARE_WEAK(==)
COMPARE_WEAK(!=)
COMPARE_WEAK_FUNCTIONAL(>, std::greater)
COMPARE_WEAK_FUNCTIONAL(<, std::less)
COMPARE_WEAK_FUNCTIONAL(<=, std::less_equal)
COMPARE_WEAK_FUNCTIONAL(>=, std::greater_equal)
template<typename U>
inline bool operator == (const wp<U>& o) const {
return m_refs == o.m_refs; // Implies m_ptr == o.mptr; see invariants below.
}
template<typename U>
inline bool operator == (const sp<U>& o) const {
// Just comparing m_ptr fields is often dangerous, since wp<> may refer to an older
// object at the same address.
if (o == nullptr) {
return m_ptr == nullptr;
} else {
return m_refs == o->getWeakRefs(); // Implies m_ptr == o.mptr.
}
}
template<typename U>
inline bool operator != (const sp<U>& o) const {
return !(*this == o);
}
template<typename U>
inline bool operator > (const wp<U>& o) const {
// Order primarily by object address; tie-break on the weakref record so
// the ordering stays consistent with ordinary pointer comparison.
if (m_ptr == o.m_ptr) {
return _wp_compare_<std::greater>(m_refs, o.m_refs);
} else {
return _wp_compare_<std::greater>(m_ptr, o.m_ptr);
}
}
template<typename U>
inline bool operator < (const wp<U>& o) const {
if (m_ptr == o.m_ptr) {
return _wp_compare_<std::less>(m_refs, o.m_refs);
} else {
return _wp_compare_<std::less>(m_ptr, o.m_ptr);
}
}
template<typename U> inline bool operator != (const wp<U>& o) const { return !operator == (o); }
template<typename U> inline bool operator <= (const wp<U>& o) const { return !operator > (o); }
template<typename U> inline bool operator >= (const wp<U>& o) const { return !operator < (o); }
private:
template<typename Y> friend class sp;
template<typename Y> friend class wp;
T* m_ptr;
weakref_type* m_refs;
};
#undef COMPARE_WEAK
#undef COMPARE_WEAK_FUNCTIONAL
// ---------------------------------------------------------------------------
// No user serviceable parts below here.
// Implementation invariants:
// Either
// 1) m_ptr and m_refs are both null, or
// 2) m_refs == m_ptr->mRefs, or
// 3) *m_ptr is no longer live, and m_refs points to the weakref_type object that corresponded
// to m_ptr while it was live. *m_refs remains live while a wp<> refers to it.
//
// The m_refs field in a RefBase object is allocated on construction, unique to that RefBase
// object, and never changes. Thus if two wp's have identical m_refs fields, they are either both
// null or point to the same object. If two wp's have identical m_ptr fields, they either both
// point to the same live object and thus have the same m_ref fields, or at least one of the
// objects is no longer live.
//
// Note that the above comparison operations go out of their way to provide an ordering consistent
// with ordinary pointer comparison; otherwise they could ignore m_ptr, and just compare m_refs.
// Construct a weak reference from a raw pointer, registering this wp as a
// weak-reference holder with the object's weakref record.
template<typename T>
wp<T>::wp(T* other)
    : m_ptr(other)
{
    // Fixed: the original line read
    //   m_refs = other ? m_refs = other->createWeak(this) : nullptr;
    // the redundant inner "m_refs =" assigned the member twice.
    m_refs = other ? other->createWeak(this) : nullptr;
}
// Copy-construct: share the target object and its weakref record, taking an
// additional weak reference whenever a target exists.
template<typename T>
wp<T>::wp(const wp<T>& other)
    : m_ptr(other.m_ptr), m_refs(other.m_refs)
{
    if (m_ptr != nullptr) {
        m_refs->incWeak(this);
    }
}
// Construct a weak reference from a strong one; registers a fresh weak
// reference on the (still live) object.
template<typename T>
wp<T>::wp(const sp<T>& other)
: m_ptr(other.m_ptr)
{
m_refs = m_ptr ? m_ptr->createWeak(this) : nullptr;
}
// Converting construction from a raw pointer of a convertible type U*.
template<typename T> template<typename U>
wp<T>::wp(U* other)
: m_ptr(other)
{
m_refs = other ? other->createWeak(this) : nullptr;
}
// Converting copy-construction: share the source's weakref record and take
// an extra weak reference when non-null.
template<typename T> template<typename U>
wp<T>::wp(const wp<U>& other)
: m_ptr(other.m_ptr)
{
if (m_ptr) {
m_refs = other.m_refs;
m_refs->incWeak(this);
} else {
m_refs = nullptr;
}
}
// Converting construction from a strong reference of a convertible type.
template<typename T> template<typename U>
wp<T>::wp(const sp<U>& other)
: m_ptr(other.m_ptr)
{
m_refs = m_ptr ? m_ptr->createWeak(this) : nullptr;
}
// Release this holder's weak reference; the weakref record (and possibly the
// object's storage) is freed when the last weak reference drops.
template<typename T>
wp<T>::~wp()
{
if (m_ptr) m_refs->decWeak(this);
}
// Raw-pointer assignment. The new weak reference is created before the old
// one is released, so assigning the object this wp already references is safe.
template<typename T>
wp<T>& wp<T>::operator = (T* other)
{
weakref_type* newRefs =
other ? other->createWeak(this) : nullptr;
if (m_ptr) m_refs->decWeak(this);
m_ptr = other;
m_refs = newRefs;
return *this;
}
// Copy-assignment. Incrementing the source's record before decrementing our
// own keeps self-assignment from transiently destroying the shared record.
template<typename T>
wp<T>& wp<T>::operator = (const wp<T>& other)
{
weakref_type* otherRefs(other.m_refs);
T* otherPtr(other.m_ptr);
if (otherPtr) otherRefs->incWeak(this);
if (m_ptr) m_refs->decWeak(this);
m_ptr = otherPtr;
m_refs = otherRefs;
return *this;
}
// Assignment from a strong reference; a new weak reference is registered on
// the live object before the current one is dropped.
template<typename T>
wp<T>& wp<T>::operator = (const sp<T>& other)
{
weakref_type* newRefs =
other != nullptr ? other->createWeak(this) : nullptr;
T* otherPtr(other.m_ptr);
if (m_ptr) m_refs->decWeak(this);
m_ptr = otherPtr;
m_refs = newRefs;
return *this;
}
// Converting raw-pointer assignment. The new weak reference is created before
// the old one is dropped (safe if both refer to the same object). Uses
// nullptr rather than the literal 0 for the null weakref pointer, matching
// the sibling operator= overloads.
template<typename T> template<typename U>
wp<T>& wp<T>::operator = (U* other)
{
    weakref_type* newRefs =
        other ? other->createWeak(this) : nullptr;
    if (m_ptr) m_refs->decWeak(this);
    m_ptr = other;
    m_refs = newRefs;
    return *this;
}
// Converting copy-assignment; same inc-before-dec ordering as the
// non-template overload to stay safe under self-assignment.
template<typename T> template<typename U>
wp<T>& wp<T>::operator = (const wp<U>& other)
{
weakref_type* otherRefs(other.m_refs);
U* otherPtr(other.m_ptr);
if (otherPtr) otherRefs->incWeak(this);
if (m_ptr) m_refs->decWeak(this);
m_ptr = otherPtr;
m_refs = otherRefs;
return *this;
}
// Converting assignment from a strong reference. A fresh weak reference is
// registered on the live object before our current one is dropped. Uses
// nullptr rather than the literal 0 for the null weakref pointer, matching
// the sibling operator= overloads.
template<typename T> template<typename U>
wp<T>& wp<T>::operator = (const sp<U>& other)
{
    weakref_type* newRefs =
        other != nullptr ? other->createWeak(this) : nullptr;
    U* otherPtr(other.m_ptr);
    if (m_ptr) m_refs->decWeak(this);
    m_ptr = otherPtr;
    m_refs = newRefs;
    return *this;
}
// Point this wp at (other, refs) directly — used internally when the caller
// already holds the weakref record matching 'other'.
template<typename T>
void wp<T>::set_object_and_refs(T* other, weakref_type* refs)
{
if (other) refs->incWeak(this);
if (m_ptr) m_refs->decWeak(this);
m_ptr = other;
m_refs = refs;
}
// Attempt to obtain a strong reference. Returns a null sp<> when the object
// has already lost its last strong reference and cannot be revived.
template<typename T>
sp<T> wp<T>::promote() const
{
sp<T> result;
if (m_ptr && m_refs->attemptIncStrong(&result)) {
result.set_pointer(m_ptr);
}
return result;
}
// Drop this weak reference (if any) and reset to the null state.
// Uses nullptr rather than the literal 0 for the pointer members, matching
// the default constructor's initializers.
template<typename T>
void wp<T>::clear()
{
    if (m_ptr) {
        m_refs->decWeak(this);
        m_refs = nullptr;
        m_ptr = nullptr;
    }
}
// ---------------------------------------------------------------------------
// this class just serves as a namespace so TYPE::moveReferences can stay
// private.
// Relocates arrays of sp<>/wp<> with a raw memmove, then (for debug
// reference tracking) renames each moved reference's id from its old slot
// address to its new one via the type's renameRefs/renameRefId hooks.
class ReferenceMover {
public:
// it would be nice if we could make sure no extra code is generated
// for sp<TYPE> or wp<TYPE> when TYPE is a descendant of RefBase:
// Using a sp<RefBase> override doesn't work; it's a bit like we wanted
// a template<typename TYPE inherits RefBase> template...
template<typename TYPE> static inline
void move_references(sp<TYPE>* dest, sp<TYPE> const* src, size_t n) {
class Renamer : public ReferenceRenamer {
sp<TYPE>* d_;
sp<TYPE> const* s_;
virtual void operator()(size_t i) const {
// The id are known to be the sp<>'s this pointer
TYPE::renameRefId(d_[i].get(), &s_[i], &d_[i]);
}
public:
Renamer(sp<TYPE>* d, sp<TYPE> const* s) : d_(d), s_(s) { }
virtual ~Renamer() { }
};
// memmove tolerates overlapping source/destination ranges.
memmove(dest, src, n*sizeof(sp<TYPE>));
TYPE::renameRefs(n, Renamer(dest, src));
}
template<typename TYPE> static inline
void move_references(wp<TYPE>* dest, wp<TYPE> const* src, size_t n) {
class Renamer : public ReferenceRenamer {
wp<TYPE>* d_;
wp<TYPE> const* s_;
virtual void operator()(size_t i) const {
// The id are known to be the wp<>'s this pointer
TYPE::renameRefId(d_[i].get_refs(), &s_[i], &d_[i]);
}
public:
Renamer(wp<TYPE>* rd, wp<TYPE> const* rs) : d_(rd), s_(rs) { }
virtual ~Renamer() { }
};
memmove(dest, src, n*sizeof(wp<TYPE>));
TYPE::renameRefs(n, Renamer(dest, src));
}
};
// specialization for moving sp<> and wp<> types.
// these are used by the [Sorted|Keyed]Vector<> implementations
// sp<> and wp<> need to be handled specially, because they do not
// have trivial copy operation in the general case (see RefBase.cpp
// when DEBUG ops are enabled), but can be implemented very
// efficiently in most cases.

// Forward and backward moves are identical here: ReferenceMover uses a
// single memmove, which already handles overlapping ranges in either
// direction.
template<typename TYPE> inline
void move_forward_type(sp<TYPE>* d, sp<TYPE> const* s, size_t n) {
    ReferenceMover::move_references(d, s, n);
}
template<typename TYPE> inline
void move_backward_type(sp<TYPE>* d, sp<TYPE> const* s, size_t n) {
    ReferenceMover::move_references(d, s, n);
}
template<typename TYPE> inline
void move_forward_type(wp<TYPE>* d, wp<TYPE> const* s, size_t n) {
    ReferenceMover::move_references(d, s, n);
}
template<typename TYPE> inline
void move_backward_type(wp<TYPE>* d, wp<TYPE> const* s, size_t n) {
    ReferenceMover::move_references(d, s, n);
}
} // namespace android
// ---------------------------------------------------------------------------
#endif // ANDROID_REF_BASE_H

@ -0,0 +1,317 @@
/*
* Copyright (C) 2005 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_STRONG_POINTER_H
#define ANDROID_STRONG_POINTER_H
#include <functional>
#include <type_traits> // for common_type.
// ---------------------------------------------------------------------------
namespace android {
template<typename T> class wp;
// ---------------------------------------------------------------------------
// Strong (owning) smart pointer for RefBase-style objects: each live sp<>
// holds exactly one strong reference on its pointee.
template<typename T>
class sp {
public:
    inline sp() : m_ptr(nullptr) { }

    // Constructors take a strong reference (implicit by design, hence the
    // NOLINT markers).
    sp(T* other); // NOLINT(implicit)
    sp(const sp<T>& other);
    sp(sp<T>&& other) noexcept;
    template<typename U> sp(U* other); // NOLINT(implicit)
    template<typename U> sp(const sp<U>& other); // NOLINT(implicit)
    template<typename U> sp(sp<U>&& other); // NOLINT(implicit)
    ~sp();

    // Assignment
    sp& operator = (T* other);
    sp& operator = (const sp<T>& other);
    sp& operator=(sp<T>&& other) noexcept;
    template<typename U> sp& operator = (const sp<U>& other);
    template<typename U> sp& operator = (sp<U>&& other);
    template<typename U> sp& operator = (U* other);

    //! Special optimization for use by ProcessState (and nobody else).
    void force_set(T* other);

    // Reset
    void clear();

    // Accessors
    inline T& operator* () const { return *m_ptr; }
    inline T* operator-> () const { return m_ptr; }
    inline T* get() const { return m_ptr; }
    inline explicit operator bool () const { return m_ptr != nullptr; }

    // Punt these to the wp<> implementation.
    template<typename U>
    inline bool operator == (const wp<U>& o) const {
        return o == *this;
    }
    template<typename U>
    inline bool operator != (const wp<U>& o) const {
        return o != *this;
    }

private:
    template<typename Y> friend class sp;
    template<typename Y> friend class wp;
    // Installs a pointer without touching refcounts (used by wp::promote).
    void set_pointer(T* ptr);
    // Heuristic warning for sp<> wrapping a stack-allocated object.
    static inline void check_not_on_stack(const void* ptr);
    T* m_ptr;
};
// Generates equality comparisons between sp<>/sp<>, raw/sp<>, sp<>/raw,
// and sp<>/nullptr for the given operator token.
#define COMPARE_STRONG(_op_)                                           \
    template <typename T, typename U>                                  \
    static inline bool operator _op_(const sp<T>& t, const sp<U>& u) { \
        return t.get() _op_ u.get();                                   \
    }                                                                  \
    template <typename T, typename U>                                  \
    static inline bool operator _op_(const T* t, const sp<U>& u) {     \
        return t _op_ u.get();                                         \
    }                                                                  \
    template <typename T, typename U>                                  \
    static inline bool operator _op_(const sp<T>& t, const U* u) {     \
        return t.get() _op_ u;                                         \
    }                                                                  \
    template <typename T>                                              \
    static inline bool operator _op_(const sp<T>& t, std::nullptr_t) { \
        return t.get() _op_ nullptr;                                   \
    }                                                                  \
    template <typename T>                                              \
    static inline bool operator _op_(std::nullptr_t, const sp<T>& t) { \
        return nullptr _op_ t.get();                                   \
    }

// Relational comparison of unrelated raw pointers with <,>,<=,>= is not
// guaranteed a total order by the language; routing through std::less and
// friends on the common pointer type makes it well-defined.
template <template <typename C> class comparator, typename T, typename U>
static inline bool _sp_compare_(T* a, U* b) {
    return comparator<typename std::common_type<T*, U*>::type>()(a, b);
}

// Same shape as COMPARE_STRONG but for ordering operators, funneled
// through _sp_compare_.
#define COMPARE_STRONG_FUNCTIONAL(_op_, _compare_)                     \
    template <typename T, typename U>                                  \
    static inline bool operator _op_(const sp<T>& t, const sp<U>& u) { \
        return _sp_compare_<_compare_>(t.get(), u.get());              \
    }                                                                  \
    template <typename T, typename U>                                  \
    static inline bool operator _op_(const T* t, const sp<U>& u) {     \
        return _sp_compare_<_compare_>(t, u.get());                    \
    }                                                                  \
    template <typename T, typename U>                                  \
    static inline bool operator _op_(const sp<T>& t, const U* u) {     \
        return _sp_compare_<_compare_>(t.get(), u);                    \
    }                                                                  \
    template <typename T>                                              \
    static inline bool operator _op_(const sp<T>& t, std::nullptr_t) { \
        return _sp_compare_<_compare_>(t.get(), nullptr);              \
    }                                                                  \
    template <typename T>                                              \
    static inline bool operator _op_(std::nullptr_t, const sp<T>& t) { \
        return _sp_compare_<_compare_>(nullptr, t.get());              \
    }

// Instantiate the full comparison surface, then drop the helper macros.
COMPARE_STRONG(==)
COMPARE_STRONG(!=)
COMPARE_STRONG_FUNCTIONAL(>, std::greater)
COMPARE_STRONG_FUNCTIONAL(<, std::less)
COMPARE_STRONG_FUNCTIONAL(<=, std::less_equal)
COMPARE_STRONG_FUNCTIONAL(>=, std::greater_equal)
#undef COMPARE_STRONG
#undef COMPARE_STRONG_FUNCTIONAL
// For code size reasons, we do not want these inlined or templated.
void sp_report_race();
void sp_report_stack_pointer();
// ---------------------------------------------------------------------------
// No user serviceable parts below here.
// Check whether address is definitely on the calling stack. We actually check whether it is on
// the same 4K page as the frame pointer.
//
// Assumptions:
// - Pages are never smaller than 4K (MIN_PAGE_SIZE)
// - Malloced memory never shares a page with a stack.
//
// It does not appear safe to broaden this check to include adjacent pages; apparently this code
// is used in environments where there may not be a guard page below (at higher addresses than)
// the bottom of the stack.
//
// TODO: Consider adding make_sp<T>() to allocate an object and wrap the resulting pointer safely
// without checking overhead.
// Heuristic stack-allocation detector (see block comment above): reports
// a pointer that shares the 4K page of the current frame pointer.
template <typename T>
void sp<T>::check_not_on_stack(const void* ptr) {
    static constexpr int MIN_PAGE_SIZE = 0x1000; // 4K. Safer than including sys/user.h.
    static constexpr uintptr_t MIN_PAGE_MASK = ~static_cast<uintptr_t>(MIN_PAGE_SIZE - 1);
    uintptr_t my_frame_address =
            reinterpret_cast<uintptr_t>(__builtin_frame_address(0 /* this frame */));
    // Same page as our frame => almost certainly a stack object.
    if (((reinterpret_cast<uintptr_t>(ptr) ^ my_frame_address) & MIN_PAGE_MASK) == 0) {
        sp_report_stack_pointer();
    }
}
// Construct from a raw pointer: takes one strong reference and warns if
// the object appears to live on the caller's stack.
template<typename T>
sp<T>::sp(T* other)
        : m_ptr(other) {
    if (other) {
        check_not_on_stack(other);
        other->incStrong(this);
    }
}

// Copy: share the pointee and add one strong reference.
template<typename T>
sp<T>::sp(const sp<T>& other)
        : m_ptr(other.m_ptr) {
    if (m_ptr)
        m_ptr->incStrong(this);
}

// Move: steal the pointer; the overall strong count is unchanged.
template <typename T>
sp<T>::sp(sp<T>&& other) noexcept : m_ptr(other.m_ptr) {
    other.m_ptr = nullptr;
}

// Converting constructors from a compatible type U (U* must convert to T*).
template<typename T> template<typename U>
sp<T>::sp(U* other)
        : m_ptr(other) {
    if (other) {
        check_not_on_stack(other);
        (static_cast<T*>(other))->incStrong(this);
    }
}
template<typename T> template<typename U>
sp<T>::sp(const sp<U>& other)
        : m_ptr(other.m_ptr) {
    if (m_ptr)
        m_ptr->incStrong(this);
}
template<typename T> template<typename U>
sp<T>::sp(sp<U>&& other)
        : m_ptr(other.m_ptr) {
    other.m_ptr = nullptr;
}

// Destructor releases this sp<>'s strong reference, if any.
template<typename T>
sp<T>::~sp() {
    if (m_ptr)
        m_ptr->decStrong(this);
}
// Copy assignment. The volatile double-read of m_ptr is a cheap data-race
// heuristic: if m_ptr changed underneath us between the two reads, another
// thread is mutating this sp<> concurrently and sp_report_race() fires.
// inc-before-dec keeps self-assignment safe.
template<typename T>
sp<T>& sp<T>::operator =(const sp<T>& other) {
    // Force m_ptr to be read twice, to heuristically check for data races.
    T* oldPtr(*const_cast<T* volatile*>(&m_ptr));
    T* otherPtr(other.m_ptr);
    if (otherPtr) otherPtr->incStrong(this);
    if (oldPtr) oldPtr->decStrong(this);
    if (oldPtr != *const_cast<T* volatile*>(&m_ptr)) sp_report_race();
    m_ptr = otherPtr;
    return *this;
}

// Move assignment: release our reference, then steal other's pointer.
template <typename T>
sp<T>& sp<T>::operator=(sp<T>&& other) noexcept {
    T* oldPtr(*const_cast<T* volatile*>(&m_ptr));
    if (oldPtr) oldPtr->decStrong(this);
    if (oldPtr != *const_cast<T* volatile*>(&m_ptr)) sp_report_race();
    m_ptr = other.m_ptr;
    other.m_ptr = nullptr;
    return *this;
}

// Assignment from a raw pointer: inc the new object (with the stack
// check) before releasing the old one.
template<typename T>
sp<T>& sp<T>::operator =(T* other) {
    T* oldPtr(*const_cast<T* volatile*>(&m_ptr));
    if (other) {
        check_not_on_stack(other);
        other->incStrong(this);
    }
    if (oldPtr) oldPtr->decStrong(this);
    if (oldPtr != *const_cast<T* volatile*>(&m_ptr)) sp_report_race();
    m_ptr = other;
    return *this;
}

// Converting copy assignment from sp<U>.
template<typename T> template<typename U>
sp<T>& sp<T>::operator =(const sp<U>& other) {
    T* oldPtr(*const_cast<T* volatile*>(&m_ptr));
    T* otherPtr(other.m_ptr);
    if (otherPtr) otherPtr->incStrong(this);
    if (oldPtr) oldPtr->decStrong(this);
    if (oldPtr != *const_cast<T* volatile*>(&m_ptr)) sp_report_race();
    m_ptr = otherPtr;
    return *this;
}

// Converting move assignment from sp<U>.
template<typename T> template<typename U>
sp<T>& sp<T>::operator =(sp<U>&& other) {
    T* oldPtr(*const_cast<T* volatile*>(&m_ptr));
    if (m_ptr) m_ptr->decStrong(this);
    if (oldPtr != *const_cast<T* volatile*>(&m_ptr)) sp_report_race();
    m_ptr = other.m_ptr;
    other.m_ptr = nullptr;
    return *this;
}

// Converting assignment from a raw U* (no stack check on this path,
// matching upstream behavior).
template<typename T> template<typename U>
sp<T>& sp<T>::operator =(U* other) {
    T* oldPtr(*const_cast<T* volatile*>(&m_ptr));
    if (other) (static_cast<T*>(other))->incStrong(this);
    if (oldPtr) oldPtr->decStrong(this);
    if (oldPtr != *const_cast<T* volatile*>(&m_ptr)) sp_report_race();
    m_ptr = other;
    return *this;
}
// ProcessState-only: take a strong reference unconditionally via
// forceIncStrong (works even when the strong count is currently zero).
template<typename T>
void sp<T>::force_set(T* other) {
    other->forceIncStrong(this);
    m_ptr = other;
}

// Release and null out, with the same double-read race heuristic used by
// the assignment operators.
template<typename T>
void sp<T>::clear() {
    T* oldPtr(*const_cast<T* volatile*>(&m_ptr));
    if (oldPtr) {
        oldPtr->decStrong(this);
        if (oldPtr != *const_cast<T* volatile*>(&m_ptr)) sp_report_race();
        m_ptr = nullptr;
    }
}

// Install a pointer WITHOUT adjusting refcounts; the caller (wp::promote)
// must already hold the strong reference being transferred.
template<typename T>
void sp<T>::set_pointer(T* ptr) {
    m_ptr = ptr;
}
} // namespace android
// ---------------------------------------------------------------------------
#endif // ANDROID_STRONG_POINTER_H

@ -0,0 +1,336 @@
/*
* Copyright (C) 2005 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_TYPE_HELPERS_H
#define ANDROID_TYPE_HELPERS_H
#include <new>
#include <type_traits>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>
// ---------------------------------------------------------------------------
namespace android {
/*
* Types traits
*/
// Opt-in trait flags: every type defaults to "not trivial"; pointers are
// recognized via the partial specialization of trait_pointer.
template <typename T> struct trait_trivial_ctor { enum { value = false }; };
template <typename T> struct trait_trivial_dtor { enum { value = false }; };
template <typename T> struct trait_trivial_copy { enum { value = false }; };
template <typename T> struct trait_trivial_move { enum { value = false }; };
template <typename T> struct trait_pointer { enum { value = false }; };
template <typename T> struct trait_pointer<T*> { enum { value = true }; };
// Aggregated view of the individual trait flags for one TYPE. Pointer
// types are trivial in every respect.
template <typename TYPE>
struct traits {
    enum {
        // whether this type is a pointer
        is_pointer = trait_pointer<TYPE>::value,
        // whether this type's constructor is a no-op
        has_trivial_ctor = is_pointer || trait_trivial_ctor<TYPE>::value,
        // whether this type's destructor is a no-op
        has_trivial_dtor = is_pointer || trait_trivial_dtor<TYPE>::value,
        // whether this type type can be copy-constructed with memcpy
        has_trivial_copy = is_pointer || trait_trivial_copy<TYPE>::value,
        // whether this type can be moved with memmove
        has_trivial_move = is_pointer || trait_trivial_move<TYPE>::value
    };
};
// Traits of a two-member aggregate: trivial only when BOTH members are.
// is_pointer is hard-coded false since an aggregate is never a pointer.
template <typename T, typename U>
struct aggregate_traits {
    enum {
        is_pointer = false,
        has_trivial_ctor =
            traits<T>::has_trivial_ctor && traits<U>::has_trivial_ctor,
        has_trivial_dtor =
            traits<T>::has_trivial_dtor && traits<U>::has_trivial_dtor,
        has_trivial_copy =
            traits<T>::has_trivial_copy && traits<U>::has_trivial_copy,
        has_trivial_move =
            traits<T>::has_trivial_move && traits<U>::has_trivial_move
    };
};
// Convenience macros that mark a concrete type as trivial for each
// operation by specializing the corresponding trait struct.
#define ANDROID_TRIVIAL_CTOR_TRAIT( T ) \
    template<> struct trait_trivial_ctor< T > { enum { value = true }; };
#define ANDROID_TRIVIAL_DTOR_TRAIT( T ) \
    template<> struct trait_trivial_dtor< T > { enum { value = true }; };
#define ANDROID_TRIVIAL_COPY_TRAIT( T ) \
    template<> struct trait_trivial_copy< T > { enum { value = true }; };
#define ANDROID_TRIVIAL_MOVE_TRAIT( T ) \
    template<> struct trait_trivial_move< T > { enum { value = true }; };
// Marks T trivial for all four operations at once.
#define ANDROID_BASIC_TYPES_TRAITS( T ) \
    ANDROID_TRIVIAL_CTOR_TRAIT( T ) \
    ANDROID_TRIVIAL_DTOR_TRAIT( T ) \
    ANDROID_TRIVIAL_COPY_TRAIT( T ) \
    ANDROID_TRIVIAL_MOVE_TRAIT( T )
// ---------------------------------------------------------------------------
/*
 * basic types traits
 */
ANDROID_BASIC_TYPES_TRAITS( void )
ANDROID_BASIC_TYPES_TRAITS( bool )
ANDROID_BASIC_TYPES_TRAITS( char )
ANDROID_BASIC_TYPES_TRAITS( unsigned char )
ANDROID_BASIC_TYPES_TRAITS( short )
ANDROID_BASIC_TYPES_TRAITS( unsigned short )
ANDROID_BASIC_TYPES_TRAITS( int )
ANDROID_BASIC_TYPES_TRAITS( unsigned int )
ANDROID_BASIC_TYPES_TRAITS( long )
ANDROID_BASIC_TYPES_TRAITS( unsigned long )
ANDROID_BASIC_TYPES_TRAITS( long long )
ANDROID_BASIC_TYPES_TRAITS( unsigned long long )
ANDROID_BASIC_TYPES_TRAITS( float )
ANDROID_BASIC_TYPES_TRAITS( double )
// ---------------------------------------------------------------------------
/*
* compare and order types
*/
// Returns 1 when lhs orders strictly before rhs, 0 otherwise.
// Relies solely on TYPE's operator<; override for custom orderings.
template<typename TYPE> inline
int strictly_order_type(const TYPE& lhs, const TYPE& rhs) {
    if (lhs < rhs) {
        return 1;
    }
    return 0;
}
// Three-way comparison built on strictly_order_type: negative when
// lhs < rhs, positive when rhs < lhs, zero when equivalent.
template<typename TYPE> inline
int compare_type(const TYPE& lhs, const TYPE& rhs) {
    const int lhsFirst = strictly_order_type(lhs, rhs);
    const int rhsFirst = strictly_order_type(rhs, lhs);
    return rhsFirst - lhsFirst;
}
/*
* create, destroy, copy and move types...
*/
// Default-construct n objects in the raw storage at p.
// A no-op when TYPE's constructor is known trivial via traits<>.
template<typename TYPE> inline
void construct_type(TYPE* p, size_t n) {
    if (!traits<TYPE>::has_trivial_ctor) {
        for (size_t i = 0; i < n; ++i) {
            new (p + i) TYPE;
        }
    }
}
// Destroy n objects starting at p, leaving the storage raw.
// A no-op when TYPE's destructor is known trivial via traits<>.
template<typename TYPE> inline
void destroy_type(TYPE* p, size_t n) {
    if (!traits<TYPE>::has_trivial_dtor) {
        for (size_t i = 0; i < n; ++i) {
            p[i].~TYPE();
        }
    }
}
// Copy-construct n objects from s into the raw storage at d.
// Trivially-copyable TYPEs (per traits<>) collapse to one memcpy.
template<typename TYPE>
typename std::enable_if<traits<TYPE>::has_trivial_copy>::type
inline
copy_type(TYPE* d, const TYPE* s, size_t n) {
    memcpy(d,s,n*sizeof(TYPE));
}
// Non-trivial TYPEs are placement-new copy-constructed one by one.
template<typename TYPE>
typename std::enable_if<!traits<TYPE>::has_trivial_copy>::type
inline
copy_type(TYPE* d, const TYPE* s, size_t n) {
    while (n > 0) {
        n--;
        new(d) TYPE(*s);
        d++, s++;
    }
}
// Replicate the single value *what into n slots starting at where
// ("splat"). Non-trivial types are copy-constructed into raw storage;
// trivial types are plain-assigned.
template<typename TYPE> inline
void splat_type(TYPE* where, const TYPE* what, size_t n) {
    if (!traits<TYPE>::has_trivial_copy) {
        while (n > 0) {
            n--;
            new(where) TYPE(*what);
            where++;
        }
    } else {
        while (n > 0) {
            n--;
            *where++ = *what;
        }
    }
}
// True when a TYPE can be relocated with memmove: either explicitly
// trivially movable, or both trivially copyable and destructible.
template<typename TYPE>
struct use_trivial_move : public std::integral_constant<bool,
    (traits<TYPE>::has_trivial_dtor && traits<TYPE>::has_trivial_copy)
        || traits<TYPE>::has_trivial_move
> {};
// Relocate n objects from s to d where the ranges may overlap with d at a
// HIGHER address ("forward" move). Trivial types use memmove directly.
template<typename TYPE>
typename std::enable_if<use_trivial_move<TYPE>::value>::type
inline
move_forward_type(TYPE* d, const TYPE* s, size_t n = 1) {
    memmove(d, s, n*sizeof(TYPE));
}
// Non-trivial forward move: iterate back-to-front so an overlapping
// destination never clobbers a source element before it is copied; each
// source element is destroyed right after its copy ("move" = copy+destroy).
template<typename TYPE>
typename std::enable_if<!use_trivial_move<TYPE>::value>::type
inline
move_forward_type(TYPE* d, const TYPE* s, size_t n = 1) {
    d += n;
    s += n;
    while (n > 0) {
        n--;
        --d, --s;
        if (!traits<TYPE>::has_trivial_copy) {
            new(d) TYPE(*s);
        } else {
            *d = *s;
        }
        if (!traits<TYPE>::has_trivial_dtor) {
            s->~TYPE();
        }
    }
}
// Relocate with d at a LOWER address ("backward" move); memmove handles
// overlap for trivial types.
template<typename TYPE>
typename std::enable_if<use_trivial_move<TYPE>::value>::type
inline
move_backward_type(TYPE* d, const TYPE* s, size_t n = 1) {
    memmove(d, s, n*sizeof(TYPE));
}
// Non-trivial backward move: iterate front-to-back (the overlap-safe
// direction when d < s), copy then destroy each source element.
template<typename TYPE>
typename std::enable_if<!use_trivial_move<TYPE>::value>::type
inline
move_backward_type(TYPE* d, const TYPE* s, size_t n = 1) {
    while (n > 0) {
        n--;
        if (!traits<TYPE>::has_trivial_copy) {
            new(d) TYPE(*s);
        } else {
            *d = *s;
        }
        if (!traits<TYPE>::has_trivial_dtor) {
            s->~TYPE();
        }
        d++, s++;
    }
}
// ---------------------------------------------------------------------------
/*
* a key/value pair
*/
// Simple key/value pair used by KeyedVector<>. Ordering is by key only
// (via strictly_order_type); the value does not participate.
template <typename KEY, typename VALUE>
struct key_value_pair_t {
    typedef KEY key_t;
    typedef VALUE value_t;
    KEY key;
    VALUE value;
    key_value_pair_t() { }
    key_value_pair_t(const key_value_pair_t& o) : key(o.key), value(o.value) { }
    key_value_pair_t& operator=(const key_value_pair_t& o) {
        key = o.key;
        value = o.value;
        return *this;
    }
    key_value_pair_t(const KEY& k, const VALUE& v) : key(k), value(v) { }
    // Key-only constructor: value is default-constructed (used for lookups).
    explicit key_value_pair_t(const KEY& k) : key(k) { }
    inline bool operator < (const key_value_pair_t& o) const {
        return strictly_order_type(key, o.key);
    }
    inline const KEY& getKey() const {
        return key;
    }
    inline const VALUE& getValue() const {
        return value;
    }
};
// A pair is trivial for an operation only when both members are,
// delegated to aggregate_traits.
template <typename K, typename V>
struct trait_trivial_ctor< key_value_pair_t<K, V> >
{ enum { value = aggregate_traits<K,V>::has_trivial_ctor }; };
template <typename K, typename V>
struct trait_trivial_dtor< key_value_pair_t<K, V> >
{ enum { value = aggregate_traits<K,V>::has_trivial_dtor }; };
template <typename K, typename V>
struct trait_trivial_copy< key_value_pair_t<K, V> >
{ enum { value = aggregate_traits<K,V>::has_trivial_copy }; };
template <typename K, typename V>
struct trait_trivial_move< key_value_pair_t<K, V> >
{ enum { value = aggregate_traits<K,V>::has_trivial_move }; };
// ---------------------------------------------------------------------------
/*
* Hash codes.
*/
// 32-bit hash codes for the container implementations.
typedef uint32_t hash_t;
// Primary template is declared but not defined: using an unsupported key
// type fails at link time unless a specialization exists.
template <typename TKey>
hash_t hash_type(const TKey& key);
/* Built-in hash code specializations */
// Types that fit in 32 bits hash to their own value.
#define ANDROID_INT32_HASH(T) \
    template <> inline hash_t hash_type(const T& value) { return hash_t(value); }
// 64-bit types fold the high word into the low word with xor.
#define ANDROID_INT64_HASH(T) \
    template <> inline hash_t hash_type(const T& value) { \
        return hash_t((value >> 32) ^ value); }
// Bit-cast value to an integer of identical size, then hash that;
// memcpy avoids strict-aliasing violations.
#define ANDROID_REINTERPRET_HASH(T, R) \
    template <> inline hash_t hash_type(const T& value) { \
        R newValue; \
        static_assert(sizeof(newValue) == sizeof(value), "size mismatch"); \
        memcpy(&newValue, &value, sizeof(newValue)); \
        return hash_type(newValue); \
    }
ANDROID_INT32_HASH(bool)
ANDROID_INT32_HASH(int8_t)
ANDROID_INT32_HASH(uint8_t)
ANDROID_INT32_HASH(int16_t)
ANDROID_INT32_HASH(uint16_t)
ANDROID_INT32_HASH(int32_t)
ANDROID_INT32_HASH(uint32_t)
ANDROID_INT64_HASH(int64_t)
ANDROID_INT64_HASH(uint64_t)
ANDROID_REINTERPRET_HASH(float, uint32_t)
ANDROID_REINTERPRET_HASH(double, uint64_t)
// Pointers hash by address.
template <typename T> inline hash_t hash_type(T* const & value) {
    return hash_type(uintptr_t(value));
}
} // namespace android
// ---------------------------------------------------------------------------
#endif // ANDROID_TYPE_HELPERS_H

@ -221,6 +221,48 @@ bool YoloV5Ncnn_Init(const std::string& paramFile, const std::string& binFile)
return true;
}
// Initialize a caller-owned ncnn::Net with a YOLOv5 model.
// Mirrors the global-net overload above but lets the caller manage the
// network's lifetime. Returns false if either model file fails to load.
bool YoloV5Ncnn_Init(ncnn::Net& net, const std::string& paramFile, const std::string& binFile)
{
    ncnn::Option opt;
    opt.lightmode = true;
    opt.num_threads = 4;
    // Shared file-level pool allocators cut per-inference allocation churn.
    opt.blob_allocator = &g_blob_pool_allocator;
    opt.workspace_allocator = &g_workspace_pool_allocator;
    opt.use_packing_layout = true;
    // use vulkan compute
    if (ncnn::get_gpu_count() != 0)
        opt.use_vulkan_compute = true;
    // AAssetManager* mgr = AAssetManager_fromJava(env, assetManager);
    net.opt = opt;
    // YoloV5Focus is a custom layer not built into ncnn; must be
    // registered before load_param parses the graph.
    net.register_custom_layer("YoloV5Focus", YoloV5Focus_layer_creator);
    // init param
    {
        int ret = net.load_param(paramFile.c_str());
        if (ret != 0)
        {
            // __android_log_print(ANDROID_LOG_DEBUG, "YoloV5Ncnn", "load_param failed");
            return false;
        }
    }
    // init bin
    {
        int ret = net.load_model(binFile.c_str());
        if (ret != 0)
        {
            // __android_log_print(ANDROID_LOG_DEBUG, "YoloV5Ncnn", "load_model failed");
            return false;
        }
    }
    return true;
}
// public native Obj[] Detect(Bitmap bitmap, boolean use_gpu);
bool YoloV5NcnnDetect( ncnn::Mat& mat, bool use_gpu, std::vector<IDevice::RECOG_OBJECT>& objects)
{
@ -601,3 +643,159 @@ bool YoloV5NcnnDetect( cv::Mat& mat, bool use_gpu, const std::string& blobName8,
return true;
}
// Run YOLOv5 detection on a cv::Mat using a caller-provided ncnn::Net.
// Pipeline: letterbox-resize to 640 (padded to a multiple of 32),
// normalize to [0,1], decode the three YOLO output scales named by
// blobName8/16/32, NMS, then map boxes back to original image coords.
// Returns false only when a GPU was requested but none is available.
bool YoloV5NcnnDetect( ncnn::Net& net, cv::Mat& mat, bool use_gpu, const std::string& blobName8, const std::string& blobName16, const std::string& blobName32, std::vector<IDevice::RECOG_OBJECT>& objects)
{
    if (use_gpu && ncnn::get_gpu_count() == 0)
    {
        return false;
        //return env->NewStringUTF("no vulkan capable gpu");
    }
    // AndroidBitmapInfo info;
    // AndroidBitmap_getInfo(env, bitmap, &info);
    const int width = mat.cols;
    const int height = mat.rows;
    // if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888)
    //     return NULL;

    // ncnn from bitmap
    const int target_size = 640;

    // letterbox pad to multiple of 32: scale the longer side to 640,
    // preserving the aspect ratio.
    int w = width;
    int h = height;
    float scale = 1.f;
    if (w > h)
    {
        scale = (float)target_size / w;
        w = target_size;
        h = h * scale;
    }
    else
    {
        scale = (float)target_size / h;
        h = target_size;
        w = w * scale;
    }

    // Input mat is OpenCV BGR; converted to RGB while resizing.
    ncnn::Mat in = ncnn::Mat::from_pixels_resize(mat.data, ncnn::Mat::PIXEL_BGR2RGB, mat.cols, mat.rows, w, h);

    // pad to target_size rectangle
    // yolov5/utils/datasets.py letterbox
    int wpad = (w + 31) / 32 * 32 - w;
    int hpad = (h + 31) / 32 * 32 - h;
    ncnn::Mat in_pad;
    // 114 is the standard YOLOv5 letterbox gray fill value.
    ncnn::copy_make_border(in, in_pad, hpad / 2, hpad - hpad / 2, wpad / 2, wpad - wpad / 2, ncnn::BORDER_CONSTANT, 114.f);

    // yolov5
    {
        const float prob_threshold = 0.25f;
        const float nms_threshold = 0.45f;
        // Scale pixels to [0,1]; no mean subtraction.
        const float norm_vals[3] = {1 / 255.f, 1 / 255.f, 1 / 255.f};
        in_pad.substract_mean_normalize(0, norm_vals);

        ncnn::Extractor ex = net.create_extractor();
        ex.set_vulkan_compute(use_gpu);
        ex.input("images", in_pad);

        std::vector<IDevice::RECOG_OBJECT> proposals;
        // anchor setting from yolov5/models/yolov5s.yaml
        // stride 8
        {
            ncnn::Mat out;
            ex.extract(blobName8.c_str(), out);
            // Three anchors per scale, stored as (w,h) pairs.
            ncnn::Mat anchors(6);
            anchors[0] = 10.f;
            anchors[1] = 13.f;
            anchors[2] = 16.f;
            anchors[3] = 30.f;
            anchors[4] = 33.f;
            anchors[5] = 23.f;
            std::vector<IDevice::RECOG_OBJECT> objects8;
            generate_proposals(anchors, 8, in_pad, out, prob_threshold, objects8);
            proposals.insert(proposals.end(), objects8.begin(), objects8.end());
        }
        // stride 16
        {
            ncnn::Mat out;
            ex.extract(blobName16.c_str(), out);
            ncnn::Mat anchors(6);
            anchors[0] = 30.f;
            anchors[1] = 61.f;
            anchors[2] = 62.f;
            anchors[3] = 45.f;
            anchors[4] = 59.f;
            anchors[5] = 119.f;
            std::vector<IDevice::RECOG_OBJECT> objects16;
            generate_proposals(anchors, 16, in_pad, out, prob_threshold, objects16);
            proposals.insert(proposals.end(), objects16.begin(), objects16.end());
        }
        // stride 32
        {
            ncnn::Mat out;
            ex.extract(blobName32.c_str(), out);
            ncnn::Mat anchors(6);
            anchors[0] = 116.f;
            anchors[1] = 90.f;
            anchors[2] = 156.f;
            anchors[3] = 198.f;
            anchors[4] = 373.f;
            anchors[5] = 326.f;
            std::vector<IDevice::RECOG_OBJECT> objects32;
            generate_proposals(anchors, 32, in_pad, out, prob_threshold, objects32);
            proposals.insert(proposals.end(), objects32.begin(), objects32.end());
        }

        // sort all proposals by score from highest to lowest
        qsort_descent_inplace(proposals);
        // apply nms with nms_threshold
        std::vector<int> picked;
        nms_sorted_bboxes(proposals, picked, nms_threshold);
        int count = picked.size();
        objects.resize(count);
        for (int i = 0; i < count; i++)
        {
            objects[i] = proposals[picked[i]];
            // adjust offset to original unpadded
            float x0 = (objects[i].x - (wpad / 2)) / scale;
            float y0 = (objects[i].y - (hpad / 2)) / scale;
            float x1 = (objects[i].x + objects[i].w - (wpad / 2)) / scale;
            float y1 = (objects[i].y + objects[i].h - (hpad / 2)) / scale;
            // clip to the original image bounds
            x0 = std::max(std::min(x0, (float)(width - 1)), 0.f);
            y0 = std::max(std::min(y0, (float)(height - 1)), 0.f);
            x1 = std::max(std::min(x1, (float)(width - 1)), 0.f);
            y1 = std::max(std::min(y1, (float)(height - 1)), 0.f);
            objects[i].x = x0;
            objects[i].y = y0;
            objects[i].w = x1 - x0;
            objects[i].h = y1 - y0;
        }
    }
    return true;
}

@ -107,6 +107,9 @@ inline void ncnn_uninit()
// public native boolean Init(AssetManager mgr);
bool YoloV5Ncnn_Init(const std::string& paramFile, const std::string& binFile);
bool YoloV5Ncnn_Init(ncnn::Net& net, const std::string& paramFile, const std::string& binFile);
// public native Obj[] Detect(Bitmap bitmap, boolean use_gpu);
bool YoloV5NcnnDetect( ncnn::Mat& mat, bool use_gpu, std::vector<IDevice::RECOG_OBJECT>& objects);
bool YoloV5NcnnDetect( cv::Mat& mat, bool use_gpu, const std::string& blobName8, const std::string& blobName16, const std::string& blobName32, std::vector<IDevice::RECOG_OBJECT>& objects);
bool YoloV5NcnnDetect( cv::Mat& mat, bool use_gpu, const std::string& blobName8, const std::string& blobName16, const std::string& blobName32, std::vector<IDevice::RECOG_OBJECT>& objects);
bool YoloV5NcnnDetect( ncnn::Net& net, cv::Mat& mat, bool use_gpu, const std::string& blobName8, const std::string& blobName16, const std::string& blobName32, std::vector<IDevice::RECOG_OBJECT>& objects);

@ -142,11 +142,6 @@ static void set12VEnable(bool z) {
setInt(CMD_SET_12V_EN_STATE, z ? 1 : 0);
}
// Drive the camera 3.3V power-rail enable line (1 = on, 0 = off) through
// the generic setInt(cmd, value) GPIO helper.
static void setCam3V3Enable(bool enabled)
{
    setInt(CMD_SET_CAM_3V3_EN_STATE, enabled ? 1 : 0);
}
/*********************************************************************************
* *

@ -25,11 +25,13 @@ public class BridgeActivity extends AppCompatActivity {
// Shared logcat tag for the app.
private final static String TAG = "MPLOG";
// Values of the Intent "action" extra dispatched in onCreate():
private final static String ACTION_IMP_PUBKEY = "imp_pubkey";   // import a public key (inline cert or file path)
private final static String ACTION_IMP_PRIKEY = "imp_prikey";   // import a private key (inline cert or file path)
private final static String ACTION_GEN_KEYS = "gen_keys";       // generate a key pair at the given index
private final static String ACTION_CERT_REQ = "cert_req";       // NOTE(review): handler not visible in this chunk - confirm semantics
private final static String ACTION_BATTERY_VOLTAGE = "query_bv"; // NOTE(review): handler not visible in this chunk
private final static String ACTION_RECORDING = "recording";     // start video recording via Camera2VideoActivity
private final static String ACTION_TAKE_PHOTO = "take_photo";   // NOTE(review): handler not visible in this chunk
private final static String ACTION_QUERY_SEC_VERSION = "query_sec_v"; // write the sec-chip version to resultFile
// Request code reused from Camera2VideoActivity for the recording result.
private final static int REQUEST_CODE_RECORDING = Camera2VideoActivity.REQUEST_CODE_RECORDING;
@ -49,25 +51,68 @@ public class BridgeActivity extends AppCompatActivity {
Intent intent = getIntent();
final String action = intent.getStringExtra("action");
if (!TextUtils.isEmpty(action)) {
if (TextUtils.equals(action, ACTION_IMP_PUBKEY)) {
if (TextUtils.equals(action, ACTION_QUERY_SEC_VERSION)) {
String resultFile = intent.getStringExtra("resultFile");
String version = MicroPhotoService.querySecVersion();
if (!TextUtils.isEmpty(resultFile)) {
FilesUtils.ensureParentDirectoryExisted(resultFile);
FilesUtils.writeTextFile(resultFile, version);
}
} else if (TextUtils.equals(action, ACTION_IMP_PUBKEY)) {
String cert = intent.getStringExtra("cert");
String path = intent.getStringExtra("path");
int index = intent.getIntExtra("index", 1);
String resultFile = intent.getStringExtra("resultFile");
int index = intent.getIntExtra("index", 0);
boolean res = false;
if (!TextUtils.isEmpty(cert)) {
// Import
// String cert = intent.getStringExtra("md5");
byte[] content = Base64.decode(cert, Base64.DEFAULT);
if (content != null) {
MicroPhotoService.importPublicKey(index, content);
res = MicroPhotoService.importPublicKey(index, content);
}
} else if (TextUtils.isEmpty(path)) {
String md5 = intent.getStringExtra("md5");
File file = new File(path);
if (file.exists() && file.isFile()) {
res = MicroPhotoService.importPublicKeyFile(index, path, md5);
}
}
if (!TextUtils.isEmpty(resultFile)) {
FilesUtils.ensureParentDirectoryExisted(resultFile);
FilesUtils.writeTextFile(resultFile, res ? "1" : "0");
}
} else if (TextUtils.equals(action, ACTION_IMP_PRIKEY)) {
String cert = intent.getStringExtra("cert");
String path = intent.getStringExtra("path");
String resultFile = intent.getStringExtra("resultFile");
int index = intent.getIntExtra("index", 0);
boolean res = false;
if (!TextUtils.isEmpty(cert)) {
// Import
byte[] content = Base64.decode(cert, Base64.DEFAULT);
if (content != null) {
res = MicroPhotoService.importPrivateKey(index, content);
}
} else if (TextUtils.isEmpty(path)) {
String md5 = intent.getStringExtra("md5");
File file = new File(path);
if (file.exists() && file.isFile()) {
MicroPhotoService.importPublicKeyFile(index, path, md5);
res = MicroPhotoService.importPrivateKeyFile(index, path, md5);
}
}
Log.i(TAG, "Import Private Key result=" + (res ? "1" : "0"));
if (!TextUtils.isEmpty(resultFile)) {
FilesUtils.ensureParentDirectoryExisted(resultFile);
FilesUtils.writeTextFile(resultFile, res ? "1" : "0");
}
} else if (TextUtils.equals(action, ACTION_GEN_KEYS)) {
int index = intent.getIntExtra("index", 0);
boolean res = MicroPhotoService.genKeys(index);
@ -184,13 +229,13 @@ public class BridgeActivity extends AppCompatActivity {
}
Intent recordingIntent = MicroPhotoService.makeRecordingIntent(getApplicationContext(),
cameraId, videoId, duration, width, height, quality, orientation,
leftTopOsd, rightTopOsd, rightBottomOsd, leftBottomOsd);
false, cameraId, videoId, duration, width, height, quality, orientation,
leftTopOsd, rightTopOsd, rightBottomOsd, leftBottomOsd, null);
mVideoFilePath = path;
mAutoClose = false;
recordingIntent.putExtra("ActivityResult", true);
recordingIntent.putExtra("resultType", 1);
startActivityForResult(recordingIntent, REQUEST_CODE_RECORDING);
}
}

@ -0,0 +1,419 @@
package com.xypower.mpapp;
import android.content.ContentProvider;
import android.content.ContentValues;
import android.content.Context;
import android.content.Intent;
import android.content.UriMatcher;
import android.database.Cursor;
import android.database.MatrixCursor;
import android.net.Uri;
import android.text.TextUtils;
import android.util.Base64;
import android.util.Log;
import com.xypower.common.FilesUtils;
import com.xypower.common.JSONUtils;
import com.xypower.common.MicroPhotoContext;
import org.json.JSONObject;
import java.io.File;
public class BridgeProvider extends ContentProvider {
private final static String TAG = "MPLOG";
private final static String AUTHORITY = "com.xypower.mpapp.provider";
private final static String PATH_QUERY_SEC_VERSION = "/querySecVersion";
private final static String PATH_QUERY_BATTERY_VOLTAGE = "/queryBatVol";
private final static String PATH_IMP_PRI_KEY = "/importPriKey";
private final static String PATH_IMP_PUB_KEY = "/importPubKey";
private final static String PATH_GEN_KEYS = "/genKeys";
private final static String PATH_GEN_CERT_REQ = "/genCertReq";
private final static String PATH_TAKE_PHOTO = "/takePhoto";
private final static String PATH_TAKE_VIDEO = "/takeVideo";
private final static String PATH_RECOG_PIC = "/recogPic";
// Required public no-arg constructor for the ContentProvider framework;
// logs instantiation for debugging.
public BridgeProvider() {
    Log.i(TAG, "BridgeProvider");
}
// Row deletion is not part of this bridge's contract.
@Override
public int delete(Uri uri, String selection, String[] selectionArgs) {
    // Implement this to handle requests to delete one or more rows.
    throw new UnsupportedOperationException("Not yet implemented");
}
// MIME types are not exposed; callers address operations by URI path only.
@Override
public String getType(Uri uri) {
    // TODO: Implement this to handle requests for the MIME type of the data
    // at the given URI.
    throw new UnsupportedOperationException("Not yet implemented");
}
// Inserts are unsupported; returns null (no row URI) rather than throwing.
@Override
public Uri insert(Uri uri, ContentValues values) {
    return null;
}
// No startup work needed; returning true reports the provider as loaded.
@Override
public boolean onCreate() {
    // TODO: Implement this to initialize your content provider on startup.
    return true;
}
/**
 * Read-style bridge entry point. The URI path selects the operation:
 * security-chip version query, battery voltage query, or picture
 * recognition. Unknown paths yield a null cursor.
 */
@Override
public Cursor query(Uri uri, String[] projection, String selection,
                    String[] selectionArgs, String sortOrder) {
    final UriMatcher uriMatcher = new UriMatcher(UriMatcher.NO_MATCH);
    Log.i(TAG, uri.toString());
    uriMatcher.addURI(AUTHORITY, PATH_QUERY_SEC_VERSION, 1);
    uriMatcher.addURI(AUTHORITY, PATH_QUERY_BATTERY_VOLTAGE, 2);
    uriMatcher.addURI(AUTHORITY, PATH_RECOG_PIC, 3);
    final int code = uriMatcher.match(uri);
    if (code == 1) {
        return querySecVersion();
    }
    if (code == 2) {
        return queryBattaryVoltage();
    }
    if (code == 3) {
        return recoganizePicture(uri, selection, selectionArgs);
    }
    return null;
}
/**
 * Write-style bridge entry point: the URI path selects key import,
 * key/cert generation, or photo/video capture. Returns the operation's
 * result code, or 0 for an unrecognized path.
 */
@Override
public int update(Uri uri, ContentValues values, String selection,
                  String[] selectionArgs) {
    final UriMatcher uriMatcher = new UriMatcher(UriMatcher.NO_MATCH);
    uriMatcher.addURI(AUTHORITY, PATH_IMP_PRI_KEY, 1);
    uriMatcher.addURI(AUTHORITY, PATH_IMP_PUB_KEY, 2);
    uriMatcher.addURI(AUTHORITY, PATH_GEN_KEYS, 3);
    uriMatcher.addURI(AUTHORITY, PATH_GEN_CERT_REQ, 4);
    uriMatcher.addURI(AUTHORITY, PATH_TAKE_PHOTO, 5);
    uriMatcher.addURI(AUTHORITY, PATH_TAKE_VIDEO, 6);
    switch (uriMatcher.match(uri)) {
        case 1:
            return importPrivateKey(uri, values);
        case 2:
            return importPublicKey(uri, values);
        case 3:
            return genKeys(uri, values);
        case 4:
            return genCertReq(uri, values);
        case 5:
            return takePhoto(uri, values);
        case 6:
            return takeVideo(uri, values);
        default:
            return 0;
    }
}
/**
 * Returns the security-chip version (from the native layer) as a one-row
 * cursor with a single "version" column.
 */
private Cursor querySecVersion() {
    MatrixCursor result = new MatrixCursor(new String[] { "version" }, 1);
    result.addRow(new Object[] { MicroPhotoService.querySecVersion() });
    return result;
}
/**
 * Reads two battery-related GPIO values and returns them as a one-row cursor
 * with columns "bv" and "bcv". (Name keeps the historical "Battary" spelling;
 * the method is private and only called from query().)
 */
private Cursor queryBattaryVoltage() {
    // Native GPIO command codes:
    // #define CMD_GET_CHARGING_BUS_VOLTAGE_STATE 112
    // #define CMD_GET_BAT_VOL_STATE 115
    // #define CMD_GET_BAT_BUS_VOLTAGE_STATE 117
    int batBusVoltage = MicroPhotoService.getGpioInt(117);
    int chargingBusVoltage = MicroPhotoService.getGpioInt(112);
    MatrixCursor result = new MatrixCursor(new String[] { "bv", "bcv" }, 1);
    result.addRow(new Object[] { batBusVoltage, chargingBusVoltage });
    return result;
}
/**
 * Runs picture recognition and returns detections as a cursor with columns
 * x, y, w, h, label, prob — one row per detection (6 ints each).
 *
 * <p>{@code selection} is a Base64-encoded query string carrying the model
 * paths (param, bin), blob names (b8, b16, b32) and the picture path.
 * Returns {@code null} if any parameter is missing or nothing was detected.
 */
private Cursor recoganizePicture(Uri uri, String selection, String[] selectionArgs) {
    String query = stringFromBase64(selection);
    if (TextUtils.isEmpty(query)) {
        return null;
    }
    // Reuse Uri's query-string parser on the decoded payload.
    Uri parsed = Uri.parse("http://a.com/?" + query);
    String paramPath = parsed.getQueryParameter("param");
    String binPath = parsed.getQueryParameter("bin");
    String blobName8 = parsed.getQueryParameter("b8");
    String blobName16 = parsed.getQueryParameter("b16");
    String blobName32 = parsed.getQueryParameter("b32");
    String picPath = parsed.getQueryParameter("path");
    if (TextUtils.isEmpty(paramPath) || TextUtils.isEmpty(binPath) || TextUtils.isEmpty(blobName8)
            || TextUtils.isEmpty(blobName16) || TextUtils.isEmpty(blobName32) || TextUtils.isEmpty(picPath)) {
        return null;
    }
    int[] data = MicroPhotoService.recoganizePicture(paramPath, binPath, blobName8, blobName16, blobName32, picPath);
    if (data == null) {
        return null;
    }
    // Each detection occupies 6 consecutive ints; trailing partial data is ignored.
    int rows = data.length / 6;
    if (rows == 0) {
        return null;
    }
    MatrixCursor result = new MatrixCursor(new String[] { "x", "y", "w", "h", "label", "prob" }, rows);
    for (int offset = 0; offset < rows * 6; offset += 6) {
        result.addRow(new Object[] { data[offset], data[offset + 1], data[offset + 2],
                data[offset + 3], data[offset + 4], data[offset + 5] });
    }
    return result;
}
/**
 * Imports a private key into slot {@code index}, either from a Base64-encoded
 * blob ("cert") or, failing that, from a file on disk ("path", with optional
 * "md5" checksum). If "resultFile" is given, writes "1"/"0" there as well.
 *
 * @return 1 on success, 0 on failure
 */
private int importPrivateKey(Uri uri, ContentValues values) {
    String cert = values.containsKey("cert") ? values.getAsString("cert") : null;
    String path = values.containsKey("path") ? values.getAsString("path") : null;
    String resultFile = values.containsKey("resultFile") ? values.getAsString("resultFile") : null;
    int index = values.containsKey("index") ? values.getAsInteger("index").intValue() : 0;
    Log.i(TAG, "Start import private key");
    boolean res = false;
    if (!TextUtils.isEmpty(cert)) {
        // Inline import: the key material travels Base64-encoded in "cert".
        byte[] content = Base64.decode(cert, Base64.DEFAULT);
        if (content != null) {
            res = MicroPhotoService.importPrivateKey(index, content);
        }
    } else if (!TextUtils.isEmpty(path)) {
        // BUGFIX: condition was TextUtils.isEmpty(path), so the file-based
        // import only ran when the path was empty — and then dereferenced it.
        String md5 = values.containsKey("md5") ? values.getAsString("md5") : null;
        File file = new File(path);
        if (file.exists() && file.isFile()) {
            res = MicroPhotoService.importPrivateKeyFile(index, path, md5);
        }
    }
    Log.i(TAG, "Finish import private key");
    if (!TextUtils.isEmpty(resultFile)) {
        FilesUtils.ensureParentDirectoryExisted(resultFile);
        FilesUtils.writeTextFile(resultFile, res ? "1" : "0");
    }
    return res ? 1 : 0;
}
/**
 * Imports a public key into slot {@code index}, either from a Base64-encoded
 * blob ("cert") or, failing that, from a file on disk ("path", with optional
 * "md5" checksum). If "resultFile" is given, writes "1"/"0" there as well.
 *
 * @return 1 on success, 0 on failure
 */
private int importPublicKey(Uri uri, ContentValues values) {
    String cert = values.containsKey("cert") ? values.getAsString("cert") : null;
    String path = values.containsKey("path") ? values.getAsString("path") : null;
    String resultFile = values.containsKey("resultFile") ? values.getAsString("resultFile") : null;
    int index = values.containsKey("index") ? values.getAsInteger("index").intValue() : 0;
    boolean res = false;
    if (!TextUtils.isEmpty(cert)) {
        // Inline import: the key material travels Base64-encoded in "cert".
        byte[] content = Base64.decode(cert, Base64.DEFAULT);
        if (content != null) {
            res = MicroPhotoService.importPublicKey(index, content);
        }
    } else if (!TextUtils.isEmpty(path)) {
        // BUGFIX: condition was TextUtils.isEmpty(path), so the file-based
        // import only ran when the path was empty — and then dereferenced it.
        String md5 = values.containsKey("md5") ? values.getAsString("md5") : null;
        File file = new File(path);
        if (file.exists() && file.isFile()) {
            res = MicroPhotoService.importPublicKeyFile(index, path, md5);
        }
    }
    if (!TextUtils.isEmpty(resultFile)) {
        FilesUtils.ensureParentDirectoryExisted(resultFile);
        FilesUtils.writeTextFile(resultFile, res ? "1" : "0");
    }
    return res ? 1 : 0;
}
/**
 * Generates a key pair in slot "index" (default 0) via the native layer and,
 * when "resultFile" is supplied, writes "1"/"0" there to signal the outcome.
 *
 * @return 1 on success, 0 on failure
 */
private int genKeys(Uri uri, ContentValues values) {
    int index = values.containsKey("index") ? values.getAsInteger("index").intValue() : 0;
    String resultFile = values.containsKey("resultFile") ? values.getAsString("resultFile") : null;
    boolean ok = MicroPhotoService.genKeys(index);
    if (!TextUtils.isEmpty(resultFile)) {
        FilesUtils.ensureParentDirectoryExisted(resultFile);
        FilesUtils.writeTextFile(resultFile, ok ? "1" : "0");
    }
    return ok ? 1 : 0;
}
/**
 * Generates a certificate request for key slot "index" with the given
 * "subject", writing it to "path". Both "subject" and "path" are required.
 *
 * @return 1 on success, 0 when a required value is missing or generation fails
 */
private int genCertReq(Uri uri, ContentValues values) {
    String subject = values.containsKey("subject") ? values.getAsString("subject") : null;
    String path = values.containsKey("path") ? values.getAsString("path") : null;
    if (TextUtils.isEmpty(subject) || TextUtils.isEmpty(path)) {
        return 0;
    }
    int index = values.containsKey("index") ? values.getAsInteger("index").intValue() : 0;
    int type = values.containsKey("type") ? values.getAsInteger("type").intValue() : 0;
    return MicroPhotoService.genCertRequest(index, type, subject, path) ? 1 : 0;
}
/**
 * Decodes a Base64 payload into a UTF-8 string.
 *
 * @param encoded Base64-encoded input (the old parameter name "decoded" was
 *                misleading — this is the still-encoded form)
 * @return the decoded string, or {@code null} if the input is empty or cannot
 *         be decoded
 */
private String stringFromBase64(String encoded) {
    if (TextUtils.isEmpty(encoded)) {
        return null;
    }
    try {
        // BUGFIX: decode() is now inside the try — malformed Base64 throws
        // IllegalArgumentException, which previously escaped this method and
        // could crash the provider on untrusted caller input.
        byte[] bytes = Base64.decode(encoded, Base64.DEFAULT);
        return new String(bytes, "UTF-8");
    } catch (Exception ex) {
        ex.printStackTrace();
        return null;
    }
}
/**
 * Takes a photo on the given channel/preset and stores it at "path".
 *
 * <p>Builds a throw-away copy of the channel's JSON config in the tmp dir,
 * overrides resolution and the four OSD corner texts (each arrives Base64
 * encoded), hands the temp config to the native capture call, then deletes it.
 *
 * @return 1 when the capture was dispatched, 0 when "path" is missing
 */
private int takePhoto(Uri uri, ContentValues values) {
    String path = values.containsKey("path") ? values.getAsString("path") : null;
    int channel = values.containsKey("channel") ? values.getAsInteger("channel").intValue() : 1;
    int preset = values.containsKey("preset") ? values.getAsInteger("preset").intValue() : 0xFF;
    int width = values.containsKey("width") ? values.getAsInteger("width").intValue() : 0;
    int height = values.containsKey("height") ? values.getAsInteger("height").intValue() : 0;
    String leftTopOsd = stringFromBase64(values.containsKey("leftTopOsd") ? values.getAsString("leftTopOsd") : null);
    String rightTopOsd = stringFromBase64(values.containsKey("rightTopOsd") ? values.getAsString("rightTopOsd") : null);
    String rightBottomOsd = stringFromBase64(values.containsKey("rightBottomOsd") ? values.getAsString("rightBottomOsd") : null);
    String leftBottomOsd = stringFromBase64(values.containsKey("leftBottomOsd") ? values.getAsString("leftBottomOsd") : null);
    if (TextUtils.isEmpty(path)) {
        // BUGFIX guard: a missing "path" previously crashed later with an NPE
        // in new File(path).
        return 0;
    }
    String appPath = MicroPhotoContext.buildMpAppDir(getContext());
    File configFile = new File(appPath);
    configFile = new File(configFile, "data/channels/" + Integer.toString(channel) + ".json");
    // Work on a disposable copy so the persistent channel config is untouched.
    File tmpConfigFile = new File(appPath);
    tmpConfigFile = new File(tmpConfigFile, "tmp/" + Integer.toString(channel) + "-" + Long.toString(System.currentTimeMillis()) + ".json");
    if (configFile.exists()) {
        try {
            FilesUtils.copyFile(configFile, tmpConfigFile);
        } catch (Exception ex) {
            ex.printStackTrace();
        }
    }
    JSONObject configJson = JSONUtils.loadJson(tmpConfigFile.getAbsolutePath());
    try {
        if (configJson == null) {
            configJson = new JSONObject();
        }
        if (width > 0) {
            configJson.put("resolutionCX", width);
        }
        if (height > 0) {
            configJson.put("resolutionCY", height);
        }
        // BUGFIX: getJSONObject("osd") throws when the key is absent (which
        // aborted the whole OSD block via the catch), and JSONObject.put()
        // returns the receiver — the old fallback assigned configJson itself
        // to osdJson, writing the OSD texts onto the root object.
        JSONObject osdJson = configJson.optJSONObject("osd");
        if (osdJson == null) {
            osdJson = new JSONObject();
            configJson.put("osd", osdJson);
        }
        osdJson.put("leftTop", TextUtils.isEmpty(leftTopOsd) ? "" : leftTopOsd);
        osdJson.put("rightTop", TextUtils.isEmpty(rightTopOsd) ? "" : rightTopOsd);
        osdJson.put("rightBottom", TextUtils.isEmpty(rightBottomOsd) ? "" : rightBottomOsd);
        osdJson.put("leftBottom", TextUtils.isEmpty(leftBottomOsd) ? "" : leftBottomOsd);
        JSONUtils.saveJson(tmpConfigFile.getAbsolutePath(), configJson);
    } catch (Exception ex) {
        ex.printStackTrace();
    }
    File file = new File(path);
    if (file.exists()) {
        // Replace any stale output from a previous run.
        file.delete();
    } else {
        FilesUtils.ensureParentDirectoryExisted(path);
    }
    MicroPhotoService.takePhoto(channel, preset, true, tmpConfigFile.getAbsolutePath(), path);
    if (tmpConfigFile.exists()) {
        tmpConfigFile.delete();
    }
    return 1;
}
/**
 * Launches the recording activity to capture a video clip at "path".
 *
 * <p>The four OSD corner texts arrive Base64-encoded. When "cameraId" is not
 * given, it is derived from the 1-based channel number. The video id is the
 * current epoch time in seconds.
 *
 * @return always 1 (the recording runs asynchronously in the activity)
 */
private int takeVideo(Uri uri, ContentValues values) {
    final String path = values.containsKey("path") ? values.getAsString("path") : null;
    final int channel = values.containsKey("channel") ? values.getAsInteger("channel").intValue() : 1;
    final int preset = values.containsKey("preset") ? values.getAsInteger("preset").intValue() : 0xFF;
    final int width = values.containsKey("width") ? values.getAsInteger("width").intValue() : 0;
    final int height = values.containsKey("height") ? values.getAsInteger("height").intValue() : 0;
    final int quality = values.containsKey("quality") ? values.getAsInteger("quality").intValue() : 0;
    final int duration = values.containsKey("duration") ? values.getAsInteger("duration").intValue() : 15;
    final int orientation = values.containsKey("orientation") ? values.getAsInteger("orientation").intValue() : 0;
    int cameraId = values.containsKey("cameraId") ? values.getAsInteger("cameraId").intValue() : -1;
    if (cameraId == -1) {
        // Channel numbers are 1-based, camera ids 0-based.
        cameraId = channel - 1;
    }
    final long videoId = System.currentTimeMillis() / 1000;
    final String leftTopOsd = stringFromBase64(values.containsKey("leftTopOsd") ? values.getAsString("leftTopOsd") : null);
    final String rightTopOsd = stringFromBase64(values.containsKey("rightTopOsd") ? values.getAsString("rightTopOsd") : null);
    final String rightBottomOsd = stringFromBase64(values.containsKey("rightBottomOsd") ? values.getAsString("rightBottomOsd") : null);
    final String leftBottomOsd = stringFromBase64(values.containsKey("leftBottomOsd") ? values.getAsString("leftBottomOsd") : null);
    final Context context = getContext();
    final Intent recordingIntent = MicroPhotoService.makeRecordingIntent(context,
            false, cameraId, videoId, duration, width, height, quality, orientation,
            leftTopOsd, rightTopOsd, rightBottomOsd, leftBottomOsd, path);
    recordingIntent.putExtra("resultType", 0);
    recordingIntent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
    context.startActivity(recordingIntent);
    return 1;
}
}

@ -1,235 +0,0 @@
package com.xypower.mpapp;
import android.app.Service;
import android.content.Intent;
import android.os.Handler;
import android.os.IBinder;
import android.text.TextUtils;
import android.util.Base64;
import android.util.Log;
import androidx.annotation.Nullable;
import com.xypower.common.FilesUtils;
import com.xypower.common.JSONUtils;
import com.xypower.common.MicroPhotoContext;
import com.xypower.mpapp.v2.Camera2VideoActivity;
import org.json.JSONObject;
import java.io.File;
/**
 * Command-style {@link Service}: callers start it with an "action" string
 * extra plus action-specific extras; the work is done synchronously in
 * {@link #onStartCommand} and, for most actions, the service stops itself
 * shortly afterwards.
 *
 * <p>NOTE(review): this duplicates most of BridgeProvider's command surface;
 * consider consolidating the two entry points.
 */
public class BridgeService extends Service {
    private final static String TAG = "MPLOG";

    // Values accepted in the intent's "action" extra.
    private final static String ACTION_IMP_PUBKEY = "imp_pubkey";
    private final static String ACTION_GEN_KEYS = "gen_keys";
    private final static String ACTION_CERT_REQ = "cert_req";
    private final static String ACTION_BATTERY_VOLTAGE = "query_bv";
    private final static String ACTION_RECORDING = "recording";
    private final static String ACTION_TAKE_PHOTO = "take_photo";

    private final static int REQUEST_CODE_RECORDING = Camera2VideoActivity.REQUEST_CODE_RECORDING;

    private Handler mHandler = null;
    private boolean m3V3TurnedOn = false; // NOTE(review): never read in this class
    // When true, a delayed stopSelf() is scheduled after handling a command.
    private boolean mAutoClose = true;
    private String mVideoFilePath = null;

    public BridgeService() {
    }

    @Override
    public void onCreate() {
        super.onCreate();
        mHandler = new Handler();
    }

    /**
     * Dispatches a single command described by the intent's "action" extra.
     * Unknown or missing actions fall through to the auto-close logic.
     */
    @Override
    public int onStartCommand(Intent intent, int flags, int startId) {
        if (intent == null) {
            // Restarted by the system without an intent: nothing to do.
            stopSelf();
            return START_NOT_STICKY;
        }
        final String action = intent.getStringExtra("action");
        if (!TextUtils.isEmpty(action)) {
            if (TextUtils.equals(action, ACTION_IMP_PUBKEY)) {
                // Import a public key: inline Base64 blob ("cert") or file ("path").
                String cert = intent.getStringExtra("cert");
                String path = intent.getStringExtra("path");
                int index = intent.getIntExtra("index", 1);
                if (!TextUtils.isEmpty(cert)) {
                    byte[] content = Base64.decode(cert, Base64.DEFAULT);
                    if (content != null) {
                        MicroPhotoService.importPublicKey(index, content);
                    }
                } else if (!TextUtils.isEmpty(path)) {
                    // BUGFIX: condition was TextUtils.isEmpty(path), so the
                    // file-based import only ran when the path was empty —
                    // and then dereferenced it.
                    String md5 = intent.getStringExtra("md5");
                    File file = new File(path);
                    if (file.exists() && file.isFile()) {
                        MicroPhotoService.importPublicKeyFile(index, path, md5);
                    }
                }
            } else if (TextUtils.equals(action, ACTION_GEN_KEYS)) {
                // Generate a key pair; optionally report "1"/"0" into a result file.
                int index = intent.getIntExtra("index", 0);
                boolean res = MicroPhotoService.genKeys(index);
                String path = intent.getStringExtra("path");
                if (!TextUtils.isEmpty(path)) {
                    FilesUtils.ensureParentDirectoryExisted(path);
                    FilesUtils.writeTextFile(path, res ? "1" : "0");
                }
            } else if (TextUtils.equals(action, ACTION_CERT_REQ)) {
                // Generate a certificate request.
                int index = intent.getIntExtra("index", 0);
                int type = intent.getIntExtra("type", 0);
                String subject = intent.getStringExtra("subject");
                String path = intent.getStringExtra("path");
                MicroPhotoService.genCertRequest(index, type, subject, path);
            } else if (TextUtils.equals(action, ACTION_BATTERY_VOLTAGE)) {
                String path = intent.getStringExtra("path");
                // Native GPIO command codes:
                // #define CMD_GET_CHARGING_BUS_VOLTAGE_STATE 112
                // #define CMD_GET_BAT_VOL_STATE 115
                // #define CMD_GET_BAT_BUS_VOLTAGE_STATE 117
                int bv = MicroPhotoService.getGpioInt(117);
                int bcv = MicroPhotoService.getGpioInt(112);
                if (!TextUtils.isEmpty(path)) {
                    // Write to a temp file then rename so readers never see a
                    // partially written result.
                    FilesUtils.ensureParentDirectoryExisted(path);
                    FilesUtils.writeTextFile(path + ".tmp", Integer.toString(bv) + " " + Integer.toString(bcv));
                    File file = new File(path + ".tmp");
                    file.renameTo(new File(path));
                }
            } else if (TextUtils.equals(action, ACTION_TAKE_PHOTO)) {
                String path = intent.getStringExtra("path");
                int channel = intent.getIntExtra("channel", 1);
                int preset = intent.getIntExtra("preset", 0xFF);
                int width = intent.getIntExtra("width", 0);
                int height = intent.getIntExtra("height", 0);
                String leftTopOsd = intent.getStringExtra("leftTopOsd");
                String rightTopOsd = intent.getStringExtra("rightTopOsd");
                String rightBottomOsd = intent.getStringExtra("rightBottomOsd");
                String leftBottomOsd = intent.getStringExtra("leftBottomOsd");
                String appPath = MicroPhotoContext.buildMpAppDir(getApplicationContext());
                File configFile = new File(appPath);
                configFile = new File(configFile, "data/channels/" + Integer.toString(channel) + ".json");
                // Work on a disposable copy so the persistent config is untouched.
                File tmpConfigFile = new File(appPath);
                tmpConfigFile = new File(tmpConfigFile, "tmp/" + Integer.toString(channel) + "-" + Long.toString(System.currentTimeMillis()) + ".json");
                if (configFile.exists()) {
                    try {
                        FilesUtils.copyFile(configFile, tmpConfigFile);
                    } catch (Exception ex) {
                        ex.printStackTrace();
                    }
                }
                JSONObject configJson = JSONUtils.loadJson(tmpConfigFile.getAbsolutePath());
                try {
                    if (configJson == null) {
                        configJson = new JSONObject();
                    }
                    if (width > 0) {
                        configJson.put("resolutionCX", width);
                    }
                    if (height > 0) {
                        configJson.put("resolutionCY", height);
                    }
                    // BUGFIX: getJSONObject("osd") throws when the key is absent,
                    // and JSONObject.put() returns the receiver — the old
                    // fallback assigned configJson itself to osdJson.
                    JSONObject osdJson = configJson.optJSONObject("osd");
                    if (osdJson == null) {
                        osdJson = new JSONObject();
                        configJson.put("osd", osdJson);
                    }
                    osdJson.put("leftTop", TextUtils.isEmpty(leftTopOsd) ? "" : leftTopOsd);
                    osdJson.put("rightTop", TextUtils.isEmpty(rightTopOsd) ? "" : rightTopOsd);
                    osdJson.put("rightBottom", TextUtils.isEmpty(rightBottomOsd) ? "" : rightBottomOsd);
                    osdJson.put("leftBottom", TextUtils.isEmpty(leftBottomOsd) ? "" : leftBottomOsd);
                    JSONUtils.saveJson(tmpConfigFile.getAbsolutePath(), configJson);
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
                File file = new File(path);
                if (file.exists()) {
                    file.delete();
                } else {
                    FilesUtils.ensureParentDirectoryExisted(path);
                }
                MicroPhotoService.takePhoto(channel, preset, true, tmpConfigFile.getAbsolutePath(), path);
                if (tmpConfigFile.exists()) {
                    tmpConfigFile.delete();
                }
            } else if (TextUtils.equals(action, ACTION_RECORDING)) {
                String path = intent.getStringExtra("path");
                int channel = intent.getIntExtra("channel", 1);
                int cameraId = intent.getIntExtra("cameraId", -1);
                int quality = intent.getIntExtra("quality", 0);
                int width = intent.getIntExtra("width", 1280);
                int height = intent.getIntExtra("height", 720);
                int duration = intent.getIntExtra("duration", 15);
                int orientation = intent.getIntExtra("orientation", 0);
                long videoId = System.currentTimeMillis() / 1000;
                String leftTopOsd = intent.getStringExtra("leftTopOsd");
                String rightTopOsd = intent.getStringExtra("rightTopOsd");
                String rightBottomOsd = intent.getStringExtra("rightBottomOsd");
                String leftBottomOsd = intent.getStringExtra("leftBottomOsd");
                if (cameraId == -1) {
                    // Channel numbers are 1-based, camera ids 0-based.
                    cameraId = channel - 1;
                }
                Intent recordingIntent = MicroPhotoService.makeRecordingIntent(getApplicationContext(),
                        cameraId, videoId, duration, width, height, quality, orientation,
                        leftTopOsd, rightTopOsd, rightBottomOsd, leftBottomOsd);
                mVideoFilePath = path;
                mAutoClose = false;
                recordingIntent.putExtra("ActivityResult", true);
                // NOTE(review): the recording intent is built but never launched —
                // a Service cannot call startActivityForResult(); confirm intent.
                // startActivityForResult(recordingIntent, REQUEST_CODE_RECORDING);
            }
        }
        if (mAutoClose) {
            // Give the command a moment to settle, then stop this service.
            mHandler.postDelayed(new Runnable() {
                @Override
                public void run() {
                    Log.i(TAG, "BridgeActivity will finish automatically");
                    stopSelf();
                }
            }, 200);
        }
        return super.onStartCommand(intent, flags, startId);
    }

    @Override
    public void onDestroy() {
        super.onDestroy();
    }

    /**
     * Binding is not supported; this service is start-command only.
     */
    @Nullable
    @Override
    public IBinder onBind(Intent intent) {
        return null;
    }
}

@ -1,8 +1,6 @@
package com.xypower.mpapp;
import android.Manifest;
import android.app.Activity;
import android.app.KeyguardManager;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
@ -11,7 +9,6 @@ import android.location.Location;
import android.location.LocationListener;
import android.location.LocationManager;
import android.os.Build;
import android.os.Environment;
import android.os.FileObserver;
import android.os.Handler;
import android.os.Looper;
@ -38,15 +35,12 @@ import android.widget.Toast;
import com.dev.devapi.api.SysApi;
import com.xypower.common.CameraUtils;
import com.xypower.common.FilesUtils;
import com.xypower.common.JSONUtils;
import com.xypower.common.MicroPhotoContext;
import com.xypower.mpapp.databinding.ActivityMainBinding;
import com.xypower.mpapp.utils.LocationUtil;
import com.xypower.mpapp.utils.RandomReader;
import org.json.JSONObject;
import java.io.File;
import java.lang.reflect.Method;
import java.text.SimpleDateFormat;
@ -126,28 +120,12 @@ public class MainActivity extends AppCompatActivity {
// getWindow().setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_STATE_ALWAYS_HIDDEN);
getWindow().addFlags(WindowManager.LayoutParams.FLAG_ALT_FOCUSABLE_IM);
// InputMethodManager imm = (InputMethodManager) getSystemService(Context.INPUT_METHOD_SERVICE);
// imm.hideSoftInputFromWindow(getWindow().getDecorView().getWindowToken(), 0);
// ViewUtils.hideSoftKeyboard(this);
ActionBar actionBar = getSupportActionBar();
// String buildTime = BuildConfig.BUILD_
Date date = new Date(BuildConfig.BUILD_TIMESTAMP);
// SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm");
actionBar.setTitle(actionBar.getTitle().toString() + " v" + MicroPhotoContext.getVersionName(getApplicationContext()) + " " + sdf.format(date));
// CompactSpinnerAdapter adapter = new CompactSpinnerAdapter(this, R.array.networkProtocols, R.layout.spinner_dropdown_item);
// adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
// binding.networkProtocol.setAdapter(adapter);
// ArrayAdapter adapter1 = ArrayAdapter.createFromResource(this, R.array.networkProtocols, android.R.layout.simple_spinner_item);
// adapter1.setDropDownViewResource(R.layout.spinner_dropdown_item);
// binding.protocol.setAdapter(adapter1);
binding.logs.setText("");
binding.logs.setMovementMethod(ScrollingMovementMethod.getInstance());
binding.logs.setScrollbarFadingEnabled(false);
@ -175,12 +153,9 @@ public class MainActivity extends AppCompatActivity {
}
}
break;
}
}
};
// mMessenger = new Messenger(new Handler());
StrictMode.ThreadPolicy policy = new StrictMode.ThreadPolicy.Builder().permitAll().build();
StrictMode.setThreadPolicy(policy);
@ -189,7 +164,6 @@ public class MainActivity extends AppCompatActivity {
final int noDelay = intent.getIntExtra("noDelay", 0);
int rebootFlag = intent.getIntExtra("reboot", 0);
if (rebootFlag == 1) {
// SysApi.enableAirPlane(MainActivity.this, true);
Log.i(TAG, "After Reboot");
}
@ -249,9 +223,9 @@ public class MainActivity extends AppCompatActivity {
binding.logs.setText("");
MicroPhotoContext.AppConfig curAppConfig = retrieveAndSaveAppConfig();
TakeAndThrowPhoto(2, 0xFF);
// TakeAndThrowPhoto(2, 0xFF);
try {
Thread.sleep(20);
// Thread.sleep(20);
} catch (Exception ex) {
ex.printStackTrace();
}
@ -473,7 +447,6 @@ public class MainActivity extends AppCompatActivity {
@Override
protected void onDestroy() {
super.onDestroy();
// System.exit(0);
}
public static void startMicroPhotoService(Context context, MicroPhotoContext.AppConfig curAppConfig, Messenger messenger) {
@ -603,21 +576,6 @@ public class MainActivity extends AppCompatActivity {
}
}
/**
 * Switches the default mobile-data subscription to {@code subId} and turns
 * data on, via hidden telephony APIs accessed through reflection (the public
 * equivalents require carrier privileges). Failures are logged and swallowed.
 */
private void setDefaultDataSubId(int subId) {
    SubscriptionManager subMgr = (SubscriptionManager) getSystemService(Context.TELEPHONY_SUBSCRIPTION_SERVICE);
    try {
        Method setSubIdMethod = subMgr.getClass().getDeclaredMethod("setDefaultDataSubId", int.class);
        setSubIdMethod.invoke(subMgr, subId);
        TelephonyManager telMgr = (TelephonyManager) getSystemService(Context.TELEPHONY_SERVICE);
        Method setDataEnabledMethod = telMgr.getClass().getDeclaredMethod("setDataEnabled", boolean.class);
        setDataEnabledMethod.invoke(telMgr, true);
    } catch (Exception e) {
        Log.e(TAG, "wjz debug setDefaultDataSubId: error is " + e.getMessage());
    }
}
private MicroPhotoContext.AppConfig retrieveAndSaveAppConfig() {
MicroPhotoContext.AppConfig appConfig = new MicroPhotoContext.AppConfig();
@ -645,7 +603,7 @@ public class MainActivity extends AppCompatActivity {
}
private MicroPhotoContext.AppConfig getAppConfig() {
return MicroPhotoContext.getMpAppConfig(this.getApplicationContext());
return MicroPhotoContext.getMpAppConfig(getApplicationContext());
}
private void saveAppConfig(MicroPhotoContext.AppConfig appConfig) {
@ -663,23 +621,6 @@ public class MainActivity extends AppCompatActivity {
return 0;
}
/**
 * Starts GPS location updates (if a location permission is granted) and dumps
 * the last known fix, when available, to stdout.
 */
private void gpsTake() {
    LocationManager locationManager = (LocationManager) getSystemService(Context.LOCATION_SERVICE);
    // Bail out unless at least one location permission is granted.
    if (ActivityCompat.checkSelfPermission(this, Manifest.permission.ACCESS_FINE_LOCATION) != PackageManager.PERMISSION_GRANTED
            && ActivityCompat.checkSelfPermission(this, Manifest.permission.ACCESS_COARSE_LOCATION) != PackageManager.PERMISSION_GRANTED) {
        return;
    }
    locationManager.requestLocationUpdates(LocationManager.GPS_PROVIDER, 0, 0, locationListener);
    Location lastKnownLocation = locationManager.getLastKnownLocation(LocationManager.GPS_PROVIDER);
    if (lastKnownLocation == null) {
        return;
    }
    double latitude = lastKnownLocation.getLatitude();
    double longitude = lastKnownLocation.getLongitude();
    System.out.printf("gps" + latitude + "fds:" + longitude);
}
LocationListener locationListener = new LocationListener() {
@Override
public void onLocationChanged(Location location) {
@ -703,83 +644,20 @@ public class MainActivity extends AppCompatActivity {
@Override
public void onStatusChanged(String provider, int status, Bundle extras) {
// 处理位置状态变化事件
System.out.printf("fsdaf");
}
@Override
public void onProviderEnabled(String provider) {
// 处理位置提供者启用事件
System.out.printf("fsdaf");
}
@Override
public void onProviderDisabled(String provider) {
// 处理位置提供者禁用事件
System.out.printf("fsdaf");
}
};
/**
 * Subscribes {@code locationListener} to continuous GPS updates.
 *
 * <p>Shows a toast asking the user to enable GPS when the provider is off,
 * and silently returns when no location permission has been granted.
 * (GPS_PROVIDER is satellite-based; NETWORK_PROVIDER would use cell/Wi-Fi.)
 */
private void startLocate() {
    LocationManager locationManager = (LocationManager) getSystemService(Context.LOCATION_SERVICE);
    if (!locationManager.isProviderEnabled(LocationManager.GPS_PROVIDER)) {
        // GPS is switched off — prompt the user ("please enable GPS").
        Toast.makeText(this, "请打开GPS", Toast.LENGTH_SHORT).show();
        return;
    }
    // Runtime permission check; request flow is handled elsewhere.
    if (ActivityCompat.checkSelfPermission(this, Manifest.permission.ACCESS_FINE_LOCATION) != PackageManager.PERMISSION_GRANTED
            && ActivityCompat.checkSelfPermission(this, Manifest.permission.ACCESS_COARSE_LOCATION) != PackageManager.PERMISSION_GRANTED) {
        return;
    }
    // minTime=0, minDistance=0: deliver every fix.
    locationManager.requestLocationUpdates(LocationManager.GPS_PROVIDER, 0, 0, locationListener);
}
// private void initSocket() {
// NettyChatClient nettyChatClient = NettyChatClient.newInstance("47.96.238.157", 6891);
//// NettyChatClient nettyChatClient = NettyChatClient.newInstance("180.166.218.222", 40032);
// nettyChatClient.init(new INettyMessageListener() {
// @Override
// public void onReceive(String message) {
//// for (INettyMessageListener nettyMessageListener : mIMessageListenerList) {
//// nettyMessageListener.onReceive(message);
//// }
// System.out.println("dsfa");
// }
//
// @Override
// public void onConnectSuccess() {
//// for (INettyMessageListener nettyMessageListener : mIMessageListenerList) {
//// nettyMessageListener.onConnectSuccess();
//// }
// System.out.println("dsfa");
// }
//
// @Override
// public void onError() {
//// for (INettyMessageListener nettyMessageListener : mIMessageListenerList) {
//// nettyMessageListener.onError();
//// }
// System.out.println("dsfa");
// }
// });
// nettyChatClient.connect();
// }
@Override
public boolean onKeyDown(int keyCode, KeyEvent event) {
if (keyCode == KeyEvent.KEYCODE_BACK) {

@ -13,6 +13,10 @@ import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.ImageDecoder;
import android.graphics.Matrix;
import android.location.Location;
import android.location.LocationListener;
import android.location.LocationManager;
@ -29,12 +33,14 @@ import android.os.Build;
import android.os.Bundle;
import android.os.Handler;
import android.os.IBinder;
import android.os.Looper;
import android.os.Message;
import android.os.Messenger;
import android.os.PowerManager;
import android.os.RemoteException;
import android.os.SystemClock;
import androidx.annotation.NonNull;
import androidx.core.app.NotificationCompat;
import androidx.core.content.FileProvider;
import androidx.localbroadcastmanager.content.LocalBroadcastManager;
@ -50,9 +56,12 @@ import android.widget.Toast;
import com.dev.devapi.api.SysApi;
import com.xypower.common.FileDownloader;
import com.xypower.common.InetAddressUtils;
import com.xypower.common.NetworkUtils;
import com.xypower.common.MicroPhotoContext;
import com.xypower.mpapp.adb.CameraAdb;
import com.xypower.mpapp.utils.DeviceUtil;
import com.xypower.mpapp.v2.Camera2VideoActivity;
import com.xypower.mpapp.video.RawActivity;
import java.io.File;
import java.lang.reflect.Method;
@ -128,6 +137,7 @@ public class MicroPhotoService extends Service {
private long mGpsTimeout = 60000; // 1 minute
private PendingIntent mPreviousGpsTimer = null;
private long mLastLocationRequested = 0;
private ServiceHandler mHander = null;
private Messenger mMessenger = null;
@ -306,9 +316,12 @@ public class MicroPhotoService extends Service {
Thread th = new Thread(runnable);
th.start();
mService.registerHeartbeatTimer();
mService.registerHeartbeatTimer(mService.mHeartbeatDuration);
try {
mService.detectGpsStatus();
ConnectivityManager connectivityManager = (ConnectivityManager)context.getSystemService(Context.CONNECTIVITY_SERVICE);
if (connectivityManager != null) {
if (!connectivityManager.isDefaultNetworkActive()) {
@ -333,6 +346,27 @@ public class MicroPhotoService extends Service {
Log.i(TAG, "PhotoTimer Fired: CH=" + channel + " PR=" + preset);
mService.notifyToTakePhoto(mService.mNativeHandle, channel, preset, ts, photoOrVideo);
}
File cameraAdbCfg = new File(MicroPhotoContext.buildMpAppDir(mService.getApplication()), "data/cameraAdb.cfg");
if (cameraAdbCfg.exists()) {
final String appPath = MicroPhotoContext.buildMpAppDir(context);
mService.mHander.postDelayed(new Runnable() {
@Override
public void run() {
final CameraAdb cameraAdb = new CameraAdb(context, appPath);
cameraAdb.setCallback(new Runnable() {
@Override
public void run() {
List<String> targetPaths = cameraAdb.getTargetPaths();
for (String targetPath : targetPaths) {
mService.sendExternalPhoto(mService.mNativeHandle, targetPath);
}
}
});
cameraAdb.takePhoto();
}
}, 10000 * cnt);
}
}
// Register Next Photo Timer
@ -368,12 +402,98 @@ public class MicroPhotoService extends Service {
mService.reloadConfigs(mService.mNativeHandle);
}
} else if (TextUtils.equals(ACTION_VIDEO_FINISHED, action)) {
boolean result = intent.getBooleanExtra("result", false);
String path = intent.getStringExtra("path");
long videoId = intent.getLongExtra("videoId", 0);
final boolean photoOrVideo = intent.getBooleanExtra("photoOrVideo", false);
final boolean result = intent.getBooleanExtra("result", false);
final String path = intent.getStringExtra("path");
final long videoId = intent.getLongExtra("videoId", 0);
final int orientation = intent.getIntExtra("orientation", 0);
final boolean frontCamera = intent.getBooleanExtra("frontCamera", false);
final int numberOfCaptures = intent.getIntExtra("captures", 1);
final List<String> paths = new ArrayList<>();
if (numberOfCaptures > 1) {
for (int idx = 0; idx < numberOfCaptures; idx++) {
String p = intent.getStringExtra("path" + Integer.toString(idx));
if (!TextUtils.isEmpty(p)) {
paths.add(p);
}
}
}
Log.i(TAG, "Recording received(" + Long.toString(videoId) + "):" + path);
mService.recordingFinished(mService.mNativeHandle, result, path, videoId);
if (photoOrVideo) {
Thread thread = new Thread(new Runnable() {
@Override
public void run() {
if (numberOfCaptures == 1) {
processCapture();
} else {
processCaptures();
}
}
private void processCaptures() {
Bitmap bm = null;
File rawFile = new File(path);
String pathsStr = String.join("\t", paths);
mService.burstCaptureFinished(mService.mNativeHandle, result, numberOfCaptures, pathsStr, frontCamera, orientation, videoId);
for (String p : paths) {
try {
File f = new File(p);
f.delete();
} catch (Exception ex) {
ex.printStackTrace();
}
}
}
// Decodes a single captured photo from disk, applies rotation and
// front-camera mirroring, hands the bitmap to the native layer via
// captureFinished(), and finally deletes the temporary raw file.
// Uses the captured variables of the enclosing receiver scope:
// path, result, photoOrVideo, orientation, frontCamera, videoId.
private void processCapture() {
    Bitmap bm = null;
    File rawFile = new File(path);
    try {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
            ImageDecoder.Source src = ImageDecoder.createSource(rawFile);
            ImageDecoder.OnHeaderDecodedListener listener =
                    new ImageDecoder.OnHeaderDecodedListener() {
                        @Override
                        public void onHeaderDecoded(@NonNull ImageDecoder decoder, @NonNull ImageDecoder.ImageInfo info, @NonNull ImageDecoder.Source source) {
                            // Software allocator so the bitmap pixels are
                            // accessible for Matrix transforms / native use.
                            decoder.setAllocator(ImageDecoder.ALLOCATOR_SOFTWARE);
                            // decoder.setTargetSize(info.getSize().getWidth(), info.getSize().getHeight());
                        }
                    };
            bm = ImageDecoder.decodeBitmap(src, listener);
        } else {
            bm = BitmapFactory.decodeFile(path);
        }
        if (orientation != 0 || frontCamera) {
            Matrix matrix = new Matrix();
            if (orientation != 0) {
                matrix.postRotate(orientation);
            }
            if (frontCamera) {
                // Horizontal mirror to undo the front-camera flip.
                matrix.postScale(-1, 1);
            }
            bm = Bitmap.createBitmap(bm, 0, 0, bm.getWidth(), bm.getHeight(), matrix, true);
        }
    } catch (Exception ex) {
        // Decoding failures are tolerated: bm stays null and the native
        // callback below is invoked with result == false. Log instead of
        // silently swallowing, consistent with the other catch blocks.
        ex.printStackTrace();
    }
    mService.captureFinished(mService.mNativeHandle, photoOrVideo, result && bm != null, bm, videoId);
    try {
        rawFile.delete();
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
});
thread.start();
} else {
mService.recordingFinished(mService.mNativeHandle, photoOrVideo, result, path, videoId);
}
} else if (TextUtils.equals(ACTION_STOP, action)) {
mService.stopTerminalService();
} else if (TextUtils.equals(ACTION_IMP_PUBKRY, action)) {
@ -388,6 +508,14 @@ public class MicroPhotoService extends Service {
}
mService.enableGps(false);
} else if (TextUtils.equals(ACTION_RESTART, action)) {
String reason = intent.getStringExtra("reason");
MicroPhotoService.infoLog("Recv RESTART APP cmd, reason=" + (TextUtils.isEmpty(reason) ? "" : reason));
try {
Thread.sleep(100);
} catch (Exception ex) {
ex.printStackTrace();
}
MicroPhotoService.restartApp(context.getApplicationContext(), MicroPhotoContext.PACKAGE_NAME_MPAPP);
}
}
@ -400,36 +528,36 @@ public class MicroPhotoService extends Service {
if (orgHeartbeatDuration == 0) {
if (nextPhotoTime == 0) {
mHeartbeatDuration = duration;
registerHeartbeatTimer();
registerHeartbeatTimer(duration);
} else {
long ts = System.currentTimeMillis();
nextPhotoTime *= 1000;
if (nextPhotoTime > ts) {
mHeartbeatDuration = (int) ((nextPhotoTime - ts) % duration) + 999;
registerHeartbeatTimer();
mHeartbeatDuration = duration;
registerHeartbeatTimer((int) ((nextPhotoTime - ts) % duration) + 999);
} else {
mHeartbeatDuration = duration;
registerHeartbeatTimer();
registerHeartbeatTimer(duration);
}
}
} else {
mHeartbeatDuration = duration;
}
}
private void registerHeartbeatTimer() {
private void registerHeartbeatTimer(long timeout) {
// 创建延迟意图
long triggerTime = System.currentTimeMillis() + timeout;
Intent alarmIntent = new Intent();
alarmIntent.setAction(ACTION_HEARTBEAT);
alarmIntent.putExtra("HeartbeatDuration", mHeartbeatDuration);
PendingIntent pendingIntent = PendingIntent.getBroadcast(this, 0, alarmIntent, 0);
alarmIntent.putExtra("HeartbeatTime", triggerTime);
PendingIntent pendingIntent = PendingIntent.getBroadcast(this, 0, alarmIntent, PendingIntent.FLAG_UPDATE_CURRENT);
AlarmManager alarmManager = (AlarmManager) getSystemService(ALARM_SERVICE);
alarmManager.setExactAndAllowWhileIdle(AlarmManager.ELAPSED_REALTIME_WAKEUP, SystemClock.elapsedRealtime() + mHeartbeatDuration, pendingIntent);
// mNextHeartbeatTime = System.currentTimeMillis() + mHeartbeatDuration;
// alarmManager.setExactAndAllowWhileIdle(AlarmManager.ELAPSED_REALTIME, SystemClock.elapsedRealtime() + timeout, pendingIntent);
alarmManager.setExactAndAllowWhileIdle(AlarmManager.RTC_WAKEUP, triggerTime, pendingIntent);
}
private static void registerPhotoTimer(Context context, long scheduleTime, long takingTime, long timeout, List<Long> schedules) {
@ -478,23 +606,28 @@ public class MicroPhotoService extends Service {
registerPhotoTimer(getApplicationContext(), scheduleTime, scheduleTime, timeout, schedules);
}
public void startRecording(int cameraId, long videoId, int duration, int width, int height, int quality, int orientation, String leftTopOsd, String rightTopOsd, String rightBottomOsd, String leftBottomOsd) {
// From Native
public void startRecording(boolean photoOrVideo, int cameraId, long videoId, int duration, int width, int height, int quality, int orientation, String leftTopOsd, String rightTopOsd, String rightBottomOsd, String leftBottomOsd) {
Context context = getApplicationContext();
// Intent intent = new Intent(this, VideoActivity.class);
Intent intent = makeRecordingIntent(context, cameraId, videoId, duration, width, height, quality, orientation,
leftTopOsd, rightTopOsd, rightBottomOsd, leftBottomOsd);
Intent intent = makeRecordingIntent(context, photoOrVideo, cameraId, videoId, duration, width, height, quality, orientation,
leftTopOsd, rightTopOsd, rightBottomOsd, leftBottomOsd, null);
intent.putExtra("resultType", 2);
intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
context.startActivity(intent);
}
public static Intent makeRecordingIntent(Context context, int cameraId, long videoId, int duration, int width, int height, int quality, int orientation, String leftTopOsd, String rightTopOsd, String rightBottomOsd, String leftBottomOsd) {
public static Intent makeRecordingIntent(Context context, boolean photoOrVideo, int cameraId, long videoId, int duration, int width, int height, int quality, int orientation, String leftTopOsd, String rightTopOsd, String rightBottomOsd, String leftBottomOsd, String path) {
// Intent intent = new Intent(this, VideoActivity.class);
Intent intent = new Intent(context, Camera2VideoActivity.class);
Intent intent = photoOrVideo ? new Intent(context, RawActivity.class) : new Intent(context, Camera2VideoActivity.class);
intent.putExtra("cameraId", cameraId);
intent.putExtra("videoId", videoId);
if (!TextUtils.isEmpty(path)) {
intent.putExtra("path", path);
}
intent.putExtra("duration", duration);
intent.putExtra("width", width);
intent.putExtra("height", height);
@ -505,6 +638,14 @@ public class MicroPhotoService extends Service {
intent.putExtra("rightBottomOsd", rightBottomOsd);
intent.putExtra("leftBottomOsd", leftBottomOsd);
String tmpPath = MicroPhotoContext.buildMpAppDir(context);
tmpPath += "tmp";
intent.putExtra("cameraTmpPath", tmpPath);
if (photoOrVideo) {
intent.putExtra("burstCaptures", 8);
}
// intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
return intent;
@ -672,7 +813,7 @@ public class MicroPhotoService extends Service {
public void run() {
String ip = server;
if (!InetAddressUtils.isIPv4Address(ip) && !InetAddressUtils.isIPv6Address(ip)) {
if (!NetworkUtils.isIPv4Address(ip) && !NetworkUtils.isIPv6Address(ip)) {
// It is a domain
InetAddress addr = null;
try {
@ -939,7 +1080,9 @@ public class MicroPhotoService extends Service {
// Set Listener
}
try {
mLocationManager.requestLocationUpdates(mLocateType, 30000, 1, mLocationListener);
enableGps(true);
mLastLocationRequested = System.currentTimeMillis();
mLocationManager.requestLocationUpdates(mLocateType, 30000, 1, mLocationListener, Looper.getMainLooper());
} catch (Exception ex) {
ex.printStackTrace();
}
@ -957,6 +1100,13 @@ public class MicroPhotoService extends Service {
return false;
}
// Switches GPS back off once no location request has been issued for
// the last 10 minutes (based on mLastLocationRequested).
private void detectGpsStatus() {
    final long idleMillis = System.currentTimeMillis() - mLastLocationRequested;
    if (idleMillis > 10L * 60L * 1000L) {
        enableGps(false);
    }
}
public void downloadAndInstall(final String url) {
final Context context = getApplicationContext();
@ -1172,19 +1322,25 @@ cellSignalStrengthGsm.getDbm();
protected native boolean reloadConfigs(long handler);
protected native void updatePosition(long handler, double lon, double lat, double radius, long ts);
protected native boolean uninit(long handler);
protected native void recordingFinished(long handler, boolean result, String path, long videoId);
protected native void recordingFinished(long handler, boolean photoOrVideo, boolean result, String path, long videoId);
protected native void captureFinished(long handler, boolean photoOrVideo, boolean result, Bitmap bm, long videoId);
protected native void burstCaptureFinished(long handler, boolean result, int numberOfCaptures, String pathsJoinedByTab, boolean frontCamera, int rotation, long photoId);
public static native long takePhoto(int channel, int preset, boolean photoOrVideo, String configFilePath, String path);
public static native void releaseDeviceHandle(long deviceHandle);
public static native boolean sendExternalPhoto(long deviceHandle, String path);
public static native void infoLog(String log);
public static native void setOtgState(boolean enabled);
public static native void setCam3V3Enable(boolean enabled);
public static native String getSerialNumber();
public static native boolean importPublicKeyFile(int index, String outputPath, String md5);
public static native boolean importPublicKey(int index, byte cert[]);
public static native boolean importPrivateKey(int index, byte cert[]);
public static native boolean genKeys(int index);
public native static int getGpioInt(int cmd);
public static native int[] recoganizePicture(String paramPath, String binPath, String blobName8, String blobName16, String blobName32, String picPath);
public static native String querySecVersion();
public static native boolean genCertRequest(int index, int type, String subject, String outputPath);
public static native boolean importPrivateKeyFile(int index, String outputPath, String md5);

@ -0,0 +1,180 @@
package com.xypower.mpapp;
import androidx.annotation.Nullable;
import androidx.appcompat.app.AppCompatActivity;
import android.content.Intent;
import android.graphics.SurfaceTexture;
import android.hardware.Camera;
import android.os.Bundle;
import android.os.Handler;
import android.os.HandlerThread;
import android.os.Message;
import android.view.View;
import android.widget.EditText;
import android.widget.TextView;
/*
import com.chillingvan.canvasgl.ICanvasGL;
import com.chillingvan.canvasgl.glcanvas.BasicTexture;
import com.chillingvan.canvasgl.glview.texture.GLTexture;
import com.chillingvan.canvasgl.textureFilter.BasicTextureFilter;
import com.chillingvan.canvasgl.textureFilter.HueFilter;
import com.chillingvan.canvasgl.textureFilter.TextureFilter;
import io.antmedia.rtmp_client.RTMPMuxer;
import com.xypower.stream.camera.InstantVideoCamera;
import com.xypower.stream.encoder.video.H264Encoder;
import com.xypower.stream.muxer.RTMPStreamMuxer;
import com.xypower.stream.publisher.CameraStreamPublisher;
import com.xypower.stream.publisher.StreamPublisher;
*/
import java.io.IOException;
import java.util.List;
// Test/debug activity for camera RTMP streaming (pan-tilt/gimbal camera test
// code). The actual streaming implementation is currently disabled: every
// piece of code depending on the canvasgl / instant-video / RTMP libraries
// is kept as commented-out scaffolding, so only the layout inflation and the
// button callback stubs are active.
public class StreamActivity extends AppCompatActivity {
    /*
    private CameraStreamPublisher streamPublisher;
    private com.chillingvan.instantvideo.sample.test.camera.CameraPreviewTextureView cameraPreviewTextureView;
    private InstantVideoCamera instantVideoCamera;
    private Handler handler;
    private EditText addrEditText;
    private HandlerThread handlerThread;
    private TextureFilter textureFilterLT;
    private TextureFilter textureFilterRT;
    private com.chillingvan.instantvideo.sample.test.VideoFrameHandlerHelper videoFrameHandlerHelper;
    */

    // Inflates the layout; the camera/publisher wiring below is disabled.
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        initFrameHandlerHelper();
        setContentView(R.layout.activity_stream);
        /*
        cameraPreviewTextureView = findViewById(R.id.camera_produce_view);
        cameraPreviewTextureView.setOnDrawListener(new H264Encoder.OnDrawListener() {
            @Override
            public void onGLDraw(ICanvasGL canvasGL, List<GLTexture> producedTextures, List<GLTexture> consumedTextures) {
                GLTexture texture = producedTextures.get(0);
                drawVideoFrame(canvasGL, texture.getSurfaceTexture(), texture.getRawTexture());
            }
        });
        addrEditText = (EditText) findViewById(R.id.ip_input_test);
        instantVideoCamera = new InstantVideoCamera(Camera.CameraInfo.CAMERA_FACING_FRONT, 640, 480);
        // instantVideoCamera = new InstantVideoCamera(Camera.CameraInfo.CAMERA_FACING_FRONT, 1280, 720);
        handlerThread = new HandlerThread("StreamPublisherOpen");
        handlerThread.start();
        handler = new Handler(handlerThread.getLooper()) {
            @Override
            public void handleMessage(Message msg) {
                super.handleMessage(msg);
                // StreamPublisher.StreamPublisherParam streamPublisherParam = new StreamPublisher.StreamPublisherParam();
                // StreamPublisher.StreamPublisherParam streamPublisherParam = new StreamPublisher.StreamPublisherParam(1080, 640, 9500 * 1000, 30, 1, 44100, 19200);
                StreamPublisher.StreamPublisherParam streamPublisherParam = new StreamPublisher.StreamPublisherParam.Builder().setWidth(540).setHeight(750).setVideoBitRate(1500 * 1000).setFrameRate(30).setIframeInterval(1).setSamplingRate(44100).setAudioBitRate(32000).createStreamPublisherParam();
                streamPublisherParam.outputFilePath = getExternalFilesDir(null) + "/test_flv_encode.flv";
                // streamPublisherParam.outputFilePath = getExternalFilesDir(null) + "/test_mp4_encode.mp4";
                streamPublisher.prepareEncoder(streamPublisherParam, new H264Encoder.OnDrawListener() {
                    @Override
                    public void onGLDraw(ICanvasGL canvasGL, List<GLTexture> producedTextures, List<GLTexture> consumedTextures) {
                        GLTexture texture = consumedTextures.get(0);
                        drawVideoFrame(canvasGL, texture.getSurfaceTexture(), texture.getRawTexture());
                    }
                });
                try {
                    streamPublisherParam.outputUrl = addrEditText.getText().toString();
                    streamPublisher.startPublish();
                } catch (IOException e) {
                    e.printStackTrace();
                    runOnUiThread(new Runnable() {
                        @Override
                        public void run() {
                            ((TextView)findViewById(R.id.test_camera_button)).setText("START");
                        }
                    });
                }
            }
        };
        streamPublisher = new CameraStreamPublisher(new RTMPStreamMuxer(), cameraPreviewTextureView, instantVideoCamera);
        // streamPublisher = new CameraStreamPublisher(new MP4Muxer(), cameraPreviewTextureView, instantVideoCamera);
        */
    }

    // No-op while the streaming scaffolding is disabled.
    private void initFrameHandlerHelper() {
        /*
        videoFrameHandlerHelper = new com.chillingvan.instantvideo.sample.test.VideoFrameHandlerHelper(getApplicationContext());
        */
    }

    /*
    private void drawVideoFrame(ICanvasGL canvasGL, @Nullable SurfaceTexture outsideSurfaceTexture, @Nullable BasicTexture outsideTexture) {
        // Here you can do video processing on the frame,
        // e.g. overlaying a watermark before it is encoded.
        if(textureFilterLT == null) {
            textureFilterLT = new BasicTextureFilter();
        }
        if(textureFilterRT == null) {
            textureFilterRT = new HueFilter(180);
        }
        int width = outsideTexture.getWidth();
        int height = outsideTexture.getHeight();
        canvasGL.drawSurfaceTexture(outsideTexture, outsideSurfaceTexture, 0, 0, width /2, height /2, textureFilterLT);
        canvasGL.drawSurfaceTexture(outsideTexture, outsideSurfaceTexture, 0, height/2, width/2, height, textureFilterRT);
        videoFrameHandlerHelper.initDrawHelper(width/2, height/2);
        videoFrameHandlerHelper.drawText(canvasGL);
    }
    */

    @Override
    protected void onResume() {
        super.onResume();
        // streamPublisher.resumeCamera();
    }

    @Override
    protected void onPause() {
        super.onPause();
        /*
        streamPublisher.pauseCamera();
        if (streamPublisher.isStart()) {
            streamPublisher.closeAll();
        }
        ((TextView)findViewById(R.id.test_camera_button)).setText("START");
        */
    }

    @Override
    protected void onDestroy() {
        super.onDestroy();
        //handlerThread.quitSafely();
    }

    // Start/stop button callback (wired from the layout). Currently a stub:
    // the cast result is unused while the publisher code stays commented out.
    public void clickStartTest(View view) {
        TextView textView = (TextView) view;
        /*
        if (streamPublisher.isStart()) {
            streamPublisher.closeAll();
            textView.setText("START");
        } else {
            streamPublisher.resumeCamera();
            handler.sendEmptyMessage(1);
            textView.setText("STOP");
        }
        */
    }
}

@ -1,4 +0,0 @@
package com.xypower.mpapp;
// Empty placeholder class for the application upgrade feature; it contains
// no implementation yet.
public class Upgrader {
}

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save