//
// Created by Matthew on 2025/3/1.
//

#include "RTSPRecorder.h"

#include <chrono>
#include <thread>
#include <android/log.h>
#include <errno.h>

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libavutil/time.h>
#include <libavutil/log.h>
}

#define LOG_TAG "RTSPRecorder"
#define LOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, LOG_TAG, __VA_ARGS__)
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)

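// Route FFmpeg's av_log output to Android logcat, mapping FFmpeg log levels to
// their closest Android equivalents.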
void ffmpeg_log_callback(void *ptr, int level, const char *fmt, va_list vl) {
    // Map FFmpeg log levels to Android log levels
    int android_log_level;
    switch (level) {
        case AV_LOG_PANIC:
        case AV_LOG_FATAL:
            android_log_level = ANDROID_LOG_FATAL;
            break;
        case AV_LOG_ERROR:
            android_log_level = ANDROID_LOG_ERROR;
            break;
        case AV_LOG_WARNING:
            android_log_level = ANDROID_LOG_WARN;
            break;
        case AV_LOG_INFO:
            android_log_level = ANDROID_LOG_INFO;
            break;
        case AV_LOG_VERBOSE:
            android_log_level = ANDROID_LOG_VERBOSE;
            break;
        case AV_LOG_DEBUG:
        case AV_LOG_TRACE:
            android_log_level = ANDROID_LOG_DEBUG;
            break;
        default:
            android_log_level = ANDROID_LOG_INFO;
            break;
    }

    // Format the log message
    char log_message[1024];
    vsnprintf(log_message, sizeof(log_message), fmt, vl);

    // Send the log message to logcat
    __android_log_print(android_log_level, "FFmpeg", "%s", log_message);
}

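// Mirror every input stream into the output context: copy codec parameters,
// carry over the time_base, and clear codec_tag so the muxer can pick its own
// tag. Returns 0 on success or a negative AVERROR code. (The functions below
// currently set up their output streams inline instead of calling this helper.)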
int setup_output_streams(AVFormatContext *input_ctx, AVFormatContext *output_ctx) {
    // Copy streams and fix time_base
    for (unsigned int i = 0; i < input_ctx->nb_streams; i++) {
        AVStream *in_stream = input_ctx->streams[i];
        AVStream *out_stream = avformat_new_stream(output_ctx, NULL);
        if (!out_stream) {
            return AVERROR_UNKNOWN;
        }

        // Copy codec parameters
        int ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
        if (ret < 0) {
            return ret;
        }

        // Fix time base
        out_stream->time_base = in_stream->time_base;

        // Clear any existing codec tag so the muxer can choose its own
        out_stream->codecpar->codec_tag = 0;
    }
    return 0;
}

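// Write the MP4 container header with "faststart+frag_keyframe" movflags and an
// "mp42" brand. Returns the result of avformat_write_header() (0 on success or a
// negative AVERROR code); the error text is printed on failure.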
int write_mp4_header(AVFormatContext *output_ctx) {
    AVDictionary *opts = NULL;

    // MP4 specific options
    av_dict_set(&opts, "movflags", "faststart+frag_keyframe", 0);
    av_dict_set(&opts, "brand", "mp42", 0);

    // Write header
    int ret = avformat_write_header(output_ctx, &opts);
    if (ret < 0) {
        char errbuf[AV_ERROR_MAX_STRING_SIZE];
        av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
        fprintf(stderr, "Header write failed: %s (code: %d)\n", errbuf, ret);
    }

    av_dict_free(&opts);
    return ret;
}

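// Remux an RTMP stream into an MP4 file without re-encoding: all input streams
// are copied as-is. `duration` is the recording length in milliseconds (a helper
// thread pauses the input after that long); `netHandle` is currently unused
// here. Errors are reported on stderr and the function returns early after
// releasing whatever was already allocated.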
void dumpRtmpToMp4(const char* rtmpUrl, const char* outputPath, uint32_t duration, net_handle_t netHandle)
{
    AVFormatContext* inputFormatContext = nullptr;
    AVFormatContext* outputFormatContext = nullptr;
    AVPacket packet;

    av_register_all();   // deprecated (a no-op) since FFmpeg 4.0; only needed on older builds
    avformat_network_init();

    // Open input RTMP stream
    if (avformat_open_input(&inputFormatContext, rtmpUrl, nullptr, nullptr) != 0) {
        fprintf(stderr, "Could not open input file '%s'\n", rtmpUrl);
        return;
    }

    // Retrieve input stream information
    if (avformat_find_stream_info(inputFormatContext, nullptr) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        avformat_close_input(&inputFormatContext);
        return;
    }

    // Open output MP4 file
    if (avformat_alloc_output_context2(&outputFormatContext, nullptr, "mp4", outputPath) < 0) {
        fprintf(stderr, "Could not create output context\n");
        avformat_close_input(&inputFormatContext);
        return;
    }

    // Copy stream information from input to output
    for (unsigned int i = 0; i < inputFormatContext->nb_streams; i++) {
        AVStream* inStream = inputFormatContext->streams[i];
        AVStream* outStream = avformat_new_stream(outputFormatContext, nullptr);
        if (!outStream) {
            fprintf(stderr, "Failed to allocate output stream\n");
            avformat_close_input(&inputFormatContext);
            avformat_free_context(outputFormatContext);
            return;
        }

        if (avcodec_parameters_copy(outStream->codecpar, inStream->codecpar) < 0) {
            fprintf(stderr, "Failed to copy codec parameters\n");
            avformat_close_input(&inputFormatContext);
            avformat_free_context(outputFormatContext);
            return;
        }
        outStream->codecpar->codec_tag = 0;
    }

    // Open output file
    if (!(outputFormatContext->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open(&outputFormatContext->pb, outputPath, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open output file '%s'\n", outputPath);
            avformat_close_input(&inputFormatContext);
            avformat_free_context(outputFormatContext);
            return;
        }
    }

    // Write output file header
    if (avformat_write_header(outputFormatContext, nullptr) < 0) {
        fprintf(stderr, "Error occurred when writing header to output file\n");
        avformat_close_input(&inputFormatContext);
        avformat_free_context(outputFormatContext);
        return;
    }

    // Start a thread to pause the input stream after the specified duration (milliseconds)
    std::thread stop_thread([&]() {
        std::this_thread::sleep_for(std::chrono::milliseconds(duration));
        av_read_pause(inputFormatContext);
    });

    // Read packets from input and write them to output
    while (av_read_frame(inputFormatContext, &packet) >= 0) {
        AVStream* inStream = inputFormatContext->streams[packet.stream_index];
        AVStream* outStream = outputFormatContext->streams[packet.stream_index];

        // Rescale timestamps from the input stream's time base to the output stream's
        packet.pts = av_rescale_q_rnd(packet.pts, inStream->time_base, outStream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        packet.dts = av_rescale_q_rnd(packet.dts, inStream->time_base, outStream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        packet.duration = av_rescale_q(packet.duration, inStream->time_base, outStream->time_base);
        packet.pos = -1;

        if (av_interleaved_write_frame(outputFormatContext, &packet) < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }

        av_packet_unref(&packet);
    }

    stop_thread.join();

    // Write output file trailer
    av_write_trailer(outputFormatContext);

    // Clean up
    avformat_close_input(&inputFormatContext);
    if (outputFormatContext && !(outputFormatContext->oformat->flags & AVFMT_NOFILE)) {
        avio_closep(&outputFormatContext->pb);
    }
    avformat_free_context(outputFormatContext);
}

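// Remux an RTSP stream into an MP4 file without re-encoding. Only the video
// stream is recorded (audio packets are dropped), and the first few video
// packets are discarded before recording starts. `duration` is the recording
// length in milliseconds; when credentials are given they are injected into the
// URL ("rtsp://user:pass@...") and also passed as demuxer options. `netHandle`
// is currently unused here.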
void dumpRtspToMp4(const char* rtspUrl, const char* outputPath, uint32_t duration, const std::string& userName, const std::string& password, net_handle_t netHandle)
{
    AVFormatContext* inputFormatContext = nullptr;
    AVFormatContext* outputFormatContext = nullptr;
    AVPacket packet;

#ifndef NDEBUG
    // Set the custom log callback
    av_log_set_callback(ffmpeg_log_callback);
    av_log_set_level(AV_LOG_VERBOSE);
#endif

    std::string url = rtspUrl;
    AVDictionary* options = NULL;
    av_dict_set(&options, "rtsp_transport", "tcp", 0);
    av_dict_set(&options, "stimeout", "5000000", 0); // socket I/O timeout in microseconds
    if (!userName.empty())
    {
        av_dict_set(&options, "username", userName.c_str(), 0);
        av_dict_set(&options, "password", password.c_str(), 0);

        // Also embed the credentials in the URL; assumes it starts with "rtsp://" (7 characters)
        char auth[512] = { 0 };
        snprintf(auth, sizeof(auth), "%s:%s@", userName.c_str(), password.c_str());

        url.insert(url.begin() + 7, auth, auth + strlen(auth));
    }

    // Open input RTSP stream
    int res = avformat_open_input(&inputFormatContext, url.c_str(), nullptr, &options);
    av_dict_free(&options);
    if (res != 0) {
        char errbuf[AV_ERROR_MAX_STRING_SIZE];
        av_strerror(res, errbuf, AV_ERROR_MAX_STRING_SIZE);
        fprintf(stderr, "Could not open input: %s (error code: %d)\n", errbuf, res);
        return;
    }

    // Retrieve input stream information
    if (avformat_find_stream_info(inputFormatContext, nullptr) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        avformat_close_input(&inputFormatContext);
        return;
    }

    // Open output MP4 file
    if (avformat_alloc_output_context2(&outputFormatContext, nullptr, "mp4", outputPath) < 0) {
        fprintf(stderr, "Could not create output context\n");
        avformat_close_input(&inputFormatContext);
        return;
    }

    // Copy stream information from input to output
    for (unsigned int i = 0; i < inputFormatContext->nb_streams; i++) {
        AVStream* inStream = inputFormatContext->streams[i];
        const AVCodecParameters *in_codecpar = inStream->codecpar;

        // Skip audio streams; only video is recorded
        if (inStream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            continue;
        }

        if (in_codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            // Copy video stream as-is
            const AVCodec *codec = avcodec_find_decoder(in_codecpar->codec_id);
            AVStream *out_stream = avformat_new_stream(outputFormatContext, codec);
            if (!out_stream) {
                fprintf(stderr, "Failed to allocate output stream\n");
                avformat_close_input(&inputFormatContext);
                avformat_free_context(outputFormatContext);
                return;
            }
            avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
            out_stream->codecpar->codec_tag = 0;
            out_stream->time_base = (AVRational){1, 90000};
            out_stream->avg_frame_rate = inStream->avg_frame_rate;
        }
        else if (in_codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            // Unreachable while audio streams are skipped above; kept in case audio is re-enabled.
            // Setup AAC audio stream
            const AVCodec *aac_encoder = avcodec_find_encoder(AV_CODEC_ID_AAC);
            if (!aac_encoder) {
                fprintf(stderr, "AAC encoder not found\n");
                avformat_close_input(&inputFormatContext);
                avformat_free_context(outputFormatContext);
                return;
            }

            AVStream *out_stream = avformat_new_stream(outputFormatContext, aac_encoder);
            if (!out_stream) {
                fprintf(stderr, "Failed to allocate output stream\n");
                avformat_close_input(&inputFormatContext);
                avformat_free_context(outputFormatContext);
                return;
            }

            // Set AAC parameters
            out_stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
            out_stream->codecpar->codec_id = AV_CODEC_ID_AAC;
            out_stream->codecpar->sample_rate = in_codecpar->sample_rate;
            out_stream->codecpar->format = AV_SAMPLE_FMT_FLTP;
            out_stream->codecpar->channels = in_codecpar->channels;
            out_stream->codecpar->channel_layout = av_get_default_channel_layout(in_codecpar->channels);
            out_stream->codecpar->bit_rate = 128000;
            out_stream->codecpar->frame_size = 1024; // AAC frame size
            out_stream->time_base = (AVRational){1, in_codecpar->sample_rate};
        }
    }

    // Open output file
    if (!(outputFormatContext->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open(&outputFormatContext->pb, outputPath, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open output file '%s'\n", outputPath);
            avformat_close_input(&inputFormatContext);
            avformat_free_context(outputFormatContext);
            return;
        }
    }

    AVDictionary *opts = NULL;

    // Set output format options
    av_dict_set(&opts, "movflags", "faststart+frag_keyframe", 0);
    av_dict_set(&opts, "brand", "mp42", 0);

    // Write output file header
    res = avformat_write_header(outputFormatContext, &opts);
    av_dict_free(&opts);
    if (res < 0) {
        char errbuf[AV_ERROR_MAX_STRING_SIZE] = { 0 };
        av_strerror(res, errbuf, AV_ERROR_MAX_STRING_SIZE);
        fprintf(stderr, "Error occurred when writing header to output file: %s (error code: %d)\n", errbuf, res);
        if (!(outputFormatContext->oformat->flags & AVFMT_NOFILE)) {
            avio_closep(&outputFormatContext->pb);  // the output file was already opened above
        }
        avformat_close_input(&inputFormatContext);
        avformat_free_context(outputFormatContext);
        return;
    }

#if 0
    // Start a thread to stop the streaming after the specified duration
    std::thread stop_thread([&]() {
        std::this_thread::sleep_for(std::chrono::milliseconds(duration));
        av_read_pause(inputFormatContext);
    });
#endif

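    // The next loop discards the first few video packets before recording starts,
    // presumably so the file does not begin on a partial GOP from the camera.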
    uint32_t framesToSkip = 16;
    uint32_t framesSkipped = 0;
    // Skip initial frames
    while (framesSkipped < framesToSkip) {
        if (av_read_frame(inputFormatContext, &packet) < 0)
            break;

        if (inputFormatContext->streams[packet.stream_index]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { // Video stream
            framesSkipped++;
        }
        av_packet_unref(&packet);
    }

    auto startTime = av_gettime();  // microseconds
    // int64_t durationNs = (int64_t)duration * 1000000;
    int64_t durationUs = (int64_t)(duration + 32) * 1000;  // duration is in milliseconds, plus a small margin
    // Read packets from input and write them to output
    while (1) {
        if ((av_gettime() - startTime) >= durationUs) {
            // printf("Duration limit reached (%d seconds)\n", ctx->duration_secs);
            break;
        }

#if 0
        AVStream* inStream = inputFormatContext->streams[packet.stream_index];
        AVStream* outStream = outputFormatContext->streams[packet.stream_index];

        packet.pts = av_rescale_q_rnd(packet.pts, inStream->time_base, outStream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        packet.dts = av_rescale_q_rnd(packet.dts, inStream->time_base, outStream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        packet.duration = av_rescale_q(packet.duration, inStream->time_base, outStream->time_base);
        packet.pos = -1;

        if (av_interleaved_write_frame(outputFormatContext, &packet) < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
#endif

        if (av_read_frame(inputFormatContext, &packet) < 0) break;

        // Skip audio (and any other non-video) packets
        if (inputFormatContext->streams[packet.stream_index]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
        {
            av_packet_unref(&packet);
            continue;
        }

        // Adjust packet timebase; the output context only contains the single video stream
        AVStream *in_stream = inputFormatContext->streams[packet.stream_index];
        AVStream *out_stream = outputFormatContext->streams[0];
        packet.stream_index = 0;
        av_packet_rescale_ts(&packet, in_stream->time_base, out_stream->time_base);
        packet.pos = -1;

        res = av_write_frame(outputFormatContext, &packet);

        av_packet_unref(&packet);

        if (res < 0)
        {
            break;
        }
    }

    // stop_thread.join();

    // Write output file trailer
    av_write_trailer(outputFormatContext);

    // Clean up
    avformat_close_input(&inputFormatContext);
    if (outputFormatContext && !(outputFormatContext->oformat->flags & AVFMT_NOFILE)) {
        avio_closep(&outputFormatContext->pb);
    }
    avformat_free_context(outputFormatContext);
}