Implement PTZ video stream forwarding

PtzNew
Matthew 3 months ago
parent b18b9f54e4
commit f0c6738887

@@ -198,12 +198,20 @@ bool RtspForwarder::stop()
int RtspForwarder::run()
{
#ifndef NDEBUG
// Set the custom log callback
av_log_set_callback(ffmpeg_log_callback);
av_log_set_level(AV_LOG_DEBUG);
#endif
isRunning = true;
AVFormatContext* inputFormatContext = nullptr;
AVFormatContext* outputFormatContext = nullptr;
int ret;
int videoStreamIndex = -1;
int64_t startTime = AV_NOPTS_VALUE;
AVBSFContext* bsf_ctx = nullptr;
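// Bitstream filter context; allocated below with the h264_mp4toannexb filter when the video stream is H.264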
std::string url = inputUrl;
if (!m_userName.empty())
@@ -218,17 +226,13 @@ int RtspForwarder::run()
AVDictionary* inputOptions = nullptr;
av_dict_set(&inputOptions, "rtsp_transport", "tcp", 0);
av_dict_set(&inputOptions, "stimeout", "5000000", 0); // 5 second timeout
av_dict_set(&inputOptions, "buffer_size", "1024000", 0); // 1MB buffer
// Output options
AVDictionary* outputOptions = nullptr;
av_dict_set(&outputOptions, "rtsp_transport", "tcp", 0);
av_dict_set(&outputOptions, "f", "rtsp", 0);
// av_dict_set(&inputOptions, "buffer_size", "1024000", 0); // 1MB buffer
std::cout << "Opening input: " << url << std::endl;
// Open input
ret = avformat_open_input(&inputFormatContext, url.c_str(), nullptr, &inputOptions);
av_dict_free(&inputOptions);
if (ret < 0) {
std::cerr << "Could not open input: " << av_err2str(ret) << std::endl;
return ret;
@@ -256,57 +260,122 @@ int RtspForwarder::run()
return -1;
}
// Create stream mapping
std::vector<int> streamMapping(inputFormatContext->nb_streams, -1);
int outputStreamIdx = 0;
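// streamMapping[i] will hold the output stream index for input stream i, or -1 for streams that are dropped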
// Allocate output context
avformat_alloc_output_context2(&outputFormatContext, nullptr, "rtsp", outputUrl.c_str());
if (!outputFormatContext) {
ret = avformat_alloc_output_context2(&outputFormatContext, nullptr, "rtsp", outputUrl.c_str());
if ((ret < 0) || !outputFormatContext) {
std::cerr << "Could not create output context" << std::endl;
avformat_close_input(&inputFormatContext);
return AVERROR_UNKNOWN;
return false;
}
// Create output streams by copying from input
// FIXED VERSION - remove the redundant stream creation
for (unsigned i = 0; i < inputFormatContext->nb_streams; i++) {
AVStream* inStream = inputFormatContext->streams[i];
AVCodecParameters* inCodecpar = inStream->codecpar;
const AVCodecParameters *in_codecpar = inStream->codecpar;
// Skip non-video streams if needed
if (in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO) {
streamMapping[i] = -1;
continue;
}
AVStream* outStream = avformat_new_stream(outputFormatContext, nullptr);
// Create only ONE stream per input stream
const AVCodec *codec = avcodec_find_decoder(in_codecpar->codec_id);
AVStream *outStream = avformat_new_stream(outputFormatContext, codec);
if (!outStream) {
std::cerr << "Failed to allocate output stream" << std::endl;
avformat_close_input(&inputFormatContext);
avformat_free_context(outputFormatContext);
return AVERROR_UNKNOWN;
return false;
}
ret = avcodec_parameters_copy(outStream->codecpar, in_codecpar);
outStream->codecpar->codec_tag = 0;
outStream->time_base = (AVRational){1, 90000};
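// 90 kHz is the standard RTP clock rate for video payloads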
outStream->avg_frame_rate = inStream->avg_frame_rate;
// Map input stream to output stream
streamMapping[i] = outputStreamIdx++;
}
ret = avcodec_parameters_copy(outStream->codecpar, inCodecpar);
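// The h264_mp4toannexb filter rewrites length-prefixed (AVCC) H.264 packets into Annex B start-code
// format and inserts SPS/PPS, which the RTP/RTSP packetizer generally expects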
const AVBitStreamFilter* filter = av_bsf_get_by_name("h264_mp4toannexb");
if (filter)
{
for (unsigned i = 0; i < outputFormatContext->nb_streams; i++) {
AVStream* stream = outputFormatContext->streams[i];
if (stream->codecpar->codec_id == AV_CODEC_ID_H264) {
ret = av_bsf_alloc(filter, &bsf_ctx);
if (ret < 0) {
std::cerr << "Failed to copy codec parameters" << std::endl;
avformat_close_input(&inputFormatContext);
avformat_free_context(outputFormatContext);
return ret;
std::cerr << "Failed to allocate bitstream filter context: " << av_err2str(ret) << std::endl;
return false;
}
// Fix codec tag
outStream->codecpar->codec_tag = 0;
// Copy parameters from input to bsf
ret = avcodec_parameters_copy(bsf_ctx->par_in, stream->codecpar);
if (ret < 0) {
std::cerr << "Failed to copy parameters to bsf: " << av_err2str(ret) << std::endl;
return false;
}
// Copy time base
outStream->time_base = inStream->time_base;
// Initialize the bsf context
ret = av_bsf_init(bsf_ctx);
if (ret < 0) {
std::cerr << "Failed to initialize bitstream filter: " << av_err2str(ret) << std::endl;
return false;
}
// Update output parameters
ret = avcodec_parameters_copy(stream->codecpar, bsf_ctx->par_out);
if (ret < 0) {
std::cerr << "Failed to copy parameters from bsf: " << av_err2str(ret) << std::endl;
return false;
}
break; // Only apply to the first H.264 stream
}
}
}
AVDictionary* outputOptions = nullptr;
av_dict_set(&outputOptions, "rtsp_transport", "tcp", 0);
av_dict_set(&outputOptions, "rtsp_flags", "filter_src", 0);
av_dict_set(&outputOptions, "timeout", "5000000", 0);
av_dict_set(&outputOptions, "allowed_media_types", "video", 0);
av_dict_set(&outputOptions, "buffer_size", "1024000", 0); // 1MB buffer
av_dict_set(&outputOptions, "fflags", "nobuffer", 0); // Reduce latency
av_dict_set(&outputOptions, "muxdelay", "0.1", 0); // Reduce delay
av_dict_set(&outputOptions, "max_delay", "500000", 0);
av_dict_set(&outputOptions, "preset", "ultrafast", 0);
av_dict_set(&outputOptions, "tune", "zerolatency", 0);
av_dict_set(&outputOptions, "rtsp_flags", "prefer_tcp", 0);
// Open output
if (!(outputFormatContext->oformat->flags & AVFMT_NOFILE)) {
ret = avio_open(&outputFormatContext->pb, outputUrl.c_str(), AVIO_FLAG_WRITE);
// Output options
// ret = avio_open(&outputFormatContext->pb, outputUrl.c_str(), AVIO_FLAG_WRITE);
ret = avio_open2(&outputFormatContext->pb, outputFormatContext->url, AVIO_FLAG_WRITE, NULL, &outputOptions);
if (ret < 0) {
std::cerr << "Could not open output URL: " << av_err2str(ret) << std::endl;
char errbuf[AV_ERROR_MAX_STRING_SIZE] = { 0 };
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
std::cerr << "Could not open output URL: " << errbuf << std::endl;
avformat_close_input(&inputFormatContext);
avformat_free_context(outputFormatContext);
av_dict_free(&outputOptions);
return ret;
}
}
// Write header
ret = avformat_write_header(outputFormatContext, &outputOptions);
av_dict_free(&outputOptions);
if (ret < 0) {
std::cerr << "Error writing header: " << av_err2str(ret) << std::endl;
char errbuf[AV_ERROR_MAX_STRING_SIZE] = { 0 };
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
std::cerr << "Error writing header: " << errbuf << std::endl;
avformat_close_input(&inputFormatContext);
if (!(outputFormatContext->oformat->flags & AVFMT_NOFILE))
avio_closep(&outputFormatContext->pb);
@@ -316,6 +385,7 @@ int RtspForwarder::run()
// Main loop - read and write packets
AVPacket packet;
AVMediaType mediaType;
while (isRunning) {
ret = av_read_frame(inputFormatContext, &packet);
if (ret < 0) {
@@ -333,6 +403,27 @@ int RtspForwarder::run()
break;
}
// Later when writing packets:
int original_stream_index = packet.stream_index;
if (streamMapping[original_stream_index] >= 0) {
packet.stream_index = streamMapping[original_stream_index];
// Write packet...
} else {
// Skip this packet
av_packet_unref(&packet);
continue;
}
// Skip audio and data packets
mediaType = inputFormatContext->streams[original_stream_index]->codecpar->codec_type;
if (mediaType == AVMEDIA_TYPE_AUDIO || mediaType == AVMEDIA_TYPE_DATA)
{
av_packet_unref(&packet);
continue;
}
#if 0
// Fix timestamps if enabled
if (fixTimestamps) {
// Handle timestamp issues similar to FFmpeg warning
@@ -371,6 +462,76 @@ int RtspForwarder::run()
std::cerr << "Error writing frame: " << av_err2str(ret) << std::endl;
break;
}
#endif
AVStream *in_stream = inputFormatContext->streams[original_stream_index];
AVStream *out_stream = outputFormatContext->streams[packet.stream_index];
av_packet_rescale_ts(&packet, in_stream->time_base, out_stream->time_base);
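// Rescale pts/dts/duration from the input stream's time base to the output stream's time base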
// CRITICAL: Fix timestamp issues
if (packet.dts != AV_NOPTS_VALUE && packet.pts != AV_NOPTS_VALUE && packet.dts > packet.pts) {
packet.dts = packet.pts;
}
// Handle missing timestamps
if (packet.pts == AV_NOPTS_VALUE) {
if (startTime == AV_NOPTS_VALUE) {
startTime = av_gettime();
}
packet.pts = av_rescale_q(av_gettime() - startTime,
AV_TIME_BASE_Q,
out_stream->time_base);
packet.dts = packet.pts;
}
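// When the source supplies no pts, synthesize one from elapsed wall-clock time so the muxer still sees monotonic timestamps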
packet.pos = -1;
// Apply bitstream filter if it's H.264
if (bsf_ctx && out_stream->codecpar->codec_id == AV_CODEC_ID_H264) {
ret = av_bsf_send_packet(bsf_ctx, &packet);
if (ret < 0) {
std::cerr << "Error sending packet to bitstream filter: " << av_err2str(ret) << std::endl;
break;
}
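// Drain every packet the filter has ready; AVERROR(EAGAIN) means it needs more input before producing output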
while (ret >= 0) {
ret = av_bsf_receive_packet(bsf_ctx, &packet);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
// Need more input or end of file
break;
} else if (ret < 0) {
std::cerr << "Error receiving packet from bitstream filter: " << av_err2str(ret) << std::endl;
break;
}
// Write the filtered packet
ret = av_interleaved_write_frame(outputFormatContext, &packet);
av_packet_unref(&packet);
if (ret < 0) {
char errbuf[AV_ERROR_MAX_STRING_SIZE] = { 0 };
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
std::cerr << "Error writing frame: " << errbuf << std::endl;
break;
}
}
} else {
// Write the packet without filtering
ret = av_interleaved_write_frame(outputFormatContext, &packet);
av_packet_unref(&packet);
if (ret < 0) {
char errbuf[AV_ERROR_MAX_STRING_SIZE] = { 0 };
av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
std::cerr << "Error writing frame: " << errbuf << std::endl;
break;
}
}
}
cleanup:
// Free the bitstream filter context
if (bsf_ctx) {
av_bsf_free(&bsf_ctx);
}
// Write trailer
@@ -382,5 +543,5 @@ int RtspForwarder::run()
avio_closep(&outputFormatContext->pb);
avformat_free_context(outputFormatContext);
return 0;
return ret;
}
