File ffmpeg-7.patch of Package d2vwitch
From 22ad84fb93cdc593e7eb474f5fb97515fe5ff1bc Mon Sep 17 00:00:00 2001
From: TheGreatMcPain <james@thegreatmcpain.xyz>
Date: Fri, 24 Jan 2025 13:41:59 -0600
Subject: [PATCH] Updates for FFmpeg7
---
src/Audio.cpp | 55 +++++++++++++++----------------
src/D2V.cpp | 85 +++++++++++++++++++++++-------------------------
src/D2VWitch.cpp | 6 ++--
src/FFMPEG.cpp | 2 --
4 files changed, 70 insertions(+), 78 deletions(-)
diff --git a/src/Audio.cpp b/src/Audio.cpp
index f859a2c..3fb0e28 100644
--- a/src/Audio.cpp
+++ b/src/Audio.cpp
@@ -84,9 +84,8 @@ AVFormatContext *openWave64(const std::string &path, const AVCodecParameters *in
out_ctx->codec_id = codec_id;
out_ctx->codec_tag = 0x0001;
out_ctx->sample_rate = in_par->sample_rate;
- out_ctx->channels = in_par->channels;
out_ctx->sample_fmt = static_cast<AVSampleFormat>(in_par->format);
- out_ctx->channel_layout = in_par->channel_layout;
+ av_channel_layout_copy(&out_ctx->ch_layout, &in_par->ch_layout);
ret = avcodec_open2(out_ctx, pcm_codec, nullptr);
if (ret < 0) {
@@ -150,17 +149,18 @@ const char *suggestAudioFileExtension(AVCodecID codec_id) {
return extension;
}
-
+/*
int64_t getChannelLayout(AVCodecParameters *avpar) {
- int64_t channel_layout = avpar->channel_layout;
+ int64_t channel_layout = avpar->ch_layout.nb_channels;
if (channel_layout == 0) {
- int64_t channels = avpar->channels;
+ int64_t channels = avpar->ch_layout.nb_channels;
channel_layout = av_get_default_channel_layout(channels);
}
return channel_layout;
}
+*/
bool calculateAudioDelays(FakeFile &fake_file, int video_stream_id, AudioDelayMap &audio_delay_map, int64_t *first_video_keyframe_pos, std::string &error) {
@@ -216,8 +216,7 @@ bool calculateAudioDelays(FakeFile &fake_file, int video_stream_id, AudioDelayMa
bool second_keyframe_reached = false;
- AVPacket packet;
- av_init_packet(&packet);
+ AVPacket *packet = av_packet_alloc();
struct AudioPacketDetails {
int64_t pos;
@@ -226,51 +225,51 @@ bool calculateAudioDelays(FakeFile &fake_file, int video_stream_id, AudioDelayMa
std::unordered_map<int, std::vector<AudioPacketDetails> > audio_packet_details_map;
- // av_read_frame may not return packets from different streams in order (packet.pos always increasing)
- while ((audio_streams_left != 0 || !second_keyframe_reached) && av_read_frame(f.fctx, &packet) == 0) {
- if (packet.stream_index == video_stream_index) {
- AVCodecID codec_id = f.fctx->streams[packet.stream_index]->codecpar->codec_id;
+ // av_read_frame may not return packets from different streams in order (packet->pos always increasing)
+ while ((audio_streams_left != 0 || !second_keyframe_reached) && av_read_frame(f.fctx, packet) == 0) {
+ if (packet->stream_index == video_stream_index) {
+ AVCodecID codec_id = f.fctx->streams[packet->stream_index]->codecpar->codec_id;
if (codec_id == AV_CODEC_ID_H264) {
uint8_t *output_buffer; /// free this?
int output_buffer_size;
- while (packet.size) {
+ while (packet->size) {
int parsed_bytes = av_parser_parse2(f.parser, f.avctx, &output_buffer, &output_buffer_size,
- packet.data, packet.size,
- packet.pts, packet.dts, packet.pos);
+ packet->data, packet->size,
+ packet->pts, packet->dts, packet->pos);
- packet.data += parsed_bytes;
- packet.size -= parsed_bytes;
+ packet->data += parsed_bytes;
+ packet->size -= parsed_bytes;
}
} else {
- d2vWitchParseMPEG12Data(f.parser, f.avctx, packet.data, packet.size);
+ d2vWitchParseMPEG12Data(f.parser, f.avctx, packet->data, packet->size);
}
if (f.parser->width > 0 && f.parser->height > 0) {
if (f.parser->key_frame) {
if (first_video_pts == AV_NOPTS_VALUE) {
- first_video_pts = packet.pts;
- *first_video_keyframe_pos = packet.pos;
+ first_video_pts = packet->pts;
+ *first_video_keyframe_pos = packet->pos;
} else {
second_keyframe_reached = true;
}
} else if (first_video_pts != AV_NOPTS_VALUE) {
- if (packet.pts < first_video_pts && packet.pts != AV_NOPTS_VALUE) {
- first_video_pts = packet.pts;
+ if (packet->pts < first_video_pts && packet->pts != AV_NOPTS_VALUE) {
+ first_video_pts = packet->pts;
}
}
}
} else {
- if (f.fctx->streams[packet.stream_index]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
- int id = f.fctx->streams[packet.stream_index]->id;
+ if (f.fctx->streams[packet->stream_index]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
+ int id = f.fctx->streams[packet->stream_index]->id;
- AVRational time_base = f.fctx->streams[packet.stream_index]->time_base;
- int64_t pts = packet.pts * 1000 * time_base.num / time_base.den;
+ AVRational time_base = f.fctx->streams[packet->stream_index]->time_base;
+ int64_t pts = packet->pts * 1000 * time_base.num / time_base.den;
if (first_video_pts == AV_NOPTS_VALUE) {
// We haven't reached the first video keyframe yet, so just buffer the audio packet details.
- audio_packet_details_map[id].push_back({ packet.pos, pts });
+ audio_packet_details_map[id].push_back({ packet->pos, pts });
} else {
auto latest_packet = audio_packet_details_map[id].crbegin();
@@ -278,13 +277,13 @@ bool calculateAudioDelays(FakeFile &fake_file, int video_stream_id, AudioDelayMa
if (audio_streams_left > 0)
audio_streams_left--;
} else {
- audio_packet_details_map[id].push_back({ packet.pos, pts });
+ audio_packet_details_map[id].push_back({ packet->pos, pts });
}
}
}
}
- av_packet_unref(&packet);
+ av_packet_unref(packet);
}
AVRational time_base = f.fctx->streams[video_stream_index]->time_base;
diff --git a/src/D2V.cpp b/src/D2V.cpp
index b6129ed..16203d7 100644
--- a/src/D2V.cpp
+++ b/src/D2V.cpp
@@ -447,15 +447,14 @@ bool D2V::handleAudioPacket(AVPacket *packet) {
break;
}
- AVPacket pkt_out;
- av_init_packet(&pkt_out);
- pkt_out.data = frame->data[0];
- pkt_out.size = frame->nb_samples * frame->channels * av_get_bytes_per_sample((AVSampleFormat)frame->format);
- pkt_out.stream_index = 0;
- pkt_out.pts = 0;
- pkt_out.dts = 0;
-
- av_write_frame(w64_ctx, &pkt_out);
+ AVPacket *pkt_out = av_packet_alloc();
+ pkt_out->data = frame->data[0];
+ pkt_out->size = frame->nb_samples * frame->ch_layout.nb_channels * av_get_bytes_per_sample((AVSampleFormat)frame->format);
+ pkt_out->stream_index = 0;
+ pkt_out->pts = 0;
+ pkt_out->dts = 0;
+
+ av_write_frame(w64_ctx, pkt_out); av_packet_free(&pkt_out);
};
av_frame_unref(frame);
@@ -530,10 +529,9 @@ std::atomic_bool stop_processing(false);
void D2V::index() {
- AVPacket packet;
- av_init_packet(&packet);
+ AVPacket *packet = av_packet_alloc();
- while (av_read_frame(f->fctx, &packet) == 0) {
+ while (av_read_frame(f->fctx, packet) == 0) {
if (stop_processing) {
stop_processing = false;
result = ProcessingCancelled;
@@ -544,28 +542,28 @@ void D2V::index() {
// Apparently we might receive packets from streams with AVDISCARD_ALL set,
// and also from streams discovered late, probably.
- if (packet.stream_index != video_stream->index &&
- !audio_files.count(packet.stream_index)) {
- av_packet_unref(&packet);
+ if (packet->stream_index != video_stream->index &&
+ !audio_files.count(packet->stream_index)) {
+ av_packet_unref(packet);
continue;
}
bool okay = true;
- if (packet.stream_index == video_stream->index)
- okay = handleVideoPacket(&packet);
+ if (packet->stream_index == video_stream->index)
+ okay = handleVideoPacket(packet);
else
- okay = handleAudioPacket(&packet);
+ okay = handleAudioPacket(packet);
if (!okay) {
- av_packet_unref(&packet);
+ av_packet_unref(packet);
result = ProcessingError;
fclose(d2v_file);
closeAudioFiles(audio_files, f->fctx);
return;
}
- av_packet_unref(&packet);
+ av_packet_unref(packet);
}
@@ -622,7 +620,7 @@ void D2V::index() {
return;
}
- av_init_packet(&packet);
+ /* reuse packet allocated above; av_packet_unref()/av_read_frame left it blank */
for (size_t i = 0; i < lines.size(); ) {
// Report progress because this takes a while. Especially with slow hard drives, probably.
@@ -646,13 +644,13 @@ void D2V::index() {
do {
- av_packet_unref(&packet);
- av_read_frame(f2.fctx, &packet);
- } while (f2.fctx->streams[packet.stream_index]->id != video_stream->id);
+ av_packet_unref(packet);
+ av_read_frame(f2.fctx, packet);
+ } while (f2.fctx->streams[packet->stream_index]->id != video_stream->id);
- int64_t position = packet.pos;
+ int64_t position = packet->pos;
- av_packet_unref(&packet);
+ av_packet_unref(packet);
bool invalid_seek_point = position != 0;
@@ -672,13 +670,13 @@ void D2V::index() {
do {
- av_packet_unref(&packet);
- av_read_frame(f2.fctx, &packet);
- } while (f2.fctx->streams[packet.stream_index]->id != video_stream->id);
+ av_packet_unref(packet);
+ av_read_frame(f2.fctx, packet);
+ } while (f2.fctx->streams[packet->stream_index]->id != video_stream->id);
- position = packet.pos;
+ position = packet->pos;
- av_packet_unref(&packet);
+ av_packet_unref(packet);
if (position == target - middle) { // middle is good
if (log_message)
@@ -801,10 +799,9 @@ void D2V::demuxVideo(FILE *video_file, int64_t start_gop_position, int64_t end_g
f->seek(start_gop_position);
- AVPacket packet;
- av_init_packet(&packet);
+ AVPacket *packet = av_packet_alloc();
- while (av_read_frame(f->fctx, &packet) == 0) {
+ while (av_read_frame(f->fctx, packet) == 0) {
if (stop_processing) {
stop_processing = false;
result = ProcessingCancelled;
@@ -814,19 +811,19 @@ void D2V::demuxVideo(FILE *video_file, int64_t start_gop_position, int64_t end_g
// Apparently we might receive packets from streams with AVDISCARD_ALL set,
// and also from streams discovered late, probably.
- if (packet.stream_index != video_stream->index ||
- packet.pos < start_gop_position) {
- av_packet_unref(&packet);
+ if (packet->stream_index != video_stream->index ||
+ packet->pos < start_gop_position) {
+ av_packet_unref(packet);
continue;
- } else if (packet.pos >= end_gop_position) {
- av_packet_unref(&packet);
+ } else if (packet->pos >= end_gop_position) {
+ av_packet_unref(packet);
break;
}
if (progress_report)
- progress_report(packet.pos - start_gop_position, end_gop_position - start_gop_position, progress_data);
+ progress_report(packet->pos - start_gop_position, end_gop_position - start_gop_position, progress_data);
- if (fwrite(packet.data, 1, packet.size, video_file) < (size_t)packet.size) {
+ if (fwrite(packet->data, 1, packet->size, video_file) < (size_t)packet->size) {
char id[20] = { 0 };
snprintf(id, 19, "%x", video_stream->id);
error = "Failed to write video packet from stream id ";
@@ -835,14 +832,14 @@ void D2V::demuxVideo(FILE *video_file, int64_t start_gop_position, int64_t end_g
result = ProcessingError;
- av_packet_unref(&packet);
+ av_packet_unref(packet);
fclose(video_file);
return;
}
- av_packet_unref(&packet);
+ av_packet_unref(packet);
}
fclose(video_file);
@@ -994,7 +991,7 @@ std::string suggestAudioTrackSuffix(const AVStream *stream, const AudioDelayMap
suggestion += id;
char channels[512] = { 0 };
- av_get_channel_layout_string(channels, 512, 0, getChannelLayout(stream->codecpar));
+ av_channel_layout_describe(&stream->codecpar->ch_layout, channels, 512);
suggestion += " ";
suggestion += channels;
diff --git a/src/D2VWitch.cpp b/src/D2VWitch.cpp
index e5273f6..6b4ea45 100644
--- a/src/D2VWitch.cpp
+++ b/src/D2VWitch.cpp
@@ -231,18 +231,16 @@ void printInfo(const AVFormatContext *fctx, const FakeFile &fake_file) {
if (desc)
type = desc->long_name ? desc->long_name : desc->name;
- int64_t bit_rate, channel_layout, sample_rate;
+ int64_t bit_rate, sample_rate;
if (av_opt_get_int(fctx->streams[i]->codecpar, "ab", 0, &bit_rate) < 0)
bit_rate = -1;
- channel_layout = getChannelLayout(fctx->streams[i]->codecpar);
-
if (av_opt_get_int(fctx->streams[i]->codecpar, "ar", 0, &sample_rate) < 0)
sample_rate = -1;
char channels[512] = { 0 };
- av_get_channel_layout_string(channels, 512, 0, channel_layout);
+ av_channel_layout_describe(&fctx->streams[i]->codecpar->ch_layout, channels, 512);
fprintf(stderr, " Id: %x, type: %s, %d kbps, %s, %d Hz\n",
fctx->streams[i]->id,
diff --git a/src/FFMPEG.cpp b/src/FFMPEG.cpp
index f60d94b..96715ba 100644
--- a/src/FFMPEG.cpp
+++ b/src/FFMPEG.cpp
@@ -191,7 +191,6 @@ void FFMPEG::deinitVideoCodec() {
if (avctx) {
- avcodec_close(avctx);
avcodec_free_context(&avctx);
}
}
@@ -208,7 +207,6 @@ void FFMPEG::cleanup() {
for (auto it = audio_ctx.begin(); it != audio_ctx.end(); it++) {
- avcodec_close(it->second);
avcodec_free_context(&it->second);
}
}