27 #define ENABLE_VAAPI 0 
   30 #define MAX_SUPPORTED_WIDTH 1950 
   31 #define MAX_SUPPORTED_HEIGHT 1100 
   34 #include "libavutil/hwcontext_vaapi.h" 
   36 typedef struct VAAPIDecodeContext {
 
   38      VAEntrypoint va_entrypoint;
 
   40      VAContextID va_context;
 
   42 #if FF_API_STRUCT_VAAPI_CONTEXT 
   45     struct vaapi_context *old_context;
 
   46     AVBufferRef *device_ref;
 
   50      AVHWDeviceContext *device;
 
   51      AVVAAPIDeviceContext *hwctx;
 
   53      AVHWFramesContext *frames;
 
   54      AVVAAPIFramesContext *hwfc;
 
   56      enum AVPixelFormat surface_format;
 
   59 #endif // ENABLE_VAAPI 
   60 #endif // USE_HW_ACCEL 
   72         : last_frame(0), is_seeking(0), seeking_pts(0), seeking_frame(0), seek_count(0), NO_PTS_OFFSET(-99999),
 
   73           path(
path), is_video_seek(true), check_interlace(false), check_fps(false), enable_seek(true), is_open(false),
 
   74           seek_audio_frame_found(0), seek_video_frame_found(0),is_duration_known(false), largest_frame_processed(0),
 
   76           video_pts(0), pFormatCtx(NULL), videoStream(-1), audioStream(-1), pCodecCtx(NULL), aCodecCtx(NULL),
 
   77         pStream(NULL), aStream(NULL), pFrame(NULL), previous_packet_location{-1,0},
 
   85     pts_offset_seconds = NO_PTS_OFFSET;
 
   86     video_pts_seconds = NO_PTS_OFFSET;
 
   87     audio_pts_seconds = NO_PTS_OFFSET;
 
   90     working_cache.SetMaxBytesFromInfo(max_concurrent_frames * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
 
   91     final_cache.SetMaxBytesFromInfo(max_concurrent_frames * 2, info.width, info.height, info.sample_rate, info.channels);
 
  116     if (abs(diff) <= amount)
 
  127 static enum AVPixelFormat get_hw_dec_format(AVCodecContext *ctx, 
const enum AVPixelFormat *pix_fmts)
 
  129     const enum AVPixelFormat *p;
 
  131     for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
 
  133 #if defined(__linux__) 
  135             case AV_PIX_FMT_VAAPI:
 
  140             case AV_PIX_FMT_VDPAU:
 
  148             case AV_PIX_FMT_DXVA2_VLD:
 
  153             case AV_PIX_FMT_D3D11:
 
  159 #if defined(__APPLE__) 
  161             case AV_PIX_FMT_VIDEOTOOLBOX:
 
  168             case AV_PIX_FMT_CUDA:
 
  184     return AV_PIX_FMT_NONE;
 
  187 int FFmpegReader::IsHardwareDecodeSupported(
int codecid)
 
  191         case AV_CODEC_ID_H264:
 
  192         case AV_CODEC_ID_MPEG2VIDEO:
 
  193         case AV_CODEC_ID_VC1:
 
  194         case AV_CODEC_ID_WMV1:
 
  195         case AV_CODEC_ID_WMV2:
 
  196         case AV_CODEC_ID_WMV3:
 
  205 #endif // USE_HW_ACCEL 
  211         const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
 
  221         if (avformat_open_input(&pFormatCtx, 
path.c_str(), NULL, NULL) != 0)
 
  225         if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
 
  232         packet_status.
reset(
true);
 
  235         for (
unsigned int i = 0; i < pFormatCtx->nb_streams; i++) {
 
  237             if (
AV_GET_CODEC_TYPE(pFormatCtx->streams[i]) == AVMEDIA_TYPE_VIDEO && videoStream < 0) {
 
  244             if (
AV_GET_CODEC_TYPE(pFormatCtx->streams[i]) == AVMEDIA_TYPE_AUDIO && audioStream < 0) {
 
  251         if (videoStream == -1 && audioStream == -1)
 
  255         if (videoStream != -1) {
 
  260             pStream = pFormatCtx->streams[videoStream];
 
  266             const AVCodec *pCodec = avcodec_find_decoder(codecId);
 
  267             AVDictionary *
opts = NULL;
 
  268             int retry_decode_open = 2;
 
  273                 if (
hw_de_on && (retry_decode_open==2)) {
 
  275                     hw_de_supported = IsHardwareDecodeSupported(pCodecCtx->codec_id);
 
  278                 retry_decode_open = 0;
 
  283                 if (pCodec == NULL) {
 
  284                     throw InvalidCodec(
"A valid video codec could not be found for this file.", 
path);
 
  288                 av_dict_set(&
opts, 
"strict", 
"experimental", 0);
 
  292                     int i_decoder_hw = 0;
 
  294                     char *adapter_ptr = NULL;
 
  297                     fprintf(stderr, 
"Hardware decoding device number: %d\n", adapter_num);
 
  300                     pCodecCtx->get_format = get_hw_dec_format;
 
  302                     if (adapter_num < 3 && adapter_num >=0) {
 
  303 #if defined(__linux__) 
  304                         snprintf(adapter,
sizeof(adapter),
"/dev/dri/renderD%d", adapter_num+128);
 
  305                         adapter_ptr = adapter;
 
  307                         switch (i_decoder_hw) {
 
  309                                     hw_de_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
 
  312                                     hw_de_av_device_type = AV_HWDEVICE_TYPE_CUDA;
 
  315                                     hw_de_av_device_type = AV_HWDEVICE_TYPE_VDPAU;
 
  318                                     hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
 
  321                                     hw_de_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
 
  325 #elif defined(_WIN32) 
  328                         switch (i_decoder_hw) {
 
  330                                 hw_de_av_device_type = AV_HWDEVICE_TYPE_CUDA;
 
  333                                 hw_de_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
 
  336                                 hw_de_av_device_type = AV_HWDEVICE_TYPE_D3D11VA;
 
  339                                 hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
 
  342                                 hw_de_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
 
  345 #elif defined(__APPLE__) 
  348                         switch (i_decoder_hw) {
 
  350                                 hw_de_av_device_type =  AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
 
  353                                 hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
 
  356                                 hw_de_av_device_type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
 
  366 #if defined(__linux__) 
  367                     if( adapter_ptr != NULL && access( adapter_ptr, W_OK ) == 0 ) {
 
  368 #elif defined(_WIN32) 
  369                     if( adapter_ptr != NULL ) {
 
  370 #elif defined(__APPLE__) 
  371                     if( adapter_ptr != NULL ) {
 
  380                     hw_device_ctx = NULL;
 
  382                     if (av_hwdevice_ctx_create(&hw_device_ctx, hw_de_av_device_type, adapter_ptr, NULL, 0) >= 0) {
 
  383                         if (!(pCodecCtx->hw_device_ctx = av_buffer_ref(hw_device_ctx))) {
 
  418 #endif // USE_HW_ACCEL 
  425                     pCodecCtx->thread_type &= ~FF_THREAD_FRAME;
 
  429                 int avcodec_return = avcodec_open2(pCodecCtx, pCodec, &
opts);
 
  430                 if (avcodec_return < 0) {
 
  431                     std::stringstream avcodec_error_msg;
 
  432                     avcodec_error_msg << 
"A video codec was found, but could not be opened. Error: " << av_err2string(avcodec_return);
 
  438                     AVHWFramesConstraints *constraints = NULL;
 
  439                     void *hwconfig = NULL;
 
  440                     hwconfig = av_hwdevice_hwconfig_alloc(hw_device_ctx);
 
  444                     ((AVVAAPIHWConfig *)hwconfig)->config_id = ((VAAPIDecodeContext *)(pCodecCtx->priv_data))->va_config;
 
  445                     constraints = av_hwdevice_get_hwframe_constraints(hw_device_ctx,hwconfig);
 
  446 #endif // ENABLE_VAAPI 
  448                         if (pCodecCtx->coded_width < constraints->min_width     ||
 
  449                                 pCodecCtx->coded_height < constraints->min_height ||
 
  450                                 pCodecCtx->coded_width > constraints->max_width     ||
 
  451                                 pCodecCtx->coded_height > constraints->max_height) {
 
  454                             retry_decode_open = 1;
 
  457                                 av_buffer_unref(&hw_device_ctx);
 
  458                                 hw_device_ctx = NULL;
 
  463                             ZmqLogger::Instance()->
AppendDebugMethod(
"\nDecode hardware acceleration is used\n", 
"Min width :", constraints->min_width, 
"Min Height :", constraints->min_height, 
"MaxWidth :", constraints->max_width, 
"MaxHeight :", constraints->max_height, 
"Frame width :", pCodecCtx->coded_width, 
"Frame height :", pCodecCtx->coded_height);
 
  464                             retry_decode_open = 0;
 
  466                         av_hwframe_constraints_free(&constraints);
 
  479                         if (pCodecCtx->coded_width < 0      ||
 
  480                                 pCodecCtx->coded_height < 0     ||
 
  481                                 pCodecCtx->coded_width > max_w ||
 
  482                                 pCodecCtx->coded_height > max_h ) {
 
  483                             ZmqLogger::Instance()->
AppendDebugMethod(
"DIMENSIONS ARE TOO LARGE for hardware acceleration\n", 
"Max Width :", max_w, 
"Max Height :", max_h, 
"Frame width :", pCodecCtx->coded_width, 
"Frame height :", pCodecCtx->coded_height);
 
  485                             retry_decode_open = 1;
 
  488                                 av_buffer_unref(&hw_device_ctx);
 
  489                                 hw_device_ctx = NULL;
 
  493                             ZmqLogger::Instance()->
AppendDebugMethod(
"\nDecode hardware acceleration is used\n", 
"Max Width :", max_w, 
"Max Height :", max_h, 
"Frame width :", pCodecCtx->coded_width, 
"Frame height :", pCodecCtx->coded_height);
 
  494                             retry_decode_open = 0;
 
  502                 retry_decode_open = 0;
 
  503 #endif // USE_HW_ACCEL 
  504             } 
while (retry_decode_open); 
 
  513         if (audioStream != -1) {
 
  518             aStream = pFormatCtx->streams[audioStream];
 
  524             const AVCodec *aCodec = avcodec_find_decoder(codecId);
 
  530             if (aCodec == NULL) {
 
  531                 throw InvalidCodec(
"A valid audio codec could not be found for this file.", 
path);
 
  535             AVDictionary *
opts = NULL;
 
  536             av_dict_set(&
opts, 
"strict", 
"experimental", 0);
 
  539             if (avcodec_open2(aCodecCtx, aCodec, &
opts) < 0)
 
  540                 throw InvalidCodec(
"An audio codec was found, but could not be opened.", 
path);
 
  550         AVDictionaryEntry *tag = NULL;
 
  551         while ((tag = av_dict_get(pFormatCtx->metadata, 
"", tag, AV_DICT_IGNORE_SUFFIX))) {
 
  552             QString str_key = tag->key;
 
  553             QString str_value = tag->value;
 
  554             info.
metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
 
  558         for (
unsigned int i = 0; i < pFormatCtx->nb_streams; i++) {
 
  559             AVStream* st = pFormatCtx->streams[i];
 
  560             if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
 
  562                 for (
int j = 0; j < st->nb_side_data; j++) {
 
  563                     AVPacketSideData *sd = &st->side_data[j];
 
  566                     if (sd->type == AV_PKT_DATA_DISPLAYMATRIX &&
 
  567                         sd->size >= 9 * 
sizeof(int32_t) &&
 
  570                         double rotation = -av_display_rotation_get(
 
  571                             reinterpret_cast<int32_t *
>(sd->data));
 
  572                         if (std::isnan(rotation)) rotation = 0;
 
  576                     else if (sd->type == AV_PKT_DATA_SPHERICAL) {
 
  581                         const AVSphericalMapping* map =
 
  582                             reinterpret_cast<const AVSphericalMapping*
>(sd->data);
 
  585                         const char* proj_name = av_spherical_projection_name(map->projection);
 
  591                         auto to_deg = [](int32_t v){
 
  592                             return (
double)v / 65536.0;
 
  594                         info.
metadata[
"spherical_yaw"]   = std::to_string(to_deg(map->yaw));
 
  595                         info.
metadata[
"spherical_pitch"] = std::to_string(to_deg(map->pitch));
 
  596                         info.
metadata[
"spherical_roll"]  = std::to_string(to_deg(map->roll));
 
  604         previous_packet_location.
frame = -1;
 
  636         const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
 
  642         AVPacket *recent_packet = packet;
 
  647         int max_attempts = 128;
 
  652                                                      "attempts", attempts);
 
  664             RemoveAVPacket(recent_packet);
 
  669             if(avcodec_is_open(pCodecCtx)) {
 
  670                 avcodec_flush_buffers(pCodecCtx);
 
  676                     av_buffer_unref(&hw_device_ctx);
 
  677                     hw_device_ctx = NULL;
 
  680 #endif // USE_HW_ACCEL 
  681             if (img_convert_ctx) {
 
  682                 sws_freeContext(img_convert_ctx);
 
  683                 img_convert_ctx = 
nullptr;
 
  685             if (pFrameRGB_cached) {
 
  692             if(avcodec_is_open(aCodecCtx)) {
 
  693                 avcodec_flush_buffers(aCodecCtx);
 
  705         working_cache.
Clear();
 
  708         avformat_close_input(&pFormatCtx);
 
  709         av_freep(&pFormatCtx);
 
  714         largest_frame_processed = 0;
 
  715         seek_audio_frame_found = 0;
 
  716         seek_video_frame_found = 0;
 
  717         current_video_frame = 0;
 
  718         last_video_frame.reset();
 
  722 bool FFmpegReader::HasAlbumArt() {
 
  726     return pFormatCtx && videoStream >= 0 && pFormatCtx->streams[videoStream]
 
  727         && (pFormatCtx->streams[videoStream]->disposition & AV_DISPOSITION_ATTACHED_PIC);
 
  730 void FFmpegReader::UpdateAudioInfo() {
 
  747     info.
file_size = pFormatCtx->pb ? avio_size(pFormatCtx->pb) : -1;
 
  778     if (aStream->duration > 0 && aStream->duration > 
info.
duration) {
 
  781     } 
else if (pFormatCtx->duration > 0 && 
info.
duration <= 0.0f) {
 
  783         info.
duration = float(pFormatCtx->duration) / AV_TIME_BASE;
 
  826     AVDictionaryEntry *tag = NULL;
 
  827     while ((tag = av_dict_get(aStream->metadata, 
"", tag, AV_DICT_IGNORE_SUFFIX))) {
 
  828         QString str_key = tag->key;
 
  829         QString str_value = tag->value;
 
  830         info.
metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
 
  834 void FFmpegReader::UpdateVideoInfo() {
 
  842     info.
file_size = pFormatCtx->pb ? avio_size(pFormatCtx->pb) : -1;
 
  849     AVRational framerate = av_guess_frame_rate(pFormatCtx, pStream, NULL);
 
  861     if (pStream->sample_aspect_ratio.num != 0) {
 
  884     if (!check_interlace) {
 
  885         check_interlace = 
true;
 
  887         switch(field_order) {
 
  888             case AV_FIELD_PROGRESSIVE:
 
  901             case AV_FIELD_UNKNOWN:
 
  903                 check_interlace = 
false;
 
  918     if (
info.
duration <= 0.0f && pFormatCtx->duration >= 0) {
 
  920         info.
duration = float(pFormatCtx->duration) / AV_TIME_BASE;
 
  930     if (
info.
duration <= 0.0f && pStream->duration == AV_NOPTS_VALUE && pFormatCtx->duration == AV_NOPTS_VALUE) {
 
  944         if (pFormatCtx && pFormatCtx->iformat && strcmp(pFormatCtx->iformat->name, 
"gif") == 0)
 
  946             if (pStream->nb_frames > 1) {
 
  961         is_duration_known = 
false;
 
  964         is_duration_known = 
true;
 
  974     AVDictionaryEntry *tag = NULL;
 
  975     while ((tag = av_dict_get(pStream->metadata, 
"", tag, AV_DICT_IGNORE_SUFFIX))) {
 
  976         QString str_key = tag->key;
 
  977         QString str_value = tag->value;
 
  978         info.
metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
 
  983     return this->is_duration_known;
 
  989         throw ReaderClosed(
"The FFmpegReader is closed.  Call Open() before calling this method.", 
path);
 
  992     if (requested_frame < 1)
 
  998         throw InvalidFile(
"Could not detect the duration of the video or audio stream.", 
path);
 
 1014         const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
 
 1028             int64_t diff = requested_frame - last_frame;
 
 1029             if (diff >= 1 && diff <= 20) {
 
 1031                 frame = ReadStream(requested_frame);
 
 1036                     Seek(requested_frame);
 
 1045                 frame = ReadStream(requested_frame);
 
 1053 std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame) {
 
 1055     bool check_seek = 
false;
 
 1056     int packet_error = -1;
 
 1066             CheckWorkingFrames(requested_frame);
 
 1071         if (is_cache_found) {
 
 1075         if (!hold_packet || !packet) {
 
 1077             packet_error = GetNextPacket();
 
 1078             if (packet_error < 0 && !packet) {
 
 1089             check_seek = CheckSeek(
false);
 
 1101         if ((
info.
has_video && packet && packet->stream_index == videoStream) ||
 
 1105             ProcessVideoPacket(requested_frame);
 
 1108         if ((
info.
has_audio && packet && packet->stream_index == audioStream) ||
 
 1112             ProcessAudioPacket(requested_frame);
 
 1117         if ((!
info.
has_video && packet && packet->stream_index == videoStream) ||
 
 1118             (!
info.
has_audio && packet && packet->stream_index == audioStream)) {
 
 1120             if (packet->stream_index == videoStream) {
 
 1122             } 
else if (packet->stream_index == audioStream) {
 
 1128             RemoveAVPacket(packet);
 
 1138             ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ReadStream (force EOF)", 
"packets_read", packet_status.
packets_read(), 
"packets_decoded", packet_status.
packets_decoded(), 
"packets_eof", packet_status.
packets_eof, 
"video_eof", packet_status.
video_eof, 
"audio_eof", packet_status.
audio_eof, 
"end_of_file", packet_status.
end_of_file);
 
 1155                                           "largest_frame_processed", largest_frame_processed,
 
 1156                                           "Working Cache Count", working_cache.
Count());
 
 1165         CheckWorkingFrames(requested_frame);
 
 1181             std::shared_ptr<Frame> f = CreateFrame(largest_frame_processed);
 
 1184             if (!frame->has_image_data) {
 
 1189             frame->AddAudioSilence(samples_in_frame);
 
 1194             std::shared_ptr<Frame> f = CreateFrame(largest_frame_processed);
 
 1196             f->AddAudioSilence(samples_in_frame);
 
 1204 int FFmpegReader::GetNextPacket() {
 
 1205     int found_packet = 0;
 
 1206     AVPacket *next_packet;
 
 1207     next_packet = 
new AVPacket();
 
 1208     found_packet = av_read_frame(pFormatCtx, next_packet);
 
 1212         RemoveAVPacket(packet);
 
 1215     if (found_packet >= 0) {
 
 1217         packet = next_packet;
 
 1220         if (packet->stream_index == videoStream) {
 
 1222         } 
else if (packet->stream_index == audioStream) {
 
 1231     return found_packet;
 
 1235 bool FFmpegReader::GetAVFrame() {
 
 1236     int frameFinished = 0;
 
 1242     int send_packet_err = 0;
 
 1243     int64_t send_packet_pts = 0;
 
 1244     if ((packet && packet->stream_index == videoStream) || !packet) {
 
 1245         send_packet_err = avcodec_send_packet(pCodecCtx, packet);
 
 1247         if (packet && send_packet_err >= 0) {
 
 1248             send_packet_pts = GetPacketPTS();
 
 1249             hold_packet = 
false;
 
 1258     #endif // USE_HW_ACCEL 
 1259         if (send_packet_err < 0 && send_packet_err != AVERROR_EOF) {
 
 1260             ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::GetAVFrame (send packet: Not sent [" + av_err2string(send_packet_err) + 
"])", 
"send_packet_err", send_packet_err, 
"send_packet_pts", send_packet_pts);
 
 1261             if (send_packet_err == AVERROR(EAGAIN)) {
 
 1263                 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::GetAVFrame (send packet: AVERROR(EAGAIN): user must read output with avcodec_receive_frame()", 
"send_packet_pts", send_packet_pts);
 
 1265             if (send_packet_err == AVERROR(EINVAL)) {
 
 1266                 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::GetAVFrame (send packet: AVERROR(EINVAL): codec not opened, it is an encoder, or requires flush", 
"send_packet_pts", send_packet_pts);
 
 1268             if (send_packet_err == AVERROR(ENOMEM)) {
 
 1269                 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::GetAVFrame (send packet: AVERROR(ENOMEM): failed to add packet to internal queue, or legitimate decoding errors", 
"send_packet_pts", send_packet_pts);
 
 1276         int receive_frame_err = 0;
 
 1277         AVFrame *next_frame2;
 
 1283 #endif // USE_HW_ACCEL 
 1285             next_frame2 = next_frame;
 
 1288         while (receive_frame_err >= 0) {
 
 1289             receive_frame_err = avcodec_receive_frame(pCodecCtx, next_frame2);
 
 1291             if (receive_frame_err != 0) {
 
 1292                 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::GetAVFrame (receive frame: frame not ready yet from decoder [\" + av_err2string(receive_frame_err) + \"])", 
"receive_frame_err", receive_frame_err, 
"send_packet_pts", send_packet_pts);
 
 1294                 if (receive_frame_err == AVERROR_EOF) {
 
 1296                             "FFmpegReader::GetAVFrame (receive frame: AVERROR_EOF: EOF detected from decoder, flushing buffers)", 
"send_packet_pts", send_packet_pts);
 
 1297                     avcodec_flush_buffers(pCodecCtx);
 
 1300                 if (receive_frame_err == AVERROR(EINVAL)) {
 
 1302                             "FFmpegReader::GetAVFrame (receive frame: AVERROR(EINVAL): invalid frame received, flushing buffers)", 
"send_packet_pts", send_packet_pts);
 
 1303                     avcodec_flush_buffers(pCodecCtx);
 
 1305                 if (receive_frame_err == AVERROR(EAGAIN)) {
 
 1307                             "FFmpegReader::GetAVFrame (receive frame: AVERROR(EAGAIN): output is not available in this state - user must try to send new input)", 
"send_packet_pts", send_packet_pts);
 
 1309                 if (receive_frame_err == AVERROR_INPUT_CHANGED) {
 
 1311                             "FFmpegReader::GetAVFrame (receive frame: AVERROR_INPUT_CHANGED: current decoded frame has changed parameters with respect to first decoded frame)", 
"send_packet_pts", send_packet_pts);
 
 1322                 if (next_frame2->format == hw_de_av_pix_fmt) {
 
 1323                     next_frame->format = AV_PIX_FMT_YUV420P;
 
 1324                     if ((err = av_hwframe_transfer_data(next_frame,next_frame2,0)) < 0) {
 
 1327                     if ((err = av_frame_copy_props(next_frame,next_frame2)) < 0) {
 
 1333 #endif // USE_HW_ACCEL 
 1335                 next_frame = next_frame2;
 
 1347             av_image_copy(pFrame->data, pFrame->linesize, (
const uint8_t**)next_frame->data, next_frame->linesize,
 
 1354             if (next_frame->pts != AV_NOPTS_VALUE) {
 
 1357                 video_pts = next_frame->pts;
 
 1358             } 
else if (next_frame->pkt_dts != AV_NOPTS_VALUE) {
 
 1360                 video_pts = next_frame->pkt_dts;
 
 1364                     "FFmpegReader::GetAVFrame (Successful frame received)", 
"video_pts", video_pts, 
"send_packet_pts", send_packet_pts);
 
 1373     #endif // USE_HW_ACCEL 
 1375         avcodec_decode_video2(pCodecCtx, next_frame, &frameFinished, packet);
 
 1381         if (frameFinished) {
 
 1385             av_picture_copy((AVPicture *) pFrame, (AVPicture *) next_frame, pCodecCtx->pix_fmt, 
info.
width,
 
 1388 #endif // IS_FFMPEG_3_2 
 1394     return frameFinished;
 
 1398 bool FFmpegReader::CheckSeek(
bool is_video) {
 
 1403         if ((is_video_seek && !seek_video_frame_found) || (!is_video_seek && !seek_audio_frame_found))
 
 1411         int64_t max_seeked_frame = std::max(seek_audio_frame_found, seek_video_frame_found);
 
 1414         if (max_seeked_frame >= seeking_frame) {
 
 1417                                             "is_video_seek", is_video_seek,
 
 1418                                             "max_seeked_frame", max_seeked_frame,
 
 1419                                             "seeking_frame", seeking_frame,
 
 1420                                             "seeking_pts", seeking_pts,
 
 1421                                             "seek_video_frame_found", seek_video_frame_found,
 
 1422                                             "seek_audio_frame_found", seek_audio_frame_found);
 
 1425             Seek(seeking_frame - (10 * seek_count * seek_count));
 
 1429                                             "is_video_seek", is_video_seek,
 
 1430                                             "packet->pts", GetPacketPTS(),
 
 1431                                             "seeking_pts", seeking_pts,
 
 1432                                             "seeking_frame", seeking_frame,
 
 1433                                             "seek_video_frame_found", seek_video_frame_found,
 
 1434                                             "seek_audio_frame_found", seek_audio_frame_found);
 
 1448 void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) {
 
 1451     int frame_finished = GetAVFrame();
 
 1454     if (!frame_finished) {
 
 1457             RemoveAVFrame(pFrame);
 
 1463     int64_t current_frame = ConvertVideoPTStoFrame(video_pts);
 
 1466     if (!seek_video_frame_found && is_seeking)
 
 1467         seek_video_frame_found = current_frame;
 
 1473     working_cache.
Add(CreateFrame(requested_frame));
 
 1485     AVFrame *pFrameRGB = pFrameRGB_cached;
 
 1488         if (pFrameRGB == 
nullptr)
 
 1490         pFrameRGB_cached = pFrameRGB;
 
 1493     uint8_t *buffer = 
nullptr;
 
 1514             max_width = std::max(
float(max_width), max_width * max_scale_x);
 
 1515             max_height = std::max(
float(max_height), max_height * max_scale_y);
 
 1521             QSize width_size(max_width * max_scale_x,
 
 1524                               max_height * max_scale_y);
 
 1526             if (width_size.width() >= max_width && width_size.height() >= max_height) {
 
 1527                 max_width = std::max(max_width, width_size.width());
 
 1528                 max_height = std::max(max_height, width_size.height());
 
 1530                 max_width = std::max(max_width, height_size.width());
 
 1531                 max_height = std::max(max_height, height_size.height());
 
 1538             float preview_ratio = 1.0;
 
 1545             max_width = 
info.
width * max_scale_x * preview_ratio;
 
 1546             max_height = 
info.
height * max_scale_y * preview_ratio;
 
 1551     int original_height = height;
 
 1552     if (max_width != 0 && max_height != 0 && max_width < width && max_height < height) {
 
 1554         float ratio = float(width) / float(height);
 
 1555         int possible_width = round(max_height * ratio);
 
 1556         int possible_height = round(max_width / ratio);
 
 1558         if (possible_width <= max_width) {
 
 1560             width = possible_width;
 
 1561             height = max_height;
 
 1565             height = possible_height;
 
 1570     const int bytes_per_pixel = 4;
 
 1571     int raw_buffer_size = (width * height * bytes_per_pixel) + 128;
 
 1574     constexpr 
size_t ALIGNMENT = 32;  
 
 1575     int buffer_size = ((raw_buffer_size + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT;
 
 1576     buffer = (
unsigned char*) aligned_malloc(buffer_size, ALIGNMENT);
 
 1581     int scale_mode = SWS_FAST_BILINEAR;
 
 1583         scale_mode = SWS_BICUBIC;
 
 1585     img_convert_ctx = sws_getCachedContext(img_convert_ctx, 
info.
width, 
info.
height, 
AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx), width, height, 
PIX_FMT_RGBA, scale_mode, NULL, NULL, NULL);
 
 1586     if (!img_convert_ctx)
 
 1590     sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0,
 
 1591               original_height, pFrameRGB->data, pFrameRGB->linesize);
 
 1594     std::shared_ptr<Frame> f = CreateFrame(current_frame);
 
 1599         f->AddImage(width, height, bytes_per_pixel, QImage::Format_RGBA8888_Premultiplied, buffer);
 
 1602         f->AddImage(width, height, bytes_per_pixel, QImage::Format_RGBA8888, buffer);
 
 1606     working_cache.
Add(f);
 
 1609     last_video_frame = f;
 
 1615     RemoveAVFrame(pFrame);
 
 1621     ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessVideoPacket (After)", 
"requested_frame", requested_frame, 
"current_frame", current_frame, 
"f->number", f->number, 
"video_pts_seconds", video_pts_seconds);
 
 1625 void FFmpegReader::ProcessAudioPacket(int64_t requested_frame) {
 
 1628     if (packet && packet->pts != AV_NOPTS_VALUE) {
 
 1630         location = GetAudioPTSLocation(packet->pts);
 
 1633         if (!seek_audio_frame_found && is_seeking)
 
 1634             seek_audio_frame_found = location.
frame;
 
 1641     working_cache.
Add(CreateFrame(requested_frame));
 
 1645                                           "requested_frame", requested_frame,
 
 1646                                           "target_frame", location.
frame,
 
 1650     int frame_finished = 0;
 
 1654     int packet_samples = 0;
 
 1658         int send_packet_err =  avcodec_send_packet(aCodecCtx, packet);
 
 1659         if (send_packet_err < 0 && send_packet_err != AVERROR_EOF) {
 
 1663             int receive_frame_err = avcodec_receive_frame(aCodecCtx, audio_frame);
 
 1664             if (receive_frame_err >= 0) {
 
 1667             if (receive_frame_err == AVERROR_EOF) {
 
 1671             if (receive_frame_err == AVERROR(EINVAL) || receive_frame_err == AVERROR_EOF) {
 
 1673                 avcodec_flush_buffers(aCodecCtx);
 
 1675             if (receive_frame_err != 0) {
 
 1680         int used = avcodec_decode_audio4(aCodecCtx, audio_frame, &frame_finished, packet);
 
 1683     if (frame_finished) {
 
 1689         audio_pts = audio_frame->pts;
 
 1692         location = GetAudioPTSLocation(audio_pts);
 
 1695         int plane_size = -1;
 
 1701         data_size = av_samples_get_buffer_size(&plane_size, nb_channels,
 
 1705         packet_samples = audio_frame->nb_samples * nb_channels;
 
 1714     int pts_remaining_samples = packet_samples / 
info.
channels; 
 
 1717     if (pts_remaining_samples == 0) {
 
 1719                                            "packet_samples", packet_samples,
 
 1721                                            "pts_remaining_samples", pts_remaining_samples);
 
 1725     while (pts_remaining_samples) {
 
 1730         int samples = samples_per_frame - previous_packet_location.
sample_start;
 
 1731         if (samples > pts_remaining_samples)
 
 1732             samples = pts_remaining_samples;
 
 1735         pts_remaining_samples -= samples;
 
 1737         if (pts_remaining_samples > 0) {
 
 1739             previous_packet_location.
frame++;
 
 1748                                           "packet_samples", packet_samples,
 
 1756     audio_converted->nb_samples = audio_frame->nb_samples;
 
 1757     av_samples_alloc(audio_converted->data, audio_converted->linesize, 
info.
channels, audio_frame->nb_samples, AV_SAMPLE_FMT_FLTP, 0);
 
 1773     av_opt_set_int(avr, 
"out_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);
 
 1782                              audio_converted->data,       
 
 1783                              audio_converted->linesize[0],   
 
 1784                              audio_converted->nb_samples,   
 
 1786                              audio_frame->linesize[0],     
 
 1787                              audio_frame->nb_samples);     
 
 1790     int64_t starting_frame_number = -1;
 
 1791     for (
int channel_filter = 0; channel_filter < 
info.
channels; channel_filter++) {
 
 1793         starting_frame_number = location.
frame;
 
 1794         int channel_buffer_size = nb_samples;
 
 1795         auto *channel_buffer = (
float *) (audio_converted->data[channel_filter]);
 
 1799         int remaining_samples = channel_buffer_size;
 
 1800         while (remaining_samples > 0) {
 
 1805             int samples = std::fmin(samples_per_frame - start, remaining_samples);
 
 1808             std::shared_ptr<Frame> f = CreateFrame(starting_frame_number);
 
 1811             f->AddAudio(
true, channel_filter, start, channel_buffer, samples, 1.0f);
 
 1815                                             "frame", starting_frame_number,
 
 1818                                             "channel", channel_filter,
 
 1819                                             "samples_per_frame", samples_per_frame);
 
 1822             working_cache.
Add(f);
 
 1825             remaining_samples -= samples;
 
 1828             if (remaining_samples > 0)
 
 1829                 channel_buffer += samples;
 
 1832             starting_frame_number++;
 
 1840     av_free(audio_converted->data[0]);
 
 1849                                           "requested_frame", requested_frame,
 
 1850                                           "starting_frame", location.
frame,
 
 1851                                           "end_frame", starting_frame_number - 1,
 
 1852                                           "audio_pts_seconds", audio_pts_seconds);
 
 // Seek both A/V streams so decoding can resume at (or safely before)
 // `requested_frame`. Clears the working cache and PTS bookkeeping, then
 // either rewinds toward the start (targets near frame 1) or performs a
 // PTS-based seek on the video stream (preferred) or the audio stream.
 // NOTE(review): several original lines are missing from this extract;
 // comments below describe only the statements that are visible.
 1858 void FFmpegReader::Seek(int64_t requested_frame) {

     // Frame numbers are 1-based; clamp anything smaller.
 1860     if (requested_frame < 1)

 1861         requested_frame = 1;

     // Requests beyond the last processed frame after EOF — presumably
     // clamped/short-circuited here; the handling lines are not visible.
 1864     if (requested_frame > largest_frame_processed && packet_status.
 end_of_file) {

 1871                                           "requested_frame", requested_frame,

 1872                                           "seek_count", seek_count,

 1873                                           "last_frame", last_frame);

     // Drop all partially-assembled frames; they predate the seek.
 1876     working_cache.
 Clear();

     // Reset the most-recently-seen PTS trackers to the sentinel value.
 1880     video_pts_seconds = NO_PTS_OFFSET;

 1882     audio_pts_seconds = NO_PTS_OFFSET;

 1883     hold_packet = 
 false;

 1885     current_video_frame = 0;

 1886     largest_frame_processed = 0;

     // Reset packet accounting (the `false` argument's meaning is defined
     // by PacketStatus::reset elsewhere — confirm before relying on it).
 1891     packet_status.
 reset(
 false);

     // Seek a little before the target so decoding has lead-in frames.
 1897     int buffer_amount = std::max(max_concurrent_frames, 8);

     // Targets near the start of the stream: rewind instead of seeking.
 1898     if (requested_frame - buffer_amount < 20) {

 1912         if (seek_count == 1) {

 1915             seeking_pts = ConvertFrameToVideoPTS(1);

 1917         seek_audio_frame_found = 0; 

 1918         seek_video_frame_found = 0; 

     // Otherwise: timestamp-based seek on whichever stream is available.
 1922         bool seek_worked = 
 false;

 1923         int64_t seek_target = 0;

 1927             seek_target = ConvertFrameToVideoPTS(requested_frame - buffer_amount);

 1929                 fprintf(stderr, 
 "%s: error while seeking video stream\n", pFormatCtx->AV_FILENAME);

     // Remember which stream the seek was issued on; later PTS checks
     // compare against that stream's timestamps.
 1932                 is_video_seek = 
 true;

 1939             seek_target = ConvertFrameToAudioPTS(requested_frame - buffer_amount);

 1941                 fprintf(stderr, 
 "%s: error while seeking audio stream\n", pFormatCtx->AV_FILENAME);

 1944                 is_video_seek = 
 false;

     // Flush codec-internal buffers so stale pre-seek frames are dropped.
 1953                 avcodec_flush_buffers(aCodecCtx);

 1957                 avcodec_flush_buffers(pCodecCtx);

     // -1 marks "no previous audio packet location" (see GetAudioPTSLocation).
 1960             previous_packet_location.
 frame = -1;

     // On the first seek attempt, record where we are trying to land.
 1965             if (seek_count == 1) {

 1967                 seeking_pts = seek_target;

 1968                 seeking_frame = requested_frame;

 1970             seek_audio_frame_found = 0; 

 1971             seek_video_frame_found = 0; 
 
 // Return the best-effort presentation timestamp of the current packet:
 // the packet's PTS, falling back to its DTS when the PTS is unset.
 // Returns AV_NOPTS_VALUE when no usable timestamp exists (the guard and
 // success-return lines are not visible in this extract — confirm).
 1999 int64_t FFmpegReader::GetPacketPTS() {

 2001         int64_t current_pts = packet->pts;

         // Some containers only populate DTS; use it as a PTS substitute.
 2002         if (current_pts == AV_NOPTS_VALUE && packet->dts != AV_NOPTS_VALUE)

 2003             current_pts = packet->dts;

 2009         return AV_NOPTS_VALUE;
 
 // Determine the PTS offset (in seconds) between the container's first
 // timestamps and frame 1, by reading packets until a plausible starting
 // PTS has been seen for both the video and audio streams. Runs once:
 // a non-sentinel pts_offset_seconds short-circuits subsequent calls.
 2014 void FFmpegReader::UpdatePTSOffset() {
     // Already computed — nothing to do (early return presumed; the
     // return line is not visible in this extract).
 2015     if (pts_offset_seconds != NO_PTS_OFFSET) {

 2019     pts_offset_seconds = 0.0;

 2020     double video_pts_offset_seconds = 0.0;

 2021     double audio_pts_offset_seconds = 0.0;

 2023     bool has_video_pts = 
 false;

     // Streams missing from the file count as "found" (condition lines
     // not visible — confirm against the full body).
 2026         has_video_pts = 
 true;

 2028     bool has_audio_pts = 
 false;

 2031         has_audio_pts = 
 true;

     // Read packets until both offsets have been established.
 2035     while (!has_video_pts || !has_audio_pts) {

 2037         if (GetNextPacket() < 0)

 2042         int64_t pts = GetPacketPTS();

 2045         if (!has_video_pts && packet->stream_index == videoStream) {

             // Only accept offsets within ±10s; larger values are treated
             // as bogus timestamps and skipped.
 2051             if (std::abs(video_pts_offset_seconds) <= 10.0) {

 2052                 has_video_pts = 
 true;

 2055         else if (!has_audio_pts && packet->stream_index == audioStream) {

 2061             if (std::abs(audio_pts_offset_seconds) <= 10.0) {

 2062                 has_audio_pts = 
 true;

 2068     if (has_video_pts && has_audio_pts) {

         // Use the larger of the two offsets so neither stream maps to a
         // frame number below 1.
 2080         pts_offset_seconds = std::max(video_pts_offset_seconds, audio_pts_offset_seconds);
 
 // Convert a video PTS into a 1-based frame number, tracking
 // current_video_frame to detect and disambiguate duplicate frame
 // numbers produced by rounding. (The pts→frame arithmetic lines are
 // not visible in this extract.)
 2085 int64_t FFmpegReader::ConvertVideoPTStoFrame(int64_t pts) {

 2087     int64_t previous_video_frame = current_video_frame;

     // First frame seen since open/seek: initialize the tracker.
 2096     if (current_video_frame == 0)

 2097         current_video_frame = frame;

         // Same frame number as last time — advance the tracker so each
         // decoded image maps to a distinct frame.
 2101         if (frame == previous_video_frame) {

 2106             current_video_frame++;
 
 // Convert a 1-based frame number into a video-stream PTS. Computes the
 // target time in seconds (including the global PTS offset); the
 // seconds→PTS conversion via the stream time base is presumed to follow
 // on lines not visible in this extract.
 2115 int64_t FFmpegReader::ConvertFrameToVideoPTS(int64_t frame_number) {

 2117     double seconds = (double(frame_number - 1) / 
 info.
 fps.
 ToDouble()) + pts_offset_seconds;
 
 // Convert a 1-based frame number into an audio-stream PTS. Mirrors
 // ConvertFrameToVideoPTS but presumably uses the audio stream's time
 // base for the final conversion (those lines are not visible here).
 2127 int64_t FFmpegReader::ConvertFrameToAudioPTS(int64_t frame_number) {

 2129     double seconds = (double(frame_number - 1) / 
 info.
 fps.
 ToDouble()) + pts_offset_seconds;
 
 // Map an audio PTS onto an AudioLocation: the target frame number plus
 // the starting sample offset within that frame. Also detects small
 // audio gaps by snapping to the previous packet's location when the
 // two are "near" each other.
 2139 AudioLocation FFmpegReader::GetAudioPTSLocation(int64_t pts) {

     // Integer part = the frame; fractional part = position inside it.
 2147     int64_t whole_frame = int64_t(frame);

 2150     double sample_start_percentage = frame - double(whole_frame);

     // Convert the fractional position into a sample index.
 2156     int sample_start = round(
 double(samples_per_frame) * sample_start_percentage);

     // Clamp to valid range (clamped values are on lines not visible here).
 2159     if (whole_frame < 1)

 2161     if (sample_start < 0)

     // frame == -1 means "no previous packet" (set by Seek).
 2168     if (previous_packet_location.
 frame != -1) {

         // Within one frame's worth of samples of the previous packet:
         // treat the difference as a gap and reuse the previous location.
 2169         if (location.
 is_near(previous_packet_location, samples_per_frame, samples_per_frame)) {

 2170             int64_t orig_frame = location.
 frame;

 2175             location.
 frame = previous_packet_location.
 frame;

 2178             ZmqLogger::Instance()->
 AppendDebugMethod(
 "FFmpegReader::GetAudioPTSLocation (Audio Gap Detected)", 
 "Source Frame", orig_frame, 
 "Source Audio Sample", orig_start, 
 "Target Frame", location.
 frame, 
 "Target Audio Sample", location.
 sample_start, 
 "pts", pts);

     // Remember this location for the next packet's gap check.
 2187     previous_packet_location = location;
 
 // Get-or-create the Frame object for `requested_frame` in the working
 // cache. Looks up the cache first, re-checks after (presumably) taking
 // a lock, creates and caches a new Frame on miss, and keeps
 // largest_frame_processed up to date.
 2194 std::shared_ptr<Frame> FFmpegReader::CreateFrame(int64_t requested_frame) {

     // Fast path: frame already being assembled.
 2196     std::shared_ptr<Frame> output = working_cache.
 GetFrame(requested_frame);

         // Second lookup — double-check before creating (a lock between
         // the two lookups is presumed; it is not visible in this extract).
 2200         output = working_cache.
 GetFrame(requested_frame);

 2201         if(output) 
 return output;

         // Newly created frame joins the working cache for assembly.
 2209         working_cache.
 Add(output);

 2212         if (requested_frame > largest_frame_processed)

 2213             largest_frame_processed = requested_frame;
 
 // Decide whether `requested_frame` is a "partial" frame produced around
 // a seek point (and should therefore be discarded rather than delivered).
 // A frame is partial when the streams' post-seek landing frames have
 // already reached or passed it.
 2220 bool FFmpegReader::IsPartialFrame(int64_t requested_frame) {

 2223     bool seek_trash = 
 false;

     // Furthest frame either stream has confirmed after the seek.
 2224     int64_t max_seeked_frame = seek_audio_frame_found; 

 2225     if (seek_video_frame_found > max_seeked_frame) {

 2226         max_seeked_frame = seek_video_frame_found;

     // Only streams the file actually has (and that have reported a
     // post-seek frame) participate in the check.
 2228     if ((
 info.
 has_audio && seek_audio_frame_found && max_seeked_frame >= requested_frame) ||

 2229         (
 info.
 has_video && seek_video_frame_found && max_seeked_frame >= requested_frame)) {
 
 // Sweep the working cache and promote frames that are complete (video
 // and audio both decoded up to the frame's timestamp) into the final
 // cache, discarding partial post-seek frames. Readiness is judged by
 // comparing each frame's expected PTS (in seconds) against the most
 // recently decoded video/audio PTS.
 2237 void FFmpegReader::CheckWorkingFrames(int64_t requested_frame) {

     // Serialize with GetFrame and other cache users.
 2240     const std::lock_guard<std::recursive_mutex> lock(
 getFrameMutex);

     // Snapshot the working frames so iteration is safe while removing.
 2243     std::vector<std::shared_ptr<openshot::Frame>> working_frames = working_cache.
 GetFrames();

 2244     std::vector<std::shared_ptr<openshot::Frame>>::iterator working_itr;

 2247     for(working_itr = working_frames.begin(); working_itr != working_frames.end(); ++working_itr)

 2250         std::shared_ptr<Frame> f = *working_itr;

         // Stop at frames beyond the one being requested (or null entries).
 2253         if (!f || f->number > requested_frame) {

         // Expected timestamp of this frame, in seconds.
 2259         double frame_pts_seconds = (double(f->number - 1) / 
 info.
 fps.
 ToDouble()) + pts_offset_seconds;

 2260         double recent_pts_seconds = std::max(video_pts_seconds, audio_pts_seconds);

 2263         bool is_video_ready = 
 false;

 2264         bool is_audio_ready = 
 false;

 2265         double recent_pts_diff = recent_pts_seconds - frame_pts_seconds;

         // Video is ready when decoding has reached the frame's PTS, or
         // decoding has moved well past it (> 1.5s — the frame will never
         // receive more image data).
 2266         if ((frame_pts_seconds <= video_pts_seconds)

 2267             || (recent_pts_diff > 1.5)

 2271             is_video_ready = 
 true;

 2273                                             "frame_number", f->number, 

 2274                                             "frame_pts_seconds", frame_pts_seconds, 

 2275                                             "video_pts_seconds", video_pts_seconds, 

 2276                                             "recent_pts_diff", recent_pts_diff);

                 // Frame has no image: borrow one from the nearest earlier
                 // frame that has image data.
 2280                 for (int64_t previous_frame = requested_frame - 1; previous_frame > 0; previous_frame--) {

 2282                     if (previous_frame_instance && previous_frame_instance->has_image_data) {

 2284                         f->AddImage(std::make_shared<QImage>(previous_frame_instance->GetImage()->copy()));

                 // Fallbacks: the last decoded video frame, else solid black.
 2289                 if (last_video_frame && !f->has_image_data) {

 2291                     f->AddImage(std::make_shared<QImage>(last_video_frame->GetImage()->copy()));

 2292                 } 
 else if (!f->has_image_data) {

 2293                     f->AddColor(
 "#000000");

         // Audio is ready when decoding is comfortably (> 1.0s) past the
         // frame's PTS, or well past it overall (> 1.5s).
 2298         double audio_pts_diff = audio_pts_seconds - frame_pts_seconds;

 2299         if ((frame_pts_seconds < audio_pts_seconds && audio_pts_diff > 1.0)

 2300            || (recent_pts_diff > 1.5)

 2305             is_audio_ready = 
 true;

 2307                                             "frame_number", f->number, 

 2308                                             "frame_pts_seconds", frame_pts_seconds, 

 2309                                             "audio_pts_seconds", audio_pts_seconds, 

 2310                                             "audio_pts_diff", audio_pts_diff, 

 2311                                             "recent_pts_diff", recent_pts_diff);

         // Frames straddling a seek point are trash and get dropped.
 2313         bool is_seek_trash = IsPartialFrame(f->number);

 2321                                            "frame_number", f->number, 

 2322                                            "is_video_ready", is_video_ready, 

 2323                                            "is_audio_ready", is_audio_ready, 

         // Promote when both streams are ready; at EOF every remaining
         // frame is flushed; seek trash is removed without promotion.
 2329         if ((!packet_status.
 end_of_file && is_video_ready && is_audio_ready) || packet_status.
 end_of_file || is_seek_trash) {

 2332                                             "requested_frame", requested_frame, 

 2333                                             "f->number", f->number, 

 2334                                             "is_seek_trash", is_seek_trash, 

 2335                                             "Working Cache Count", working_cache.
 Count(), 

 2339             if (!is_seek_trash) {

                 // Move from the working cache to the final cache.
 2344                 working_cache.
 Remove(f->number);

 2347                 last_frame = f->number;

                 // Seek trash: remove without delivering.
 2350                 working_cache.
 Remove(f->number);

     // Release the snapshot's memory promptly.
 2357     working_frames.clear();

 2358     working_frames.shrink_to_fit();
 
 // Verify/estimate the real frame rate by decoding packets from the
 // start of the file and counting video frames that land in each of the
 // first few one-second buckets. Used when the container's declared FPS
 // is suspect.
 2362 void FFmpegReader::CheckFPS() {

     // One counter per whole second of video sampled (first 3 seconds).
 2370     int frames_per_second[3] = {0,0,0};

 2371     int max_fps_index = 
 sizeof(frames_per_second) / 
 sizeof(frames_per_second[0]);

 2374     int all_frames_detected = 0;

 2375     int starting_frames_detected = 0;

         // Stop sampling at end of stream / read error.
 2380         if (GetNextPacket() < 0)

 2385         if (packet->stream_index == videoStream) {

             // Bucket index = whole seconds since stream start.
 2388             fps_index = int(video_seconds); 

 2391             if (fps_index >= 0 && fps_index < max_fps_index) {

 2393                 starting_frames_detected++;

 2394                 frames_per_second[fps_index]++;

             // Frames past the sampled window still count toward the total.
 2398                 all_frames_detected++;

     // Default assumption when sampling was inconclusive.
 2403     float avg_fps = 30.0;

 2404     if (starting_frames_detected > 0 && fps_index > 0) {

         // Average over however many full buckets were observed.
 2405         avg_fps = float(starting_frames_detected) / std::min(fps_index, max_fps_index);

     // Implausibly low estimates (< 8 fps) are rejected — handling is on
     // lines not visible in this extract.
 2409     if (avg_fps < 8.0) {

 2418     if (all_frames_detected > 0) {
 
 // Free the pixel buffer owned by `remove_frame` (and, presumably, the
 // frame struct itself on lines not visible in this extract).
 2432 void FFmpegReader::RemoveAVFrame(AVFrame *remove_frame) {

         // Frees the buffer allocated for the frame's image data and
         // nulls the pointer.
 2436         av_freep(&remove_frame->data[0]);
 
 // Release an AVPacket wrapper. The packet's payload is presumably
 // unreferenced (av_packet_unref/av_free_packet) on lines not visible in
 // this extract; the struct itself was heap-allocated with `new`.
 2444 void FFmpegReader::RemoveAVPacket(AVPacket *remove_packet) {

     // Matches the `new AVPacket` done when the packet was read.
 2449     delete remove_packet;
 
 2464     root[
"type"] = 
"FFmpegReader";
 
 2465     root[
"path"] = 
path;
 
 2480     catch (
const std::exception& e) {
 
 2482         throw InvalidJSON(
"JSON is invalid (missing keys or invalid data types)");
 
 2493     if (!root[
"path"].isNull())
 
 2494         path = root[
"path"].asString();