// Copyright (c) 2014 The Foundry Visionmongers Ltd. All Rights Reserved. #include "Build/fnBuild.h" #include "DDImage/Version.h" #include "DDImage/ddImageVersion.h" #include "DDImage/DDString.h" #include "DDImage/Row.h" #include "DDImage/Knobs.h" #include "DDImage/LUT.h" #include "DDImage/Enumeration_KnobI.h" #include "mov64Writer.h" #include "codecWhitelist.h" #ifndef INT64_C # define INT64_C(c) (c ## LL) #endif #ifndef UINT64_C # define UINT64_C(c) (c ## ULL) #endif #include #define FN_MOV64WRITER_PRINT_CODECS 0 using namespace DD::Image; #define SWAP32(x) ((((x) & 0xff000000) >> 24) | (((x) & 0x00ff0000) >> 8) | (((x) & 0x0000ff00) << 8) | (((x) &0x000000ff) << 24)) //////////////////////////////////////////////////////////////////////////////// // FnAVFrame // Wrapper class for the ffmpeg AVFrame structure. // In order negate the chances of memory leaks when using ffmpeg this class // provides the initialisation, allocation and release of memory associated // with an AVFrame structure. // // Note that this has been designed to be a drop in replacement for host // managed memory for colourspace, pixel format and sample format conversions. // It is not designed to be used with avcodec_decode_video2 as the underlying // decoder usually manages memory. // // Example usage: // // Audio // // FnAVFrame avFrame; // ret = avFrame.alloc(channels, nb_samples, _avSampleFormat, 1); // : // ret = swr_convert(_swrContext, avFrame->data, ...); // : // ret = avcodec_encode_audio2(avCodecContext, &pkt, avFrame, &gotPacket); // : // // Video // // FnAVFrame avFrame; // ret = avFrame.alloc(width(), height(), pixelFormatCodec, 1); // if (!ret) { // : // sws_scale(convertCtx, ..., avFrame->data, avFrame->linesize); // : // } // // IMPORTANT // This class has been purposefully designed NOT to have parameterised // constructors or assignment operators. The reason for this is due to the // complexity of managing the lifetime of the structure and its associated // memory buffers. 
// FnAVFrame::~FnAVFrame() { // The most important part of this class. // Two deallocations may be required, one from the AVFrame structure // and one for any image data, AVFrame::data. deallocateAVFrameData(); av_frame_free(&_avFrame); } //////////////////////////////////////////////////////////////////////////////// // alloc // VIDEO SPECIFIC. // Allocate a buffer or buffers for the AVFrame structure. How many // buffers depends upon |avPixelFormat| which is a VIDEO format. // // @param width Frame width in pixels. // @param height Frame height in pixels. // @param avPixelFormat An AVPixelFormat enumeration for the frame pixel format. // @param align Buffer byte alignment. // // @return 0 if successful. // <0 otherwise. // int FnAVFrame::alloc(int width, int height, enum AVPixelFormat avPixelFormat, int align) { int ret = 0; if (!_avFrame) _avFrame = av_frame_alloc(); if (_avFrame) { deallocateAVFrameData(); // In case this method is called multiple times on the same object. // av_image_alloc will return the size of the buffer in bytes if successful, // otherwise it will return <0. int bufferSize = av_image_alloc(_avFrame->data, _avFrame->linesize, width, height, avPixelFormat, align); if (bufferSize > 0) { // Set the frame fields for a video buffer as some // encoders rely on them, e.g. Lossless JPEG. _avFrame->width = width; _avFrame->height = height; _avFrame->format = (int)avPixelFormat; ret = 0; } else { ret = -1; } } else { // Failed to allocate an AVFrame. ret = -2; } return ret; } //////////////////////////////////////////////////////////////////////////////// // alloc // AUDIO SPECIFIC. // Allocate a buffer or buffers for the AVFrame structure. How many // buffers depends upon |avSampleFormat| which is an AUDIO format. // // @param nbChannels The number of audio channels. // @param nbSamples The number of audio samples that the buffer will hold. // @param avSampleFormat An AVSampleFormat enumeration for the audio format. 
// @param align Buffer byte alignment. // // @return 0 if successful. // <0 otherwise. // int FnAVFrame::alloc(int nbChannels, int nbSamples, enum AVSampleFormat avSampleFormat, int align) { int ret = 0; if (!_avFrame) _avFrame = av_frame_alloc(); if (_avFrame) { deallocateAVFrameData(); // In case this method is called multiple times on the same object. // av_samples_alloc will return >= if successful, otherwise it will return <0. ret = av_samples_alloc(_avFrame->data, _avFrame->linesize, nbChannels, nbSamples, avSampleFormat, align); if (ret >= 0) { // Set the frame fields for an audio buffer as some // encoders rely on them. _avFrame->nb_samples = nbSamples; _avFrame->format = (int)avSampleFormat; ret = 0; } else { ret = -1; } } else { // Failed to allocate an AVFrame. ret = -2; } return ret; } // Release any memory allocated to the data member variable of // the AVFrame structure. void FnAVFrame::deallocateAVFrameData() { if (_avFrame && _avFrame->data) av_freep(_avFrame->data); } //////////////////////////////////////////////////////////////////////////////// // AudioReader // Class that uses the avformat, avcodec, avutil and swresample libraries to // read, decode and reformat audio data from a file that contains audio. // // This class is used if an optional audio file has been specified to be used // with the mov64Writer. The audio data is interleaved with the video frames // by the mov64Writer. // // The mov64Writer can specify an 'offset' for the audio in order to advance // or delay the audio with respect to the video. If the audio is delayed then // the reader will create silence, keep track of the read position and provide // 'real' audio at the appropriate point. If the audio is advanced then a // seek operation is performed into the file and reads will begin from that // position. 
// int AudioReader::open(std::string filename) { int ret = avformat_open_input(&_avFormatContext, filename.c_str(), NULL, NULL); if (!ret) { // Dump file meta data (looking for timecode). // Can also do this on each stream. //AVDictionaryEntry* tag = 0; //while ((tag = av_dict_get(_avFormatContext->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) // std::cout << tag->key << "=" << tag->value << std::endl; ret = avformat_find_stream_info(_avFormatContext, NULL); if (!ret) { // Dump information about file onto standard error //av_dump_format(_avFormatContext, 0, filename.c_str(), 0); ret = av_find_best_stream(_avFormatContext, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0); if (ret >= 0) { int streamIndex = ret; _avStreamAudio = _avFormatContext->streams[streamIndex]; AVCodecContext* avCodecContext = _avStreamAudio->codec; AVCodec* avCodec = avcodec_find_decoder(avCodecContext->codec_id); if (avCodec) { _avFrameAudio = av_frame_alloc(); ret = avcodec_open2(avCodecContext, avCodec, NULL); if (!ret) { ret = setSampleFormat(_avSampleFormat); // Configure a resampling context if needed for the source format. } else { DEBUG_AUDIO_READER("failed to open decoder for codec_id " << avCodecContext->codec_id); } } else { DEBUG_AUDIO_READER("failed to find decoder for codec_id " << avCodecContext->codec_id); ret = -1; } } else { DEBUG_AUDIO_READER("unable to find audio stream in file"); } } } return ret; } //////////////////////////////////////////////////////////////////////////////// // read // Read, decode and resample some audio data from the source file. The format // of the data will be in the desired target format. // // @param avFrame A reference to an FnAVFrame object. This is an input and // output parameter. An UNINITIALIZED FnAVFrame is passed to // this method which allocs an appropriately sized buffer. The // audio is read and converted into this buffer which the caller // can then use. When the FnAVFrame object is destroyed it will // release the buffer. 
// // @return <0 if unsuccessful, otherwise the number of samples read. int AudioReader::read(FnAVFrame& avFrame) { int ret = 0; if (_avFormatContext && _avStreamAudio) { bool readFrame = true; while (readFrame) { AVCodecContext* avCodecContext = _avStreamAudio->codec; if (0 > _readPosition) { // The audio has been delayed so read from the silence // buffer. // Convert from stream time to a sample count. int nbSamples = (int)(avCodecContext->sample_rate * (0 - _readPosition) * av_q2d(_avStreamAudio->time_base)); // The QuickTime MOV guidelines from Apple recommend writing // audio in 0.5s chunks. So If there is more than 0.5s of // silence then then provide 0.5s of audio. Otherwise provide // the remaining silence before the start of the audio file. if (nbSamples > (avCodecContext->sample_rate >> 1)) nbSamples = avCodecContext->sample_rate >> 1; ret = avFrame.alloc(avCodecContext->channels, nbSamples, _avSampleFormat, 1); if (!ret) { ret = av_samples_set_silence(avFrame->data, // audio buffer 0, // offset nbSamples, avCodecContext->channels, _avSampleFormat); if (!ret) { // Update the read position. Convert the sample count into a stream time. _readPosition += (int64_t)((double)nbSamples / (avCodecContext->sample_rate * av_q2d(_avStreamAudio->time_base))); // Return the number of samples in the buffer of silence. ret = nbSamples; readFrame = false; } } } else { AVPacket avPacket = {0}; av_init_packet(&avPacket); ret = av_read_frame(_avFormatContext, &avPacket); if (ret >= 0) { if (avPacket.stream_index == _avStreamAudio->index) { // Decode audio frame. int haveDecodedFrame = 0; ret = avcodec_decode_audio4(avCodecContext, _avFrameAudio, &haveDecodedFrame, &avPacket); if (ret >= 0) { if (haveDecodedFrame) { readFrame = false; ret = avFrame.alloc(avCodecContext->channels, _avFrameAudio->nb_samples, _avSampleFormat, 1); if (!ret) { if (_swrContext) { // Convert from the source sample format to the target // sample format. 
ret = swr_convert(_swrContext, avFrame->data, // out _avFrameAudio->nb_samples, // out count const_cast(_avFrameAudio->extended_data), // in _avFrameAudio->nb_samples); // in count } else { // No format conversion is required so perform a straight copy. ret = av_samples_copy(avFrame->data, // dst _avFrameAudio->extended_data, // src 0, // dst_offset 0, // src_offset _avFrameAudio->nb_samples, // nb_samples avCodecContext->channels, // nb_channels _avSampleFormat); // sample_fmt } // Update the read position. Convert the sample count into a stream time. _readPosition += (int64_t)(_avFrameAudio->nb_samples * av_q2d(_avStreamAudio->time_base)); // Return the number of samples in the buffer. ret = _avFrameAudio->nb_samples; } else { DEBUG_AUDIO_READER("failed to allocate sample conversion buffer"); ret = -3; } } } else { DEBUG_AUDIO_READER("error decoding audio frame"); ret = -2; } } } av_free_packet(&avPacket); } if (ret < 0) readFrame = false; } } else { DEBUG_AUDIO_READER("invalid contexts, call AudioReader::open"); ret = -1; } return ret; } void AudioReader::close() { _readPosition = 0; if (_swrContext) { swr_free(&_swrContext); _swrContext = NULL; } if (_avFrameAudio) { av_frame_free(&_avFrameAudio); _avStreamAudio = NULL; } if (_avStreamAudio) { avcodec_close(_avStreamAudio->codec); _avStreamAudio = NULL; } if (_avFormatContext) { avformat_close_input(&_avFormatContext); _avFormatContext = NULL; } } int AudioReader::setSampleFormat(AVSampleFormat avSampleFormat) { int ret = 0; _avSampleFormat = avSampleFormat; // Set the target sample format. // If there is a valid audio codec context, reconfigure the // resampling context by comparing the source format to // the desired target format. if (_avStreamAudio) { AVCodecContext* avCodecContext = _avStreamAudio->codec; // For an input format that does not match the output format, // allocate a resample context to perform the conversion. 
if (avCodecContext->sample_fmt != _avSampleFormat) { _swrContext = swr_alloc(); if (_swrContext) { // Configure the resample context to convert from the // source sample rate and format to the desired // target sample rate and format. av_opt_set_channel_layout(_swrContext, "in_channel_layout", av_get_default_channel_layout(avCodecContext->channels), 0); av_opt_set_channel_layout(_swrContext, "out_channel_layout", av_get_default_channel_layout(avCodecContext->channels), 0); av_opt_set_int(_swrContext, "in_sample_rate", avCodecContext->sample_rate, 0); av_opt_set_int(_swrContext, "out_sample_rate", avCodecContext->sample_rate, 0); av_opt_set_sample_fmt(_swrContext, "in_sample_fmt", avCodecContext->sample_fmt, 0); av_opt_set_sample_fmt(_swrContext, "out_sample_fmt", _avSampleFormat, 0); ret = swr_init(_swrContext); if (ret) { DEBUG_AUDIO_READER("error initialising context"); ret = -3; } } else { DEBUG_AUDIO_READER("failed to allocate context"); ret = -2; } } else { swr_free(&_swrContext); _swrContext = NULL; } } return ret; } //////////////////////////////////////////////////////////////////////////////// // getSampleRate // Return the sample rate of the audio in the source file. // // @return The sample rate if a file has been successfully opened and contains // audio. Zero otherwise. // int AudioReader::getSampleRate() { int sampleRate = 0; if (_avStreamAudio) { AVCodecContext* avCodecContext = _avStreamAudio->codec; sampleRate = avCodecContext->sample_rate; } return sampleRate; } //////////////////////////////////////////////////////////////////////////////// // getNumberOfChannels // Return the channel count of the audio in the source file. // // @return The channel count if a file has been successfully opened and contains // audio. Zero otherwise. 
// int AudioReader::getNumberOfChannels() { int numberOfChannels = 0; if (_avStreamAudio) { AVCodecContext* avCodecContext = _avStreamAudio->codec; numberOfChannels = avCodecContext->channels; } return numberOfChannels; } //////////////////////////////////////////////////////////////////////////////// // getDuration // Return the duration of the audio stream in seconds. // // @return The duration of the audio stream in seconds. // double AudioReader::getDuration() { double duration = 0.0; if (_avStreamAudio) duration = _avStreamAudio->duration * av_q2d(_avStreamAudio->time_base); return duration; } //////////////////////////////////////////////////////////////////////////////// // setStartPosition // Set the start position of the audio reader. This is used to adjust the audio // with respect to video in the mov64Writer. This can be used to advance or // delay the audio. If the audio is advanced the start of the audio will not // be read. If the audio is delayed then silence is inserted. // // @param startTime The time in seconds of the position of the first read. // This value can be zero, positive or negative. // Time zero is the start of the file, all other values are // with respect to this position. // If positive, this will cause the reader to start reading // after of the start of the file. I.e. positive values will // skip the start of the file. // If negative, this will cause the reader to read 'ahead' of // the start of the file. As no data exists, it will provide // silence until the read position arrives at the start of the // file. // // @return 0 if successful, // <0 otherwise. // int AudioReader::setStartPosition(double startTime) { int ret = 0; _readPosition = (int64_t)(startTime / av_q2d(_avStreamAudio->time_base)); int64_t seekTarget = _readPosition; if (0 > seekTarget) seekTarget = 0; // The |read| method will return silence while |_readPosition| is less than zero. 
ret = av_seek_frame(_avFormatContext, _avStreamAudio->id, seekTarget, 0); ret = (ret >= 0) ? 0 : ret; return ret; } //////////////////////////////////////////////////////////////////////////////// /// mov64Writer mov64Writer::mov64Writer(Write* iop, bool componentOfMovWriter) : Writer(iop) , formatContext_(NULL) , streamVideo_(NULL) , streamAudio_(NULL) , writeTimecode_(false) , writeNCLC_(true) , isOpen_(false) , error_(IGNORE_FINISH) , fps_(DD::Image::root_real_fps()) , format_(0) , codec_(0) //The actual default is searched from the codec list in the constructor, to deal with custom lib compiles. , _audioFile(NULL) , _audioOffset(0.0f) , _audioOffsetUnit(0) , bitrate_(400000) //Redefined as knob default in init.py, to match updated ffmpeg defaults. , bitrateTolerance_(4000 * 10000) , gopSize_(12) , bFrames_(0) , mbDecision_(FF_MB_DECISION_SIMPLE) , qMin_(2) , qMax_(31) , componentOfMovWriter_(componentOfMovWriter) , defaultLutUpdated_(false) , codecKnob_(NULL) #if defined(FN_LICENSED_PRORES_CODEC) , appleProResCodec_(NULL) , oldProResCodecName_(NULL) , encoder_(0) #endif // FN_LICENSED_PRORES_CODEC { av_log_set_level(AV_LOG_ERROR); av_register_all(); formatsLongNames_.push_back("default"); formatsShortNames_.push_back("default"); AVOutputFormat* fmt = av_oformat_next(NULL); while (fmt) { if (fmt->video_codec != CODEC_ID_NONE) { if (Foundry::Nuke::isFormatWhitelistedForWriting( fmt->name ) ) { if (fmt->long_name) { formatsLongNames_.push_back(std::string(fmt->name) + std::string(" (") + std::string(fmt->long_name) + std::string(")")); formatsShortNames_.push_back(fmt->name); #if FN_MOV64WRITER_PRINT_CODECS std::cout << "Format: " << fmt->name << " = " << fmt->long_name << std::endl; #endif // FN_MOV64WRITER_PRINT_CODECS } } #if FN_MOV64WRITER_PRINT_CODECS else { std::cout << "Disallowed Format: " << fmt->name << " = " << fmt->long_name << std::endl; } #endif // FN_MOV64WRITER_PRINT_CODECS } fmt = av_oformat_next(fmt); } formatsShortNames_.push_back(0); #if 
defined(FN_LICENSED_PRORES_CODEC) // Apple ProRes support. // Add all the ProRes profiles supported by the Apple // ProRes library. int codecIndex = 0; while (SIZEOF_ARRAY(appleProResCodecLUT) > codecIndex) { const AppleProResCodecStr* appleProResCodec = &appleProResCodecLUT[codecIndex]; codecsShortNames_.push_back(appleProResCodec->shortName); codecsKnobLabels_.push_back(appleProResCodec->knobLabel); ++codecIndex; } #endif // FN_LICENSED_PRORES_CODEC AVCodec* c = av_codec_next(NULL); while (c) { if (c->type == AVMEDIA_TYPE_VIDEO && c->encode2) { if (Foundry::Nuke::isCodecWhitelistedForWriting( c->name ) && (c->long_name)) { //Initialising codec choice here to deal with custom compiles with differing codecs enabled. //Also note this is the legacy codec choice - the prores default is set in init.py. if (c->id == AV_CODEC_ID_MPEG4) { codec_=codecsShortNames_.size(); } codecsShortNames_.push_back(c->name); const char* knobLabel = Foundry::Nuke::getCodecKnobLabel( c->name ); codecsKnobLabels_.push_back(knobLabel); #if FN_MOV64WRITER_PRINT_CODECS std::cout << "Codec: " << c->name << " = " << c->long_name << std::endl; #endif // FN_MOV64WRITER_PRINT_CODECS } #if FN_MOV64WRITER_PRINT_CODECS else { std::cout << "Disallowed Codec: " << c->name << " = " << c->long_name << std::endl; } #endif // FN_MOV64WRITER_PRINT_CODECS } c = av_codec_next(c); } codecsShortNames_.push_back(0); codecsKnobLabels_.push_back(0); } mov64Writer::~mov64Writer() { } bool mov64Writer::isRec709Format(const int height) const { // First check for codecs which require special handling: // * JPEG codecs always use Rec 601. Knob* codecKnob = iop->knob("mov64_codec"); // Its possible that replaceable knobs may not exist if file_type knob is blank. This is allowed. 
if (codecKnob) { const bool isJpeg = IsJpeg(codecKnob, codec_); if (isJpeg) { return false; } } // Using method described in step 5 of QuickTimeCodecReader::setPreferredMetadata return (height >= 720); } // Figure out if an FFmpeg codec is definitely YUV based from its underlying storage // pixel format type. /*static*/ bool mov64Writer::IsYUV(AVPixelFormat pix_fmt) { // from swscale_internal.h const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt); return !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2; } // Figure out if a codec is definitely YUV based from its shortname. /*static*/ bool mov64Writer::IsYUVFromShortName(const char* shortName) { return (!strcmp(shortName, "fn_apch") || !strcmp(shortName, "fn_apcn") || !strcmp(shortName, "fn_apcs") || !strcmp(shortName, "fn_apco") || !strcmp(shortName, "mjpeg") || !strcmp(shortName, "mpeg1video") || !strcmp(shortName, "mpeg4") || !strcmp(shortName, "v210")); } // Figure out if a codec is definitely RGB based from its shortname. /*static*/ bool mov64Writer::IsRGBFromShortName(const char* shortName) { return (!strcmp(shortName, "fn_ap4h") || !strcmp(shortName, "fn_ap4x") || !strcmp(shortName, "png") || !strcmp(shortName, "qtrle")); } AVColorTransferCharacteristic mov64Writer::getColorTransferCharacteristic() const { Knob* k = iop->knob("colorspace"); if (k) { Enumeration_KnobI* e = k->enumerationKnob(); if (e) { const std::string selection = e->getSelectedItemString(); if (selection.find("Gamma2.2") != std::string::npos) { return AVCOL_TRC_GAMMA22; } } } return AVCOL_TRC_UNSPECIFIED; } /** * called by Write::updateDefaultLUT() from the main thread to figure out * what text to put in the "colorspace" widget. 
 */
LUT* mov64Writer::defaultLUT() const
{
  // Without a deducible output format fall back to the common default.
  AVOutputFormat* fmt = initFormat(/* reportErrors = */ false );
  if (!fmt) {
    return LUT::getLut(LUT::GAMMA2_2);
  }

  // Prefer the live knob value over the cached member so the widget
  // reflects the user's current selection.
  int codecTemp = codec_;
  if (iop && iop->knob("mov64_codec"))
    codecTemp = iop->knob("mov64_codec")->get_value();

  // Hardcode using utility shortname functions, as the pixel format isn't
  // an accurate representation of what the codec should use to cleanly
  // roundtrip with official quicktime and other packages, particularly in
  // the case of quicktime.
  if (IsRGBFromShortName(codecsShortNames_[codecTemp])) {
    return LUT::getLut(LUT::GAMMA1_8);
  }
  else if (IsYUVFromShortName(codecsShortNames_[codecTemp])) {
    return LUT::getLut(LUT::GAMMA2_2);
  }

  // Fall back to using the current codec's preferred pixel format.
  AVCodecID codecId = AV_CODEC_ID_NONE;
  AVCodec* videoCodec = NULL;
  const bool success = initCodec(fmt, codecId, videoCodec);
  if (!success) {
    return LUT::getLut(LUT::GAMMA2_2);
  }

  AVPixelFormat targetPixelFormat = AV_PIX_FMT_YUV420P;
  AVPixelFormat nukeBufferPixelFormat = AV_PIX_FMT_RGB24;
  int outBitDepth = 8;
  getPixelFormats(videoCodec, nukeBufferPixelFormat, targetPixelFormat, outBitDepth);

  // YUV = Gamma2.2, RGB = Gamma1.8
  const bool isYUV = IsYUV(targetPixelFormat);
  return LUT::getLut(isYUV ? LUT::GAMMA2_2 : LUT::GAMMA1_8);
}

// True for the knobs whose value feeds the default-LUT calculation above.
bool mov64Writer::isDefaultLUTKnob(DD::Image::Knob* knob) const
{
  return knob->is("mov64_codec") || knob->is("mov64_format");
}

// Resolve the output container format: either guessed from the filename
// extension (format_ == 0, the 'default' entry) or looked up from the
// user's explicit format selection.
//
// @param reportErrors When true, raise an error on the op if no format
//                     can be deduced.
//
// @return The output format, or NULL on failure.
AVOutputFormat* mov64Writer::initFormat(bool reportErrors) const
{
  AVOutputFormat* fmt = NULL;
  if (!format_) {
    fmt = av_guess_format(NULL, filename(), NULL);
    if (!fmt && reportErrors) {
      iop->critical("could not deduce output format from file extension");
      return NULL;
    }
  }
  else {
    fmt = av_guess_format(formatsShortNames_[format_], NULL, NULL);
    if (!fmt && reportErrors) {
      iop->critical("could not deduce output format");
      return NULL;
    }
  }
  return fmt;
}

// Resolve the encoder to use: the container's default video codec,
// overridden by the user's codec selection when FFmpeg knows it by name,
// or (ProRes builds) the matching entry of the Apple ProRes LUT.
//
// @param fmt           The output container format.
// @param outCodecId    Receives the resolved AVCodecID.
// @param outVideoCodec Receives the resolved encoder.
//
// @return true on success; false (with an op error raised) when no
//         encoder could be found.
bool mov64Writer::initCodec(AVOutputFormat* fmt, AVCodecID& outCodecId, AVCodec*& outVideoCodec) const
{
  outCodecId = fmt->video_codec;
  AVCodec* userCodec = avcodec_find_encoder_by_name(codecsShortNames_[codec_]);
  if (userCodec) {
    outCodecId = userCodec->id;
  }
#if defined(FN_LICENSED_PRORES_CODEC)
  else {
    appleProResCodec_ = 0; // Reset.
    // Check whether the selected codec is Apple ProRes.
    // Perform an identical string match, i.e. the string
    // lengths and content are identical. E.g. pr422 will
    // match pr422 only and not pr422hq, pr422lt, etc.
    int codecIndex = 0;
    size_t codecNameLen = strlen(codecsShortNames_[codec_]);
    while (SIZEOF_ARRAY(appleProResCodecLUT) > codecIndex) {
      const char* shortName = appleProResCodecLUT[codecIndex].shortName;
      size_t shortNameLen = strlen(shortName);
      if ((codecNameLen == shortNameLen) && !strncmp(codecsShortNames_[codec_], shortName, codecNameLen)) {
        appleProResCodec_ = &appleProResCodecLUT[codecIndex];
        outCodecId = AV_CODEC_ID_PRORES;
        break;
      }
      ++codecIndex;
    }
  }
#endif // FN_LICENSED_PRORES_CODEC
  outVideoCodec = avcodec_find_encoder(outCodecId);
  if (!outVideoCodec) {
    iop->critical("unable to find video codec");
    return false;
  }
  return true;
}

// Choose the encoder's target pixel format and the matching Nuke-side
// buffer format/bit depth used to feed it.
//
// @param videoCodec               The encoder being configured (non-NULL).
// @param outNukeBufferPixelFormat Receives the RGB format of the
//                                 intermediate Nuke buffer.
// @param outTargetPixelFormat     Receives the format handed to the encoder.
// @param outBitDepth              In: the minimum bit depth to consider.
//                                 Out: the chosen bit depth.
void mov64Writer::getPixelFormats(AVCodec* videoCodec, AVPixelFormat& outNukeBufferPixelFormat, AVPixelFormat& outTargetPixelFormat, int& outBitDepth) const
{
  assert(videoCodec);
#if defined(FN_LICENSED_PRORES_CODEC)
  // Special case for the Apple ProRes codec.
  // Provide the pixel format that matches the
  // formats supported by the Apple ProRes
  // codec library, r.e. PRPixelFormat in
  // ProResEncoder.h.
  // NOTE(review): relies on initCodec having set appleProResCodec_ first.
  if (AV_CODEC_ID_PRORES == videoCodec->id) {
    outTargetPixelFormat = appleProResCodec_->avPixelFormat;
  }
  else
#endif // FN_LICENSED_PRORES_CODEC
  if (videoCodec->pix_fmts != NULL) {
    //This is the most frequent path, where we can guess best pix format using ffmpeg.
    //find highest bit depth pix fmt. (The list is terminated by -1 / AV_PIX_FMT_NONE.)
    const AVPixelFormat* currPixFormat = videoCodec->pix_fmts;
    int currPixFormatBitDepth = outBitDepth;
    while (*currPixFormat != -1) {
      currPixFormatBitDepth = GetPixelFormatBitDepth(*currPixFormat);
      if (currPixFormatBitDepth > outBitDepth)
        outBitDepth = currPixFormatBitDepth;
      currPixFormat++;
    }

    //figure out nukeBufferPixelFormat from this.
    outNukeBufferPixelFormat = GetPixelFormatFromBitDepth(outBitDepth);

    //call best_pix_fmt using the full list.
    int hasAlpha = 0; //Once we start supporting pixel types with alpha, this should be set appropriately.
    int loss = 0; //Potentially we should error, or at least report if over a certain value?
    outTargetPixelFormat = avcodec_find_best_pix_fmt_of_list(videoCodec->pix_fmts, outNukeBufferPixelFormat, hasAlpha, &loss);

    //Unlike the other cases, we're now done figuring out all aspects, so return.
    return;
  }
  else {
    //Lowest common denominator defaults.
    outTargetPixelFormat = AV_PIX_FMT_YUV420P;
  }

  outBitDepth = GetPixelFormatBitDepth(outTargetPixelFormat);
  outNukeBufferPixelFormat = GetPixelFormatFromBitDepth(outBitDepth);
}

// av_get_bits_per_sample knows about surprisingly few codecs.
// We have to do this manually.
//
// @return The per-component bit depth used for buffer sizing; any format
//         not explicitly listed is treated as 8-bit.
/*static*/ int mov64Writer::GetPixelFormatBitDepth(const AVPixelFormat pixelFormat)
{
  switch (pixelFormat) {
    case AV_PIX_FMT_BGRA64LE:
    case AV_PIX_FMT_RGB48LE:
      return 16;
      break;

    case AV_PIX_FMT_YUV411P: // Uncompressed 4:1:1 12bit
      return 12;
      break;

    case AV_PIX_FMT_YUV422P10LE: // Uncompressed 4:2:2 10bit - planar
    case AV_PIX_FMT_YUV444P10LE: // Uncompressed 4:4:4 10bit - planar
    case AV_PIX_FMT_YUVA444P:    // Uncompressed packed QT 4:4:4:4
      return 10;
      break;

    case AV_PIX_FMT_YUV420P: // MPEG-1, MPEG-2, MPEG-4 part2 (default)
    case AV_PIX_FMT_YUV444P: // Uncompressed 4:4:4 planar
    default:
      return 8;
      break;
  }
}

// Map a bit depth to the RGB format Nuke renders into before conversion.
/*static*/ AVPixelFormat mov64Writer::GetPixelFormatFromBitDepth(const int bitDepth)
{
  return (bitDepth > 8 ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB24);
}

// Report which knob-controlled parameter groups a codec honours:
// lossy rate/quantiser settings, GOP size, and B-frames.
/*static*/ void mov64Writer::GetCodecSupportedParams(AVCodec* codec, bool& outLossyParams, bool& outInterGOPParams, bool& outInterBParams)
{
  assert(codec);
  //The flags on the codec can't be trusted to indicate capabilities, so use the props bitmask on the descriptor instead.
  const AVCodecDescriptor* codecDesc = avcodec_descriptor_get(codec->id);
  outLossyParams = codecDesc ? !!(codecDesc->props & AV_CODEC_PROP_LOSSY) : false;
  outInterGOPParams = codecDesc ? !(codecDesc->props & AV_CODEC_PROP_INTRA_ONLY) : false;
  outInterBParams = codecDesc ? !(codecDesc->props & AV_CODEC_PROP_INTRA_ONLY) : false;

  //Overrides for specific cases where the codecs don't follow the rules.
  //PNG doesn't observe the params, despite claiming to be lossy.
  if (codecDesc && (codecDesc->id == AV_CODEC_ID_PNG)) {
    outLossyParams = outInterGOPParams = outInterBParams = false;
  }
  //Mpeg4 ms var 3 / AV_CODEC_ID_MSMPEG4V3 doesn't have a descriptor, but needs the params.
  if (codec && (codec->id == AV_CODEC_ID_MSMPEG4V3)) {
    outLossyParams = outInterGOPParams = outInterBParams = true;
  }
  //QTRLE supports differing GOPs, but any b frame settings causes unreadable files.
  if (codecDesc && (codecDesc->id == AV_CODEC_ID_QTRLE)) {
    outLossyParams = outInterBParams = false;
    outInterGOPParams = true;
  }
}

////////////////////////////////////////////////////////////////////////////////
// configureAudioStream
// Set audio parameters of the audio stream.
//
// @param avCodec A pointer reference to an AVCodec to receive the AVCodec if
//                the codec can be located and is on the codec white list.
// @param avStream A reference to an AVStream of an audio stream.
//
void mov64Writer::configureAudioStream(AVCodec* avCodec, AVStream* avStream)
{
  AVCodecContext* avCodecContext = avStream->codec;
  avcodec_get_context_defaults3(avCodecContext, avCodec);
  // Mirror the source audio's parameters so no format change occurs on mux.
  avCodecContext->sample_fmt = audioReader_->getSampleFormat();
  //avCodecContext->bit_rate = 64000; // Calculate...
  avCodecContext->sample_rate = audioReader_->getSampleRate();
  avCodecContext->channels = audioReader_->getNumberOfChannels();
}

////////////////////////////////////////////////////////////////////////////////
// configureVideoStream
// Set video parameters of the video stream.
//
// @param avCodec A pointer reference to an AVCodec to receive the AVCodec if
//                the codec can be located and is on the codec white list.
// @param avStream A reference to an AVStream of the video stream.
//
void mov64Writer::configureVideoStream(AVCodec* avCodec, AVStream* avStream)
{
  AVCodecContext* avCodecContext = avStream->codec;
  avcodec_get_context_defaults3(avCodecContext, avCodec);

  //Only update the relevant context variables where the user is able to set them.
  //This deals with cases where values are left on an old value when knob disabled.
  bool lossyParams    = false;
  bool interGOPParams = false;
  bool interBParams   = false;
  if (avCodec)
    GetCodecSupportedParams(avCodec, lossyParams, interGOPParams, interBParams);

  if (lossyParams) {
    avCodecContext->bit_rate = bitrate_;
    avCodecContext->bit_rate_tolerance = bitrateTolerance_;
    avCodecContext->qmin = qMin_;
    avCodecContext->qmax = qMax_;
  }

  avCodecContext->width = width();
  avCodecContext->height = height();
  avCodecContext->color_trc = getColorTransferCharacteristic();

  // Record the writing application and its version in the container metadata.
  av_dict_set(&formatContext_->metadata, kMetaKeyApplication, mFnStringize(FN_PRODUCT_NAME), 0);
  DD::Image::Version version = DD::Image::applicationVersion();
  av_dict_set(&formatContext_->metadata, kMetaKeyApplicationVersion, version.majorMinorReleaseString().c_str(), 0);

  //Currently not set - the main problem being that the mov32 reader will use it to set its defaults.
  //TODO: investigate using the writer key in mov32 to ignore this value when set to mov64.
  //av_dict_set(&formatContext_->metadata, kMetaKeyPixelFormat, "YCbCr 8-bit 422 (2vuy)", 0);

  const char* ycbcrmetavalue = isRec709Format(avCodecContext->height) ? "Rec 709" : "Rec 601";
  av_dict_set(&formatContext_->metadata, kMetaKeyYCbCrMatrix, ycbcrmetavalue, 0);

  const char* lutName = GetLutName(lut());
  if (lutName)
    av_dict_set(&formatContext_->metadata, kMetaKeyColorspace, lutName, 0);

  av_dict_set(&formatContext_->metadata, kMetaKeyWriter, kMetaValueWriter64, 0);

  //Write the NCLC atom in the case the underlying storage is YUV.
  if (IsYUVFromShortName(codecsShortNames_[codec_])) {
    // Primaries are always 709.
    avCodecContext->color_primaries = AVCOL_PRI_BT709;
    if (writeNCLC_)
      av_dict_set(&avStream->metadata, kNCLCPrimariesKey, "1", 0);

    // Transfer function is always set to unknown. This results in more correct reading
    // on the part of QT Player.
    avCodecContext->color_trc = AVCOL_TRC_UNSPECIFIED;
    if (writeNCLC_)
      av_dict_set(&avStream->metadata, kNCLCTransferKey, "2", 0);

    // Matrix is based on that used when writing (a combo of height and legacy codec in general).
    if (isRec709Format(avCodecContext->height)) {
      avCodecContext->colorspace = AVCOL_SPC_BT709;
      if (writeNCLC_)
        av_dict_set(&avStream->metadata, kNCLCMatrixKey, "1", 0);
    }
    else {
      avCodecContext->colorspace = AVCOL_SPC_BT470BG;
      if (writeNCLC_)
        av_dict_set(&avStream->metadata, kNCLCMatrixKey, "6", 0);
    }
  }

  // From the Apple QuickTime movie guidelines. Set the
  // appropriate pixel aspect ratio for the movie.
  // Scale by 100 and convert to int for a reliable comparison, e.g.
  // 0.9100000000 != 0.9100002344. This is done as the pixel aspect
  // ratio is provided as a double and the author cannot find a
  // rational representation (num/den) of par.
  int32_t par = (int32_t)(info().format().pixel_aspect() * 100.0);
  if (200 == par) {
    avCodecContext->sample_aspect_ratio.num = 2;
    avCodecContext->sample_aspect_ratio.den = 1;
  }
  else if (150 == par) {
    avCodecContext->sample_aspect_ratio.num = 3;
    avCodecContext->sample_aspect_ratio.den = 2;
  }
  else if (146 == par) {
    // PAL 16:9
    avCodecContext->sample_aspect_ratio.num = 118;
    avCodecContext->sample_aspect_ratio.den = 81;
  }
  else if (133 == par) {
    avCodecContext->sample_aspect_ratio.num = 4;
    avCodecContext->sample_aspect_ratio.den = 3;
  }
  else if (121 == par) {
    // NTSC 16:9
    avCodecContext->sample_aspect_ratio.num = 40;
    avCodecContext->sample_aspect_ratio.den = 33;
  }
  else if (109 == par) {
    // PAL 4:3
    avCodecContext->sample_aspect_ratio.num = 59;
    avCodecContext->sample_aspect_ratio.den = 54;
  }
  else if (100 == par) {
    // Typically HD
    avCodecContext->sample_aspect_ratio.num = 1;
    avCodecContext->sample_aspect_ratio.den = 1;
  }
  else if (91 == par) {
    // NTSC 4:3
    avCodecContext->sample_aspect_ratio.num = 10;
    avCodecContext->sample_aspect_ratio.den = 11;
  }

  // timebase: This is the fundamental unit of time (in seconds) in terms
  // of which frame timestamps are represented. For fixed-fps content,
  // timebase should be 1/framerate and timestamp increments should be
  // identical to 1.
  //
  // Bug 23953
  // ffmpeg does a horrible job of converting floats to AVRationals
  // It adds 0.5 randomly and does some other stuff
  // To work around that, we just multiply the fps by what I think is a reasonable number to make it an int
  // and use the reasonable number as the numerator for the timebase.
  // Timebase is not the frame rate; it's the inverse of the framerate
  // So instead of doing 1/fps, we just set the numerator and denominator of the timebase directly.
  // The upshot is that this allows ffmpeg to properly do framerates of 23.78 (or 23.796, which is what the user really wants when they put that in).
  //
  // The code was this:
  //streamVideo_->codec->time_base = av_d2q(1.0 / fps_, 100);
  const float CONVERSION_FACTOR = 1000.0f;
  avCodecContext->time_base.num = (int)CONVERSION_FACTOR;
  avCodecContext->time_base.den = (int)(fps_ * CONVERSION_FACTOR);

#if defined(FN_LICENSED_PRORES_CODEC)
  // Apple ProRes specific.
  // The main non SDK library has been modified to provide customisation
  // of the QT chunk sizes in line with the QT guidelines from
  // Apple. The recommended chunk sizes are 4MiB for HD and 2MiB
  // for SD. The customisation was to look for a metadata
  // dictionary item that specifies the log2 chunk size.
  if (AV_CODEC_ID_PRORES == avCodecContext->codec_id) {
    if (avCodecContext->height >= 720) {
      // Assume HD.
      av_dict_set(&avStream->metadata, kChunkSizeKey, "22", 0); // Set 'log2chunksize' to 2^22 = 4MiB.
    }
    else {
      // Assume SD.
      av_dict_set(&avStream->metadata, kChunkSizeKey, "21", 0); // Set 'log2chunksize' to 2^21 = 2MiB.
    }
    // Nuke/NukeStudio only supports progressive.
    // Set this field so that an 'fiel' atom is inserted
    // into the QuickTime 'moov' atom.
    avCodecContext->field_order = AV_FIELD_PROGRESSIVE;
    // TODO: IF alpha is to be encoded, then the following must
    //       be set to 32. This will ensure the correct bit depth
    //       information will be embedded in the QuickTime movie.
    //       Otherwise the following must be set to 24.
    //avCodecContext->bits_per_coded_sample = _encodeAlpha ? 32 : 24;
    avCodecContext->bits_per_coded_sample = 24;
  }
#endif // FN_LICENSED_PRORES_CODEC

  // Trap fractional frame rates so that they can be specified correctly
  // in a QuickTime movie. The rational number representation of fractional
  // frame rates is 24000/1001, 30000/1001, 60000/1001, etc.
  // WARNING: There are some big assumptions here. The assumption is
  //          that by specifying 23.98, 29.97 on the UI the intended
  //          frame rate is 24/1.001, 30/1.001, etc. so the frame rate
  //          is corrected here.
  // NOTE(review): this always overwrites the CONVERSION_FACTOR timebase set
  // above (so e.g. fps_ = 23.78 is coerced to 24000/1001), and a fps_ < 1
  // would yield frameRate 0 and a zero denominator — confirm the knob
  // constrains fps_ to sane values.
  int frameRate = (0.0 < fps_) ? (int)fps_ : 0;
  if ((23 == frameRate) || (29 == frameRate) || (59 == frameRate)) {
    avCodecContext->time_base.num = 1001;
    avCodecContext->time_base.den = (frameRate + 1) * 1000;
  }
  else {
    avCodecContext->time_base.num = 100;
    avCodecContext->time_base.den = frameRate * 100;
  }

  if (interGOPParams)
    avCodecContext->gop_size = gopSize_;

  // NOTE: in new ffmpeg, bframes don't seem to work correctly - ffmpeg crashes...
  if (interBParams && bFrames_) {
    avCodecContext->max_b_frames = bFrames_;
    avCodecContext->b_frame_strategy = 0;
    avCodecContext->b_quant_factor = 2.0f;
  }

  avCodecContext->mb_decision = mbDecision_;

  // Create a timecode stream for QuickTime movies. (There was no
  // requirement at the time of writing for any other file format.
  // Also not all containers support timecode.)
  if (writeTimecode_ && !strcmp(formatContext_->oformat->name, "mov")) {
    // Retrieve the timecode from Nuke/NukeStudio.
    // Adding a 'timecode' metadata item to the video stream
    // metadata will automagically create a timecode track
    // in the QuickTime movie created by FFmpeg.
    const MetaData::Bundle& metaData = iop->_fetchMetaData("");
    size_t size = metaData.size(); // NOTE(review): unused local, kept as-is.
    MetaData::Bundle::PropertyPtr property = metaData.getData("input/timecode");
    if (property) {
      std::string timecode = MetaData::propertyToString(property).c_str();
      if (0 == timecode.size())
        timecode = "00:00:00:00"; // Set a sane default.
      av_dict_set(&avStream->metadata, "timecode", timecode.c_str(), 0);
    }
  }
}

////////////////////////////////////////////////////////////////////////////////
// addStream
// Add a new stream to the AVFormatContext of the file. This will search for
// the codec and if found, validate it against the codec whitelist. If the codec
// can be used, a new stream is created and configured.
//
// @param avFormatContext A reference to an AVFormatContext of the file.
// @param avCodecId An AVCodecID enumeration of the codec to attempt to open.
// @param pavCodec A pointer reference to an AVCodec to receive the AVCodec if // the codec can be located and is on the codec white list. // This can be NULL for non-codec streams, e.g. timecode. // // @return A reference to an AVStream if successful. // NULL otherwise. // AVStream* mov64Writer::addStream(AVFormatContext* avFormatContext, enum AVCodecID avCodecId, AVCodec** pavCodec) { AVStream* avStream = NULL; AVCodec* avCodec = NULL; if (pavCodec) { // Find the encoder. avCodec = avcodec_find_encoder(avCodecId); if (!avCodec) { iop->critical("could not find codec"); return NULL; } if (!Foundry::Nuke::isCodecWhitelistedForWriting(avCodec->name)) { #if defined(FN_LICENSED_PRORES_CODEC) // Apple ProRes specific. // ProRes encoding is supported by this writer // as the official Apple libraries have been licensed. So // check whether the black listed codec is indeed ProRes and // permit it to pass for this writer. if (strcmp(avCodec->name, "prores")) { iop->critical("unsupported codec"); return NULL; } #else iop->critical("unsupported codec"); return NULL; #endif // FN_LICENSED_PRORES_CODEC } } avStream = avformat_new_stream(avFormatContext, avCodec); if (!avStream) { iop->critical("could not allocate stream"); return NULL; } avStream->id = avFormatContext->nb_streams - 1; switch (avCodec->type) { case AVMEDIA_TYPE_AUDIO: configureAudioStream(avCodec, avStream); break; case AVMEDIA_TYPE_VIDEO: configureVideoStream(avCodec, avStream); break; default: break; } // Update the caller provided pointer with the codec. *pavCodec = avCodec; return avStream; } //////////////////////////////////////////////////////////////////////////////// // openCodec // Open a codec. // // @param avFormatContext A reference to an AVFormatContext of the file. // @param avCodec A reference to an AVCodec of the video codec. // @param avStream A reference to an AVStream of a video stream. // // @return 0 if successful, // <0 otherwise. 
//
int mov64Writer::openCodec(AVFormatContext* avFormatContext, AVCodec* avCodec, AVStream* avStream)
{
  AVCodecContext* avCodecContext = avStream->codec;
  if (AVMEDIA_TYPE_AUDIO == avCodecContext->codec_type) {
    // Audio codecs.
    if (avcodec_open2(avCodecContext, avCodec, NULL) < 0) {
      iop->critical("could not open audio codec");
      return -1;
    }
  }
  else if (AVMEDIA_TYPE_VIDEO == avCodecContext->codec_type) {
#if defined(FN_LICENSED_PRORES_CODEC)
    if (AV_CODEC_ID_PRORES == avCodecContext->codec_id) {
      // Apple ProRes specific.
      // Work around.
      // The non SDK ProRes codec is used simply for configuration.
      // The problem is that the pixel format required for the
      // Apple ProRes library is not supported by the non SDK ProRes
      // codec. The following is done simply to open the codec, so
      // it's opened with a pixel format that it supports,
      // otherwise the call to avcodec_open2 would fail. This does
      // not appear to have any side effects, i.e. the QuickTime
      // movie is created correctly.
      AVPixelFormat avPixelFormatTemp = avCodecContext->pix_fmt;
      avCodecContext->pix_fmt = AV_PIX_FMT_YUV422P10LE;
      if (avcodec_open2(avCodecContext, avCodec, NULL) < 0) {
        iop->critical("unable to open video codec");
        return -1;
      }
      // Restore the desired pixel format for the Apple ProRes
      // library encoder.
      avCodecContext->pix_fmt = avPixelFormatTemp;

      // If the selected codec is a ProRes codec, then set the
      // correct profile. This will ensure that the correct
      // profile is embedded in the QuickTime movie. This also
      // means that ProRes 4:4:4:4 will now appear in the movie
      // which would not happen with the current ffmpeg version.
      //
      // !! This must be done AFTER the call to avcodec_open2 !!
      //
      avCodecContext->codec_tag = SWAP32(appleProResCodec_->codecType); // Convert to a four character code.

      // Create an instance of the ProRes encoder class.
      encoder_.reset(NukeCodecs::CodecFactory::makeEncoder(appleProResCodec_->codecType));
      if (!encoder_.get()) {
        iop->critical("unable to initialise ProRes codec");
        return -3;
      }

      // Special behaviour.
      // Valid on Aug 2014 for ffmpeg v2.1.4
      //
      // R.e. libavformat/movenc.c::mov_write_video_tag
      // R.e. libavformat/movenc.c::find_compressor
      //
      // Fix the codec name field. Replace the generic 'prores'
      // tag used by the avcodec prores codec with the correct
      // ProRes codec profile name. The codec name field is
      // statically allocated so there is no need to cache the
      // original pointer. This results in the correct codec name
      // appearing in the QuickTime movie.
      avCodec = const_cast<AVCodec*>(avCodecContext->codec); // Cast away const to modify.
      oldProResCodecName_ = avCodec->name; // Cache so that this can be restored when the movie is closed.
      avCodec->name = appleProResCodec_->longName; // Replace with the correct codec profile name.
    }
    else
#endif
    // Non-ProRes video codecs (and ProRes when the licensed library is
    // not compiled in) are opened with the pixel format already set on
    // the context by the caller.
    if (avcodec_open2(avCodecContext, avCodec, NULL) < 0) {
      iop->critical("unable to open video codec");
      return -4;
    }
  }
  else if (AVMEDIA_TYPE_DATA == avCodecContext->codec_type) {
    // Timecode codecs.
    // Nothing to open; the timecode track is driven purely by stream
    // metadata (see configureVideoStream / execute).
  }
  return 0;
}

////////////////////////////////////////////////////////////////////////////////
// writeAudio
//
// * Read audio from the source file (this will also convert to the desired
//   sample format for the file).
// * Encode
// * Write to file.
//
// NOTE: Writing compressed audio is not yet implemented. E.g. 'flushing' the
//       encoder when the writer has finished writing all the video frames.
//
// @param avFormatContext A reference to an AVFormatContext of the file.
// @param avStream A reference to an AVStream of an audio stream.
// @param flush A boolean value to flag that any remaining frames in the internal
//              queue of the encoder should be written to the file. No new
//              frames will be queued for encoding.
// // @return 0 if successful, // <0 otherwise for any failure to read (and convert) the audio format, // encode the audio or write to the file. // int mov64Writer::writeAudio(AVFormatContext* avFormatContext, AVStream* avStream, bool flush) { int ret = 0; FnAVFrame avFrame; int nbSamples = audioReader_->read(avFrame); if (nbSamples) { AVPacket pkt = {0}; // data and size must be 0 av_init_packet(&pkt); if (flush) { // A slight hack. // So that the durations of the video track and audio track will be // as close as possible, when flushing (finishing) calculate how many // remaining audio samples to write using the difference between the // the duration of the video track and the duration of the audio track. double videoTime = streamVideo_->pts.val * av_q2d(streamVideo_->time_base); double audioTime = streamAudio_->pts.val * av_q2d(streamAudio_->time_base); double delta = videoTime - audioTime; if (0.0f <= delta) { nbSamples = delta / av_q2d(streamAudio_->time_base); // Add one sample to the count to guarantee that the audio track // will be the same length or very slightly longer than the video // track. This will then end the final loop that writes audio up // to the duration of the video track. if (avFrame->nb_samples > nbSamples) avFrame->nb_samples = nbSamples; } } AVCodecContext* avCodecContext = avStream->codec; int gotPacket; ret = avcodec_encode_audio2(avCodecContext, &pkt, avFrame, &gotPacket); if (!ret && gotPacket) { pkt.stream_index = avStream->index; ret = av_write_frame(avFormatContext, &pkt); } if (ret < 0) { // Report the error. char szError[1024]; av_strerror(ret, szError, 1024); iop->error(szError); } } return ret; } //////////////////////////////////////////////////////////////////////////////// // writeVideo // // * Convert Nuke float RGB values to the ffmpeg pixel format of the encoder. // * Encode. // * Write to file. // // @param avFormatContext A reference to an AVFormatContext of the file. 
// @param avStream A reference to an AVStream of a video stream.
// @param flush A boolean value to flag that any remaining frames in the internal
//              queue of the encoder should be written to the file. No new
//              frames will be queued for encoding.
//
// @return 0 if successful,
//         <0 otherwise for any failure to convert the pixel format, encode the
//         video or write to the file.
//
int mov64Writer::writeVideo(AVFormatContext* avFormatContext, AVStream* avStream, bool flush)
{
  int ret = 0;
  // First convert from Nuke floating point RGB to either 16-bit or 8-bit RGB.
  // Create a buffer to hold either 16-bit or 8-bit RGB.
  AVCodecContext* avCodecContext = avStream->codec;
  // Create another buffer to convert from either 16-bit or 8-bit RGB
  // to the input pixel format required by the encoder.
  AVPixelFormat pixelFormatCodec = avCodecContext->pix_fmt;
  int picSize = avpicture_get_size(pixelFormatCodec, width(), height());

  FnAVFrame avFrame;
  if (!flush) {
    // Choose the intermediate Nuke-side buffer format from the encoder's
    // requested bit depth (set in open() via getPixelFormats).
    AVPixelFormat pixelFormatNuke = (avCodecContext->bits_per_raw_sample > 8) ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB24;
    AVPicture avPicture;
    ret = avpicture_alloc(&avPicture, pixelFormatNuke, width(), height());
    if (!ret) {
      // Smart pointer for an AVPicture structure to ensure that memory is
      // released (through a call to avpicture_free) when this object goes
      // out of scope, or if an exception occurs.
      boost::shared_ptr<AVPicture> avPicturePtr(&avPicture, avpicture_free);

      // Convert floating point values to unsigned values.
      Row row(0, width());
      input0().validate();
      input0().request(0, 0, width(), height(), Mask_RGB, 1);

      for (int y = 0; y < height(); ++y) {
        get(y, 0, width(), Mask_RGB, row);
        if (iop->aborted()) {
          // User cancelled the render; avPicturePtr cleans up the picture.
          return -3;
        }

        // Interleave the R, G and B channels into the packed RGB(48/24)
        // buffer. Rows are written bottom-up (height() - y - 1) because
        // Nuke's scanlines are bottom-to-top while ffmpeg expects
        // top-to-bottom.
        for (Channel z = Chan_Red; z <= Chan_Blue; incr(z)) {
          const float* FROM = row[z];
          if (avCodecContext->bits_per_raw_sample > 8) {
            // void to_short(int z, U16* to, const float* from, const float* alpha, int W, int bits = 16, int delta = 1);
            U16* TO = reinterpret_cast<U16*>(avPicturePtr->data[0]);
            // avPicturePtr->linesize is in bytes, but stride is U16 (2 bytes), so divide linesize by 2
            TO += (height() - y - 1) * (avPicturePtr->linesize[0] / 2) + z - 1;
            to_short(z - 1, TO, FROM, NULL, width(), 16, 3);
          }
          else {
            // void to_byte(int z, uchar* to, const float* from, const float* alpha, int W, int delta = 1);
            uint8_t* TO = avPicturePtr->data[0];
            TO += (height() - y - 1) * avPicturePtr->linesize[0] + z - 1;
            to_byte(z - 1, TO, FROM, NULL, width(), 3);
          }
        }
      }

      ret = avFrame.alloc(width(), height(), pixelFormatCodec, 1);
      if (!ret) {
#if defined(FN_LICENSED_PRORES_CODEC)
        // Convert from either 16-bit or 8-bit RGB to the input pixel
        // format required by the encoder.
        // Apple ProRes specific.
        // There is no support for conversion to AV_PIX_FMT_BGRA64LE from
        // any other pixel format. This is the pixel format of choice for
        // Apple ProRes codec library as it maintains the highest bit
        // depth for all codec profiles. Therefore the conversion to
        // AV_PIX_FMT_BGRA64LE must be done ourselves.
        if (AV_CODEC_ID_PRORES == avCodecContext->codec_id) {
          // For speed, divide the conversion between the physical
          // CPUs by creating a worker thread to run on each CPU.
          // Initialise the payload for each thread and then
          // spawn the worker thread.
          ret = convertBuffer(eRGB48LE_to_b64a,
                              reinterpret_cast<const uint8_t*>(avPicturePtr->data[0]),
                              avPicturePtr->linesize[0],
                              reinterpret_cast<uint8_t*>(avFrame->data[0]),
                              avFrame->linesize[0],
                              width(),
                              height());
        }
        else
#endif
        {
          SwsContext* convertCtx = sws_getCachedContext(NULL,
                                                        width(), height(), pixelFormatNuke, // from
                                                        width(), height(), pixelFormatCodec,// to
                                                        SWS_BICUBIC, NULL, NULL, NULL);

          // Set up the sws (SoftWareScaler) to convert colourspaces correctly, in the sws_scale function below
          const int colorspace = isRec709Format(height()) ? SWS_CS_ITU709 : SWS_CS_ITU601;
          const int dstRange = IsYUV(pixelFormatCodec) ? 0 : 1; // 0 = 16..235, 1 = 0..255

          // Only apply colorspace conversions for YUV.
          if (IsYUV(pixelFormatCodec)) {
            ret = sws_setColorspaceDetails(convertCtx,
                                           sws_getCoefficients(SWS_CS_DEFAULT), // inv_table
                                           1, // srcRange - 0 = 16..235, 1 = 0..255
                                           sws_getCoefficients(colorspace), // table
                                           dstRange, // dstRange - 0 = 16..235, 1 = 0..255
                                           0, // brightness fixed point, with 0 meaning no change,
                                           1 << 16, // contrast fixed point, with 1<<16 meaning no change,
                                           1 << 16); // saturation fixed point, with 1<<16 meaning no change);
          }

          int sliceHeight = sws_scale(convertCtx,
                                      avPicturePtr->data, // src
                                      avPicturePtr->linesize, // src rowbytes
                                      0,
                                      height(),
                                      avFrame->data, // dst
                                      avFrame->linesize); // dst rowbytes
          assert(sliceHeight > 0);
        }
      }
      else {
        // av_image_alloc failed.
        ret = -1;
      }
    }
    else {
      // avpicture_alloc failed.
      // |ret| is already non-zero, so the write stage below is skipped.
    }
  }

  if (!ret) {
    int error = 0;
    if ((avFormatContext->oformat->flags & AVFMT_RAWPICTURE) != 0) {
      // Raw video case - the muxer accepts the picture verbatim.
      // NOTE(review): when |flush| is true, avFrame was never allocated, so
      // avFrame->data[0] below would dereference a null frame on a
      // raw-picture format — presumably mov never takes this path; confirm.
      AVPacket pkt;
      av_init_packet(&pkt);
      pkt.flags |= AV_PKT_FLAG_KEY;
      pkt.stream_index = avStream->index;
      pkt.data = avFrame->data[0];
      pkt.size = sizeof(AVPicture);

      const int writeResult = av_write_frame(avFormatContext, &pkt);
      const bool writeSucceeded = (writeResult == 0);
      if (!writeSucceeded) {
        error = true;
      }
    }
    else {
      // Encoded video case.
      // NOTE(review): shared_ptr with the default deleter calls delete, not
      // delete[], on this array allocation — consider boost::shared_array
      // or an explicit array deleter; confirm against the original source.
      boost::shared_ptr<uint8_t> outbuf(new uint8_t[picSize]);

      AVPacket pkt;
      av_init_packet(&pkt);

      // NOTE: If |flush| is true, then avFrame will be NULL at this point as
      //       alloc will not have been called.
      const int bytesEncoded = encodeVideo(avCodecContext, outbuf.get(), picSize, avFrame);
      const bool encodeSucceeded = (bytesEncoded > 0);
      if (encodeSucceeded) {
        if (avCodecContext->coded_frame && (avCodecContext->coded_frame->pts != AV_NOPTS_VALUE))
          pkt.pts = av_rescale_q(avCodecContext->coded_frame->pts, avCodecContext->time_base, avStream->time_base);
        if (avCodecContext->coded_frame && avCodecContext->coded_frame->key_frame)
          pkt.flags |= AV_PKT_FLAG_KEY;

        pkt.stream_index = avStream->index;
        pkt.data = outbuf.get();
        pkt.size = bytesEncoded;

        const int writeResult = av_write_frame(avFormatContext, &pkt);
        const bool writeSucceeded = (writeResult == 0);
        if (!writeSucceeded) {
          // Report the error.
          // NOTE(review): this passes |bytesEncoded| (a positive byte count)
          // to av_strerror rather than |writeResult| — looks like the wrong
          // error code is being reported; confirm and fix separately.
          char szError[1024];
          av_strerror(bytesEncoded, szError, 1024);
          iop->error(szError);
          error = true;
        }
      }
      else {
        if (bytesEncoded < 0) {
          // Report the error.
          char szError[1024];
          av_strerror(bytesEncoded, szError, 1024);
          iop->error(szError);
          error = true;
        }
        else if (flush) {
          // Flag that the flush is complete.
          ret = -10;
        }
      }
    }

    if (error) {
      iop->critical("error writing frame to file");
      ret = -2;
    }
  }

  return ret;
}

////////////////////////////////////////////////////////////////////////////////
// encodeVideo
// Encode a frame of video.
//
// Note that the uncompressed source frame to be encoded must be in an
// appropriate pixel format for the encoder prior to calling this method as
// this method does NOT perform an pixel format conversion, e.g. through using
// Sws_xxx.
//
// @param avCodecContext A reference to an AVCodecContext of a video stream.
// @param out A reference to a buffer to receive the encoded frame.
// @param outSize The size in bytes of |out|.
// @param avFrame A constant reference to an AVFrame that contains the source data
//                to be encoded. This must be in an appropriate pixel format for
//                the encoder.
//
// @return <0 for any failure to encode the frame, otherwise the size in byte
//         of the encoded frame.
//
int mov64Writer::encodeVideo(AVCodecContext* avCodecContext, uint8_t* out, int outSize, const AVFrame* avFrame)
{
  int ret, got_packet = 0;

  if (outSize < FF_MIN_BUFFER_SIZE) {
    av_log(avCodecContext, AV_LOG_ERROR, "buffer smaller than minimum size\n");
    return -1;
  }

  // The caller owns |out|; the packet simply borrows it.
  AVPacket pkt;
  av_init_packet(&pkt);
  pkt.data = out;
  pkt.size = outSize;

#if defined(FN_LICENSED_PRORES_CODEC)
  if (AV_CODEC_ID_PRORES == avCodecContext->codec_id) {
    // Check for null pointer as we have to do our own buffer management
    // for licensed ProRes decoding and don't want to pass it on to
    // the library.
    if (avFrame) {
      // Apple ProRes specific.
      // Encode a frame using the Apple ProRes encoder.
      NukeCodecs::EncoderParams params;
      params.in = avFrame->data[0];
      params.inRowBytes = avFrame->linesize[0];
      params.width = width();
      params.height = height();
      params.out = pkt.data;
      params.outSize = pkt.size;
      params.codecType = appleProResCodec_->codecType;
      ret = encoder_->Encode(params);
      if (ret > 0) {
        // Encode returns the compressed size in bytes.
        pkt.size = ret;
        avCodecContext->coded_frame->pts = AV_NOPTS_VALUE;
        avCodecContext->coded_frame->key_frame = 1;
      }
      else
        ret = -2;
    }
    else {
      // Signal that there is no further data to encode.
      pkt.size = 0;
      ret = 0;
    }
  }
  else
#endif // FN_LICENSED_PRORES_CODEC
  {
    ret = avcodec_encode_video2(avCodecContext, &pkt, avFrame, &got_packet);
    if (!ret && got_packet && avCodecContext->coded_frame) {
      avCodecContext->coded_frame->pts = pkt.pts;
      avCodecContext->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
    }
  }

  // On success return the encoded size (0 means "nothing left to flush").
  return ret ? ret : pkt.size;
}

////////////////////////////////////////////////////////////////////////////////
// writeToFile
// Write video and if specified audio to the movie. Interleave the audio and
// video as specified in the QuickTime movie recommendation. This is to have
// ~0.5s of audio interleaved with the video.
//
// @param avFormatContext A reference to the AVFormatContext of the file to
//                        write.
// @param finalise A flag to indicate that the streams should be flushed as
//                 no further frames are to be encoded.
//
// @return 0 if successful.
//         <0 otherwise.
//
int mov64Writer::writeToFile(AVFormatContext* avFormatContext, bool finalise)
{
  // Write interleaved audio and video if an audio file has
  // been specified. Otherwise just write video.
  //
  // If an audio file has been specified, calculate the
  // target stream time of the audio stream. For a QuickTime
  // movie write the audio in ~0.5s chunks so that there is
  // an approximate 0.5s interleave of audio and video.
  double videoTime = streamVideo_->pts.val * av_q2d(streamVideo_->time_base);
  if (streamAudio_) {
    // Determine the current audio stream time.
    double audioTime = streamAudio_->pts.val * av_q2d(streamAudio_->time_base);
    // Determine the target audio stream time. This is
    // the current video time rounded to the nearest
    // 0.5s boundary.
    double targetTime = ((int)(videoTime / 0.5)) * 0.5;
    if (audioTime < targetTime) {
      // If audio stream is more than 0.5s behind, write
      // another ~0.5s of audio.
      double sourceDuration = audioReader_->getDuration();
      while ((audioTime < targetTime) && (audioTime < sourceDuration)) {
        writeAudio(avFormatContext, streamAudio_, finalise);
        audioTime = streamAudio_->pts.val * av_q2d(streamAudio_->time_base);
      }
    }
  }
  // NOTE(review): this passes the member |formatContext_| while the audio
  // path above uses the |avFormatContext| parameter — presumably they are
  // always the same object (every caller passes formatContext_); confirm.
  return writeVideo(formatContext_, streamVideo_, finalise);
}

////////////////////////////////////////////////////////////////////////////////
// open
// Internal function to create all the required streams for writing a QuickTime
// movie.
//
// @return true if successful,
//         false otherwise.
//
bool mov64Writer::open()
{
  AVOutputFormat* avOutputFormat = initFormat(/* reportErrors = */ true);
  if (!avOutputFormat) {
    return false;
  }

  if (!formatContext_) {
    // NOTE(review): the return value of avformat_alloc_output_context2 is
    // unchecked; on failure formatContext_ stays NULL and the snprintf
    // below would dereference it — confirm and guard.
    avformat_alloc_output_context2(&formatContext_, avOutputFormat, NULL, filename());
  }

  snprintf(formatContext_->filename, sizeof(formatContext_->filename), "%s", filename());

  // Create an audio stream if a file has been provided.
  if (_audioFile && (strlen(_audioFile) > 0)) {
    if (!streamAudio_) {
      // Attempt to create an audio reader.
      audioReader_.reset(new AudioReader());

      // TODO: If the sample format is configurable via a knob, set
      //       the desired format here, e.g.:
      //audioReader_->setSampleFormat(_avSampleFormat);

      if (!audioReader_->open(_audioFile)) {
        AVCodec* audioCodec = NULL;
        streamAudio_ = addStream(formatContext_, AV_CODEC_ID_PCM_S16LE, &audioCodec);
        if (!streamAudio_ || !audioCodec) {
          freeFormat();
          return false;
        }

        // Bug 45010 The following flags must be set BEFORE calling
        // openCodec (avcodec_open2). This will ensure that codec
        // specific data is created and initialized. (E.g. for MPEG4
        // the AVCodecContext::extradata will contain Elementary Stream
        // Descriptor which is required for QuickTime to decode the
        // stream.)
        AVCodecContext* avCodecContext = streamAudio_->codec;
        if (!strcmp(formatContext_->oformat->name, "mp4") ||
            !strcmp(formatContext_->oformat->name, "mov") ||
            !strcmp(formatContext_->oformat->name, "3gp")) {
          avCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
        }
        // Some formats want stream headers to be separate.
        if (formatContext_->oformat->flags & AVFMT_GLOBALHEADER)
          avCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;

        if (openCodec(formatContext_, audioCodec, streamAudio_) < 0) {
          freeFormat();
          return false;
        }

        // If audio has been specified, set the start position (in seconds).
        // The values are negated to convert them from a video time to an
        // audio time.
        // So for a knob value of -10s, this means that the audio starts at
        // a video time of -10s. So this is converted to +10s audio time. I.e.
        // The video starts +10s into the audio. Vice-versa for a knob value
        // of +10s. The video starts -10s into the audio. In this case the
        // audio reader will provide 10s of silence for the first 10s of
        // video.
        audioReader_->setStartPosition(-((_audioOffsetUnit == 0) ? _audioOffset : (_audioOffset / fps_)));
      }
      else {
        iop->critical("failed to open the audio file\nIt does not contain audio or is an invalid file");
        return false;
      }
    }
  }

  // Create a video stream.
  AVCodecID codecId = AV_CODEC_ID_NONE;
  AVCodec* videoCodec = NULL;
  if (!initCodec(avOutputFormat, codecId, videoCodec)) {
    freeFormat();
    return false;
  }

  // Test if the container recognises the codec type.
  bool isCodecSupportedInContainer = (avformat_query_codec(avOutputFormat, codecId, FF_COMPLIANCE_NORMAL) == 1);
  // mov seems to be able to cope with anything, which the above function doesn't seem to think is the case (even with FF_COMPLIANCE_EXPERIMENTAL)
  // and it doesn't return -1 for in this case, so we'll special-case this situation to allow this
  isCodecSupportedInContainer |= (strcmp(formatContext_->oformat->name, "mov") == 0);

  if (!isCodecSupportedInContainer) {
    iop->critical("the selected codec is not supported in this container.");
    return false;
  }

  AVPixelFormat targetPixelFormat = AV_PIX_FMT_YUV420P;
  AVPixelFormat nukeBufferPixelFormat = AV_PIX_FMT_RGB24;
  int outBitDepth = 8;
  getPixelFormats(videoCodec, nukeBufferPixelFormat, targetPixelFormat, outBitDepth);

  if (!streamVideo_) {
    streamVideo_ = addStream(formatContext_, codecId, &videoCodec);
    if (!streamVideo_ || !videoCodec) {
      freeFormat();
      return false;
    }

    AVCodecContext* avCodecContext = streamVideo_->codec;
    avCodecContext->pix_fmt = targetPixelFormat;
    avCodecContext->bits_per_raw_sample = outBitDepth;

    // Bug 45010 The following flags must be set BEFORE calling
    // openCodec (avcodec_open2). This will ensure that codec
    // specific data is created and initialized. (E.g. for MPEG4
    // the AVCodecContext::extradata will contain Elementary Stream
    // Descriptor which is required for QuickTime to decode the
    // stream.)
    if (!strcmp(formatContext_->oformat->name, "mp4") ||
        !strcmp(formatContext_->oformat->name, "mov") ||
        !strcmp(formatContext_->oformat->name, "3gp")) {
      avCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    // Some formats want stream headers to be separate.
    if (formatContext_->oformat->flags & AVFMT_GLOBALHEADER)
      avCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;

    if (openCodec(formatContext_, videoCodec, streamVideo_) < 0) {
      freeFormat();
      return false;
    }

    if (!(avOutputFormat->flags & AVFMT_NOFILE)) {
      if (avio_open(&formatContext_->pb, filename(), AVIO_FLAG_WRITE) < 0) {
        iop->critical("unable to open file");
        freeFormat();
        return false;
      }
    }

    // NOTE(review): the return value of avformat_write_header is unchecked;
    // a header-write failure would surface later as a confusing frame-write
    // error — confirm and report here.
    avformat_write_header(formatContext_, NULL);
  }

  // Special behaviour.
  // Valid on Aug 2014 for ffmpeg v2.1.4
  //
  // R.e. libavformat/movenc.c::mov_write_udta_tag
  // R.e. libavformat/movenc.c::mov_write_string_metadata
  //
  // Remove all ffmpeg references from the QuickTime movie.
  // The 'encoder' key in the AVFormatContext metadata will
  // result in the writer adding @swr with libavformat
  // version information in the 'udta' atom.
  //
  // Prevent the @swr libavformat reference from appearing
  // in the 'udta' atom by setting the 'encoder' key in the
  // metadata to null. From movenc.c a zero length value
  // will not be written to the 'udta' atom.
  //
  AVDictionaryEntry* tag = av_dict_get(formatContext_->metadata, "encoder", NULL, AV_DICT_IGNORE_SUFFIX);
  if (tag)
    av_dict_set(&formatContext_->metadata, "encoder", "", 0); // Set the 'encoder' key to null.

  return true;
}

////////////////////////////////////////////////////////////////////////////////
// execute
// This is called once for each frame of video that is to be encoded and
// written to the file.
//
// For the first frame, configure the writer for encoding video and if an audio
// file has been specified, configure for encoding audio.
//
// Audio is written in ~0.5s chunks (r.e. ProRes QuickTime Guidelines.rtf in
// Thirdparty/ProResCodec//src/docs). The audio/video interleave in
// the file is achieved by monitoring the duration of the video stream as each
// video frame is encoded and written to the file. At every 0.5s interval of
// the video stream, a 0.5s chunk of audio is written to the file.
//
// When writing is completed there is a final call to |finish()|. The video
// stream is likely to be ahead of the audio stream at this point. Audio is
// written to the file so that the duration of the audio stream will match the
// duration of the video stream.
//
void mov64Writer::execute()
{
  error_ = IGNORE_FINISH;
  // Lazily open the file and create the streams on the first frame.
  if (!isOpen_)
    isOpen_ = open();
  if (isOpen_) {
    error_ = CLEANUP;
    // writeToFile returns 0 on success.
    if (!writeToFile(formatContext_, false))
      error_ = SUCCESS;
  }
}

////////////////////////////////////////////////////////////////////////////////
// finish
// Complete the encoding, finalise and close the file.
// Some video codecs may have data remaining in their encoding buffers. This
// must be 'flushed' to the file.
// This method flags the 'flush' to get the remaining encoded data out of the
// encoder(s) and into the file.
// Audio is written after video so it may require more writes from the audio
// stream in order to ensure that the duration of the audio and video tracks
// are the same.
//
void mov64Writer::finish()
{
  // execute() never succeeded (e.g. open() failed); nothing to finalise.
  if (error_ == IGNORE_FINISH)
    return;

  bool flushFrames = true;
  while (flushFrames) {
    // Continue to write the audio/video interleave while there are still
    // frames in the video and/or audio encoder queues, without queuing any
    // new data to encode. This is ffmpeg specific.
    // writeToFile returns non-zero (e.g. -10) once the flush is complete.
    flushFrames = !writeToFile(formatContext_, true) ? true : false;
  }

  // The audio is written in ~0.5s chunks only when the video stream position
  // passes a multiple of 0.5s interval. Flushing the video encoder may result
  // in less than 0.5s of video being written to the file so at this point the
  // video duration may be longer than the audio duration. This final stage
  // writes enough audio samples to the file so that the duration of both
  // audio and video are equal.
  double videoTime = streamVideo_->pts.val * av_q2d(streamVideo_->time_base);
  if (streamAudio_) {
    // Determine the current audio stream time.
    double audioTime = streamAudio_->pts.val * av_q2d(streamAudio_->time_base);
    if (audioTime < videoTime) {
      int ret = 0;
      double sourceDuration = audioReader_->getDuration();
      while ((audioTime < videoTime) && (audioTime < sourceDuration) && !ret) {
        ret = writeAudio(formatContext_, streamAudio_, true);
        audioTime = streamAudio_->pts.val * av_q2d(streamAudio_->time_base);
      }
    }
  }

  // Finalise the movie.
  av_write_trailer(formatContext_);
  freeFormat();
}

// Build the knob set for the writer. Knob names are prefixed 'mov64_' and
// legacy (mov32-era) knob names are routed through Obsolete_knob Tcl
// scripts so that old scripts keep working.
void mov64Writer::knobs(Knob_Callback f)
{
  // Backing storage for the format enumeration labels; static because the
  // Enumeration_knob keeps pointers into it.
  static std::vector<const char*> formatsAliases;

  formatsAliases.resize(formatsLongNames_.size());
  for (int i = 0; i < static_cast<int>(formatsLongNames_.size()); ++i)
    formatsAliases[i] = formatsLongNames_[i].c_str();
  formatsAliases.push_back(0); // NULL-terminate the enumeration list.

  // The 'format' knob is no longer supported - we will ALWAYS set mov64_format to default (even for mov)
  Obsolete_knob(f, "format", "if {$value ne {MOV format (mov)}} { alert {ffmpeg format is no longer supported} }");
  Enumeration_knob(f, &format_, &formatsAliases[0], "mov64_format");
  SetFlags(f, Knob::HIDDEN);

  if (!componentOfMovWriter_)
    Obsolete_knob(f, "codec", Foundry::Nuke::getMov64CodecTranslationScript());

  // Store the codec knob for use by the movWriter via getCodecKnob
  codecKnob_ = Enumeration_knob(f, &codec_, &codecsKnobLabels_[0], "mov64_codec", "codec");
  SetFlags(f, Knob::ALWAYS_SAVE);

  // Obsolete knobs could stomp on shared mov32Writer ones, when used in the combined movWriter.
  // We need to handle them in Tcl.
  Obsolete_knob(f, "fps", componentOfMovWriter_ ?
                          "knob mov64_fps $value\nknob mov32_fps $value" :
                          "knob mov64_fps $value");
  Float_knob(f, &fps_, IRange(0.0, 100.0f), "mov64_fps", "fps");

  Obsolete_knob(f, "audiofile", componentOfMovWriter_ ?
                                "knob mov64_audiofile $value\nknob mov32_audiofile $value" :
                                "knob mov64_audiofile $value");
  File_knob(f, &_audioFile, "mov64_audiofile", "audio file");

  Obsolete_knob(f, "audio_offset", componentOfMovWriter_ ?
                                   "knob mov64_audio_offset $value\nknob mov32_audio_offset $value" :
                                   "knob mov64_audio_offset $value");
  Float_knob(f, &_audioOffset, IRange(-1000.0, 1000.0), "mov64_audio_offset", "audio offset");
  SetFlags(f, Knob::NO_ANIMATION);
  Tooltip(f, "Offset the audio file by the given number of seconds/frames. "
             "A value of -10 seconds means the first frame of the image "
             "sequence syncs to the 10 second mark of the audio. A value "
             "of +10 seconds means the audio will start 10 seconds into "
             "the image sequence");

  Obsolete_knob(f, "units", componentOfMovWriter_ ?
                            "knob mov64_units $value\nknob mov32_units $value" :
                            "knob mov64_units $value");
  static const char* offset_units[] = { "Seconds", "Frames", 0 };
  Enumeration_knob(f, &_audioOffsetUnit, offset_units, "mov64_units", "units");

  Obsolete_knob(f, "writeTimeCode", componentOfMovWriter_ ?
                                    "knob mov64_write_timecode $value\nknob mov32_write_timecode $value" :
                                    "knob mov64_write_timecode $value");
  Bool_knob(f, &writeTimecode_, "mov64_write_timecode", "write time code" );
  Tooltip(f, "Add a time code track to the generated QuickTime file. "
             "This requires the presence of the \"input/timecode\" key in "
             "the metadata. It is possible to give the time code track its reel name "
             "though the \"quicktime/reel\" key in metadata. This is automatically "
             "read from any QuickTime file already containing a time code track and "
             "propagated through the tree. If this is not present the reel name will "
             "be written blank. Use the ModifyMetaData node to add it.\n"
             "If the timecode is missing, the track will not be written.");

  Obsolete_knob(f, "Advanced", "knob mov64_advanced $value");
  BeginClosedGroup(f, "mov64_advanced", "Advanced");

  //ffmpeg offers a number of controls over quality, bitrate, compression, profile/level etc.
  //Most don't do anything for the codecs we offer, so we stick with those known to have an impact,
  //enabling/disabling in knob_changed for the codecs they affect.
The bitrate/tolerance and quality //cnotrols all balance against each other, so tweaking them requires paying attention to all 3. //Potentially they could be another layer on top of predefined settings, but no other ffmpeg gui //has got this working cleanly, presumably due to the interop between controls. //Codec values, initialised from defaults in ffmpeg, are set to always save to ensure compat //between different versions of ffmpeg. //Knobs related to lossy compression settings. Obsolete_knob(f, "bitrate", "knob mov64_bitrate $value"); Int_knob(f, &bitrate_, IRange(0.0, 400000), "mov64_bitrate", "bitrate"); SetFlags(f, Knob::SLIDER | Knob::LOG_SLIDER | Knob::ALWAYS_SAVE); Tooltip (f, "The target bitrate the codec will attempt to reach, within the confines of the bitrate tolerance and " "quality min/max settings. Only supported by certain codecs."); Obsolete_knob(f, "bitrate_tolerance", "knob mov64_bitrate_tolerance $value"); Int_knob(f, &bitrateTolerance_, IRange(833, 4000 * 10000), "mov64_bitrate_tolerance", "bitrate tolerance"); SetFlags(f, Knob::SLIDER | Knob::LOG_SLIDER | Knob::ALWAYS_SAVE); Tooltip (f, "The amount the codec is allowed to vary from the target bitrate based on image and quality settings. " "Exercise caution with this control as too small a number for your image data will result in failed renders. " "As a guideline, the minimum slider range of target bitrate/target fps is the lowest advisable setting. " "Only supported by certain codecs."); Obsolete_knob(f, "quality_min", "knob mov64_quality_min $value"); static const char* qualityRangeTooltip = "The quality range the codec is allowed to vary the image data quantiser " "between to attempt to hit the desired bitrate. Higher values mean increased " "image degradation is possible, but with the upside of lower bit rates. 
" "Only supported by certain codecs."; Int_knob(f, &qMin_, "mov64_quality_min", "quality min"); SetFlags(f, Knob::NO_ANIMATION | Knob::ALWAYS_SAVE); Tooltip (f, qualityRangeTooltip); Obsolete_knob(f, "quality_max", "knob mov64_quality_max $value"); Int_knob(f, &qMax_, "mov64_quality_max", "max"); SetFlags(f, Knob::NO_ANIMATION | Knob::ALWAYS_SAVE); ClearFlags(f, Knob::STARTLINE); Tooltip (f, qualityRangeTooltip); //Knobs related to inter frame compression settings. Obsolete_knob(f, "gop_size", "knob mov64_gop_size $value"); Int_knob(f, &gopSize_, IRange(0, 30), "mov64_gop_size", "gop size"); SetFlags(f, Knob::SLIDER | Knob::LOG_SLIDER | Knob::ALWAYS_SAVE | Knob::NO_ANIMATION); Tooltip (f, "Specifies how many frames may be grouped together by the codec to form a compression GOP. Exercise caustion " "with this control as it may impact whether the resultant file can be opened in other packages. Only supported by " "certain codecs."); Obsolete_knob(f, "b_frames", "knob mov64_b_frames $value"); Int_knob(f, &bFrames_, IRange(0, FF_MAX_B_FRAMES), "mov64_b_frames", "b frames"); SetFlags(f, Knob::SLIDER | Knob::LOG_SLIDER | Knob::FORCE_RANGE | Knob::ALWAYS_SAVE | Knob::NO_ANIMATION); Tooltip (f, "Controls the maximum number of B frames found consecutively in the resultant stream, where zero means no limit " "imposed. Only supported by certain codecs."); Bool_knob(f, &writeNCLC_, "mov64_write_nclc", "write nclc"); Tooltip( f, "Write nclc data in the colr atom of the video header."); SetFlags(f, Knob::STARTLINE | Knob::NO_ANIMATION); //Badly formatted names obsoleted. Obsolete_knob(f, "bitrateTol", "knob mov64_bitrate_tolerance $value"); Obsolete_knob(f, "gopSize", "knob mov64_gop_size $value"); Obsolete_knob(f, "bFrames", "knob mov64_b_frames $value"); //Removed from panel - should never have been exposed as very low level control. //Maintained for compatibility with old scripts and unforseen circumstances. 
Obsolete_knob(f, "mbDecision", "knob mov64_mbDecision $value"); static const char* mbDecisionTypes[] = { "FF_MB_DECISION_SIMPLE", "FF_MB_DECISION_BITS", "FF_MB_DECISION_RD", 0 }; Enumeration_knob(f, &mbDecision_, mbDecisionTypes, "mov64_mbDecision", "macro block decision mode"); SetFlags(f, Knob::INVISIBLE); // Update the default LUT at launch if (!f.makeKnobs() && !defaultLutUpdated_) { iop->updateDefaultLUT(); defaultLutUpdated_ = true; } EndGroup(f); } int mov64Writer::knob_changed(DD::Image::Knob* knob) { if ( knob->is("file" ) ) { // Switch the 'format' knob based on the new filename suffix std::string suffix; FileOp::StripSuffix(filename(), &suffix); if (!suffix.empty()) { // Compare found suffix to known formats const size_t lengthWithoutNullTerminator = formatsShortNames_.size() - 1; for (size_t i = 0; i < lengthWithoutNullTerminator; ++i) { const char* formatName = formatsShortNames_[i]; if (suffix.compare(formatName) == 0) { iop->knob("mov64_format")->set_value(static_cast(i)); break; } } } return 1; } if (knob == &Knob::showPanel || knob->is("mov64_codec")) { //The advanced params are enabled/disabled based on the codec chosen and its capabilities. //We also investigated setting the defaults based on the codec defaults, however all current //codecs defaulted the same, and as a user experience it was pretty counter intuitive. //Check knob exists, to deal with cases where Nuke might not have updated underlying writer (#44774) //(we still want to use showPanel to update when loading from script and the like). 
if(iop->knob("mov64_codec")) { const size_t index = static_cast(iop->knob("mov64_codec")->get_value()); AVCodec* codec = avcodec_find_encoder_by_name(codecsShortNames_[index]); bool lossyParams = false; bool interGOPParams = false; bool interBParams = false; if (codec) { GetCodecSupportedParams(codec, lossyParams, interGOPParams, interBParams); } iop->knob("mov64_bitrate")->enable(lossyParams); iop->knob("mov64_bitrate_tolerance")->enable(lossyParams); iop->knob("mov64_quality_min")->enable(lossyParams); iop->knob("mov64_quality_max")->enable(lossyParams); iop->knob("mov64_gop_size")->enable(interGOPParams); iop->knob("mov64_b_frames")->enable(interBParams); //We use the bitrate to set the min range for bitrate tolerance. updateBitrateToleranceRange(); } return 1; } if (knob->is("mov64_fps") || knob->is("mov64_bitrate")) { updateBitrateToleranceRange(); return 1; } return Writer::knob_changed(knob); } //////////////////////////////////////////////////////////////////////////////// // updateBitrateToleranceRange // Utility to update tolerance knob's slider range. // Employed in place of chained knob_changed calls. // Only valid for use from knob_changed. // void mov64Writer::updateBitrateToleranceRange() { //Bitrate tolerance should in theory be allowed down to target bitrate/target framerate. //We're not force limiting the range since the upper range is not bounded. double minRange = iop->knob("mov64_bitrate")->get_value() / iop->knob("mov64_fps")->get_value(); iop->knob("mov64_bitrate_tolerance")->set_range(minRange, 4000 * 10000, false); } //////////////////////////////////////////////////////////////////////////////// // freeFormat // Release resources. // void mov64Writer::freeFormat() { if (streamVideo_) { #if defined(FN_LICENSED_PRORES_CODEC) if (oldProResCodecName_) { // Restore the original non SDK name for the ProRes codec. 
AVCodec* avCodec = const_cast(streamVideo_->codec->codec); avCodec->name = oldProResCodecName_; oldProResCodecName_ = NULL; } #endif avcodec_close(streamVideo_->codec); streamVideo_ = NULL; } if (streamAudio_) { avcodec_close(streamAudio_->codec); streamAudio_ = NULL; } if (!(formatContext_->oformat->flags & AVFMT_NOFILE)) avio_close(formatContext_->pb); avformat_free_context(formatContext_); formatContext_ = NULL; isOpen_ = false; } static Writer* build(Write* iop) { return new mov64Writer(iop); } #if FN_OS_LINUX const Writer::Description mov64Writer::d("mov\0avi\0mp4\0m4v\0mov64\0ffmpeg\0", "mov", build); #else const Writer::Description mov64Writer::d("mov64\0ffmpeg\0", "mov", build); #endif