FFmpeg audio/video decoding and audio/video synchronization on Android, shared from the Android community

Posted by aggrav8d on Sun, 21 Nov 2021 00:50:41 +0100

//Obtain video file information, such as the width and height of the video

//The second parameter is a dictionary, which indicates what information you need to obtain, such as video metadata

if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {

    LOGE("%s", "Unable to get video file information");

    return;

}



//Gets the index location of the video stream

//Traverse all types of streams (audio stream, video stream, caption stream) to find the video stream

int v_stream_idx = -1;

int i = 0;

//number of streams

for (; i < pFormatCtx->nb_streams; i++) {

    //Type of flow

    if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {

        v_stream_idx = i;

        break;

    }

}



if (v_stream_idx == -1) {

    LOGE("%s", "Video stream not found\n");

    return;

}



//Gets the codec context in the video stream

AVCodecContext *pCodecCtx = pFormatCtx->streams[v_stream_idx]->codec;



//Find the corresponding decoder according to the encoding id in the encoding and decoding context

AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);



if (pCodec == NULL) {

    LOGE("%s", "The decoder cannot be found or the video is encrypted\n");

    return;

}



//Open the decoder; this fails if there is a problem with the decoder (for example, the corresponding decoder was not enabled when FFmpeg was compiled)

if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {

    LOGE("%s", "The decoder cannot be opened\n");

    return;

}



//Ready to read

//AVPacket is used to store compressed data frame by frame (H264)

//Buffer, open up space

AVPacket *packet = (AVPacket *) av_malloc(sizeof(AVPacket));



//AVFrame is used to store decoded pixel data (YUV)

//memory allocation 

AVFrame *yuv_frame = av_frame_alloc();

AVFrame *rgb_frame = av_frame_alloc();



int got_picture, ret;

int frame_count = 0;



//Native window used for rendering

ANativeWindow *pWindow = ANativeWindow_fromSurface(env, surface);

//Buffer when drawing

ANativeWindow_Buffer out_buffer;



//Read compressed data frame by frame

while (av_read_frame(pFormatCtx, packet) >= 0) {

    //Only handle video packets (identified by the stream index)

    if (packet->stream_index == v_stream_idx) {

        //7. Decode one frame of video compressed data to obtain video pixel data

        ret = avcodec_decode_video2(pCodecCtx, yuv_frame, &got_picture, packet);

        if (ret < 0) {

            LOGE("%s", "Decoding error");

            return;

        }



        //got_picture is non-zero if a complete frame was decoded, 0 if no frame was produced

        if (got_picture) {



            //lock window

            //Set the properties of the buffer: width, height and pixel format (consistent with the format of the Java layer)

            ANativeWindow_setBuffersGeometry(pWindow, pCodecCtx->width, pCodecCtx->height,

                                             WINDOW_FORMAT_RGBA_8888);

            ANativeWindow_lock(pWindow, &out_buffer, NULL);



            //Initialize buffer

            //Set properties, pixel format, width and height

            //rgb_frame's buffer is the window's buffer; when the window is unlocked the data is drawn

            avpicture_fill((AVPicture *) rgb_frame, out_buffer.bits, AV_PIX_FMT_RGBA,

                           pCodecCtx->width,

                           pCodecCtx->height);



            //Convert the YUV data to RGBA_8888. FFmpeg (swscale) can also do this conversion, but it caused problems here, so the libyuv library is used instead

            I420ToARGB(yuv_frame->data[0], yuv_frame->linesize[0],

                       yuv_frame->data[2], yuv_frame->linesize[2],

                       yuv_frame->data[1], yuv_frame->linesize[1],

                       rgb_frame->data[0], rgb_frame->linesize[0],

                       pCodecCtx->width, pCodecCtx->height);



            //3. Unlock the window and post the buffer for display

            ANativeWindow_unlockAndPost(pWindow);



            frame_count++;

            LOGI("Decoding drawing section%d frame", frame_count);

        }

    }



    //Release resources

    av_free_packet(packet);

}



av_frame_free(&yuv_frame);

av_frame_free(&rgb_frame);

av_free(packet);

avcodec_close(pCodecCtx);

avformat_close_input(&pFormatCtx);

ANativeWindow_release(pWindow);

(*env)->ReleaseStringUTFChars(env, input_, input);

}
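The comment in the loop above notes that FFmpeg itself can also convert YUV to RGBA, but libyuv was used instead. For reference only, here is a minimal sketch of the swscale-based alternative; it is not part of the original code, it assumes the same pCodecCtx, yuv_frame and locked out_buffer as above, and in real code the SwsContext would be created once outside the decode loop:

```
#include "libswscale/swscale.h"

//Sketch only: swscale-based alternative to libyuv's I420ToARGB
struct SwsContext *sws = sws_getContext(
        pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,   //source
        pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_RGBA,      //destination
        SWS_BILINEAR, NULL, NULL, NULL);

//destination pointer and stride come from the locked ANativeWindow buffer
uint8_t *dst_data[4] = { (uint8_t *) out_buffer.bits, NULL, NULL, NULL };
int dst_linesize[4]  = { out_buffer.stride * 4, 0, 0, 0 };   //RGBA_8888: 4 bytes per pixel

sws_scale(sws, (const uint8_t *const *) yuv_frame->data, yuv_frame->linesize,
          0, pCodecCtx->height, dst_data, dst_linesize);

sws_freeContext(sws);
```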

#include "libswresample/swresample.h"

#define MAX_AUDIO_FRME_SIZE 48000 * 4

//Audio decoding (resampling)

JNIEXPORT void JNICALL

Java_com_haohao_ffmpeg_AVUtils_audioDecode(JNIEnv *env, jclass type, jstring input_,

                                       jstring output_) {

//Accessing static methods

jmethodID mid = (*env)->GetStaticMethodID(env, type, "onNativeCallback", "()V");

const char *input = (*env)->GetStringUTFChars(env, input_, 0);

const char *output = (*env)->GetStringUTFChars(env, output_, 0);



//Register components

av_register_all();

AVFormatContext *pFormatCtx = avformat_alloc_context();

//Open audio file

if (avformat_open_input(&pFormatCtx, input, NULL, NULL) != 0) {

    LOGI("%s", "Unable to open audio file");

    return;

}

//Get input file information

if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {

    LOGI("%s", "Unable to get input file information");

    return;

}

//Get audio stream index location

int i = 0, audio_stream_idx = -1;

for (; i < pFormatCtx->nb_streams; i++) {

    if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {

        audio_stream_idx = i;

        break;

    }

}



//Get decoder

AVCodecContext *codecCtx = pFormatCtx->streams[audio_stream_idx]->codec;

AVCodec *codec = avcodec_find_decoder(codecCtx->codec_id);

if (codec == NULL) {

    LOGI("%s", "Unable to get decoder");

    return;

}

//Open decoder

if (avcodec_open2(codecCtx, codec, NULL) < 0) {

    LOGI("%s", "Unable to open decoder");

    return;

}

//compressed data 

AVPacket *packet = (AVPacket *) av_malloc(sizeof(AVPacket));

//Decompress data

AVFrame *frame = av_frame_alloc();

//Convert frames to 16-bit 44100 Hz PCM: unify the audio sample format and sample rate

SwrContext *swrCtx = swr_alloc();



//Resampling setup parameters

//Input sampling format

enum AVSampleFormat in_sample_fmt = codecCtx->sample_fmt;

//Output sampling format: 16bit PCM

enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;

//Input sample rate

int in_sample_rate = codecCtx->sample_rate;

//Output sampling rate

int out_sample_rate = 44100;

//Gets the input channel layout

//Obtain the default channel layout according to the number of channels (2 channels, default stereo)

//av_get_default_channel_layout(codecCtx->channels);

uint64_t in_ch_layout = codecCtx->channel_layout;

//Output channel layout (stereo)

uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO;



swr_alloc_set_opts(swrCtx,

                   out_ch_layout, out_sample_fmt, out_sample_rate,

                   in_ch_layout, in_sample_fmt, in_sample_rate,

                   0, NULL);

swr_init(swrCtx);



//Number of output channels

int out_channel_nb = av_get_channel_layout_nb_channels(out_ch_layout);




//PCM data with bit width of 16bit and sampling rate of 44100HZ

uint8_t *out_buffer = (uint8_t *) av_malloc(MAX_AUDIO_FRME_SIZE);



FILE *fp_pcm = fopen(output, "wb");



int got_frame = 0, index = 0, ret;

//Continuously read compressed data

while (av_read_frame(pFormatCtx, packet) >= 0) {

    //decode

    ret = avcodec_decode_audio4(codecCtx, frame, &got_frame, packet);



    if (ret < 0) {

        LOGI("%s", "Decoding complete");

    }

    //Decode one frame successfully

    if (got_frame > 0) {

        LOGI("decode:%d", index++);

        swr_convert(swrCtx, &out_buffer, MAX_AUDIO_FRME_SIZE, (const uint8_t **) frame->data, frame->nb_samples);

        //Get the size of the sample

        int out_buffer_size = av_samples_get_buffer_size(NULL, out_channel_nb,

                                                         frame->nb_samples, out_sample_fmt, 1);

        fwrite(out_buffer, 1, out_buffer_size, fp_pcm);

    }



    av_free_packet(packet);

}



fclose(fp_pcm);

av_frame_free(&frame);

av_free(out_buffer);



swr_free(&swrCtx);

avcodec_close(codecCtx);

avformat_close_input(&pFormatCtx);



(*env)->ReleaseStringUTFChars(env, input_, input);

(*env)->ReleaseStringUTFChars(env, output_, output);

//Notify Java layer decoding complete

(*env)->CallStaticVoidMethod(env, type, mid);

}
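One detail worth calling out in the loop above: swr_convert's third argument is the output capacity in samples per channel, not in bytes, and the number of bytes to write is best derived from swr_convert's return value. The following is a hedged sketch of a safer variant, assuming the same variables as in audioDecode; it is my adjustment for illustration, not the author's code:

```
//Sketch only: size the output in samples per channel and write what swr_convert reports
int bytes_per_sample = av_get_bytes_per_sample(out_sample_fmt);               //2 for AV_SAMPLE_FMT_S16
int max_out_samples  = MAX_AUDIO_FRME_SIZE / (out_channel_nb * bytes_per_sample);

int converted = swr_convert(swrCtx, &out_buffer, max_out_samples,
                            (const uint8_t **) frame->data, frame->nb_samples);
if (converted > 0) {
    int out_buffer_size = converted * out_channel_nb * bytes_per_sample;
    fwrite(out_buffer, 1, out_buffer_size, fp_pcm);
}
```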

JNIEXPORT void JNICALL

Java_com_haohao_ffmpeg_AVUtils_audioPlay(JNIEnv *env, jclass type, jstring input_) {

const char *input = (*env)->GetStringUTFChars(env, input_, 0);

LOGI("%s", "sound");

//Register components

av_register_all();

AVFormatContext *pFormatCtx = avformat_alloc_context();

//Open audio file

if (avformat_open_input(&pFormatCtx, input, NULL, NULL) != 0) {

    LOGI("%s", "Unable to open audio file");

    return;

}

//Get input file information

if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {

    LOGI("%s", "Unable to get input file information");

    return;

}

//Get audio stream index location

int i = 0, audio_stream_idx = -1;

for (; i < pFormatCtx->nb_streams; i++) {

    if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {

        audio_stream_idx = i;

        break;

    }

}



//Get decoder

AVCodecContext *codecCtx = pFormatCtx->streams[audio_stream_idx]->codec;

AVCodec *codec = avcodec_find_decoder(codecCtx->codec_id);

if (codec == NULL) {

    LOGI("%s", "Unable to get decoder");

    return;

}

//Open decoder

if (avcodec_open2(codecCtx, codec, NULL) < 0) {

    LOGI("%s", "Unable to open decoder");

    return;

}

//compressed data 

AVPacket *packet = (AVPacket *) av_malloc(sizeof(AVPacket));

//Decompress data

AVFrame *frame = av_frame_alloc();

//Convert frames to 16-bit PCM: unify the audio sample format and sample rate

SwrContext *swrCtx = swr_alloc();



//Input sampling format

enum AVSampleFormat in_sample_fmt = codecCtx->sample_fmt;

//Output sampling format: 16bit PCM

enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;

//Input sample rate

int in_sample_rate = codecCtx->sample_rate;

//Output sampling rate

int out_sample_rate = in_sample_rate;

//Gets the input channel layout

//Obtain the default channel layout according to the number of channels (2 channels, default stereo)

//av_get_default_channel_layout(codecCtx->channels);

uint64_t in_ch_layout = codecCtx->channel_layout;

//Output channel layout (stereo)

uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO;



swr_alloc_set_opts(swrCtx,

                   out_ch_layout, out_sample_fmt, out_sample_rate,

                   in_ch_layout, in_sample_fmt, in_sample_rate,

                   0, NULL);

swr_init(swrCtx);



//Number of output channels

int out_channel_nb = av_get_channel_layout_nb_channels(out_ch_layout);

//AudioTrack object

jmethodID create_audio_track_mid = (*env)->GetStaticMethodID(env, type, "createAudioTrack",

                                                             "(II)Landroid/media/AudioTrack;");

jobject audio_track = (*env)->CallStaticObjectMethod(env, type, create_audio_track_mid,

                                                     out_sample_rate, out_channel_nb);



//Call the AudioTrack.play method

jclass audio_track_class = (*env)->GetObjectClass(env, audio_track);

jmethodID audio_track_play_mid = (*env)->GetMethodID(env, audio_track_class, "play", "()V");

jmethodID audio_track_stop_mid = (*env)->GetMethodID(env, audio_track_class, "stop", "()V");

(*env)->CallVoidMethod(env, audio_track, audio_track_play_mid);



//AudioTrack.write

jmethodID audio_track_write_mid = (*env)->GetMethodID(env, audio_track_class, "write",

                                                      "([BII)I");

//16bit 44100 PCM data

uint8_t *out_buffer = (uint8_t *) av_malloc(MAX_AUDIO_FRME_SIZE);



int got_frame = 0, index = 0, ret;

//Continuously read compressed data

while (av_read_frame(pFormatCtx, packet) >= 0) {

    //Decode packets of audio type

    if (packet->stream_index == audio_stream_idx) {

        //decode

        ret = avcodec_decode_audio4(codecCtx, frame, &got_frame, packet);



        if (ret < 0) {

            LOGI("%s", "Decoding complete");

        }

        //Decode one frame successfully

        if (got_frame > 0) {

            LOGI("decode:%d", index++);

            swr_convert(swrCtx, &out_buffer, MAX_AUDIO_FRME_SIZE,

                        (const uint8_t **) frame->data, frame->nb_samples);

            //Get the size of the sample

            int out_buffer_size = av_samples_get_buffer_size(NULL, out_channel_nb,

                                                             frame->nb_samples, out_sample_fmt,

                                                             1);



            //out_buffer buffer data, converted into byte array

            jbyteArray audio_sample_array = (*env)->NewByteArray(env, out_buffer_size);

            jbyte *sample_bytep = (*env)->GetByteArrayElements(env, audio_sample_array, NULL);

            //Copy the out_buffer data into sample_bytep

            memcpy(sample_bytep, out_buffer, out_buffer_size);

            //Commit the changes back to the Java byte array (mode 0 copies back and frees)

            (*env)->ReleaseByteArrayElements(env, audio_sample_array, sample_bytep, 0);



            //AudioTrack.write PCM data

            (*env)->CallIntMethod(env, audio_track, audio_track_write_mid,

                                  audio_sample_array, 0, out_buffer_size);

            //Release local reference

            (*env)->DeleteLocalRef(env, audio_sample_array);

        }

    }

    av_free_packet(packet);

}



(*env)->CallVoidMethod(env, audio_track, audio_track_stop_mid);



av_frame_free(&frame);

av_free(out_buffer);



swr_free(&swrCtx);

avcodec_close(codecCtx);

avformat_close_input(&pFormatCtx);



(*env)->ReleaseStringUTFChars(env, input_, input);

}



CMakeLists.txt



cmake_minimum_required(VERSION 3.4.1)

include_directories(${CMAKE_SOURCE_DIR}/src/main/cpp/include)

set(jnilibs "${CMAKE_SOURCE_DIR}/src/main/jniLibs")

set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${jnilibs}/${ANDROID_ABI})

add_library( # Sets the name of the library.

         native-lib



         # Sets the library as a shared library.

         SHARED



         # Provides a relative path to your source file(s).

         src/main/cpp/native-lib.c)

# Add the eight FFmpeg shared libraries and the libyuv library

add_library(avutil-54 SHARED IMPORTED)

set_target_properties(avutil-54 PROPERTIES IMPORTED_LOCATION "${jnilibs}/${ANDROID_ABI}/libavutil-54.so")

add_library(swresample-1 SHARED IMPORTED)

set_target_properties(swresample-1 PROPERTIES IMPORTED_LOCATION "${jnilibs}/${ANDROID_ABI}/libswresample-1.so")

add_library(avcodec-56 SHARED IMPORTED)

set_target_properties(avcodec-56 PROPERTIES IMPORTED_LOCATION "${jnilibs}/${ANDROID_ABI}/libavcodec-56.so")

add_library(avformat-56 SHARED IMPORTED)

set_target_properties(avformat-56 PROPERTIES IMPORTED_LOCATION "${jnilibs}/${ANDROID_ABI}/libavformat-56.so")

add_library(swscale-3 SHARED IMPORTED)

set_target_properties(swscale-3 PROPERTIES IMPORTED_LOCATION "${jnilibs}/${ANDROID_ABI}/libswscale-3.so")

add_library(postproc-53 SHARED IMPORTED)

set_target_properties(postproc-53 PROPERTIES IMPORTED_LOCATION "${jnilibs}/${ANDROID_ABI}/libpostproc-53.so")

add_library(avfilter-5 SHARED IMPORTED)

set_target_properties(avfilter-5 PROPERTIES IMPORTED_LOCATION "${jnilibs}/${ANDROID_ABI}/libavfilter-5.so")

add_library(avdevice-56 SHARED IMPORTED)

set_target_properties(avdevice-56 PROPERTIES IMPORTED_LOCATION "${jnilibs}/${ANDROID_ABI}/libavdevice-56.so")

add_library(yuv SHARED IMPORTED)

set_target_properties(yuv PROPERTIES IMPORTED_LOCATION "${jnilibs}/${ANDROID_ABI}/libyuv.so")

find_library( # Sets the name of the path variable.

          log-lib



          # Specifies the name of the NDK library that

          # you want CMake to locate.

          log )

# Find the Android system library needed for native window drawing

find_library(

        android-lib

        android

        )

target_link_libraries( native-lib

                   ${log-lib} 

                   ${android-lib} 

                   avutil-54 

                   swresample-1

                   avcodec-56

                   avformat-56

                   swscale-3

                   postproc-53

                   avfilter-5

                   avdevice-56

                   yuv) 


PS: 



1.  Note: remember to add the file read/write permissions (e.g. READ_EXTERNAL_STORAGE and WRITE_EXTERNAL_STORAGE in AndroidManifest.xml).



2. Principle and implementation of audio and video synchronization

============



2.1 Principle

------



If playback simply follows the audio sample rate and the video frame rate, the two streams are hard to keep in sync: differences in machine speed, decoding efficiency and other factors introduce a time difference, and that audio/video gap grows steadily over time. There are therefore three ways to synchronize audio and video:



1. Synchronize both audio and video to an external clock. This was my first thought, but it is not ideal: for biological reasons people are much more sensitive to changes in sound than to changes in the picture, so frequently adjusting the audio produces harsh artifacts or noise and hurts the user experience. (ps: biology does come in handy.)



2. Use the video as the master clock and have the audio synchronize to the video's time. Not used, for the same reason as above.



3. Use the audio as the master clock and have the video synchronize to the audio's time. This is the approach used here.



So the principle is: use the audio time as the reference, judge whether the video is running fast or slow, and adjust the video's pace accordingly. In practice it is a dynamic process of catching up and waiting, roughly sketched below.
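As a rough sketch of this catch-up-and-wait idea (pseudo-code with hypothetical helper names, not the implementation shown later in 2.3):

```
//Pseudo-code sketch, audio as the master clock
double diff = video_clock - audio_clock;        //> 0: video is ahead, < 0: video is behind
if (diff > sync_threshold) {
    wait_a_bit_longer_before_next_frame();      //let the audio catch up
} else if (diff < -sync_threshold) {
    show_next_frame_immediately();              //video catches up with the audio
} else {
    keep_the_normal_frame_interval();
}
```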



2.2 Some concepts

--------



Both audio and video have a DTS and a PTS.



DTS (Decoding Time Stamp): tells the decoder in what order to decode the packets.  

PTS (Presentation Time Stamp): indicates in what order the data decoded from the packets should be displayed.  

For audio the two are the same; for video, the presence of B-frames (bidirectional prediction) makes the decoding order differ from the display order, so a video stream's DTS and PTS are not necessarily equal.
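A small illustration of why the two can differ for video (an assumed group of pictures, not taken from the article):

```
/*
 * display order (PTS): I  B  B  P
 * decode  order (DTS): I  P  B  B
 *
 * The P frame must be decoded before the two B frames that reference it,
 * so the decoding order differs from the display order; for audio every
 * packet is independent, so DTS and PTS coincide.
 */
```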



Time base: see the FFmpeg source:

```



/**

 * This is the fundamental unit of time (in seconds) in terms

 * of which frame timestamps are represented. For fixed-fps content,

 * timebase should be 1/framerate and timestamp increments should be

 * identically 1.

 * This often, but not always is the inverse of the frame rate or field rate

 * for video.

 * - encoding: MUST be set by user.

 * - decoding: the use of this field for decoding is deprecated.

 *             Use framerate instead.

 */

AVRational time_base; 
/**

 * rational number numerator/denominator

 */

typedef struct AVRational{

    int num; ///< numerator

    int den; ///< denominator

} AVRational; 

```



My personal understanding: ffmpeg expresses its time unit as a fraction, with num as the numerator and den as the denominator. ffmpeg also provides a conversion function:



```

/**

 * Convert rational to double.

 * @param a rational to convert

 * @return (double) a

 */

static inline double av_q2d(AVRational a){

    return a.num / (double) a.den;

} 

```



Therefore, the display time of a frame of video is calculated as (unit: seconds): 



```

time = pts * av_q2d(time_base); 

```
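A worked example of this formula (illustrative numbers only, assuming an MPEG-TS-style time base of 1/90000):

```
//Illustration only: time_base = 1/90000, pts = 180000
AVRational tb = {1, 90000};
double t = 180000 * av_q2d(tb);   //t = 180000 / 90000 = 2.0 seconds
```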



2.3 Synchronization code

--------



**1. Audio part**  

clock is the audio playback time so far (from the start of playback to the current position).



```

if (packet->pts != AV_NOPTS_VALUE) {

            audio->clock = av_q2d(audio->time_base) * packet->pts;

 } 

```



Then add the time needed to play the data contained in this packet:



```

double time = datalen / ((double) 44100 * 2 * 2);

audio->clock = audio->clock + time; 

```



datalen is the length of the data in bytes. The sample rate is 44100 Hz, the sample size is 16 bits (2 bytes) and there are 2 channels, so the playback time is the data length divided by the number of bytes per second (44100 * 2 * 2).
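To make the arithmetic concrete (illustrative numbers, not from the article):

```
//44100 Hz * 2 bytes per sample * 2 channels = 176400 bytes of PCM per second
int bytes_per_second = 44100 * 2 * 2;
//a decoded chunk of, say, 8820 bytes therefore lasts 8820 / 176400 = 0.05 s,
//and that is what gets added to audio->clock for this packet
double time = 8820 / (double) bytes_per_second;
```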



ps: the calculation here is not perfect and has quite a few problems; I will come back and improve it later.



**2. Video part**  

First define several variables:



```

 double last_play        //playback time of the previous frame

    , play               //playback time of the current frame

    , last_delay         //interval between the previous two video frames

    , delay              //interval between the current two video frames

    , audio_clock        //actual playback time of the audio track

    , diff               //time difference between the audio frame and the video frame

    , sync_threshold     //tolerated sync range

    , start_time         //absolute time of the first frame

    , pts

    , actual_delay;      //delay actually needed

    start_time = av_gettime() / 1000000.0; 

//Get pts

    if ((pts = av_frame_get_best_effort_timestamp(frame)) == AV_NOPTS_VALUE) {

        pts = 0;

    }

    play = pts * av_q2d(vedio->time_base);

//Correct the playback time

    play = vedio->synchronize(frame, play);

    delay = play - last_play;

    if (delay <= 0 || delay > 1) {

        delay = last_delay;

    }

    audio_clock = vedio->audio->clock;

    last_delay = delay;

    last_play = play;

//Time difference between audio and video

    diff = vedio->clock - audio_clock;

//Outside the tolerated range, delay or speed up accordingly

    sync_threshold = (delay > 0.01 ? 0.01 : delay);



    if (fabs(diff) < 10) {

        if (diff <= -sync_threshold) {

            delay = 0;

        } else if (diff >= sync_threshold) {

            delay = 2 * delay;

        }

    }

    start_time += delay;

    actual_delay = start_time - av_gettime() / 1000000.0;

    if (actual_delay < 0.01) {

        actual_delay = 0.01;

    }

//ffmpeg suggests computing the sleep time this way; exactly why remains to be studied

    av_usleep(actual_delay * 1000000.0 + 6000); 

```

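A worked example of the adjustment above (illustrative numbers, not from the article):

```
/*
 * audio_clock = 10.00 s, vedio->clock = 10.08 s  ->  diff = +0.08 s
 * delay = 0.04 s  ->  sync_threshold = 0.01 s
 * diff >= sync_threshold, so the video is ahead: delay becomes 2 * 0.04 = 0.08 s
 * and the next frame is held back until the audio catches up.
 * If diff were -0.08 s, delay would be set to 0 and the frame would be
 * rendered immediately so the video catches up with the audio.
 */
```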

Correcting play (the playback time): the repeat_pict / (2 * fps) formula comes from the comments in the ffmpeg source:



```
double synchronize(AVFrame *frame, double play) {

//clock is the time position of the current playback

if (play != 0)

    clock=play;

else //if pts is 0, use the previous frame's time (clock) as play first

    play = clock;

//even if pts was 0, clock is still advanced below

//the extra display delay needs to be calculated:

double repeat_pict = frame->repeat_pict;

//use the AVCodecContext's time_base rather than the stream's

double frame_delay = av_q2d(codec->time_base);

//fps 

double fps = 1 / frame_delay;

//pts plus this delay gives the display time  

double extra_delay = repeat_pict / (2 * fps);

double delay = extra_delay + frame_delay;

clock += delay;

return play;   //assumption: the corrected time is returned, since the caller does play = synchronize(frame, play)

}

```

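A worked example of the extra delay computed by synchronize (illustrative numbers, assuming a codec time_base of 1/25):

```
/*
 * frame_delay = av_q2d({1, 25}) = 0.04 s, so fps = 25
 * repeat_pict = 1  ->  extra_delay = 1 / (2 * 25) = 0.02 s
 * clock advances by frame_delay + extra_delay = 0.06 s for this frame;
 * with repeat_pict = 0 (the common case) it advances by one frame interval, 0.04 s.
 */
```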



### Summary

Finally, to help you understand the principles behind the Android knowledge points covered here and prepare for related interviews, I have collected and organized my core notes on advanced Android development: 2968 pages of PDF, about 580,000 words, covering 648 Android development knowledge points. I organized the technical points into videos and PDFs (it took far more effort than expected), packaging the knowledge context together with many details.

**[CodeChina open source project: Android study notes summary + mobile architecture videos + real interview questions from big companies + project practice source code](
)**

There is plenty of material online for learning Android, but if what you learn is not systematic, and you only scratch the surface when you run into problems without digging deeper, it is hard to make real technical progress. I hope this systematic body of knowledge gives everyone a direction to refer to.

In 2021, even though the road is bumpy and everyone keeps saying Android is in decline, don't panic: make your own plan and build your own learning habits. Competition exists everywhere, in every industry. Believe in yourself; there is nothing you cannot do, only things you have not yet thought of.

Although that interview failed, I will not give up my determination to land the job. I suggest you prepare thoroughly before an interview so that you can smoothly get the offer you want.

> **This article is hosted in the [Tencent CODING open source project: Android study notes summary + mobile architecture videos + real interview questions from big companies + project practice source code](https://ali1024.coding.net/public/P7/Android/git); self-study resources and this series of articles are continuously updated.**

Topics: Android Design Pattern Programmer