I have been studying the dranger ffmpeg tutorial, which explains how to sync audio and video once you have frames displaying and audio playing.

Unfortunately, the tutorial is out of date (Stephen Dranger explained that to me himself), and I am not using SDL, because this is for a BlackBerry 10 application.

I cannot get the video frames to display at the correct speed (they just play extremely fast), and I have been trying for over a week now.

Three threads are running: one reads from the stream into the audio and video queues (the packet_queue_* helpers are sketched just after the main loop below), and then there is one thread each for audio and video.

If anyone could explain what is going on after scanning my relevant code, you would be a lifesaver.

The delay (what I pass to usleep(testDelay)) seems to keep rising (increasing), which does not look right to me.
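
To convince myself I was not misreading the tutorial's math, I boiled the timing logic from video_refresh_timer() (shown further down) into this standalone simulation. The numbers are made up for illustration: a 30 fps stream, with each pass through the display loop taking only ~15 ms of real time, i.e. frames become ready faster than they are due.

    #include <cstdio>

    int main() {
        double pts_step    = 1.0 / 30.0; // pts difference between frames, in seconds
        double frame_timer = 0.0;        // plays the role of is->frame_timer
        double wall_clock  = 0.0;        // plays the role of av_gettime() / 1000000.0
        for (int frame = 0; frame < 10; ++frame) {
            frame_timer += pts_step;                        // is->frame_timer += delay
            double actual_delay = frame_timer - wall_clock; // time left until the frame is due
            if (actual_delay < 0.010)
                actual_delay = 0.010;
            int testDelay = (int)(actual_delay * 1000 + 0.5); // ms, what I hand to usleep(testDelay)
            printf("frame %d: testDelay = %d ms\n", frame, testDelay);
            wall_clock += 0.015; // but the loop only consumes ~15 ms of real time
        }
        return 0;
    }

If each pass through the loop takes less real time than one pts step, testDelay grows steadily. The per-frame growth in my real run is larger, but the shape matches the sample output at the end of this post (123, 140, 163, ... 575 ms).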

count = 1;
    MyApp* inst = worker->app;//(VideoUploadFacebook*)arg;
    qDebug() << "\n start loadstream";
    w = new QWaitCondition();
    w2 = new QWaitCondition();
    context = avformat_alloc_context();
    inst->threadStarted = true;
    cout << "start of decoding thread";
    cout.flush();


    av_register_all();
    avcodec_register_all();
    avformat_network_init();
    av_log_set_callback(&log_callback);
    AVInputFormat   *pFormat;
    //const char      device[]     = "/dev/video0";
    const char      formatName[] = "mp4";
    cout << "2start of decoding thread";
    cout.flush();



    if (!(pFormat = av_find_input_format(formatName))) {
        printf("can't find input format %s\n", formatName);
        //return void*;
    }
    //open rtsp
    if(avformat_open_input(&context, inst->capturedUrl.data(), pFormat,NULL) != 0){
        // return ;
        cout << "error opening of decoding thread: " << inst->capturedUrl.data();
        cout.flush();
    }

    cout << "3start of decoding thread";
    cout.flush();
    // av_dump_format(context, 0, inst->capturedUrl.data(), 0);
    /*   if(avformat_find_stream_info(context,NULL) < 0){
        return EXIT_FAILURE;
    }
     */
    //search video stream
    for(int i =0;i<context->nb_streams;i++){
        if(context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            inst->video_stream_index = i;
    }
    cout << "3z start of decoding thread";
    cout.flush();
    AVFormatContext* oc = avformat_alloc_context();
    av_read_play(context);//play RTSP
    AVDictionary *optionsDict = NULL;
    ccontext = context->streams[inst->video_stream_index]->codec;

    inst->audioc = context->streams[1]->codec;

    cout << "4start of decoding thread";
    cout.flush();
    codec = avcodec_find_decoder(ccontext->codec_id);
    ccontext->pix_fmt = PIX_FMT_YUV420P;

    AVCodec* audio_codec = avcodec_find_decoder(inst->audioc->codec_id);
    inst->packet = new AVPacket();
    if (!audio_codec) {
        cout << "audio codec not found\n"; //fflush( stdout );
        exit(1);
    }

    if (avcodec_open2(inst->audioc, audio_codec, NULL) < 0) {
        cout << "could not open codec\n"; //fflush( stdout );
        exit(1);
    }

    if (avcodec_open2(ccontext, codec, &optionsDict) < 0) exit(1);

    cout << "5start of decoding thread";
    cout.flush();
    inst->pic = avcodec_alloc_frame();

    av_init_packet(inst->packet);

    // read packets until we have decoded the first complete frame, so the
    // video dimensions read below are valid
    while(av_read_frame(context, inst->packet) >= 0)
    {

        if(inst->packet->stream_index == 0){//packet is video

            int check = 0;



            // av_init_packet(inst->packet);
            int result = avcodec_decode_video2(ccontext, inst->pic, &check, inst->packet);

            if(check)
                break;
        }
    }



    inst->originalVideoWidth = inst->pic->width;
    inst->originalVideoHeight = inst->pic->height;
    float aspect = (float)inst->originalVideoHeight / (float)inst->originalVideoWidth;
    inst->newVideoWidth = inst->originalVideoWidth;
    int newHeight = (int)(inst->newVideoWidth * aspect);
    inst->newVideoHeight = newHeight;//(int)inst->originalVideoHeight / inst->originalVideoWidth * inst->newVideoWidth;// = new height
    int size = avpicture_get_size(PIX_FMT_YUV420P, inst->originalVideoWidth, inst->originalVideoHeight);
    uint8_t* picture_buf = (uint8_t*)(av_malloc(size));
    avpicture_fill((AVPicture *) inst->pic, picture_buf, PIX_FMT_YUV420P, inst->originalVideoWidth, inst->originalVideoHeight);

    picrgb = avcodec_alloc_frame();
    int size2 = avpicture_get_size(PIX_FMT_YUV420P, inst->newVideoWidth, inst->newVideoHeight);
    uint8_t* picture_buf2 = (uint8_t*)(av_malloc(size2));
    avpicture_fill((AVPicture *) picrgb, picture_buf2, PIX_FMT_YUV420P, inst->newVideoWidth, inst->newVideoHeight);



    if(ccontext->pix_fmt != PIX_FMT_YUV420P)
    {
        std::cout << "fmt != 420!!!: " << ccontext->pix_fmt << std::endl;//
        // return (EXIT_SUCCESS);//-1;

    }


    if (inst->createForeignWindow(inst->myForeignWindow->windowGroup(),
            "HelloForeignWindowAppIDqq", 0,
            0, inst->newVideoWidth,
            inst->newVideoHeight)) {

    } else {
        qDebug() << "The ForeginWindow was not properly initialized";
    }




    inst->keepGoing = true;

    inst->img_convert_ctx = sws_getContext(inst->originalVideoWidth, inst->originalVideoHeight, PIX_FMT_YUV420P, inst->newVideoWidth, inst->newVideoHeight,
            PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);

    is = (VideoState*)av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;

    is->audioStream = 1;
    is->audio_st = context->streams[1];
    is->audio_buf_size = 0;
    is->audio_buf_index = 0;
    is->videoStream = 0;
    is->video_st = context->streams[0];

    is->frame_timer = (double)av_gettime() / 1000000.0;
    is->frame_last_delay = 40e-3;

    is->av_sync_type = DEFAULT_AV_SYNC_TYPE;
    //av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = pFormat;
    is->ytop    = 0;
    is->xleft   = 0;

    /* start video display */
    is->pictq_mutex = new QMutex();
    is->pictq_cond  = new QWaitCondition();

    is->subpq_mutex = new QMutex();
    is->subpq_cond  = new QWaitCondition();

    is->video_current_pts_time = av_gettime();


    packet_queue_init(&audioq);

    packet_queue_init(&videoq);
    is->audioq = audioq;
    is->videoq = videoq;
    AVPacket* packet2  = new AVPacket();

    ccontext->get_buffer = our_get_buffer;
    ccontext->release_buffer = our_release_buffer;


    av_init_packet(packet2);
    while(inst->keepGoing)
    {


        if(av_read_frame(context,packet2) < 0 && keepGoing)
        {
            printf("bufferframe Could not read a frame from stream.\n");
            fflush( stdout );


        }else {



            if(packet2->stream_index == 0) {
                packet_queue_put(&videoq, packet2);
            } else if(packet2->stream_index == 1) {
                packet_queue_put(&audioq, packet2);
            } else {
                av_free_packet(packet2);
            }


            if(!videoThreadStarted)
            {
                videoThreadStarted = true;
                QThread* thread = new QThread;
                videoThread = new VideoStreamWorker(this);

                // Give QThread ownership of Worker Object
                videoThread->moveToThread(thread);
                connect(videoThread, SIGNAL(error(QString)), this, SLOT(errorHandler(QString)));
                QObject::connect(videoThread, SIGNAL(refreshNeeded()), this, SLOT(refreshNeededSlot()));
                connect(thread, SIGNAL(started()), videoThread, SLOT(doWork()));
                connect(videoThread, SIGNAL(finished()), thread, SLOT(quit()));
                connect(videoThread, SIGNAL(finished()), videoThread, SLOT(deleteLater()));
                connect(thread, SIGNAL(finished()), thread, SLOT(deleteLater()));

                thread->start();
            }

            if(!audioThreadStarted)
            {
                audioThreadStarted = true;
                QThread* thread = new QThread;
                AudioStreamWorker* audioThread = new AudioStreamWorker(this);

                // Give QThread ownership of Worker Object
                audioThread->moveToThread(thread);

                // Connect audioThread error signal to this errorHandler SLOT.
                connect(audioThread, SIGNAL(error(QString)), this, SLOT(errorHandler(QString)));

                // Connects the thread's started() signal to the doWork() slot in audioThread, causing it to start.
                connect(thread, SIGNAL(started()), audioThread, SLOT(doWork()));
                connect(audioThread, SIGNAL(finished()), thread, SLOT(quit()));
                connect(audioThread, SIGNAL(finished()), audioThread, SLOT(deleteLater()));

                // Make sure the thread object is deleted after execution has finished.
                connect(thread, SIGNAL(finished()), thread, SLOT(deleteLater()));

                thread->start();
            }

        }

    } //finished main loop
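
The packet_queue_* helpers used above (and in the two threads below) are not shown; they are essentially the tutorial's packet queue with SDL's mutex and condition variable swapped for QMutex/QWaitCondition. A simplified sketch of what they look like:

    typedef struct PacketQueue {
        AVPacketList *first_pkt, *last_pkt;
        int nb_packets;
        int size;
        QMutex *mutex;
        QWaitCondition *cond;
    } PacketQueue;

    void packet_queue_init(PacketQueue *q) {
        memset(q, 0, sizeof(PacketQueue));
        q->mutex = new QMutex();
        q->cond = new QWaitCondition();
    }

    int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
        if(av_dup_packet(pkt) < 0) // take ownership of the packet data before queueing
            return -1;
        AVPacketList *pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
        if(!pkt1)
            return -1;
        pkt1->pkt = *pkt;
        pkt1->next = NULL;

        q->mutex->lock();
        if(!q->last_pkt)
            q->first_pkt = pkt1;
        else
            q->last_pkt->next = pkt1;
        q->last_pkt = pkt1;
        q->nb_packets++;
        q->size += pkt1->pkt.size;
        q->cond->wakeOne(); // wake a decoder blocked in packet_queue_get
        q->mutex->unlock();
        return 0;
    }

    int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
        int ret = 0;
        q->mutex->lock();
        for(;;) {
            AVPacketList *pkt1 = q->first_pkt;
            if(pkt1) {
                q->first_pkt = pkt1->next;
                if(!q->first_pkt)
                    q->last_pkt = NULL;
                q->nb_packets--;
                q->size -= pkt1->pkt.size;
                *pkt = pkt1->pkt;
                av_free(pkt1);
                ret = 1;
                break;
            } else if(!block) {
                break;
            } else {
                q->cond->wait(q->mutex); // sleep until the reader thread puts a packet
            }
        }
        q->mutex->unlock();
        return ret;
    }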

    int MyApp::video_thread() {
    //VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int len1, frameFinished;

    double pts;
    pic = avcodec_alloc_frame();

    for(;;) {
        if(packet_queue_get(&videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }

        pts = 0;

        global_video_pkt_pts2 = packet->pts;
        // Decode video frame
        len1 =  avcodec_decode_video2(ccontext, pic, &frameFinished, packet);
        if(packet->dts == AV_NOPTS_VALUE
                && pic->opaque && *(uint64_t*)pic->opaque != AV_NOPTS_VALUE) {
            pts = *(uint64_t *)pic->opaque;
        } else if(packet->dts != AV_NOPTS_VALUE) {
            pts = packet->dts;
        } else {
            pts = 0;
        }
        pts *= av_q2d(is->video_st->time_base);
        // Did we get a video frame?

        if(frameFinished) {
            pts = synchronize_video(is, pic, pts);
            actualPts = pts;
            refreshSlot();
        }
        av_free_packet(packet);
    }
    av_free(pic);
    return 0;
}


int MyApp::audio_thread() {
    //VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int len1, frameFinished;
    ALuint source;
    ALenum format = 0;
    //   ALuint frequency;
    ALenum alError;
    ALint val2;
    ALuint buffers[NUM_BUFFERS];
    int dataSize;


    ALCcontext *aContext;
    ALCdevice *device;
    if (!alutInit(NULL, NULL)) {
        // printf(stderr, "init alut error\n");
    }
    device = alcOpenDevice(NULL);
    if (device == NULL) {
        // printf(stderr, "device error\n");
    }

    //Create a context
    aContext = alcCreateContext(device, NULL);
    if(!aContext) {
        printf("Could not create the OpenAL context!\n");
        return 0;
    }
    alcMakeContextCurrent(aContext);

    alListener3f(AL_POSITION, 0.0f, 0.0f, 0.0f);

    //ALenum alError;
    if(alGetError() != AL_NO_ERROR) {
        cout << "could not create buffers";
        cout.flush();
        fflush( stdout );
        return 0;
    }
    alGenBuffers(NUM_BUFFERS, buffers);
    alGenSources(1, &source);
    if(alGetError() != AL_NO_ERROR) {
        cout << "after Could not create buffers or the source.\n";
        cout.flush(  );
        return 0;
    }

    int i;
    int indexOfPacket;
    double pts;
    //double pts;
    int n;


    for(i = 0; i < NUM_BUFFERS; i++)
    {
        if(packet_queue_get(&audioq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }
        cout << "streamindex=audio \n";
        cout.flush(  );
        //printf("before decode  audio\n");
        //fflush( stdout );
        // AVPacket *packet = new AVPacket();//malloc(sizeof(AVPacket*));
        AVFrame *decodedFrame = NULL;
        int gotFrame = 0;
        // AVFrame* decodedFrame;

        if(!decodedFrame) {
            if(!(decodedFrame = avcodec_alloc_frame())) {
                cout << "Run out of memory, stop the streaming...\n";
                fflush( stdout );
                cout.flush();


                return -2;
            }
        } else {
            avcodec_get_frame_defaults(decodedFrame);
        }

        int len = avcodec_decode_audio4(audioc, decodedFrame, &gotFrame, packet);
        if(len < 0) {
            /* if error, skip frame */
            cout << "Error while decoding.\n";
            cout.flush();
            is->audio_pkt_size = 0;
            return -3;
        }
        is->audio_pkt_data += len;
        is->audio_pkt_size -= len;

        pts = is->audio_clock;
        // *pts_ptr = pts;
        n = 2 * is->audio_st->codec->channels;
        is->audio_clock += (double)packet->size/
                (double)(n * is->audio_st->codec->sample_rate);
        if(gotFrame) {
            cout << "got audio frame.\n";
            cout.flush(  );
            // We have a buffer ready, send it
            dataSize = av_samples_get_buffer_size(NULL, audioc->channels,
                    decodedFrame->nb_samples, audioc->sample_fmt, 1);

            if(!format) {
                if(audioc->sample_fmt == AV_SAMPLE_FMT_U8 ||
                        audioc->sample_fmt == AV_SAMPLE_FMT_U8P) {
                    if(audioc->channels == 1) {
                        format = AL_FORMAT_MONO8;
                    } else if(audioc->channels == 2) {
                        format = AL_FORMAT_STEREO8;
                    }
                } else if(audioc->sample_fmt == AV_SAMPLE_FMT_S16 ||
                        audioc->sample_fmt == AV_SAMPLE_FMT_S16P) {
                    if(audioc->channels == 1) {
                        format = AL_FORMAT_MONO16;
                    } else if(audioc->channels == 2) {
                        format = AL_FORMAT_STEREO16;
                    }
                }

                if(!format) {
                    cout << "OpenAL can't open this format of sound.\n";
                    cout.flush(  );

                    return -4;
                }
            }
            printf("albufferdata audio b4.\n");
            fflush( stdout );
            alBufferData(buffers[i], format, *decodedFrame->data, dataSize, decodedFrame->sample_rate);
            cout << "after albufferdata all buffers \n";
            cout.flush(  );
            av_free_packet(packet);
            //=av_free(packet);
            av_free(decodedFrame);

            if((alError = alGetError()) != AL_NO_ERROR) {
                printf("Error while buffering.\n");

                printAlError(alError);
                return -6;
            }
        }
    }


    cout << "before quoe buffers \n";
    cout.flush();
    alSourceQueueBuffers(source, NUM_BUFFERS, buffers);
    cout << "before play.\n";
    cout.flush();
    alSourcePlay(source);
    cout << "after play.\n";
    cout.flush();
    if((alError = alGetError()) != AL_NO_ERROR) {
        cout << "error strating stream.\n";
        cout.flush();
        printAlError(alError);
        return 0;
    }


    // AVPacket *pkt = &is->audio_pkt;

    while(keepGoing)
    {
        while(packet_queue_get(&audioq, packet, 1)  >= 0) {
            // means we quit getting packets

            do {
                alGetSourcei(source, AL_BUFFERS_PROCESSED, &val2);
                usleep(SLEEP_BUFFERING);
            } while(val2 <= 0);
            if(alGetError() != AL_NO_ERROR)
            {
                fprintf(stderr, "Error gettingsource :(\n");
                return 1;
            }

            while(val2--)
            {



                ALuint buffer;
                alSourceUnqueueBuffers(source, 1, &buffer);
                if(alGetError() != AL_NO_ERROR)
                {
                    fprintf(stderr, "Error unqueue buffers :(\n");
                    //  return 1;
                }
                AVFrame *decodedFrame = NULL;
                int gotFrame = 0;
                // AVFrame* decodedFrame;

                if(!decodedFrame) {
                    if(!(decodedFrame = avcodec_alloc_frame())) {
                        cout << "Run out of memory, stop the streaming...\n";
                        //fflush( stdout );
                        cout.flush();


                        return -2;
                    }
                } else {
                    avcodec_get_frame_defaults(decodedFrame);
                }

                int  len = avcodec_decode_audio4(audioc, decodedFrame, &gotFrame, packet);
                if(len < 0) {
                    cout << "Error while decoding.\n";
                    cout.flush(  );
                    is->audio_pkt_size = 0;
                    return -3;
                }

                is->audio_pkt_data += len;
                is->audio_pkt_size -= len;
                if(packet->size <= 0) {
                    /* No data yet, get more frames */
                    //continue;
                }


                if(gotFrame) {
                    pts = is->audio_clock;
                    len = synchronize_audio(is, (int16_t *)is->audio_buf,
                            packet->size, pts);
                    is->audio_buf_size = packet->size;
                    pts = is->audio_clock;
                    // *pts_ptr = pts;
                    n = 2 * is->audio_st->codec->channels;
                    is->audio_clock += (double)packet->size /
                            (double)(n * is->audio_st->codec->sample_rate);
                    if(packet->pts != AV_NOPTS_VALUE) {
                        is->audio_clock = av_q2d(is->audio_st->time_base)*packet->pts;
                    }
                    len = av_samples_get_buffer_size(NULL, audioc->channels,
                            decodedFrame->nb_samples, audioc->sample_fmt, 1);
                    alBufferData(buffer, format, *decodedFrame->data, len, decodedFrame->sample_rate);
                    if(alGetError() != AL_NO_ERROR)
                    {
                        fprintf(stderr, "Error buffering :(\n");
                        return 1;
                    }
                    alSourceQueueBuffers(source, 1, &buffer);
                    if(alGetError() != AL_NO_ERROR)
                    {
                        fprintf(stderr, "Error queueing buffers :(\n");
                        return 1;
                    }
                }

            }

            alGetSourcei(source, AL_SOURCE_STATE, &val2);
            if(val2 != AL_PLAYING)
                alSourcePlay(source);

        }


        //pic = avcodec_alloc_frame();
    }
    qDebug() << "end audiothread";
    return 1;
}

void MyApp::refreshSlot()
{


    if(true)
    {

        printf("got frame %d, %d\n", pic->width, ccontext->width);
        fflush( stdout );

        sws_scale(img_convert_ctx, (const uint8_t **)pic->data, pic->linesize,
                0, originalVideoHeight, &picrgb->data[0], &picrgb->linesize[0]);

        printf("rescaled frame %d, %d\n", newVideoWidth, newVideoHeight);
        fflush( stdout );
        //av_free_packet(packet);
        //av_init_packet(packet);

        qDebug() << "waking audio as video finished";
        ////mutex.unlock();
        //mutex2.lock();
        doingVideoFrame = false;
        //doingAudioFrame = false;
        ////mutex2.unlock();


        //mutex2.unlock();
        //w2->wakeAll();
        //w->wakeAll();
        qDebug() << "now woke audio";

        //pic = picrgb;
        uint8_t *srcy = picrgb->data[0];
        uint8_t *srcu = picrgb->data[1];
        uint8_t *srcv = picrgb->data[2];
        printf("got src yuv frame %d\n", &srcy);
        fflush( stdout );
        unsigned char *ptr = NULL;
        screen_get_buffer_property_pv(mScreenPixelBuffer, SCREEN_PROPERTY_POINTER, (void**) &ptr);
        unsigned char *y = ptr;
        unsigned char *u = y + (newVideoHeight * mStride) ;
        unsigned char *v = u + (newVideoHeight * mStride) / 4;
        int i = 0;
        printf("got buffer  picrgbwidth= %d \n", newVideoWidth);
        fflush( stdout );
        for ( i = 0; i < newVideoHeight; i++)
        {
            int doff = i * mStride;
            int soff = i * picrgb->linesize[0];
            memcpy(&y[doff], &srcy[soff], newVideoWidth);
        }

        for ( i = 0; i < newVideoHeight / 2; i++)
        {
            int doff = i * mStride / 2;
            int soff = i * picrgb->linesize[1];
            memcpy(&u[doff], &srcu[soff], newVideoWidth / 2);
        }

        for ( i = 0; i < newVideoHeight / 2; i++)
        {
            int doff = i * mStride / 2;
            int soff = i * picrgb->linesize[2];
            memcpy(&v[doff], &srcv[soff], newVideoWidth / 2);
        }
        printf("before posttoscreen \n");
        fflush( stdout );

        video_refresh_timer();
        qDebug() << "end refreshslot";

    }
    else
    {

    }





}

void  MyApp::refreshNeededSlot2()
    {
        printf("blitting to buffer");
        fflush(stdout);

        screen_buffer_t screen_buffer;
        screen_get_window_property_pv(mScreenWindow, SCREEN_PROPERTY_RENDER_BUFFERS, (void**) &screen_buffer);
        int attribs[] = { SCREEN_BLIT_SOURCE_WIDTH, newVideoWidth, SCREEN_BLIT_SOURCE_HEIGHT, newVideoHeight, SCREEN_BLIT_END };
        int res2 = screen_blit(mScreenCtx, screen_buffer, mScreenPixelBuffer, attribs);
        printf("dirty rectangles");
        fflush(stdout);
        int dirty_rects[] = { 0, 0, newVideoWidth, newVideoHeight };
        screen_post_window(mScreenWindow, screen_buffer, 1, dirty_rects, 0);
        printf("done screneposdtwindow");
        fflush(stdout);

    }

void MyApp::video_refresh_timer() {
    testDelay = 0;
    //  VideoState *is = ( VideoState* )userdata;
    VideoPicture *vp;
    //double pts = 0    ;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    if(is->video_st) {
        if(false)////is->pictq_size == 0)
        {
            testDelay = 1;
            schedule_refresh(is, 1);
        } else {
            // vp = &is->pictq[is->pictq_rindex];

            delay = actualPts - is->frame_last_pts; /* the pts from last time */
            if(delay <= 0 || delay >= 1.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            /* save for next time */
            is->frame_last_delay = delay;
            is->frame_last_pts = actualPts;

            is->video_current_pts = actualPts;
            is->video_current_pts_time = av_gettime();
            /* update delay to sync to audio */
            ref_clock = get_audio_clock(is);
            diff = actualPts - ref_clock;

            /* Skip or repeat the frame. Take delay into account
     FFPlay still doesn't "know if this is the best guess." */
            sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
            if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
                if(diff <= -sync_threshold) {
                    delay = 0;
                } else if(diff >= sync_threshold) {
                    delay = 2 * delay;
                }
            }
            is->frame_timer += delay;
            /* compute the REAL delay */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if(actual_delay < 0.010) {
                /* Really it should skip the picture instead */
                actual_delay = 0.010;
            }
            testDelay = (int)(actual_delay * 1000 + 0.5);
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
            /* show the picture! */
            //video_display(is);


            // SDL_CondSignal(is->pictq_cond);
            // SDL_UnlockMutex(is->pictq_mutex);
        }
    } else {
        testDelay = 100;
        schedule_refresh(is, 100);

    }
}

void MyApp::schedule_refresh(VideoState *is, int delay) {
    qDebug() << "start schedule refresh timer" << delay;
    typeOfEvent = FF_REFRESH_EVENT2;
    w->wakeAll();
    //  SDL_AddTimer(delay,


}

Currently I wait for data in a loop like this (schedule_refresh() above just records testDelay and wakes this loop via w->wakeAll()):

QMutex mutex;
    mutex.lock();
    while(keepGoing)
    {



        qDebug() << "MAINTHREAD" << testDelay;


        w->wait(&mutex);
        mutex.unlock();
        qDebug() << "MAINTHREAD past wait";

        if(!keepGoing)
        {
            break;
        }
        if(testDelay > 0 && typeOfEvent == FF_REFRESH_EVENT2)
        {
            usleep(testDelay);
            refreshNeededSlot2();
        }
        else   if(testDelay > 0 && typeOfEvent == FF_QUIT_EVENT2)
        {
            keepGoing = false;
            exit(0);
            break;
            // usleep(testDelay);
            // refreshNeededSlot2();
        }
        qDebug() << "MAINTHREADend";
        mutex.lock();

    }
    mutex.unlock();

Let me know if I need to provide any more of the relevant code. Sorry the code is messy - I am still learning C++ and, as mentioned, I have been modifying this code for over a week.

I have added a sample of the output I see printed to the console. I cannot make sense of it (it is almost too complex for my level of expertise), but when you see frames playing and hear audio playing, it is very hard to give up, especially when it took weeks to get to this stage.

If you can spot the problem, please lend a hand.

MAINTHREAD past wait point after syncvideo= 1073394046 got frame 640, 640 start video_refresh_timer actualpts = 1.66833 frame lastpts = 1.63497 start schedule refresh timer need delay 123

point after syncvideo= 1073429033 got frame 640, 640 MAINTHREAD loop delay before refresh = 123 start video_refresh_timer actualpts = 1.7017 frame lastpts = 1.66833

MAINTHREAD past wait point after syncvideo= 1073464021 got frame 640, 640 start video_refresh_timer actualpts = 1.73507 frame lastpts = 1.7017 start schedule refresh timer need delay 140

MAINTHREAD loop delay before refresh = 140 pts after syncvideo= 1073499008 got frame 640, 640 start video_refresh_timer actualpts = 1.76843 frame lastpts = 1.73507 start schedule refresh timer need delay 163

MAINTHREAD past wait point after syncvideo= 1073533996 got frame 640, 640 start video_refresh_timer actualpts = 1.8018 frame lastpts = 1.76843 start schedule refresh timer need delay 188

MAINTHREAD loop delay before refresh = 188 pts after syncvideo= 1073568983 got frame 640, 640 start video_refresh_timer actualpts = 1.83517 frame lastpts = 1.8018 start schedule refresh timer need delay 246

MAINTHREAD past wait point after syncvideo= 1073603971 got frame 640, 640 start video_refresh_timer actualpts = 1.86853 frame lastpts = 1.83517 start schedule refresh timer need delay 299

MAINTHREAD loop delay before refresh = 299 pts after syncvideo= 1073638958 got frame 640, 640 start video_refresh_timer actualpts = 1.9019 frame lastpts = 1.86853 start schedule refresh timer need delay 358

MAINTHREAD past wait point after syncvideo= 1073673946 got frame 640, 640 start video_refresh_timer actualpts = 1.93527 frame lastpts = 1.9019 start schedule refresh timer need delay 416

MAINTHREAD loop delay before refresh = 416 pts after syncvideo= 1073708933 got frame 640, 640 start video_refresh_timer actualpts = 1.96863 frame lastpts = 1.93527 start schedule refresh timer need delay 474

MAINTHREAD past wait point after syncvideo= 1073742872 got frame 640, 640 MAINTHREAD loop delay before refresh = 474 start video_refresh_timer actualpts = 2.002 frame lastpts = 1.96863 start schedule refresh timer need delay 518

MAINTHREAD past wait point after syncvideo= 1073760366 got frame 640, 640 start video_refresh_timer actualpts = 2.03537 frame lastpts = 2.002 start schedule refresh timer need delay 575
