
FFmpeg decoding cook audio: loud noise? 400 error

if(codecCtx->codec_type == AVMEDIA_TYPE_AUDIO)
{
    // Set audio settings from codec info
    wanted_spec.freq = codecCtx->sample_rate;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.channels = codecCtx->channels;
    wanted_spec.silence = 0;
    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
    wanted_spec.callback = audio_callback;
    wanted_spec.userdata = is;

I'm not sure whether there is anything wrong with this snippet.

I developed a piece of software with the FFmpeg libraries; the audio decoder is selected automatically. Decoding other audio formats works fine, but cook audio alone has a problem: playback is full of loud noise.

 

爱吃鱼的程序员 2020-06-05 13:33:36 527 0
1 answer

    Nothing in what you posted looks wrong; it only sets the SDL audio playback parameters and the callback.
    We would need to see how you actually decode, and whether any of the decoded data is getting lost along the way; otherwise there should be no noise...

    I've posted the source code, please take a look, thank you!
    Reply to @bruce_hou: I haven't seen your code, so I can't tell what is going on.
    How exactly should I handle this? Could you explain in a bit more detail?
    Here is the source code:
    #include "libavformat/avformat.h"  
    #include "libswscale/swscale.h"  
      
    #include <SDL/SDL.h>  
    #include <SDL/SDL_thread.h>  
      
    #ifdef main  
    #undef main   
    #endif  
      
    #include <stdio.h>
    #include <string.h>   /* memset / memcpy / strcpy */
    #include <math.h>
      
    #define SDL_AUDIO_BUFFER_SIZE 1024  
      
    #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)  
    #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)  
      
    #define AV_SYNC_THRESHOLD 0.01  
    #define AV_NOSYNC_THRESHOLD 10.0  
      
    #define FF_ALLOC_EVENT   (SDL_USEREVENT)  
    #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)  
    #define FF_QUIT_EVENT (SDL_USEREVENT + 2)  
      
    #define VIDEO_PICTURE_QUEUE_SIZE 1  
      
    typedef struct PacketQueue  
    {  
        AVPacketList *first_pkt, *last_pkt;  
        int nb_packets;  
        int size;  
        SDL_mutex *mutex;  
        SDL_cond *cond;  
    } PacketQueue;  
      
    typedef struct VideoPicture  
    {  
        SDL_Overlay *bmp;  
        int width, height;   
        int allocated;  
        double pts;  
    } VideoPicture;  
      
    typedef struct VideoState  
    {  
        AVFormatContext *pFormatCtx;  
        int             videoStream, audioStream;  
        double          audio_clock;  
        AVStream        *audio_st;  
        PacketQueue     audioq;  
        uint8_t         audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];  
        unsigned int    audio_buf_size;  
        unsigned int    audio_buf_index;  
        AVPacket        audio_pkt;  
        uint8_t         *audio_pkt_data;  
        int             audio_pkt_size;  
        int             audio_hw_buf_size;  
        double          frame_timer;  
        double          frame_last_pts;  
        double          frame_last_delay;  
        double          video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame  
        AVStream        *video_st;  
        PacketQueue     videoq;  
      
        VideoPicture    pictq[VIDEO_PICTURE_QUEUE_SIZE];  
        int             pictq_size, pictq_rindex, pictq_windex;  
        SDL_mutex       *pictq_mutex;  
        SDL_cond        *pictq_cond;  
        SDL_Thread      *parse_tid;  
        SDL_Thread      *video_tid;  
      
        char            filename[1024];  
        int             quit;  
    } VideoState;  
      
    SDL_Surface     *screen;  
      
      
    VideoState *global_video_state;  
      
    void packet_queue_init(PacketQueue *q)  
    {  
        memset(q, 0, sizeof(PacketQueue));  
        q->mutex = SDL_CreateMutex();  
        q->cond = SDL_CreateCond();  
    }  
    int packet_queue_put(PacketQueue *q, AVPacket *pkt)  
    {  
        AVPacketList *pkt1;  
        if(av_dup_packet(pkt) < 0)  
        {  
            return -1;  
        }  
        pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));  
        if (!pkt1)  
            return -1;  
        pkt1->pkt = *pkt;  
        pkt1->next = NULL;  
      
        SDL_LockMutex(q->mutex);  
      
        if (!q->last_pkt)  
            q->first_pkt = pkt1;  
        else  
            q->last_pkt->next = pkt1;  
        q->last_pkt = pkt1;  
        q->nb_packets++;  
        q->size += pkt1->pkt.size;  
        SDL_CondSignal(q->cond);  
      
        SDL_UnlockMutex(q->mutex);  
        return 0;  
    }  
      
    static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)  
    {  
        AVPacketList *pkt1;  
        int ret;  
      
        SDL_LockMutex(q->mutex);  
      
        for(;;)  
        {  
            if(global_video_state->quit)  
            {  
                ret = -1;  
                break;  
            }  
      
            pkt1 = q->first_pkt;  
            if (pkt1)  
            {  
                q->first_pkt = pkt1->next;  
                if (!q->first_pkt)  
                    q->last_pkt = NULL;  
                q->nb_packets--;  
                q->size -= pkt1->pkt.size;  
                *pkt = pkt1->pkt;  
                av_free(pkt1);  
                ret = 1;  
                break;  
            }  
            else if (!block)  
            {  
                ret = 0;  
                break;  
            }  
            else  
            {  
                SDL_CondWait(q->cond, q->mutex);  
            }  
        }  
        SDL_UnlockMutex(q->mutex);  
        return ret;  
    }  
    double get_audio_clock(VideoState *is)  
    {  
        double pts;  
        int hw_buf_size, bytes_per_sec, n;  
      
        pts = is->audio_clock;   
        hw_buf_size = is->audio_buf_size - is->audio_buf_index;  
        bytes_per_sec = 0;  
        n = is->audio_st->codec->channels * 2;  
        if(is->audio_st)  
        {  
            bytes_per_sec = is->audio_st->codec->sample_rate * n;  
        }  
        if(bytes_per_sec)  
        {  
            pts -= (double)hw_buf_size / bytes_per_sec;  
        }  
        return pts;  
    }  
      
    int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr)  
    {  
        int len1, data_size, n;  
        AVPacket *pkt = &is->audio_pkt;  
        double pts;  
      
        for(;;)  
        {  
            while(is->audio_pkt_size > 0)  
            {  
                data_size = buf_size;  
                len1 = avcodec_decode_audio2(is->audio_st->codec,  
                                             (int16_t *)audio_buf, &data_size,  
                                             is->audio_pkt_data, is->audio_pkt_size);  
                if(len1 < 0)  
                {  
                      
                    is->audio_pkt_size = 0;  
                    break;  
                }  
                is->audio_pkt_data += len1;  
                is->audio_pkt_size -= len1;  
                if(data_size <= 0)  
                {  
                      
                    continue;  
                }  
                pts = is->audio_clock;  
                *pts_ptr = pts;  
                n = 2 * is->audio_st->codec->channels;  
                is->audio_clock += (double)data_size /  
                                   (double)(n * is->audio_st->codec->sample_rate);  
      
                  
                return data_size;  
            }  
            if(pkt->data)  
                av_free_packet(pkt);  
      
            if(is->quit)  
            {  
                return -1;  
            }  
              
            if(packet_queue_get(&is->audioq, pkt, 1) < 0)  
            {  
                return -1;  
            }  
            is->audio_pkt_data = pkt->data;  
            is->audio_pkt_size = pkt->size;  
              
            if(pkt->pts != AV_NOPTS_VALUE)  
            {  
                is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;  
            }  
        }  
    }  
      
    void audio_callback(void *userdata, Uint8 *stream, int len)  
    {  
        VideoState *is = (VideoState *)userdata;  
        int len1, audio_size;  
        double pts;  
      
        while(len > 0)  
        {  
            if(is->audio_buf_index >= is->audio_buf_size)  
            {  
                  
                audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);  
                if(audio_size < 0)  
                {  
                      
                    is->audio_buf_size = 1024;  
                    memset(is->audio_buf, 0, is->audio_buf_size);  
                }  
                else  
                {  
                    is->audio_buf_size = audio_size;  
                }  
                is->audio_buf_index = 0;  
            }  
            len1 = is->audio_buf_size - is->audio_buf_index;  
            if(len1 > len)  
                len1 = len;  
            memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);  
            len -= len1;  
            stream += len1;  
            is->audio_buf_index += len1;  
        }  
    }  
      
    static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)  
    {  
        SDL_Event event;  
        event.type = FF_REFRESH_EVENT;  
        event.user.data1 = opaque;  
        SDL_PushEvent(&event);  
        return 0;   
    }  
      
      
    static void schedule_refresh(VideoState *is, int delay)  
    {  
        SDL_AddTimer(delay, sdl_refresh_timer_cb, is);  
    }  
      
    void video_display(VideoState *is)  
    {  
        SDL_Rect rect;  
        VideoPicture *vp;  
        AVPicture pict;  
        float aspect_ratio;  
        int w, h, x, y;  
        int i;  
      
        vp = &is->pictq[is->pictq_rindex];  
        if(vp->bmp)  
        {  
            if(is->video_st->codec->sample_aspect_ratio.num == 0)  
            {  
                aspect_ratio = 0;  
            }  
            else  
            {  
                aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio) *  
                               is->video_st->codec->width / is->video_st->codec->height;  
            }  
            if(aspect_ratio <= 0.0)  
            {  
                aspect_ratio = (float)is->video_st->codec->width /  
                               (float)is->video_st->codec->height;  
            }  
            h = screen->h;  
            w = ((int)(h * aspect_ratio)) & -3;  
            if(w > screen->w)  
            {  
                w = screen->w;  
                h = ((int)(w / aspect_ratio)) & -3;  
            }  
            x = (screen->w - w) / 2;  
            y = (screen->h - h) / 2;  
      
            rect.x = x;  
            rect.y = y;  
            rect.w = w;  
            rect.h = h;  
            SDL_DisplayYUVOverlay(vp->bmp, &rect);  
        }  
    }  
      
    void video_refresh_timer(void *userdata)  
    {  
        VideoState *is = (VideoState *)userdata;  
        VideoPicture *vp;  
        double actual_delay, delay, sync_threshold, ref_clock, diff;  
      
        if(is->video_st)  
        {  
            if(is->pictq_size == 0)  
            {  
                schedule_refresh(is, 1);  
            }  
            else  
            {  
                vp = &is->pictq[is->pictq_rindex];  
      
                delay = vp->pts - is->frame_last_pts;   
                if(delay <= 0 || delay >= 1.0)  
                {  
                      
                    delay = is->frame_last_delay;  
                }  
                  
                is->frame_last_delay = delay;  
                is->frame_last_pts = vp->pts;  
      
                  
                ref_clock = get_audio_clock(is);  
                diff = vp->pts - ref_clock;  
      
                  
                sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;  
                if(fabs(diff) < AV_NOSYNC_THRESHOLD)  
                {  
                    if(diff <= -sync_threshold)  
                    {  
                        delay = 0;  
                    }  
                    else if(diff >= sync_threshold)  
                    {  
                        delay = 2 * delay;  
                    }  
                }  
                is->frame_timer += delay;  
                  
                actual_delay = is->frame_timer - (av_gettime() / 1000000.0);  
                if(actual_delay < 0.010)  
                {  
                      
                    actual_delay = 0.010;  
                }  
                schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));  
                  
                video_display(is);  
      
                  
                if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)  
                {  
                    is->pictq_rindex = 0;  
                }  
                SDL_LockMutex(is->pictq_mutex);  
                is->pictq_size--;  
                SDL_CondSignal(is->pictq_cond);  
                SDL_UnlockMutex(is->pictq_mutex);  
            }  
        }  
        else  
        {  
            schedule_refresh(is, 100);  
        }  
    }  
      
    void alloc_picture(void *userdata)  
    {  
        VideoState *is = (VideoState *)userdata;  
        VideoPicture *vp;  
      
        vp = &is->pictq[is->pictq_windex];  
        if(vp->bmp)  
        {  
            // we already have one make another, bigger/smaller  
            SDL_FreeYUVOverlay(vp->bmp);  
        }  
        // Allocate a place to put our YUV image on that screen  
        vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,  
                                       is->video_st->codec->height,  
                                       SDL_YV12_OVERLAY,  
                                       screen);  
        vp->width = is->video_st->codec->width;  
        vp->height = is->video_st->codec->height;  
      
        SDL_LockMutex(is->pictq_mutex);  
        vp->allocated = 1;  
        SDL_CondSignal(is->pictq_cond);  
        SDL_UnlockMutex(is->pictq_mutex);  
    }  
      
    int queue_picture(VideoState *is, AVFrame *pFrame, double pts)  
    {  
        VideoPicture *vp;  
        //int dst_pix_fmt;  
        AVPicture pict;  
      
          
        SDL_LockMutex(is->pictq_mutex);  
        while(is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&  
                !is->quit)  
        {  
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);  
        }  
        SDL_UnlockMutex(is->pictq_mutex);  
      
        if(is->quit)  
            return -1;  
      
        // windex is set to 0 initially  
        vp = &is->pictq[is->pictq_windex];  
      
          
        if(!vp->bmp ||  
                vp->width != is->video_st->codec->width ||  
                vp->height != is->video_st->codec->height)  
        {  
            SDL_Event event;  
      
            vp->allocated = 0;  
              
            event.type = FF_ALLOC_EVENT;  
            event.user.data1 = is;  
            SDL_PushEvent(&event);  
      
              
            SDL_LockMutex(is->pictq_mutex);  
            while(!vp->allocated && !is->quit)  
            {  
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);  
            }  
            SDL_UnlockMutex(is->pictq_mutex);  
            if(is->quit)  
            {  
                return -1;  
            }  
        }  
          
          
        static struct SwsContext *img_convert_ctx;  
        if (img_convert_ctx == NULL)  
        {  
            img_convert_ctx = sws_getContext(is->video_st->codec->width, is->video_st->codec->height,  
                                             is->video_st->codec->pix_fmt,  
                                             is->video_st->codec->width, is->video_st->codec->height,  
                                             PIX_FMT_YUV420P,  
                                             SWS_BICUBIC, NULL, NULL, NULL);  
            if (img_convert_ctx == NULL)  
            {  
                fprintf(stderr, "Cannot initialize the conversion context\n");  
                exit(1);  
            }  
        }  
      
        if(vp->bmp)  
        {  
            SDL_LockYUVOverlay(vp->bmp);  
      
            //dst_pix_fmt = PIX_FMT_YUV420P;  
              
      
            pict.data[0] = vp->bmp->pixels[0];  
            pict.data[1] = vp->bmp->pixels[2];  
            pict.data[2] = vp->bmp->pixels[1];  
      
            pict.linesize[0] = vp->bmp->pitches[0];  
            pict.linesize[1] = vp->bmp->pitches[2];  
            pict.linesize[2] = vp->bmp->pitches[1];  
      
            // Convert the image into YUV format that SDL uses  
              
            sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,  
                              0, is->video_st->codec->height, pict.data, pict.linesize);  
            SDL_UnlockYUVOverlay(vp->bmp);  
            vp->pts = pts;  
      
              
            if(++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)  
            {  
                is->pictq_windex = 0;  
            }  
            SDL_LockMutex(is->pictq_mutex);  
            is->pictq_size++;  
            SDL_UnlockMutex(is->pictq_mutex);  
        }  
        return 0;  
    }  
      
    double synchronize_video(VideoState *is, AVFrame *src_frame, double pts)  
    {  
        double frame_delay;  
      
        if(pts != 0)  
        {  
              
            is->video_clock = pts;  
        }  
        else  
        {  
              
            pts = is->video_clock;  
        }  
          
        frame_delay = av_q2d(is->video_st->codec->time_base);  
          
        frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);  
        is->video_clock += frame_delay;  
        return pts;  
    }  
    uint64_t global_video_pkt_pts = AV_NOPTS_VALUE;  
      
      
    int our_get_buffer(struct AVCodecContext *c, AVFrame *pic)  
    {  
        int ret = avcodec_default_get_buffer(c, pic);  
        uint64_t *pts = (uint64_t *)av_malloc(sizeof(uint64_t));  
        *pts = global_video_pkt_pts;  
        pic->opaque = pts;  
        return ret;  
    }  
    void our_release_buffer(struct AVCodecContext *c, AVFrame *pic)  
    {  
        if(pic) av_freep(&pic->opaque);  
        avcodec_default_release_buffer(c, pic);  
    }  
      
    int video_thread(void *arg)  
    {  
        VideoState *is = (VideoState *)arg;  
        AVPacket pkt1, *packet = &pkt1;  
        int len1, frameFinished;  
        AVFrame *pFrame;  
        double pts;  
      
        pFrame = avcodec_alloc_frame();  
      
        for(;;)  
        {  
            if(packet_queue_get(&is->videoq, packet, 1) < 0)  
            {  
                // means we quit getting packets  
                break;  
            }  
            pts = 0;  
      
            // Save global pts to be stored in pFrame in first call  
            global_video_pkt_pts = packet->pts;  
            // Decode video frame  
            len1 = avcodec_decode_video(is->video_st->codec, pFrame, &frameFinished,  
                                        packet->data, packet->size);  
            if(packet->dts == AV_NOPTS_VALUE  
                    && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE)  
            {  
                pts = *(uint64_t *)pFrame->opaque;  
            }  
            else if(packet->dts != AV_NOPTS_VALUE)  
            {  
                pts = packet->dts;  
            }  
            else  
            {  
                pts = 0;  
            }  
            pts *= av_q2d(is->video_st->time_base);  
      
            // Did we get a video frame?  
            if(frameFinished)  
            {  
                pts = synchronize_video(is, pFrame, pts);  
                if(queue_picture(is, pFrame, pts) < 0)  
                {  
                    break;  
                }  
            }  
            av_free_packet(packet);  
        }  
        av_free(pFrame);  
        return 0;  
    }  
      
    int stream_component_open(VideoState *is, int stream_index)  
    {  
        AVFormatContext *pFormatCtx = is->pFormatCtx;  
        AVCodecContext *codecCtx;  
        AVCodec *codec;  
        SDL_AudioSpec wanted_spec, spec;  
      
        if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams)  
        {  
            return -1;  
        }  
      
        // Get a pointer to the codec context for the video stream  
        codecCtx = pFormatCtx->streams[stream_index]->codec;  
      
        if(codecCtx->codec_type == CODEC_TYPE_AUDIO)  
        {  
            // Set audio settings from codec info  
            wanted_spec.freq = codecCtx->sample_rate;  
            wanted_spec.format = AUDIO_S16SYS;  
            wanted_spec.channels = codecCtx->channels;  
            wanted_spec.silence = 0;  
            wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;  
            wanted_spec.callback = audio_callback;  
            wanted_spec.userdata = is;  
      
            if(SDL_OpenAudio(&wanted_spec, &spec) < 0)  
            {  
                fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());  
                return -1;  
            }  
            is->audio_hw_buf_size = spec.size;  
        }  
        codec = avcodec_find_decoder(codecCtx->codec_id);  
      
        if(!codec || (avcodec_open(codecCtx, codec) < 0))  
        {  
            fprintf(stderr, "Unsupported codec!\n");  
            return -1;  
        }  
      
        switch(codecCtx->codec_type)  
        {  
        case CODEC_TYPE_AUDIO:  
            is->audioStream = stream_index;  
            is->audio_st = pFormatCtx->streams[stream_index];  
            is->audio_buf_size = 0;  
            is->audio_buf_index = 0;  
            memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));  
            packet_queue_init(&is->audioq);  
            SDL_PauseAudio(0);  
            break;  
        case CODEC_TYPE_VIDEO:  
            is->videoStream = stream_index;  
            is->video_st = pFormatCtx->streams[stream_index];  
      
            is->frame_timer = (double)av_gettime() / 1000000.0;  
            is->frame_last_delay = 40e-3;  
      
            packet_queue_init(&is->videoq);  
            is->video_tid = SDL_CreateThread(video_thread, is);  
            codecCtx->get_buffer = our_get_buffer;  
            codecCtx->release_buffer = our_release_buffer;  
            break;  
        default:  
            break;  
        }  
        return 0;  
    }  
      
    int decode_interrupt_cb(void)  
    {  
        return (global_video_state && global_video_state->quit);  
    }  
      
    int decode_thread(void *arg)  
    {  
      
        VideoState *is = (VideoState *)arg;  
        AVFormatContext *pFormatCtx;  
        AVPacket pkt1, *packet = &pkt1;  
      
        int video_index = -1;  
        int audio_index = -1;  
        int i;  
      
        is->videoStream=-1;  
        is->audioStream=-1;  
      
        global_video_state = is;  
        // will interrupt blocking functions if we quit!  
        url_set_interrupt_cb(decode_interrupt_cb);  
      
        // Open video file  
        if(av_open_input_file(&pFormatCtx, is->filename, NULL, 0, NULL)!=0)  
            return -1; // Couldn't open file  
      
        is->pFormatCtx = pFormatCtx;  
      
        // Retrieve stream information  
        if(av_find_stream_info(pFormatCtx)<0)  
            return -1; // Couldn't find stream information  
      
        // Dump information about file onto standard error  
        dump_format(pFormatCtx, 0, is->filename, 0);  
      
        // Find the first video stream  
      
        for(i=0; i<pFormatCtx->nb_streams; i++)  
        {  
            if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO &&  
                    video_index < 0)  
            {  
                video_index=i;  
            }  
            if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO &&  
                    audio_index < 0)  
            {  
                audio_index=i;  
            }  
        }  
        if(audio_index >= 0)  
        {  
            stream_component_open(is, audio_index);  
        }  
        if(video_index >= 0)  
        {  
            stream_component_open(is, video_index);  
        }  
      
        if(is->videoStream < 0 || is->audioStream < 0)  
        {  
            fprintf(stderr, "%s: could not open codecs\n", is->filename);  
            goto fail;  
        }  
      
        // main decode loop  
      
        for(;;)  
        {  
            if(is->quit)  
            {  
                break;  
            }  
            // seek stuff goes here  
            if(is->audioq.size > MAX_AUDIOQ_SIZE ||  
                    is->videoq.size > MAX_VIDEOQ_SIZE)  
            {  
                SDL_Delay(10);  
                continue;  
            }  
            if(av_read_frame(is->pFormatCtx, packet) < 0)  
            {  
                if(url_ferror(pFormatCtx->pb) == 0)  
                {  
                    SDL_Delay(100);   
                    continue;  
                }  
                else  
                {  
                    break;  
                }  
            }  
            // Is this a packet from the video stream?  
            if(packet->stream_index == is->videoStream)  
            {  
                packet_queue_put(&is->videoq, packet);  
            }  
            else if(packet->stream_index == is->audioStream)  
            {  
                packet_queue_put(&is->audioq, packet);  
            }  
            else  
            {  
                av_free_packet(packet);  
            }  
        }  
          
        while(!is->quit)  
        {  
            SDL_Delay(100);  
        }  
      
    fail:  
        {  
            SDL_Event event;  
            event.type = FF_QUIT_EVENT;  
            event.user.data1 = is;  
            SDL_PushEvent(&event);  
        }  
        return 0;  
    }  
      
    int main(int argc, char *argv[])  
    {  
      
        SDL_Event       event;  
        VideoState      *is;  
        is = (VideoState *)av_mallocz(sizeof(VideoState));  
        if(argc < 2)  
        {  
            fprintf(stderr, "Usage: test <file>\n");  
            exit(1);  
        }  
        // Register all formats and codecs  
        av_register_all();  
        if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))  
        {  
            fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());  
            exit(1);  
        }  
      
        // Make a screen to put our video  
    #ifndef __DARWIN__  
        screen = SDL_SetVideoMode(640, 480, 0, 0);  
    #else  
        screen = SDL_SetVideoMode(640, 480, 24, 0);  
    #endif  
        if(!screen)  
        {  
            fprintf(stderr, "SDL: could not set video mode - exiting\n");  
            exit(1);  
        }  
      
        //pstrcpy(is->filename, sizeof(is->filename), argv[1]);  
        strcpy(is->filename,argv[1]);  
        is->pictq_mutex = SDL_CreateMutex();  
        is->pictq_cond = SDL_CreateCond();  
      
        schedule_refresh(is, 40);  
      
        is->parse_tid = SDL_CreateThread(decode_thread, is);  
        if(!is->parse_tid)  
        {  
            av_free(is);  
            return -1;  
        }  
        for(;;)  
        {  
            SDL_WaitEvent(&event);  
            switch(event.type)  
            {  
            case FF_QUIT_EVENT:  
            case SDL_QUIT:  
                is->quit = 1;  
                SDL_Quit();  
                exit(0);  
                break;  
            case FF_ALLOC_EVENT:  
                alloc_picture(event.user.data1);  
                break;  
            case FF_REFRESH_EVENT:  
                video_refresh_timer(event.user.data1);  
                break;  
            default:  
                break;  
            }  
        }  
        return 0;  
    } 

    Please take a look at it for me.

    Do you have any up-to-date FFmpeg transcoding sample code?
    Reply to @Jack.arain: Could you give me a sample? The code above is itself sample code; I swapped in avcodec_decode_audio3,
    I looked at the code. I don't know which FFmpeg version you are using, but this code is very old. Also, in later FFmpeg versions the avcodec_decode_audio functions may report decoded data that is longer than the actual audio, so you need av_samples_get_buffer_size to get the real size.
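    For reference, a rough sketch of how that newer decode path measures the real PCM size (assuming an FFmpeg version that already has avcodec_decode_audio4; dec_ctx, pkt, out_buf and out_buf_size are placeholder names, and the memcpy only applies to packed, non-planar sample formats):

    #include "libavcodec/avcodec.h"
    #include "libavutil/samplefmt.h"
    #include <string.h>

    /* Sketch only: decode one audio packet and return the number of PCM bytes
     * actually produced, measured with av_samples_get_buffer_size instead of
     * trusting the size of the destination buffer. */
    static int decode_audio_packet(AVCodecContext *dec_ctx, AVPacket *pkt,
                                   uint8_t *out_buf, int out_buf_size)
    {
        AVFrame *frame = avcodec_alloc_frame();
        int got_frame = 0;
        int data_size = -1;

        int len = avcodec_decode_audio4(dec_ctx, frame, &got_frame, pkt);
        if (len >= 0 && got_frame) {
            /* The decoder's buffer may be larger than the audio it holds;
             * this call returns the size of the valid samples only. */
            data_size = av_samples_get_buffer_size(NULL, dec_ctx->channels,
                                                   frame->nb_samples,
                                                   dec_ctx->sample_fmt, 1);
            if (data_size > 0 && data_size <= out_buf_size)
                memcpy(out_buf, frame->data[0], data_size); /* packed formats only */
        }
        av_free(frame);
        return data_size;
    }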

    I don't know C++, but I have written quite a few audio/video decoding cores. If the core decodes other formats without errors, the noise basically comes from one of two places: the output PCM buffer is not clean, or the input is missing band information (some channel receives no data). In the first case, your output buffer may have been written to by other code (including the decoder itself); check whether you have memory-leak or corruption problems. In the second case, your input is incomplete. The second case is more likely; it is fairly unlikely that the first would inject noise at regular intervals.

    The way to track down this kind of problem is to start with the simplest possible call from the outside, verify that both the calling code and the core decoding module are fine, and only then stack the other system modules back on top.
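    To make that concrete, a minimal sketch of such an outermost, simplest call for the audio path might look like the following: it bypasses SDL and the packet queues entirely and just writes decoded PCM to a file, which can then be auditioned with something like ffplay -f s16le. The names dump_audio_pcm, fmt_ctx, dec_ctx, audio_index and out_path are made up for the example, and it stays on the same old avcodec_decode_audio* API generation as the posted code:

    /* Sketch only: decode just the audio stream and dump raw PCM to a file.
     * If the dump already sounds noisy, the problem is in decoding or in the
     * input; if it sounds clean, the problem is in the playback path.
     * (Simplification: packets containing more than one audio frame are only
     * partially decoded here.) */
    static void dump_audio_pcm(AVFormatContext *fmt_ctx, AVCodecContext *dec_ctx,
                               int audio_index, const char *out_path)
    {
        static int16_t samples[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2];
        FILE *out = fopen(out_path, "wb");
        AVPacket pkt;

        if (!out)
            return;
        while (av_read_frame(fmt_ctx, &pkt) >= 0) {
            if (pkt.stream_index == audio_index) {
                int out_size = sizeof(samples);   /* in: output buffer size in bytes */
                int used = avcodec_decode_audio3(dec_ctx, samples, &out_size, &pkt);
                if (used >= 0 && out_size > 0)
                    fwrite(samples, 1, out_size, out); /* out_size: bytes written */
            }
            av_free_packet(&pkt);
        }
        fclose(out);
    }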
    This is indeed old code; new code now uses avcodec_decode_audio4. What version is your FFmpeg library? Newer versions ship libswresample, which can resample your audio so that playback no longer has noise. If you want example code, look at ffplay.c in your FFmpeg source tree; its interfaces match your FFmpeg version, so you can see how it handles audio. Hope this helps.
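    As a rough illustration of that suggestion, here is a minimal libswresample sketch that converts whatever sample format the decoder produces (cook decodes to planar float in newer FFmpeg builds) into the interleaved S16 that the SDL audio spec above asks for. It assumes a reasonably recent FFmpeg; dec_ctx and frame are assumed to come from an avcodec_decode_audio4 call, frame_to_s16 is an invented name, and error handling is mostly omitted:

    #include "libswresample/swresample.h"
    #include "libavutil/channel_layout.h"

    /* Sketch only: convert one decoded frame to interleaved S16 for SDL. */
    static int frame_to_s16(AVCodecContext *dec_ctx, AVFrame *frame,
                            uint8_t *out_buf, int out_buf_size)
    {
        static SwrContext *swr;   /* created once, then reused */
        if (!swr) {
            swr = swr_alloc_set_opts(NULL,
                    av_get_default_channel_layout(dec_ctx->channels),
                    AV_SAMPLE_FMT_S16, dec_ctx->sample_rate,      /* output */
                    av_get_default_channel_layout(dec_ctx->channels),
                    dec_ctx->sample_fmt, dec_ctx->sample_rate,    /* input  */
                    0, NULL);
            if (!swr || swr_init(swr) < 0)
                return -1;
        }
        uint8_t *out[1] = { out_buf };
        int n = swr_convert(swr, out, out_buf_size / (2 * dec_ctx->channels),
                            (const uint8_t **)frame->extended_data,
                            frame->nb_samples);
        return n < 0 ? n : n * 2 * dec_ctx->channels;  /* bytes of S16 produced */
    }

    This mirrors what ffplay.c does in its audio path when the decoder's output format does not match the device format.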
    2020-06-05 13:33:51