在之前篇中的解封装的最后,把音视频的压缩数据分别放入了视频和音频的队列中,上篇中展示了视频的播放流程,本篇展示音频的播放过程。
音频的播放主要采用 OpenSL ES 进行。OpenSL ES 是无授权费、跨平台、针对嵌入式系统精心优化的硬件音频加速 API。该库允许使用 C 或 C++ 实现高性能、低延迟的音频操作。Android 的 OpenSL ES 库同样位于 NDK 的 platforms 文件夹内。
配置 CMake 中增加的链接配置: target_link_libraries ( native-lib .... OpenSLES )
引入openSLES的头文件 1 2 #include <SLES/OpenSLES.h> #include <SLES/OpenSLES_Android.h>
相关的结构体 1 2 3 4 5 6 7 8 9 10 11 12 SLObjectItf engineObject = 0 ; SLEngineItf engineInterface = 0 ; SLObjectItf outputMixObject = 0 ; SLObjectItf bqPlayerObject = 0 ; SLPlayItf bqPlayerPlay = 0 ; SLAndroidSimpleBufferQueueItf bqPlayerBufferQueue = 0 ;
重采样相关的函数 1> swr_alloc:申请内存
2> swr_alloc_set_opts:设置参数
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 @param SwrContext *s:音频重采样上下文 @param int64_t out_ch_layout:输出的声道样式 @param AVSampleFormat out_sample_fmt:输出的采样格式 @param int out_sample_rate:输出采样率 @param int64_t in_ch_layout:输入的声道样式 @param AVSampleFormat in_sample_fmt:输入的采样格式 @param int in_sample_rate:输入的采样率 @param int log_offset, void *log_ctx:日志参数
3> swr_init:初始化
4> swr_convert:转换
1 2 3 4 5 @param SwrContext *s:音频重采样上下文 @param uint8_t **out, int out_count:输出的数据和单通道数据大小 @param uint8_t **in, int in_count:输入的数据和单通道数据大小
5> swr_free:释放
音频的解码流程 跟视频一样,同样是两个队列两个线程
两个队列
存放压缩数据的队列 queue<AVPacket *>
存放解码后原始数据的队列 queue<AVFrame *>
两个线程
第一个线程是取出队列的压缩包 进行解码,解码后的原始包 丢到 frame 队列中去
第二个线程是从frame队列中取出原始包进行播放
1 2 3 4 5 6 7 8 9 10 11 12 void AudioChannel::start () { isPlaying = true ; packets.setWork (1 ); frames.setWork (1 ); pthread_create (&pid_audio_decode, nullptr , task_audio_decode, this ); pthread_create (&pid_audio_play, nullptr , task_audio_play, this ); }
音频:取出队列的压缩包 进行解码 解码后的原始包 再push队列中去 (音频:PCM数据)
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 void AudioChannel::audio_decode () { AVPacket *packet = nullptr ; while (isPlaying){ int res = packets.getQueueAndDel (packet); if (!isPlaying){ releaseAVPacket (&packet); break ; } if (!res){ continue ; } res = avcodec_send_packet (codecContext, packet); if (res){ LOGE ("audio channel decode AVPacket fail !!!" ) releaseAVPacket (&packet); break ; } AVFrame *frame = av_frame_alloc (); res = avcodec_receive_frame (codecContext, frame); if (res == AVERROR (EAGAIN)){ continue ; }else if (res != 0 ){ LOGE ("audio channel receive AVFrame fail !!!" ) releaseAVPacket (&packet); break ; } if (isPlaying){ frames.insert (frame); }else { releaseAVFrame (&frame); } releaseAVPacket (&packet); } releaseAVPacket (&packet); }
重采样播放 1.创建引擎对象并获取【引擎接口】 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 result = slCreateEngine (&engineObject, 0 , nullptr , 0 , nullptr , nullptr ); if (SL_RESULT_SUCCESS != result){ LOGE ("slCreateEngine error!" ); return ; } (*engineObject)->Realize (engineObject, SL_BOOLEAN_FALSE); result = (*engineObject)->GetInterface (engineObject, SL_IID_ENGINE, &engineInterface); if (SL_RESULT_SUCCESS != result) { LOGE ("engineObject Realize error" ); return ; } if (engineInterface) { LOGD ("engineInterface create success" ); } else { LOGE ("engineInterface create error" ); return ;
2.设置混音器 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 result = (*engineInterface)->CreateOutputMix (engineInterface, &outputMixObject, 0 , nullptr , nullptr ); if (SL_RESULT_SUCCESS != result) { LOGE ("CreateOutputMix failed" ); return ; } result = (*outputMixObject)->Realize (outputMixObject, SL_BOOLEAN_FALSE); if (SL_RESULT_SUCCESS != result) { LOGE ("(*outputMixObject)->Realize failed" ); return ; } LOGI ("outputMixObject Success" );
3.创建播放器 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 10 }; SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM, 2 , SL_SAMPLINGRATE_44_1, SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16, SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT, SL_BYTEORDER_LITTLEENDIAN}; SLDataSource audioSrc = {&loc_bufq, &format_pcm}; SLDataLocator_OutputMix loc_outmix = {SL_DATALOCATOR_OUTPUTMIX, outputMixObject}; SLDataSink audioSnk = {&loc_outmix, nullptr }; const SLInterfaceID ids[1 ] = {SL_IID_BUFFERQUEUE};const SLboolean req[1 ] = {SL_BOOLEAN_TRUE};result = (*engineInterface)->CreateAudioPlayer (engineInterface, &bqPlayerObject, &audioSrc, &audioSnk, 1 , ids, req ); if (SL_RESULT_SUCCESS != result) { LOGE ("CreateAudioPlayer failed!" ); return ; } result = (*bqPlayerObject)->Realize (bqPlayerObject, SL_BOOLEAN_FALSE); if (SL_RESULT_SUCCESS != result) { LOGE ("init bqPlayerObject Realize failed!" ); return ; } LOGI ("bqPlayerObject init success!" );result = (*bqPlayerObject)->GetInterface (bqPlayerObject, SL_IID_PLAY, &bqPlayerPlay); if (SL_RESULT_SUCCESS != result) { LOGE ("bqPlayerObject GetInterface SL_IID_PLAY failed!" ); return ; } LOGI ("bqPlayerObject GetInterface Success" );
4.设置回调函数 1 2 3 4 5 6 7 8 9 10 11 12 result = (*bqPlayerObject)->GetInterface (bqPlayerObject, SL_IID_BUFFERQUEUE, &bqPlayerBufferQueue); if (result != SL_RESULT_SUCCESS) { LOGE ("bqPlayerBufferQueue GetInterface SL_IID_BUFFERQUEUE failed!" ); return ; } (*bqPlayerBufferQueue)->RegisterCallback (bqPlayerBufferQueue, bqPlayerCallback, this ); LOGI ("setting playCallback Success" );
5.设置播放器状态为播放状态 1 2 (*bqPlayerPlay)->SetPlayState (bqPlayerPlay, SL_PLAYSTATE_PLAYING); LOGI ("SetPlayState Success" );
6.手动激活回调函数 1 2 bqPlayerCallback (bqPlayerBufferQueue, this );LOGI ("active playCallback Success" );
7.重采样 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 int AudioChannel::getPCM () { int pcm_data_size = 0 ; AVFrame *frame = nullptr ; while (isPlaying) { int ret = frames.getQueueAndDel (frame); if (!isPlaying) { break ; } if (!ret) { continue ; } int dst_nb_samples = av_rescale_rnd (swr_get_delay (swr_ctx, frame->sample_rate) + frame->nb_samples, out_sample_rate, frame->sample_rate, AV_ROUND_UP); int samples_per_channel = swr_convert (swr_ctx, &out_buffers, dst_nb_samples, (const uint8_t **) frame->data, frame->nb_samples); pcm_data_size = samples_per_channel * out_sample_size * out_channels; audio_time = frame->best_effort_timestamp * av_q2d (time_base); break ; } releaseAVFrame (&frame); return pcm_data_size; }
源码地址:https://github.com/jiajunhui/ffmpeg-jjhplayer