首页 技术 正文
技术 2022年11月20日
0 收藏 768 点赞 4,238 浏览 12194 个字

其实这篇的内容和(一)用ffmpeg解码视频基本是一样的,重点还是给ffmpeg指定callback函数,而这个函数是从RTSP服务端那里获取音频数据的。

这里,解码音频的示例代码量之所以比解码视频的略微复杂,主要是因为ffmpeg解码音频时要比解码视频要复杂一点,具体可以参见ffmpeg解码音频示例以及官网示例代码

具体内容将不再赘述,源码如下:

 extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
} #include <SDL.h>
#include <SDL_thread.h> #ifdef __MINGW32__
#undef main /* Prevents SDL from overriding main() */
#endif #include <stdio.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h> // compatibility with newer API
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc avcodec_alloc_frame
#define av_frame_free avcodec_free_frame
#endif #define SDL_AUDIO_BUFFER_SIZE 1024
#define MAX_AUDIO_FRAME_SIZE 192000 #include <signal.h>
#include "rtspClient.h"
#include <iostream>
#include <string> using std::cout;
using std::endl; int rtspClientRequest(RtspClient * Client, string url);
int fill_iobuffer(void * opaque, uint8_t * buf, int bufsize); typedef struct AudioParams {
int freq;
int channels;
int64_t channel_layout;
enum AVSampleFormat fmt;
int frame_size;
int bytes_per_sec;
} AudioParams;
int sample_rate, nb_channels;
int64_t channel_layout;
AudioParams audio_hw_params_tgt;
AudioParams audio_hw_params_src; int resample(AVFrame * af, uint8_t * audio_buf, int * audio_buf_size); struct SwrContext * swr_ctx = NULL; int resample(AVFrame * af, uint8_t * audio_buf, int * audio_buf_size)
{
int data_size = ;
int resampled_data_size = ;
int64_t dec_channel_layout;
data_size = av_samples_get_buffer_size(NULL,
av_frame_get_channels(af),
af->nb_samples,
(AVSampleFormat)af->format,
); dec_channel_layout =
(af->channel_layout && av_frame_get_channels(af) == av_get_channel_layout_nb_channels(af->channel_layout)) ?
af->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af));
if( af->format != audio_hw_params_src.fmt ||
af->sample_rate != audio_hw_params_src.freq ||
dec_channel_layout != audio_hw_params_src.channel_layout ||
!swr_ctx) {
swr_free(&swr_ctx);
swr_ctx = swr_alloc_set_opts(NULL,
audio_hw_params_tgt.channel_layout, (AVSampleFormat)audio_hw_params_tgt.fmt, audio_hw_params_tgt.freq,
dec_channel_layout, (AVSampleFormat)af->format, af->sample_rate,
, NULL);
if (!swr_ctx || swr_init(swr_ctx) < ) {
av_log(NULL, AV_LOG_ERROR,
"Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
af->sample_rate, av_get_sample_fmt_name((AVSampleFormat)af->format), av_frame_get_channels(af),
audio_hw_params_tgt.freq, av_get_sample_fmt_name(audio_hw_params_tgt.fmt), audio_hw_params_tgt.channels);
swr_free(&swr_ctx);
return -;
}
printf("swr_init\n");
audio_hw_params_src.channels = av_frame_get_channels(af);
audio_hw_params_src.fmt = (AVSampleFormat)af->format;
audio_hw_params_src.freq = af->sample_rate;
} if (swr_ctx) {
const uint8_t **in = (const uint8_t **)af->extended_data;
uint8_t **out = &audio_buf;
int out_count = (int64_t)af->nb_samples * audio_hw_params_tgt.freq / af->sample_rate + ;
int out_size = av_samples_get_buffer_size(NULL, audio_hw_params_tgt.channels, out_count, audio_hw_params_tgt.fmt, );
int len2;
if (out_size < ) {
av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
return -;
}
av_fast_malloc(&audio_buf, (unsigned int*)audio_buf_size, out_size);
if (!audio_buf)
return AVERROR(ENOMEM);
len2 = swr_convert(swr_ctx, out, out_count, in, af->nb_samples);
if (len2 < ) {
av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
return -;
}
if (len2 == out_count) {
av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
if (swr_init(swr_ctx) < )
swr_free(&swr_ctx);
}
resampled_data_size = len2 * audio_hw_params_tgt.channels * av_get_bytes_per_sample(audio_hw_params_tgt.fmt);
} else {
audio_buf = af->data[];
resampled_data_size = data_size;
} return resampled_data_size;
} static void sigterm_handler(int sig)
{
exit();
} typedef struct PacketQueue {
AVPacketList *first_pkt, *last_pkt;
int nb_packets;
int size;
SDL_mutex *mutex;
SDL_cond *cond;
} PacketQueue; PacketQueue audioq; int quit = ; void packet_queue_init(PacketQueue *q) {
memset(q, , sizeof(PacketQueue));
q->mutex = SDL_CreateMutex();
q->cond = SDL_CreateCond();
} int packet_queue_put(PacketQueue *q, AVPacket *pkt) { AVPacketList *pkt1;
if(av_dup_packet(pkt) < ) {
return -;
}
pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
if (!pkt1)
return -;
pkt1->pkt = *pkt;
pkt1->next = NULL; SDL_LockMutex(q->mutex); if (!q->last_pkt)
q->first_pkt = pkt1;
else
q->last_pkt->next = pkt1;
q->last_pkt = pkt1;
q->nb_packets++;
q->size += pkt1->pkt.size;
SDL_CondSignal(q->cond); SDL_UnlockMutex(q->mutex);
return ;
} int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
{
AVPacket pkt1, *pkt = &pkt1;
av_init_packet(pkt);
pkt->data = NULL;
pkt->size = ;
pkt->stream_index = stream_index;
return packet_queue_put(q, pkt);
} static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
AVPacketList *pkt1;
int ret; SDL_LockMutex(q->mutex); for(;;) { if(quit) {
ret = -;
break;
} pkt1 = q->first_pkt;
if (pkt1) {
q->first_pkt = pkt1->next;
if (!q->first_pkt)
q->last_pkt = NULL;
q->nb_packets--;
q->size -= pkt1->pkt.size;
*pkt = pkt1->pkt;
av_free(pkt1);
ret = ;
break;
} else if (!block) {
ret = ;
break;
} else {
SDL_CondWait(q->cond, q->mutex);
}
}
SDL_UnlockMutex(q->mutex);
return ret;
} AVFrame frame;
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) { static AVPacket pkt;
static uint8_t *audio_pkt_data = NULL;
static int audio_pkt_size = ; int len1, data_size = ; for(;;) {
while(audio_pkt_size > ) {
int got_frame = ;
len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt);
if(len1 < ) {
/* if error, skip frame */
audio_pkt_size = ;
break;
}
audio_pkt_data += len1;
audio_pkt_size -= len1;
data_size = ;
if(got_frame) {
data_size = resample(&frame, audio_buf, &buf_size);
// data_size = av_samples_get_buffer_size(NULL,
// aCodecCtx->channels,
// frame.nb_samples,
// aCodecCtx->sample_fmt,
// 1);
assert(data_size <= buf_size);
// memcpy(audio_buf, frame.data[0], data_size);
}
if(data_size <= ) {
/* No data yet, get more frames */
continue;
}
// memcpy(audio_buf, frame.data[0], data_size); /* We have data, return it and come back for more later */
return data_size;
}
if(pkt.data)
av_free_packet(&pkt); if(quit) {
return -;
} if(packet_queue_get(&audioq, &pkt, ) < ) {
return -;
}
audio_pkt_data = pkt.data;
audio_pkt_size = pkt.size;
}
} void audio_callback(void *userdata, Uint8 *stream, int len) { AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
int len1, audio_size; static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * ) / ];
static unsigned int audio_buf_size = ;
static unsigned int audio_buf_index = ; while(len > ) {
if(audio_buf_index >= audio_buf_size) {
/* We have already sent all our data; get more */
audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf));
if(audio_size < ) {
/* If error, output silence */
audio_buf_size = ; // arbitrary?
memset(audio_buf, , audio_buf_size);
} else {
audio_buf_size = audio_size;
}
audio_buf_index = ;
}
len1 = audio_buf_size - audio_buf_index;
if(len1 > len)
len1 = len;
memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
len -= len1;
stream += len1;
audio_buf_index += len1;
}
} int main(int argc, char *argv[]) { AVFormatContext *pFormatCtx = NULL;
int i, audioStream;
AVPacket packet; AVCodecContext *aCodecCtxOrig = NULL;
AVCodecContext *aCodecCtx = NULL;
AVCodec *aCodec = NULL; SDL_Event event;
SDL_AudioSpec wanted_spec, spec; AVInputFormat *piFmt = NULL;
RtspClient Client; signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */ if(argc != ) {
cout << "Usage: " << argv[] << " <URL>" << endl;
cout << "For example: " << endl;
cout << argv[] << " rtsp://127.0.0.1/ansersion" << endl;
return ;
}
rtspClientRequest(&Client, argv[]);
// Register all formats and codecs
av_register_all(); if(SDL_Init(SDL_INIT_AUDIO)) {
fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
exit();
} // // Open video file
// if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
// return -1; // Couldn't open file pFormatCtx = NULL;
pFormatCtx = avformat_alloc_context();
unsigned char * iobuffer = (unsigned char *)av_malloc();
AVIOContext * avio = avio_alloc_context(iobuffer, , , &Client, fill_iobuffer, NULL, NULL);
pFormatCtx->pb = avio; if(!avio) {
printf("avio_alloc_context error!!!\n");
return -;
} if(av_probe_input_buffer(avio, &piFmt, "", NULL, , ) < ) {
printf("av_probe_input_buffer error!\n");
return -;
} else {
printf("probe success\n");
printf("format: %s[%s]\n", piFmt->name, piFmt->long_name);
} int err = avformat_open_input(&pFormatCtx, "nothing", NULL, NULL);
if(err) {
printf("avformat_open_input error: %d\n", err);
return -;
}
// Retrieve stream information
if(avformat_find_stream_info(pFormatCtx, NULL)<)
return -; // Couldn't find stream information // Dump information about file onto standard error
// av_dump_format(pFormatCtx, 0, argv[1], 0);
av_dump_format(pFormatCtx, , "", ); // Find the first video stream
audioStream=-;
for(i=; i<pFormatCtx->nb_streams; i++) {
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
audioStream < ) {
audioStream=i;
}
}
// if(videoStream==-1)
// return -1; // Didn't find a video stream
if(audioStream==-)
return -; aCodecCtxOrig=pFormatCtx->streams[audioStream]->codec;
aCodec = avcodec_find_decoder(aCodecCtxOrig->codec_id);
if(!aCodec) {
fprintf(stderr, "Unsupported codec!\n");
return -;
} // Copy context
aCodecCtx = avcodec_alloc_context3(aCodec);
if(avcodec_copy_context(aCodecCtx, aCodecCtxOrig) != ) {
fprintf(stderr, "Couldn't copy codec context");
return -; // Error copying codec context
} avcodec_open2(aCodecCtx, aCodec, NULL); sample_rate = aCodecCtx->sample_rate;
nb_channels = aCodecCtx->channels;
channel_layout = aCodecCtx->channel_layout; // printf("channel_layout=%" PRId64 "\n", channel_layout);
printf("channel_layout=%lld\n", channel_layout);
printf("nb_channels=%d\n", nb_channels);
printf("freq=%d\n", sample_rate); if (!channel_layout || nb_channels != av_get_channel_layout_nb_channels(channel_layout)) {
channel_layout = av_get_default_channel_layout(nb_channels);
channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
printf("correction\n");
} // Set audio settings from codec info
wanted_spec.freq = sample_rate;
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.channels = nb_channels;
wanted_spec.silence = ;
wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
wanted_spec.callback = audio_callback;
wanted_spec.userdata = aCodecCtx; if(SDL_OpenAudio(&wanted_spec, &spec) < ) {
fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
return -;
}
printf("freq: %d\tchannels: %d\n", spec.freq, spec.channels); audio_hw_params_tgt.fmt = AV_SAMPLE_FMT_S16;
audio_hw_params_tgt.freq = spec.freq;
audio_hw_params_tgt.channel_layout = channel_layout;
audio_hw_params_tgt.channels = spec.channels;
audio_hw_params_tgt.frame_size = av_samples_get_buffer_size(NULL, audio_hw_params_tgt.channels, , audio_hw_params_tgt.fmt, );
audio_hw_params_tgt.bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params_tgt.channels, audio_hw_params_tgt.freq, audio_hw_params_tgt.fmt, );
if (audio_hw_params_tgt.bytes_per_sec <= || audio_hw_params_tgt.frame_size <= ) {
printf("size error\n");
return -;
}
audio_hw_params_src = audio_hw_params_tgt; // audio_st = pFormatCtx->streams[index]
packet_queue_init(&audioq);
SDL_PauseAudio(); // Read frames and save first five frames to disk
i=;
int ret = ;
// while(av_read_frame(pFormatCtx, &packet)>=0) {
while(ret >= ) {
ret = av_read_frame(pFormatCtx, &packet); if(ret < ) {
/* av_read_frame may get error when RTP data are blocked due to the network busy */
if(ret == AVERROR_EOF || avio_feof(pFormatCtx->pb)) {
packet_queue_put_nullpacket(&audioq, audioStream);
printf("continue ret=%d\n", ret);
ret = ;
continue;
}
printf("ret=%d\n", ret);
break;
}
printf("av_read_frame\n");
if(packet.stream_index==audioStream) {
packet_queue_put(&audioq, &packet);
} else {
av_free_packet(&packet);
}
// Free the packet that was allocated by av_read_frame
SDL_PollEvent(&event);
switch(event.type) {
case SDL_QUIT:
printf("SDL_QUIT\n");
quit = ;
SDL_Quit();
exit();
break;
default:
printf("SDL_Default\n");
break;
} } while() SDL_Delay(); // Close the codecs
avcodec_close(aCodecCtxOrig);
avcodec_close(aCodecCtx); // Close the video file
avformat_close_input(&pFormatCtx); return ;
} int rtspClientRequest(RtspClient * Client, string url)
{
if(!Client) return -; // cout << "Start play " << url << endl;
string RtspUri(url);
// string RtspUri("rtsp://192.168.81.145/ansersion"); /* Set up rtsp server resource URI */
Client->SetURI(RtspUri); /* Send DESCRIBE command to server */
Client->DoDESCRIBE(); /* Parse SDP message after sending DESCRIBE command */
Client->ParseSDP(); /* Send SETUP command to set up all 'audio' and 'video'
* sessions which SDP refers. */
Client->DoSETUP(); /* Send PLAY command to play only 'video' sessions.*/
Client->DoPLAY("audio"); return ;
} int fill_iobuffer(void * opaque, uint8_t * buf, int bufsize) {
size_t size = ;
if(!opaque) return -;
RtspClient * Client = (RtspClient *)opaque;
if(!Client->GetMediaData("audio", buf, &size, bufsize)) size = ;
printf("fill_iobuffer size: %u\n", size);
return size;
}

注:

1, 兼容myRtspClient-1.2.1及以上版本,且仅支持接收mp2,mp3音频;

2, 音频解码原理可参见:http://www.cnblogs.com/ansersion/p/5265033.html;

3, 示例源码编译需要SDL和ffmpeg,具体可参见解码视频的附录二;

4, 博主编译环境为 x86_64位ubuntu 16.04,以供参考。

myRtspClient-1.2.3

ffmpeg-2.8.5

下载源码以及Makefile

编译、配置和运行同上一篇:用ffmpeg解码视频

一个基于JRTPLIB的轻量级RTSP客户端(myRTSPClient)——解码篇:(二)用ffmpeg解码音频

上一篇               回目录            下一篇

相关推荐
python开发_常用的python模块及安装方法
adodb:我们领导推荐的数据库连接组件bsddb3:BerkeleyDB的连接组件Cheetah-1.0:我比较喜欢这个版本的cheeta…
日期:2022-11-24 点赞:878 阅读:9,000
Educational Codeforces Round 11 C. Hard Process 二分
C. Hard Process题目连接:http://www.codeforces.com/contest/660/problem/CDes…
日期:2022-11-24 点赞:807 阅读:5,512
下载Ubuntu 17.04 内核源代码
zengkefu@server1:/usr/src$ uname -aLinux server1 4.10.0-19-generic #21…
日期:2022-11-24 点赞:569 阅读:6,358
可用Active Desktop Calendar V7.86 注册码序列号
可用Active Desktop Calendar V7.86 注册码序列号Name: www.greendown.cn Code: &nb…
日期:2022-11-24 点赞:733 阅读:6,141
Android调用系统相机、自定义相机、处理大图片
Android调用系统相机和自定义相机实例本博文主要是介绍了android上使用相机进行拍照并显示的两种方式,并且由于涉及到要把拍到的照片显…
日期:2022-11-24 点赞:512 阅读:7,771
Struts的使用
一、Struts2的获取  Struts的官方网站为:http://struts.apache.org/  下载完Struts2的jar包,…
日期:2022-11-24 点赞:671 阅读:4,849