# 开发音频通话功能

在音频通话场景下,音频输出(播放对端声音)和音频输入(录制本端声音)会同时进行,应用可以通过使用AudioRenderer来实现音频输出,通过使用AudioCapturer来实现音频输入,同时使用AudioRenderer和AudioCapturer即可实现音频通话功能。

在音频通话开始和结束时,应用可以自行检查当前的[音频场景模式](audio-call-overview.md#音频场景模式)和[铃声模式](audio-call-overview.md#铃声模式),以便采取合适的音频管理及提示策略。

以下代码示范了同时使用AudioRenderer和AudioCapturer实现音频通话功能的基本过程,其中未包含音频通话数据的传输过程,实际开发中,需要将网络传输来的对端通话数据解码播放,此处仅以读取音频文件的数据代替;同时需要将本端录制的通话数据编码打包,通过网络发送给对端,此处仅以将数据写入音频文件代替。

## 使用AudioRenderer播放对端的通话声音

该过程与[使用AudioRenderer开发音频播放功能](using-audiorenderer-for-playback.md)过程相似,关键区别在于audioRendererInfo参数和音频数据来源。audioRendererInfo参数中,音频内容类型需设置为语音:CONTENT_TYPE_SPEECH,音频流使用类型需设置为VOIP通话:STREAM_USAGE_VOICE_COMMUNICATION。

```ts
import { audio } from '@kit.AudioKit';
import { fileIo as fs } from '@kit.CoreFileKit';
import { BusinessError } from '@kit.BasicServicesKit';

const TAG = 'VoiceCallDemoForAudioRenderer';
// 与使用AudioRenderer开发音频播放功能过程相似,关键区别在于audioRendererInfo参数和音频数据来源
class Options {
  offset?: number;
  length?: number;
}

let bufferSize: number = 0;
let renderModel: audio.AudioRenderer | undefined = undefined;
let audioStreamInfo: audio.AudioStreamInfo = {
  samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, // 采样率
  channels: audio.AudioChannel.CHANNEL_2, // 通道
  sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // 采样格式
  encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // 编码格式
};
let audioRendererInfo: audio.AudioRendererInfo = {
  // 需使用通话场景相应的参数
  usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION, // 音频流使用类型:VOIP通话
  rendererFlags: 0 // 音频渲染器标志:默认为0即可
};
let audioRendererOptions: audio.AudioRendererOptions = {
  streamInfo: audioStreamInfo,
  rendererInfo: audioRendererInfo
};
let path = getContext().cacheDir;
// 确保该沙箱路径下存在该资源
let filePath = path + '/StarWars10s-2C-48000-4SW.wav';
let file: fs.File = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
let writeDataCallback = (buffer: ArrayBuffer) => {
  let options: Options = {
    offset: bufferSize,
    length: buffer.byteLength
  };
  fs.readSync(file.fd, buffer, options);
  bufferSize += buffer.byteLength;
};

// 初始化,创建实例,设置监听事件
audio.createAudioRenderer(audioRendererOptions, (err: BusinessError, renderer: audio.AudioRenderer) => { // 创建AudioRenderer实例
  if (!err) {
    console.info(`${TAG}: creating AudioRenderer success`);
    renderModel = renderer;
    if (renderModel !== undefined) {
      renderModel.on('stateChange', (state: audio.AudioState) => { // 设置监听事件,当转换到指定的状态时触发回调
        if (state == 1) {
          console.info('audio renderer state is: STATE_PREPARED');
        }
        if (state == 2) {
          console.info('audio renderer state is: STATE_RUNNING');
        }
      });
      renderModel.on('markReach', 1000, (position: number) => { // 订阅markReach事件,当渲染的帧数达到1000帧时触发回调
        if (position == 1000) {
          console.info('ON Triggered successfully');
        }
      });
      renderModel.on('writeData', writeDataCallback);
    }
  } else {
    console.info(`${TAG}: creating AudioRenderer failed, error: ${err.message}`);
  }
});

// 开始一次音频渲染
async function start() {
  if (renderModel !== undefined) {
    let stateGroup: number[] = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
    if (stateGroup.indexOf(renderModel.state.valueOf()) === -1) { // 当且仅当状态为STATE_PREPARED、STATE_PAUSED和STATE_STOPPED之一时才能启动渲染
      console.error(TAG + 'start failed');
      return;
    }
    renderModel.start((err: BusinessError) => {
      if (err) {
        console.error('Renderer start failed.');
      } else {
        console.info('Renderer start success.');
      }
    });
  }
}

// 暂停渲染
async function pause() {
  if (renderModel !== undefined) {
    // 只有渲染器状态为STATE_RUNNING的时候才能暂停
    if (renderModel.state.valueOf() !== audio.AudioState.STATE_RUNNING) {
      console.info('Renderer is not running');
      return;
    }
    await renderModel.pause(); // 暂停渲染
    if (renderModel.state.valueOf() === audio.AudioState.STATE_PAUSED) {
      console.info('Renderer is paused.');
    } else {
      console.error('Pausing renderer failed.');
    }
  }
}

// 停止渲染
async function stop() {
  if (renderModel !== undefined) {
    // 只有渲染器状态为STATE_RUNNING或STATE_PAUSED的时候才可以停止
    if (renderModel.state.valueOf() !== audio.AudioState.STATE_RUNNING && renderModel.state.valueOf() !== audio.AudioState.STATE_PAUSED) {
      console.info('Renderer is not running or paused.');
      return;
    }
    await renderModel.stop(); // 停止渲染
    if (renderModel.state.valueOf() === audio.AudioState.STATE_STOPPED) {
      console.info('Renderer stopped.');
    } else {
      console.error('Stopping renderer failed.');
    }
  }
}

// 销毁实例,释放资源
async function release() {
  if (renderModel !== undefined) {
    // 渲染器状态不是STATE_RELEASED状态,才能release
    if (renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) {
      console.info('Renderer already released');
      return;
    }
    await renderModel.release(); // 释放资源
    if (renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) {
      console.info('Renderer released');
    } else {
      console.error('Renderer release failed.');
    }
  }
}
```

## 使用AudioCapturer录制本端的通话声音

该过程与[使用AudioCapturer开发音频录制功能](using-audiocapturer-for-recording.md)过程相似,关键区别在于audioCapturerInfo参数和音频数据流向。audioCapturerInfo参数中音源类型需设置为语音通话:SOURCE_TYPE_VOICE_COMMUNICATION。

所有录制均需要申请麦克风权限:ohos.permission.MICROPHONE,申请方式请参考[向用户申请授权](../../security/AccessToken/request-user-authorization.md)。

```ts
import { audio } from '@kit.AudioKit';
import { fileIo as fs } from '@kit.CoreFileKit';
import { BusinessError } from '@kit.BasicServicesKit';

const TAG = 'VoiceCallDemoForAudioCapturer';
class Options {
  offset?: number;
  length?: number;
}

// 与使用AudioCapturer开发音频录制功能过程相似,关键区别在于audioCapturerInfo参数和音频数据流向
let bufferSize: number = 0;
let audioCapturer: audio.AudioCapturer | undefined = undefined;
let audioStreamInfo: audio.AudioStreamInfo = {
  samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, // 采样率
  channels: audio.AudioChannel.CHANNEL_2, // 通道
  sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // 采样格式
  encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // 编码格式
};
let audioCapturerInfo: audio.AudioCapturerInfo = {
  // 需使用通话场景相应的参数
  source: audio.SourceType.SOURCE_TYPE_VOICE_COMMUNICATION, // 音源类型:语音通话
  capturerFlags: 0 // 音频采集器标志:默认为0即可
};
let audioCapturerOptions: audio.AudioCapturerOptions = {
  streamInfo: audioStreamInfo,
  capturerInfo: audioCapturerInfo
};
let path = getContext().cacheDir;
let filePath = path + '/StarWars10s-2C-48000-4SW.wav';
let file: fs.File = fs.openSync(filePath, fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE);
let readDataCallback = (buffer: ArrayBuffer) => {
  let options: Options = {
    offset: bufferSize,
    length: buffer.byteLength
  };
  fs.writeSync(file.fd, buffer, options);
  bufferSize += buffer.byteLength;
};

// 初始化,创建实例,设置监听事件
async function init() {
  audio.createAudioCapturer(audioCapturerOptions, (err: BusinessError, capturer: audio.AudioCapturer) => { // 创建AudioCapturer实例
    if (err) {
      console.error(`Invoke createAudioCapturer failed, code is ${err.code}, message is ${err.message}`);
      return;
    }
    console.info(`${TAG}: create AudioCapturer success`);
    audioCapturer = capturer;
    if (audioCapturer !== undefined) {
      audioCapturer.on('markReach', 1000, (position: number) => { // 订阅markReach事件,当采集的帧数达到1000帧时触发回调
        if (position === 1000) {
          console.info('ON Triggered successfully');
        }
      });
      audioCapturer.on('periodReach', 2000, (position: number) => { // 订阅periodReach事件,当采集的帧数每达到2000时触发回调
        if (position === 2000) {
          console.info('ON Triggered successfully');
        }
      });
      audioCapturer.on('readData', readDataCallback);
    }
  });
}

// 开始一次音频采集
async function start() {
  if (audioCapturer !== undefined) {
    let stateGroup: number[] = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
    if (stateGroup.indexOf(audioCapturer.state.valueOf()) === -1) { // 当且仅当状态为STATE_PREPARED、STATE_PAUSED和STATE_STOPPED之一时才能启动采集
      console.error(`${TAG}: start failed`);
      return;
    }
    audioCapturer.start((err: BusinessError) => {
      if (err) {
        console.error('Capturer start failed.');
      } else {
        console.info('Capturer start success.');
      }
    });
  }
}

// 停止采集
async function stop() {
  if (audioCapturer !== undefined) {
    // 只有采集器状态为STATE_RUNNING或STATE_PAUSED的时候才可以停止
    if (audioCapturer.state.valueOf() !== audio.AudioState.STATE_RUNNING && audioCapturer.state.valueOf() !== audio.AudioState.STATE_PAUSED) {
      console.info('Capturer is not running or paused');
      return;
    }
    await audioCapturer.stop(); // 停止采集
    if (audioCapturer.state.valueOf() === audio.AudioState.STATE_STOPPED) {
      console.info('Capturer stopped');
    } else {
      console.error('Capturer stop failed');
    }
  }
}

// 销毁实例,释放资源
async function release() {
  if (audioCapturer !== undefined) {
    // 采集器状态不是STATE_RELEASED或STATE_NEW状态,才能release
    if (audioCapturer.state.valueOf() === audio.AudioState.STATE_RELEASED || audioCapturer.state.valueOf() === audio.AudioState.STATE_NEW) {
      console.info('Capturer already released');
      return;
    }
    await audioCapturer.release(); // 释放资源
    if (audioCapturer.state.valueOf() === audio.AudioState.STATE_RELEASED) {
      console.info('Capturer released');
    } else {
      console.error('Capturer release failed');
    }
  }
}
```