From eb65b5aa6c19504e1fa2aebde52476558992fd6e Mon Sep 17 00:00:00 2001
From: jiao_yanlin
Date: Thu, 9 Mar 2023 16:36:20 +0800
Subject: [PATCH] Fix sample code issues

Signed-off-by: jiao_yanlin
---
 zh-cn/application-dev/media/audio-capturer.md | 333 +++++------
 zh-cn/application-dev/media/audio-renderer.md | 533 +++++++++---------
 .../reference/apis/js-apis-audio.md           |  16 +-
 3 files changed, 437 insertions(+), 445 deletions(-)

diff --git a/zh-cn/application-dev/media/audio-capturer.md b/zh-cn/application-dev/media/audio-capturer.md
index 74e09bf19f..451362be0b 100644
--- a/zh-cn/application-dev/media/audio-capturer.md
+++ b/zh-cn/application-dev/media/audio-capturer.md
@@ -21,38 +21,48 @@ AudioCapturer provides methods for obtaining raw audio data. Developers can

 ## Constraints

-Before developing the audio data capture feature, configure the microphone permission (ohos.permission.MICROPHONE) for the application. For details about permission configuration, see: [Access Control Permission Application Guide](../security/accesstoken-guidelines.md)
+Before developing the audio data capture feature, configure the microphone permission (ohos.permission.MICROPHONE) for the application. For the configuration procedure, see [Access Control Permission Application](../security/accesstoken-guidelines.md#配置文件权限声明).

 ## How to Develop

 For details about the APIs, see [AudioCapturer in the Audio Management API Reference](../reference/apis/js-apis-audio.md#audiocapturer8).

-1. Use createAudioCapturer() to create an AudioCapturer instance.
+1. Use createAudioCapturer() to create a global AudioCapturer instance.

   Set the audio capturer parameters in audioCapturerOptions. The instance can be used to capture audio, control capturing, obtain the capture state, and register notification callbacks.

   ```js
-  import audio from '@ohos.multimedia.audio';
-
-  let audioStreamInfo = {
-    samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
-    channels: audio.AudioChannel.CHANNEL_1,
-    sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
-    encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
-  }
-
-  let audioCapturerInfo = {
-    source: audio.SourceType.SOURCE_TYPE_MIC,
-    capturerFlags: 0 // 0 is the extended flag bit of the audio capturer; the default value is 0
-  }
-
-  let audioCapturerOptions = {
-    streamInfo: audioStreamInfo,
-    capturerInfo: audioCapturerInfo
-  }
-
-  let audioCapturer = await audio.createAudioCapturer(audioCapturerOptions);
-  console.log('AudioRecLog: Create audio capturer success.');
+  import audio from '@ohos.multimedia.audio';
+  import fs from '@ohos.file.fs'; // Used by the read() sample in step 3
+
+  // Self-test component for the audio capture APIs
+  @Entry
+  @Component
+  struct AudioCapturerDemo {
+    @State message: string = 'Hello World'
+    private audioCapturer: audio.AudioCapturer; // Shared by the steps below
+    private bufferSize; // Set and used by readData() in step 3
+
+    async initAudioCapturer() {
+      let audioStreamInfo = {
+        samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
+        channels: audio.AudioChannel.CHANNEL_1,
+        sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
+        encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
+      }
+
+      let audioCapturerInfo = {
+        source: audio.SourceType.SOURCE_TYPE_MIC,
+        capturerFlags: 0 // 0 is the extended flag bit of the audio capturer; the default value is 0
+      }
+
+      let audioCapturerOptions = {
+        streamInfo: audioStreamInfo,
+        capturerInfo: audioCapturerInfo
+      }
+
+      this.audioCapturer = await audio.createAudioCapturer(audioCapturerOptions);
+      console.log('AudioRecLog: Create audio capturer success.');
+    }
   ```
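+
+   The snippets in the following steps assume their methods are declared inside this same struct. One plausible place to run the initialization (an illustrative sketch, not part of the original sample) is the component's aboutToAppear() lifecycle callback:
+
+   ```js
+   // Hypothetical wiring: run capturer setup when the component appears.
+   aboutToAppear() {
+     this.initAudioCapturer();
+   }
+   ```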

 2. Call start() to start or resume the capture task.

   Once started, the capturer state changes to STATE_RUNNING, and the application can begin reading from the buffer.

   ```js
-  import audio from '@ohos.multimedia.audio';
-
-  async function startCapturer() {
-    let state = audioCapturer.state;
-    // The capturer can be started only from the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state.
-    if (state != audio.AudioState.STATE_PREPARED || state != audio.AudioState.STATE_PAUSED ||
-      state != audio.AudioState.STATE_STOPPED) {
-      console.info('Capturer is not in a correct state to start');
-      return;
-    }
-    await audioCapturer.start();
-
-    state = audioCapturer.state;
-    if (state == audio.AudioState.STATE_RUNNING) {
-      console.info('AudioRecLog: Capturer started');
-    } else {
-      console.error('AudioRecLog: Capturer start failed');
-    }
-  }
+  async startCapturer() {
+    let state = this.audioCapturer.state;
+    // The capturer can be started only from the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state.
+    if (state == audio.AudioState.STATE_PREPARED || state == audio.AudioState.STATE_PAUSED ||
+      state == audio.AudioState.STATE_STOPPED) {
+      await this.audioCapturer.start();
+      state = this.audioCapturer.state;
+      if (state == audio.AudioState.STATE_RUNNING) {
+        console.info('AudioRecLog: Capturer started');
+      } else {
+        console.error('AudioRecLog: Capturer start failed');
+      }
+    }
+  }
   ```

 3. Read the captured audio data and convert it to a byte stream. Call read() repeatedly until the application is ready to stop capturing.

   The following example writes the captured data to a file.

   ```js
-  import fs from '@ohos.file.fs';
-
-  let state = audioCapturer.state;
-  // read() can be called only when the state is STATE_RUNNING.
-  if (state != audio.AudioState.STATE_RUNNING) {
-    console.info('Capturer is not in a correct state to read');
-    return;
-  }
-
-  const path = '/data/data/.pulse_dir/capture_js.wav'; // Path for storing the captured audio file
-  let file = fs.openSync(filePath, 0o2);
-  let fd = file.fd;
-  if (file !== null) {
-    console.info('AudioRecLog: file created');
-  } else {
-    console.info('AudioRecLog: file create : FAILED');
-    return;
-  }
-
-  if (fd !== null) {
-    console.info('AudioRecLog: file fd opened in append mode');
-  }
-
-  let numBuffersToCapture = 150; // Write the data 150 times in a loop
-  let count = 0;
-  while (numBuffersToCapture) {
-    let bufferSize = await audioCapturer.getBufferSize();
-    let buffer = await audioCapturer.read(bufferSize, true);
-    let options = {
-      offset: count * this.bufferSize,
-      length: this.bufferSize
-    }
-    if (typeof(buffer) == undefined) {
-      console.info('AudioRecLog: read buffer failed');
-    } else {
-      let number = fs.writeSync(fd, buffer, options);
-      console.info(`AudioRecLog: data written: ${number}`);
-    }
-    numBuffersToCapture--;
-    count++;
-  }
+  async readData() {
+    let state = this.audioCapturer.state;
+    // read() can be called only when the state is STATE_RUNNING.
+    if (state != audio.AudioState.STATE_RUNNING) {
+      console.info('Capturer is not in a correct state to read');
+      return;
+    }
+    const path = '/data/data/.pulse_dir/capture_js.wav'; // Path for storing the captured audio file
+    let file = fs.openSync(path, 0o2);
+    let fd = file.fd;
+    if (file !== null) {
+      console.info('AudioRecLog: file created');
+    } else {
+      console.info('AudioRecLog: file create : FAILED');
+      return;
+    }
+    if (fd !== null) {
+      console.info('AudioRecLog: file fd opened in append mode');
+    }
+    let numBuffersToCapture = 150; // Write the data 150 times in a loop
+    let count = 0;
+    while (numBuffersToCapture) {
+      this.bufferSize = await this.audioCapturer.getBufferSize();
+      let buffer = await this.audioCapturer.read(this.bufferSize, true);
+      let options = {
+        offset: count * this.bufferSize,
+        length: this.bufferSize
+      }
+      if (typeof(buffer) == undefined) {
+        console.info('AudioRecLog: read buffer failed');
+      } else {
+        let number = fs.writeSync(fd, buffer, options);
+        console.info(`AudioRecLog: data written: ${number}`);
+      }
+      numBuffersToCapture--;
+      count++;
+    }
+  }
   ```
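+
+   Note that this sample stores raw PCM frames even though the file name ends in .wav; a player that expects a WAV container would also need a RIFF header. A minimal sketch of such a header, assuming 16-bit little-endian PCM and the stream parameters from step 1 (the helper name is hypothetical):
+
+   ```js
+   function buildWavHeader(dataSize, sampleRate, channels, bitsPerSample) {
+     let header = new ArrayBuffer(44);
+     let view = new DataView(header);
+     const writeTag = (offset, tag) => {
+       for (let i = 0; i < tag.length; i++) view.setUint8(offset + i, tag.charCodeAt(i));
+     };
+     let byteRate = sampleRate * channels * bitsPerSample / 8;
+     writeTag(0, 'RIFF');
+     view.setUint32(4, 36 + dataSize, true); // Remaining RIFF chunk size
+     writeTag(8, 'WAVE');
+     writeTag(12, 'fmt ');
+     view.setUint32(16, 16, true);           // fmt chunk size for PCM
+     view.setUint16(20, 1, true);            // Audio format: PCM
+     view.setUint16(22, channels, true);
+     view.setUint32(24, sampleRate, true);
+     view.setUint32(28, byteRate, true);
+     view.setUint16(32, channels * bitsPerSample / 8, true); // Block align
+     view.setUint16(34, bitsPerSample, true);
+     writeTag(36, 'data');
+     view.setUint32(40, dataSize, true);
+     return header; // Write this before the PCM data, e.g. with fs.writeSync(fd, header)
+   }
+   ```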

 4. When the capture is complete, call stop() to stop recording.

   ```js
-  async function StopCapturer() {
-    let state = audioCapturer.state;
-    // The capturer can be stopped only when its state is STATE_RUNNING or STATE_PAUSED
-    if (state != audio.AudioState.STATE_RUNNING && state != audio.AudioState.STATE_PAUSED) {
-      console.info('AudioRecLog: Capturer is not running or paused');
-      return;
-    }
-
-    await audioCapturer.stop();
-
-    state = audioCapturer.state;
-    if (state == audio.AudioState.STATE_STOPPED) {
-      console.info('AudioRecLog: Capturer stopped');
-    } else {
-      console.error('AudioRecLog: Capturer stop failed');
-    }
-  }
+  async StopCapturer() {
+    let state = this.audioCapturer.state;
+    // The capturer can be stopped only when its state is STATE_RUNNING or STATE_PAUSED
+    if (state != audio.AudioState.STATE_RUNNING && state != audio.AudioState.STATE_PAUSED) {
+      console.info('AudioRecLog: Capturer is not running or paused');
+      return;
+    }
+
+    await this.audioCapturer.stop();
+
+    state = this.audioCapturer.state;
+    if (state == audio.AudioState.STATE_STOPPED) {
+      console.info('AudioRecLog: Capturer stopped');
+    } else {
+      console.error('AudioRecLog: Capturer stop failed');
+    }
+  }
   ```

 5. When the task is finished, call release() to release related resources.

   ```js
-  async function releaseCapturer() {
-    let state = audioCapturer.state;
-    // release() can be called only when the state is neither STATE_RELEASED nor STATE_NEW
-    if (state == audio.AudioState.STATE_RELEASED || state == audio.AudioState.STATE_NEW) {
-      console.info('AudioRecLog: Capturer already released');
-      return;
-    }
-
-    await audioCapturer.release();
-
-    state = audioCapturer.state;
-    if (state == audio.AudioState.STATE_RELEASED) {
-      console.info('AudioRecLog: Capturer released');
-    } else {
-      console.info('AudioRecLog: Capturer release failed');
-    }
-  }
+  async releaseCapturer() {
+    let state = this.audioCapturer.state;
+    // release() can be called only when the state is neither STATE_RELEASED nor STATE_NEW
+    if (state == audio.AudioState.STATE_RELEASED || state == audio.AudioState.STATE_NEW) {
+      console.info('AudioRecLog: Capturer already released');
+      return;
+    }
+
+    await this.audioCapturer.release();
+
+    state = this.audioCapturer.state;
+    if (state == audio.AudioState.STATE_RELEASED) {
+      console.info('AudioRecLog: Capturer released');
+    } else {
+      console.info('AudioRecLog: Capturer release failed');
+    }
+  }
   ```

 6. (Optional) Obtain capturer information.

   The following code shows how to obtain information about the capturer.

   ```js
-  // Obtain the current capturer state
-  let state = audioCapturer.state;
-
-  // Obtain the capturer information
-  let audioCapturerInfo : audio.AuduioCapturerInfo = await audioCapturer.getCapturerInfo();
-
-  // Obtain the audio stream information
-  let audioStreamInfo : audio.AudioStreamInfo = await audioCapturer.getStreamInfo();
-
-  // Obtain the audio stream ID
-  let audioStreamId : number = await audioCapturer.getAudioStreamId();
-
-  // Obtain the Unix timestamp in nanoseconds
-  let audioTime : number = await audioCapturer.getAudioTime();
-
-  // Obtain a reasonable minimum buffer size
-  let bufferSize : number = await audioCapturer.getBufferSize();
+  async getAudioCapturerInfo() {
+    // Obtain the current capturer state
+    let state = this.audioCapturer.state;
+    // Obtain the capturer information
+    let audioCapturerInfo : audio.AudioCapturerInfo = await this.audioCapturer.getCapturerInfo();
+    // Obtain the audio stream information
+    let audioStreamInfo : audio.AudioStreamInfo = await this.audioCapturer.getStreamInfo();
+    // Obtain the audio stream ID
+    let audioStreamId : number = await this.audioCapturer.getAudioStreamId();
+    // Obtain the Unix timestamp in nanoseconds
+    let audioTime : number = await this.audioCapturer.getAudioTime();
+    // Obtain a reasonable minimum buffer size
+    let bufferSize : number = await this.audioCapturer.getBufferSize();
+  }
   ```
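+
+   Each of these getters returns a promise; in application code it may be worth wrapping the awaits in try...catch so a rejection is logged instead of silently aborting the async method (a sketch, not part of the original sample):
+
+   ```js
+   async getCapturerInfoSafely() {
+     try {
+       let info = await this.audioCapturer.getCapturerInfo();
+       console.info(`AudioRecLog: capturer source: ${info.source}`);
+     } catch (err) {
+       console.error(`AudioRecLog: getCapturerInfo failed: ${err}`);
+     }
+   }
+   ```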

 7. (Optional) Use on('markReach') to subscribe to the capturer's mark-reached event, and off('markReach') to unsubscribe.

   After the markReach listener is registered, the callback is triggered when the number of captured frames reaches the configured value, and the configured value is returned.

   ```js
-  audioCapturer.on('markReach', (reachNumber) => {
-    console.info('Mark reach event Received');
-    console.info(`The Capturer reached frame: ${reachNumber}`);
-  });
-
-  audioCapturer.off('markReach'); // Unsubscribe from markReach; mark-reached events will no longer be received
+  async markReach() {
+    this.audioCapturer.on('markReach', 10, (reachNumber) => {
+      console.info('Mark reach event Received');
+      console.info(`The Capturer reached frame: ${reachNumber}`);
+    });
+    this.audioCapturer.off('markReach'); // Unsubscribe from markReach; mark-reached events will no longer be received
+  }
   ```

 8. (Optional) Use on('periodReach') to subscribe to the capturer's period-reached event, and off('periodReach') to unsubscribe.

   After the periodReach listener is registered, the callback is triggered **every time** the number of captured frames reaches the configured value, and the configured value is returned.

   ```js
-  audioCapturer.on('periodReach', (reachNumber) => {
-    console.info('Period reach event Received');
-    console.info(`In this period, the Capturer reached frame: ${reachNumber}`);
-  });
-
-  audioCapturer.off('periodReach'); // Unsubscribe from periodReach; period-reached events will no longer be received
+  async periodReach() {
+    this.audioCapturer.on('periodReach', 10, (reachNumber) => {
+      console.info('Period reach event Received');
+      console.info(`In this period, the Capturer reached frame: ${reachNumber}`);
+    });
+    this.audioCapturer.off('periodReach'); // Unsubscribe from periodReach; period-reached events will no longer be received
+  }
   ```
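+
+   As a usage note for the two subscriptions above, markReach fires once while periodReach fires repeatedly, so together they can report capture progress (an illustrative sketch; the frame counts are arbitrary):
+
+   ```js
+   async monitorCaptureProgress() {
+     // Fires once, after the first 1000 captured frames.
+     this.audioCapturer.on('markReach', 1000, (position) => {
+       console.info(`AudioRecLog: first ${position} frames captured`);
+     });
+     // Fires again after every further 5000 captured frames.
+     this.audioCapturer.on('periodReach', 5000, (frameCount) => {
+       console.info(`AudioRecLog: another ${frameCount} frames captured`);
+     });
+   }
+   ```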

 9. If the application needs to perform operations when the capturer state changes, it can subscribe to the stateChange event. When the state changes, a callback carrying the event type is received.

   ```js
+  async stateChange() {
+    this.audioCapturer.on('stateChange', (state) => {
+      console.info(`AudioCapturerLog: Changed State to : ${state}`)
+      switch (state) {
+        case audio.AudioState.STATE_PREPARED:
+          console.info('--------CHANGE IN AUDIO STATE----------PREPARED--------------');
+          console.info('Audio State is : Prepared');
+          break;
+        case audio.AudioState.STATE_RUNNING:
+          console.info('--------CHANGE IN AUDIO STATE----------RUNNING--------------');
+          console.info('Audio State is : Running');
+          break;
+        case audio.AudioState.STATE_STOPPED:
+          console.info('--------CHANGE IN AUDIO STATE----------STOPPED--------------');
+          console.info('Audio State is : stopped');
+          break;
+        case audio.AudioState.STATE_RELEASED:
+          console.info('--------CHANGE IN AUDIO STATE----------RELEASED--------------');
+          console.info('Audio State is : released');
+          break;
+        default:
+          console.info('--------CHANGE IN AUDIO STATE----------INVALID--------------');
+          console.info('Audio State is : invalid');
+          break;
+      }
+    });
+  }
   ```
\ No newline at end of file
diff --git a/zh-cn/application-dev/media/audio-renderer.md b/zh-cn/application-dev/media/audio-renderer.md
index 0599b33ab7..0f9436fc3b 100644
--- a/zh-cn/application-dev/media/audio-renderer.md
+++ b/zh-cn/application-dev/media/audio-renderer.md
@@ -28,47 +28,59 @@ AudioRenderer provides APIs for rendering audio files and controlling playback.

 ## How to Develop

 For details about the APIs, see [AudioRenderer in the Audio Management API Reference](../reference/apis/js-apis-audio.md#audiorenderer8).

-1. Use createAudioRenderer() to create an AudioRenderer instance.
+1. Use createAudioRenderer() to create a global AudioRenderer instance, for use in the subsequent steps.

   Set the parameters in audioRendererOptions. The instance can be used to render audio, control and obtain the render state, and register notification callbacks.

   ```js
-  import audio from '@ohos.multimedia.audio';
-
-  let audioStreamInfo = {
-    samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
-    channels: audio.AudioChannel.CHANNEL_1,
-    sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
-    encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
-  }
-  let audioRendererInfo = {
-    content: audio.ContentType.CONTENT_TYPE_SPEECH,
-    usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION,
-    rendererFlags: 0 // 0 is the extended flag bit of the audio renderer; the default value is 0
-  }
-  let audioRendererOptions = {
-    streamInfo: audioStreamInfo,
-    rendererInfo: audioRendererInfo
-  }
-
-  let audioRenderer = await audio.createAudioRenderer(audioRendererOptions);
-  console.log("Create audio renderer success.");
+  import audio from '@ohos.multimedia.audio';
+  import fs from '@ohos.file.fs';
+
+  // Self-test component for the audio rendering APIs
+  @Entry
+  @Component
+  struct AudioRenderer1129 {
+    private audioRenderer: audio.AudioRenderer;
+    private bufferSize; // Used by the write() sample in step 3
+    private audioRenderer1: audio.AudioRenderer; // Used by the complete example in step 14
+    private audioRenderer2: audio.AudioRenderer; // Used by the complete example in step 14
+
+    async initAudioRender() {
+      let audioStreamInfo = {
+        samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
+        channels: audio.AudioChannel.CHANNEL_1,
+        sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
+        encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
+      }
+      let audioRendererInfo = {
+        content: audio.ContentType.CONTENT_TYPE_SPEECH,
+        usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION,
+        rendererFlags: 0 // 0 is the extended flag bit of the audio renderer; the default value is 0
+      }
+      let audioRendererOptions = {
+        streamInfo: audioStreamInfo,
+        rendererInfo: audioRendererInfo
+      }
+      this.audioRenderer = await audio.createAudioRenderer(audioRendererOptions);
+      console.log("Create audio renderer success.");
+    }
+  }
   ```
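+
+   The streamInfo in this step should match the data you intend to render; for a 48 kHz stereo source it would be adjusted accordingly (an illustrative variant, using enum values that also appear in step 14 of this guide):
+
+   ```js
+   // Hypothetical configuration for a 48 kHz, 2-channel, 16-bit PCM source.
+   let stereoStreamInfo = {
+     samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000,
+     channels: audio.AudioChannel.CHANNEL_2,
+     sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
+     encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
+   }
+   ```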

 2. Call start() to start or resume the playback task.

   ```js
-  async function startRenderer() {
-    let state = audioRenderer.state;
+  async startRenderer() {
+    let state = this.audioRenderer.state;
     // The renderer can be started only from the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state.
     if (state != audio.AudioState.STATE_PREPARED && state != audio.AudioState.STATE_PAUSED &&
       state != audio.AudioState.STATE_STOPPED) {
       console.info('Renderer is not in a correct state to start');
       return;
     }

-    await audioRenderer.start();
-
-    state = audioRenderer.state;
+    await this.audioRenderer.start();
+
+    state = this.audioRenderer.state;
     if (state == audio.AudioState.STATE_RUNNING) {
       console.info('Renderer started');
     } else {
       console.error('Renderer start failed');
     }
   }
   ```

 3. Call write() to write data to the buffer.

-  Read the audio data to be played into the buffer, and call write() repeatedly to write it.
+  Read the audio data to be played into the buffer, and call write() repeatedly to write it. Note the `import fs from '@ohos.file.fs';` statement; see step 1 for details.

   ```js
-  import fs from '@ohos.file.fs';
-  import audio from '@ohos.multimedia.audio';
-
-  async function writeBuffer(buf) {
-    // The renderer state must be STATE_RUNNING when writing data
-    if (audioRenderer.state != audio.AudioState.STATE_RUNNING) {
-      console.error('Renderer is not running, do not write');
-      return;
-    }
-    let writtenbytes = await audioRenderer.write(buf);
-    console.info(`Actual written bytes: ${writtenbytes} `);
-    if (writtenbytes < 0) {
-      console.error('Write buffer failed. check the state of renderer');
-    }
-  }
-
-  // This is a reasonable minimum buffer size for the renderer (other sizes may also be chosen)
-  const bufferSize = await audioRenderer.getBufferSize();
-  let dir = globalThis.fileDir; // The directory cannot be accessed directly without permission; be sure to use the sandbox path
-  const filePath = dir + '/file_example_WAV_2MG.wav'; // Music file to render; the actual path is /data/storage/el2/base/haps/entry/files/file_example_WAV_2MG.wav
-  console.info(`file filePath: ${ filePath}`);
-
-  let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
-  let stat = await fs.stat(filePath); // Music file information
-  let buf = new ArrayBuffer(bufferSize);
-  let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1);
-  for (let i = 0;i < len; i++) {
-    let options = {
-      offset: i * this.bufferSize,
-      length: this.bufferSize
-    }
-    let readsize = await fs.read(file.fd, buf, options)
-    let writeSize = await new Promise((resolve,reject)=>{
-      this.audioRenderer.write(buf,(err,writeSize)=>{
-        if(err){
-          reject(err)
-        }else{
-          resolve(writeSize)
-        }
-      })
-    })
-  }
-
-  fs.close(file)
-  await audioRenderer.stop(); // Stop rendering
-  await audioRenderer.release(); // Release resources
+  async writeData() {
+    // This is a reasonable minimum buffer size for the renderer (other sizes may also be chosen)
+    this.bufferSize = await this.audioRenderer.getBufferSize();
+    let dir = globalThis.fileDir; // The directory cannot be accessed directly without permission; be sure to use the sandbox path
+    const filePath = dir + '/file_example_WAV_2MG.wav'; // Music file to render; the actual path is /data/storage/el2/base/haps/entry/files/file_example_WAV_2MG.wav
+    console.info(`file filePath: ${ filePath}`);
+
+    let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
+    let stat = await fs.stat(filePath); // Music file information
+    let buf = new ArrayBuffer(this.bufferSize);
+    let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1);
+    for (let i = 0;i < len; i++) {
+      let options = {
+        offset: i * this.bufferSize,
+        length: this.bufferSize
+      }
+      let readsize = await fs.read(file.fd, buf, options)
+      let writeSize = await new Promise((resolve,reject)=>{
+        this.audioRenderer.write(buf,(err,writeSize)=>{
+          if(err){
+            reject(err)
+          }else{
+            resolve(writeSize)
+          }
+        })
+      })
+    }
+
+    fs.close(file)
+    await this.audioRenderer.stop(); // Stop rendering
+    await this.audioRenderer.release(); // Release resources
+  }
   ```
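+
+   One caveat in the loop above (and in the similar loops later in this guide): on the last iteration only stat.size % bufferSize bytes remain, yet the read options still request a full bufferSize. A defensive variant would clamp the final length (a sketch, not part of the original sample; it replaces the options object inside the for loop):
+
+   ```js
+   let remaining = stat.size - i * this.bufferSize;
+   let options = {
+     offset: i * this.bufferSize,
+     length: Math.min(this.bufferSize, remaining) // Clamp the final partial chunk
+   }
+   ```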

 4. (Optional) Call pause() or stop() to pause or stop rendering audio data.

   ```js
-  async function pauseRenderer() {
-    let state = audioRenderer.state;
-    // The renderer can be paused only when its state is STATE_RUNNING
-    if (state != audio.AudioState.STATE_RUNNING) {
-      console.info('Renderer is not running');
-      return;
-    }
-
-    await audioRenderer.pause();
-
-    state = audioRenderer.state;
-    if (state == audio.AudioState.STATE_PAUSED) {
-      console.info('Renderer paused');
-    } else {
-      console.error('Renderer pause failed');
-    }
-  }
-
-  async function stopRenderer() {
-    let state = audioRenderer.state;
-    // The renderer can be stopped only when its state is STATE_RUNNING or STATE_PAUSED
-    if (state != audio.AudioState.STATE_RUNNING && state != audio.AudioState.STATE_PAUSED) {
-      console.info('Renderer is not running or paused');
-      return;
-    }
-
-    await audioRenderer.stop();
-
-    state = audioRenderer.state;
-    if (state == audio.AudioState.STATE_STOPPED) {
-      console.info('Renderer stopped');
-    } else {
-      console.error('Renderer stop failed');
-    }
-  }
+  async pauseRenderer() {
+    let state = this.audioRenderer.state;
+    // The renderer can be paused only when its state is STATE_RUNNING
+    if (state != audio.AudioState.STATE_RUNNING) {
+      console.info('Renderer is not running');
+      return;
+    }
+
+    await this.audioRenderer.pause();
+
+    state = this.audioRenderer.state;
+    if (state == audio.AudioState.STATE_PAUSED) {
+      console.info('Renderer paused');
+    } else {
+      console.error('Renderer pause failed');
+    }
+  }
+
+  async stopRenderer() {
+    let state = this.audioRenderer.state;
+    // The renderer can be stopped only when its state is STATE_RUNNING or STATE_PAUSED
+    if (state != audio.AudioState.STATE_RUNNING && state != audio.AudioState.STATE_PAUSED) {
+      console.info('Renderer is not running or paused');
+      return;
+    }
+
+    await this.audioRenderer.stop();
+
+    state = this.audioRenderer.state;
+    if (state == audio.AudioState.STATE_STOPPED) {
+      console.info('Renderer stopped');
+    } else {
+      console.error('Renderer stop failed');
+    }
+  }
   ```

 5. (Optional) Call drain() to drain the buffer.

   ```js
-  async function drainRenderer() {
-    let state = audioRenderer.state;
-    // drain() can be used only when the renderer state is STATE_RUNNING
-    if (state != audio.AudioState.STATE_RUNNING) {
-      console.info('Renderer is not running');
-      return;
-    }
-
-    await audioRenderer.drain();
-
-    state = audioRenderer.state;
-  }
+  async drainRenderer() {
+    let state = this.audioRenderer.state;
+    // drain() can be used only when the renderer state is STATE_RUNNING
+    if (state != audio.AudioState.STATE_RUNNING) {
+      console.info('Renderer is not running');
+      return;
+    }
+
+    await this.audioRenderer.drain();
+    state = this.audioRenderer.state;
+  }
   ```
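+
+   drain() is commonly used to let already-written data finish playing before the stream is stopped, which makes it a natural companion to stop() at the end of a track (an illustrative sketch reusing the methods defined above):
+
+   ```js
+   async finishPlayback() {
+     await this.drainRenderer(); // Let queued frames play out first
+     await this.stopRenderer();  // Then stop the stream
+   }
+   ```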

 6. When the task is complete, call release() to release related resources.

   AudioRenderer consumes a large amount of system resources, so be sure to release them once the task is done.

   ```js
-  async function releaseRenderer() {
-    let state = audioRenderer.state;
-    // release() can be called only when the state is neither STATE_RELEASED nor STATE_NEW
-    if (state == audio.AudioState.STATE_RELEASED || state == audio.AudioState.STATE_NEW) {
-      console.info('Renderer already released');
-      return;
-    }
-
-    await audioRenderer.release();
-
-    state = audioRenderer.state;
-    if (state == audio.AudioState.STATE_RELEASED) {
-      console.info('Renderer released');
-    } else {
-      console.info('Renderer release failed');
-    }
-  }
+  async releaseRenderer() {
+    let state = this.audioRenderer.state;
+    // release() can be called only when the state is neither STATE_RELEASED nor STATE_NEW
+    if (state == audio.AudioState.STATE_RELEASED || state == audio.AudioState.STATE_NEW) {
+      console.info('Renderer already released');
+      return;
+    }
+    await this.audioRenderer.release();
+
+    state = this.audioRenderer.state;
+    if (state == audio.AudioState.STATE_RELEASED) {
+      console.info('Renderer released');
+    } else {
+      console.info('Renderer release failed');
+    }
+  }
   ```

 7. (Optional) Obtain renderer information.

   The following code shows how to obtain information about the renderer.

   ```js
-  // Obtain the current renderer state
-  let state = audioRenderer.state;
-
-  // Obtain the renderer information
-  let audioRendererInfo : audio.AudioRendererInfo = await audioRenderer.getRendererInfo();
-
-  // Obtain the audio stream information
-  let audioStreamInfo : audio.AudioStreamInfo = await audioRenderer.getStreamInfo();
-
-  // Obtain the audio stream ID
-  let audioStreamId : number = await audioRenderer.getAudioStreamId();
-
-  // Obtain the Unix timestamp in nanoseconds
-  let audioTime : number = await audioRenderer.getAudioTime();
-
-  // Obtain a reasonable minimum buffer size
-  let bufferSize : number = await audioRenderer.getBufferSize();
-
-  // Obtain the render rate
-  let renderRate : audio.AudioRendererRate = await audioRenderer.getRenderRate();
+  async getRenderInfo() {
+    // Obtain the current renderer state
+    let state = this.audioRenderer.state;
+    // Obtain the renderer information
+    let audioRendererInfo : audio.AudioRendererInfo = await this.audioRenderer.getRendererInfo();
+    // Obtain the audio stream information
+    let audioStreamInfo : audio.AudioStreamInfo = await this.audioRenderer.getStreamInfo();
+    // Obtain the audio stream ID
+    let audioStreamId : number = await this.audioRenderer.getAudioStreamId();
+    // Obtain the Unix timestamp in nanoseconds
+    let audioTime : number = await this.audioRenderer.getAudioTime();
+    // Obtain a reasonable minimum buffer size
+    let bufferSize : number = await this.audioRenderer.getBufferSize();
+    // Obtain the render rate
+    let renderRate : audio.AudioRendererRate = await this.audioRenderer.getRenderRate();
+  }
   ```

 8. (Optional) Set renderer information.

   The following code shows how to set information on the renderer.

   ```js
-  // Set the render rate to normal speed
-  let renderRate : audio.AudioRendererRate = audio.AudioRendererRate.RENDER_RATE_NORMAL;
-  await audioRenderer.setRenderRate(renderRate);
-
-  // Set the renderer's audio interrupt mode to SHARE_MODE
-  let interruptMode : audio.InterruptMode = audio.InterruptMode.SHARE_MODE;
-  await audioRenderer.setInterruptMode(interruptMode);
-
-  // Set the volume of a stream to 0.5
-  let volume : number = 0.5;
-  await audioRenderer.setVolume(volume);
+  async setAudioRenderInfo() {
+    // Set the render rate to normal speed
+    let renderRate : audio.AudioRendererRate = audio.AudioRendererRate.RENDER_RATE_NORMAL;
+    await this.audioRenderer.setRenderRate(renderRate);
+    // Set the renderer's audio interrupt mode to SHARE_MODE
+    let interruptMode : audio.InterruptMode = audio.InterruptMode.SHARE_MODE;
+    await this.audioRenderer.setInterruptMode(interruptMode);
+    // Set the volume of a stream to 0.5
+    let volume : number = 0.5;
+    await this.audioRenderer.setVolume(volume);
+  }
   ```
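+
+   As a readability note, the interrupt mode can also be set with named enum values instead of raw numbers, both for the share mode shown above and for the independent mode used in step 14 (a sketch; it assumes the InterruptMode enum exposes INDEPENDENT_MODE alongside SHARE_MODE):
+
+   ```js
+   // Intended to be equivalent to setInterruptMode(1) in step 14.
+   let independentMode : audio.InterruptMode = audio.InterruptMode.INDEPENDENT_MODE;
+   await this.audioRenderer.setInterruptMode(independentMode);
+   ```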

 9. (Optional) Use on('audioInterrupt') to subscribe to the renderer's audio interrupt events, and off('audioInterrupt') to unsubscribe.

   When the renderer's audio is interrupted, it receives an interruptEvent callback, and the application can handle it based on the event content.

   Note that subscribing to audio interrupt events in this module differs slightly from on('interrupt') in the [AudioManager](../reference/apis/js-apis-audio.md#audiomanager) module. Since API version 9, on('interrupt') and off('interrupt') are deprecated. In the AudioRenderer module, when developers need to listen for focus change events, they only need to call on('audioInterrupt'): when an AudioRenderer object inside the application performs an action such as start, stop, or pause, it actively requests focus, a focus transfer occurs, and the related AudioRenderer object receives the corresponding callback. For objects other than AudioRenderer, such as FM or voice wakeup, the application does not create an object; in those cases, call on('interrupt') in AudioManager to receive focus change notifications.

   ```js
-  audioRenderer.on('audioInterrupt', (interruptEvent) => {
-    console.info('InterruptEvent Received');
-    console.info(`InterruptType: ${interruptEvent.eventType}`);
-    console.info(`InterruptForceType: ${interruptEvent.forceType}`);
-    console.info(`AInterruptHint: ${interruptEvent.hintType}`);
-
-    if (interruptEvent.forceType == audio.InterruptForceType.INTERRUPT_FORCE) {
-      switch (interruptEvent.hintType) {
-        // A forced pause initiated by the audio framework; stop writing data to avoid data loss
-        case audio.InterruptHint.INTERRUPT_HINT_PAUSE:
-          isPlay = false;
-          break;
-        // A forced stop initiated by the audio framework; stop writing data to avoid data loss
-        case audio.InterruptHint.INTERRUPT_HINT_STOP:
-          isPlay = false;
-          break;
-        // A forced volume-duck operation initiated by the audio framework
-        case audio.InterruptHint.INTERRUPT_HINT_DUCK:
-          break;
-        // An unduck operation initiated by the audio framework
-        case audio.InterruptHint.INTERRUPT_HINT_UNDUCK:
-          break;
-      }
-    } else if (interruptEvent.forceType == audio.InterruptForceType.INTERRUPT_SHARE) {
-      switch (interruptEvent.hintType) {
-        // Hints the app to resume rendering
-        case audio.InterruptHint.INTERRUPT_HINT_RESUME:
-          startRenderer();
-          break;
-        // Hints the app that the stream is interrupted; the app decides whether to continue (paused here)
-        case audio.InterruptHint.INTERRUPT_HINT_PAUSE:
-          isPlay = false;
-          pauseRenderer();
-          break;
-      }
-    }
-  });
-
-  audioRenderer.off('audioInterrupt'); // Unsubscribe from audio interrupt events; they will no longer be received
+  async subscribeAudioRender() {
+    this.audioRenderer.on('audioInterrupt', (interruptEvent) => {
+      console.info('InterruptEvent Received');
+      console.info(`InterruptType: ${interruptEvent.eventType}`);
+      console.info(`InterruptForceType: ${interruptEvent.forceType}`);
+      console.info(`AInterruptHint: ${interruptEvent.hintType}`);
+
+      if (interruptEvent.forceType == audio.InterruptForceType.INTERRUPT_FORCE) {
+        switch (interruptEvent.hintType) {
+          // A forced pause initiated by the audio framework; stop writing data to avoid data loss
+          case audio.InterruptHint.INTERRUPT_HINT_PAUSE:
+            console.info('isPlay is false');
+            break;
+          // A forced stop initiated by the audio framework; stop writing data to avoid data loss
+          case audio.InterruptHint.INTERRUPT_HINT_STOP:
+            console.info('isPlay is false');
+            break;
+          // A forced volume-duck operation initiated by the audio framework
+          case audio.InterruptHint.INTERRUPT_HINT_DUCK:
+            break;
+          // An unduck operation initiated by the audio framework
+          case audio.InterruptHint.INTERRUPT_HINT_UNDUCK:
+            break;
+        }
+      } else if (interruptEvent.forceType == audio.InterruptForceType.INTERRUPT_SHARE) {
+        switch (interruptEvent.hintType) {
+          // Hints the app to resume rendering
+          case audio.InterruptHint.INTERRUPT_HINT_RESUME:
+            this.startRenderer();
+            break;
+          // Hints the app that the stream is interrupted; the app decides whether to continue (paused here)
+          case audio.InterruptHint.INTERRUPT_HINT_PAUSE:
+            console.info('isPlay is false');
+            this.pauseRenderer();
+            break;
+        }
+      }
+    });
+  }
   ```
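+
+   The handler above only logs in the forced-pause and forced-stop branches; an earlier revision of this sample tracked playback with an isPlay flag, and reinstating something similar keeps application state honest (an illustrative sketch; the member name is hypothetical):
+
+   ```js
+   // Assumes a struct member such as: private isPlay: boolean = false;
+   handleForcedInterrupt(hintType) {
+     if (hintType == audio.InterruptHint.INTERRUPT_HINT_PAUSE ||
+         hintType == audio.InterruptHint.INTERRUPT_HINT_STOP) {
+       this.isPlay = false; // Record that playback is no longer running
+     }
+   }
+   ```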

 10. (Optional) Use on('markReach') to subscribe to the renderer's mark-reached event, and off('markReach') to unsubscribe.

   After the markReach listener is registered, the callback is triggered when the number of rendered frames reaches the configured value, and the configured value is returned.

   ```js
-  audioRenderer.on('markReach', (reachNumber) => {
-    console.info('Mark reach event Received');
-    console.info(`The renderer reached frame: ${reachNumber}`);
-  });
-
-  audioRenderer.off('markReach'); // Unsubscribe from markReach; mark-reached events will no longer be received
+  async markReach() {
+    this.audioRenderer.on('markReach', 50, (position) => {
+      if (position == 50) {
+        console.info('ON Triggered successfully');
+      }
+    });
+    this.audioRenderer.off('markReach'); // Unsubscribe from markReach; mark-reached events will no longer be received
+  }
   ```

 11. (Optional) Use on('periodReach') to subscribe to the renderer's period-reached event, and off('periodReach') to unsubscribe.

   After the periodReach listener is registered, the callback is triggered **every time** the number of rendered frames reaches the configured value, and the configured value is returned.

   ```js
-  audioRenderer.on('periodReach', (reachNumber) => {
-    console.info('Period reach event Received');
-    console.info(`In this period, the renderer reached frame: ${reachNumber} `);
-  });
-
-  audioRenderer.off('periodReach'); // Unsubscribe from periodReach; period-reached events will no longer be received
+  async periodReach() {
+    this.audioRenderer.on('periodReach', 10, (reachNumber) => {
+      console.info(`In this period, the renderer reached frame: ${reachNumber} `);
+    });
+
+    this.audioRenderer.off('periodReach'); // Unsubscribe from periodReach; period-reached events will no longer be received
+  }
   ```

 12. (Optional) Use on('stateChange') to subscribe to the renderer's state change events.

   After the stateChange listener is registered, the callback is triggered whenever the renderer state changes, and the current renderer state is returned.

   ```js
-  audioRenderer.on('stateChange', (audioState) => {
-    console.info('State change event Received');
-    console.info(`Current renderer state is: ${audioState}`);
-  });
+  async stateChange() {
+    this.audioRenderer.on('stateChange', (audioState) => {
+      console.info('State change event Received');
+      console.info(`Current renderer state is: ${audioState}`);
+    });
+  }
   ```

 13. (Optional) Handle exceptions thrown by on().

   If the string or the parameter type passed to on() is incorrect, the program throws an exception, which needs to be caught with try...catch.

   ```js
-  try {
-    audioRenderer.on('invalidInput', () => { // The string does not match any event
-    })
-  } catch (err) {
-    console.info(`Call on function error, ${err}`); // The program throws a 401 exception
-  }
-  try {
-    audioRenderer.on(1, () => { // The parameter type is incorrect
-    })
-  } catch (err) {
-    console.info(`Call on function error, ${err}`); // The program throws a 6800101 exception
-  }
+  async errorCall() {
+    try {
+      this.audioRenderer.on('invalidInput', () => { // The string does not match any event
+      })
+    } catch (err) {
+      console.info(`Call on function error, ${err}`); // The program throws a 401 exception
+    }
+    try {
+      this.audioRenderer.on(1, () => { // The parameter type is incorrect
+      })
+    } catch (err) {
+      console.info(`Call on function error, ${err}`); // The program throws a 6800101 exception
+    }
+  }
   ```
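+
+   The two catch branches above receive different error codes (401 for a mismatched string, 6800101 for an invalid parameter type, per the comments). Logging the code field explicitly makes the distinction visible (a sketch):
+
+   ```js
+   try {
+     this.audioRenderer.on('invalidInput', () => {});
+   } catch (err) {
+     // The thrown error carries a code field distinguishing 401 from 6800101.
+     console.error(`on() failed, code: ${err.code}, message: ${err.message}`);
+   }
+   ```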

 14. (Optional) Complete example of on('audioInterrupt').
+
+   Note: declare the audioRenderer1 and audioRenderer2 objects before the call; see step 1 for details.

   AudioRenderer1 and AudioRenderer2 in the same application are both created with the focus mode set to independent, and both call on('audioInterrupt') to listen for focus changes. Initially AudioRenderer1 holds the focus; when AudioRenderer2 acquires it, audioRenderer1 receives a focus-transfer notification and prints the related log. If AudioRenderer1 and AudioRenderer2 did not set the independent focus mode, the logs in the listeners would never be printed while the application runs.

   ```js
   async runningAudioRender1() {
     let audioStreamInfo = {
       samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000,
       channels: audio.AudioChannel.CHANNEL_1,
       sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S32LE,
       encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
     }
     let audioRendererInfo = {
       content: audio.ContentType.CONTENT_TYPE_MUSIC,
       usage: audio.StreamUsage.STREAM_USAGE_MEDIA,
       rendererFlags: 0 // 0 is the extended flag bit of the audio renderer; the default value is 0
     }
     let audioRendererOptions = {
       streamInfo: audioStreamInfo,
       rendererInfo: audioRendererInfo
     }

     // 1.1 Create the object
-    audioRenderer1 = await audio.createAudioRenderer(audioRendererOptions);
+    this.audioRenderer1 = await audio.createAudioRenderer(audioRendererOptions);
     console.info("Create audio renderer 1 success.");

     // 1.2 Set the focus mode to independent mode (1)
-    audioRenderer1.setInterruptMode(1).then( data => {
+    this.audioRenderer1.setInterruptMode(1).then( data => {
       console.info('audioRenderer1 setInterruptMode Success!');
     }).catch((err) => {
       console.error(`audioRenderer1 setInterruptMode Fail: ${err}`);
     });

     // 1.3 Register the listener
-    audioRenderer1.on('audioInterrupt', async(interruptEvent) => {
+    this.audioRenderer1.on('audioInterrupt', async(interruptEvent) => {
       console.info(`audioRenderer1 on audioInterrupt : ${JSON.stringify(interruptEvent)}`)
     });

     // 1.4 Start rendering
-    await audioRenderer1.start();
+    await this.audioRenderer1.start();
     console.info('startAudioRender1 success');

     // 1.5 Obtain the buffer size (a reasonable minimum for the renderer; other sizes may also be chosen)
-    const bufferSize = await audioRenderer1.getBufferSize();
+    const bufferSize = await this.audioRenderer1.getBufferSize();
     console.info(`audio bufferSize: ${bufferSize}`);

     // 1.6 Open the raw audio data file
     let dir = globalThis.fileDir; // The directory cannot be accessed directly without permission; be sure to use the sandbox path
     const path1 = dir + '/music001_48000_32_1.wav'; // Music file to render; the actual path is /data/storage/el2/base/haps/entry/files/music001_48000_32_1.wav
     let file1 = fs.openSync(path1, fs.OpenMode.READ_ONLY);
     let stat = await fs.stat(path1); // Music file information
     let buf = new ArrayBuffer(bufferSize);
-    let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1);
+    let len = stat.size % bufferSize == 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);

     // 1.7 Render the raw audio data from the buffer through the renderer
     for (let i = 0;i < len; i++) {
       let options = {
-        offset: i * this.bufferSize,
-        length: this.bufferSize
+        offset: i * bufferSize,
+        length: bufferSize
       }
-      let readsize = await fs.read(file.fd, buf, options)
+      let readsize = await fs.read(file1.fd, buf, options)
       let writeSize = await new Promise((resolve,reject)=>{
         this.audioRenderer1.write(buf,(err,writeSize)=>{
           if(err){
             reject(err)
           }else{
             resolve(writeSize)
           }
         })
       })
     }
     fs.close(file1)
-    await audioRenderer1.stop(); // Stop rendering
-    await audioRenderer1.release(); // Release resources
+    await this.audioRenderer1.stop(); // Stop rendering
+    await this.audioRenderer1.release(); // Release resources
   }
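
   // globalThis.fileDir is assumed to have been set elsewhere in the application.
   // A common pattern (an illustrative sketch; names follow the default project template)
   // is to capture the sandbox files directory in the UIAbility before the page loads,
   // for example in EntryAbility.ts:
   //
   //   onCreate(want, launchParam) {
   //     // filesDir resolves to /data/storage/el2/base/haps/entry/files in the sandbox.
   //     globalThis.fileDir = this.context.filesDir;
   //   }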

   async runningAudioRender2() {
     let audioStreamInfo = {
       samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000,
       channels: audio.AudioChannel.CHANNEL_1,
       sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S32LE,
       encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
     }
     let audioRendererInfo = {
       content: audio.ContentType.CONTENT_TYPE_MUSIC,
       usage: audio.StreamUsage.STREAM_USAGE_MEDIA,
       rendererFlags: 0 // 0 is the extended flag bit of the audio renderer; the default value is 0
     }
     let audioRendererOptions = {
       streamInfo: audioStreamInfo,
       rendererInfo: audioRendererInfo
     }

     // 2.1 Create the object
-    audioRenderer2 = await audio.createAudioRenderer(audioRendererOptions);
+    this.audioRenderer2 = await audio.createAudioRenderer(audioRendererOptions);
     console.info("Create audio renderer 2 success.");

     // 2.2 Set the focus mode to independent mode (1)
-    audioRenderer2.setInterruptMode(1).then( data => {
+    this.audioRenderer2.setInterruptMode(1).then( data => {
       console.info('audioRenderer2 setInterruptMode Success!');
     }).catch((err) => {
       console.error(`audioRenderer2 setInterruptMode Fail: ${err}`);
     });

     // 2.3 Register the listener
-    audioRenderer2.on('audioInterrupt', async(interruptEvent) => {
+    this.audioRenderer2.on('audioInterrupt', async(interruptEvent) => {
       console.info(`audioRenderer2 on audioInterrupt : ${JSON.stringify(interruptEvent)}`)
     });

     // 2.4 Start rendering
-    await audioRenderer2.start();
+    await this.audioRenderer2.start();
     console.info('startAudioRender2 success');

     // 2.5 Obtain the buffer size
-    const bufferSize = await audioRenderer2.getBufferSize();
+    const bufferSize = await this.audioRenderer2.getBufferSize();
     console.info(`audio bufferSize: ${bufferSize}`);

     // 2.6 Open the raw audio data file
     let dir = globalThis.fileDir; // The directory cannot be accessed directly without permission; be sure to use the sandbox path
     const path2 = dir + '/music002_48000_32_1.wav'; // Music file to render; the actual path is /data/storage/el2/base/haps/entry/files/music002_48000_32_1.wav
     let file2 = fs.openSync(path2, fs.OpenMode.READ_ONLY);
     let stat = await fs.stat(path2); // Music file information
     let buf = new ArrayBuffer(bufferSize);
-    let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1);
+    let len = stat.size % bufferSize == 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);

     // 2.7 Render the raw audio data from the buffer through the renderer
     for (let i = 0;i < len; i++) {
       let options = {
-        offset: i * this.bufferSize,
-        length: this.bufferSize
+        offset: i * bufferSize,
+        length: bufferSize
       }
-      let readsize = await fs.read(file.fd, buf, options)
+      let readsize = await fs.read(file2.fd, buf, options)
       let writeSize = await new Promise((resolve,reject)=>{
         this.audioRenderer2.write(buf,(err,writeSize)=>{
           if(err){
             reject(err)
           }else{
             resolve(writeSize)
           }
         })
       })
     }
     fs.close(file2)
-    await audioRenderer2.stop(); // Stop rendering
-    await audioRenderer2.release(); // Release resources
+    await this.audioRenderer2.stop(); // Stop rendering
+    await this.audioRenderer2.release(); // Release resources
   }
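
   // Note: the combined entry point below awaits runningAudioRender1() to completion
   // (including stop and release) before starting runningAudioRender2(), so the two
   // streams never actually contend for focus. To observe the interrupt callbacks,
   // the calls would need to overlap, for example (a sketch):
   //
   //   async testConcurrent() {
   //     let first = this.runningAudioRender1();   // Do not await yet
   //     let second = this.runningAudioRender2();  // start() here preempts the first stream
   //     await Promise.all([first, second]);
   //   }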

-  async writeBuffer(buf, audioRender) {
-    let writtenbytes;
-    await audioRender.write(buf).then((value) => {
-      writtenbytes = value;
-      console.info(`Actual written bytes: ${writtenbytes} `);
-    });
-    if (typeof(writtenbytes) != 'number' || writtenbytes < 0) {
-      console.error('get Write buffer failed. check the state of renderer');
-    }
-  }
-
+  // Combined entry point
+  async test() {
+    await this.runningAudioRender1();
+    await this.runningAudioRender2();
+  }
   ```
\ No newline at end of file
diff --git a/zh-cn/application-dev/reference/apis/js-apis-audio.md b/zh-cn/application-dev/reference/apis/js-apis-audio.md
index a147cdf988..da9ebd7910 100644
--- a/zh-cn/application-dev/reference/apis/js-apis-audio.md
+++ b/zh-cn/application-dev/reference/apis/js-apis-audio.md
@@ -4533,15 +4533,15 @@
 let filePath = path + '/StarWars10s-2C-48000-4SW.wav';
 let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
 let stat = await fs.stat(path);
 let buf = new ArrayBuffer(bufferSize);
-let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1);
+let len = stat.size % bufferSize == 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);
 for (let i = 0;i < len; i++) {
   let options = {
-    offset: i * this.bufferSize,
-    length: this.bufferSize
+    offset: i * bufferSize,
+    length: bufferSize
   }
   let readsize = await fs.read(file.fd, buf, options)
   let writeSize = await new Promise((resolve,reject)=>{
-    this.audioRenderer.write(buf,(err,writeSize)=>{
+    audioRenderer.write(buf,(err,writeSize)=>{
       if(err){
         reject(err)
       }else{
         resolve(writeSize)
       }
     })
   })
 }
@@ -4586,15 +4586,15 @@
 let filePath = path + '/StarWars10s-2C-48000-4SW.wav';
 let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
 let stat = await fs.stat(path);
 let buf = new ArrayBuffer(bufferSize);
-let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1);
+let len = stat.size % bufferSize == 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);
 for (let i = 0;i < len; i++) {
   let options = {
-    offset: i * this.bufferSize,
-    length: this.bufferSize
+    offset: i * bufferSize,
+    length: bufferSize
   }
   let readsize = await fs.read(file.fd, buf, options)
   try{
-    let writeSize = await this.audioRenderer.write(buf);
+    let writeSize = await audioRenderer.write(buf);
   } catch(err) {
     console.error(`audioRenderer.write err: ${err}`);
   }
 }
-- 
GitLab