Commit 5dec6355 authored by liyuhang

Modified docs for audioRenderer and audioCapturer

Signed-off-by: liyuhang <liyuhang24@Huawei.com>
Change-Id: I9352a92873f7d4dc8c8ef780c48daec3608e377c
Signed-off-by: liyuhang <liyuhang24@Huawei.com>
Parent d54a0b8e
@@ -32,86 +32,70 @@ AudioCapturer provides methods for obtaining raw audio files. Developers can...
Set the parameters of the audio capturer in audioCapturerOptions. The created instance can be used to capture audio, control the capture, obtain the capture state, and register notification callbacks.
```js
import audio from '@ohos.multimedia.audio';

let audioStreamInfo = {
  samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
  channels: audio.AudioChannel.CHANNEL_1,
  sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
  encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
}
let audioCapturerInfo = {
  source: audio.SourceType.SOURCE_TYPE_MIC,
  capturerFlags: 0 // 0 is the extended flag of the audio capturer; the default value is 0
}
let audioCapturerOptions = {
  streamInfo: audioStreamInfo,
  capturerInfo: audioCapturerInfo
}

let audioCapturer = await audio.createAudioCapturer(audioCapturerOptions);
console.log('AudioRecLog: Create audio capturer success.');
```
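For reference, audio.createAudioCapturer() can also be called in its asynchronous-callback form, which makes the error path explicit. A minimal sketch using the same audioCapturerOptions as above (the err/capturer parameter names are illustrative):
```js
audio.createAudioCapturer(audioCapturerOptions, (err, capturer) => {
  if (err) {
    // Creation failed, for example because the options are invalid.
    console.error('AudioRecLog: capturer creation failed: ' + JSON.stringify(err));
    return;
  }
  console.info('AudioRecLog: capturer created, state: ' + capturer.state);
});
```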
2. Call start() to start or resume capturing.
Once the capturer is started, its state changes to STATE_RUNNING, and the application can then start reading the buffer.
```js
import audio from '@ohos.multimedia.audio';

async function startCapturer() {
  let state = audioCapturer.state;
  // The capturer can be started only from the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state.
  if (state != audio.AudioState.STATE_PREPARED && state != audio.AudioState.STATE_PAUSED &&
      state != audio.AudioState.STATE_STOPPED) {
    console.info('Capturer is not in a correct state to start');
    return;
  }

  await audioCapturer.start();

  state = audioCapturer.state;
  if (state == audio.AudioState.STATE_RUNNING) {
    console.info('AudioRecLog: Capturer started');
  } else {
    console.error('AudioRecLog: Capturer start failed');
  }
}
```
3. Read the audio data captured by the capturer and convert it into a byte stream. Call read() repeatedly to read the data until the application is ready to stop capturing.
The following example writes the captured data to a file.
```js
import fileio from '@ohos.fileio';

let state = audioCapturer.state;
// read() can be called only when the capturer is in the STATE_RUNNING state.
if (state != audio.AudioState.STATE_RUNNING) {
  console.info('Capturer is not in a correct state to read');
  return;
}

const path = '/data/data/.pulse_dir/capture_js.wav'; // Path for storing the captured audio file

let fd = fileio.openSync(path, 0o102, 0o777);
if (fd !== null) {
  console.info('AudioRecLog: file fd created');
@@ -126,13 +110,13 @@ AudioCapturer provides methods for obtaining raw audio files. Developers can...
console.info('AudioRecLog: file fd opened in append mode');
}
let numBuffersToCapture = 150; // Write data 150 times in a loop
while (numBuffersToCapture) {
  let buffer = await audioCapturer.read(bufferSize, true);
  if (typeof buffer === 'undefined') {
    console.info('AudioRecLog: read buffer failed');
  } else {
    let number = fileio.writeSync(fd, buffer);
    console.info('AudioRecLog: data written: ' + number);
  }
@@ -140,24 +124,126 @@ AudioCapturer provides methods for obtaining raw audio files. Developers can...
}
```
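The read loop above relies on a bufferSize value obtained before reading starts; the corresponding lines are collapsed in this diff. A minimal sketch of how it can be obtained (see also step 6):
```js
// Query a reasonable minimum buffer size for read().
let bufferSize = await audioCapturer.getBufferSize();
console.info('AudioRecLog: buffer size: ' + bufferSize);
```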
4. After the capture is complete, call stop() to stop capturing.
```js
async function StopCapturer() {
  let state = audioCapturer.state;
  // The capturer can be stopped only when it is in the STATE_RUNNING or STATE_PAUSED state.
  if (state != audio.AudioState.STATE_RUNNING && state != audio.AudioState.STATE_PAUSED) {
    console.info('AudioRecLog: Capturer is not running or paused');
    return;
  }

  await audioCapturer.stop();

  state = audioCapturer.state;
  if (state == audio.AudioState.STATE_STOPPED) {
    console.info('AudioRecLog: Capturer stopped');
  } else {
    console.error('AudioRecLog: Capturer stop failed');
  }
}
```
5. After the task is complete, call release() to release related resources.
```js
async function releaseCapturer() {
  let state = audioCapturer.state;
  // The capturer can be released only when it is not in the STATE_RELEASED or STATE_NEW state.
  if (state == audio.AudioState.STATE_RELEASED || state == audio.AudioState.STATE_NEW) {
    console.info('AudioRecLog: Capturer already released');
    return;
  }

  await audioCapturer.release();

  state = audioCapturer.state;
  if (state == audio.AudioState.STATE_RELEASED) {
    console.info('AudioRecLog: Capturer released');
  } else {
    console.info('AudioRecLog: Capturer release failed');
  }
}
```
6. (Optional) Obtain the capturer information.
The following code can be used to obtain information about the capturer.
```js
// Obtain the current capturer state.
let state = audioCapturer.state;
// Obtain the capturer information.
let audioCapturerInfo : audio.AudioCapturerInfo = await audioCapturer.getCapturerInfo();
// Obtain the audio stream information.
let audioStreamInfo : audio.AudioStreamInfo = await audioCapturer.getStreamInfo();
// Obtain the audio stream ID.
let audioStreamId : number = await audioCapturer.getAudioStreamId();
// Obtain the Unix timestamp, in nanoseconds.
let audioTime : number = await audioCapturer.getAudioTime();
// Obtain a reasonable minimum buffer size.
let bufferSize : number = await audioCapturer.getBufferSize();
```
7. (Optional) Call on('markReach') to subscribe to the mark-reached event of the capturer, and call off('markReach') to unsubscribe from it.
After the markReach listener is registered, the callback is triggered and the set value is returned once the number of captured frames reaches the value you set (1000 in this example).
```js
audioCapturer.on('markReach', 1000, (reachNumber) => {
  console.info('Mark reach event Received');
  console.info('The Capturer reached frame: ' + reachNumber);
});

audioCapturer.off('markReach'); // Unsubscribe from the markReach event; mark-reached events can no longer be received
```
8. (Optional) Call on('periodReach') to subscribe to the period-reached event of the capturer, and call off('periodReach') to unsubscribe from it.
After the periodReach listener is registered, the callback is triggered and the set value is returned **each time** the number of captured frames reaches the value you set (1000 in this example).
```js
audioCapturer.on('periodReach', 1000, (reachNumber) => {
  console.info('Period reach event Received');
  console.info('In this period, the Capturer reached frame: ' + reachNumber);
});

audioCapturer.off('periodReach'); // Unsubscribe from the periodReach event; period-reached events can no longer be received
```
9. (Optional) Call on('stateChange') to subscribe to capturer state change events. If the application needs to perform operations when the capturer state changes, it can subscribe to this event; when the state changes, the callback returns the new capturer state.
```js
audioCapturer.on('stateChange', (state) => {
  console.info('AudioCapturerLog: Changed State to : ' + state)
  switch (state) {
    case audio.AudioState.STATE_PREPARED:
      console.info('--------CHANGE IN AUDIO STATE----------PREPARED--------------');
      console.info('Audio State is : Prepared');
      break;
    case audio.AudioState.STATE_RUNNING:
      console.info('--------CHANGE IN AUDIO STATE----------RUNNING--------------');
      console.info('Audio State is : Running');
      break;
    case audio.AudioState.STATE_STOPPED:
      console.info('--------CHANGE IN AUDIO STATE----------STOPPED--------------');
      console.info('Audio State is : stopped');
      break;
    case audio.AudioState.STATE_RELEASED:
      console.info('--------CHANGE IN AUDIO STATE----------RELEASED--------------');
      console.info('Audio State is : released');
      break;
    default:
      console.info('--------CHANGE IN AUDIO STATE----------INVALID--------------');
      console.info('Audio State is : invalid');
      break;
  }
});
```
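Putting the steps together, the following sketch strings the APIs above into a single flow. It assumes audioCapturer was created as in step 1 and reuses the startCapturer, StopCapturer, and releaseCapturer helpers defined above; the read loop is reduced to a single read for brevity:
```js
async function captureOnce() {
  await startCapturer();                                     // Step 2: start capturing
  let bufferSize = await audioCapturer.getBufferSize();      // Minimum read size (see step 6)
  let buffer = await audioCapturer.read(bufferSize, true);   // Step 3: read one buffer of PCM data
  console.info('AudioRecLog: captured ' + buffer.byteLength + ' bytes');
  await StopCapturer();                                      // Step 4: stop capturing
  await releaseCapturer();                                   // Step 5: release resources
}
```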
@@ -25,32 +25,244 @@ AudioRenderer provides APIs for rendering audio files and controlling playback. Developers can...
## How to Develop
For details about the APIs, see [AudioRenderer in the Audio Management API reference](../reference/apis/js-apis-audio.md#audiorenderer8).
1. Use createAudioRenderer() to create an AudioRenderer instance.
Set the related parameters in audioRendererOptions. The created instance can be used to render audio, control playback, obtain the rendering state, and register notification callbacks.
```js
import audio from '@ohos.multimedia.audio';

let audioStreamInfo = {
  samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
  channels: audio.AudioChannel.CHANNEL_1,
  sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
  encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
}
let audioRendererInfo = {
  content: audio.ContentType.CONTENT_TYPE_SPEECH,
  usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION,
  rendererFlags: 0 // 0 is the extended flag of the audio renderer; the default value is 0
}
let audioRendererOptions = {
  streamInfo: audioStreamInfo,
  rendererInfo: audioRendererInfo
}

let audioRenderer = await audio.createAudioRenderer(audioRendererOptions);
console.log("Create audio renderer success.");
```
2. Call start() to start or resume playback.
```js
async function startRenderer() {
  let state = audioRenderer.state;
  // The renderer can be started only from the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state.
  if (state != audio.AudioState.STATE_PREPARED && state != audio.AudioState.STATE_PAUSED &&
      state != audio.AudioState.STATE_STOPPED) {
    console.info('Renderer is not in a correct state to start');
    return;
  }

  await audioRenderer.start();

  state = audioRenderer.state;
  if (state == audio.AudioState.STATE_RUNNING) {
    console.info('Renderer started');
  } else {
    console.error('Renderer start failed');
  }
}
```
Once the renderer is started, its state changes to STATE_RUNNING, and the application can then begin writing data to the buffer (see step 3).
3. Call write() to write data to the buffer.
Read the audio data to be played into the buffer, and call write() repeatedly to write it.
```js
import fileio from '@ohos.fileio';

async function writeBuffer(buf) {
  let state = audioRenderer.state;
  // The renderer state must be STATE_RUNNING when data is written.
  if (state != audio.AudioState.STATE_RUNNING) {
    console.error('Renderer is not running, do not write');
    this.isPlay = false;
    return;
  }
  let writtenbytes = await audioRenderer.write(buf);
  console.info('Actual written bytes: ' + writtenbytes);
  if (writtenbytes < 0) {
    console.error('Write buffer failed. check the state of renderer');
  }
}

// A reasonable minimum buffer size for the renderer (buffers of other sizes also work).
const bufferSize = await audioRenderer.getBufferSize();
const path = '/data/file_example_WAV_2MG.wav'; // Music file to render
let ss = fileio.createStreamSync(path, 'r');
const totalSize = fileio.statSync(path).size; // Size of the music file
let discardHeader = new ArrayBuffer(bufferSize);
ss.readSync(discardHeader);
let rlen = 0;
rlen += bufferSize;

let id = setInterval(() => {
  if (this.isRelease) { // Stop rendering if the renderer has been released
    ss.closeSync();
    stopRenderer();
    clearInterval(id);
    return;
  }
  if (this.isPlay) {
    if (rlen >= totalSize) { // Stop rendering when the entire file has been read
      ss.closeSync();
      stopRenderer();
      clearInterval(id);
      return;
    }
    let buf = new ArrayBuffer(bufferSize);
    rlen += ss.readSync(buf);
    console.info('Total bytes read from file: ' + rlen);
    writeBuffer(buf);
  } else {
    console.info('check after next interval');
  }
}, 30); // Set the timer interval based on the audio format, in milliseconds
```
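The 30 ms interval above is only an example. As a rough illustration (not part of the original guide), the interval can be derived from the stream parameters configured in step 1, so that roughly one buffer is written per buffer's worth of playback time:
```js
// SAMPLE_RATE_44100 × CHANNEL_1 × 2 bytes per S16LE sample = 88200 bytes of audio per second.
const bytesPerSecond = 44100 * 1 * 2;
// Playback time (in ms) covered by one buffer of bufferSize bytes.
const intervalMs = Math.floor(bufferSize / bytesPerSecond * 1000);
console.info('Suggested write interval (ms): ' + intervalMs);
```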
4. (Optional) Call pause() or stop() to pause or stop rendering audio data.
```js
async function pauseRenderer() {
  let state = audioRenderer.state;
  // The renderer can be paused only when it is in the STATE_RUNNING state.
  if (state != audio.AudioState.STATE_RUNNING) {
    console.info('Renderer is not running');
    return;
  }

  await audioRenderer.pause();

  state = audioRenderer.state;
  if (state == audio.AudioState.STATE_PAUSED) {
    console.info('Renderer paused');
  } else {
    console.error('Renderer pause failed');
  }
}

async function stopRenderer() {
  let state = audioRenderer.state;
  // The renderer can be stopped only when it is in the STATE_RUNNING or STATE_PAUSED state.
  if (state != audio.AudioState.STATE_RUNNING && state != audio.AudioState.STATE_PAUSED) {
    console.info('Renderer is not running or paused');
    return;
  }

  await audioRenderer.stop();

  state = audioRenderer.state;
  if (state == audio.AudioState.STATE_STOPPED) {
    console.info('Renderer stopped');
  } else {
    console.error('Renderer stop failed');
  }
}
```
5. (Optional) Call drain() to empty the buffer.
```js
async function drainRenderer() {
  let state = audioRenderer.state;
  // drain() can be used only when the renderer is in the STATE_RUNNING state.
  if (state != audio.AudioState.STATE_RUNNING) {
    console.info('Renderer is not running');
    return;
  }

  await audioRenderer.drain();

  state = audioRenderer.state;
}
```
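A common pattern (an assumption about typical usage, not taken from this guide) is to call drain() after the final write() so that buffered data finishes playing before the renderer is stopped:
```js
async function finishPlayback(lastBuf) {
  await audioRenderer.write(lastBuf); // Write the final buffer.
  await audioRenderer.drain();        // Let the data remaining in the buffer be rendered.
  await stopRenderer();               // Then stop the renderer (see step 4).
}
```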
6. After the task is complete, call release() to release related resources.
AudioRenderer uses a large amount of system resources, so make sure to release them once the related task is done.
```js
async function releaseRenderer() {
  let state = audioRenderer.state;
  // The renderer can be released only when it is not in the STATE_RELEASED or STATE_NEW state.
  if (state == audio.AudioState.STATE_RELEASED || state == audio.AudioState.STATE_NEW) {
    console.info('Renderer already released');
    return;
  }

  await audioRenderer.release();

  state = audioRenderer.state;
  if (state == audio.AudioState.STATE_RELEASED) {
    console.info('Renderer released');
  } else {
    console.info('Renderer release failed');
  }
}
```
7. (Optional) Obtain the renderer information.
The following code can be used to obtain information about the renderer.
```js
// Obtain the current renderer state.
let state = audioRenderer.state;
// Obtain the renderer information.
let audioRendererInfo : audio.AudioRendererInfo = await audioRenderer.getRendererInfo();
// Obtain the audio stream information.
let audioStreamInfo : audio.AudioStreamInfo = await audioRenderer.getStreamInfo();
// Obtain the audio stream ID.
let audioStreamId : number = await audioRenderer.getAudioStreamId();
// Obtain the Unix timestamp, in nanoseconds.
let audioTime : number = await audioRenderer.getAudioTime();
// Obtain a reasonable minimum buffer size.
let bufferSize : number = await audioRenderer.getBufferSize();
// Obtain the render rate.
let renderRate : audio.AudioRendererRate = await audioRenderer.getRenderRate();
```
8. (Optional) Set renderer parameters.
The following code can be used to set parameters of the renderer.
```js
// Set the render rate to the normal speed.
let renderRate : audio.AudioRendererRate = audio.AudioRendererRate.RENDER_RATE_NORMAL;
await audioRenderer.setRenderRate(renderRate);
// Set the interrupt mode of the renderer to SHARE_MODE.
let interruptMode : audio.InterruptMode = audio.InterruptMode.SHARE_MODE;
await audioRenderer.setInterruptMode(interruptMode);
// Set the volume of the stream to 0.5 (the valid range is 0.0 to 1.0).
let volume : number = 0.5;
await audioRenderer.setVolume(volume);
```
9. (Optional) Call on('audioInterrupt') to subscribe to audio interruption events of the renderer, and call off('audioInterrupt') to unsubscribe from them.
Stream-A is interrupted when Stream-B, which has a higher or equal priority, requests to become active and use the output device.
@@ -59,7 +271,7 @@ AudioRenderer provides APIs for rendering audio files and controlling playback. Developers can...
In the case of an audio interruption, the application may fail to write audio data. Applications that are not aware of interruptions and do not handle them are therefore advised to check audioRenderer.state before writing audio data. Subscribing to the audio interruption event provides more detailed information; for details, see [InterruptEvent](../reference/apis/js-apis-audio.md#interruptevent9).
```js
audioRenderer.on('audioInterrupt', (interruptEvent) => {
  console.info('InterruptEvent Received');
  console.info('InterruptType: ' + interruptEvent.eventType);
  console.info('InterruptForceType: ' + interruptEvent.forceType);
@@ -67,34 +279,28 @@ AudioRenderer provides APIs for rendering audio files and controlling playback. Developers can...
  if (interruptEvent.forceType == audio.InterruptForceType.INTERRUPT_FORCE) {
    switch (interruptEvent.hintType) {
      // Forced pause initiated by the audio framework. Stop writing data to avoid data loss.
      case audio.InterruptHint.INTERRUPT_HINT_PAUSE:
        isPlay = false;
        break;
      // Forced stop initiated by the audio framework. Stop writing data to avoid data loss.
      case audio.InterruptHint.INTERRUPT_HINT_STOP:
        isPlay = false;
        break;
      // Forced volume reduction initiated by the audio framework.
      case audio.InterruptHint.INTERRUPT_HINT_DUCK:
        break;
      // Volume restoration initiated by the audio framework.
      case audio.InterruptHint.INTERRUPT_HINT_UNDUCK:
        break;
    }
  } else if (interruptEvent.forceType == audio.InterruptForceType.INTERRUPT_SHARE) {
    switch (interruptEvent.hintType) {
      // The app is advised to resume rendering.
      case audio.InterruptHint.INTERRUPT_HINT_RESUME:
        startRenderer();
        break;
      // The audio stream has been interrupted; the app decides whether to continue (here it chooses to pause).
      case audio.InterruptHint.INTERRUPT_HINT_PAUSE:
        isPlay = false;
        pauseRenderer();
@@ -102,137 +308,63 @@ AudioRenderer provides APIs for rendering audio files and controlling playback. Developers can...
    }
  }
});

audioRenderer.off('audioInterrupt'); // Unsubscribe from the audio interruption event; audio interruption events can no longer be received
```
10. (Optional) Call on('markReach') to subscribe to the mark-reached event of the renderer, and call off('markReach') to unsubscribe from it.
After the markReach listener is registered, the callback is triggered and the set value is returned once the number of rendered frames reaches the value you set (1000 in this example).
```js
audioRenderer.on('markReach', 1000, (reachNumber) => {
  console.info('Mark reach event Received');
  console.info('The renderer reached frame: ' + reachNumber);
});

audioRenderer.off('markReach'); // Unsubscribe from the markReach event; mark-reached events can no longer be received
```
11. (Optional) Call on('periodReach') to subscribe to the period-reached event of the renderer, and call off('periodReach') to unsubscribe from it.
After the periodReach listener is registered, the callback is triggered and the set value is returned **each time** the number of rendered frames reaches the value you set (1000 in this example).
```js
audioRenderer.on('periodReach', 1000, (reachNumber) => {
  console.info('Period reach event Received');
  console.info('In this period, the renderer reached frame: ' + reachNumber);
});

audioRenderer.off('periodReach'); // Unsubscribe from the periodReach event; period-reached events can no longer be received
```
12. (Optional) Call on('stateChange') to subscribe to state change events of the renderer.
After the stateChange listener is registered, the callback is triggered and the current renderer state is returned whenever the renderer state changes.
```js
audioRenderer.on('stateChange', (audioState) => {
  console.info('State change event Received');
  console.info('Current renderer state is: ' + audioState);
});
```
13. (Optional) Handle exceptions thrown by on().
If the event string passed to on() does not match any supported event, or if a parameter has an incorrect type, on() throws an exception that needs to be caught with try...catch.
```js
try {
  audioRenderer.on('invalidInput', () => { // The event string does not match any supported event.
  })
} catch (err) {
  console.info(`Call on function error, ${err}`); // The program throws a 401 exception.
}
try {
  audioRenderer.on(1, () => { // The parameter type is incorrect.
  })
} catch (err) {
  console.info(`Call on function error, ${err}`); // The program throws a 6800101 exception.
}
```