提交 eb65b5aa 编写于 作者: J jiao_yanlin

Sample code problem modification

Signed-off-by: Njiao_yanlin <jiaoyanlin@huawei.com>
上级 06d61182
...@@ -21,38 +21,48 @@ AudioCapturer提供了用于获取原始音频文件的方法。开发者可以 ...@@ -21,38 +21,48 @@ AudioCapturer提供了用于获取原始音频文件的方法。开发者可以
## 约束与限制 ## 约束与限制
开发者在进行音频数据采集功能开发前,需要先对所开发的应用配置麦克风权限(ohos.permission.MICROPHONE),权限配置相关内容可参考:[访问控制授权申请指导](../security/accesstoken-guidelines.md) 开发者在进行音频数据采集功能开发前,需要先对所开发的应用配置麦克风权限(ohos.permission.MICROPHONE),配置方式请参见[访问控制授权申请](../security/accesstoken-guidelines.md#配置文件权限声明)
## 开发指导 ## 开发指导
详细API含义可参考:[音频管理API文档AudioCapturer](../reference/apis/js-apis-audio.md#audiocapturer8) 详细API含义可参考:[音频管理API文档AudioCapturer](../reference/apis/js-apis-audio.md#audiocapturer8)
1. 使用createAudioCapturer()创建一个AudioCapturer实例。 1. 使用createAudioCapturer()创建一个全局的AudioCapturer实例。
在audioCapturerOptions中设置音频采集器的相关参数。该实例可用于音频采集、控制和获取采集状态,以及注册通知回调。 在audioCapturerOptions中设置音频采集器的相关参数。该实例可用于音频采集、控制和获取采集状态,以及注册通知回调。
```js ```js
import audio from '@ohos.multimedia.audio'; import audio from '@ohos.multimedia.audio';
import fs from '@ohos.file.fs'; //便于步骤3 read函数调用
let audioStreamInfo = {
samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, //音频渲染相关接口自测试
channels: audio.AudioChannel.CHANNEL_1, @Entry
sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, @Component
encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW struct AudioRenderer {
} @State message: string = 'Hello World'
private audioCapturer : audio.AudioCapturer; //供全局调用
let audioCapturerInfo = {
source: audio.SourceType.SOURCE_TYPE_MIC, async initAudioCapturer(){
capturerFlags: 0 // 0是音频采集器的扩展标志位,默认为0 let audioStreamInfo = {
} samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
channels: audio.AudioChannel.CHANNEL_1,
let audioCapturerOptions = { sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
streamInfo: audioStreamInfo, encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
capturerInfo: audioCapturerInfo }
}
let audioCapturerInfo = {
let audioCapturer = await audio.createAudioCapturer(audioCapturerOptions); source: audio.SourceType.SOURCE_TYPE_MIC,
console.log('AudioRecLog: Create audio capturer success.'); capturerFlags: 0 // 0是音频采集器的扩展标志位,默认为0
}
let audioCapturerOptions = {
streamInfo: audioStreamInfo,
capturerInfo: audioCapturerInfo
}
this.audioCapturer = await audio.createAudioCapturer(audioCapturerOptions);
console.log('AudioRecLog: Create audio capturer success.');
}
``` ```
2. 调用start()方法来启动/恢复采集任务。 2. 调用start()方法来启动/恢复采集任务。
...@@ -60,23 +70,18 @@ AudioCapturer提供了用于获取原始音频文件的方法。开发者可以 ...@@ -60,23 +70,18 @@ AudioCapturer提供了用于获取原始音频文件的方法。开发者可以
启动完成后,采集器状态将变更为STATE_RUNNING,然后应用可以开始读取缓冲区。 启动完成后,采集器状态将变更为STATE_RUNNING,然后应用可以开始读取缓冲区。
```js ```js
import audio from '@ohos.multimedia.audio'; async startCapturer() {
let state = this.audioCapturer.state;
async function startCapturer() {
let state = audioCapturer.state;
// Capturer start时的状态应该是STATE_PREPARED、STATE_PAUSED和STATE_STOPPED之一. // Capturer start时的状态应该是STATE_PREPARED、STATE_PAUSED和STATE_STOPPED之一.
if (state != audio.AudioState.STATE_PREPARED || state != audio.AudioState.STATE_PAUSED || if (state == audio.AudioState.STATE_PREPARED || state == audio.AudioState.STATE_PAUSED ||
state != audio.AudioState.STATE_STOPPED) { state == audio.AudioState.STATE_STOPPED) {
console.info('Capturer is not in a correct state to start'); await this.audioCapturer.start();
return; state = this.audioCapturer.state;
} if (state == audio.AudioState.STATE_RUNNING) {
await audioCapturer.start(); console.info('AudioRecLog: Capturer started');
} else {
state = audioCapturer.state; console.error('AudioRecLog: Capturer start failed');
if (state == audio.AudioState.STATE_RUNNING) { }
console.info('AudioRecLog: Capturer started');
} else {
console.error('AudioRecLog: Capturer start failed');
} }
} }
``` ```
...@@ -86,91 +91,88 @@ AudioCapturer提供了用于获取原始音频文件的方法。开发者可以 ...@@ -86,91 +91,88 @@ AudioCapturer提供了用于获取原始音频文件的方法。开发者可以
参考以下示例,将采集到的数据写入文件。 参考以下示例,将采集到的数据写入文件。
```js ```js
import fs from '@ohos.file.fs'; async readData(){
let state = this.audioCapturer.state;
let state = audioCapturer.state; // 只有状态为STATE_RUNNING的时候才可以read.
// 只有状态为STATE_RUNNING的时候才可以read. if (state != audio.AudioState.STATE_RUNNING) {
if (state != audio.AudioState.STATE_RUNNING) { console.info('Capturer is not in a correct state to read');
console.info('Capturer is not in a correct state to read'); return;
return;
}
const path = '/data/data/.pulse_dir/capture_js.wav'; // 采集到的音频文件存储路径
let file = fs.openSync(filePath, 0o2);
let fd = file.fd;
if (file !== null) {
console.info('AudioRecLog: file created');
} else {
console.info('AudioRecLog: file create : FAILED');
return;
}
if (fd !== null) {
console.info('AudioRecLog: file fd opened in append mode');
}
let numBuffersToCapture = 150; // 循环写入150次
let count = 0;
while (numBuffersToCapture) {
let bufferSize = await audioCapturer.getBufferSize();
let buffer = await audioCapturer.read(bufferSize, true);
let options = {
offset: count * this.bufferSize,
length: this.bufferSize
} }
if (typeof(buffer) == undefined) { const path = '/data/data/.pulse_dir/capture_js.wav'; // 采集到的音频文件存储路径
console.info('AudioRecLog: read buffer failed'); let file = fs.openSync(path, 0o2);
let fd = file.fd;
if (file !== null) {
console.info('AudioRecLog: file created');
} else { } else {
let number = fs.writeSync(fd, buffer, options); console.info('AudioRecLog: file create : FAILED');
console.info(`AudioRecLog: data written: ${number}`); return;
} }
numBuffersToCapture--; if (fd !== null) {
count++; console.info('AudioRecLog: file fd opened in append mode');
}
let numBuffersToCapture = 150; // 循环写入150次
let count = 0;
while (numBuffersToCapture) {
this.bufferSize = await this.audioCapturer.getBufferSize();
let buffer = await this.audioCapturer.read(this.bufferSize, true);
let options = {
offset: count * this.bufferSize,
length: this.bufferSize
}
if (typeof(buffer) == undefined) {
console.info('AudioRecLog: read buffer failed');
} else {
let number = fs.writeSync(fd, buffer, options);
console.info(`AudioRecLog: data written: ${number}`);
}
numBuffersToCapture--;
count++;
}
} }
``` ```
4. 采集完成后,调用stop方法,停止录制。 4. 采集完成后,调用stop方法,停止录制。
```js ```js
async function StopCapturer() { async StopCapturer() {
let state = audioCapturer.state; let state = this.audioCapturer.state;
// 只有采集器状态为STATE_RUNNING或STATE_PAUSED的时候才可以停止 // 只有采集器状态为STATE_RUNNING或STATE_PAUSED的时候才可以停止
if (state != audio.AudioState.STATE_RUNNING && state != audio.AudioState.STATE_PAUSED) { if (state != audio.AudioState.STATE_RUNNING && state != audio.AudioState.STATE_PAUSED) {
console.info('AudioRecLog: Capturer is not running or paused'); console.info('AudioRecLog: Capturer is not running or paused');
return; return;
} }
await audioCapturer.stop(); await this.audioCapturer.stop();
state = audioCapturer.state; state = this.audioCapturer.state;
if (state == audio.AudioState.STATE_STOPPED) { if (state == audio.AudioState.STATE_STOPPED) {
console.info('AudioRecLog: Capturer stopped'); console.info('AudioRecLog: Capturer stopped');
} else { } else {
console.error('AudioRecLog: Capturer stop failed'); console.error('AudioRecLog: Capturer stop failed');
} }
} }
``` ```
5. 任务结束,调用release()方法释放相关资源。 5. 任务结束,调用release()方法释放相关资源。
```js ```js
async function releaseCapturer() { async releaseCapturer() {
let state = audioCapturer.state; let state = this.audioCapturer.state;
// 采集器状态不是STATE_RELEASED或STATE_NEW状态,才能release // 采集器状态不是STATE_RELEASED或STATE_NEW状态,才能release
if (state == audio.AudioState.STATE_RELEASED || state == audio.AudioState.STATE_NEW) { if (state == audio.AudioState.STATE_RELEASED || state == audio.AudioState.STATE_NEW) {
console.info('AudioRecLog: Capturer already released'); console.info('AudioRecLog: Capturer already released');
return; return;
} }
await audioCapturer.release(); await this.audioCapturer.release();
state = audioCapturer.state; state = this.audioCapturer.state;
if (state == audio.AudioState.STATE_RELEASED) { if (state == audio.AudioState.STATE_RELEASED) {
console.info('AudioRecLog: Capturer released'); console.info('AudioRecLog: Capturer released');
} else { } else {
console.info('AudioRecLog: Capturer release failed'); console.info('AudioRecLog: Capturer release failed');
} }
} }
``` ```
6. (可选)获取采集器相关信息 6. (可选)获取采集器相关信息
...@@ -178,23 +180,20 @@ AudioCapturer提供了用于获取原始音频文件的方法。开发者可以 ...@@ -178,23 +180,20 @@ AudioCapturer提供了用于获取原始音频文件的方法。开发者可以
通过以下代码,可以获取采集器的相关信息。 通过以下代码,可以获取采集器的相关信息。
```js ```js
// 获取当前采集器状态 async getAudioCapturerInfo(){
let state = audioCapturer.state; // 获取当前采集器状态
let state = this.audioCapturer.state;
// 获取采集器信息 // 获取采集器信息
let audioCapturerInfo : audio.AuduioCapturerInfo = await audioCapturer.getCapturerInfo(); let audioCapturerInfo : audio.AudioCapturerInfo = await this.audioCapturer.getCapturerInfo();
// 获取音频流信息
// 获取音频流信息 let audioStreamInfo : audio.AudioStreamInfo = await this.audioCapturer.getStreamInfo();
let audioStreamInfo : audio.AudioStreamInfo = await audioCapturer.getStreamInfo(); // 获取音频流ID
let audioStreamId : number = await this.audioCapturer.getAudioStreamId();
// 获取音频流ID // 获取纳秒形式的Unix时间戳
let audioStreamId : number = await audioCapturer.getAudioStreamId(); let audioTime : number = await this.audioCapturer.getAudioTime();
// 获取合理的最小缓冲区大小
// 获取纳秒形式的Unix时间戳 let bufferSize : number = await this.audioCapturer.getBufferSize();
let audioTime : number = await audioCapturer.getAudioTime(); }
// 获取合理的最小缓冲区大小
let bufferSize : number = await audioCapturer.getBufferSize();
``` ```
7. (可选)使用on('markReach')方法订阅采集器标记到达事件,使用off('markReach')取消订阅事件。 7. (可选)使用on('markReach')方法订阅采集器标记到达事件,使用off('markReach')取消订阅事件。
...@@ -202,12 +201,13 @@ AudioCapturer提供了用于获取原始音频文件的方法。开发者可以 ...@@ -202,12 +201,13 @@ AudioCapturer提供了用于获取原始音频文件的方法。开发者可以
注册markReach监听后,当采集器采集的帧数到达设定值时,会触发回调并返回设定的值。 注册markReach监听后,当采集器采集的帧数到达设定值时,会触发回调并返回设定的值。
```js ```js
audioCapturer.on('markReach', (reachNumber) => { async markReach(){
console.info('Mark reach event Received'); this.audioCapturer.on('markReach', 10, (reachNumber) => {
console.info(`The Capturer reached frame: ${reachNumber}`); console.info('Mark reach event Received');
}); console.info(`The Capturer reached frame: ${reachNumber}`);
});
audioCapturer.off('markReach'); // 取消markReach事件的订阅,后续将无法监听到“标记到达”事件 this.audioCapturer.off('markReach'); // 取消markReach事件的订阅,后续将无法监听到“标记到达”事件
}
``` ```
8. (可选)使用on('periodReach')方法订阅采集器区间标记到达事件,使用off('periodReach')取消订阅事件。 8. (可选)使用on('periodReach')方法订阅采集器区间标记到达事件,使用off('periodReach')取消订阅事件。
...@@ -215,40 +215,43 @@ AudioCapturer提供了用于获取原始音频文件的方法。开发者可以 ...@@ -215,40 +215,43 @@ AudioCapturer提供了用于获取原始音频文件的方法。开发者可以
注册periodReach监听后,**每当**采集器采集的帧数到达设定值时,会触发回调并返回设定的值。 注册periodReach监听后,**每当**采集器采集的帧数到达设定值时,会触发回调并返回设定的值。
```js ```js
audioCapturer.on('periodReach', (reachNumber) => { async periodReach(){
console.info('Period reach event Received'); this.audioCapturer.on('periodReach', 10, (reachNumber) => {
console.info(`In this period, the Capturer reached frame: ${reachNumber}`); console.info('Period reach event Received');
}); console.info(`In this period, the Capturer reached frame: ${reachNumber}`);
});
audioCapturer.off('periodReach'); // 取消periodReach事件的订阅,后续将无法监听到“区间标记到达”事件 this.audioCapturer.off('periodReach'); // 取消periodReach事件的订阅,后续将无法监听到“区间标记到达”事件
}
``` ```
9. 如果应用需要在采集器状态更新时进行一些操作,可以订阅该事件,当采集器状态更新时,会收到一个包含事件类型的回调。 9. 如果应用需要在采集器状态更新时进行一些操作,可以订阅该事件,当采集器状态更新时,会收到一个包含事件类型的回调。
```js ```js
audioCapturer.on('stateChange', (state) => { async stateChange(){
console.info(`AudioCapturerLog: Changed State to : ${state}`) this.audioCapturer.on('stateChange', (state) => {
switch (state) { console.info(`AudioCapturerLog: Changed State to : ${state}`)
case audio.AudioState.STATE_PREPARED: switch (state) {
console.info('--------CHANGE IN AUDIO STATE----------PREPARED--------------'); case audio.AudioState.STATE_PREPARED:
console.info('Audio State is : Prepared'); console.info('--------CHANGE IN AUDIO STATE----------PREPARED--------------');
break; console.info('Audio State is : Prepared');
case audio.AudioState.STATE_RUNNING: break;
console.info('--------CHANGE IN AUDIO STATE----------RUNNING--------------'); case audio.AudioState.STATE_RUNNING:
console.info('Audio State is : Running'); console.info('--------CHANGE IN AUDIO STATE----------RUNNING--------------');
break; console.info('Audio State is : Running');
case audio.AudioState.STATE_STOPPED: break;
console.info('--------CHANGE IN AUDIO STATE----------STOPPED--------------'); case audio.AudioState.STATE_STOPPED:
console.info('Audio State is : stopped'); console.info('--------CHANGE IN AUDIO STATE----------STOPPED--------------');
break; console.info('Audio State is : stopped');
case audio.AudioState.STATE_RELEASED: break;
console.info('--------CHANGE IN AUDIO STATE----------RELEASED--------------'); case audio.AudioState.STATE_RELEASED:
console.info('Audio State is : released'); console.info('--------CHANGE IN AUDIO STATE----------RELEASED--------------');
break; console.info('Audio State is : released');
default: break;
console.info('--------CHANGE IN AUDIO STATE----------INVALID--------------'); default:
console.info('Audio State is : invalid'); console.info('--------CHANGE IN AUDIO STATE----------INVALID--------------');
break; console.info('Audio State is : invalid');
} break;
}); }
});
}
``` ```
\ No newline at end of file
...@@ -28,47 +28,59 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -28,47 +28,59 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
详细API含义可参考:[音频管理API文档AudioRenderer](../reference/apis/js-apis-audio.md#audiorenderer8) 详细API含义可参考:[音频管理API文档AudioRenderer](../reference/apis/js-apis-audio.md#audiorenderer8)
1. 使用createAudioRenderer()创建一个AudioRenderer实例 1. 使用createAudioRenderer()创建一个全局的AudioRenderer实例,以便后续步骤使用
在audioRendererOptions中设置相关参数。该实例可用于音频渲染、控制和获取渲染状态,以及注册通知回调。 在audioRendererOptions中设置相关参数。该实例可用于音频渲染、控制和获取渲染状态,以及注册通知回调。
```js ```js
import audio from '@ohos.multimedia.audio'; import audio from '@ohos.multimedia.audio';
import fs from '@ohos.file.fs';
let audioStreamInfo = {
samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, //音频渲染相关接口自测试
channels: audio.AudioChannel.CHANNEL_1, @Entry
sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, @Component
encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW struct AudioRenderer1129 {
} private audioRenderer: audio.AudioRenderer;
let audioRendererInfo = { private bufferSize;//便于步骤3 write函数调用使用
content: audio.ContentType.CONTENT_TYPE_SPEECH, private audioRenderer1: audio.AudioRenderer; //便于步骤14 完整示例调用使用
usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION, private audioRenderer2: audio.AudioRenderer; //便于步骤14 完整示例调用使用
rendererFlags: 0 // 0是音频渲染器的扩展标志位,默认为0
} async initAudioRender(){
let audioRendererOptions = { let audioStreamInfo = {
streamInfo: audioStreamInfo, samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
rendererInfo: audioRendererInfo channels: audio.AudioChannel.CHANNEL_1,
} sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
let audioRenderer = await audio.createAudioRenderer(audioRendererOptions); }
console.log("Create audio renderer success."); let audioRendererInfo = {
content: audio.ContentType.CONTENT_TYPE_SPEECH,
usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION,
rendererFlags: 0 // 0是音频渲染器的扩展标志位,默认为0
}
let audioRendererOptions = {
streamInfo: audioStreamInfo,
rendererInfo: audioRendererInfo
}
this.audioRenderer = await audio.createAudioRenderer(audioRendererOptions);
console.log("Create audio renderer success.");
}
}
``` ```
2. 调用start()方法来启动/恢复播放任务。 2. 调用start()方法来启动/恢复播放任务。
```js ```js
async function startRenderer() { async startRenderer() {
let state = audioRenderer.state; let state = this.audioRenderer.state;
// Renderer start时的状态应该是STATE_PREPARED、STATE_PAUSED和STATE_STOPPED之一. // Renderer start时的状态应该是STATE_PREPARED、STATE_PAUSED和STATE_STOPPED之一.
if (state != audio.AudioState.STATE_PREPARED && state != audio.AudioState.STATE_PAUSED && if (state != audio.AudioState.STATE_PREPARED && state != audio.AudioState.STATE_PAUSED &&
state != audio.AudioState.STATE_STOPPED) { state != audio.AudioState.STATE_STOPPED) {
console.info('Renderer is not in a correct state to start'); console.info('Renderer is not in a correct state to start');
return; return;
} }
await audioRenderer.start(); await this.audioRenderer.start();
state = audioRenderer.state; state = this.audioRenderer.state;
if (state == audio.AudioState.STATE_RUNNING) { if (state == audio.AudioState.STATE_RUNNING) {
console.info('Renderer started'); console.info('Renderer started');
} else { } else {
...@@ -81,112 +93,97 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -81,112 +93,97 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
3. 调用write()方法向缓冲区写入数据。 3. 调用write()方法向缓冲区写入数据。
将需要播放的音频数据读入缓冲区,重复调用write()方法写入。 将需要播放的音频数据读入缓冲区,重复调用write()方法写入。请注意引入“import fs from '@ohos.file.fs';”,具体请参考步骤1。
```js ```js
import fs from '@ohos.file.fs'; async writeData(){
import audio from '@ohos.multimedia.audio'; // 此处是渲染器的合理的最小缓冲区大小(也可以选择其它大小的缓冲区)
this.bufferSize = await this.audioRenderer.getBufferSize();
async function writeBuffer(buf) { let dir = globalThis.fileDir; //不可直接访问,没权限,切记!!!一定要使用沙箱路径
// 写入数据时,渲染器的状态必须为STATE_RUNNING const filePath = dir + '/file_example_WAV_2MG.wav'; // 需要渲染的音乐文件 实际路径为:/data/storage/el2/base/haps/entry/files/file_example_WAV_2MG.wav
if (audioRenderer.state != audio.AudioState.STATE_RUNNING) { console.info(`file filePath: ${ filePath}`);
console.error('Renderer is not running, do not write');
return; let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
} let stat = await fs.stat(filePath); //音乐文件信息
let writtenbytes = await audioRenderer.write(buf); let buf = new ArrayBuffer(this.bufferSize);
console.info(`Actual written bytes: ${writtenbytes} `); let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1);
if (writtenbytes < 0) { for (let i = 0;i < len; i++) {
console.error('Write buffer failed. check the state of renderer'); let options = {
} offset: i * this.bufferSize,
} length: this.bufferSize
}
// 此处是渲染器的合理的最小缓冲区大小(也可以选择其它大小的缓冲区) let readsize = await fs.read(file.fd, buf, options)
const bufferSize = await audioRenderer.getBufferSize(); let writeSize = await new Promise((resolve,reject)=>{
let dir = globalThis.fileDir; //不可直接访问,没权限,切记!!!一定要使用沙箱路径 this.audioRenderer.write(buf,(err,writeSize)=>{
const filePath = dir + '/file_example_WAV_2MG.wav'; // 需要渲染的音乐文件 实际路径为:/data/storage/el2/base/haps/entry/files/file_example_WAV_2MG.wav if(err){
console.info(`file filePath: ${ filePath}`); reject(err)
}else{
let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY); resolve(writeSize)
let stat = await fs.stat(filePath); //音乐文件信息 }
let buf = new ArrayBuffer(bufferSize); })
let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1);
for (let i = 0;i < len; i++) {
let options = {
offset: i * this.bufferSize,
length: this.bufferSize
}
let readsize = await fs.read(file.fd, buf, options)
let writeSize = await new Promise((resolve,reject)=>{
this.audioRenderer.write(buf,(err,writeSize)=>{
if(err){
reject(err)
}else{
resolve(writeSize)
}
}) })
}) }
fs.close(file)
await this.audioRenderer.stop(); //停止渲染
await this.audioRenderer.release(); //释放资源
} }
fs.close(file)
await audioRenderer.stop(); //停止渲染
await audioRenderer.release(); //释放资源
``` ```
4. (可选)调用pause()方法或stop()方法暂停/停止渲染音频数据。 4. (可选)调用pause()方法或stop()方法暂停/停止渲染音频数据。
```js ```js
async function pauseRenderer() { async pauseRenderer() {
let state = audioRenderer.state; let state = this.audioRenderer.state;
// 只有渲染器状态为STATE_RUNNING的时候才能暂停 // 只有渲染器状态为STATE_RUNNING的时候才能暂停
if (state != audio.AudioState.STATE_RUNNING) { if (state != audio.AudioState.STATE_RUNNING) {
console.info('Renderer is not running'); console.info('Renderer is not running');
return; return;
} }
await audioRenderer.pause(); await this.audioRenderer.pause();
state = audioRenderer.state; state = this.audioRenderer.state;
if (state == audio.AudioState.STATE_PAUSED) { if (state == audio.AudioState.STATE_PAUSED) {
console.info('Renderer paused'); console.info('Renderer paused');
} else { } else {
console.error('Renderer pause failed'); console.error('Renderer pause failed');
} }
} }
async function stopRenderer() { async stopRenderer() {
let state = audioRenderer.state; let state = this.audioRenderer.state;
// 只有渲染器状态为STATE_RUNNING或STATE_PAUSED的时候才可以停止 // 只有渲染器状态为STATE_RUNNING或STATE_PAUSED的时候才可以停止
if (state != audio.AudioState.STATE_RUNNING && state != audio.AudioState.STATE_PAUSED) { if (state != audio.AudioState.STATE_RUNNING && state != audio.AudioState.STATE_PAUSED) {
console.info('Renderer is not running or paused'); console.info('Renderer is not running or paused');
return; return;
} }
await audioRenderer.stop(); await this.audioRenderer.stop();
state = audioRenderer.state; state = this.audioRenderer.state;
if (state == audio.AudioState.STATE_STOPPED) { if (state == audio.AudioState.STATE_STOPPED) {
console.info('Renderer stopped'); console.info('Renderer stopped');
} else { } else {
console.error('Renderer stop failed'); console.error('Renderer stop failed');
} }
} }
``` ```
5. (可选)调用drain()方法清空缓冲区。 5. (可选)调用drain()方法清空缓冲区。
```js ```js
async function drainRenderer() { async drainRenderer() {
let state = audioRenderer.state; let state = this.audioRenderer.state;
// 只有渲染器状态为STATE_RUNNING的时候才能使用drain() // 只有渲染器状态为STATE_RUNNING的时候才能使用drain()
if (state != audio.AudioState.STATE_RUNNING) { if (state != audio.AudioState.STATE_RUNNING) {
console.info('Renderer is not running'); console.info('Renderer is not running');
return; return;
}
await audioRenderer.drain();
state = audioRenderer.state;
} }
await this.audioRenderer.drain();
state = this.audioRenderer.state;
}
``` ```
6. 任务完成,调用release()方法释放相关资源。 6. 任务完成,调用release()方法释放相关资源。
...@@ -194,23 +191,22 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -194,23 +191,22 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
AudioRenderer会使用大量的系统资源,所以请确保完成相关任务后,进行资源释放。 AudioRenderer会使用大量的系统资源,所以请确保完成相关任务后,进行资源释放。
```js ```js
async function releaseRenderer() { async releaseRenderer() {
let state = audioRenderer.state; let state = this.audioRenderer.state;
// 渲染器状态不是STATE_RELEASED或STATE_NEW状态,才能release // 渲染器状态不是STATE_RELEASED或STATE_NEW状态,才能release
if (state == audio.AudioState.STATE_RELEASED || state == audio.AudioState.STATE_NEW) { if (state == audio.AudioState.STATE_RELEASED || state == audio.AudioState.STATE_NEW) {
console.info('Renderer already released'); console.info('Renderer already released');
return; return;
} }
await this.audioRenderer.release();
await audioRenderer.release();
state = audioRenderer.state; state = this.audioRenderer.state;
if (state == audio.AudioState.STATE_RELEASED) { if (state == audio.AudioState.STATE_RELEASED) {
console.info('Renderer released'); console.info('Renderer released');
} else { } else {
console.info('Renderer release failed'); console.info('Renderer release failed');
}
} }
}
``` ```
7. (可选)获取渲染器相关信息 7. (可选)获取渲染器相关信息
...@@ -218,26 +214,22 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -218,26 +214,22 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
通过以下代码,可以获取渲染器的相关信息。 通过以下代码,可以获取渲染器的相关信息。
```js ```js
// 获取当前渲染器状态 async getRenderInfo(){
let state = audioRenderer.state; // 获取当前渲染器状态
let state = this.audioRenderer.state;
// 获取渲染器信息 // 获取渲染器信息
let audioRendererInfo : audio.AudioRendererInfo = await audioRenderer.getRendererInfo(); let audioRendererInfo : audio.AudioRendererInfo = await this.audioRenderer.getRendererInfo();
// 获取音频流信息
// 获取音频流信息 let audioStreamInfo : audio.AudioStreamInfo = await this.audioRenderer.getStreamInfo();
let audioStreamInfo : audio.AudioStreamInfo = await audioRenderer.getStreamInfo(); // 获取音频流ID
let audioStreamId : number = await this.audioRenderer.getAudioStreamId();
// 获取音频流ID // 获取纳秒形式的Unix时间戳
let audioStreamId : number = await audioRenderer.getAudioStreamId(); let audioTime : number = await this.audioRenderer.getAudioTime();
// 获取合理的最小缓冲区大小
// 获取纳秒形式的Unix时间戳 let bufferSize : number = await this.audioRenderer.getBufferSize();
let audioTime : number = await audioRenderer.getAudioTime(); // 获取渲染速率
let renderRate : audio.AudioRendererRate = await this.audioRenderer.getRenderRate();
// 获取合理的最小缓冲区大小 }
let bufferSize : number = await audioRenderer.getBufferSize();
// 获取渲染速率
let renderRate : audio.AudioRendererRate = await audioRenderer.getRenderRate();
``` ```
8. (可选)设置渲染器相关信息 8. (可选)设置渲染器相关信息
...@@ -245,17 +237,17 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -245,17 +237,17 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
通过以下代码,可以设置渲染器的相关信息。 通过以下代码,可以设置渲染器的相关信息。
```js ```js
// 设置渲染速率为正常速度 async setAudioRenderInfo(){
let renderRate : audio.AudioRendererRate = audio.AudioRendererRate.RENDER_RATE_NORMAL; // 设置渲染速率为正常速度
await audioRenderer.setRenderRate(renderRate); let renderRate : audio.AudioRendererRate = audio.AudioRendererRate.RENDER_RATE_NORMAL;
await this.audioRenderer.setRenderRate(renderRate);
// 设置渲染器音频中断模式为SHARE_MODE // 设置渲染器音频中断模式为SHARE_MODE
let interruptMode : audio.InterruptMode = audio.InterruptMode.SHARE_MODE; let interruptMode : audio.InterruptMode = audio.InterruptMode.SHARE_MODE;
await audioRenderer.setInterruptMode(interruptMode); await this.audioRenderer.setInterruptMode(interruptMode);
// 设置一个流的音量为0.5
// 设置一个流的音量为0.5 let volume : number = 0.5;
let volume : number = 0.5; await this.audioRenderer.setVolume(volume);
await audioRenderer.setVolume(volume); }
``` ```
9. (可选)使用on('audioInterrupt')方法订阅渲染器音频中断事件,使用off('audioInterrupt')取消订阅事件。 9. (可选)使用on('audioInterrupt')方法订阅渲染器音频中断事件,使用off('audioInterrupt')取消订阅事件。
...@@ -269,45 +261,45 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -269,45 +261,45 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
需要说明的是,本模块的订阅音频中断事件与[AudioManager](../reference/apis/js-apis-audio.md#audiomanager)模块中的on('interrupt')稍有不同。自api9以来,on('interrupt')和off('interrupt')均被废弃。在AudioRenderer模块,当开发者需要监听焦点变化事件时,只需要调用on('audioInterrupt')函数,当应用内部的AudioRenderer对象在start\stop\pause等动作发生时,会主动请求焦点,从而发生焦点转移,相关的AudioRenderer对象即可获取到对应的回调信息。但对除AudioRenderer的其他对象,例如FM、语音唤醒等,应用不会创建对象,此时可调用AudioManager中的on('interrupt')获取焦点变化通知。 需要说明的是,本模块的订阅音频中断事件与[AudioManager](../reference/apis/js-apis-audio.md#audiomanager)模块中的on('interrupt')稍有不同。自api9以来,on('interrupt')和off('interrupt')均被废弃。在AudioRenderer模块,当开发者需要监听焦点变化事件时,只需要调用on('audioInterrupt')函数,当应用内部的AudioRenderer对象在start\stop\pause等动作发生时,会主动请求焦点,从而发生焦点转移,相关的AudioRenderer对象即可获取到对应的回调信息。但对除AudioRenderer的其他对象,例如FM、语音唤醒等,应用不会创建对象,此时可调用AudioManager中的on('interrupt')获取焦点变化通知。
```js ```js
audioRenderer.on('audioInterrupt', (interruptEvent) => { async subscribeAudioRender(){
console.info('InterruptEvent Received'); this.audioRenderer.on('audioInterrupt', (interruptEvent) => {
console.info(`InterruptType: ${interruptEvent.eventType}`); console.info('InterruptEvent Received');
console.info(`InterruptForceType: ${interruptEvent.forceType}`); console.info(`InterruptType: ${interruptEvent.eventType}`);
console.info(`AInterruptHint: ${interruptEvent.hintType}`); console.info(`InterruptForceType: ${interruptEvent.forceType}`);
console.info(`AInterruptHint: ${interruptEvent.hintType}`);
if (interruptEvent.forceType == audio.InterruptForceType.INTERRUPT_FORCE) {
switch (interruptEvent.hintType) { if (interruptEvent.forceType == audio.InterruptForceType.INTERRUPT_FORCE) {
switch (interruptEvent.hintType) {
// 音频框架发起的强制暂停操作,为防止数据丢失,此时应该停止数据的写操作 // 音频框架发起的强制暂停操作,为防止数据丢失,此时应该停止数据的写操作
case audio.InterruptHint.INTERRUPT_HINT_PAUSE: case audio.InterruptHint.INTERRUPT_HINT_PAUSE:
isPlay = false; console.info('isPlay is false');
break; break;
// 音频框架发起的强制停止操作,为防止数据丢失,此时应该停止数据的写操作 // 音频框架发起的强制停止操作,为防止数据丢失,此时应该停止数据的写操作
case audio.InterruptHint.INTERRUPT_HINT_STOP: case audio.InterruptHint.INTERRUPT_HINT_STOP:
isPlay = false; console.info('isPlay is false');
break; break;
// 音频框架发起的强制降低音量操作 // 音频框架发起的强制降低音量操作
case audio.InterruptHint.INTERRUPT_HINT_DUCK: case audio.InterruptHint.INTERRUPT_HINT_DUCK:
break; break;
// 音频框架发起的恢复音量操作 // 音频框架发起的恢复音量操作
case audio.InterruptHint.INTERRUPT_HINT_UNDUCK: case audio.InterruptHint.INTERRUPT_HINT_UNDUCK:
break; break;
} }
} else if (interruptEvent.forceType == audio.InterruptForceType.INTERRUPT_SHARE) { } else if (interruptEvent.forceType == audio.InterruptForceType.INTERRUPT_SHARE) {
switch (interruptEvent.hintType) { switch (interruptEvent.hintType) {
// 提醒App开始渲染 // 提醒App开始渲染
case audio.InterruptHint.INTERRUPT_HINT_RESUME: case audio.InterruptHint.INTERRUPT_HINT_RESUME:
startRenderer(); this.startRenderer();
break; break;
// 提醒App音频流被中断,由App自主决定是否继续(此处选择暂停) // 提醒App音频流被中断,由App自主决定是否继续(此处选择暂停)
case audio.InterruptHint.INTERRUPT_HINT_PAUSE: case audio.InterruptHint.INTERRUPT_HINT_PAUSE:
isPlay = false; console.info('isPlay is false');
pauseRenderer(); this.pauseRenderer();
break; break;
}
} }
} });
}); }
audioRenderer.off('audioInterrupt'); // 取消音频中断事件的订阅,后续将无法监听到音频中断事件
``` ```
10. (可选)使用on('markReach')方法订阅渲染器标记到达事件,使用off('markReach')取消订阅事件。 10. (可选)使用on('markReach')方法订阅渲染器标记到达事件,使用off('markReach')取消订阅事件。
...@@ -315,12 +307,14 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -315,12 +307,14 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
注册markReach监听后,当渲染器渲染的帧数到达设定值时,会触发回调并返回设定的值。 注册markReach监听后,当渲染器渲染的帧数到达设定值时,会触发回调并返回设定的值。
```js ```js
audioRenderer.on('markReach', (reachNumber) => { async markReach(){
console.info('Mark reach event Received'); this.audioRenderer.on('markReach', 50, (position) => {
console.info(`The renderer reached frame: ${reachNumber}`); if (position == 50) {
}); console.info('ON Triggered successfully');
}
audioRenderer.off('markReach'); // 取消markReach事件的订阅,后续将无法监听到“标记到达”事件 });
this.audioRenderer.off('markReach'); // 取消markReach事件的订阅,后续将无法监听到“标记到达”事件
}
``` ```
11. (可选)使用on('periodReach')方法订阅渲染器区间标记到达事件,使用off('periodReach')取消订阅事件。 11. (可选)使用on('periodReach')方法订阅渲染器区间标记到达事件,使用off('periodReach')取消订阅事件。
...@@ -328,12 +322,13 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -328,12 +322,13 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
注册periodReach监听后,**每当**渲染器渲染的帧数到达设定值时,会触发回调并返回设定的值。 注册periodReach监听后,**每当**渲染器渲染的帧数到达设定值时,会触发回调并返回设定的值。
```js ```js
audioRenderer.on('periodReach', (reachNumber) => { async periodReach(){
console.info('Period reach event Received'); this.audioRenderer.on('periodReach',10, (reachNumber) => {
console.info(`In this period, the renderer reached frame: ${reachNumber} `); console.info(`In this period, the renderer reached frame: ${reachNumber} `);
}); });
audioRenderer.off('periodReach'); // 取消periodReach事件的订阅,后续将无法监听到“区间标记到达”事件 this.audioRenderer.off('periodReach'); // 取消periodReach事件的订阅,后续将无法监听到“区间标记到达”事件
}
``` ```
12. (可选)使用on('stateChange')方法订阅渲染器音频状态变化事件。 12. (可选)使用on('stateChange')方法订阅渲染器音频状态变化事件。
...@@ -341,10 +336,12 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -341,10 +336,12 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
注册stateChange监听后,当渲染器的状态发生改变时,会触发回调并返回当前渲染器的状态。 注册stateChange监听后,当渲染器的状态发生改变时,会触发回调并返回当前渲染器的状态。
```js ```js
audioRenderer.on('stateChange', (audioState) => { async stateChange(){
console.info('State change event Received'); this.audioRenderer.on('stateChange', (audioState) => {
console.info(`Current renderer state is: ${audioState}`); console.info('State change event Received');
}); console.info(`Current renderer state is: ${audioState}`);
});
}
``` ```
13. (可选)对on()方法的异常处理。 13. (可选)对on()方法的异常处理。
...@@ -352,21 +349,24 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -352,21 +349,24 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
在使用on()方法时,如果传入的字符串错误或传入的参数类型错误,程序会抛出异常,需要用try catch来捕获。 在使用on()方法时,如果传入的字符串错误或传入的参数类型错误,程序会抛出异常,需要用try catch来捕获。
```js ```js
try { async errorCall(){
audioRenderer.on('invalidInput', () => { // 字符串不匹配 try {
}) this.audioRenderer.on('invalidInput', () => { // 字符串不匹配
} catch (err) { })
console.info(`Call on function error, ${err}`); // 程序抛出401异常 } catch (err) {
} console.info(`Call on function error, ${err}`); // 程序抛出401异常
try { }
audioRenderer.on(1, () => { // 入参类型错误 try {
}) this.audioRenderer.on(1, () => { // 入参类型错误
} catch (err) { })
console.info(`Call on function error, ${err}`); // 程序抛出6800101异常 } catch (err) {
console.info(`Call on function error, ${err}`); // 程序抛出6800101异常
}
} }
``` ```
14. (可选)on('audioInterrupt')方法完整示例。 14. (可选)on('audioInterrupt')方法完整示例。
请注意:在调用前声明audioRenderer1与audioRenderer2对象,具体请参考步骤1。
同一个应用中的AudioRender1和AudioRender2在创建时均设置了焦点模式为独立,并且调用on('audioInterrupt')监听焦点变化。刚开始AudioRender1拥有焦点,当AudioRender2获取到焦点时,audioRenderer1将收到焦点转移的通知,打印相关日志。如果AudioRender1和AudioRender2不将焦点模式设置为独立,则监听处理中的日志在应用运行过程中永远不会被打印。 同一个应用中的AudioRender1和AudioRender2在创建时均设置了焦点模式为独立,并且调用on('audioInterrupt')监听焦点变化。刚开始AudioRender1拥有焦点,当AudioRender2获取到焦点时,audioRenderer1将收到焦点转移的通知,打印相关日志。如果AudioRender1和AudioRender2不将焦点模式设置为独立,则监听处理中的日志在应用运行过程中永远不会被打印。
```js ```js
async runningAudioRender1(){ async runningAudioRender1(){
...@@ -385,31 +385,31 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -385,31 +385,31 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
streamInfo: audioStreamInfo, streamInfo: audioStreamInfo,
rendererInfo: audioRendererInfo rendererInfo: audioRendererInfo
} }
//1.1 创建对象 //1.1 创建对象
audioRenderer1 = await audio.createAudioRenderer(audioRendererOptions); this.audioRenderer1 = await audio.createAudioRenderer(audioRendererOptions);
console.info("Create audio renderer 1 success."); console.info("Create audio renderer 1 success.");
//1.2 设置焦点模式为独立模式 :1 //1.2 设置焦点模式为独立模式 :1
audioRenderer1.setInterruptMode(1).then( data => { this.audioRenderer1.setInterruptMode(1).then( data => {
console.info('audioRenderer1 setInterruptMode Success!'); console.info('audioRenderer1 setInterruptMode Success!');
}).catch((err) => { }).catch((err) => {
console.error(`audioRenderer1 setInterruptMode Fail: ${err}`); console.error(`audioRenderer1 setInterruptMode Fail: ${err}`);
}); });
//1.3 设置监听 //1.3 设置监听
audioRenderer1.on('audioInterrupt', async(interruptEvent) => { this.audioRenderer1.on('audioInterrupt', async(interruptEvent) => {
console.info(`audioRenderer1 on audioInterrupt : ${JSON.stringify(interruptEvent)}`) console.info(`audioRenderer1 on audioInterrupt : ${JSON.stringify(interruptEvent)}`)
}); });
//1.4 启动渲染 //1.4 启动渲染
await audioRenderer1.start(); await this.audioRenderer1.start();
console.info('startAudioRender1 success'); console.info('startAudioRender1 success');
//1.5 获取缓存区大小,此处是渲染器的合理的最小缓冲区大小(也可以选择其它大小的缓冲区) //1.5 获取缓存区大小,此处是渲染器的合理的最小缓冲区大小(也可以选择其它大小的缓冲区)
const bufferSize = await audioRenderer1.getBufferSize(); const bufferSize = await this.audioRenderer1.getBufferSize();
console.info(`audio bufferSize: ${bufferSize}`); console.info(`audio bufferSize: ${bufferSize}`);
//1.6 获取原始音频数据文件 //1.6 获取原始音频数据文件
let dir = globalThis.fileDir; //不可直接访问,没权限,切记!!!一定要使用沙箱路径 let dir = globalThis.fileDir; //不可直接访问,没权限,切记!!!一定要使用沙箱路径
const path1 = dir + '/music001_48000_32_1.wav'; // 需要渲染的音乐文件 实际路径为:/data/storage/el2/base/haps/entry/files/music001_48000_32_1.wav const path1 = dir + '/music001_48000_32_1.wav'; // 需要渲染的音乐文件 实际路径为:/data/storage/el2/base/haps/entry/files/music001_48000_32_1.wav
...@@ -418,14 +418,14 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -418,14 +418,14 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
let stat = await fs.stat(path1); //音乐文件信息 let stat = await fs.stat(path1); //音乐文件信息
let buf = new ArrayBuffer(bufferSize); let buf = new ArrayBuffer(bufferSize);
let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1); let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1);
//1.7 通过audioRender对缓存区的原始音频数据进行渲染 //1.7 通过audioRender对缓存区的原始音频数据进行渲染
for (let i = 0;i < len; i++) { for (let i = 0;i < len; i++) {
let options = { let options = {
offset: i * this.bufferSize, offset: i * this.bufferSize,
length: this.bufferSize length: this.bufferSize
} }
let readsize = await fs.read(file.fd, buf, options) let readsize = await fs.read(file1.fd, buf, options)
let writeSize = await new Promise((resolve,reject)=>{ let writeSize = await new Promise((resolve,reject)=>{
this.audioRenderer1.write(buf,(err,writeSize)=>{ this.audioRenderer1.write(buf,(err,writeSize)=>{
if(err){ if(err){
...@@ -434,13 +434,13 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -434,13 +434,13 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
resolve(writeSize) resolve(writeSize)
} }
}) })
}) })
} }
fs.close(file1) fs.close(file1)
await audioRenderer1.stop(); //停止渲染 await this.audioRenderer1.stop(); //停止渲染
await audioRenderer1.release(); //释放资源 await this.audioRenderer1.release(); //释放资源
} }
async runningAudioRender2(){ async runningAudioRender2(){
let audioStreamInfo = { let audioStreamInfo = {
samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000,
...@@ -457,31 +457,31 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -457,31 +457,31 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
streamInfo: audioStreamInfo, streamInfo: audioStreamInfo,
rendererInfo: audioRendererInfo rendererInfo: audioRendererInfo
} }
//2.1 创建对象 //2.1 创建对象
audioRenderer2 = await audio.createAudioRenderer(audioRendererOptions); this.audioRenderer2 = await audio.createAudioRenderer(audioRendererOptions);
console.info("Create audio renderer 2 success."); console.info("Create audio renderer 2 success.");
//2.2 设置焦点模式为独立模式 :1 //2.2 设置焦点模式为独立模式 :1
audioRenderer2.setInterruptMode(1).then( data => { this.audioRenderer2.setInterruptMode(1).then( data => {
console.info('audioRenderer2 setInterruptMode Success!'); console.info('audioRenderer2 setInterruptMode Success!');
}).catch((err) => { }).catch((err) => {
console.error(`audioRenderer2 setInterruptMode Fail: ${err}`); console.error(`audioRenderer2 setInterruptMode Fail: ${err}`);
}); });
//2.3 设置监听 //2.3 设置监听
audioRenderer2.on('audioInterrupt', async(interruptEvent) => { this.audioRenderer2.on('audioInterrupt', async(interruptEvent) => {
console.info(`audioRenderer2 on audioInterrupt : ${JSON.stringify(interruptEvent)}`) console.info(`audioRenderer2 on audioInterrupt : ${JSON.stringify(interruptEvent)}`)
}); });
//2.4 启动渲染 //2.4 启动渲染
await audioRenderer2.start(); await this.audioRenderer2.start();
console.info('startAudioRender2 success'); console.info('startAudioRender2 success');
//2.5 获取缓存区大小 //2.5 获取缓存区大小
const bufferSize = await audioRenderer2.getBufferSize(); const bufferSize = await this.audioRenderer2.getBufferSize();
console.info(`audio bufferSize: ${bufferSize}`); console.info(`audio bufferSize: ${bufferSize}`);
//2.6 获取原始音频数据文件 //2.6 获取原始音频数据文件
let dir = globalThis.fileDir; //不可直接访问,没权限,切记!!!一定要使用沙箱路径 let dir = globalThis.fileDir; //不可直接访问,没权限,切记!!!一定要使用沙箱路径
const path2 = dir + '/music002_48000_32_1.wav'; // 需要渲染的音乐文件 实际路径为:/data/storage/el2/base/haps/entry/files/music002_48000_32_1.wav const path2 = dir + '/music002_48000_32_1.wav'; // 需要渲染的音乐文件 实际路径为:/data/storage/el2/base/haps/entry/files/music002_48000_32_1.wav
...@@ -490,14 +490,14 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -490,14 +490,14 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
let stat = await fs.stat(path2); //音乐文件信息 let stat = await fs.stat(path2); //音乐文件信息
let buf = new ArrayBuffer(bufferSize); let buf = new ArrayBuffer(bufferSize);
let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1); let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1);
//2.7 通过audioRender对缓存区的原始音频数据进行渲染 //2.7 通过audioRender对缓存区的原始音频数据进行渲染
for (let i = 0;i < len; i++) { for (let i = 0;i < len; i++) {
let options = { let options = {
offset: i * this.bufferSize, offset: i * this.bufferSize,
length: this.bufferSize length: this.bufferSize
} }
let readsize = await fs.read(file.fd, buf, options) let readsize = await fs.read(file2.fd, buf, options)
let writeSize = await new Promise((resolve,reject)=>{ let writeSize = await new Promise((resolve,reject)=>{
this.audioRenderer2.write(buf,(err,writeSize)=>{ this.audioRenderer2.write(buf,(err,writeSize)=>{
if(err){ if(err){
...@@ -506,28 +506,17 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可 ...@@ -506,28 +506,17 @@ AudioRenderer提供了渲染音频文件和控制播放的接口,开发者可
resolve(writeSize) resolve(writeSize)
} }
}) })
}) })
} }
fs.close(file2) fs.close(file2)
await audioRenderer2.stop(); //停止渲染 await this.audioRenderer2.stop(); //停止渲染
await audioRenderer2.release(); //释放资源 await this.audioRenderer2.release(); //释放资源
} }
async writeBuffer(buf, audioRender) {
let writtenbytes;
await audioRender.write(buf).then((value) => {
writtenbytes = value;
console.info(`Actual written bytes: ${writtenbytes} `);
});
if (typeof(writtenbytes) != 'number' || writtenbytes < 0) {
console.error('get Write buffer failed. check the state of renderer');
}
}
//综合调用入口 //综合调用入口
async test(){ async test(){
await runningAudioRender1(); await this.runningAudioRender1();
await runningAudioRender2(); await this.runningAudioRender2();
} }
``` ```
\ No newline at end of file
...@@ -4533,15 +4533,15 @@ let filePath = path + '/StarWars10s-2C-48000-4SW.wav'; ...@@ -4533,15 +4533,15 @@ let filePath = path + '/StarWars10s-2C-48000-4SW.wav';
let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY); let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
let stat = await fs.stat(path); let stat = await fs.stat(path);
let buf = new ArrayBuffer(bufferSize); let buf = new ArrayBuffer(bufferSize);
let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1); let len = stat.size % bufferSize == 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);
for (let i = 0;i < len; i++) { for (let i = 0;i < len; i++) {
let options = { let options = {
offset: i * this.bufferSize, offset: i * bufferSize,
length: this.bufferSize length: bufferSize
} }
let readsize = await fs.read(file.fd, buf, options) let readsize = await fs.read(file.fd, buf, options)
let writeSize = await new Promise((resolve,reject)=>{ let writeSize = await new Promise((resolve,reject)=>{
this.audioRenderer.write(buf,(err,writeSize)=>{ audioRenderer.write(buf,(err,writeSize)=>{
if(err){ if(err){
reject(err) reject(err)
}else{ }else{
...@@ -4586,15 +4586,15 @@ let filePath = path + '/StarWars10s-2C-48000-4SW.wav'; ...@@ -4586,15 +4586,15 @@ let filePath = path + '/StarWars10s-2C-48000-4SW.wav';
let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY); let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
let stat = await fs.stat(path); let stat = await fs.stat(path);
let buf = new ArrayBuffer(bufferSize); let buf = new ArrayBuffer(bufferSize);
let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1); let len = stat.size % bufferSize == 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);
for (let i = 0;i < len; i++) { for (let i = 0;i < len; i++) {
let options = { let options = {
offset: i * this.bufferSize, offset: i * bufferSize,
length: this.bufferSize length: bufferSize
} }
let readsize = await fs.read(file.fd, buf, options) let readsize = await fs.read(file.fd, buf, options)
try{ try{
let writeSize = await this.audioRenderer.write(buf); let writeSize = await audioRenderer.write(buf);
} catch(err) { } catch(err) {
console.error(`audioRenderer.write err: ${err}`); console.error(`audioRenderer.write err: ${err}`);
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册