Commit 89a1137d authored by zhangkai269

ArkTS format rectification

Signed-off-by: zhangkai269 <zhangkai269@huawei.com>
Parent: c3da6d1e
@@ -13,33 +13,34 @@
```ts
import audio from '@ohos.multimedia.audio';
import fs from '@ohos.file.fs';
import { BusinessError } from '@ohos.base';
const TAG = 'VoiceCallDemoForAudioRenderer';
// The flow is similar to developing audio playback with AudioRenderer; the key differences are the audioRendererInfo parameter and the source of the audio data
export default class VoiceCallDemoForAudioRenderer {
  private renderModel: audio.AudioRenderer = undefined;
  private audioStreamInfo: audio.AudioStreamInfo = {
    samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, // sampling rate
    channels: audio.AudioChannel.CHANNEL_2, // channels
    sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // sample format
    encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // encoding type
  }
  private audioRendererInfo: audio.AudioRendererInfo = {
    // Use the parameters appropriate for the voice-call scenario
    content: audio.ContentType.CONTENT_TYPE_SPEECH, // content type: speech
    usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION, // stream usage: voice communication
    rendererFlags: 0 // renderer flags: the default 0 is fine
  }
  private audioRendererOptions: audio.AudioRendererOptions = {
    streamInfo: this.audioStreamInfo,
    rendererInfo: this.audioRendererInfo
  }
  // Initialization: create the instance and register event listeners
  init() {
    audio.createAudioRenderer(this.audioRendererOptions, (err: BusinessError, renderer: audio.AudioRenderer) => { // create an AudioRenderer instance
      if (!err) {
        console.info(`${TAG}: creating AudioRenderer success`);
        this.renderModel = renderer;
        this.renderModel.on('stateChange', (state: audio.AudioState) => { // set a listener; the callback fires when the renderer transitions to the specified state
          if (state == 1) {
            console.info('audio renderer state is: STATE_PREPARED');
          }
@@ -47,7 +48,7 @@ export default class VoiceCallDemoForAudioRenderer {
            console.info('audio renderer state is: STATE_RUNNING');
          }
        });
        this.renderModel.on('markReach', 1000, (position: number) => { // subscribe to the markReach event; the callback fires once 1000 frames have been rendered
          if (position == 1000) {
            console.info('ON Triggered successfully');
          }
@@ -59,13 +60,13 @@ export default class VoiceCallDemoForAudioRenderer {
  }
  // Start one audio rendering pass
  async start() {
    let stateGroup: number[] = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
    if (stateGroup.indexOf(this.renderModel.state.valueOf()) === -1) { // rendering can start only when the state is STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED
      console.error(TAG + 'start failed');
      return;
    }
    await this.renderModel.start(); // start rendering
    const bufferSize: number = await this.renderModel.getBufferSize();
    // Reading data from an audio file is only an example here; in real voice-call development, the data to read is the audio transmitted from the remote peer
    let context = getContext(this);
    let path = context.filesDir;
@@ -75,15 +76,19 @@ export default class VoiceCallDemoForAudioRenderer {
    let stat = await fs.stat(filePath);
    let buf = new ArrayBuffer(bufferSize);
    let len = stat.size % bufferSize === 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);
    class Option {
      offset: number = 0
      length: number = 0
    }
    for (let i = 0; i < len; i++) {
      let options: Option = {
        offset: i * bufferSize,
        length: bufferSize
      };
      let readsize = await fs.read(file.fd, buf, options);
      // buf is the audio data to be written into the buffer; it can be preprocessed before calling AudioRenderer.write() for customized playback. AudioRenderer reads the data written into the buffer and renders it
      let writeSize: number = await new Promise((resolve, reject) => {
        this.renderModel.write(buf, (err: BusinessError, writeSize: number) => {
          if (err) {
            reject(err);
          } else {
@@ -91,11 +96,11 @@ export default class VoiceCallDemoForAudioRenderer {
          }
        });
      });
      if (this.renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) { // if the renderer state is STATE_RELEASED, stop rendering
        fs.close(file);
        await this.renderModel.stop();
      }
      if (this.renderModel.state.valueOf() === audio.AudioState.STATE_RUNNING) {
        if (i === len - 1) { // if the audio file has been read to the end, stop rendering
          fs.close(file);
          await this.renderModel.stop();
@@ -106,12 +111,12 @@ export default class VoiceCallDemoForAudioRenderer {
  // Pause rendering
  async pause() {
    // Pausing is possible only when the renderer state is STATE_RUNNING
    if (this.renderModel.state.valueOf() !== audio.AudioState.STATE_RUNNING) {
      console.info('Renderer is not running');
      return;
    }
    await this.renderModel.pause(); // pause rendering
    if (this.renderModel.state.valueOf() === audio.AudioState.STATE_PAUSED) {
      console.info('Renderer is paused.');
    } else {
      console.error('Pausing renderer failed.');
@@ -120,12 +125,12 @@ export default class VoiceCallDemoForAudioRenderer {
  // Stop rendering
  async stop() {
    // Stopping is possible only when the renderer state is STATE_RUNNING or STATE_PAUSED
    if (this.renderModel.state.valueOf() !== audio.AudioState.STATE_RUNNING && this.renderModel.state.valueOf() !== audio.AudioState.STATE_PAUSED) {
      console.info('Renderer is not running or paused.');
      return;
    }
    await this.renderModel.stop(); // stop rendering
    if (this.renderModel.state.valueOf() === audio.AudioState.STATE_STOPPED) {
      console.info('Renderer stopped.');
    } else {
      console.error('Stopping renderer failed.');
@@ -134,12 +139,12 @@ export default class VoiceCallDemoForAudioRenderer {
  // Destroy the instance and release resources
  async release() {
    // release can be called only when the renderer state is not STATE_RELEASED
    if (this.renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) {
      console.info('Renderer already released');
      return;
    }
    await this.renderModel.release(); // release resources
    if (this.renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) {
      console.info('Renderer released');
    } else {
      console.error('Renderer release failed.');
@@ -155,40 +160,41 @@ export default class VoiceCallDemoForAudioRenderer {
```ts
import audio from '@ohos.multimedia.audio';
import fs from '@ohos.file.fs';
import { BusinessError } from '@ohos.base';
const TAG = 'VoiceCallDemoForAudioCapturer';
// The flow is similar to developing audio recording with AudioCapturer; the key differences are the audioCapturerInfo parameter and where the audio data flows
export default class VoiceCallDemoForAudioCapturer {
  private audioCapturer: audio.AudioCapturer = undefined;
  private audioStreamInfo: audio.AudioStreamInfo = {
    samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, // sampling rate
    channels: audio.AudioChannel.CHANNEL_1, // channels
    sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // sample format
    encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // encoding type
  }
  private audioCapturerInfo: audio.AudioCapturerInfo = {
    // Use the parameters appropriate for the voice-call scenario
    source: audio.SourceType.SOURCE_TYPE_VOICE_COMMUNICATION, // source type: voice communication
    capturerFlags: 0 // capturer flags: the default 0 is fine
  }
  private audioCapturerOptions: audio.AudioCapturerOptions = {
    streamInfo: this.audioStreamInfo,
    capturerInfo: this.audioCapturerInfo
  }
  // Initialization: create the instance and register event listeners
  init() {
    audio.createAudioCapturer(this.audioCapturerOptions, (err: BusinessError, capturer: audio.AudioCapturer) => { // create an AudioCapturer instance
      if (err) {
        console.error(`Invoke createAudioCapturer failed, code is ${err.code}, message is ${err.message}`);
        return;
      }
      console.info(`${TAG}: create AudioCapturer success`);
      this.audioCapturer = capturer;
      this.audioCapturer.on('markReach', 1000, (position: number) => { // subscribe to the markReach event; the callback fires once 1000 frames have been captured
        if (position === 1000) {
          console.info('ON Triggered successfully');
        }
      });
      this.audioCapturer.on('periodReach', 2000, (position: number) => { // subscribe to the periodReach event; the callback fires every time 2000 frames have been captured
        if (position === 2000) {
          console.info('ON Triggered successfully');
        }
@@ -197,8 +203,8 @@ export default class VoiceCallDemoForAudioCapturer {
  }
  // Start one audio capture pass
  async start() {
    let stateGroup: number[] = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
    if (stateGroup.indexOf(this.audioCapturer.state.valueOf()) === -1) { // capturing can start only when the state is STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED
      console.error(`${TAG}: start failed`);
      return;
    }
@@ -210,10 +216,14 @@ export default class VoiceCallDemoForAudioCapturer {
    let fd = file.fd;
    let numBuffersToCapture = 150; // write 150 times in the loop
    let count = 0;
    class Options {
      offset: number = 0
      length: number = 0
    }
    while (numBuffersToCapture) {
      let bufferSize: number = await this.audioCapturer.getBufferSize();
      let buffer: ArrayBuffer = await this.audioCapturer.read(bufferSize, true);
      let options: Options = {
        offset: count * bufferSize,
        length: bufferSize
      };
@@ -230,12 +240,12 @@ export default class VoiceCallDemoForAudioCapturer {
  // Stop capturing
  async stop() {
    // Stopping is possible only when the capturer state is STATE_RUNNING or STATE_PAUSED
    if (this.audioCapturer.state.valueOf() !== audio.AudioState.STATE_RUNNING && this.audioCapturer.state.valueOf() !== audio.AudioState.STATE_PAUSED) {
      console.info('Capturer is not running or paused');
      return;
    }
    await this.audioCapturer.stop(); // stop capturing
    if (this.audioCapturer.state.valueOf() === audio.AudioState.STATE_STOPPED) {
      console.info('Capturer stopped');
    } else {
      console.error('Capturer stop failed');
@@ -244,12 +254,12 @@ export default class VoiceCallDemoForAudioCapturer {
  // Destroy the instance and release resources
  async release() {
    // release can be called only when the capturer state is neither STATE_RELEASED nor STATE_NEW
    if (this.audioCapturer.state.valueOf() === audio.AudioState.STATE_RELEASED || this.audioCapturer.state.valueOf() === audio.AudioState.STATE_NEW) {
      console.info('Capturer already released');
      return;
    }
    await this.audioCapturer.release(); // release resources
    if (this.audioCapturer.state.valueOf() === audio.AudioState.STATE_RELEASED) {
      console.info('Capturer released');
    } else {
      console.error('Capturer release failed');
......
@@ -6,7 +6,7 @@
This mainly covers querying and setting the audio effect mode of the current audio playback stream. The available modes are EFFECT_NONE (audio effects disabled) and EFFECT_DEFAULT (default audio effects). In the default mode, the effect matching the scenario is loaded automatically according to the ContentType and StreamUsage with which the audio stream was created.

### Obtaining a playback instance (sample code in JS only)

The interfaces for managing the audio effects of a playback instance are getAudioEffectMode(), which queries the effect mode of the current audio playback stream, and setAudioEffectMode(mode: AudioEffectMode), which sets it. Before using them, call createAudioRenderer(options: AudioRendererOptions) to create an AudioRenderer audio playback stream instance.
@@ -48,7 +48,7 @@
});
```
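The creation call itself is collapsed in this hunk. A minimal sketch of that step, assuming the callback form of createAudioRenderer and reusing option values that appear elsewhere in these guides (the variable name audioRenderer and the option values are illustrative assumptions):

```js
import audio from '@ohos.multimedia.audio';

let audioRendererOptions = {
  streamInfo: {
    samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
    channels: audio.AudioChannel.CHANNEL_2,
    sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
    encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
  },
  rendererInfo: {
    content: audio.ContentType.CONTENT_TYPE_MUSIC,
    usage: audio.StreamUsage.STREAM_USAGE_MEDIA,
    rendererFlags: 0
  }
};
let audioRenderer;
audio.createAudioRenderer(audioRendererOptions, (err, renderer) => { // create the playback stream whose effect mode will be managed
  if (err) {
    console.error(`Invoke createAudioRenderer failed, code is ${err.code}, message is ${err.message}`);
    return;
  }
  audioRenderer = renderer; // keep the instance for the getAudioEffectMode()/setAudioEffectMode() calls below
});
```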
### Querying the effect mode of the current playback instance (sample code in JS only)

```js
audioRenderer.getAudioEffectMode((err, effectmode) => {
@@ -61,7 +61,7 @@
});
```

### Setting the effect mode of the current playback instance (sample code in JS only)

Disable the system audio effects:
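The concrete call is collapsed in this hunk; a minimal sketch, assuming the callback form of setAudioEffectMode named in the prose above, is:

```js
audioRenderer.setAudioEffectMode(audio.AudioEffectMode.EFFECT_NONE, (err) => { // EFFECT_NONE turns the system audio effects off
  if (err) {
    console.error(`Failed to set the audio effect mode. ${err}`);
  } else {
    console.info('Callback invoked to indicate that the audio effect mode is set to EFFECT_NONE.');
  }
});
```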
@@ -94,7 +94,7 @@
This mainly covers querying, through the global audio effect interface, the effect mode for the scenario corresponding to a given StreamUsage.

For applications that play audio, developers need to know which effect mode the application's audio stream uses and act accordingly; for example, when a music app is playing, the music-scenario mode should be selected. Before using the query interface, call getStreamManager() to create an AudioStreamManager audio stream management instance.

### Obtaining the audio stream manager (sample code in JS only)

1. Create an AudioStreamManager instance. Before using AudioStreamManager APIs, call getStreamManager() to create an instance.
@@ -104,7 +104,7 @@
let audioStreamManager = audioManager.getStreamManager();
```

### Querying the effect mode for the corresponding scenario (sample code in JS only)

```js
audioStreamManager.getAudioEffectInfoArray(audio.StreamUsage.STREAM_USAGE_MEDIA, async (err, audioEffectInfoArray) => {
......
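The callback body above is truncated in the diff view. A complete hedged sketch of the query, assuming the getAudioEffectInfoArray callback signature shown on the opening line (the body is an assumption for illustration):

```js
audioStreamManager.getAudioEffectInfoArray(audio.StreamUsage.STREAM_USAGE_MEDIA, async (err, audioEffectInfoArray) => {
  if (err) {
    console.error(`Failed to get the audio effect info array. ${err}`);
    return;
  }
  // audioEffectInfoArray lists the effect modes supported for STREAM_USAGE_MEDIA, e.g. EFFECT_NONE and EFFECT_DEFAULT
  console.info(`The effect modes are: ${audioEffectInfoArray}`);
});
```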
@@ -9,6 +9,8 @@
```ts
import audio from '@ohos.multimedia.audio'; // import the audio module
import { BusinessError } from '@ohos.base'; // import BusinessError

let audioManager = audio.getAudioManager(); // create an AudioManager instance first
let audioRoutingManager = audioManager.getRoutingManager(); // then call the AudioManager method to create an AudioRoutingManager instance
@@ -30,7 +32,7 @@ let audioRoutingManager = audioManager.getRoutingManager(); // then call the AudioManager method to create an AudioRoutingManager instance
Use the getDevices() method to obtain information about all the current input devices.

```ts
audioRoutingManager.getDevices(audio.DeviceFlag.INPUT_DEVICES_FLAG).then((data: audio.AudioDeviceDescriptors) => {
  console.info('Promise returned to indicate that the device list is obtained.');
});
```
@@ -41,7 +43,7 @@ audioRoutingManager.getDevices(audio.DeviceFlag.INPUT_DEVICES_FLAG).then((data)
```ts
// Listen for audio device state changes
audioRoutingManager.on('deviceChange', audio.DeviceFlag.INPUT_DEVICES_FLAG, (deviceChanged: audio.DeviceChangeAction) => {
  console.info('device change type : ' + deviceChanged.type); // device connection state change: 0 means connected, 1 means disconnected
  console.info('device descriptor size : ' + deviceChanged.deviceDescriptors.length);
  console.info('device change descriptor : ' + deviceChanged.deviceDescriptors[0].deviceRole); // device role
@@ -49,7 +51,7 @@ audioRoutingManager.on('deviceChange', audio.DeviceFlag.INPUT_DEVICES_FLAG, (dev
});
// Cancel listening for audio device state changes
audioRoutingManager.off('deviceChange', (deviceChanged: audio.DeviceChangeAction) => {
  console.info('Should be no callback.');
});
```
@@ -63,7 +65,7 @@ audioRoutingManager.off('deviceChange', (deviceChanged) => {
> Users may choose to connect a group of audio devices (such as a pair of Bluetooth earphones), but the system only perceives them as one device; the group shares a single device ID.

```ts
let inputAudioDeviceDescriptor: audio.AudioDeviceDescriptors = [{
  deviceRole : audio.DeviceRole.INPUT_DEVICE,
  deviceType : audio.DeviceType.EARPIECE,
  id : 1,
@@ -75,12 +77,13 @@ let inputAudioDeviceDescriptor = [{
  networkId : audio.LOCAL_NETWORK_ID,
  interruptGroupId : 1,
  volumeGroupId : 1,
  displayName : ""
}];

async function getRoutingManager() {
  audioRoutingManager.selectInputDevice(inputAudioDeviceDescriptor).then(() => {
    console.info('Invoke selectInputDevice succeeded.');
  }).catch((err: BusinessError) => {
    console.error(`Invoke selectInputDevice failed, code is ${err.code}, message is ${err.message}`);
  });
}
......
@@ -9,6 +9,8 @@
```ts
import audio from '@ohos.multimedia.audio'; // import the audio module
import { BusinessError } from '@ohos.base'; // import BusinessError

let audioManager = audio.getAudioManager(); // create an AudioManager instance first
let audioRoutingManager = audioManager.getRoutingManager(); // then call the AudioManager method to create an AudioRoutingManager instance
@@ -33,7 +35,7 @@ let audioRoutingManager = audioManager.getRoutingManager(); // then call the AudioManager method to create an AudioRoutingManager instance
Use the getDevices() method to obtain information about all the current output devices.

```ts
audioRoutingManager.getDevices(audio.DeviceFlag.OUTPUT_DEVICES_FLAG).then((data: audio.AudioDeviceDescriptors) => {
  console.info('Promise returned to indicate that the device list is obtained.');
});
```
@@ -44,7 +46,7 @@ audioRoutingManager.getDevices(audio.DeviceFlag.OUTPUT_DEVICES_FLAG).then((data)
```ts
// Listen for audio device state changes
audioRoutingManager.on('deviceChange', audio.DeviceFlag.OUTPUT_DEVICES_FLAG, (deviceChanged: audio.DeviceChangeAction) => {
  console.info(`device change type : ${deviceChanged.type}`); // device connection state change: 0 means connected, 1 means disconnected
  console.info(`device descriptor size : ${deviceChanged.deviceDescriptors.length}`);
  console.info(`device change descriptor : ${deviceChanged.deviceDescriptors[0].deviceRole}`); // device role
@@ -64,7 +66,7 @@ audioRoutingManager.off('deviceChange');
> Users may choose to connect a group of audio devices (such as a pair of Bluetooth earphones), but the system only perceives them as one device; the group shares a single device ID.

```ts
let outputAudioDeviceDescriptor: audio.AudioDeviceDescriptors = [{
  deviceRole : audio.DeviceRole.OUTPUT_DEVICE,
  deviceType : audio.DeviceType.SPEAKER,
  id : 1,
@@ -76,12 +78,13 @@ let outputAudioDeviceDescriptor = [{
  networkId : audio.LOCAL_NETWORK_ID,
  interruptGroupId : 1,
  volumeGroupId : 1,
  displayName : ""
}];

async function selectOutputDevice() {
  audioRoutingManager.selectOutputDevice(outputAudioDeviceDescriptor).then(() => {
    console.info('Invoke selectOutputDevice succeeded.');
  }).catch((err: BusinessError) => {
    console.error(`Invoke selectOutputDevice failed, code is ${err.code}, message is ${err.message}`);
  });
}
@@ -96,16 +99,17 @@ async function selectOutputDevice(){
> The highest-priority output device is the device on which the sound will actually be played.

```ts
import audio from '@ohos.multimedia.audio';

let rendererInfo: audio.AudioRendererInfo = {
  content : audio.ContentType.CONTENT_TYPE_MUSIC,
  usage : audio.StreamUsage.STREAM_USAGE_MEDIA,
  rendererFlags : 0,
}
async function getPreferredOutputDeviceForRendererInfo() {
  audioRoutingManager.getPreferredOutputDeviceForRendererInfo(rendererInfo).then((desc: audio.AudioDeviceDescriptors) => {
    console.info(`device descriptor: ${desc}`);
  }).catch((err: BusinessError) => {
    console.error(`Result ERROR: ${err}`);
  })
}
@@ -114,18 +118,19 @@ async function getPreferOutputDeviceForRendererInfo() {

## Listening for changes of the highest-priority output device

```ts
import audio from '@ohos.multimedia.audio';

let rendererInfo: audio.AudioRendererInfo = {
  content : audio.ContentType.CONTENT_TYPE_MUSIC,
  usage : audio.StreamUsage.STREAM_USAGE_MEDIA,
  rendererFlags : 0,
}
// Listen for changes of the highest-priority output device
audioRoutingManager.on('preferredOutputDeviceChangeForRendererInfo', rendererInfo, (desc: audio.AudioDeviceDescriptors) => {
  console.info(`device change descriptor : ${desc[0].deviceRole}`); // device role
  console.info(`device change descriptor : ${desc[0].deviceType}`); // device type
});
// Cancel listening for changes of the highest-priority output device
audioRoutingManager.off('preferredOutputDeviceChangeForRendererInfo');
```
@@ -56,13 +56,16 @@
To give users a better experience, an application needs to handle each kind of audio interruption event appropriately. Taking audio playback developed with AudioRenderer as an example, this section shows the recommended handling and provides pseudocode for reference (if playback is developed with AVPlayer, the handling is similar). Developers can write the concrete implementation for their own situation and adjust the handling as they see fit.

```ts
import audio from '@ohos.multimedia.audio'; // import the audio module
import { BusinessError } from '@ohos.base'; // import BusinessError

let isPlay: boolean; // whether audio is currently playing; in real development this corresponds to the module tracking playback state
let isDucked: boolean; // whether the volume has been lowered; in real development this corresponds to the volume module
let started: boolean; // flag recording whether the "start playing (start)" operation succeeded

async function onAudioInterrupt(): Promise<void> {
  // Audio playback developed with AudioRenderer is used as the example; audioRenderer is the AudioRenderer instance created for playback.
  audioRenderer.on('audioInterrupt', async(interruptEvent: audio.InterruptEvent) => {
    // When an audio interruption event occurs, audioRenderer receives the interruptEvent callback and handles it according to its content
    // First read interruptEvent.forceType to see whether the system has already forcibly performed the operation
    // Then read interruptEvent.hintType and handle it accordingly
@@ -98,9 +101,9 @@ async function onAudioInterrupt(){
        // This branch means the audio stream that was paused after temporarily losing focus may now resume; resuming playback and switching to the playing state is recommended
        // If the application does not want to resume at this point, it can simply ignore this interruption event and do nothing
        // To resume, call start() here and record its result in the flag variable started
        await audioRenderer.start().then(() => {
          started = true; // start() succeeded
        }).catch((err: BusinessError) => {
          started = false; // start() failed
        });
        // If start() succeeded, switch to the audio playing state
......
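The other branches of the handler are collapsed in this hunk. For orientation, a condensed sketch of the branching the guide describes, assuming the standard audio.InterruptForceType and audio.InterruptHint enums (the flag updates reuse isPlay/isDucked from the snippet above; this is illustrative, not the guide's full logic):

```ts
audioRenderer.on('audioInterrupt', async (interruptEvent: audio.InterruptEvent) => {
  if (interruptEvent.forceType === audio.InterruptForceType.INTERRUPT_FORCE) {
    // The system has already performed the operation; the app only updates its own state
    switch (interruptEvent.hintType) {
      case audio.InterruptHint.INTERRUPT_HINT_PAUSE: // playback was force-paused
        isPlay = false;
        break;
      case audio.InterruptHint.INTERRUPT_HINT_DUCK: // the volume was force-lowered
        isDucked = true;
        break;
      case audio.InterruptHint.INTERRUPT_HINT_UNDUCK: // the volume was restored
        isDucked = false;
        break;
      default:
        break;
    }
  } else if (interruptEvent.forceType === audio.InterruptForceType.INTERRUPT_SHARE) {
    // The app decides what to do; e.g. INTERRUPT_HINT_RESUME is handled with start() as shown above
  }
});
```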
@@ -9,14 +9,14 @@
- Method 1: directly check the AudioRenderer [state](../reference/apis/js-apis-audio.md#属性)

  ```ts
  let audioRendererState: audio.AudioState = audioRenderer.state;
  console.info(`Current state is: ${audioRendererState}`)
  ```

- Method 2: register a stateChange listener to watch AudioRenderer state changes:

  ```ts
  audioRenderer.on('stateChange', (rendererState: audio.AudioState) => {
    console.info(`State change to: ${rendererState}`)
  });
  ```
@@ -46,6 +46,7 @@
```ts
import audio from '@ohos.multimedia.audio';
import { BusinessError } from '@ohos.base';

let audioManager = audio.getAudioManager();
let audioStreamManager = audioManager.getStreamManager();
```
@@ -53,7 +54,8 @@
2. Use on('audioRendererChange') to listen for audio playback stream changes. An application that monitors audio streams can subscribe to this event to be notified when the state or the device of a playback stream changes.

```ts
import audio from '@ohos.multimedia.audio';

audioStreamManager.on('audioRendererChange', (AudioRendererChangeInfoArray: audio.AudioRendererChangeInfoArray) => {
  for (let i = 0; i < AudioRendererChangeInfoArray.length; i++) {
    let AudioRendererChangeInfo = AudioRendererChangeInfoArray[i];
    console.info(`## RendererChange on is called for ${i} ##`);
@@ -89,8 +91,9 @@
> Applications that monitor the state of all audio streams need to [request the permission](../security/accesstoken-guidelines.md) ohos.permission.USE_BLUETOOTH; otherwise, the actual device name and device address cannot be obtained, and the queried device name and device address (attributes of Bluetooth devices) will be empty strings.

```ts
import audio from '@ohos.multimedia.audio';

async function getCurrentAudioRendererInfoArray(): Promise<void> {
  await audioStreamManager.getCurrentAudioRendererInfoArray().then((AudioRendererChangeInfoArray: audio.AudioRendererChangeInfoArray) => {
    console.info(`getCurrentAudioRendererInfoArray Get Promise is called `);
    if (AudioRendererChangeInfoArray != null) {
      for (let i = 0; i < AudioRendererChangeInfoArray.length; i++) {
@@ -111,7 +114,7 @@
        }
      }
    }
  }).catch((err: BusinessError) => {
    console.error(`Invoke getCurrentAudioRendererInfoArray failed, code is ${err.code}, message is ${err.message}`);
  });
}
......
@@ -9,14 +9,14 @@
- Method 1: directly check the AudioCapturer [state](../reference/apis/js-apis-audio.md#属性)

  ```ts
  let audioCapturerState: audio.AudioState = audioCapturer.state;
  console.info(`Current state is: ${audioCapturerState}`)
  ```

- Method 2: register a stateChange listener to watch AudioCapturer state changes:

  ```ts
  audioCapturer.on('stateChange', (capturerState: audio.AudioState) => {
    console.info(`State change to: ${capturerState}`)
  });
  ```
@@ -47,6 +47,7 @@
```ts
import audio from '@ohos.multimedia.audio';
import { BusinessError } from '@ohos.base';

let audioManager = audio.getAudioManager();
let audioStreamManager = audioManager.getStreamManager();
```
@@ -54,7 +55,7 @@
2. Use on('audioCapturerChange') to listen for audio recording stream change events. An application that monitors audio streams can subscribe to this event to be notified when the state or the device of a recording stream changes.

```ts
audioStreamManager.on('audioCapturerChange', (AudioCapturerChangeInfoArray: audio.AudioCapturerChangeInfoArray) => {
  for (let i = 0; i < AudioCapturerChangeInfoArray.length; i++) {
    console.info(`## CapChange on is called for element ${i} ##`);
    console.info(`StreamId for ${i} is: ${AudioCapturerChangeInfoArray[i].streamId}`);
@@ -89,7 +90,7 @@
```ts
async function getCurrentAudioCapturerInfoArray() {
  await audioStreamManager.getCurrentAudioCapturerInfoArray().then((AudioCapturerChangeInfoArray: audio.AudioCapturerChangeInfoArray) => {
    console.info('getCurrentAudioCapturerInfoArray Get Promise Called ');
    if (AudioCapturerChangeInfoArray != null) {
      for (let i = 0; i < AudioCapturerChangeInfoArray.length; i++) {
@@ -108,7 +109,7 @@
        }
      }
    }
  }).catch((err: BusinessError) => {
    console.error(`Invoke getCurrentAudioCapturerInfoArray failed, code is ${err.code}, message is ${err.message}`);
  });
}
......
@@ -31,28 +31,34 @@
3. Call selectOutputDevice to cast all audio currently playing on this device to the specified remote device.

```ts
import audio from '@ohos.multimedia.audio';
import { BusinessError } from '@ohos.base';

let audioManager = audio.getAudioManager();
let audioRoutingManager = audioManager.getRoutingManager();
let outputAudioDeviceDescriptor: audio.AudioDeviceDescriptors = [{
  deviceRole: audio.DeviceRole.OUTPUT_DEVICE,
  deviceType: audio.DeviceType.SPEAKER,
  id: 1,
  name: "",
  address: "",
  sampleRates: [44100],
  channelCounts: [2],
  channelMasks: [0],
  networkId: audio.LOCAL_NETWORK_ID,
  interruptGroupId: 1,
  volumeGroupId: 1,
  displayName: ""
}];

async function selectOutputDevice(): Promise<void> {
  audioRoutingManager.selectOutputDevice(outputAudioDeviceDescriptor, (err: BusinessError) => {
    if (err) {
      console.error(`Invoke selectOutputDevice failed, code is ${err.code}, message is ${err.message}`);
    } else {
      console.info('Invoke selectOutputDevice succeeded.');
    }
  });
}
```
@@ -67,35 +73,40 @@ async function selectOutputDevice() {
4. Call selectOutputDeviceByFilter to cast the specified audio stream playing on this device to the specified remote device.

```ts
import audio from '@ohos.multimedia.audio';
import { BusinessError } from '@ohos.base';

let audioManager = audio.getAudioManager();
let audioRoutingManager = audioManager.getRoutingManager();
let outputAudioRendererFilter: audio.AudioRendererFilter = {
  uid: 20010041,
  rendererInfo: {
    content: audio.ContentType.CONTENT_TYPE_MUSIC,
    usage: audio.StreamUsage.STREAM_USAGE_MEDIA,
    rendererFlags: 0 } as audio.AudioRendererInfo,
  rendererId: 0 };

let outputAudioDeviceDescriptor: audio.AudioDeviceDescriptors = [{
  deviceRole: audio.DeviceRole.OUTPUT_DEVICE,
  deviceType: audio.DeviceType.SPEAKER,
  id: 1,
  name: "",
  address: "",
  sampleRates: [44100],
  channelCounts: [2],
  channelMasks: [0],
  networkId: audio.LOCAL_NETWORK_ID,
  interruptGroupId: 1,
  volumeGroupId: 1,
  displayName: ""
}];

async function selectOutputDeviceByFilter(): Promise<void> {
  audioRoutingManager.selectOutputDeviceByFilter(outputAudioRendererFilter, outputAudioDeviceDescriptor, (err: BusinessError) => {
    if (err) {
      console.error(`Invoke selectOutputDeviceByFilter failed, code is ${err.code}, message is ${err.message}`);
    } else {
      console.info('Invoke selectOutputDeviceByFilter succeeded.');
    }
  });
}
```
@@ -19,20 +19,22 @@ AudioCapturer is an audio capturer used to record PCM (Pulse Code Modulation) …
```ts
import audio from '@ohos.multimedia.audio';
import fs from '@ohos.file.fs';
import { BusinessError } from '@ohos.base';

let audioStreamInfo: audio.AudioStreamInfo = {
  samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
  channels: audio.AudioChannel.CHANNEL_2,
  sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
  encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
};

let audioCapturerInfo: audio.AudioCapturerInfo = {
  source: audio.SourceType.SOURCE_TYPE_MIC,
  capturerFlags: 0
};

let audioCapturerOptions: audio.AudioCapturerOptions = {
  streamInfo: audioStreamInfo,
  capturerInfo: audioCapturerInfo
};
@@ -50,7 +52,7 @@ AudioCapturer is an audio capturer used to record PCM (Pulse Code Modulation) …
2. Call start() to enter the running state and start recording audio.

```ts
audioCapturer.start((err: BusinessError) => {
  if (err) {
    console.error(`Capturer start failed, code is ${err.code}, message is ${err.message}`);
  } else {
@@ -62,16 +64,16 @@ AudioCapturer is an audio capturer used to record PCM (Pulse Code Modulation) …
3. Specify the path of the recording file, and call read() to read the data in the buffer.

```ts
let file: fs.File = fs.openSync(path, 0o2 | 0o100);
let bufferSize: number = await audioCapturer.getBufferSize();
let buffer: ArrayBuffer = await audioCapturer.read(bufferSize, true);
fs.writeSync(file.fd, buffer);
```

4. Call stop() to stop recording.

```ts
audioCapturer.stop((err: BusinessError) => {
  if (err) {
    console.error(`Capturer stop failed, code is ${err.code}, message is ${err.message}`);
  } else {
@@ -83,7 +85,7 @@ AudioCapturer is an audio capturer used to record PCM (Pulse Code Modulation) …
5. Call release() to destroy the instance and release resources.

```ts
audioCapturer.release((err: BusinessError) => {
  if (err) {
    console.error(`capturer release failed, code is ${err.code}, message is ${err.message}`);
  } else {
@@ -104,18 +106,18 @@ import fs from '@ohos.file.fs';
const TAG = 'AudioCapturerDemo';
export default class AudioCapturerDemo {
  private audioCapturer: audio.AudioCapturer = undefined;
  private audioStreamInfo: audio.AudioStreamInfo = {
    samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
    channels: audio.AudioChannel.CHANNEL_1,
    sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
    encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
  }
  private audioCapturerInfo: audio.AudioCapturerInfo = {
    source: audio.SourceType.SOURCE_TYPE_MIC, // source type
    capturerFlags: 0 // capturer flags
  }
  private audioCapturerOptions: audio.AudioCapturerOptions = {
    streamInfo: this.audioStreamInfo,
    capturerInfo: this.audioCapturerInfo
  }
@@ -130,12 +132,12 @@ export default class AudioCapturerDemo {
      console.info(`${TAG}: create AudioCapturer success`);
      this.audioCapturer = capturer;
      this.audioCapturer.on('markReach', 1000, (position: number) => { // subscribe to the markReach event; the callback fires once 1000 frames have been captured
        if (position === 1000) {
          console.info('ON Triggered successfully');
        }
      });
      this.audioCapturer.on('periodReach', 2000, (position: number) => { // subscribe to the periodReach event; the callback fires every time 2000 frames have been captured
        if (position === 2000) {
          console.info('ON Triggered successfully');
        }
@@ -147,7 +149,7 @@ export default class AudioCapturerDemo {
  // Start one audio capture pass
  async start() {
    let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
    if (stateGroup.indexOf(this.audioCapturer.state.valueOf()) === -1) { // capturing can start only when the state is STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED
      console.error(`${TAG}: start failed`);
      return;
    }
@@ -160,6 +162,10 @@ export default class AudioCapturerDemo {
    let fd = file.fd;
    let numBuffersToCapture = 150; // write 150 times in the loop
    let count = 0;
    class Options {
      offset: number = 0
      length: number = 0
    }
    while (numBuffersToCapture) {
      let bufferSize = await this.audioCapturer.getBufferSize();
      let buffer = await this.audioCapturer.read(bufferSize, true);
@@ -181,12 +187,12 @@ export default class AudioCapturerDemo {
  // Stop capturing
  async stop() {
    // Stopping is possible only when the capturer state is STATE_RUNNING or STATE_PAUSED
    if (this.audioCapturer.state.valueOf() !== audio.AudioState.STATE_RUNNING && this.audioCapturer.state.valueOf() !== audio.AudioState.STATE_PAUSED) {
      console.info('Capturer is not running or paused');
      return;
    }
    await this.audioCapturer.stop(); // stop capturing
    if (this.audioCapturer.state.valueOf() === audio.AudioState.STATE_STOPPED) {
      console.info('Capturer stopped');
    } else {
      console.error('Capturer stop failed');
@@ -196,12 +202,12 @@ export default class AudioCapturerDemo {
  // Destroy the instance and release resources
  async release() {
    // release can be called only when the capturer state is neither STATE_RELEASED nor STATE_NEW
    if (this.audioCapturer.state.valueOf() === audio.AudioState.STATE_RELEASED || this.audioCapturer.state.valueOf() === audio.AudioState.STATE_NEW) {
      console.info('Capturer already released');
      return;
    }
    await this.audioCapturer.release(); // release resources
    if (this.audioCapturer.state.valueOf() === audio.AudioState.STATE_RELEASED) {
      console.info('Capturer released');
    } else {
      console.error('Capturer release failed');
......
...@@ -32,21 +32,22 @@ AudioRenderer是音频渲染器,用于播放PCM(Pulse Code Modulation)音 ...@@ -32,21 +32,22 @@ AudioRenderer是音频渲染器,用于播放PCM(Pulse Code Modulation)音
```ts ```ts
import audio from '@ohos.multimedia.audio'; import audio from '@ohos.multimedia.audio';
import { BusinessError } from '@ohos.base';
let audioStreamInfo = { let audioStreamInfo: audio.AudioStreamInfo = {
samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
channels: audio.AudioChannel.CHANNEL_1, channels: audio.AudioChannel.CHANNEL_1,
sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
}; };
let audioRendererInfo = { let audioRendererInfo: audio.AudioRendererInfo = {
content: audio.ContentType.CONTENT_TYPE_SPEECH, content: audio.ContentType.CONTENT_TYPE_SPEECH,
usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION, usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION,
rendererFlags: 0 rendererFlags: 0
}; };
let audioRendererOptions = { let audioRendererOptions: audio.AudioRendererOptions = {
streamInfo: audioStreamInfo, streamInfo: audioStreamInfo,
rendererInfo: audioRendererInfo rendererInfo: audioRendererInfo
}; };
...@@ -65,7 +66,7 @@ AudioRenderer是音频渲染器,用于播放PCM(Pulse Code Modulation)音 ...@@ -65,7 +66,7 @@ AudioRenderer是音频渲染器,用于播放PCM(Pulse Code Modulation)音
2. 调用start()方法进入running状态,开始渲染音频。 2. 调用start()方法进入running状态,开始渲染音频。
```ts ```ts
audioRenderer.start((err) => { audioRenderer.start((err: BusinessError) => {
if (err) { if (err) {
console.error(`Renderer start failed, code is ${err.code}, message is ${err.message}`); console.error(`Renderer start failed, code is ${err.code}, message is ${err.message}`);
} else { } else {
...@@ -77,12 +78,12 @@ AudioRenderer是音频渲染器,用于播放PCM(Pulse Code Modulation)音 ...@@ -77,12 +78,12 @@ AudioRenderer是音频渲染器,用于播放PCM(Pulse Code Modulation)音
3. 指定待渲染文件地址,打开文件调用write()方法向缓冲区持续写入音频数据进行渲染播放。如果需要对音频数据进行处理以实现个性化的播放,在写入之前操作即可。 3. 指定待渲染文件地址,打开文件调用write()方法向缓冲区持续写入音频数据进行渲染播放。如果需要对音频数据进行处理以实现个性化的播放,在写入之前操作即可。
```ts ```ts
const bufferSize = await audioRenderer.getBufferSize(); const bufferSize: number = await audioRenderer.getBufferSize();
let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY); let file: fs.File = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
let buf = new ArrayBuffer(bufferSize); let buf = new ArrayBuffer(bufferSize);
let readsize = await fs.read(file.fd, buf); let readsize: number = await fs.read(file.fd, buf);
let writeSize = await new Promise((resolve, reject) => { let writeSize: number = await new Promise((resolve, reject) => {
audioRenderer.write(buf, (err, writeSize) => { audioRenderer.write(buf, (err: BusinessError, writeSize: number) => {
if (err) { if (err) {
reject(err); reject(err);
} else { } else {
...@@ -95,7 +96,7 @@ AudioRenderer是音频渲染器,用于播放PCM(Pulse Code Modulation)音 ...@@ -95,7 +96,7 @@ AudioRenderer是音频渲染器,用于播放PCM(Pulse Code Modulation)音
4. 调用stop()方法停止渲染。 4. 调用stop()方法停止渲染。
```ts ```ts
audioRenderer.stop((err) => { audioRenderer.stop((err: BusinessError) => {
if (err) { if (err) {
console.error(`Renderer stop failed, code is ${err.code}, message is ${err.message}`); console.error(`Renderer stop failed, code is ${err.code}, message is ${err.message}`);
} else { } else {
...@@ -107,7 +108,7 @@ AudioRenderer是音频渲染器,用于播放PCM(Pulse Code Modulation)音 ...@@ -107,7 +108,7 @@ AudioRenderer是音频渲染器,用于播放PCM(Pulse Code Modulation)音
5. 调用release()方法销毁实例,释放资源。 5. 调用release()方法销毁实例,释放资源。
```ts ```ts
audioRenderer.release((err) => { audioRenderer.release((err: BusinessError) => {
if (err) { if (err) {
console.error(`Renderer release failed, code is ${err.code}, message is ${err.message}`); console.error(`Renderer release failed, code is ${err.code}, message is ${err.message}`);
} else { } else {
...@@ -127,19 +128,19 @@ import fs from '@ohos.file.fs'; ...@@ -127,19 +128,19 @@ import fs from '@ohos.file.fs';
const TAG = 'AudioRendererDemo'; const TAG = 'AudioRendererDemo';
export default class AudioRendererDemo { export default class AudioRendererDemo {
private renderModel = undefined; private renderModel: audio.AudioRenderer = undefined;
private audioStreamInfo = { private audioStreamInfo: audio.AudioStreamInfo = {
samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, // 采样率 samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, // 采样率
channels: audio.AudioChannel.CHANNEL_2, // 通道 channels: audio.AudioChannel.CHANNEL_2, // 通道
sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // 采样格式 sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // 采样格式
encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // 编码格式 encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // 编码格式
} }
private audioRendererInfo = { private audioRendererInfo: audio.AudioRendererInfo = {
content: audio.ContentType.CONTENT_TYPE_MUSIC, // 媒体类型 content: audio.ContentType.CONTENT_TYPE_MUSIC, // 媒体类型
usage: audio.StreamUsage.STREAM_USAGE_MEDIA, // 音频流使用类型 usage: audio.StreamUsage.STREAM_USAGE_MEDIA, // 音频流使用类型
rendererFlags: 0 // 音频渲染器标志 rendererFlags: 0 // 音频渲染器标志
} }
private audioRendererOptions = { private audioRendererOptions: audio.AudioRendererOptions = {
streamInfo: this.audioStreamInfo, streamInfo: this.audioStreamInfo,
rendererInfo: this.audioRendererInfo rendererInfo: this.audioRendererInfo
} }
...@@ -150,12 +151,12 @@ export default class AudioRendererDemo { ...@@ -150,12 +151,12 @@ export default class AudioRendererDemo {
      if (!err) {
        console.info(`${TAG}: creating AudioRenderer success`);
        this.renderModel = renderer;
        this.renderModel.on('stateChange', (state: audio.AudioState) => { // Listener fired when the renderer transitions to the given state
          if (state == 2) {
            console.info('audio renderer state is: STATE_RUNNING');
          }
        });
        this.renderModel.on('markReach', 1000, (position: number) => { // Subscribe to markReach: fires once 1000 frames have been rendered
          if (position == 1000) {
            console.info('ON Triggered successfully');
          }
        });
      }
    });
  }
  // Start one audio rendering pass
  async start() {
    let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
    if (stateGroup.indexOf(this.renderModel.state.valueOf()) === -1) { // Rendering can start only when the state is prepared, paused, or stopped
      console.error(TAG + ': start failed');
      return;
    }
    // ... (unchanged lines elided in the diff: start the renderer, get the buffer size, and open the audio file)
    let stat = await fs.stat(filePath);
    let buf = new ArrayBuffer(bufferSize);
    let len = stat.size % bufferSize === 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);
    class Options {
      offset: number
      length: number
    }
    for (let i = 0; i < len; i++) {
      let options: Options = {
        offset: i * bufferSize,
        length: bufferSize
      };
      // ... (unchanged line elided in the diff: read the next chunk of the file into buf)
      // buf holds the audio data to be written into the buffer; it can be preprocessed before AudioRenderer.write() is called to implement custom playback behavior. AudioRenderer reads the data written into the buffer and renders it.
      let writeSize: number = await new Promise((resolve, reject) => {
        this.renderModel.write(buf, (err, writeSize) => {
          if (err) {
            reject(err);
          } else {
            resolve(writeSize);
          }
        });
      });
      if (this.renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) { // If the renderer is in the released state, stop rendering
        fs.close(file);
        await this.renderModel.stop();
      }
      if (this.renderModel.state.valueOf() === audio.AudioState.STATE_RUNNING) {
        if (i === len - 1) { // If the file has been read to the end, stop rendering
          fs.close(file);
          await this.renderModel.stop();
        }
      }
    }
  }
  // Pause rendering
  async pause() {
    // Pausing is allowed only when the renderer is in the running state
    if (this.renderModel.state.valueOf() !== audio.AudioState.STATE_RUNNING) {
      console.info('Renderer is not running');
      return;
    }
    await this.renderModel.pause(); // Pause rendering
    if (this.renderModel.state.valueOf() === audio.AudioState.STATE_PAUSED) {
      console.info('Renderer is paused.');
    } else {
      console.error('Pausing renderer failed.');
    }
  }
  // Stop rendering
  async stop() {
    // Stopping is allowed only when the renderer is running or paused
    if (this.renderModel.state.valueOf() !== audio.AudioState.STATE_RUNNING && this.renderModel.state.valueOf() !== audio.AudioState.STATE_PAUSED) {
      console.info('Renderer is not running or paused.');
      return;
    }
    await this.renderModel.stop(); // Stop rendering
    if (this.renderModel.state.valueOf() === audio.AudioState.STATE_STOPPED) {
      console.info('Renderer stopped.');
    } else {
      console.error('Stopping renderer failed.');
    }
  }
  // Destroy the instance and release its resources
  async release() {
    // The renderer can be released only when it is not already in the released state
    if (this.renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) {
      console.info('Renderer already released');
      return;
    }
    await this.renderModel.release(); // Release resources
    if (this.renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) {
      console.info('Renderer released');
    } else {
      console.error('Renderer release failed.');
    }
  }
}
```
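For context, one plausible way to drive the demo class above from application code (the call sequence is illustrative, not part of the original sample):

```ts
// Illustrative usage of AudioRendererDemo; invoke the methods from UI events as needed.
let demo = new AudioRendererDemo();
demo.init();       // create the renderer and subscribe to stateChange/markReach
demo.start();      // begin rendering the PCM file (async; fire-and-forget here)
// demo.pause();   // pause while running
// demo.stop();    // stop while running or paused
// demo.release(); // destroy the instance when playback is finished
```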
@@ -46,12 +46,12 @@ TonePlayer<sup>9+</sup> provides APIs for playing and managing DTMF (Dual Tone Multi Frequency,…
```ts
import audio from '@ohos.multimedia.audio';
let audioRendererInfo: audio.AudioRendererInfo = {
  content: audio.ContentType.CONTENT_TYPE_SONIFICATION,
  usage: audio.StreamUsage.STREAM_USAGE_MEDIA,
  rendererFlags: 0
};
let tonePlayerPromise = audio.createTonePlayer(audioRendererInfo);
```
2. Load the DTMF tone configuration of the specified type.
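The load snippet itself is elided in this diff; a minimal sketch, assuming the promise-based `load(type: ToneType)` API of `audio.TonePlayer` and the dial-tone constant `TONE_TYPE_DIAL_1`:

```ts
// Sketch: await the created player, then load a DTMF tone configuration.
let tonePlayer: audio.TonePlayer = await audio.createTonePlayer(audioRendererInfo);
await tonePlayer.load(audio.ToneType.TONE_TYPE_DIAL_1);
```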
@@ -95,11 +95,11 @@ export class TonelayerDemo {
```ts
export class TonelayerDemo {
  private timer: number;
  private timerPro: number;
  // Promise-based invocation
  async testTonePlayerPromise(type: audio.ToneType) {
    console.info('testTonePlayerPromise start');
    if (this.timerPro) clearTimeout(this.timerPro);
    let tonePlayerPromise: audio.TonePlayer;
    let audioRendererInfo: audio.AudioRendererInfo = {
      content: audio.ContentType.CONTENT_TYPE_SONIFICATION,
      usage: audio.StreamUsage.STREAM_USAGE_MEDIA,
      rendererFlags: 0
    };
    // ... (remainder of the method elided in this diff)
  }
}
```
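The rest of the promise-based test method is elided above; a rough sketch of the typical flow, assuming the promise overloads of load/start/stop/release:

```ts
// Sketch: end-to-end DTMF playback with the promise API.
async function playToneOnce(type: audio.ToneType, rendererInfo: audio.AudioRendererInfo) {
  let tonePlayer: audio.TonePlayer = await audio.createTonePlayer(rendererInfo);
  await tonePlayer.load(type);   // load the tone configuration
  await tonePlayer.start();      // start DTMF playback
  // ... keep the tone sounding as long as needed, e.g. via a timer ...
  await tonePlayer.stop();       // stop playback
  await tonePlayer.release();    // release the player
}
```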
@@ -8,6 +8,8 @@
```ts
import audio from '@ohos.multimedia.audio';
import { BusinessError } from '@ohos.base';
let audioManager = audio.getAudioManager();
let audioVolumeManager = audioManager.getVolumeManager();
```
@@ -17,7 +19,7 @@ let audioVolumeManager = audioManager.getVolumeManager();
By subscribing to the volume change event, you can monitor changes to the system volume:
```ts
audioVolumeManager.on('volumeChange', (volumeEvent: audio.VolumeEvent) => {
  console.info(`VolumeType of stream: ${volumeEvent.volumeType} `);
  console.info(`Volume level: ${volumeEvent.volume} `);
  console.info(`Whether to updateUI: ${volumeEvent.updateUi} `);
});
```
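Reading the current volume back is not shown in this diff; a minimal sketch, assuming the API 9 `getVolumeGroupManager`/`getVolume` interfaces and the `audio.DEFAULT_VOLUME_GROUP_ID` constant:

```ts
// Sketch: query the current media volume through the default volume group.
audioVolumeManager.getVolumeGroupManager(audio.DEFAULT_VOLUME_GROUP_ID,
  (err: BusinessError, groupManager: audio.AudioVolumeGroupManager) => {
    if (err) {
      console.error(`Get volume group manager failed, code is ${err.code}, message is ${err.message}`);
      return;
    }
    groupManager.getVolume(audio.AudioVolumeType.MEDIA, (err: BusinessError, volume: number) => {
      if (!err) {
        console.info(`Current media volume level: ${volume}`);
      }
    });
  });
```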
@@ -34,15 +36,15 @@ audioVolumeManager.on('volumeChange', (volumeEvent) => {
```ts
let volume = 1.0; // Target volume; the valid range is [0.00, 1.00], where 1.0 is the maximum
avPlayer.setVolume(volume);
```
The following sample code sets the audio stream volume by using AudioRenderer:
```ts
audioRenderer.setVolume(0.5).then(() => { // The volume range is [0.0, 1.0]
  console.info('Invoke setVolume succeeded.');
}).catch((err: BusinessError) => {
  console.error(`Invoke setVolume failed, code is ${err.code}, message is ${err.message}`);
});
```