Commit 43e2896e authored by Geevarghese V K

js api usage doc update based on new d.ts

Signed-off-by: Geevarghese V K <geevarghese.v.k1@huawei.com>
Parent 0d8c9861
@@ -146,7 +146,13 @@ You use audio management APIs to set and obtain volume, and get information about
</th>
</tr>
</thead>
-<tbody><tr id="row188162012454"><td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.1 "><p id="p764215288462"><a name="p764215288462"></a><a name="p764215288462"></a>MEDIA = 1</p>
+<tbody>
+<tr id="row188162012454"><td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.1 "><p id="p764215288462"><a name="p764215288462"></a><a name="p764215288462"></a>VOICE_CALL = 0</p>
+</td>
+<td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.2 "><p id="p1596200459"><a name="p1596200459"></a><a name="p1596200459"></a>Audio streams for voice calls</p>
+</td>
+</tr>
+<tr id="row188162012454"><td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.1 "><p id="p764215288462"><a name="p764215288462"></a><a name="p764215288462"></a>MEDIA = 1</p>
</td>
<td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.2 "><p id="p1596200459"><a name="p1596200459"></a><a name="p1596200459"></a>Audio streams for media purpose</p>
</td>
@@ -154,6 +160,11 @@ You use audio management APIs to set and obtain volume, and get information about
<tr id="row1288915367468"><td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.1 "><p id="p51611346194614"><a name="p51611346194614"></a><a name="p51611346194614"></a>RINGTONE = 2</p>
</td>
<td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.2 "><p id="p9333131144712"><a name="p9333131144712"></a><a name="p9333131144712"></a>Audio streams for ring tones</p>
<tr id="row188162012454"><td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.1 "><p id="p764215288462"><a name="p764215288462"></a><a name="p764215288462"></a>VOICE_ASSISTANT = 9</p>
</td>
<td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.2 "><p id="p1596200459"><a name="p1596200459"></a><a name="p1596200459"></a>Audio streams for voice assistant</p>
</td>
</tr>
</td>
</tr>
</tbody>
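For context, a minimal sketch of how these stream types pair with the volume APIs is shown below; it assumes the AudioManager interfaces described in js-apis-audio.md, and the volume level 10 is an arbitrary example value.

```
const audioManager = audio.getAudioManager();

// Set the volume for MEDIA streams (10 is an arbitrary example level).
audioManager.setVolume(audio.AudioVolumeType.MEDIA, 10, (err) => {
    if (err) {
        console.error(`Failed to set volume: ${err.message}`);
        return;
    }
    // Read the volume back for the same stream type.
    audioManager.getVolume(audio.AudioVolumeType.MEDIA, (err, value) => {
        if (err) {
            console.error(`Failed to get volume: ${err.message}`);
            return;
        }
        console.log(`Media getVolume ${value}`);
    });
});
```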
@@ -222,27 +233,27 @@ You use audio management APIs to set and obtain volume, and get information about
<td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.2 "><p id="p17389145016497"><a name="p17389145016497"></a><a name="p17389145016497"></a>Invalid device</p>
</td>
</tr>
<tr id="row938915016493"><td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.1 "><p id="p538925044916"><a name="p538925044916"></a><a name="p538925044916"></a>SPEAKER = 1</p>
<tr id="row938915016493"><td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.1 "><p id="p538925044916"><a name="p538925044916"></a><a name="p538925044916"></a>SPEAKER = 2</p>
</td>
<td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.2 "><p id="p16724165865017"><a name="p16724165865017"></a><a name="p16724165865017"></a>Speaker</p>
</td>
</tr>
<tr id="row12389105084916"><td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.1 "><p id="p538914502497"><a name="p538914502497"></a><a name="p538914502497"></a>WIRED_HEADSET = 2</p>
<tr id="row12389105084916"><td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.1 "><p id="p538914502497"><a name="p538914502497"></a><a name="p538914502497"></a>WIRED_HEADSET = 3</p>
</td>
<td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.2 "><p id="p63891850144911"><a name="p63891850144911"></a><a name="p63891850144911"></a>Wired headset</p>
</td>
</tr>
<tr id="row2389205074915"><td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.1 "><p id="p10389175054919"><a name="p10389175054919"></a><a name="p10389175054919"></a>BLUETOOTH_SCO = 3</p>
<tr id="row2389205074915"><td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.1 "><p id="p10389175054919"><a name="p10389175054919"></a><a name="p10389175054919"></a>BLUETOOTH_SCO = 7</p>
</td>
<td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.2 "><p id="p538905016496"><a name="p538905016496"></a><a name="p538905016496"></a>Bluetooth device using the synchronous connection oriented link (SCO)</p>
<td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.2 "><p id="p538905016496"><a name="p538905016496"></a><a name="p538905016496"></a>Bluetooth device using the synchronous connection oriented (SCO) link</p>
</td>
</tr>
<tr id="row83891502499"><td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.1 "><p id="p1938975015494"><a name="p1938975015494"></a><a name="p1938975015494"></a>BLUETOOTH_A2DP = 4</p>
<tr id="row83891502499"><td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.1 "><p id="p1938975015494"><a name="p1938975015494"></a><a name="p1938975015494"></a>BLUETOOTH_A2DP = 8</p>
</td>
<td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.2 "><p id="p193891550134912"><a name="p193891550134912"></a><a name="p193891550134912"></a>Bluetooth device using advanced audio distribution profile (A2DP)</p>
</td>
</tr>
<tr id="row11389175014916"><td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.1 "><p id="p1738955018497"><a name="p1738955018497"></a><a name="p1738955018497"></a>MIC = 5</p>
<tr id="row11389175014916"><td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.1 "><p id="p1738955018497"><a name="p1738955018497"></a><a name="p1738955018497"></a>MIC = 15</p>
</td>
<td class="cellrowborder" valign="top" width="50%" headers="mcps1.2.3.1.2 "><p id="p73891250174914"><a name="p73891250174914"></a><a name="p73891250174914"></a>Microphone</p>
</td>
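As a hedged illustration of where these values surface, the sketch below queries the connected output devices and checks each descriptor's deviceType; getDevices and OUTPUT_DEVICES_FLAG are taken from js-apis-audio.md, while the filtering logic is purely illustrative.

```
const audioManager = audio.getAudioManager();

// List the currently connected output devices.
audioManager.getDevices(audio.DeviceFlag.OUTPUT_DEVICES_FLAG, (err, devices) => {
    if (err) {
        console.error(`Failed to get devices: ${err.message}`);
        return;
    }
    for (let device of devices) {
        // deviceType holds one of the DeviceType values listed above.
        if (device.deviceType == audio.DeviceType.SPEAKER) {
            console.log('Speaker output is available');
        }
    }
});
```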
@@ -269,5 +280,3 @@ You use audio management APIs to set and obtain volume, and get information about
    console.log(`Media getVolume ${value}`);
});
```
@@ -3,8 +3,6 @@
---
## ***Note***:
1. This document applies to JavaScript.
-2. Changes to the AudioRenderer interface have been proposed.
-When the updated APIs have been integrated, the document will be revised, and apps must adapt to it.
---
## **Summary**
This guide will show you how to use AudioRenderer to create an audio player app.
@@ -24,11 +22,28 @@ Please see [**js-apis-audio.md**](https://gitee.com/openharmony/docs/blob/master
## **Usage**
Here's an example of how to use AudioRenderer to play a raw audio file.
-1. Use **createAudioRenderer** to create an AudioRenderer instance for the **AudioVolumeType**.\
+1. Use **createAudioRenderer** to create an AudioRenderer instance. Renderer parameters can be set in **audioRendererOptions**.\
This object can be used to play, control, and obtain the status of the playback, as well as receive callback notifications.
```
-const volType = audio.AudioVolumeType.MEDIA; // For music
-const audioRenderer = audio.createAudioRenderer(volType);
+var audioStreamInfo = {
+    samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
+    channels: audio.AudioChannel.CHANNEL_1,
+    sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
+    encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
+}
+
+var audioRendererInfo = {
+    content: audio.ContentType.CONTENT_TYPE_SPEECH,
+    usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION,
+    rendererFlags: 1
+}
+
+var audioRendererOptions = {
+    streamInfo: audioStreamInfo,
+    rendererInfo: audioRendererInfo
+}
+
+let audioRenderer = await audio.createAudioRenderer(audioRendererOptions);
```
2. Subscribe to audio interruption events using the **on** API.\
@@ -118,27 +133,8 @@ Here's an example of how to use AudioRenderer to play a raw audio file.
});
```
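The subscription whose tail appears above follows the **on** pattern; a minimal sketch, assuming the 'interrupt' event and the InterruptEvent hintType field described in js-apis-audio.md, might look like this.

```
audioRenderer.on('interrupt', (interruptEvent) => {
    // Pause writing when the framework forces an interruption,
    // and resume once the interruption ends.
    if (interruptEvent.hintType == audio.InterruptHint.INTERRUPT_HINT_PAUSE) {
        console.info('Force paused. Stop writing buffers');
    } else if (interruptEvent.hintType == audio.InterruptHint.INTERRUPT_HINT_RESUME) {
        console.info('Resume the paused renderer');
    }
});
```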
-3. Prepare the renderer. Call **SetParams** on the instance. You need to set the renderer parameters based on the audio playback specification.
-```
-async function prepareRenderer() {
-    // file_example_WAV_2MG.wav
-    var audioParams = {
-        format: audio.AudioSampleFormat.SAMPLE_S16LE,
-        channels: audio.AudioChannel.STEREO,
-        samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_16000,
-        encoding: audio.AudioEncodingType.ENCODING_PCM,
-    };
-    let response = await audioRenderer.setParams(audioParams);
-    var state = audioRenderer.state;
-    if (state != audio.AudioState.STATE_PREPARED) {
-        console.info('Prepare renderer failed');
-        return;
-    }
-}
-```
4. Call the **start()** function on the AudioRenderer instance to start/resume the playback task.\
-The renderer state will be STATE _RUNNING once the start is complete. You can then begin writing buffers.
+The renderer state will be STATE_RUNNING once the start is complete. You can then begin writing buffers.
```
async function startRenderer() {
    var state = audioRenderer.state;
@@ -148,13 +144,14 @@ Here's an example of how to use AudioRenderer to play a raw audio file.
        console.info('Renderer is not in a correct state to start');
        return;
    }
-    var started = await audioRenderer.start();
-    if (started) {
-        isPlay = true;
+    await audioRenderer.start();
+    state = audioRenderer.state;
+    if (state == audio.AudioState.STATE_RUNNING) {
        console.info('Renderer started');
    } else {
        console.error('Renderer start failed');
        return;
    }
}
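Once the renderer reports STATE_RUNNING, raw PCM data can be written. Below is a simplified sketch; reading the source file is omitted, the chunking loop is illustrative, and getBufferSize and write are the APIs described in js-apis-audio.md.

```
async function writeAudioData(data) {
    // data is an ArrayBuffer of raw PCM, e.g. read from the source file.
    // getBufferSize reports the preferred chunk size for this stream.
    const bufferSize = await audioRenderer.getBufferSize();
    for (let offset = 0; offset < data.byteLength; offset += bufferSize) {
        const chunk = data.slice(offset, Math.min(offset + bufferSize, data.byteLength));
        // write resolves once the chunk has been consumed by the renderer.
        await audioRenderer.write(chunk);
    }
}
```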
@@ -212,8 +209,11 @@ Here's an example of how to use AudioRenderer to play a raw audio file.
        console.info('Renderer is not running');
        return;
    }
-    var paused = await audioRenderer.pause();
-    if (paused) {
+    await audioRenderer.pause();
+    state = audioRenderer.state;
+    if (state == audio.AudioState.STATE_PAUSED) {
        console.info('Renderer paused');
    } else {
        console.error('Renderer pause failed');
@@ -226,8 +226,11 @@ Here's an example of how to use AudioRenderer to play a raw audio file.
        console.info('Renderer is not running or paused');
        return;
    }
-    var stopped = await audioRenderer.stop();
-    if (stopped) {
+    await audioRenderer.stop();
+    state = audioRenderer.state;
+    if (state == audio.AudioState.STATE_STOPPED) {
        console.info('Renderer stopped');
    } else {
        console.error('Renderer stop failed');
@@ -243,8 +246,11 @@ Here's an example of how to use AudioRenderer to play a raw audio file.
        console.info('Resources already released');
        return;
    }
-    var released = await audioRenderer.release();
-    if (released) {
+    await audioRenderer.release();
+    state = audioRenderer.state;
+    if (state == audio.AudioState.STATE_RELEASED) {
        console.info('Renderer released');
    } else {
        console.info('Renderer release failed');
@@ -257,7 +263,6 @@ Here's an example of how to use AudioRenderer to play a raw audio file.
You should also keep in mind that an AudioRenderer is state-based.
That is, the AudioRenderer has an internal state that you must always check when calling playback control APIs, because some operations are only acceptable while the renderer is in a given state.\
The system may throw an error or exception, or produce other undefined behavior, if you perform an operation in an improper state.\
Before each necessary operation, the example code performs a state check.
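One way to centralize those checks is a small guard function. The helper below is a hypothetical convenience, not part of the API; it assumes the set of legal pre-start states mirrors the check in startRenderer above.

```
function isReadyToStart() {
    const state = audioRenderer.state;
    // start() is assumed to be legal only from these states.
    return state == audio.AudioState.STATE_PREPARED
        || state == audio.AudioState.STATE_PAUSED
        || state == audio.AudioState.STATE_STOPPED;
}
```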
## **Asynchronous Operations:**
Most of the AudioRenderer calls are asynchronous. As a result, the UI thread will not be blocked.\
@@ -267,4 +272,3 @@ provides reference for both callback and promise.
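As a sketch of the two asynchronous styles, both overloads of getBufferSize from js-apis-audio.md are shown below; the wrapping function is assumed.

```
async function queryBufferSize() {
    // Promise form: await the result.
    const bufferSize = await audioRenderer.getBufferSize();
    console.info(`Buffer size (promise): ${bufferSize}`);

    // Callback form: pass an AsyncCallback as the last argument.
    audioRenderer.getBufferSize((err, size) => {
        if (err) {
            console.error(`getBufferSize failed: ${err.message}`);
            return;
        }
        console.info(`Buffer size (callback): ${size}`);
    });
}
```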
## **Other APIs:**
See [**js-apis-audio.md**](https://gitee.com/openharmony/docs/blob/master/en/application-dev/reference/apis/js-apis-audio.md) for more useful APIs like getAudioTime, drain, and getBufferSize.