Unverified commit db4ce2fa, authored by openharmony_ci, committed by Gitee

!22093 Translation completed: 21344+21413+21467 media-related changes

Merge pull request !22093 from wusongqing/TR21344
@@ -61,6 +61,7 @@
- Best Practices
  - [Camera Photographing Sample](camera-shooting-case.md)
  - [Camera Recording Sample](camera-recording-case.md)
  - [Using Performance Improvement Features (for System Applications Only)](camera-performance-improvement.md)
- Image
  - [Image Overview](image-overview.md)
  - [Image Decoding](image-decoding.md)
...
@@ -28,8 +28,7 @@ Read [AudioEncoder](../reference/native-apis/_audio_encoder.md) for the API refe

Refer to the code snippet below to complete the entire audio encoding process, including creating an encoder, setting encoding parameters (such as the sampling rate, bit rate, and number of audio channels), and starting, refreshing, resetting, and destroying the encoder.

During application development, you must call the APIs in the defined sequence. Otherwise, an exception or undefined behavior may occur.

For details about the complete code, see [Sample](https://gitee.com/openharmony/multimedia_av_codec/blob/master/test/nativedemo/audio_demo/avcodec_audio_aac_encoder_demo.cpp).
@@ -41,37 +40,34 @@ The figure below shows the call relationship of audio encoding.

You can create an encoder by name or MIME type.

```cpp
// Create an encoder by name.
OH_AVCapability *capability = OH_AVCodec_GetCapability(OH_AVCODEC_MIMETYPE_AUDIO_AAC, true);
const char *name = OH_AVCapability_GetName(capability);
OH_AVCodec *audioEnc = OH_AudioEncoder_CreateByName(name);
```

```cpp
// Create an encoder by MIME type.
OH_AVCodec *audioEnc = OH_AudioEncoder_CreateByMime(OH_AVCODEC_MIMETYPE_AUDIO_AAC);
```

```cpp
// Initialize the queues.
class AEncSignal {
public:
    std::mutex inMutex_;
    std::mutex outMutex_;
    std::mutex startMutex_;
    std::condition_variable inCond_;
    std::condition_variable outCond_;
    std::condition_variable startCond_;
    std::queue<uint32_t> inQueue_;
    std::queue<uint32_t> outQueue_;
    std::queue<OH_AVMemory *> inBufferQueue_;
    std::queue<OH_AVMemory *> outBufferQueue_;
    std::queue<OH_AVCodecBufferAttr> attrQueue_;
};
AEncSignal *signal_ = new AEncSignal();
```
2. Call **OH_AudioEncoder_SetCallback()** to set callback functions.

Register the **OH_AVCodecAsyncCallback** struct that defines the following callback function pointers:
@@ -83,126 +79,122 @@ The figure below shows the call relationship of audio encoding.

You need to process the callback functions to ensure that the encoder runs properly.

```cpp
// Implement the OH_AVCodecOnError callback function.
static void OnError(OH_AVCodec *codec, int32_t errorCode, void *userData)
{
    (void)codec;
    (void)errorCode;
    (void)userData;
}
// Implement the OH_AVCodecOnStreamChanged callback function.
static void OnStreamChanged(OH_AVCodec *codec, OH_AVFormat *format, void *userData)
{
    (void)codec;
    (void)format;
    (void)userData;
}
// Implement the OH_AVCodecOnNeedInputData callback function.
static void OnNeedInputData(OH_AVCodec *codec, uint32_t index, OH_AVMemory *data, void *userData)
{
    (void)codec;
    // The input stream is sent to the InputBuffer queue.
    AEncSignal *signal = static_cast<AEncSignal *>(userData);
    unique_lock<mutex> lock(signal->inMutex_);
    signal->inQueue_.push(index);
    signal->inBufferQueue_.push(data);
    signal->inCond_.notify_all();
}
// Implement the OH_AVCodecOnNewOutputData callback function.
static void OnNeedOutputData(OH_AVCodec *codec, uint32_t index, OH_AVMemory *data, OH_AVCodecBufferAttr *attr,
                             void *userData)
{
    (void)codec;
    // The index of the output buffer is sent to OutputQueue_.
    // The encoded data is sent to the outBuffer queue.
    AEncSignal *signal = static_cast<AEncSignal *>(userData);
    unique_lock<mutex> lock(signal->outMutex_);
    signal->outQueue_.push(index);
    signal->outBufferQueue_.push(data);
    if (attr) {
        signal->attrQueue_.push(*attr);
    }
    // Wake any thread waiting for encoded output.
    signal->outCond_.notify_all();
}
OH_AVCodecAsyncCallback cb = {&OnError, &OnStreamChanged, &OnNeedInputData, &OnNeedOutputData};
// Set the asynchronous callbacks.
int32_t ret = OH_AudioEncoder_SetCallback(audioEnc, cb, userData);
```
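The callbacks above only enqueue buffer indexes and wake waiting threads; the application threads that feed and drain the encoder block on the condition variables. Below is a minimal sketch of such an input loop. The `std::atomic<bool> isRunning_` flag is an assumption of this sketch (an application-owned stop flag, not part of the API); `signal_` is the instance created in step 1.

```cpp
// Illustrative input loop (assumption: runs on a dedicated application thread).
while (isRunning_.load()) {
    std::unique_lock<std::mutex> lock(signal_->inMutex_);
    // Sleep until OnNeedInputData() has pushed an index, or until stopping.
    signal_->inCond_.wait(lock, [&]() {
        return !signal_->inQueue_.empty() || !isRunning_.load();
    });
    if (!isRunning_.load()) {
        break;
    }
    uint32_t index = signal_->inQueue_.front();
    OH_AVMemory *buffer = signal_->inBufferQueue_.front();
    signal_->inQueue_.pop();
    signal_->inBufferQueue_.pop();
    lock.unlock();
    // Fill `buffer` with PCM data and submit it; step 6 shows how the
    // OH_AVCodecBufferAttr for this call is prepared.
    // OH_AudioEncoder_PushInputData(audioEnc, index, info);
}
```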
3. Call **OH_AudioEncoder_Configure()** to configure the encoder.

The following options are mandatory: sampling rate, bit rate, number of audio channels, audio channel type, and bit depth. The maximum input length is optional.

For FLAC encoding, the compliance level and sampling precision are also mandatory.
```cpp
enum AudioFormatType : int32_t {
    TYPE_AAC = 0,
    TYPE_FLAC = 1,
};
int32_t ret;
// (Mandatory) Configure the audio sampling rate.
constexpr uint32_t DEFAULT_SAMPLERATE = 44100;
// (Mandatory) Configure the audio bit rate.
constexpr uint64_t DEFAULT_BITRATE = 32000;
// (Mandatory) Configure the number of audio channels.
constexpr uint32_t DEFAULT_CHANNEL_COUNT = 2;
// (Mandatory) Configure the audio channel type.
constexpr AudioChannelLayout CHANNEL_LAYOUT = AudioChannelLayout::STEREO;
// (Mandatory) Configure the audio bit depth. Only SAMPLE_S16LE and SAMPLE_S32LE are available for FLAC encoding.
constexpr OH_BitsPerSample SAMPLE_FORMAT = OH_BitsPerSample::SAMPLE_S32LE;
// (Mandatory) Configure the audio bit depth. Only SAMPLE_F32LE is available for AAC encoding.
constexpr OH_BitsPerSample SAMPLE_AAC_FORMAT = OH_BitsPerSample::SAMPLE_F32LE;
// Configure the audio compliance level. The default value is 0, and the value ranges from -2 to 2.
constexpr int32_t COMPLIANCE_LEVEL = 0;
// (Mandatory) Configure the audio sampling precision. SAMPLE_S16LE, SAMPLE_S24LE, and SAMPLE_S32LE are available.
constexpr OH_BitsPerSample BITS_PER_CODED_SAMPLE = OH_BitsPerSample::SAMPLE_S24LE;
// (Optional) Configure the maximum input length.
constexpr uint32_t DEFAULT_MAX_INPUT_SIZE = 1024 * DEFAULT_CHANNEL_COUNT * sizeof(float); // AAC
OH_AVFormat *format = OH_AVFormat_Create();
// Set the format.
OH_AVFormat_SetIntValue(format, MediaDescriptionKey::MD_KEY_SAMPLE_RATE.data(), DEFAULT_SAMPLERATE);
OH_AVFormat_SetLongValue(format, MediaDescriptionKey::MD_KEY_BITRATE.data(), DEFAULT_BITRATE);
OH_AVFormat_SetIntValue(format, MediaDescriptionKey::MD_KEY_CHANNEL_COUNT.data(), DEFAULT_CHANNEL_COUNT);
OH_AVFormat_SetIntValue(format, MediaDescriptionKey::MD_KEY_MAX_INPUT_SIZE.data(), DEFAULT_MAX_INPUT_SIZE);
OH_AVFormat_SetLongValue(format, MediaDescriptionKey::MD_KEY_CHANNEL_LAYOUT.data(), CHANNEL_LAYOUT);
OH_AVFormat_SetIntValue(format, MediaDescriptionKey::MD_KEY_AUDIO_SAMPLE_FORMAT.data(), SAMPLE_FORMAT);
if (audioType == TYPE_AAC) {
    OH_AVFormat_SetIntValue(format, MediaDescriptionKey::MD_KEY_AUDIO_SAMPLE_FORMAT.data(), SAMPLE_AAC_FORMAT);
}
if (audioType == TYPE_FLAC) {
    OH_AVFormat_SetIntValue(format, MediaDescriptionKey::MD_KEY_BITS_PER_CODED_SAMPLE.data(), BITS_PER_CODED_SAMPLE);
    OH_AVFormat_SetLongValue(format, MediaDescriptionKey::MD_KEY_COMPLIANCE_LEVEL.data(), COMPLIANCE_LEVEL);
}
// Configure the encoder.
ret = OH_AudioEncoder_Configure(audioEnc, format);
if (ret != AV_ERR_OK) {
    // Exception handling.
}
```
4. Call **OH_AudioEncoder_Prepare()** to prepare internal resources for the encoder.

```c++
OH_AudioEncoder_Prepare(audioEnc);
```
5. Call **OH_AudioEncoder_Start()** to start the encoder.

```c++
inputFile_ = std::make_unique<std::ifstream>();
// Open the path of the binary file to be encoded.
inputFile_->open(inputFilePath.data(), std::ios::in | std::ios::binary);
// Configure the path of the output file.
outFile_ = std::make_unique<std::ofstream>();
outFile_->open(outputFilePath.data(), std::ios::out | std::ios::binary);
// Start encoding.
ret = OH_AudioEncoder_Start(audioEnc);
if (ret != AV_ERR_OK) {
    // Exception handling.
}
```
6. Call **OH_AudioEncoder_PushInputData()** to write the data to encode.

To indicate the End of Stream (EOS), pass in the **AVCODEC_BUFFER_FLAGS_EOS** flag.
@@ -213,64 +205,64 @@ The figure below shows the call relationship of audio encoding.

| Sampling Rate| FRAME_SIZE|
| :----: | :----: |
| 8000 | 576 |
| 16000 | 1152 |
| 22050 | 2304 |
| 24000 | 2304 |
| 32000 | 2304 |
| 44100 | 4608 |
| 48000 | 4608 |
| 88200 | 8192 |
| 96000 | 8192 |

**NOTE**: If **FRAME_SIZE** is not set to **1024** for AAC encoding, an error code is returned. In the case of FLAC encoding, if **FRAME_SIZE** is set to a value greater than the value listed in the table for a given sampling rate, an error code is returned; if **FRAME_SIZE** is set to a value less than the value listed, the encoded file may be damaged.
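For FLAC, the number of samples to read per push therefore depends on the sampling rate. The helper below is purely illustrative (the function name is ours, not an API); it simply encodes the table above:

```cpp
// Map a FLAC sampling rate to the FRAME_SIZE listed in the table above.
// Returns -1 for a sampling rate the table does not cover.
static int32_t GetFlacFrameSize(int32_t sampleRate)
{
    switch (sampleRate) {
        case 8000: return 576;
        case 16000: return 1152;
        case 22050:
        case 24000:
        case 32000: return 2304;
        case 44100:
        case 48000: return 4608;
        case 88200:
        case 96000: return 8192;
        default: return -1; // Unsupported sampling rate.
    }
}
```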
```c++
constexpr int32_t FRAME_SIZE = 1024; // AAC encoding
constexpr int32_t DEFAULT_CHANNEL_COUNT = 2;
constexpr int32_t INPUT_FRAME_BYTES = DEFAULT_CHANNEL_COUNT * FRAME_SIZE * sizeof(float); // AAC encoding
// Configure the buffer information.
OH_AVCodecBufferAttr info;
// Set the package size, offset, and timestamp.
info.size = pkt_->size;
info.offset = 0;
info.pts = pkt_->pts;
info.flags = AVCODEC_BUFFER_FLAGS_CODEC_DATA;
auto buffer = signal_->inBufferQueue_.front();
if (inputFile_->eof()) {
    info.size = 0;
    info.flags = AVCODEC_BUFFER_FLAGS_EOS;
} else {
    inputFile_->read((char *)OH_AVMemory_GetAddr(buffer), INPUT_FRAME_BYTES);
}
uint32_t index = signal_->inQueue_.front();
// Send the data to the input queue for encoding. The index is the subscript of the queue.
int32_t ret = OH_AudioEncoder_PushInputData(audioEnc, index, info);
if (ret != AV_ERR_OK) {
    // Exception handling.
}
```
7. Call **OH_AudioEncoder_FreeOutputData()** to output the encoded stream.

```c++
OH_AVCodecBufferAttr attr = signal_->attrQueue_.front();
OH_AVMemory *data = signal_->outBufferQueue_.front();
uint32_t index = signal_->outQueue_.front();
// Write the encoded data (specified by data) to the output file.
outFile_->write(reinterpret_cast<char *>(OH_AVMemory_GetAddr(data)), attr.size);
// Release the output buffer.
ret = OH_AudioEncoder_FreeOutputData(audioEnc, index);
if (ret != AV_ERR_OK) {
    // Exception handling.
}
if (attr.flags == AVCODEC_BUFFER_FLAGS_EOS) {
    cout << "encode eos" << endl;
    isRunning_.store(false);
    break;
}
```
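The snippet above reads the front of each output queue but does not remove it. After **OH_AudioEncoder_FreeOutputData()** returns, the consumed entries still have to be popped, under the same lock the callback takes, so the queue fronts stay aligned with the next callback notification. A minimal sketch:

```cpp
// Drop the entries consumed above so that the fronts of the queues
// again correspond to the next OnNeedOutputData() notification.
{
    std::unique_lock<std::mutex> lock(signal_->outMutex_);
    signal_->outQueue_.pop();
    signal_->outBufferQueue_.pop();
    signal_->attrQueue_.pop();
}
```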
8. (Optional) Call **OH_AudioEncoder_Flush()** to refresh the encoder.

After **OH_AudioEncoder_Flush()** is called, the current encoding queue is cleared.
@@ -282,19 +274,18 @@ The figure below shows the call relationship of audio encoding.

* The EOS of the file is reached.
* An error with **OH_AudioEncoder_IsValid** set to **true** (indicating that the execution can continue) occurs.
```c++
// Refresh the encoder.
ret = OH_AudioEncoder_Flush(audioEnc);
if (ret != AV_ERR_OK) {
    // Exception handling.
}
// Start encoding again.
ret = OH_AudioEncoder_Start(audioEnc);
if (ret != AV_ERR_OK) {
    // Exception handling.
}
```
9. (Optional) Call **OH_AudioEncoder_Reset()** to reset the encoder.

After **OH_AudioEncoder_Reset()** is called, the encoder returns to the initialized state. To continue encoding, you must call **OH_AudioEncoder_Configure()** and then **OH_AudioEncoder_Start()**, as in the sketch below.
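A minimal sketch of that sequence, reusing the `format` built in step 3 and the error-handling pattern used throughout this guide:

```cpp
// Return the encoder to the initialized state.
ret = OH_AudioEncoder_Reset(audioEnc);
if (ret != AV_ERR_OK) {
    // Exception handling.
}
// Reconfigure with the same (or updated) parameters, then restart.
ret = OH_AudioEncoder_Configure(audioEnc, format);
if (ret != AV_ERR_OK) {
    // Exception handling.
}
ret = OH_AudioEncoder_Start(audioEnc);
if (ret != AV_ERR_OK) {
    // Exception handling.
}
```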
@@ -334,4 +325,4 @@ The figure below shows the call relationship of audio encoding.

    audioEnc = NULL; // The encoder cannot be destroyed repeatedly.
}
return ret;
```
\ No newline at end of file
# Using Performance Improvement Features (for System Applications Only)
The camera startup performance is affected by time-consuming operations such as powering on underlying components and initializing the process pipeline. To improve the camera startup speed and thumbnail display speed, OpenHarmony introduces several performance improvement features. Because the capabilities of these features depend on underlying components, you must check whether they are supported before using them.

These features are involved in starting the camera device, configuring streams, and taking photos. This topic describes the three scenarios.
## Deferred Stream Configuration
A typical camera startup process includes starting the camera device, configuring a data stream, and starting the data stream. Before configuring the data stream, you need to obtain the surface ID of the **\<XComponent>**.
The deferred stream configuration feature decouples stream configuration and startup from the surface. Before the **\<XComponent>** provides the surface for the camera application, the system configures and starts the stream; the surface only needs to be available before the stream is started. This improves the startup speed without interfering with other startup optimizations.
![deferred-surface-scene](figures/deferred-surface-scene.png)
Before optimization: Stream configuration depends on a **Surface** object, which is available after UI loading is complete. In other words, you can create a session, configure input and output streams, and start the session only after the UI is loaded. The camera HDI is responsible for stream configuration.
After optimization: Stream configuration does not depend on the **Surface** object. UI loading and stream configuration are executed concurrently. After the parameters are prepared, you can create a session.
### Available APIs
Read [Camera](../reference/apis/js-apis-camera.md) for the API reference.
| API| Description|
| ---- | ---- |
| createDeferredPreviewOutput(profile: Profile): Promise\<PreviewOutput> | Creates a deferred **PreviewOutput** instance and adds it to the data stream instead of a common **PreviewOutput** instance during stream configuration.|
| addDeferredSurface(surfaceId: string): Promise\<void> | Adds a surface for delayed preview. This API can run after **session.commitConfig()** or **session.start()** is called.|
### Development Example
The figure below shows the recommended API call process.
![](figures/deferred-surface-sequence-diagram.png)
```js
import camera from '@ohos.multimedia.camera';
async function preview(context: Context, cameraInfo: camera.CameraDevice, previewProfile: camera.Profile, photoProfile: camera.Profile, surfaceId: string): Promise<void> {
  const cameraManager: camera.CameraManager = camera.getCameraManager(context);
  const cameraInput: camera.CameraInput = await cameraManager.createCameraInput(cameraInfo);
  const previewOutput: camera.PreviewOutput = await cameraManager.createDeferredPreviewOutput(previewProfile);
  const photoOutput: camera.PhotoOutput = await cameraManager.createPhotoOutput(photoProfile);
  const session: camera.CaptureSession = await cameraManager.createCaptureSession();
  await session.beginConfig();
  await session.addInput(cameraInput);
  await session.addOutput(previewOutput);
  await session.addOutput(photoOutput);
  await session.commitConfig();
  await session.start();
  await previewOutput.addDeferredSurface(surfaceId);
}
```
## Quick Thumbnail
The photographing performance depends on the algorithm processing speed: a complex algorithm chain yields better image quality but takes longer to process.

To improve the photographing speed perceived by end users, the quick thumbnail feature is introduced. When the user takes a photo, a thumbnail is output and reported to the camera application for display before the real image is reported.

In this way, the photographing flow is optimized: the post-processing algorithm gets the time it needs without slowing down foreground photographing.
### Available APIs
Read [Camera](../reference/apis/js-apis-camera.md) for the API reference.
| API| Description|
| ---- | ---- |
| isQuickThumbnailSupported() : boolean | Checks whether the quick thumbnail feature is supported.|
| enableQuickThumbnail(enabled: boolean): void | Enables or disables the quick thumbnail feature.|
| on(type: 'quickThumbnail', callback: AsyncCallback\<image.PixelMap>): void | Listens for camera thumbnails.|
> **NOTE**
>
> - **isQuickThumbnailSupported** and **enableQuickThumbnail** must be called after **CaptureSession.addOutput** and **CaptureSession.addInput** but before **CaptureSession.commitConfig()**.
> - **on()** takes effect after **enableQuickThumbnail(true)** is called.
### Development Example
The figure below shows the recommended API call process.
![](figures/quick-thumbnail-sequence-diagram.png)
```js
import camera from '@ohos.multimedia.camera'
this.cameraManager = camera.getCameraManager(globalThis.abilityContext);
let cameras = this.cameraManager.getSupportedCameras()
// Create a CaptureSession instance.
this.captureSession = await this.cameraManager.createCaptureSession()
// Start configuration for the session.
await this.captureSession.beginConfig()
// Add a CameraInput instance to the session.
this.cameraInput = await this.cameraManager.createCameraInput(cameras[0])
await this.cameraInput.open()
await this.captureSession.addInput(this.cameraInput)
// Add a PhotoOutput instance to the session.
this.photoOutPut = await this.cameraManager.createPhotoOutput(photoProfile, surfaceId)
await this.captureSession.addOutput(this.photoOutPut)
let isSupported = this.photoOutPut.isQuickThumbnailSupported()
if (isSupported) {
// Enable the quick thumbnail feature.
this.photoOutPut.enableQuickThumbnail(true)
this.photoOutPut.on('quickThumbnail', (err, pixelmap) => {
if (err || pixelmap === undefined) {
Logger.error(this.tag, 'photoOutPut on thumbnail failed ')
return
}
// Display or save the PixelMap instance.
this.showOrSavePicture(pixelmap)
})
}
```
## Prelaunch
Generally, the startup of the camera application is triggered when the user touches the camera icon on the home screen. The home screen senses the touch event and instructs the application manager to start the camera application. This takes a relatively long time. After the camera application is started, the camera startup process starts. A typical camera startup process includes starting the camera device, configuring a data stream, and starting the data stream, which is also time-consuming.
The prelaunch feature triggers the action of starting the camera device before the camera application is started. In other words, when the user touches the camera icon on the home screen, the system starts the camera device. At this time, the camera application is not started yet. The figure below shows the camera application process before and after the prelaunch feature is introduced.
![prelaunch-scene](figures/prelaunch-scene.png)
### Available APIs
Read [Camera](../reference/apis/js-apis-camera.md) for the API reference.
| API| Description|
| ---- | ---- |
| isPrelaunchSupported(camera: CameraDevice) : boolean | Checks whether the camera supports prelaunch.|
| setPrelaunchConfig(prelaunchConfig: PrelaunchConfig) : void | Sets the prelaunch parameters.|
| prelaunch() : void | Prelaunches the camera. This API is called when a user clicks the system camera icon to start the camera application.|
### Development Example
The figure below shows the recommended API call process.
![](figures/prelaunch-sequence-diagram.png)
- **Home screen**
```js
import camera from '@ohos.multimedia.camera'
this.cameraManager = camera.getCameraManager(globalThis.abilityContext);
try {
this.cameraManager.prelaunch();
} catch (error) {
console.error(`catch error: Code: ${error.code}, message: ${error.message}`)
}
```
- **Camera application**
To use the prelaunch feature, the camera application must declare the **ohos.permission.CAMERA** permission.
For details about how to request and verify the permissions, see [Permission Application Guide](../security/accesstoken-guidelines.md).
```js
import camera from '@ohos.multimedia.camera'
this.cameraManager = camera.getCameraManager(globalThis.abilityContext);
let cameras = this.cameraManager.getSupportedCameras()
if (this.cameraManager.isPrelaunchSupported(cameras[0])) {
try {
this.cameraManager.setPrelaunchConfig({cameraDevice: cameras[0]});
} catch (error) {
console.error(`catch error: Code: ${error.code}, message: ${error.message}`)
}
}
```
@@ -72,7 +72,7 @@ let AVRecorderConfig = {
  audioSourceType : media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC,
  videoSourceType : media.VideoSourceType.VIDEO_SOURCE_TYPE_SURFACE_YUV,
  profile : AVRecorderProfile,
  url : 'fd://', // Before passing in a file descriptor to this parameter, the file must be created by the caller and granted with the read and write permissions. Example value: fd://45--file:///data/media/01.mp4.
  rotation: 0, // The value can be 0, 90, 180, or 270. If any other value is used, prepare() reports an error.
  location : { latitude : 30, longitude : 130 }
}
...
@@ -161,7 +161,7 @@ startAbilityForResult(parameter: StartAbilityParameter, callback: AsyncCallback\

Starts an ability. This API uses an asynchronous callback to return the result when the ability is terminated. The following situations may be possible for a started ability:
- Normally, you can call [terminateSelfWithResult](#featureabilityterminateselfwithresult7) to terminate the ability. The result is returned to the caller.
- If an exception occurs, for example, the ability is killed, an exception message, in which **resultCode** is **-1**, is returned to the caller.
- If different applications call this API to start an ability that uses the singleton mode and then call [terminateSelfWithResult](#featureabilityterminateselfwithresult7) to terminate the ability, the normal result is returned to the last caller, and an exception message, in which **resultCode** is **-1**, is returned to others.

Observe the following when using this API:
- If an application running in the background needs to call this API to start an ability, it must have the **ohos.permission.START_ABILITIES_FROM_BACKGROUND** permission.
@@ -214,7 +214,7 @@ startAbilityForResult(parameter: StartAbilityParameter): Promise\<AbilityResult>

Starts an ability. This API uses a promise to return the result when the ability is terminated. The following situations may be possible to an ability after it is started:
- Normally, you can call [terminateSelfWithResult](#featureabilityterminateselfwithresult7) to terminate the ability. The result is returned to the caller.
- If an exception occurs, for example, the ability is killed, an exception message, in which **resultCode** is **-1**, is returned to the caller.
- If different applications call this API to start an ability that uses the singleton mode and then call [terminateSelfWithResult](#featureabilityterminateselfwithresult7) to terminate the ability, the normal result is returned to the last caller, and an exception message, in which **resultCode** is **-1**, is returned to others.

Observe the following when using this API:
- If an application running in the background needs to call this API to start an ability, it must have the **ohos.permission.START_ABILITIES_FROM_BACKGROUND** permission.
...
@@ -545,7 +545,7 @@ Enumerates the audio stream usage.

| STREAM_USAGE_UNKNOWN | 0 | Unknown usage.|
| STREAM_USAGE_MEDIA | 1 | Media. |
| STREAM_USAGE_MUSIC<sup>10+</sup> | 1 | Music. |
| STREAM_USAGE_VOICE_COMMUNICATION | 2 | Voice communication.|
| STREAM_USAGE_VOICE_ASSISTANT<sup>9+</sup> | 3 | Voice assistant.|
| STREAM_USAGE_ALARM<sup>10+</sup> | 4 | Alarming. |
| STREAM_USAGE_VOICE_MESSAGE<sup>10+</sup> | 5 | Voice message.|
@@ -3076,7 +3076,7 @@ Checks whether the fixed volume mode is enabled. When the fixed volume mode is e

**Example**

```js
let volumeAdjustSwitch = audioVolumeGroupManager.isVolumeUnadjustable();
console.info(`Whether it is volume unadjustable: ${volumeAdjustSwitch}.`);
```
@@ -5790,13 +5790,13 @@ audioRenderer.getCurrentOutputDevices((err, deviceInfo) => {
  if (err) {
    console.error(`getCurrentOutputDevices Fail: ${err}`);
  } else {
    console.info(`DeviceInfo id: ${deviceInfo.id}`);
    console.info(`DeviceInfo type: ${deviceInfo.deviceType}`);
    console.info(`DeviceInfo role: ${deviceInfo.deviceRole}`);
    console.info(`DeviceInfo name: ${deviceInfo.name}`);
    console.info(`DeviceInfo address: ${deviceInfo.address}`);
    console.info(`DeviceInfo samplerates: ${deviceInfo.sampleRates[0]}`);
    console.info(`DeviceInfo channelcounts: ${deviceInfo.channelCounts[0]}`);
    console.info(`DeviceInfo channelmask: ${deviceInfo.channelMasks}`);
  }
});
```
@@ -5819,13 +5819,13 @@ Obtains the output device descriptors of the audio streams. This API uses a prom

```js
audioRenderer.getCurrentOutputDevices().then((deviceInfo) => {
  console.info(`DeviceInfo id: ${deviceInfo.id}`);
  console.info(`DeviceInfo type: ${deviceInfo.deviceType}`);
  console.info(`DeviceInfo role: ${deviceInfo.deviceRole}`);
  console.info(`DeviceInfo name: ${deviceInfo.name}`);
  console.info(`DeviceInfo address: ${deviceInfo.address}`);
  console.info(`DeviceInfo samplerates: ${deviceInfo.sampleRates[0]}`);
  console.info(`DeviceInfo channelcounts: ${deviceInfo.channelCounts[0]}`);
  console.info(`DeviceInfo channelmask: ${deviceInfo.channelMasks}`);
}).catch((err) => {
  console.error(`Get current output devices Fail: ${err}`);
});
```
@@ -6073,7 +6073,7 @@ Subscribes to audio output device change events.

**Example**

```js
audioRenderer.on('outputDeviceChange', (err, deviceChangeInfo) => {
  if (err) {
    console.error(`Subscribes output device change event callback Fail: ${err}`);
  } else {
@@ -6105,7 +6105,7 @@ Unsubscribes from audio output device event changes.

**Example**

```js
audioRenderer.off('outputDeviceChange', (err, deviceChangeInfo) => {
  if (err) {
    console.error(`Unsubscribes output device change event callback Fail: ${err}`);
  } else {
...
@@ -665,7 +665,7 @@ if(this.cameraManager.isPrelaunchSupported(cameras[0])) {

prelaunch(): void

Prelaunches the camera. This API is called when a user clicks the system camera icon to start the camera application.

**System API**: This is a system API.

@@ -2726,7 +2726,7 @@ previewOutput.on('error', (previewOutputError) => {

addDeferredSurface(surfaceId: string): void

Adds a surface for delayed preview. This API can run after **session.commitConfig()** or **session.start()** is called.

**System API**: This is a system API.
...
@@ -1781,7 +1781,7 @@ let AVRecorderConfig = {
  audioSourceType : media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC,
  videoSourceType : media.VideoSourceType.VIDEO_SOURCE_TYPE_SURFACE_YUV,
  profile : AVRecorderProfile,
  url : 'fd://', // Before passing in an FD to this parameter, the file must be created by the caller and granted with the read and write permissions. Example value: fd://45.
  rotation: 0, // The value can be 0, 90, 180, or 270. If any other value is used, prepare() reports an error.
  location : { latitude : 30, longitude : 130 }
}
@@ -1852,7 +1852,7 @@ let AVRecorderConfig = {
  audioSourceType : media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC,
  videoSourceType : media.VideoSourceType.VIDEO_SOURCE_TYPE_SURFACE_YUV,
  profile : AVRecorderProfile,
  url : 'fd://', // Before passing in an FD to this parameter, the file must be created by the caller and granted with the read and write permissions. Example value: fd://45.
  rotation: 0, // The value can be 0, 90, 180, or 270. If any other value is used, prepare() reports an error.
  location : { latitude : 30, longitude : 130 }
}
...