diff --git a/CODEOWNERS b/CODEOWNERS
index 4f7ab1dad900229e988205402075da217e03f536..638941c1a2a1765ad2a55dd81cf65ca72dce37be 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -338,7 +338,7 @@ zh-cn/application-dev/reference/apis/js-apis-distributedMissionManager.md @chenm
zh-cn/application-dev/reference/apis/js-apis-document.md @panqinxu @zengyawen @bubble_mao @jinhaihw
zh-cn/application-dev/reference/apis/js-apis-effectKit.md @zhangqiang183 @ge-yafang @wind_zj @zxg-gitee
zh-cn/application-dev/reference/apis/js-apis-emitter.md @jayleehw @RayShih @li-weifeng2 @currydavids
-zh-cn/application-dev/reference/apis/js-apis-EnterpriseAdminExtensionAbility.md @Buda-Liu @ningningW @budda-wang @yangqing3
+zh-cn/application-dev/reference/apis/js-apis-EnterpriseAdminExtensionAbility.md @liuzuming @ningningW @yangqing3
zh-cn/application-dev/reference/apis/js-apis-environment.md @panqinxu @zengyawen @bubble_mao @jinhaihw
zh-cn/application-dev/reference/apis/js-apis-errorManager.md @littlejerry1 @RayShih @gwang2008 @chengxingzhen
zh-cn/application-dev/reference/apis/js-apis-eventhub.md @jayleehw @RayShih @li-weifeng2 @currydavids
@@ -384,7 +384,7 @@ zh-cn/application-dev/reference/apis/js-apis-lightweightmap.md @gongjunsong @ge-
zh-cn/application-dev/reference/apis/js-apis-lightweightset.md @gongjunsong @ge-yafang @flyingwolf @BlackStone
zh-cn/application-dev/reference/apis/js-apis-linkedlist.md @gongjunsong @ge-yafang @flyingwolf @BlackStone
zh-cn/application-dev/reference/apis/js-apis-list.md @gongjunsong @ge-yafang @flyingwolf @BlackStone
-zh-cn/application-dev/reference/apis/js-apis-logs.md @huaweimaxuchu @ningningW @niulihua @tomatodevboy
+zh-cn/application-dev/reference/apis/js-apis-logs.md @gongjunsong @ge-yafang @flyingwolf @BlackStone
zh-cn/application-dev/reference/apis/js-apis-media.md @liuyuehua1 @zengyawen @xxb-wzy @currydavids
zh-cn/application-dev/reference/apis/js-apis-medialibrary.md @panqinxu @zengyawen @bubble_mao @jinhaihw
zh-cn/application-dev/reference/apis/js-apis-mediaquery.md @huaweimaxuchu @HelloCrease @niulihua @tomatodevboy
@@ -397,7 +397,7 @@ zh-cn/application-dev/reference/apis/js-apis-notification.md @jayleehw @RayShih
zh-cn/application-dev/reference/apis/js-apis-observer.md @zhang-hai-feng @zengyawen @jyh926 @gaoxi785
zh-cn/application-dev/reference/apis/js-apis-osAccount.md @nianCode @zengyawen @JiDong-CS @murphy1984
zh-cn/application-dev/reference/apis/js-apis-particleAbility.md @littlejerry1 @RayShih @gwang2008 @chengxingzhen
-zh-cn/application-dev/reference/apis/js-apis-pasteboard.md @feng-aiwen @ge-yafang @gong-a-shi @logic42
+zh-cn/application-dev/reference/apis/js-apis-pasteboard.md @han-zhengshi @ge-yafang @logic42
zh-cn/application-dev/reference/apis/js-apis-permissionrequestresult.md @littlejerry1 @RayShih @gwang2008 @chengxingzhen
zh-cn/application-dev/reference/apis/js-apis-plainarray.md @gongjunsong @ge-yafang @flyingwolf @BlackStone
zh-cn/application-dev/reference/apis/js-apis-pointer.md @yuanxinying @ningningW @cococoler @alien0208
@@ -415,7 +415,7 @@ zh-cn/application-dev/reference/apis/js-apis-resource-manager.md @Buda-Liu @ning
zh-cn/application-dev/reference/apis/js-apis-router.md @huaweimaxuchu @HelloCrease @niulihua @tomatodevboy
zh-cn/application-dev/reference/apis/js-apis-rpc.md @xuepianpian @RayShih @zhaopeng_gitee @vagrant_world
zh-cn/application-dev/reference/apis/js-apis-runninglock.md @aqxyjay @zengyawen @aqxyjay @alien0208
-zh-cn/application-dev/reference/apis/js-apis-screen-lock.md @feng-aiwen @ningningW @wangzhangjun @murphy1984
+
zh-cn/application-dev/reference/apis/js-apis-screen.md @zhangqiang183 @ge-yafang @zhouyaoying @zxg-gitee
zh-cn/application-dev/reference/apis/js-apis-screenshot.md @zhangqiang183 @ge-yafang @zhouyaoying @zxg-gitee
zh-cn/application-dev/reference/apis/js-apis-securityLabel.md @panqinxu @zengyawen @bubble_mao @jinhaihw
@@ -435,6 +435,7 @@ zh-cn/application-dev/reference/apis/js-apis-system-bluetooth.md @cheng_guohong
zh-cn/application-dev/reference/apis/js-apis-system-brightness.md @aqxyjay @zengyawen @aqxyjay @alien0208
zh-cn/application-dev/reference/apis/js-apis-system-cipher.md @gaoyong @zengyawen @niejiteng @jumozhanjiang
zh-cn/application-dev/reference/apis/js-apis-system-configuration.md @Buda-Liu @ningningW @budda-wang @tomatodevboy
+zh-cn/application-dev/reference/apis/js-apis-system-date-time.md @feng-aiwen @ningningW @illybyy @murphy1984
zh-cn/application-dev/reference/apis/js-apis-system-device.md @mupceet @zengyawen @handyohos @nan-xiansen
zh-cn/application-dev/reference/apis/js-apis-system-fetch.md @zhang-hai-feng @zengyawen @jyh926 @gaoxi785
zh-cn/application-dev/reference/apis/js-apis-system-file.md @panqinxu @zengyawen @bubble_mao @jinhaihw
@@ -455,7 +456,7 @@ zh-cn/application-dev/reference/apis/js-apis-system-vibrate.md @hellohyh001 @nin
zh-cn/application-dev/reference/apis/js-apis-telephony-data.md @zhang-hai-feng @zengyawen @jyh926 @gaoxi785
zh-cn/application-dev/reference/apis/js-apis-testRunner.md @inter515 @littlejerry1 @RayShih @inter515 @jiyong
zh-cn/application-dev/reference/apis/js-apis-thermal.md @aqxyjay @zengyawen @aqxyjay @alien0208
-zh-cn/application-dev/reference/apis/js-apis-timer.md @huaweimaxuchu @HelloCrease @niulihua @tomatodevboy
+zh-cn/application-dev/reference/apis/js-apis-timer.md @gongjunsong @ge-yafang @flyingwolf @BlackStone
zh-cn/application-dev/reference/apis/js-apis-touchevent.md @mayunteng_1 @ningningW @cococoler @alien0208
zh-cn/application-dev/reference/apis/js-apis-treemap.md @gongjunsong @ge-yafang @flyingwolf @BlackStone
zh-cn/application-dev/reference/apis/js-apis-treeset.md @gongjunsong @ge-yafang @flyingwolf @BlackStone
@@ -520,7 +521,7 @@ zh-cn/application-dev/reference/apis/js-apis-bundleManager.md @shuaytao @RayShih
zh-cn/application-dev/reference/apis/js-apis-bundleMonitor.md @shuaytao @RayShih @wangzhen107 @inter515
zh-cn/application-dev/reference/apis/js-apis-colorSpaceManager.md @zhangqiang183 @ge-yafang @wind_zj @zxg-gitee
zh-cn/application-dev/reference/apis/js-apis-commonEventManager.md @jayleehw @RayShih @li-weifeng2 @currydavids
-zh-cn/application-dev/reference/apis/js-apis-configPolicy.md @Buda-Liu @ningningW @budda-wang @yangqing3
+zh-cn/application-dev/reference/apis/js-apis-configPolicy.md @liuzuming @ningningW @yangqing3
zh-cn/application-dev/reference/apis/js-apis-cooperate.md @yuanxinying @ningningW @cococoler @alien0208
zh-cn/application-dev/reference/apis/js-apis-cryptoFramework.md @gaoyong @zengyawen @niejiteng @jumozhanjiang
zh-cn/application-dev/reference/apis/js-apis-cert.md @gaoyong @zengyawen @niejiteng @jumozhanjiang
@@ -528,8 +529,14 @@ zh-cn/application-dev/reference/apis/js-apis-curve.md @huaweimaxuchu @HelloCreas
zh-cn/application-dev/reference/apis/js-apis-defaultAppManager.md @shuaytao @RayShih @wangzhen107 @inter515
zh-cn/application-dev/reference/apis/js-apis-distributedBundle.md @shuaytao @RayShih @wangzhen107 @inter515
zh-cn/application-dev/reference/apis/js-apis-distributedKVStore.md @feng-aiwen @ge-yafang @gong-a-shi @logic42
-zh-cn/application-dev/reference/apis/js-apis-enterprise-adminManager.md @Buda-Liu @ningningW @budda-wang @yangqing3
-zh-cn/application-dev/reference/apis/js-apis-enterprise-dateTimeManager.md @Buda-Liu @ningningW @budda-wang @yangqing3
+zh-cn/application-dev/reference/apis/js-apis-enterprise-accountManager.md @liuzuming @ningningW @yangqing3
+zh-cn/application-dev/reference/apis/js-apis-enterprise-adminManager.md @liuzuming @ningningW @yangqing3
+zh-cn/application-dev/reference/apis/js-apis-enterprise-bundleManager.md @liuzuming @ningningW @yangqing3
+zh-cn/application-dev/reference/apis/js-apis-enterprise-dateTimeManager.md @liuzuming @ningningW @yangqing3
+zh-cn/application-dev/reference/apis/js-apis-enterprise-deviceControl.md @liuzuming @ningningW @yangqing3
+zh-cn/application-dev/reference/apis/js-apis-enterprise-deviceInfo.md @liuzuming @ningningW @yangqing3
+zh-cn/application-dev/reference/apis/js-apis-enterprise-networkManager.md @liuzuming @ningningW @yangqing3
+zh-cn/application-dev/reference/apis/js-apis-enterprise-wifiManager.md @liuzuming @ningningW @yangqing3
zh-cn/application-dev/reference/apis/js-apis-fileAccess.md @panqinxu @zengyawen @bubble_mao @jinhaihw
zh-cn/application-dev/reference/apis/js-apis-fileExtensionInfo.md @panqinxu @zengyawen @bubble_mao @jinhaihw
zh-cn/application-dev/reference/apis/js-apis-freeInstall.md @shuaytao @RayShih @wangzhen107 @inter515
diff --git a/README_zh.md b/README_zh.md
index 6e71a0abd9fba5cbe4b4de47c77437d3a851aa45..b69235fdbdc19771dbae119f8873b610a7adf65c 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -18,7 +18,7 @@
- master: the latest development version.
- - OpenHarmony 3.2 Beta5: click [here](zh-cn/release-notes/OpenHarmony-v3.2-beta5.md) for details about this version.
+ - OpenHarmony 3.2 Release: click [here](zh-cn/release-notes/OpenHarmony-v3.2-release.md) for details about this version.
- OpenHarmony 3.1 Release: click [here](zh-cn/release-notes/OpenHarmony-v3.1-release.md) for details about this version.
diff --git a/en/OpenHarmony-Overview.md b/en/OpenHarmony-Overview.md
index 594fb355e417b047d93b1de9d413540a2c839190..1aecb0171e73b02bcbcb9347497f594bf96c912e 100644
--- a/en/OpenHarmony-Overview.md
+++ b/en/OpenHarmony-Overview.md
@@ -127,7 +127,7 @@ The following table describes the subsystems of OpenHarmony. For details about t
| Build | Provides a compilation and building framework based on Generate Ninja (GN) and Ninja. | All systems |
| Test | The test-driven development mode is used during the development process. You can develop new cases or modify existing cases to test new or enhanced system features. The test helps you develop high-quality code in the development phase.| All systems |
| Data Management | Provides local data management and distributed data management:<br/>- Local application data management for lightweight preference databases and relational databases<br/>- Distributed data service to provide applications with the capability to store data in the databases of different devices| Standard system |
-| Programming Language Runtime| Provides the compilation and execution environment for programs developed with JavaScript or C/C++, basic libraries that support the runtime, and the runtime-associated APIs, compilers, and auxiliary tools.| All systems |
+| Compiler and Runtime | Provides the compilation and execution environment for programs developed with JavaScript or C/C++, basic libraries that support the runtime, and the runtime-associated APIs, compilers, and auxiliary tools.| All systems |
| Distributed Scheduler| Starts, registers, queries, and manages system services. | All systems |
| JS UI Framework | OpenHarmony JS UI framework supports web-development-like paradigm. | All systems |
| Multimedia | Provides easy-to-use APIs for developing multimedia components such as audio, video, and camera, and enables applications to use multimedia resources of the system.| All systems |
@@ -189,7 +189,7 @@ For details about how to obtain the source code of OpenHarmony, see [Source Code
## How to Participate
-For details about how to join in the OpenHarmony community, see [OpenHarmony Community](https://gitee.com/openharmony/community/blob/master/README-EN.md)
+For details about how to join in the OpenHarmony community, see [OpenHarmony Community](https://gitee.com/openharmony/community/blob/master/README_EN.md)
For details about how to contribute, see [How to contribute](contribute/how-to-contribute.md).
diff --git a/en/application-dev/IDL/idl-guidelines.md b/en/application-dev/IDL/idl-guidelines.md
index f165215bad4d663b794c249f8029d33aeeda5863..a7ce0ec46adeeca0dd697cd8dabde834b7cc14fc 100644
--- a/en/application-dev/IDL/idl-guidelines.md
+++ b/en/application-dev/IDL/idl-guidelines.md
@@ -3,7 +3,7 @@
## IDL Overview
To ensure successful communications between the client and server, interfaces recognized by both parties must be defined. The OpenHarmony Interface Definition Language (IDL) is a tool for defining such interfaces. OpenHarmony IDL decomposes objects to be transferred into primitives that can be understood by the operating system and encapsulates cross-boundary objects based on developers' requirements.
- **Figure 1** IDL interface description
+**Figure 1** IDL interface description

@@ -156,11 +156,13 @@ On DevEco Studio, choose **Tools > SDK Manager** to view the local installation
Go to the local installation path, choose **toolchains > 3.x.x.x** (the folder named after the version number), and check whether the executable file of IDL exists.
-> **NOTE**: Use the SDK of the latest version. The use of an earlier version may cause errors in some statements.
+> **NOTE**
+>
+> Use the SDK of the latest version. The use of an earlier version may cause errors in some statements.
-If the executable file does not exist, download the SDK package from the mirror as instructed in the [Release Notes](../../release-notes). The following uses the [3.2 Beta3](../../release-notes/OpenHarmony-v3.2-beta3.md#acquiring-source-code-from-mirrors) as an example.
+If the executable file does not exist, download the SDK package from the mirror as instructed in the [Release Notes](../../release-notes). The following uses [3.2 Beta3](../../release-notes/OpenHarmony-v3.2-beta3.md) as an example.
-For details about how to replace the SDK package, see [Guide to Switching to Full SDK](../quick-start/full-sdk-switch-guide.md).
+For details about how to replace the SDK package, see [Full SDK Compilation Guide](../quick-start/full-sdk-compile-guide.md).
After obtaining the executable file, perform subsequent development steps based on your scenario.
@@ -176,6 +178,8 @@ You can use TS to create IDL files.
interface OHOS.IIdlTestService {
int TestIntTransaction([in] int data);
void TestStringTransaction([in] String data);
+ void TestMapTransaction([in] Map data);
+ int TestArrayTransaction([in] String[] data);
}
```
@@ -183,7 +187,9 @@ Run the **idl -gen-ts -d *dir* -c dir/IIdlTestService.idl** command in the folde
-*dir* next to **d** is the target output folder. For example, if the target output folder is **IIdlTestServiceTs**, run the **idl -gen-ts -d IIdlTestServiceTs -c IIdlTestServiceTs/IIdlTestService.idl** command in the folder where the executable file is located. The interface file, stub file, and proxy file are generated in the *dir* directory (**IIdlTestServiceTs** directory in this example) in the execution environment.
-> **NOTE**: The generated interface class file name must be the same as that of the .idl file. Otherwise, an error occurs during code generation.
+> **NOTE**
+>
+> The generated interface class file name must be the same as that of the .idl file. Otherwise, an error occurs during code generation.
For example, for an .idl file named **IIdlTestService.idl** and target output directory named **IIdlTestServiceTs**, the directory structure is similar to the following:
@@ -203,6 +209,8 @@ The stub class generated by IDL is an abstract implementation of the interface c
```ts
import {testIntTransactionCallback} from "./i_idl_test_service";
import {testStringTransactionCallback} from "./i_idl_test_service";
+import {testMapTransactionCallback} from "./i_idl_test_service";
+import {testArrayTransactionCallback} from "./i_idl_test_service";
import IIdlTestService from "./i_idl_test_service";
import rpc from "@ohos.rpc";
@@ -211,8 +219,8 @@ export default class IdlTestServiceStub extends rpc.RemoteObject implements IIdl
super(des);
}
- async onRemoteRequestEx(code: number, data, reply, option): Promise<boolean> {
- console.log("onRemoteRequestEx called, code = " + code);
+ async onRemoteMessageRequest(code: number, data, reply, option): Promise<boolean> {
+ console.log("onRemoteMessageRequest called, code = " + code);
switch(code) {
case IdlTestServiceStub.COMMAND_TEST_INT_TRANSACTION: {
let _data = data.readInt();
@@ -231,6 +239,29 @@ export default class IdlTestServiceStub extends rpc.RemoteObject implements IIdl
});
return true;
}
+ case IdlTestServiceStub.COMMAND_TEST_MAP_TRANSACTION: {
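+ // Rebuild the Map sent by the proxy: the size comes first, then each key-value pair in write order.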
+ let _data = new Map();
+ let _dataSize = data.readInt();
+ for (let i = 0; i < _dataSize; ++i) {
+ let key = data.readInt();
+ let value = data.readInt();
+ _data.set(key, value);
+ }
+ this.testMapTransaction(_data, (errCode) => {
+ reply.writeInt(errCode);
+ });
+ return true;
+ }
+ case IdlTestServiceStub.COMMAND_TEST_ARRAY_TRANSACTION: {
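+ // The proxy sends the string array as one unit; readStringArray() restores it in a single call.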
+ let _data = data.readStringArray();
+ this.testArrayTransaction(_data, (errCode, returnValue) => {
+ reply.writeInt(errCode);
+ if (errCode == 0) {
+ reply.writeInt(returnValue);
+ }
+ });
+ return true;
+ }
default: {
console.log("invalid request code" + code);
break;
@@ -241,17 +272,23 @@ export default class IdlTestServiceStub extends rpc.RemoteObject implements IIdl
testIntTransaction(data: number, callback: testIntTransactionCallback): void{}
testStringTransaction(data: string, callback: testStringTransactionCallback): void{}
+ testMapTransaction(data: Map<number, number>, callback: testMapTransactionCallback): void{}
+ testArrayTransaction(data: string[], callback: testArrayTransactionCallback): void{}
static readonly COMMAND_TEST_INT_TRANSACTION = 1;
static readonly COMMAND_TEST_STRING_TRANSACTION = 2;
+ static readonly COMMAND_TEST_MAP_TRANSACTION = 3;
+ static readonly COMMAND_TEST_ARRAY_TRANSACTION = 4;
}
```
-You need to inherit the interface class defined in the IDL file and implement the methods in the class. The following code snippet shows how to inherit the **IdlTestServiceStub** interface class and implement the **testIntTransaction** and **testStringTransaction** methods.
+You need to inherit the interface class defined in the IDL file and implement the methods in the class. The following code snippet shows how to inherit the **IdlTestServiceStub** interface class and implement the **testIntTransaction**, **testStringTransaction**, **testMapTransaction**, and **testArrayTransaction** methods.
```ts
import {testIntTransactionCallback} from "./i_idl_test_service"
import {testStringTransactionCallback} from "./i_idl_test_service"
+import {testMapTransactionCallback} from "./i_idl_test_service";
+import {testArrayTransactionCallback} from "./i_idl_test_service";
import IdlTestServiceStub from "./idl_test_service_stub"
@@ -265,6 +302,14 @@ class IdlTestImp extends IdlTestServiceStub {
{
callback(0);
}
+ testMapTransaction(data: Map<number, number>, callback: testMapTransactionCallback): void
+ {
+ callback(0);
+ }
+ testArrayTransaction(data: string[], callback: testArrayTransactionCallback): void
+ {
+ callback(0, 1);
+ }
}
```
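+
+A minimal sketch of how the implemented stub could be exposed, assuming an FA-model ServiceAbility returns the stub from **onConnect** (the file name **idl_test_imp** and the descriptor string are illustrative):
+
+```ts
+import IdlTestImp from './idl_test_imp';
+
+export default {
+    onStart() {
+        console.info('ServiceAbility onStart');
+    },
+    onStop() {
+        console.info('ServiceAbility onStop');
+    },
+    onConnect(want) {
+        // Return the stub object; the system passes it to the connecting client,
+        // which communicates with it through the generated proxy.
+        return new IdlTestImp('connect service');
+    }
+};
+```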
@@ -320,11 +365,28 @@ function callbackTestStringTransaction(result: number): void {
}
}
+function callbackTestMapTransaction(result: number): void {
+ if (result == 0) {
+ console.log('case 3 success');
+ }
+}
+
+function callbackTestArrayTransaction(result: number, ret: number): void {
+ if (result == 0 && ret == 1) {
+ console.log('case 4 success');
+ }
+}
+
var onAbilityConnectDone = {
onConnect:function (elementName, proxy) {
let testProxy = new IdlTestServiceProxy(proxy);
+ let testMap = new Map();
+ testMap.set(1, 1);
+ testMap.set(2, 2);
testProxy.testIntTransaction(123, callbackTestIntTransaction);
testProxy.testStringTransaction('hello', callbackTestStringTransaction);
+ testProxy.testMapTransaction(testMap, callbackTestMapTransaction);
+ testProxy.testArrayTransaction(['1','2'], callbackTestArrayTransaction);
},
onDisconnect:function (elementName) {
console.log('onDisconnectService onDisconnect');
diff --git a/en/application-dev/Readme-EN.md b/en/application-dev/Readme-EN.md
index a627e1116a792c5d4fc885ae01aa6ccb172b7b1d..73bbd2d608562535e3272c1a659bcebbd39b125a 100644
--- a/en/application-dev/Readme-EN.md
+++ b/en/application-dev/Readme-EN.md
@@ -17,7 +17,6 @@
- Application Package Structure
- [Application Package Structure in Stage Model)](quick-start/application-package-structure-stage.md)
- [Application Package Structure in FA Model](quick-start/application-package-structure-fa.md)
- - [HAR File Structure](quick-start/har-structure.md)
- Multi-HAP Mechanism
- [Multi-HAP Design Objectives](quick-start/multi-hap-objective.md)
- [Multi-HAP Build View](quick-start/multi-hap-build-view.md)
@@ -49,7 +48,7 @@
- Development
- [Application Models](application-models/Readme-EN.md)
- [UI Development](ui/Readme-EN.md)
- - [Common Event and Notification](notification/Readme-EN.md)
+ - [Notification](notification/Readme-EN.md)
- [Window Manager](windowmanager/Readme-EN.md)
- [WebGL](webgl/Readme-EN.md)
- [Media](media/Readme-EN.md)
@@ -57,7 +56,7 @@
- [Connectivity](connectivity/Readme-EN.md)
- [Data Management](database/Readme-EN.md)
- [File Management](file-management/Readme-EN.md)
- - [Telephony](telephony/Readme-EN.md)
+ - [Telephony Service](telephony/Readme-EN.md)
- [Task Management](task-management/Readme-EN.md)
- [Device Management](device/Readme-EN.md)
- [Device Usage Statistics](device-usage-statistics/Readme-EN.md)
diff --git a/en/application-dev/ability-deprecated/ability-delegator.md b/en/application-dev/ability-deprecated/ability-delegator.md
index f72a192dc510c28104511fb1530a915c9f9827cc..b32d472176a5b6270fece94ae4bd8ae9a7bd73fa 100644
--- a/en/application-dev/ability-deprecated/ability-delegator.md
+++ b/en/application-dev/ability-deprecated/ability-delegator.md
@@ -63,7 +63,7 @@ For details about how to use DevEco Studio to start the test framework, see [Ope
**Example**
```javascript
-import AbilityDelegatorRegistry from '@ohos.app.ability.abilityDelegatorRegistry';
+import AbilityDelegatorRegistry from '@ohos.application.abilityDelegatorRegistry';
function onAbilityCreateCallback(data) {
console.info("onAbilityCreateCallback");
@@ -87,11 +87,11 @@ abilityDelegator.addAbilityMonitor(monitor).then(() => {
**Modules to Import**
```javascript
-import AbilityDelegatorRegistry from '@ohos.app.ability.abilityDelegatorRegistry';
+import AbilityDelegatorRegistry from '@ohos.application.abilityDelegatorRegistry';
```
```javascript
-var abilityDelegator = AbilityDelegatorRegistry.getAbilityDelegator();
+var abilityDelegator = AbilityDelegatorRegistry.getAbilityDelegator();
```
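+
+Once obtained, the delegator can drive test scenarios. A small sketch (assuming the test framework has already been started):
+
+```javascript
+abilityDelegator.getCurrentTopAbility((err, ability) => {
+    // Obtain the ability currently in the foreground of the test session; only the error is logged here.
+    console.info('getCurrentTopAbility err: ' + JSON.stringify(err))
+})
+```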
### Starting an Ability and Listening for the Ability State
diff --git a/en/application-dev/ability-deprecated/context-userguide.md b/en/application-dev/ability-deprecated/context-userguide.md
index 1340e72918e141dd3b95b5ddc8dbc11258f83493..79cae1da5611b0736f7d11a5bb0cfb9b48df3f0a 100644
--- a/en/application-dev/ability-deprecated/context-userguide.md
+++ b/en/application-dev/ability-deprecated/context-userguide.md
@@ -250,9 +250,9 @@ In the stage model, in the onWindowStageCreate lifecycle of an ability, you can
Use the API described in the table below to obtain the context associated with an ArkTS page.
-| API | Description |
-| :------------------------------------ | :--------------------------- |
-| getContext(component: Object): Object | Obtains the **Context** object associated with a component on the page.|
+| API | Description |
+| :------------------------------------ | :----------------------------------------------------------- |
+| getContext(component: Object): Object | Obtains the **Context** object associated with a component on the page.<br>Since API version 9, this API is supported in ArkTS widgets.|
**Example**
diff --git a/en/application-dev/ability-deprecated/fa-dataability.md b/en/application-dev/ability-deprecated/fa-dataability.md
index 8d94e8f225a3966d676e6c7631968c25f5634531..217f617db77ff329eb1d0fa0eef7dcb6172cf45a 100644
--- a/en/application-dev/ability-deprecated/fa-dataability.md
+++ b/en/application-dev/ability-deprecated/fa-dataability.md
@@ -154,7 +154,7 @@ The basic dependency packages include:
import featureAbility from '@ohos.ability.featureAbility'
import ohos_data_ability from '@ohos.data.dataAbility'
import ohos_data_rdb from '@ohos.data.rdb'
-
+
var urivar = "dataability:///com.ix.DataAbility"
var DAHelper = featureAbility.acquireDataAbilityHelper(
urivar
diff --git a/en/application-dev/ability-deprecated/fa-formability.md b/en/application-dev/ability-deprecated/fa-formability.md
index 5c08a1b0b3955472d6f3b16cf7a343a083a0116a..96ed58d8ef2206d6c66e413d0a6fc34423651974 100644
--- a/en/application-dev/ability-deprecated/fa-formability.md
+++ b/en/application-dev/ability-deprecated/fa-formability.md
@@ -25,7 +25,7 @@ Carry out the following operations to develop the widget provider based on the [
1. Implement lifecycle callbacks by using the **LifecycleForm** APIs.
2. Create a **FormBindingData** instance.
3. Update a widget by using the **FormProvider** APIs.
-4. Develop the widget UI pages.
+4. Develop the widget UI page.
## Available APIs
@@ -231,7 +231,7 @@ You should override **onDestroy** to implement widget data deletion.
}
```
-For details about how to implement persistent data storage, see [Lightweight Data Store Development](../database/database-preference-guidelines.md).
+For details about how to implement persistent data storage, see [Data Persistence by User Preferences](../database/data-persistence-by-preferences.md).
The **Want** object passed in by the widget host to the widget provider contains a flag that specifies whether the requested widget is normal or temporary.
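+
+A sketch of reading that flag, assuming the FA-model **formInfo** module and its **FormParam.TEMPORARY_KEY** constant (verify the constant against your SDK version):
+
+```javascript
+import formInfo from '@ohos.ability.formInfo'
+import formBindingData from '@ohos.application.formBindingData'
+
+export default {
+    onCreate(want) {
+        // true for a temporary widget, false (or undefined) for a normal one.
+        let isTemporary = want.parameters[formInfo.FormParam.TEMPORARY_KEY]
+        let obj = { "temporary": String(isTemporary) }
+        return formBindingData.createFormBindingData(obj)
+    }
+}
+```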
@@ -402,3 +402,5 @@ The code snippet is as follows:
}
}
```
+
+
\ No newline at end of file
diff --git a/en/application-dev/ability-deprecated/fa-pageability.md b/en/application-dev/ability-deprecated/fa-pageability.md
index 28b5ce36e292acc9e350f8ae96cb7bcf17f8c8c3..e28c0f2823ff61f6c60f469eaaf9d197184e8f50 100644
--- a/en/application-dev/ability-deprecated/fa-pageability.md
+++ b/en/application-dev/ability-deprecated/fa-pageability.md
@@ -47,7 +47,7 @@ You can specify the launch type by setting **launchType** in the **config.json**
| Launch Type | Description | Action |
| ----------- | ------- |---------------- |
-| standard | Multi-instance | A new instance is started each time an ability starts.|
+| standard | Multi-instance | A new instance is started each time an ability starts.|
| singleton | Singleton | The ability has only one instance in the system. If an instance already exists when an ability is started, that instance is reused.|
By default, **singleton** is used.
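+
+A minimal **config.json** fragment (a sketch; field placement per the FA-model configuration file) that sets the launch type of an ability:
+
+```json
+{
+  "module": {
+    "abilities": [
+      {
+        "launchType": "singleton"
+      }
+    ]
+  }
+}
+```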
diff --git a/en/application-dev/ability-deprecated/stage-ability-continuation.md b/en/application-dev/ability-deprecated/stage-ability-continuation.md
index b53d57d849c8c771b92d4e86a2095163aab0a395..f99966aff24d9b465627ba475cda018671820809 100644
--- a/en/application-dev/ability-deprecated/stage-ability-continuation.md
+++ b/en/application-dev/ability-deprecated/stage-ability-continuation.md
@@ -6,7 +6,7 @@ Ability continuation is to continue the current mission of an application, inclu
## Available APIs
-The following table lists the APIs used for ability continuation. For details about the APIs, see [Ability](../reference/apis/js-apis-application-ability.md).
+The following table lists the APIs used for ability continuation. For details about the APIs, see [UIAbility](../reference/apis/js-apis-app-ability-uiAbility.md).
**Table 1** Ability continuation APIs
@@ -48,96 +48,88 @@ The code snippets provided below are all from [Sample](https://gitee.com/openhar
}
```
-
-
-
- Configure the application startup type.
-
- If **launchType** is set to **standard** in the **module.json5** file, the application is of the multi-instance launch type. During ability continuation, regardless of whether the application is already open, the target starts the application and restores the UI page. If **launchType** is set to **singleton**, the application is of the singleton launch type. If the application is already open, the target clears the existing page stack and restores the UI page. For more information, see "Launch Type" in [Ability Development](./stage-ability.md).
+
+ If **launchType** is set to **multiton** in the **module.json5** file, the application is of the multi-instance launch type. During ability continuation, regardless of whether the application is already open, the target starts the application and restores the UI page. If **launchType** is set to **singleton**, the application is of the singleton launch type. If the application is already open, the target clears the existing page stack and restores the UI page. For more information, see "Launch Type" in [Ability Development](./stage-ability.md).
+
+ Configure a multi-instance application as follows:
+
+ ```javascript
+ {
+ "module": {
+ "abilities": [
+ {
+ "launchType": "multiton"
+ }
+ ]
+ }
+ }
+ ```
+
+ Configure a singleton application as follows or retain the default settings of **launchType**:
+
+ ```javascript
+ {
+ "module": {
+ "abilities": [
+ {
+ "launchType": "singleton"
+ }
+ ]
+ }
+ }
+ ```
+
+ - Apply for the distributed permissions.
- Configure a multi-instance application as follows:
+ Declare the **DISTRIBUTED_DATASYNC** permission in the **module.json5** file for the application.
- ```javascript
- {
- "module": {
- "abilities": [
- {
- "launchType": "standard"
- }
- ]
- }
- }
- ```
+ ```javascript
+ "requestPermissions": [
+ {
+ "name": "ohos.permission.DISTRIBUTED_DATASYNC"
+ },
+ ```
- Configure a singleton application as follows or retain the default settings of **launchType**:
+ This permission must be granted by the user in a dialog box when the application is started for the first time. To enable the application to display a dialog box to ask for the permission, add the following code to **onWindowStageCreate** of the **Ability** class:
- ```javascript
- {
- "module": {
- "abilities": [
- {
- "launchType": "singleton"
+ ```javascript
+ requestPermissions = async () => {
+ let permissions: Array<string> = [
+ "ohos.permission.DISTRIBUTED_DATASYNC"
+ ];
+ let needGrantPermission = false
+ let accessManger = accessControl.createAtManager()
+ Logger.info("app permission get bundle info")
+ let bundleInfo = await bundle.getApplicationInfo(BUNDLE_NAME, 0, 100)
+ Logger.info(`app permission query permission ${bundleInfo.accessTokenId.toString()}`)
+ for (const permission of permissions) {
+ Logger.info(`app permission query grant status ${permission}`)
+ try {
+ let grantStatus = await accessManger.verifyAccessToken(bundleInfo.accessTokenId, permission)
+ if (grantStatus === PERMISSION_REJECT) {
+ needGrantPermission = true
+ break;
+ }
+ } catch (err) {
+ Logger.error(`app permission query grant status error ${permission} ${JSON.stringify(err)}`)
+ needGrantPermission = true
+ break;
+ }
+ }
+ if (needGrantPermission) {
+ Logger.info("app permission needGrantPermission")
+ try {
+ await accessManger.requestPermissionsFromUser(this.context, permissions)
+ } catch (err) {
+ Logger.error(`app permission ${JSON.stringify(err)}`)
+ }
+ } else {
+ Logger.info("app permission already granted")
+ }
}
- ]
- }
- }
- ```
-
-
-
- - Apply for the distributed permissions.
-
- Declare the **DISTRIBUTED_DATASYNC** permission in the **module.json5** file for the application.
-
- ```javascript
- "requestPermissions": [
- {
- "name": "ohos.permission.DISTRIBUTED_DATASYNC"
- },
- ```
-
-
-
- This permission must be granted by the user in a dialog box when the application is started for the first time. To enable the application to display a dialog box to ask for the permission, add the following code to **onWindowStageCreate** of the **Ability** class:
-
- ```javascript
- requestPermissions = async () => {
- let permissions: Array<string> = [
- "ohos.permission.DISTRIBUTED_DATASYNC"
- ];
- let needGrantPermission = false
- let accessManger = accessControl.createAtManager()
- Logger.info("app permission get bundle info")
- let bundleInfo = await bundle.getApplicationInfo(BUNDLE_NAME, 0, 100)
- Logger.info(`app permission query permission ${bundleInfo.accessTokenId.toString()}`)
- for (const permission of permissions) {
- Logger.info(`app permission query grant status ${permission}`)
- try {
- let grantStatus = await accessManger.verifyAccessToken(bundleInfo.accessTokenId, permission)
- if (grantStatus === PERMISSION_REJECT) {
- needGrantPermission = true
- break;
- }
- } catch (err) {
- Logger.error(`app permission query grant status error ${permission} ${JSON.stringify(err)}`)
- needGrantPermission = true
- break;
- }
- }
- if (needGrantPermission) {
- Logger.info("app permission needGrantPermission")
- try {
- await accessManger.requestPermissionsFromUser(this.context, permissions)
- } catch (err) {
- Logger.error(`app permission ${JSON.stringify(err)}`)
- }
- } else {
- Logger.info("app permission already granted")
- }
- }
- ```
-
-
+ ```
+
2. Implement the **onContinue()** API.
@@ -155,7 +147,7 @@ The code snippets provided below are all from [Sample](https://gitee.com/openhar
You can obtain the target device ID (identified by the key **targetDevice**) and the version number (identified by the key **version**) of the application installed on the target device from the **wantParam** parameter of this API. The version number can be used for compatibility check. If the current application version is incompatible with that on the target device, **OnContinueResult.MISMATCH** can be returned to reject the continuation request.
- Example
+ Example:
```javascript
onContinue(wantParam : {[key: string]: any}) {
@@ -168,8 +160,6 @@ The code snippets provided below are all from [Sample](https://gitee.com/openhar
}
```
-
-
3. Implement the continuation logic in the **onCreate()** or **onNewWant()** API.
The **onCreate()** API is called by the target. When the ability is started on the target device, this API is called to instruct the application to synchronize the memory data and UI component state, and triggers page restoration after the synchronization is complete. If the continuation logic is not implemented, the ability will be started in common startup mode and the page cannot be restored.
@@ -178,11 +168,9 @@ The code snippets provided below are all from [Sample](https://gitee.com/openhar
After data restore is complete, call **restoreWindowStage** to trigger page restoration.
-
-
You can also use **want.parameters.version** in the **want** parameter to obtain the application version number of the initiator.
-
- Example
+
+ Example:
```javascript
import UIAbility from '@ohos.app.ability.UIAbility';
@@ -190,7 +178,7 @@ The code snippets provided below are all from [Sample](https://gitee.com/openhar
export default class EntryAbility extends UIAbility {
storage : LocalStorage;
-
+
onCreate(want, launchParam) {
Logger.info(`EntryAbility onCreate ${AbilityConstant.LaunchReason.CONTINUATION}`)
if (launchParam.launchReason == AbilityConstant.LaunchReason.CONTINUATION) {
@@ -211,7 +199,7 @@ For a singleton ability, use **onNewWant()** to achieve the same implementation.
Use distributed objects.
-Distributed objects allow cross-device data synchronization like local variables. For two devices that form a Super Device, when data in the distributed data object of an application is added, deleted, or modified on a device, the data for the same application is also updated on the other device. Both devices can listen for the data changes and online and offline states of the other. For details, see [Distributed Data Object Development](../database/database-distributedobject-guidelines.md).
+Distributed objects allow cross-device data synchronization like local variables. For two devices that form a Super Device, when data in the distributed data object of an application is added, deleted, or modified on a device, the data for the same application is also updated on the other device. Both devices can listen for the data changes and online and offline states of the other. For details, see [Sharing Distributed Data Objects](../database/data-sync-of-distributed-data-object.md).
In the ability continuation scenario, the distributed data object is used to synchronize the memory data from the local device to the target device.
@@ -249,8 +237,6 @@ In the ability continuation scenario, the distributed data object is used to syn
});
```
-
-
- The target device obtains the session ID from **onCreate()**, creates a distributed object, and associates the distributed object with the session ID. In this way, the distributed object can be synchronized. Before calling **restoreWindowStage**, ensure that all distributed objects required for continuation have been associated.
```javascript
@@ -283,8 +269,6 @@ In the ability continuation scenario, the distributed data object is used to syn
}
```
-
-
### More Information
1. Timeout
@@ -294,15 +278,13 @@ In the ability continuation scenario, the distributed data object is used to syn
2. By default, the system supports page stack information migration, which means that the page stack of the initiator will be automatically migrated to the target device. No adaptation is required.
-
-
### Restrictions
1. The continuation must be performed between the same ability, which means the same bundle name, module name, and ability name. For details, see [Application Package Structure Configuration File](../quick-start/module-configuration-file.md).
2. Currently, the application can only implement the continuation capability. The continuation action must be initiated by the system.
-
-
### Best Practice
For better user experience, you are advised to use the **wantParam** parameter to transmit data smaller than 100 KB and use distributed objects to transmit data larger than 100 KB.
+
+
\ No newline at end of file
diff --git a/en/application-dev/ability-deprecated/stage-ability.md b/en/application-dev/ability-deprecated/stage-ability.md
index 60f954c78f306193e7bfefe1e6ceee2babf86da4..2cd18f7aa3052cee86785d55bc81d68cfdece802 100644
--- a/en/application-dev/ability-deprecated/stage-ability.md
+++ b/en/application-dev/ability-deprecated/stage-ability.md
-An ability can be launched in the **standard**, **singleton**, or **specified** mode.
+An ability can be launched in the **multiton**, **singleton**, or **specified** mode.
| Launch Type | Description |Action |
| ----------- | ------- |---------------- |
-| standard | Standard mode | A new instance is started each time an ability starts.|
-| singleton | Singleton mode | The ability has only one instance in the system. If an instance already exists when an ability is started, that instance is reused.|
+| multiton | Multi-instance mode| A new instance is started each time an ability starts.|
+| singleton | Singleton mode | Default type. The ability has only one instance in the system. If an instance already exists when an ability is started, that instance is reused.|
| specified | Instance-specific| The internal service of an ability determines whether to create multiple instances during running.|
By default, the singleton mode is used. The following is an example of the **module.json5** file:
@@ -39,7 +39,7 @@ The table below describes the APIs provided by the **AbilityStage** class, which
|onAcceptWant(want: Want): string|Called when a specified ability is started.|
|onConfigurationUpdated(config: Configuration): void|Called when the global configuration is updated.|
-The table below describes the APIs provided by the **Ability** class. For details about the APIs, see [Ability](../reference/apis/js-apis-application-ability.md).
+The table below describes the APIs provided by the **Ability** class. For details about the APIs, see [UIAbility](../reference/apis/js-apis-app-ability-uiAbility.md).
**Table 2** Ability APIs
@@ -190,7 +190,7 @@ export default class EntryAbility extends UIAbility {
```
## Starting an Ability
### Available APIs
-The **Ability** class has the **context** attribute, which belongs to the **AbilityContext** class. The **AbilityContext** class has the **abilityInfo**, **currentHapModuleInfo**, and other attributes as well as the APIs used for starting abilities. For details, see [AbilityContext](../reference/apis/js-apis-ability-context.md).
+The **Ability** class has the **context** attribute, which belongs to the **AbilityContext** class. The **AbilityContext** class has the **abilityInfo**, **currentHapModuleInfo**, and other attributes as well as the APIs used for starting abilities. For details, see [AbilityContext](../reference/apis/js-apis-inner-application-uiAbilityContext.md).
**Table 3** AbilityContext APIs
|API|Description|
@@ -207,7 +207,7 @@ The **Ability** class has the **context** attribute, which belongs to the **Abil
An application can obtain the context of an **Ability** instance through **this.context** and then use the **startAbility** API in the **AbilityContext** class to start the ability. The ability can be started by specifying **Want**, **StartOptions**, and **accountId**, and the operation result can be returned using a callback or **Promise** instance. The sample code is as follows:
```ts
let context = this.context
-var want = {
+let want = {
"deviceId": "",
"bundleName": "com.example.MyApplication",
"abilityName": "EntryAbility"
@@ -224,7 +224,7 @@ context.startAbility(want).then(() => {
In the cross-device scenario, you must specify the ID of the remote device. The sample code is as follows:
```ts
let context = this.context
-var want = {
+let want = {
"deviceId": getRemoteDeviceId(),
"bundleName": "com.example.MyApplication",
"abilityName": "EntryAbility"
@@ -239,9 +239,9 @@ Obtain the ID of a specified device from **DeviceManager**. The sample code is a
```ts
import deviceManager from '@ohos.distributedHardware.deviceManager';
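+var dmClass;
+// Assumption: dmClass must be created first; createDeviceManager() and
+// getTrustedDeviceListSync() are open only to system applications.
+deviceManager.createDeviceManager('com.example.MyApplication', (err, dm) => {
+    if (err) {
+        console.log('createDeviceManager errCode: ' + JSON.stringify(err));
+        return;
+    }
+    dmClass = dm;
+});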
function getRemoteDeviceId() {
- if (typeof dmClass === 'object' && dmClass != null) {
- var list = dmClass.getTrustedDeviceListSync();
- if (typeof (list) == 'undefined' || typeof (list.length) == 'undefined') {
+ if (typeof dmClass === 'object' && dmClass !== null) {
+ let list = dmClass.getTrustedDeviceListSync();
+ if (typeof (list) === 'undefined' || typeof (list.length) === 'undefined') {
console.log("EntryAbility onButtonClick getRemoteDeviceId err: list is null");
return;
}
diff --git a/en/application-dev/ability-deprecated/stage-call.md b/en/application-dev/ability-deprecated/stage-call.md
index 71f5f6934dda385161f4adcb95837924c691c278..d9269295e06633fa0f55bdebad51eb1c354f2934 100644
--- a/en/application-dev/ability-deprecated/stage-call.md
+++ b/en/application-dev/ability-deprecated/stage-call.md
@@ -31,12 +31,12 @@ The ability call process is as follows:
> Currently, only system applications can use the ability call.
## Available APIs
-The table below describes the ability call APIs. For details, see [Ability](../reference/apis/js-apis-application-ability.md#caller).
+The table below describes the ability call APIs. For details, see [UIAbility](../reference/apis/js-apis-app-ability-uiAbility.md#caller).
**Table 2** Ability call APIs
|API|Description|
|:------|:------|
-|startAbilityByCall(want: Want): Promise\<Caller\>|Starts an ability in the foreground (through the **want** configuration) or background (default) and obtains the **Caller** object for communication with the ability. For details, see [AbilityContext](../reference/apis/js-apis-ability-context.md#abilitycontextstartabilitybycall) or **ServiceExtensionContext**.|
+|startAbilityByCall(want: Want): Promise\<Caller\>|Starts an ability in the foreground (through the **want** configuration) or background (default) and obtains the **Caller** object for communication with the ability. For details, see [AbilityContext](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartabilitybycall) or **ServiceExtensionContext**.|
|on(method: string, callback: CalleeCallBack): void|Callback invoked when the callee ability registers a method.|
|off(method: string): void|Callback invoked when the callee ability deregisters a method.|
|call(method: string, data: rpc.Sequenceable): Promise\<void\>|Sends agreed sequenceable data to the callee ability.|
@@ -47,242 +47,263 @@ The table below describes the ability call APIs. For details, see [Ability](../r
## How to Develop
The procedure for developing the ability call is as follows:
1. Create a callee ability.
-
2. Access the callee ability.
### Creating a Callee Ability
For the callee ability, implement the callback to receive data and the methods to marshal and unmarshal data. When data needs to be received, use **on()** to register a listener. When data does not need to be received, use **off()** to deregister the listener.
-**1. Configure the ability launch type.**
-
- Set **launchType** of the callee ability to **singleton** in the **module.json5** file.
-|JSON Field|Description|
-|:------|:------|
-|"launchType"|Ability launch type. Set this parameter to **singleton**.|
-
-An example of the ability configuration is as follows:
-```json
-"abilities":[{
- "name": ".CalleeAbility",
- "srcEntrance": "./ets/CalleeAbility/CalleeAbility.ts",
- "launchType": "singleton",
- "description": "$string:CalleeAbility_desc",
- "icon": "$media:icon",
- "label": "$string:CalleeAbility_label",
- "visible": true
-}]
-```
-**2. Import the Ability module.**
-```ts
-import Ability from '@ohos.app.ability.UIAbility'
-```
-**3. Define the agreed sequenceable data.**
-
- The data formats sent and received by the caller and callee abilities must be consistent. In the following example, the data formats are number and string. The code snippet is as follows:
-```ts
-export default class MySequenceable {
- num: number = 0
- str: string = ""
-
- constructor(num, string) {
- this.num = num
- this.str = string
- }
-
- marshalling(messageParcel) {
- messageParcel.writeInt(this.num)
- messageParcel.writeString(this.str)
- return true
- }
-
- unmarshalling(messageParcel) {
- this.num = messageParcel.readInt()
- this.str = messageParcel.readString()
- return true
- }
-}
-```
-**4. Implement Callee.on and Callee.off.**
-
- The time to register a listener for the callee ability depends on your application. The data sent and received before the listener is registered and that after the listener is deregistered are not processed. In the following example, the **MSG_SEND_METHOD** listener is registered in **onCreate** of the ability and deregistered in **onDestroy**. After receiving sequenceable data, the application processes the data and returns the data result. You need to implement processing based on service requirements. The code snippet is as follows:
-```ts
-const TAG: string = '[CalleeAbility]'
-const MSG_SEND_METHOD: string = 'CallSendMsg'
-
-function sendMsgCallback(data) {
- console.log('CalleeSortFunc called')
-
- // Obtain the sequenceable data sent by the caller ability.
- let receivedData = new MySequenceable(0, '')
- data.readSequenceable(receivedData)
- console.log(`receiveData[${receivedData.num}, ${receivedData.str}]`)
-
- // Process the data.
- // Return the sequenceable data result to the caller ability.
- return new MySequenceable(receivedData.num + 1, `send ${receivedData.str} succeed`)
-}
-
-export default class CalleeAbility extends Ability {
- onCreate(want, launchParam) {
- try {
- this.callee.on(MSG_SEND_METHOD, sendMsgCallback)
- } catch (error) {
- console.log(`${MSG_SEND_METHOD} register failed with error ${JSON.stringify(error)}`)
- }
- }
-
- onDestroy() {
- try {
- this.callee.off(MSG_SEND_METHOD)
- } catch (error) {
- console.error(TAG, `${MSG_SEND_METHOD} unregister failed with error ${JSON.stringify(error)}`)
- }
- }
-}
-```
+1. **Configure the ability launch type.**
+
+ Set **launchType** of the callee ability to **singleton** in the **module.json5** file.
+
+ |JSON Field|Description|
+ |:------|:------|
+ |"launchType"|Ability launch type. Set this parameter to **singleton**.|
+
+ An example of the ability configuration is as follows:
+
+ ```json
+ "abilities":[{
+ "name": ".CalleeAbility",
+ "srcEntry": "./ets/CalleeAbility/CalleeAbility.ts",
+ "launchType": "singleton",
+ "description": "$string:CalleeAbility_desc",
+ "icon": "$media:icon",
+ "label": "$string:CalleeAbility_label",
+ "exported": true
+ }]
+ ```
+
+2. **Import the UIAbility module.**
+
+ ```ts
+ import UIAbility from '@ohos.app.ability.UIAbility';
+ ```
+
+3. **Define the agreed sequenceable data.**
+
+ The data formats sent and received by the caller and callee abilities must be consistent. In the following example, the data formats are number and string. The code snippet is as follows:
+
+ ```ts
+ export default class MySequenceable {
+ num: number = 0
+ str: string = ""
+
+ constructor(num, string) {
+ this.num = num
+ this.str = string
+ }
+
+ marshalling(messageParcel) {
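+ // Note: writes here must occur in the same order as the reads in unmarshalling() below.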
+ messageParcel.writeInt(this.num)
+ messageParcel.writeString(this.str)
+ return true
+ }
+
+ unmarshalling(messageParcel) {
+ this.num = messageParcel.readInt()
+ this.str = messageParcel.readString()
+ return true
+ }
+ }
+ ```
+
+4. **Implement Callee.on and Callee.off.**
+
+ The time to register a listener for the callee ability depends on your application. The data sent and received before the listener is registered and that after the listener is deregistered are not processed. In the following example, the **MSG_SEND_METHOD** listener is registered in **onCreate** of the ability and deregistered in **onDestroy**. After receiving sequenceable data, the application processes the data and returns the data result. You need to implement processing based on service requirements. The code snippet is as follows:
+
+ ```ts
+ const TAG: string = '[CalleeAbility]'
+ const MSG_SEND_METHOD: string = 'CallSendMsg'
+
+ function sendMsgCallback(data) {
+ console.log('CalleeSortFunc called')
+
+ // Obtain the sequenceable data sent by the caller ability.
+ let receivedData = new MySequenceable(0, '')
+ data.readSequenceable(receivedData)
+ console.log(`receiveData[${receivedData.num}, ${receivedData.str}]`)
+
+ // Process the data.
+ // Return the sequenceable data result to the caller ability.
+ return new MySequenceable(receivedData.num + 1, `send ${receivedData.str} succeed`)
+ }
+
+ export default class CalleeAbility extends UIAbility {
+ onCreate(want, launchParam) {
+ try {
+ this.callee.on(MSG_SEND_METHOD, sendMsgCallback)
+ } catch (error) {
+ console.log(`${MSG_SEND_METHOD} register failed with error ${JSON.stringify(error)}`)
+ }
+ }
+
+ onDestroy() {
+ try {
+ this.callee.off(MSG_SEND_METHOD)
+ } catch (error) {
+ console.error(TAG, `${MSG_SEND_METHOD} unregister failed with error ${JSON.stringify(error)}`)
+ }
+ }
+ }
+ ```
### Accessing the Callee Ability
-**1. Import the Ability module.**
-```ts
-import Ability from '@ohos.app.ability.UIAbility'
-```
-**2. Obtain the Caller object.**
-
- The **context** attribute of the ability implements **startAbilityByCall** to obtain the **Caller** object for communication. The following example uses **this.context** to obtain the **context** attribute of the ability, uses **startAbilityByCall** to start the callee ability, obtain the **Caller** object, and register the **onRelease** listener of the caller ability. You need to implement processing based on service requirements. The code snippet is as follows:
-```ts
-// Register the onRelease listener of the caller ability.
-private regOnRelease(caller) {
- try {
- caller.on("release", (msg) => {
- console.log(`caller onRelease is called ${msg}`)
- })
- console.log('caller register OnRelease succeed')
- } catch (error) {
- console.log(`caller register OnRelease failed with ${error}`)
- }
-}
-
-async onButtonGetCaller() {
- try {
- this.caller = await context.startAbilityByCall({
- bundleName: 'com.samples.CallApplication',
- abilityName: 'CalleeAbility'
- })
- if (this.caller === undefined) {
- console.log('get caller failed')
- return
- }
- console.log('get caller success')
- this.regOnRelease(this.caller)
- } catch (error) {
- console.log(`get caller failed with ${error}`)
- }
-}
-```
- In the cross-device scenario, you need to specify the ID of the peer device. The code snippet is as follows:
-```ts
-async onButtonGetRemoteCaller() {
- var caller = undefined
- var context = this.context
-
- context.startAbilityByCall({
- deviceId: getRemoteDeviceId(),
- bundleName: 'com.samples.CallApplication',
- abilityName: 'CalleeAbility'
- }).then((data) => {
- if (data != null) {
- caller = data
- console.log('get remote caller success')
- // Register the onRelease listener of the caller ability.
- caller.on("release", (msg) => {
- console.log(`remote caller onRelease is called ${msg}`)
- })
- console.log('remote caller register OnRelease succeed')
- }
- }).catch((error) => {
- console.error(`get remote caller failed with ${error}`)
- })
-}
-```
- Obtain the ID of the peer device from **DeviceManager**. Note that the **getTrustedDeviceListSync** API is open only to system applications. The code snippet is as follows:
-```ts
-import deviceManager from '@ohos.distributedHardware.deviceManager';
-var dmClass;
-function getRemoteDeviceId() {
- if (typeof dmClass === 'object' && dmClass != null) {
- var list = dmClass.getTrustedDeviceListSync()
- if (typeof (list) == 'undefined' || typeof (list.length) == 'undefined') {
- console.log("EntryAbility onButtonClick getRemoteDeviceId err: list is null")
- return
- }
- console.log("EntryAbility onButtonClick getRemoteDeviceId success:" + list[0].deviceId)
- return list[0].deviceId
- } else {
- console.log("EntryAbility onButtonClick getRemoteDeviceId err: dmClass is null")
- }
-}
-```
- In the cross-device scenario, your application must also apply for the data synchronization permission from end users. The code snippet is as follows:
-```ts
-import abilityAccessCtrl from '@ohos.abilityAccessCtrl';
-
-requestPermission() {
- let context = this.context
- let permissions: Array<string> = ['ohos.permission.DISTRIBUTED_DATASYNC']
- let atManager = abilityAccessCtrl.createAtManager();
- atManager.requestPermissionsFromUser(context, permissions).then((data) => {
- console.log("Succeed to request permission from user with data: "+ JSON.stringify(data))
- }).catch((error) => {
- console.log("Failed to request permission from user with error: "+ JSON.stringify(error))
- })
-}
-```
-**3. Send agreed sequenceable data.**
-
- The sequenceable data can be sent to the callee ability with or without a return value. The method and sequenceable data must be consistent with those of the callee ability. The following example describes how to send data to the callee ability. The code snippet is as follows:
-```ts
-const MSG_SEND_METHOD: string = 'CallSendMsg'
-async onButtonCall() {
- try {
- let msg = new MySequenceable(1, 'origin_Msg')
- await this.caller.call(MSG_SEND_METHOD, msg)
- } catch (error) {
- console.log(`caller call failed with ${error}`)
- }
-}
-```
-
- In the following, **CallWithResult** is used to send data **originMsg** to the callee ability and assign the data processed by the **CallSendMsg** method to **backMsg**. The code snippet is as follows:
-```ts
-const MSG_SEND_METHOD: string = 'CallSendMsg'
-originMsg: string = ''
-backMsg: string = ''
-async onButtonCallWithResult(originMsg, backMsg) {
- try {
- let msg = new MySequenceable(1, originMsg)
- const data = await this.caller.callWithResult(MSG_SEND_METHOD, msg)
- console.log('caller callWithResult succeed')
-
- let result = new MySequenceable(0, '')
- data.readSequenceable(result)
- backMsg(result.str)
- console.log(`caller result is [${result.num}, ${result.str}]`)
- } catch (error) {
- console.log(`caller callWithResult failed with ${error}`)
- }
-}
-```
-**4. Release the Caller object.**
-
- When the **Caller** object is no longer required, use **release()** to release it. The code snippet is as follows:
-```ts
-releaseCall() {
- try {
- this.caller.release()
- this.caller = undefined
- console.log('caller release succeed')
- } catch (error) {
- console.log(`caller release failed with ${error}`)
- }
-}
-```
+1. **Import the UIAbility module.**
+
+ ```ts
+ import UIAbility from '@ohos.app.ability.UIAbility';
+ ```
+
+2. **Obtain the Caller object.**
+
+ The **context** attribute of the ability implements **startAbilityByCall** to obtain the **Caller** object for communication. The following example uses **this.context** to obtain the **context** attribute of the ability, uses **startAbilityByCall** to start the callee ability, obtain the **Caller** object, and register the **onRelease** listener of the caller ability. You need to implement processing based on service requirements. The code snippet is as follows:
+
+ ```ts
+ // Register the onRelease listener of the caller ability.
+ private regOnRelease(caller) {
+ try {
+ caller.on("release", (msg) => {
+ console.log(`caller onRelease is called ${msg}`)
+ })
+ console.log('caller register OnRelease succeed')
+ } catch (error) {
+ console.log(`caller register OnRelease failed with ${error}`)
+ }
+ }
+
+ async onButtonGetCaller() {
+ try {
+ this.caller = await context.startAbilityByCall({
+ bundleName: 'com.samples.CallApplication',
+ abilityName: 'CalleeAbility'
+ })
+ if (this.caller === undefined) {
+ console.log('get caller failed')
+ return
+ }
+ console.log('get caller success')
+ this.regOnRelease(this.caller)
+ } catch (error) {
+ console.log(`get caller failed with ${error}`)
+ }
+ }
+ ```
+
+ In the cross-device scenario, you need to specify the ID of the peer device. The code snippet is as follows:
+
+ ```ts
+ async onButtonGetRemoteCaller() {
+ var caller = undefined
+ var context = this.context
+
+ context.startAbilityByCall({
+ deviceId: getRemoteDeviceId(),
+ bundleName: 'com.samples.CallApplication',
+ abilityName: 'CalleeAbility'
+ }).then((data) => {
+ if (data != null) {
+ caller = data
+ console.log('get remote caller success')
+ // Register the onRelease listener of the caller ability.
+ caller.on("release", (msg) => {
+ console.log(`remote caller onRelease is called ${msg}`)
+ })
+ console.log('remote caller register OnRelease succeed')
+ }
+ }).catch((error) => {
+ console.error(`get remote caller failed with ${error}`)
+ })
+ }
+ ```
+
+ Obtain the ID of the peer device from **DeviceManager**. Note that the **getTrustedDeviceListSync** API is open only to system applications. The code snippet is as follows:
+
+ ```ts
+ import deviceManager from '@ohos.distributedHardware.deviceManager';
+   let dmClass;
+ function getRemoteDeviceId() {
+ if (typeof dmClass === 'object' && dmClass != null) {
+       let list = dmClass.getTrustedDeviceListSync()
+ if (typeof (list) == 'undefined' || typeof (list.length) == 'undefined') {
+ console.log("EntryAbility onButtonClick getRemoteDeviceId err: list is null")
+ return
+ }
+ console.log("EntryAbility onButtonClick getRemoteDeviceId success:" + list[0].deviceId)
+ return list[0].deviceId
+ } else {
+ console.log("EntryAbility onButtonClick getRemoteDeviceId err: dmClass is null")
+ }
+ }
+ ```
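+
+   Note that **dmClass** in the snippet above is assumed to have been initialized beforehand. The following is a minimal sketch of that initialization with **createDeviceManager**; the bundle name is the sample one used in this topic, and error handling is simplified:
+
+   ```ts
+   import deviceManager from '@ohos.distributedHardware.deviceManager';
+
+   // Create the DeviceManager instance once, for example, during ability initialization.
+   deviceManager.createDeviceManager('com.samples.CallApplication', (err, dm) => {
+     if (err) {
+       console.error(`createDeviceManager failed with ${JSON.stringify(err)}`)
+       return
+     }
+     dmClass = dm
+   })
+   ```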
+
+ In the cross-device scenario, your application must also apply for the data synchronization permission from end users. The code snippet is as follows:
+
+ ```ts
+   import abilityAccessCtrl from '@ohos.abilityAccessCtrl';
+
+ requestPermission() {
+ let context = this.context
+     let permissions: Array<string> = ['ohos.permission.DISTRIBUTED_DATASYNC']
+ let atManager = abilityAccessCtrl.createAtManager();
+ atManager.requestPermissionsFromUser(context, permissions).then((data) => {
+ console.log("Succeed to request permission from user with data: "+ JSON.stringify(data))
+ }).catch((error) => {
+ console.log("Failed to request permission from user with error: "+ JSON.stringify(error))
+ })
+ }
+ ```
+
+3. **Send agreed sequenceable data.**
+
+ The sequenceable data can be sent to the callee ability with or without a return value. The method and sequenceable data must be consistent with those of the callee ability. The following example describes how to send data to the callee ability. The code snippet is as follows:
+
+ ```ts
+ const MSG_SEND_METHOD: string = 'CallSendMsg'
+ async onButtonCall() {
+ try {
+ let msg = new MySequenceable(1, 'origin_Msg')
+ await this.caller.call(MSG_SEND_METHOD, msg)
+ } catch (error) {
+ console.log(`caller call failed with ${error}`)
+ }
+ }
+ ```
+
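+   For reference, the following is a minimal sketch of the **MySequenceable** class used in these snippets. It must mirror the definition on the callee side; the field layout here is illustrative:
+
+   ```ts
+   import rpc from '@ohos.rpc';
+
+   class MySequenceable {
+     num: number = 0
+     str: string = ''
+
+     constructor(num, string) {
+       this.num = num
+       this.str = string
+     }
+
+     // Write the fields in a fixed order agreed upon with the callee.
+     marshalling(messageParcel) {
+       messageParcel.writeInt(this.num)
+       messageParcel.writeString(this.str)
+       return true
+     }
+
+     // Read the fields back in the same order.
+     unmarshalling(messageParcel) {
+       this.num = messageParcel.readInt()
+       this.str = messageParcel.readString()
+       return true
+     }
+   }
+   ```
+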
+   In the following, **callWithResult()** is used to send the data **originMsg** to the callee ability and assign the data processed by the **CallSendMsg** method to **backMsg**. The code snippet is as follows:
+
+ ```ts
+ const MSG_SEND_METHOD: string = 'CallSendMsg'
+ originMsg: string = ''
+ backMsg: string = ''
+   async onButtonCallWithResult() {
+     try {
+       let msg = new MySequenceable(1, this.originMsg)
+       const data = await this.caller.callWithResult(MSG_SEND_METHOD, msg)
+       console.log('caller callWithResult succeed')
+
+       let result = new MySequenceable(0, '')
+       data.readSequenceable(result)
+       this.backMsg = result.str
+ console.log(`caller result is [${result.num}, ${result.str}]`)
+ } catch (error) {
+ console.log(`caller callWithResult failed with ${error}`)
+ }
+ }
+ ```
+
+4. **Release the Caller object.**
+
+ When the **Caller** object is no longer required, use **release()** to release it. The code snippet is as follows:
+
+ ```ts
+ releaseCall() {
+ try {
+ this.caller.release()
+ this.caller = undefined
+ console.log('caller release succeed')
+ } catch (error) {
+ console.log(`caller release failed with ${error}`)
+ }
+ }
+ ```
\ No newline at end of file
diff --git a/en/application-dev/ability-deprecated/stage-formextension.md b/en/application-dev/ability-deprecated/stage-formextension.md
index bc1c54afe9d2e323f0938bca250f83737df9cbdb..8a0425f4fab41b97cd15ecb9986f77b4a108ae7a 100644
--- a/en/application-dev/ability-deprecated/stage-formextension.md
+++ b/en/application-dev/ability-deprecated/stage-formextension.md
@@ -135,7 +135,7 @@ To create a widget in the stage model, you need to implement lifecycle callbacks
| Name | Description | Data Type | Default Value Allowed |
| ----------- | ------------------------------------------------------------ | ---------- | -------------------- |
| name | Name of the Extension ability. This field must be specified. | String | No |
- | srcEntrance | Path of the Extension ability lifecycle code. This field must be specified.| String | No |
+ | srcEntry | Path of the Extension ability lifecycle code. This field must be specified.| String | No |
| description | Description of the Extension ability. The value can be a string or a resource index to descriptions in multiple languages.| String | Yes (initial value: left empty)|
| icon | Index of the Extension ability icon file. | String | Yes (initial value: left empty)|
| label | Descriptive information about the Extension ability presented externally. The value can be a string or a resource index to the description.| String | Yes (initial value: left empty)|
@@ -150,7 +150,7 @@ To create a widget in the stage model, you need to implement lifecycle callbacks
```json
"extensionAbilities": [{
"name": "FormAbility",
- "srcEntrance": "./ets/FormAbility/FormAbility.ts",
+ "srcEntry": "./ets/FormAbility/FormAbility.ts",
"label": "$string:form_FormAbility_label",
"description": "$string:form_FormAbility_desc",
"type": "form",
@@ -242,7 +242,7 @@ You should override **onDestroy** to implement widget data deletion.
}
```
-For details about how to implement persistent data storage, see [Lightweight Data Store Development](../database/database-preference-guidelines.md).
+For details about how to implement persistent data storage, see [Application Data Persistence Overview](../database/app-data-persistence-overview.md).
The **Want** object passed in by the widget host to the widget provider contains a flag that specifies whether the requested widget is normal or temporary.
@@ -366,7 +366,7 @@ You can set router and message events for components on a widget. The router eve
1. Set the **onclick** field in the HML file to **routerEvent** or **messageEvent**, depending on the **actions** settings in the JSON file.
2. Set the router event.
- **action**: **"router"**, which indicates a router event.
- - **abilityName**: target ability name, for example, **EntryAbility**, which is the default UIAbility name in DevEco Studio for the stage model.
+ - **abilityName**: target ability name, for example, **EntryAbility**, which is the default main ability name in DevEco Studio for the stage model.
- **params**: custom parameters of the target ability. Set them as required. The value can be obtained from **parameters** in **want** used for starting the target ability. For example, in the lifecycle function **onCreate** of the EntryAbility in the stage model, you can obtain **want** and its **parameters** field.
3. Set the message event.
- **action**: **"message"**, which indicates a message event.
@@ -413,3 +413,5 @@ The code snippet is as follows:
}
}
```
+
+
\ No newline at end of file
diff --git a/en/application-dev/ability-deprecated/stage-serviceextension.md b/en/application-dev/ability-deprecated/stage-serviceextension.md
index aee8f9c8116dffb49956a2bb9a1cad2ad263a166..8f77e3251d56ff8023d8215546a38b0614f5c8b3 100644
--- a/en/application-dev/ability-deprecated/stage-serviceextension.md
+++ b/en/application-dev/ability-deprecated/stage-serviceextension.md
@@ -33,8 +33,8 @@ OpenHarmony does not support creation of a Service Extension ability for third-p
"icon": "$media:icon",
"description": "service",
"type": "service",
- "visible": true,
- "srcEntrance": "./ets/ServiceExtAbility/ServiceExtAbility.ts"
+ "exported": true,
+ "srcEntry": "./ets/ServiceExtAbility/ServiceExtAbility.ts"
}]
```
diff --git a/en/application-dev/application-dev-guide-for-gitee.md b/en/application-dev/application-dev-guide-for-gitee.md
index 96e956f86e1528c5946c094b204c83e6e7d96222..1ad5989d2cf8258c46e219a239a2c8c5a1d1274c 100644
--- a/en/application-dev/application-dev-guide-for-gitee.md
+++ b/en/application-dev/application-dev-guide-for-gitee.md
@@ -24,14 +24,17 @@ First thing first, familiarize yourself with the two cornerstone frameworks in O
All applications should be developed on top of these frameworks.
Then, equip yourself for developing the key features, with the following guidelines:
-- [Common Event and Notification](notification/Readme-EN.md)
+
+- [Web](web/web-component-overview.md)
+- [Notification](notification/Readme-EN.md)
- [Window Manager](windowmanager/Readme-EN.md)
- [WebGL](webgl/Readme-EN.md)
- [Media](media/Readme-EN.md)
- [Security](security/Readme-EN.md)
- [Connectivity](connectivity/Readme-EN.md)
-- [Telephony](telephony/Readme-EN.md)
+- [Telephony Service](telephony/Readme-EN.md)
- [Data Management](database/Readme-EN.md)
+- [File Management](file-management/Readme-EN.md)
- [Task Management](task-management/Readme-EN.md)
- [Device Management](device/Readme-EN.md)
- [Device Usage Statistics](device-usage-statistics/Readme-EN.md)
@@ -69,3 +72,5 @@ They are organized as follows:
### Readme
For details about the principles and basic information of each subsystem, see the README file in [docs/en/readme](../readme).
+
+
\ No newline at end of file
diff --git a/en/application-dev/application-dev-guide.md b/en/application-dev/application-dev-guide.md
index 650eaf0b956e544bd19e8892b0c6946a6839beb5..8170d075cf08e8258b7c8b3731661f0e4959c6aa 100644
--- a/en/application-dev/application-dev-guide.md
+++ b/en/application-dev/application-dev-guide.md
@@ -4,7 +4,7 @@ The application development documents provide reference for you to develop appli
The documents are carefully organized as follows:
-### Getting Started
+## Getting Started
[Here](quick-start/start-overview.md) you'll learn how to quickly get started with OpenHarmony application development.
@@ -12,7 +12,7 @@ Browse the documents on the instructions for quickly building your first applica
Check out the development fundamentals, which comprise descriptions of the package structure configuration file for OpenHarmony applications and the instructions for use of resource files.
-### Development
+## Development
To facilitate your application development, we provide development guidelines for key features.
@@ -24,14 +24,17 @@ First thing first, familiarize yourself with the two cornerstone frameworks in O
All applications should be developed on top of these frameworks.
Then, equip yourself for developing the key features, with the following guidelines:
-- [Common Event and Notification](notification/notification-overview.md)
+
+- [Web](web/web-component-overview.md)
+- [Notification](notification/notification-overview.md)
- [Window Manager](windowmanager/window-overview.md)
- [WebGL](webgl/webgl-overview.md)
-- [Media](media/audio-overview.md)
+- [Media](media/media-application-overview.md)
- [Security](security/userauth-overview.md)
- [Connectivity](connectivity/ipc-rpc-overview.md)
-- [Telephony](telephony/telephony-overview.md)
-- [Data Management](database/database-mdds-overview.md)
+- [Telephony Service](telephony/telephony-overview.md)
+- [Data Management](database/data-mgmt-overview.md)
+- [File Management](file-management/file-management-overview.md)
- [Task Management](task-management/background-task-overview.md)
- [Device](device/usb-overview.md)
- [Device Usage Statistics](device-usage-statistics/device-usage-statistics-overview.md)
@@ -41,30 +44,28 @@ Then, equip yourself for developing the key features, with the following guideli
- [OpenHarmony IDL Specifications and User Guide](IDL/idl-guidelines.md)
- [Using Native APIs in Application Projects](napi/napi-guidelines.md)
-### Tools
+## Tools
DevEco Studio is a high-performance integrated development environment (IDE) recommended for developing OpenHarmony applications.
[Here](https://developer.harmonyos.com/en/docs/documentation/doc-guides/ohos-deveco-studio-overview-0000001263280421) you can learn everything about DevEco Studio, including how to use this tool to create a project and sign, debug, and run an application.
-### Hands-On Tutorials
+## Hands-On Tutorials
To make you better understand how functions work together and jumpstart your application development projects, we provide stripped-down, real-world [samples](https://gitee.com/openharmony/applications_app_samples/blob/master/README.md) and [codelabs](https://gitee.com/openharmony/codelabs).
-### API References
+## API References
API references encompass all components and APIs available in OpenHarmony, helping you use and integrate APIs more effectively.
They are organized as follows:
-- [Component Reference (TypeScript-based Declarative Development Paradigm)](reference/arkui-ts/Readme-EN.md)
-
-- [Component Reference (JavaScript-based Web-like Development Paradigm)](reference/arkui-js/Readme-EN.md)
-
-- [JS Service Widget UI Components](reference/js-service-widget-ui/Readme-EN.md)
-
-- [JS and TS APIs](reference/apis/js-apis-ability-dataUriUtils.md)
-
+- [Component Reference (TypeScript-based Declarative Development Paradigm)](reference/arkui-ts/ts-components-summary.md)
+- [Component Reference (JavaScript-compatible Web-like Development Paradigm-ArkUI.Full)](reference/arkui-js/js-components-common-attributes.md)
+- [Component Reference (JavaScript-compatible Web-like Development Paradigm-ArkUI.Lite)](reference/arkui-js-lite/js-framework-file.md)
+- [JS Service Widget UI Components](reference/js-service-widget-ui/js-service-widget-file.md)
+- [JS and TS APIs](reference/apis/development-intro.md)
- Native APIs
- [Standard Library](reference/native-lib/third_party_libc/musl.md)
- - [Node_API](reference/native-lib/third_party_napi/napi.md)
+ - [Node_API](reference/native-lib/third_party_napi/napi.md)
+
\ No newline at end of file
diff --git a/en/application-dev/application-models/Readme-EN.md b/en/application-dev/application-models/Readme-EN.md
index efc515db54971c76432f02b5f409990ecbd767b7..b38074f214762a1d42474e7e12005314427d3ee1 100644
--- a/en/application-dev/application-models/Readme-EN.md
+++ b/en/application-dev/application-models/Readme-EN.md
@@ -17,9 +17,11 @@
- ExtensionAbility Component
- [ExtensionAbility Component Overview](extensionability-overview.md)
- [ServiceExtensionAbility](serviceextensionability.md)
- - [DataShareExtensionAbility](datashareextensionability.md)
- [FormExtensionAbility (Widget)](widget-development-stage.md)
- - [StaticSubscriberExtensionAbility](static-subscriber-extension-ability.md)
+ - [AccessibilityExtensionAbility](accessibilityextensionability.md)
+ - [EnterpriseAdminExtensionAbility](enterprise-extensionAbility.md)
+ - [InputMethodExtensionAbility](inputmethodextentionability.md)
+ - [WindowExtensionAbility](windowextensionability.md)
- [AbilityStage Component Container](abilitystage.md)
- [Context](application-context-stage.md)
- Want
@@ -32,15 +34,19 @@
- [Component Startup Rules](component-startup-rules.md)
- Inter-Device Application Component Interaction (Continuation)
- [Continuation Overview](inter-device-interaction-hop-overview.md)
- - [Cross-Device Migration](hop-cross-device-migration.md)
- - [Multi-device Collaboration](hop-multi-device-collaboration.md)
+ - [Cross-Device Migration (for System Applications Only)](hop-cross-device-migration.md)
+ - [Multi-device Collaboration (for System Applications Only)](hop-multi-device-collaboration.md)
+ - [Subscribing to System Environment Variable Changes](subscribe-system-environment-variable-changes.md)
- IPC
- [Process Model](process-model-stage.md)
- Common Events
- [Introduction to Common Events](common-event-overview.md)
- - [Subscribing to Common Events](common-event-subscription.md)
+ - Common Event Subscription
+ - [Common Event Subscription Overview](common-event-subscription-overview.md)
+ - [Subscribing to Common Events in Dynamic Mode](common-event-subscription.md)
+ - [Subscribing to Common Events in Static Mode (for System Applications Only)](common-event-static-subscription.md)
+ - [Unsubscribing from Common Events](common-event-unsubscription.md)
- [Publishing Common Events](common-event-publish.md)
- - [Unsubscribing from Common Events](common-event-unsubscription.md)
- [Background Services](background-services.md)
- Inter-Thread Communication
- [Thread Model](thread-model-stage.md)
@@ -50,6 +56,7 @@
- [Mission Management Scenarios](mission-management-overview.md)
- [Mission Management and Launch Type](mission-management-launch-type.md)
- [Page Stack and MissionList](page-mission-stack.md)
+ - [Setting the Icon and Name of a Mission Snapshot](mission-set-icon-name-for-task-snapshot.md)
- [Application Configuration File](config-file-stage.md)
- FA Model Development
- [FA Model Development Overview](fa-model-development-overview.md)
@@ -63,7 +70,7 @@
- [Creating a PageAbility](create-pageability.md)
- [Starting a Local PageAbility](start-local-pageability.md)
- [Stopping a PageAbility](stop-pageability.md)
- - [Starting a Remote PageAbility](start-remote-pageability.md)
+ - [Starting a Remote PageAbility (for System Applications Only)](start-remote-pageability.md)
- [Starting a Specified Page](start-page.md)
- [Window Properties](window-properties.md)
- [Requesting Permissions](request-permissions.md)
diff --git a/en/application-dev/application-models/ability-startup-with-explicit-want.md b/en/application-dev/application-models/ability-startup-with-explicit-want.md
index 9186379f32299ee7a42b7f82af4fc7f464c160d1..6b61b06311a519e959e87d826e4a27c8b2b3d208 100644
--- a/en/application-dev/application-models/ability-startup-with-explicit-want.md
+++ b/en/application-dev/application-models/ability-startup-with-explicit-want.md
@@ -1,4 +1,7 @@
# Using Explicit Want to Start an Ability
+When a user touches a button in an application, the application often needs to start a UIAbility component to complete a specific task. If the **abilityName** and **bundleName** parameters are specified when starting a UIAbility, then the explicit Want is used.
-When a user touches a button in an application, the application often needs to start a UIAbility component to complete a specific task. If the **abilityName** and **bundleName** parameters are specified when starting a UIAbility, the explicit Want is used. For details about how to use the explicit Want, see [Starting UIAbility in the Same Application](uiability-intra-device-interaction.md#starting-uiability-in-the-same-application).
+## Using Explicit Want
+
+To start a UIAbility component in explicit Want mode, specify the **abilityName** and **bundleName** parameters of the target ability. For details, see [Starting UIAbility in the Same Application](uiability-intra-device-interaction.md#starting-uiability-in-the-same-application).
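+
+The following is a minimal sketch of an explicit Want start; the **bundleName** and **abilityName** values are placeholders for the target UIAbility:
+
+```ts
+import common from '@ohos.app.ability.common';
+
+function explicitStartAbility() {
+  let context = getContext(this) as common.UIAbilityContext;
+  // bundleName and abilityName together uniquely identify the target UIAbility.
+  let wantInfo = {
+    bundleName: 'com.example.myapplication',
+    abilityName: 'EntryAbility'
+  }
+  context.startAbility(wantInfo).then(() => {
+    // ...
+  }).catch((err) => {
+    // ...
+  })
+}
+```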
diff --git a/en/application-dev/application-models/ability-startup-with-implicit-want.md b/en/application-dev/application-models/ability-startup-with-implicit-want.md
index 6550e5c628c642cf227cfde5f74eef7b61c8a52b..dbd65bb560d7531bb6e00b21c004815fda1a997c 100644
--- a/en/application-dev/application-models/ability-startup-with-implicit-want.md
+++ b/en/application-dev/application-models/ability-startup-with-implicit-want.md
@@ -1,77 +1,78 @@
# Using Implicit Want to Open a Website
-
-## Prerequisites
-
-One or more browsers are installed on your device.
-
-The **module.json5** of a browser application is as follows:
+This section uses opening a website in a browser as an example. It is assumed that one or more browser applications are installed on the device. To ensure that a browser application can work properly, configure its [module.json5 file](../quick-start/module-configuration-file.md) as follows:
```json
-"skills": [
- {
- "entities": [
- "entity.system.browsable"
- // ...
- ],
- "actions": [
- "ohos.want.action.viewData"
- // ...
- ],
- "uris": [
- {
- "scheme": "https",
- "host": "www.test.com",
- "port": "8080",
- // Prefix matching is used.
- "pathStartWith": "query",
- "type": "text/*"
- },
+{
+ "module": {
+ // ...
+ "abilities": [
{
- "scheme": "http",
// ...
+ "skills": [
+ {
+ "entities": [
+ "entity.system.home",
+ "entity.system.browsable"
+ // ...
+ ],
+ "actions": [
+ "action.system.home",
+ "ohos.want.action.viewData"
+ // ...
+ ],
+ "uris": [
+ {
+ "scheme": "https",
+ "host": "www.test.com",
+ "port": "8080",
+ // Prefix matching is used.
+ "pathStartWith": "query"
+ },
+ {
+ "scheme": "http",
+ // ...
+ }
+ // ...
+ ]
+ }
+ ]
}
- // ...
]
- },
-]
+ }
+}
```
+In the initiator UIAbility, use implicit Want to start the browser application.
-## How to Develop
+```ts
+import common from '@ohos.app.ability.common';
-1. Use the custom function **implicitStartAbility** to start an ability.
-
- ```ts
- async implicitStartAbility() {
- try {
- let want = {
- // Uncomment the line below if you want to implicitly query data only in the specific bundle.
- // bundleName: "com.example.myapplication",
- "action": "ohos.want.action.viewData",
- // entities can be omitted.
- "entities": [ "entity.system.browsable" ],
- "uri": "https://www.test.com:8080/query/student",
- "type": "text/plain"
- }
- let context = getContext(this) as common.UIAbilityContext;
- await context.startAbility(want)
- console.info(`explicit start ability succeed`)
- } catch (error) {
- console.info(`explicit start ability failed with ${error.code}`)
- }
- }
- ```
-
- The matching process is as follows:
- 1. If **action** in the passed **want** parameter is specified and is included in **actions** under **skills**, the matching is successful.
-
- 2. If **entities** in the passed **want** parameter is specified and is included in **entities** under **skills**, the matching is successful.
+function implicitStartAbility() {
+ let context = getContext(this) as common.UIAbilityContext;
+ let wantInfo = {
+ // Uncomment the line below if you want to implicitly query data only in the specific bundle.
+ // bundleName: 'com.example.myapplication',
+ 'action': 'ohos.want.action.viewData',
+ // entities can be omitted.
+ 'entities': ['entity.system.browsable'],
+ 'uri': 'https://www.test.com:8080/query/student'
+ }
+ context.startAbility(wantInfo).then(() => {
+ // ...
+ }).catch((err) => {
+ // ...
+ })
+}
+```
- 3. If **uri** in the passed **want** parameter is included in **uris** under **skills**, which is concatenated into `https://www.test.com:8080/query*` (where \* is a wildcard), the matching is successful.
+The matching process is as follows:
- 4. If **type** in the passed **want** parameter is specified and is included in **type** under **skills**, the matching is successful.
+1. If **action** in the passed **want** parameter is specified and is included in **actions** under **skills** of the ability to match, the matching is successful.
+2. If **entities** in the passed **want** parameter is specified and is included in **entities** under **skills** of the ability to match, the matching is successful.
+3. If **uri** in the passed **want** parameter is included in **uris** under **skills** of the ability to match, which is concatenated into https://www.test.com:8080/query* (where * is a wildcard), the matching is successful.
+4. If **type** in the passed **want** parameter is specified and is included in **type** under **skills** of the ability to match, the matching is successful.
-2. When there are multiple matching applications, a dialog box is displayed for you to select one of them.
+If there are multiple matching applications, the system displays a dialog box for you to select one of them. The following figure shows an example.
- 
+
\ No newline at end of file
diff --git a/en/application-dev/application-models/abilitystage.md b/en/application-dev/application-models/abilitystage.md
index 4e0a273f850b4919d0964580ebed89c053c273f7..9a4e71d3fa696ee6f2707545b80456df34fe85ac 100644
--- a/en/application-dev/application-models/abilitystage.md
+++ b/en/application-dev/application-models/abilitystage.md
@@ -29,6 +29,18 @@ AbilityStage is not automatically generated in the default project of DevEco Stu
}
}
```
+
+4. Set **srcEntry** in the [module.json5 file](../quick-start/module-configuration-file.md) to the code path of the module.
+ ```json
+ {
+ "module": {
+ "name": "entry",
+ "type": "entry",
+ "srcEntry": "./ets/myabilitystage/MyAbilityStage.ts",
+ // ...
+ }
+ }
+ ```
[AbilityStage](../reference/apis/js-apis-app-ability-abilityStage.md) has the lifecycle callback [onCreate()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageoncreate) and the event callbacks [onAcceptWant()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonacceptwant), [onConfigurationUpdated()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonconfigurationupdate), and [onMemoryLevel()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonmemorylevel).
@@ -41,6 +53,7 @@ AbilityStage is not automatically generated in the default project of DevEco Stu
- **onConfigurationUpdated()** event callback: triggered when the global system configuration changes. The global system configuration, such as the system language and theme, are defined in the [Configuration](../reference/apis/js-apis-app-ability-configuration.md) class before project configuration.
- **onMemoryLevel()** event callback: triggered when the system adjusts the memory.
+
When an application is switched to the background, it is cached in the background. This adversely affects the overall system performance. When system resources are insufficient, the system reclaims memory from applications in multiple ways. For example, the system may stop applications to release memory for executing key tasks. To further maintain the balance of the system memory and prevent the system from stopping application processes, you can subscribe to the system memory changes in the **onMemoryLevel()** lifecycle callback of AbilityStage to release unnecessary resources.
@@ -54,4 +67,3 @@ When an application is switched to the background, it is cached in the backgroun
}
}
```
-
diff --git a/en/application-dev/application-models/application-context-stage.md b/en/application-dev/application-models/application-context-stage.md
index de07a3600a27b619f144a4f22223e17616f80805..5308d1899a46ffb9907a18bd74a7920627ac720b 100644
--- a/en/application-dev/application-models/application-context-stage.md
+++ b/en/application-dev/application-models/application-context-stage.md
@@ -19,10 +19,10 @@
```ts
import UIAbility from '@ohos.app.ability.UIAbility';
export default class EntryAbility extends UIAbility {
- onCreate(want, launchParam) {
- let uiAbilityContext = this.context;
- // ...
- }
+ onCreate(want, launchParam) {
+ let uiAbilityContext = this.context;
+ // ...
+ }
}
```
@@ -34,21 +34,21 @@
```ts
import ServiceExtensionAbility from '@ohos.app.ability.ServiceExtensionAbility';
export default class MyService extends ServiceExtensionAbility {
- onCreate(want) {
- let serviceExtensionContext = this.context;
- // ...
- }
+ onCreate(want) {
+ let serviceExtensionContext = this.context;
+ // ...
+ }
}
```
- [AbilityStageContext](../reference/apis/js-apis-inner-application-abilityStageContext.md): module-level context. It provides **HapModuleInfo** and **Configuration** in addition to those provided by the base class **Context**.
```ts
- import AbilityStage from "@ohos.app.ability.AbilityStage";
+ import AbilityStage from '@ohos.app.ability.AbilityStage';
export default class MyAbilityStage extends AbilityStage {
- onCreate() {
- let abilityStageContext = this.context;
- // ...
- }
+ onCreate() {
+ let abilityStageContext = this.context;
+ // ...
+ }
}
```
- [ApplicationContext](../reference/apis/js-apis-inner-application-applicationContext.md): application-level context. It provides APIs for subscribing to application component lifecycle changes, system memory changes, and system environment changes. The application-level context can be obtained from UIAbility, ExtensionAbility, and AbilityStage.
@@ -56,10 +56,10 @@
```ts
import UIAbility from '@ohos.app.ability.UIAbility';
export default class EntryAbility extends UIAbility {
- onCreate(want, launchParam) {
- let applicationContext = this.context.getApplicationContext();
- // ...
- }
+ onCreate(want, launchParam) {
+ let applicationContext = this.context.getApplicationContext();
+ // ...
+ }
}
```
@@ -71,7 +71,7 @@ This topic describes how to use the context in the following scenarios:
- [Obtaining the Application Development Path](#obtaining-the-application-development-path)
-- [Obtaining and Modifying Encrypted Areas](#obtaining-and-modifying-encrypted-areas)
+- [Obtaining and Modifying Encryption Areas](#obtaining-and-modifying-encryption-areas)
- [Creating Context of Another Application or Module](#creating-context-of-another-application-or-module)
- [Subscribing to UIAbility Lifecycle Changes in a Process](#subscribing-to-uiability-lifecycle-changes-in-a-process)
@@ -84,13 +84,13 @@ The following table describes the application development paths obtained from co
| Name| Type| Readable| Writable| Description|
| -------- | -------- | -------- | -------- | -------- |
-| cacheDir | string | Yes| No| Cache directory of the application on the internal storage.<br>It is the content of **Storage** of an application under **Settings > Apps & services > Apps**.|
-| tempDir | string | Yes| No| Temporary file directory of the application.<br>Files in this directory are deleted after the application is uninstalled.|
-| filesDir | string | Yes| No| File directory of the application on the internal storage.<br>Files in this directory may be synchronized to other directories during application migration or backup.|
-| databaseDir | string | Yes| No| Storage directory of the local database.|
-| bundleCodeDir | string | Yes| No| Installation directory of the application on the internal storage.|
-| distributedFilesDir | string | Yes| No| Storage directory of distributed application data files.|
-| preferencesDir | string | Yes| Yes| Preferences directory of the application.|
+| bundleCodeDir | string | Yes | No | Path for storing the application's installation package, that is, installation directory of the application on the internal storage. |
+| cacheDir | string | Yes| No| Path for storing the application's cache files, that is, cache directory of the application on the internal storage.<br>It is the content of **Storage** of an application under **Settings > Apps & services > Apps**.|
+| filesDir | string | Yes | No | Path for storing the application's common files, that is, file directory of the application on the internal storage.<br>Files in this directory may be synchronized to other directories during application migration or backup.|
+| preferencesDir | string | Yes | Yes | Path for storing the application's preference files, that is, preferences directory of the application. |
+| tempDir | string | Yes | No | Path for storing the application's temporary files.<br>Files in this directory are deleted after the application is uninstalled.|
+| databaseDir | string | Yes | No | Path for storing the application's database, that is, storage directory of the local database. |
+| distributedFilesDir | string | Yes| No| Path for storing the application's distributed files.|
The capability of obtaining the application development path is provided by the base class **Context**. This capability is also provided by **ApplicationContext**, **AbilityStageContext**, **UIAbilityContext**, and **ExtensionContext**. However, the paths obtained from different contexts may differ, as shown below.
@@ -127,16 +127,16 @@ The sample code for obtaining the application development paths is as follows:
import UIAbility from '@ohos.app.ability.UIAbility';
export default class EntryAbility extends UIAbility {
- onCreate(want, launchParam) {
- let cacheDir = this.context.cacheDir;
- let tempDir = this.context.tempDir;
- let filesDir = this.context.filesDir;
- let databaseDir = this.context.databaseDir;
- let bundleCodeDir = this.context.bundleCodeDir;
- let distributedFilesDir = this.context.distributedFilesDir;
- let preferencesDir = this.context.preferencesDir;
- // ...
- }
+ onCreate(want, launchParam) {
+ let cacheDir = this.context.cacheDir;
+ let tempDir = this.context.tempDir;
+ let filesDir = this.context.filesDir;
+ let databaseDir = this.context.databaseDir;
+ let bundleCodeDir = this.context.bundleCodeDir;
+ let distributedFilesDir = this.context.distributedFilesDir;
+ let preferencesDir = this.context.preferencesDir;
+ // ...
+ }
}
```
@@ -144,45 +144,52 @@ export default class EntryAbility extends UIAbility {
>
> The sample code obtains the sandbox path of the application development path. The absolute path can be obtained by running the **find / -name <fileName>** command in the hdc shell after file creation or modification.
-### Obtaining and Modifying Encrypted Areas
+### Obtaining and Modifying Encryption Areas
-You can read and write [the area attribute in the context](../reference/apis/js-apis-inner-application-context.md) to obtain and set an encrypted area. Two encryption levels are supported:
+Encrypting application files enhances data security by protecting them from unauthorized access. Different application files require different levels of protection. For private files, such as alarms and wallpapers, the application must place them in the device-level encryption area (EL1) to ensure that they can be accessed before the user enters the password. For sensitive files, such as personal privacy data, the application must place them in the user-level encryption area (EL2).
-- AreaMode.EL1: device-level encryption area, which is accessible after the device is powered on.
+In practice, you need to select a proper encryption area based on scenario-specific requirements to protect application data security. The proper use of EL1 and EL2 effectively improves security.
-- AreaMode.EL2: user-level encryption area, which is accessible only after the device is powered on and the password is entered (for the first time).
+> **NOTE**
+>
+> - AreaMode.EL1: device-level encryption area, which is accessible after the device is powered on.
+>
+> - AreaMode.EL2: user-level encryption area, which is accessible only after the device is powered on and the password is entered (for the first time).
+
+You can obtain and set the encryption area by reading and writing the [area attribute in Context](../reference/apis/js-apis-inner-application-context.md).
```ts
import UIAbility from '@ohos.app.ability.UIAbility';
export default class EntryAbility extends UIAbility {
- onCreate(want, launchParam) {
- // Before storing common information, switch the encryption level to EL1.
- if (this.context.area === 1) {// Obtain the area.
- this.context.area = 0; // Modify the area.
- }
- // Store common information.
-
- // Before storing sensitive information, switch the encryption level to EL2.
- if (this.context.area === 0) { // Obtain the area.
- this.context.area = 1; // Modify the area.
- }
- // Store sensitive information.
+ onCreate(want, launchParam) {
+ // Before storing common information, switch the encryption level to EL1.
+ if (this.context.area === 1) {// Obtain the area.
+ this.context.area = 0; // Modify the area.
}
+ // Store common information.
+
+ // Before storing sensitive information, switch the encryption level to EL2.
+ if (this.context.area === 0) { // Obtain the area.
+ this.context.area = 1; // Modify the area.
+ }
+ // Store sensitive information.
+ }
}
```
### Creating Context of Another Application or Module
-The base class **Context** provides the [createBundleContext(bundleName:string)](../reference/apis/js-apis-inner-application-context.md#contextcreatebundlecontext), [createModuleContext(moduleName:string)](../reference/apis/js-apis-inner-application-context.md#contextcreatemodulecontext), and [createModuleContext(bundleName:string, moduleName:string)](../reference/apis/js-apis-inner-application-context.md#contextcreatemodulecontext-1) methods for creating the context of other applications or modules, so as to obtain the resource information, for example, [obtaining the application development paths](#obtaining-the-application-development-path) of other modules.
+The base class **Context** provides [createBundleContext(bundleName:string)](../reference/apis/js-apis-inner-application-context.md#contextcreatebundlecontext), [createModuleContext(moduleName:string)](../reference/apis/js-apis-inner-application-context.md#contextcreatemodulecontext), and [createModuleContext(bundleName:string, moduleName:string)](../reference/apis/js-apis-inner-application-context.md#contextcreatemodulecontext-1) to create the context of other applications or modules, so as to obtain the resource information, for example, [obtaining the application development paths](#obtaining-the-application-development-path) of other modules.
- Call **createBundleContext(bundleName:string)** to create the context of another application.
> **NOTE**
>
> To obtain the context of another application:
>
- > - Request the **ohos.permission.GET_BUNDLE_INFO_PRIVILEGED** permission. For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
+ > - Request the **ohos.permission.GET_BUNDLE_INFO_PRIVILEGED** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
+ >
> - This is a system API and cannot be called by third-party applications.
For example, application information displayed on the home screen includes the application name and icon. The home screen application calls the foregoing method to obtain the context information, so as to obtain the resource information including the application name and icon.
@@ -191,12 +198,12 @@ The base class **Context** provides the [createBundleContext(bundleName:string)]
import UIAbility from '@ohos.app.ability.UIAbility';
export default class EntryAbility extends UIAbility {
- onCreate(want, launchParam) {
- let bundleName2 = "com.example.application";
- let context2 = this.context.createBundleContext(bundleName2);
- let label2 = context2.applicationInfo.label;
- // ...
- }
+ onCreate(want, launchParam) {
+ let bundleName2 = 'com.example.application';
+ let context2 = this.context.createBundleContext(bundleName2);
+ let label2 = context2.applicationInfo.label;
+ // ...
+ }
}
```
@@ -205,99 +212,113 @@ The base class **Context** provides the [createBundleContext(bundleName:string)]
>
> To obtain the context of a specified module of another application:
>
- > - Request the **ohos.permission.GET_BUNDLE_INFO_PRIVILEGED** permission. For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
+ > - Request the **ohos.permission.GET_BUNDLE_INFO_PRIVILEGED** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
+ >
> - This is a system API and cannot be called by third-party applications.
-
+
```ts
import UIAbility from '@ohos.app.ability.UIAbility';
export default class EntryAbility extends UIAbility {
- onCreate(want, launchParam) {
- let bundleName2 = "com.example.application";
- let moduleName2 = "module1";
- let context2 = this.context.createModuleContext(bundleName2, moduleName2);
- // ...
- }
+ onCreate(want, launchParam) {
+ let bundleName2 = 'com.example.application';
+ let moduleName2 = 'module1';
+ let context2 = this.context.createModuleContext(bundleName2, moduleName2);
+ // ...
+ }
}
```
-
+
- Call **createModuleContext(moduleName:string)** to obtain the context of another module in the current application. After obtaining the context, you can obtain the resource information of that module.
```ts
import UIAbility from '@ohos.app.ability.UIAbility';
export default class EntryAbility extends UIAbility {
- onCreate(want, launchParam) {
- let moduleName2 = "module1";
- let context2 = this.context.createModuleContext(moduleName2);
- // ...
- }
+ onCreate(want, launchParam) {
+ let moduleName2 = 'module1';
+ let context2 = this.context.createModuleContext(moduleName2);
+ // ...
+ }
}
```
### Subscribing to UIAbility Lifecycle Changes in a Process
-In the DFX statistics scenario of an application, if you need to collect statistics on the stay duration and access frequency of a page, you can subscribe to UIAbility lifecycle changes.
+In the DFX statistics scenario of an application, if you need to collect statistics on the stay duration and access frequency of a page, you can subscribe to UIAbility lifecycle changes in a process.
-[ApplicationContext](../reference/apis/js-apis-inner-application-applicationContext.md) provides APIs for subscribing to UIAbility lifecycle changes in a process. When the UIAbility lifecycle changes in a process, for example, being created or destroyed, becoming visible or invisible, or gaining or losing focus, the corresponding callback is triggered, and a listener ID is returned. The ID is incremented by 1 each time the listener is registered. When the number of listeners exceeds the upper limit (2^63-1), -1 is returned. The following uses [UIAbilityContext](../reference/apis/js-apis-inner-application-uiAbilityContext.md) as an example.
+[ApplicationContext](../reference/apis/js-apis-inner-application-applicationContext.md) provides APIs for subscribing to UIAbility lifecycle changes in a process. When the UIAbility lifecycle changes in a process, for example, being created or destroyed, becoming visible or invisible, or gaining or losing focus, the corresponding callback is triggered. Each time the callback is registered, a listener lifecycle ID is returned, incremented by 1 for each registration. When the number of listeners exceeds the upper limit (2^63-1), **-1** is returned. The following uses [UIAbilityContext](../reference/apis/js-apis-inner-application-uiAbilityContext.md) as an example.
```ts
import UIAbility from '@ohos.app.ability.UIAbility';
import window from '@ohos.window';
-const TAG: string = "[Example].[Entry].[EntryAbility]";
+const TAG: string = '[Example].[Entry].[EntryAbility]';
export default class EntryAbility extends UIAbility {
- lifecycleId: number;
-
- onCreate(want, launchParam) {
- let abilityLifecycleCallback = {
- onAbilityCreate(uiability) {
- console.info(TAG, "onAbilityCreate uiability:" + JSON.stringify(uiability));
- },
- onWindowStageCreate(uiability, windowStage) {
- console.info(TAG, "onWindowStageCreate uiability:" + JSON.stringify(uiability));
- console.info(TAG, "onWindowStageCreate windowStage:" + JSON.stringify(windowStage));
- },
- onWindowStageActive(uiability, windowStage) {
- console.info(TAG, "onWindowStageActive uiability:" + JSON.stringify(uiability));
- console.info(TAG, "onWindowStageActive windowStage:" + JSON.stringify(windowStage));
- },
- onWindowStageInactive(uiability, windowStage) {
- console.info(TAG, "onWindowStageInactive uiability:" + JSON.stringify(uiability));
- console.info(TAG, "onWindowStageInactive windowStage:" + JSON.stringify(windowStage));
- },
- onWindowStageDestroy(uiability, windowStage) {
- console.info(TAG, "onWindowStageDestroy uiability:" + JSON.stringify(uiability));
- console.info(TAG, "onWindowStageDestroy windowStage:" + JSON.stringify(windowStage));
- },
- onAbilityDestroy(uiability) {
- console.info(TAG, "onAbilityDestroy uiability:" + JSON.stringify(uiability));
- },
- onAbilityForeground(uiability) {
- console.info(TAG, "onAbilityForeground uiability:" + JSON.stringify(uiability));
- },
- onAbilityBackground(uiability) {
- console.info(TAG, "onAbilityBackground uiability:" + JSON.stringify(uiability));
- },
- onAbilityContinue(uiability) {
- console.info(TAG, "onAbilityContinue uiability:" + JSON.stringify(uiability));
- }
- }
- // 1. Obtain the application context through the context attribute.
- let applicationContext = this.context.getApplicationContext();
- // 2. Register a listener for the lifecycle changes through the application context.
- this.lifecycleId = applicationContext.on("abilityLifecycle", abilityLifecycleCallback);
- console.info(TAG, "register callback number: " + JSON.stringify(this.lifecycleId));
+ // Define a lifecycle ID.
+ lifecycleId: number;
+
+ onCreate(want, launchParam) {
+ // Define a lifecycle callback object.
+ let abilityLifecycleCallback = {
+ // Called when a UIAbility is created.
+ onAbilityCreate(uiAbility) {
+ console.log(TAG, `onAbilityCreate uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`);
+ },
+ // Called when a window is created.
+ onWindowStageCreate(uiAbility, windowStage: window.WindowStage) {
+ console.log(TAG, `onWindowStageCreate uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`);
+ console.log(TAG, `onWindowStageCreate windowStage: ${JSON.stringify(windowStage)}`);
+ },
+ // Called when the window becomes active.
+ onWindowStageActive(uiAbility, windowStage: window.WindowStage) {
+ console.log(TAG, `onWindowStageActive uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`);
+ console.log(TAG, `onWindowStageActive windowStage: ${JSON.stringify(windowStage)}`);
+ },
+ // Called when the window becomes inactive.
+ onWindowStageInactive(uiAbility, windowStage: window.WindowStage) {
+ console.log(TAG, `onWindowStageInactive uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`);
+ console.log(TAG, `onWindowStageInactive windowStage: ${JSON.stringify(windowStage)}`);
+ },
+ // Called when the window is destroyed.
+ onWindowStageDestroy(uiAbility, windowStage: window.WindowStage) {
+ console.log(TAG, `onWindowStageDestroy uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`);
+ console.log(TAG, `onWindowStageDestroy windowStage: ${JSON.stringify(windowStage)}`);
+ },
+ // Called when the UIAbility is destroyed.
+ onAbilityDestroy(uiAbility) {
+ console.log(TAG, `onAbilityDestroy uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`);
+ },
+ // Called when the UIAbility is switched from the background to the foreground.
+ onAbilityForeground(uiAbility) {
+ console.log(TAG, `onAbilityForeground uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`);
+ },
+ // Called when the UIAbility is switched from the foreground to the background.
+ onAbilityBackground(uiAbility) {
+ console.log(TAG, `onAbilityBackground uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`);
+ },
+ // Called when UIAbility is continued on another device.
+ onAbilityContinue(uiAbility) {
+ console.log(TAG, `onAbilityContinue uiAbility.launchWant: ${JSON.stringify(uiAbility.launchWant)}`);
+ }
}
+ // Obtain the application context.
+ let applicationContext = this.context.getApplicationContext();
+ // Register the application lifecycle callback.
+ this.lifecycleId = applicationContext.on('abilityLifecycle', abilityLifecycleCallback);
+ console.log(TAG, `register callback number: ${this.lifecycleId}`);
+ }
- onDestroy() {
- let applicationContext = this.context.getApplicationContext();
- applicationContext.off("abilityLifecycle", this.lifecycleId, (error, data) => {
- console.info(TAG, "unregister callback success, err: " + JSON.stringify(error));
- });
- }
+ // ...
+
+ onDestroy() {
+ // Obtain the application context.
+ let applicationContext = this.context.getApplicationContext();
+ // Deregister the application lifecycle callback.
+ applicationContext.off('abilityLifecycle', this.lifecycleId);
+ }
}
```
diff --git a/en/application-dev/application-models/common-event-overview.md b/en/application-dev/application-models/common-event-overview.md
index 0d3788b41b516d0af9619d320ceeefc3f52c74c5..e8be9abaa3015a5512c47af55d2f364be0de79ad 100644
--- a/en/application-dev/application-models/common-event-overview.md
+++ b/en/application-dev/application-models/common-event-overview.md
@@ -15,14 +15,15 @@ Common events are classified into system common events and custom common events.
Common events are also classified into unordered, ordered, and sticky common events.
-- Unordered common events: CES forwards common events based on the subscription sequence, regardless of whether subscribers receive the events.
+- Unordered common events: common events that CES forwards regardless of whether subscribers receive the events and when they subscribe to the events.
-- Ordered common event: CES forwards common events to the next subscriber only after receiving a reply from the previous subscriber.
+- Ordered common events: common events that CES forwards based on the subscriber priority. CES forwards common events to the subscriber with lower priority only after receiving a reply from the previous subscriber with higher priority. Subscribers with the same priority receive common events in a random order.
-- Sticky common event: a public event that can be sent to a subscriber before they initiate a subscription. Only system applications or system services can send sticky common event, and they must request the **ohos.permission.COMMONEVENT_STICKY** permission. For details about the configuration, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
+- Sticky common events: common events that can be sent to a subscriber before or after they initiate a subscription. Only system applications and system services can send sticky common events, which remain in the system after being sent. The senders must first request the **ohos.permission.COMMONEVENT_STICKY** permission; a minimal publish sketch is provided after this list. For details about the configuration, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
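+
+The following is a minimal sketch of publishing a sticky common event from a system application or system service; the event name is an example:
+
+```ts
+import commonEventManager from '@ohos.commonEventManager';
+
+// isSticky marks the event as sticky. The sender must have requested
+// the ohos.permission.COMMONEVENT_STICKY permission.
+let options = {
+  isSticky: true
+}
+commonEventManager.publish('sticky_event_example', options, (err) => {
+  if (err) {
+    console.error(`publish failed, err: ${JSON.stringify(err)}`)
+  } else {
+    console.info('publish sticky event succeeded')
+  }
+})
+```
+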
Each application can subscribe to common events as required. After your application subscribes to a common event, the system sends it to your application every time the event is published. Such an event may be published by the system, other applications, or your own application.
**Figure 1** Common events
+

\ No newline at end of file
diff --git a/en/application-dev/application-models/common-event-static-subscription.md b/en/application-dev/application-models/common-event-static-subscription.md
new file mode 100644
index 0000000000000000000000000000000000000000..85852f5712df84107c6593160d276ed33557baf9
--- /dev/null
+++ b/en/application-dev/application-models/common-event-static-subscription.md
@@ -0,0 +1,105 @@
+# Subscribing to Common Events in Static Mode (for System Applications Only)
+
+## When to Use
+
+A static subscriber is started once it receives a target event published by the system or an application, and its **onReceiveEvent** callback is triggered, in which you can implement the service logic. For example, if an application needs to execute some initialization tasks during device power-on, it can subscribe to the power-on event in static mode; after receiving the power-on event, the application is started and executes those tasks. Subscribing to a common event in static mode is achieved by configuring a declaration file and implementing a class that inherits from **StaticSubscriberExtensionAbility**. Note that this subscription mode has a negative impact on system power consumption, so exercise caution when using it.
+
+## How to Develop
+
+1. Declare a static subscriber.
+
+   To declare a static subscriber, create an ExtensionAbility derived from the **StaticSubscriberExtensionAbility** class in the project. The sample code is as follows:
+
+ ```ts
+ import StaticSubscriberExtensionAbility from '@ohos.application.StaticSubscriberExtensionAbility'
+
+ export default class StaticSubscriber extends StaticSubscriberExtensionAbility {
+ onReceiveEvent(event) {
+ console.log('onReceiveEvent, event:' + event.event);
+ }
+ }
+ ```
+
+ You can implement service logic in the **onReceiveEvent** callback.
+
+2. Configure the static subscriber in the project.
+
+ After writing the static subscriber code, configure the subscriber in the **module.json5** file. The configuration format is as follows:
+
+   ```json
+ {
+ "module": {
+       // ...
+ "extensionAbilities": [
+ {
+ "name": "StaticSubscriber",
+ "srcEntrance": "./ets/StaticSubscriber/StaticSubscriber.ts",
+ "description": "$string:StaticSubscriber_desc",
+ "icon": "$media:icon",
+ "label": "$string:StaticSubscriber_label",
+ "type": "staticSubscriber",
+ "visible": true,
+ "metadata": [
+ {
+ "name": "ohos.extension.staticSubscriber",
+ "resource": "$profile:subscribe"
+ }
+ ]
+ }
+ ]
+       // ...
+ }
+ }
+ ```
+
+ Pay attention to the following fields in the JSON file:
+
+   - **srcEntrance**: entry file path of the ExtensionAbility, that is, the file path of the static subscriber declared in Step 1.
+
+ - **type**: ExtensionAbility type. For a static subscriber, set this field to **staticSubscriber**.
+
+ - **metadata**: level-2 configuration file information of the ExtensionAbility. The configuration information varies according to the ExtensionAbility type. Therefore, you must use different config files to indicate the specific configuration.
+ - **name**: name of the ExtensionAbility. For a static subscriber, declare the name as **ohos.extension.staticSubscriber** for successful identification.
+ - **resource**: path that stores the ExtensionAbility configuration, which is customizable. In this example, the path is **resources/base/profile/subscribe.json**.
+
+ A level-2 configuration file pointed to by **metadata** must be in the following format:
+
+   ```json
+ {
+ "commonEvents": [
+ {
+ "name": "xxx",
+ "permission": "xxx",
+ "events":[
+ "xxx"
+ ]
+ }
+ ]
+ }
+ ```
+
+ If the level-2 configuration file is not declared in this format, the file cannot be identified. The fields are described as follows:
+
+ - **name**: name of the ExtensionAbility, which must be the same as the name of **extensionAbility** declared in **module.json5**.
+
+ - **permission**: permission required for the publisher. If a publisher without the required permission attempts to publish an event, the event is regarded as invalid and will not be published.
+
+ - **events**: list of target events to subscribe to.
+
+3. Configure the device system.
+
+ In the device system configuration file **/system/etc/app/install_list_capability.json**, add the bundle name of the static subscriber.
+
+ ```json
+ {
+ "install_list": [
+ {
+ "bundleName": "ohos.extension.staticSubscriber",
+ "allowCommonEvent": ["usual.event.A", "usual.event.B"],
+ }
+ ]
+ }
+ ```
+
diff --git a/en/application-dev/application-models/common-event-subscription-overview.md b/en/application-dev/application-models/common-event-subscription-overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..20064af92d3df2e6f7ab7d62c4f71f911848057a
--- /dev/null
+++ b/en/application-dev/application-models/common-event-subscription-overview.md
@@ -0,0 +1,7 @@
+# Common Event Subscription Overview
+
+The common event service provides two subscription modes: dynamic and static. The biggest difference between these two modes is that dynamic subscription requires the application to be running, while static subscription does not.
+
+- In dynamic subscription mode, a subscriber subscribes to common events by calling an API during the running period. For details, see [Subscribing to Common Events in Dynamic Mode](common-event-subscription.md). A minimal sketch is provided after this list.
+
+- In static subscription mode, a subscriber subscribes to common events by configuring a declaration file and implementing a class that inherits from StaticSubscriberExtensionAbility. For details, see [Subscribing to Common Events in Static Mode](common-event-static-subscription.md).
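+
+The following is a minimal sketch of the dynamic mode; the subscribed event name is an example:
+
+```ts
+import commonEventManager from '@ohos.commonEventManager';
+
+// Create a subscriber for the target events, then subscribe to them.
+let subscribeInfo = {
+  events: ['usual.event.SCREEN_ON']
+}
+commonEventManager.createSubscriber(subscribeInfo, (err, subscriber) => {
+  if (err) {
+    console.error(`createSubscriber failed, err: ${JSON.stringify(err)}`)
+    return
+  }
+  commonEventManager.subscribe(subscriber, (err, data) => {
+    if (err) {
+      console.error(`subscribe failed, err: ${JSON.stringify(err)}`)
+      return
+    }
+    console.info(`received event: ${data.event}`)
+  })
+})
+```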
diff --git a/en/application-dev/application-models/common-event-subscription.md b/en/application-dev/application-models/common-event-subscription.md
index ce61e40458a7cbd5c9ec226138535da93d3766b1..6cdc52ef9b798e48a911892f965db8fbf2aaa67f 100644
--- a/en/application-dev/application-models/common-event-subscription.md
+++ b/en/application-dev/application-models/common-event-subscription.md
@@ -1,9 +1,9 @@
-# Subscribing to Common Events
+# Subscribing to Common Events in Dynamic Mode
## When to Use
-You can create a subscriber object to subscribe to a common event so as to obtain the parameters passed in the event. Certain system common events [require specific permissions](../security/accesstoken-guidelines.md) to subscribe to. For details, see [Required Permissions](../reference/apis/js-apis-commonEventManager.md#support).
+In dynamic subscription mode, an application subscribes to a common event while it is running. If the subscribed event is published during this period, the subscriber application receives the event, together with the parameters passed in it. For example, an application that wants to reduce power consumption when the battery is low can subscribe to the low-battery event; upon receiving the event, it can close unnecessary tasks. Certain system common events [require specific permissions](../security/accesstoken-guidelines.md) to subscribe to. For details, see [Required Permissions](../reference/apis/js-apis-commonEventManager.md#support).
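+
+A minimal sketch of the low-battery scenario above (**usual.event.BATTERY_LOW** is a predefined system common event; where the subscription code lives in your application is up to you):
+
+```ts
+import commonEventManager from '@ohos.commonEventManager';
+
+let subscriber = null;
+let subscribeInfo = {
+  events: ['usual.event.BATTERY_LOW'] // Low-battery system common event.
+};
+
+// Create a subscriber, then subscribe to the event with it.
+commonEventManager.createSubscriber(subscribeInfo, (err, data) => {
+  if (err) {
+    console.error(`createSubscriber failed, code is ${err.code}`);
+    return;
+  }
+  subscriber = data;
+  commonEventManager.subscribe(subscriber, (err, eventData) => {
+    if (err) {
+      console.error(`subscribe failed, code is ${err.code}`);
+      return;
+    }
+    // Reduce power consumption here, for example, by pausing unnecessary tasks.
+    console.info(`received event: ${eventData.event}`);
+  });
+});
+```
+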
## Available APIs
diff --git a/en/application-dev/application-models/common-event-unsubscription.md b/en/application-dev/application-models/common-event-unsubscription.md
index c87017ef08c05e8a22097c4bd2a05f52fc758134..1a44675d61947325a8c1a8790ff5f53626e43f57 100644
--- a/en/application-dev/application-models/common-event-unsubscription.md
+++ b/en/application-dev/application-models/common-event-unsubscription.md
@@ -1,9 +1,9 @@
-# Unsubscribing from Common Events
+# Unsubscribing from Common Events in Dynamic Mode
## When to Use
-You can call [unsubscribe()](../reference/apis/js-apis-commonEventManager.md#commoneventmanagerunsubscribe) to unsubscribe from a common event that is no longer required.
+In dynamic subscription mode, you can call [unsubscribe()](../reference/apis/js-apis-commonEventManager.md#commoneventmanagerunsubscribe) to unsubscribe from a common event that is no longer required.
## Available APIs
@@ -21,12 +21,12 @@ You can call [unsubscribe()](../reference/apis/js-apis-commonEventManager.md#com
import commonEventManager from '@ohos.commonEventManager';
```
-2. Subscribe to an event by following the procedure described in [Subscribing to Common Events](common-event-subscription.md).
+2. Subscribe to an event by following the procedure described in [Subscribing to Common Events in Dynamic Mode](common-event-subscription.md).
3. Call **unsubscribe** in **CommonEvent** to unsubscribe from the common event.
```ts
- // The subscriber object iscreated during event subscription.
+ // The subscriber object is created during event subscription.
if (subscriber !== null) {
commonEventManager.unsubscribe(subscriber, (err) => {
if (err) {
diff --git a/en/application-dev/application-models/component-startup-rules.md b/en/application-dev/application-models/component-startup-rules.md
index 0e6c2ce33c68913221c7b09f02e96327b0ea1c30..26b2446893aea096611f896e878ef15888830afa 100644
--- a/en/application-dev/application-models/component-startup-rules.md
+++ b/en/application-dev/application-models/component-startup-rules.md
@@ -30,7 +30,7 @@ In view of this, OpenHarmony formulates a set of component startup rules, as fol
- An application is considered as a foreground application only when the application process gains focus or its UIAbility component is running in the foreground.
- Verify the **ohos.permission.START_ABILITIES_FROM_BACKGROUND** permission.
-- **When the startAbilityByCall() method is used, verify the call permission.** For details, see [Using Ability Call to Implement UIAbility Interaction](uiability-intra-device-interaction.md#using-ability-call-to-implement-uiability-interaction) and [Using Cross-Device Ability Call](hop-multi-device-collaboration.md#using-cross-device-ability-call).
+- **When the startAbilityByCall() method is used, verify the call permission.** For details, see [Using Call to Implement UIAbility Interaction](uiability-intra-device-interaction.md#using-call-to-implement-uiability-interaction) and [Using Cross-Device Call](hop-multi-device-collaboration.md#using-cross-device-call).
- Verify the **ohos.permission.ABILITY_BACKGROUND_COMMUNICATION** permission.
diff --git a/en/application-dev/application-models/create-pageability.md b/en/application-dev/application-models/create-pageability.md
index 783646ff4cfd5fa2ab193005bfa9d182dc75b70c..d0f308ebc08e035d5568ee0e127a9739e400d124 100644
--- a/en/application-dev/application-models/create-pageability.md
+++ b/en/application-dev/application-models/create-pageability.md
@@ -76,22 +76,22 @@ In the FA model, you can call **getContext** of **featureAbility** to obtain the
The following code snippet shows how to use **getContext()** to obtain the application context and distributed directory:
```ts
-import featureAbility from '@ohos.ability.featureAbility'
-import fileIo from '@ohos.fileio'
+import featureAbility from '@ohos.ability.featureAbility';
+import fs from '@ohos.file.fs';
(async () => {
- let dir: string
+ let dir: string;
try {
- console.info('Begin to getOrCreateDistributedDir')
- dir = await featureAbility.getContext().getOrCreateDistributedDir()
+ console.info('Begin to getOrCreateDistributedDir');
+ dir = await featureAbility.getContext().getOrCreateDistributedDir();
console.info('distribute dir is ' + dir)
} catch (error) {
- console.error('getOrCreateDistributedDir failed with ' + error)
+ console.error('getOrCreateDistributedDir failed with ' + error);
}
let fd: number;
let path = dir + "/a.txt";
- fd = fileIo.openSync(path, 0o2 | 0o100, 0o666);
- fileIo.close(fd);
+    fd = fs.openSync(path, fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE).fd;
+ fs.close(fd);
})()
```
diff --git a/en/application-dev/application-models/data-share-via-want.md b/en/application-dev/application-models/data-share-via-want.md
index c04bea2916647804b51022cee1853f3b5d0a7d90..d5512e0c446b94dcf384504f11ff25d458cfeafc 100644
--- a/en/application-dev/application-models/data-share-via-want.md
+++ b/en/application-dev/application-models/data-share-via-want.md
@@ -1,111 +1,133 @@
# Using Want to Share Data Between Applications
-
Users often need to share data (such as a text or an image) from one application to another. The following uses PDF file sharing as an example to describe how to use Want to share data between applications.
+Data sharing requires two UIAbility components (one for the sharing party and the other for the shared party) and one system component (used as the application sharing box). When the sharing party initiates data sharing by calling **startAbility()**, the system implicitly matches and displays all applications that support the type of data to share. After the user selects an application, the system starts the application to complete data sharing.
-## Prerequisites
-
-1. There are two UIAbility components (one for the sharing party and the other for the shared party) and one system component (used as the application selector). When the sharing party initiates data sharing through **startAbility()**, the application selector is started. The system implicitly matches and displays all applications that support the type of data to share. After the user selects an application, the system starts that application to complete data sharing.
-
-2. In this section, data sharing is triggered by touching a button. You can use other ways to trigger data sharing during application development. This section focuses on the Want configuration used for data sharing.
-
-3. The following actions are involved in this section:
- - **ACTION_SELECT (ohos.want.action.select)**: action of displaying the application selector.
- - **ACTION_SEND_DATA (ohos.want.action.sendData)**: action of launching the UI for sending a single data record. It is used to transfer data to the shared party.
-
-
-## How to Develop
-
-- Sharing party
- 1. In the stage mode, the [File Descriptor (FD)](../reference/apis/js-apis-fileio.md#fileioopensync) is used for file transfer. This example assumes that the path of the file to share is obtained.
-
- ```ts
- import fileIO from '@ohos.fileio';
-
- // let path = ...
- // Open the file whose path is a variable.
- let fileFd = fileIO.openSync(path, 0o102, 0o666);
- ```
-
- 2. As described in the prerequisites, the sharing party starts an application selector and shares the data to the selector, and the selector transfers the data to the shared party. Want of the sharing party must be nested at two layers. At the first layer, implicit Want is used together with the **ohos.want.action.select** action to display the application selector. At the second layer, complete Want is declared in the custom field **parameters** to transfer the data to share.
-
- ```ts
- import wantConstant from '@ohos.app.ability.wantConstant';
-
- // let path = ...
- // let fileFd = ...
- // let fileSize = ...
- let want = {
- / This action is used to implicitly match the application selector.
- action: wantConstant.Action.ACTION_SELECT,
- // This is the custom parameter in the first layer of Want,
- / which is intended to add information to the application selector.
- parameters: {
- // MIME type of PDF.
- "ability.picker.type": "application/pdf",
- "ability.picker.fileNames": [path],
- "ability.picker.fileSizes": [fileSize],
- // This nested Want ,which will be directly sent to the selected application.
- "ability.want.params.INTENT": {
- "action": "ohos.want.action.sendData",
- "type": "application/pdf",
- "parameters": {
- "keyFd": {"type": "FD", "value": fileFd}
- }
- }
- }
+In this section, data sharing is triggered by touching a button. You can use other ways to trigger data sharing during application development. This section focuses on how to configure Want to implement data sharing.
+
+The following actions are involved in data sharing:
+
+- **ohos.want.action.select**: action of starting the application sharing box.
+- **ohos.want.action.sendData**: action of sending a single data record, that is, transferring data to the shared party.
+
+## Sharing Party
+
+The sharing party starts an application sharing box and transfers the data to the shared party. Therefore, Want of the sharing party must be nested at two layers. In the first layer, implicit Want is used together with the **ohos.want.action.select** action to display the application sharing box. In the second layer, the data to share is declared in the custom field **parameters**, and then the Want that includes the **ohos.want.action.sendData** action and the **parameters** field is transferred to the application sharing box. The shared party obtains the shared data from **parameters**.
+
+```ts
+import common from '@ohos.app.ability.common';
+
+let fileType = 'application/pdf';
+let fileName = 'TestFile.pdf';
+let fileFd = -1; // Obtain the file descriptor (FD) of the file to share.
+let fileSize: number; // Obtain the size of the file to share.
+
+function implicitStartAbility() {
+ let context = getContext(this) as common.UIAbilityContext;
+ let wantInfo = {
+    // This action is used to implicitly match the application sharing box.
+ action: 'ohos.want.action.select',
+ // This is the custom parameter in the first layer of Want,
+    // which is intended to add information to the application sharing box.
+ parameters: {
+ // MIME type of PDF.
+ 'ability.picker.type': fileType,
+ 'ability.picker.fileNames': [fileName],
+ 'ability.picker.fileSizes': [fileSize],
+      // This is a nested Want, which will be sent directly to the selected application.
+ 'ability.want.params.INTENT': {
+ 'action': 'ohos.want.action.sendData',
+ 'type': 'application/pdf',
+ 'parameters': {
+ 'keyFd': { 'type': 'FD', 'value': fileFd }
+ }
}
- ```
-
- In the preceding code, the custom field **parameters** is used. The **ability.picker.\*** fields in the first-layer **parameters** are used to pass the information to be displayed on the application selector. The following fields are involved:
-
- - **"ability.picker.type"**: The application selector renders the file type icon based on this field.
- - **"ability.picker.fileNames"**: The application selector displays the file name based on this field.
- - **"ability.picker.fileSizes"**: The application selector displays the file size based on this field. The unit is byte.
- - **"ability.picker.fileNames"** and **"ability.picker.fileSizes"** are arrays and have a one-to-one mapping.
-
- For example, when **"ability.picker.type"** is **"application/pdf"**, **"ability.picker.fileNames"** is **"["APIs.pdf"]"**, and **"ability.picker.fileSizes"** is **"[350 \* 1024]"**, the application selector is displayed as follows:
-
- 
-
- In the preceding code, the **ability.want.params.INTENT** field is nested Want. In this field, **action** and **type** are used for implicit matching by the application selector. For details about implicit matching, see [Matching Rules of Implicit Want](explicit-implicit-want-mappings.md#matching-rules-of-implicit-want). After the user selects an application, the nested Want of the **ability.want.params.INTENT** field is passed to that application.
-
-- Shared party
- 1. As mentioned above, the application selector performs implicit matching based on the **ability.want.params.INTENT** field. Therefore, you must set **skills** in the ability configuration file (**module.json5** file in the stage model) of the shared party as follows:
-
- ```ts
- "skills": [
- {
- "entities": [
+ }
+ }
+ context.startAbility(wantInfo).then(() => {
+ // ...
+ }).catch((err) => {
+ // ...
+ })
+}
+```
+
+> **NOTE**
+>
+> Data sharing can be implemented only in FD format. For details about how to obtain the FD and file name, see [File Management](../reference/apis/js-apis-file-fs.md).
+
+In the preceding code, under the custom field **parameters**, the following **ability.picker.\*** fields are used to pass the information to be displayed on the application sharing box:
+
+- **ability.picker.type**: file type icon.
+- **ability.picker.fileNames**: file name.
+- **ability.picker.fileSizes**: file size, in bytes.
+- **ability.picker.fileNames** and **ability.picker.fileSizes** are arrays and have a one-to-one mapping.
+
+The following figure shows an example.
+
+
+
+## Shared Party
+
+To enable the shared party to identify the shared content, configure **skills** in the [module.json5 file](../quick-start/module-configuration-file.md) of the UIAbility of the shared party. The **actions** field and the **type** field in **uris** must match the **action** and **type** fields in **ability.want.params.INTENT** of the sharing party, respectively.
+
+```json
+{
+ "module": {
+ // ...
+ "abilities": [
+ {
+ // ...
+ "skills": [
+ {
// ...
- ],
- "actions": [
+ "actions": [
+ "action.system.home",
"ohos.want.action.sendData"
// ...
- ],
- "uris": [
- {
- "type": "application/pdf"
- },
- // ...
- ]
- },
- ]
- ```
-
- The **actions** and **type** fields in **uris** match the **action** and **type** fields in **ability.want.params.INTENT**, respectively.
-
- Files can be transferred in FD mode, but not URI mode. In implicit matching, the **type** field in Want must match the **type** field in **uris** under **skills** of the shared party. Therefore, specify only the **type** field in **uris**. If **host** and **port** are specified, the matching fails. The application selector initiates implicit matching based on **ability.want.params.INTENT**. Therefore, when the **uri** field added to **ability.want.params.INTENT** matches the **uris** field under **skills**, the matching is successful and additional data can be transferred.
- 2. After the application selector starts the shared party, the system calls **onCreate** and passes **ability.want.params.INTENT** to the **want** parameter.
-
- ```ts
- onCreate(want, launchParam) {
- // When keyFd is undefined, the application crashes.
- if (want["parameters"]["keyFd"] !== undefined) {
- // Receive the file descriptor.
- let fd = want["parameters"]["keyFd"].value;
- // ...
- }
+ ],
+ "uris": [
+ {
+ "type": "application/pdf"
+            }
+ ]
+ }
+ ]
}
- ```
+ ]
+ }
+}
+```
+
+After the user selects an application, the Want nested in the **ability.want.params.INTENT** field is passed to that application. The UIAbility of the shared party, after being started, can call [onCreate()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityoncreate) or [onNewWant()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityonnewwant) to obtain the passed Want.
+
+The following is an example of the Want obtained. You can use the FD of the shared file to perform required operations.
+
+```json
+{
+ "deviceId": "",
+ "bundleName": "com.example.myapplication",
+ "abilityName": "EntryAbility",
+ "moduleName": "entry",
+ "uri": "",
+ "type": "application/pdf",
+ "flags": 0,
+ "action": "ohos.want.action.sendData",
+ "parameters": {
+ "component.startup.newRules": true,
+ "keyFd": {
+ "type": "FD",
+ "value": 36
+ },
+ "mime-type": "application/pdf",
+ "moduleName": "entry",
+ "ohos.aafwk.param.callerPid": 3488,
+ "ohos.aafwk.param.callerToken": 537379209,
+ "ohos.aafwk.param.callerUid": 20010014
+ },
+ "entities": []
+}
+```
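+
+A minimal sketch (the class name is illustrative and depends on your project) of reading the shared FD in the shared party's UIAbility:
+
+```ts
+import UIAbility from '@ohos.app.ability.UIAbility';
+
+export default class EntryAbility extends UIAbility {
+  onCreate(want, launchParam) {
+    // keyFd is undefined if no file was shared with this ability.
+    if (want['parameters']['keyFd'] !== undefined) {
+      // Receive the file descriptor of the shared file.
+      let fd = want['parameters']['keyFd'].value;
+      // ...
+    }
+  }
+}
+```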
diff --git a/en/application-dev/application-models/datashareextensionability.md b/en/application-dev/application-models/datashareextensionability.md
index 5b07ba68180fbcc2a51047d37ca9a82addd89cd8..f671848f890277af92fc23869c5db0d57b02a316 100644
--- a/en/application-dev/application-models/datashareextensionability.md
+++ b/en/application-dev/application-models/datashareextensionability.md
@@ -1,4 +1,4 @@
-# DataShareExtensionAbility
+# DataShareExtensionAbility (for System Applications Only)
-DataShareExtensionAbility is available only for system application. It provides the data sharing capability. System applications can implement a DataShareExtensionAbility or access an existing DataShareExtensionAbility in the system. Third-party applications can only access an existing DataShareExtensionAbility. For details, see [DataShare Development](../database/database-datashare-guidelines.md).
+DataShareExtensionAbility provides the data sharing capability. System applications can implement a DataShareExtensionAbility or access an existing DataShareExtensionAbility in the system. Third-party applications can only access an existing DataShareExtensionAbility. For details, see [DataShare Development](../database/database-datashare-guidelines.md).
diff --git a/en/application-dev/application-models/enterprise-extensionAbility.md b/en/application-dev/application-models/enterprise-extensionAbility.md
index 514e254f77981977c7c425a4ea2ddbebbcff9ca8..0038b41e5b4f654d8c7924ec1232bb342dd616cb 100644
--- a/en/application-dev/application-models/enterprise-extensionAbility.md
+++ b/en/application-dev/application-models/enterprise-extensionAbility.md
@@ -1,54 +1,51 @@
# EnterpriseAdminExtensionAbility Development
-## Introduction
+## Introduction to EnterpriseAdminExtensionAbility
-**EnterpriseAdminExtensionAbility** is essential to a mobile device management (MDM) application. When developing an MDM application for an enterprise, you must inherit the **EnterpriseAdminExtensionAbility** class and have the MDM service logic implemented in an **EnterpriseAdminExtensionAbility** instance. The **EnterpriseAdminExtensionAbility** class provides callbacks for the enable, disable, install, and uninstall events of a device administrator application, implementing notification of system administrator status changes.
+EnterpriseAdminExtensionAbility is a mandatory component for Mobile Device Management (MDM) applications. When developing MDM applications for enterprises, you need to inherit EnterpriseAdminExtensionAbility and implement MDM service logic in the EnterpriseAdminExtensionAbility instance. EnterpriseAdminExtensionAbility implements notification of system management status changes by defining the callbacks invoked when a device administrator application is enabled or disabled or when an application is installed or uninstalled.
## Constraints
-- ***Function constraints***
+EnterpriseAdminExtensionAbility is applicable only to enterprise administrator applications.
- The APIs provided can be used only by device administrator applications.
-
-
-## Scenarios: Listening for the Enable, Disable, Install, and Uninstall Events of a Device Administrator Application
+## Observing Activation/Deactivation of a Device Administrator Application and Installation/Removal of an Application
### Overview
-**onAdminEnabled**: called when the enterprise administrator or employee deploys an MDM application and enables the DeviceAdmin permission for the application. The MDM application can set the initialization policy in the **onAdminEnabled** callback.
+**onAdminEnabled**: When an enterprise administrator or employee deploys an MDM application and activates the device administrator application, this callback is invoked to notify the MDM application that the DeviceAdmin permission is activated. The initialization policy of the MDM application can be set in **onAdminEnabled**.
-**onAdminDisabled**: called when the system or employee disables the DeviceAdmin permission to notify the enterprise administrator that the device is no longer managed.
+**onAdminDisabled**: When the device administrator application is deactivated, this callback is invoked to notify the MDM application that the DeviceAdmin permission is deactivated.
-**onBundleAdded**: called to notify the enterprise administrator that the specified MDM application is installed on the device. In enterprise application administration settings, after the enterprise administrator subscribes to application installation and uninstallation events, the MDM application reports the events through the callbacks.
+**onBundleAdded**: The enterprise administrator can subscribe to application installation and uninstallation events. When an application is installed on an enterprise device, the MDM application reports the event in this callback to notify the enterprise administrator.
-**onBundleRemoved**: called to notify the enterprise administrator that the specified MDM application is uninstalled on the device.
+**onBundleRemoved**: When an application is removed from an enterprise device, the MDM application reports the event in this callback to notify the enterprise administrator.
### Available APIs
| Class | API | Description |
-| :------------------------------ | ----------------------------------------- | ---------------------------- |
-| EnterpriseAdminExtensionAbility | onAdminDisabled(): void | Called when the device administrator application is enabled.|
-| EnterpriseAdminExtensionAbility | onBundleAdded(bundleName: string): void | Called when the MDM application is installed. |
-| EnterpriseAdminExtensionAbility | onAdminEnabled(): void | Called when the device administrator application is disabled. |
-| EnterpriseAdminExtensionAbility | onBundleRemoved(bundleName: string): void | Called when the MDM application is uninstalled. |
+| ------------------------------ | ----------------------------------------- | ---------------------------- |
+| EnterpriseAdminExtensionAbility | onAdminEnabled(): void | Called when a device administrator application is activated. |
+| EnterpriseAdminExtensionAbility | onAdminDisabled(): void | Called when a device administrator application is deactivated.|
+| EnterpriseAdminExtensionAbility | onBundleAdded(bundleName: string): void | Called when an application is installed on a device. |
+| EnterpriseAdminExtensionAbility | onBundleRemoved(bundleName: string): void | Called when an application is removed from a device. |
### How to Develop
-To implement **EnterpriseAdminExtensionAbility**, enable the device administrator application and create an **ExtensionAbility** instance from the code directory of the device administrator application. The procedure is as follows:
+To implement EnterpriseAdminExtensionAbility, you need to activate the device administrator application and create **ExtensionAbility** in the code directory of the device administrator application. The procedure is as follows:
1. In the **ets** directory of the target module, right-click and choose **New > Directory** to create a directory named **EnterpriseExtAbility**.
-2. Right-click the **EnterpriseExtAbility** directory and choose **New > TypeScript File** to create a file named **EnterpriseExtAbility.ts**.
-3. Open the **EnterpriseExtAbility.ts** file and import the **EnterpriseAdminExtensionAbility** module. Customize a class that inherits from **EnterpriseAdminExtensionAbility** and add the required callbacks, such as **onAdminEnabled()** and **onAdminDisabled()**, through which the enterprise administrator can receive notification when the device administrator application is enabled or disabled.
+2. Right-click the **EnterpriseExtAbility** directory, and choose **New > TypeScript File** to create a file named **EnterpriseExtAbility.ts**.
+3. Open the **EnterpriseExtAbility.ts** file and import the **EnterpriseAdminExtensionAbility** module. Create a custom class that inherits from **EnterpriseAdminExtensionAbility** and add the required notification callbacks, such as **onAdminEnabled()** and **onAdminDisabled()**, so that the device administrator can receive notifications when the device administrator application is activated or deactivated.
```ts
import EnterpriseAdminExtensionAbility from '@ohos.enterprise.EnterpriseAdminExtensionAbility';
-
+
export default class EnterpriseAdminAbility extends EnterpriseAdminExtensionAbility {
-
+
onAdminEnabled() {
console.info("onAdminEnabled");
}
-
+
onAdminDisabled() {
console.info("onAdminDisabled");
}
@@ -56,14 +53,14 @@ To implement **EnterpriseAdminExtensionAbility**, enable the device administrato
onBundleAdded(bundleName: string) {
console.info("EnterpriseAdminAbility onBundleAdded bundleName:" + bundleName)
}
-
+
onBundleRemoved(bundleName: string) {
console.info("EnterpriseAdminAbility onBundleRemoved bundleName" + bundleName)
}
};
```
-4. Register **ServiceExtensionAbility** in the [module.json5](../quick-start/module-configuration-file.md) file of the target module. Among the parameters, set **type** to **enterpriseAdmin** and **srcEntrance** to the code path of the current ExtensionAbility.
+4. Register **ServiceExtensionAbility** in the [**module.json5**](../quick-start/module-configuration-file.md) file corresponding to the project module. Set **type** to **enterpriseAdmin** and **srcEntrance** to the path of the ExtensionAbility code.
```ts
"extensionAbilities": [
@@ -78,10 +75,9 @@ To implement **EnterpriseAdminExtensionAbility**, enable the device administrato
## Example
-Use the **subscribeManagedEvent** and **unsubscribeManagedEvent** APIs in the **@ohos.enterprise.adminManager** module to subscribe to and unsubscribe from the application installation and uninstallation event, respectively. After the subscription is successful, the MDM application notifies the enterprise administrator when it is installed or uninstalled on the device.
+Use **subscribeManagedEvent** in the **@ohos.enterprise.adminManager** module to subscribe to application installation and removal events. When an application is installed or removed, the MDM application is notified of the event. Then, the MDM application reports the event in the callback to notify the enterprise administrator. To unsubscribe from events, use **unsubscribeManagedEvent**.
```ts
- @State managedEvents: Array = [0,1]
@State subscribeManagedEventMsg: string = ""
@State unsubscribeManagedEventMsg: string = ""
@@ -108,4 +104,3 @@ Use the **subscribeManagedEvent** and **unsubscribeManagedEvent** APIs in the **
}
```
-
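+
+A minimal sketch of the subscription call itself. The bundle and ability names in **wantTemp** are hypothetical; **wantTemp** must identify the activated device administrator application, and the two **ManagedEvent** values correspond to application installation and removal:
+
+```ts
+import adminManager from '@ohos.enterprise.adminManager';
+
+let wantTemp = {
+  bundleName: 'com.example.myapplication', // Hypothetical device administrator application.
+  abilityName: 'EntryAbility',
+};
+
+// Subscribe to the application installation and removal events.
+adminManager.subscribeManagedEvent(wantTemp,
+  [adminManager.ManagedEvent.MANAGED_EVENT_BUNDLE_ADDED,
+   adminManager.ManagedEvent.MANAGED_EVENT_BUNDLE_REMOVED], (error) => {
+  if (error) {
+    console.error(`subscribeManagedEvent failed, code: ${error.code}`);
+  }
+});
+```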
diff --git a/en/application-dev/application-models/explicit-implicit-want-mappings.md b/en/application-dev/application-models/explicit-implicit-want-mappings.md
index 16854efb9236dc6bdc9fbe990c9cbe3581495633..9e748a31795e3afc713e7091067a8164e8a623cc 100644
--- a/en/application-dev/application-models/explicit-implicit-want-mappings.md
+++ b/en/application-dev/application-models/explicit-implicit-want-mappings.md
@@ -62,7 +62,7 @@ The system matches the **action** attribute in the **want** parameter passed by
**Figure 1** Matching rules of action in the want parameter
- 
+ 
### Matching Rules of entities in the want Parameter
@@ -79,19 +79,15 @@ The system matches the **entities** attribute in the **want** parameter passed b
- If **entities** in the passed **want** parameter is specified, and **entities** under **skills** of an ability is specified but does not contain **entities** in the passed **want** parameter, the matching fails.
- Figure 2 Matching rule of entities in the want parameter
+ **Figure 2** Matching rule of entities in the want parameter
- 
+ 
### Matching Rules of uri and type in the want Parameter
When the **uri** and **type** parameters are specified in the **want** parameter to initiate a component startup request, the system traverses the list of installed components and matches the **uris** array under **skills** of the abilities one by one. If one of the **uris** arrays under **skills** matches the **uri** and **type** in the passed **want**, the matching is successful.
-Figure 3 Matching rules when uri and type are specified in the want parameter
-
-
-
There are four combinations of **uri** and **type** settings. The matching rules are as follows:
- Neither **uri** or **type** is specified in the **want** parameter.
@@ -111,11 +107,17 @@ There are four combinations of **uri** and **type** settings. The matching rules
- If the **uris** array under **skills** of an ability is unspecified, the matching fails.
- If the **uris** array under **skills** of an ability contains an element whose [uri is matched](#matching-rules-of-uri) and [type is matched](#matching-rules-of-type), the matching is successful. Otherwise, the matching fails.
+Leftmost URI matching: When only **scheme**, a combination of **scheme** and **host**, or a combination of **scheme**, **host**, and **port** is configured in the **uris** array under **skills** of the ability, the matching is successful only if the leftmost part of the URI in the passed **want** parameter matches the configured **scheme**, the combination of **scheme** and **host**, or the combination of **scheme**, **host**, and **port**.
-To simplify the description, **uri** and **type** passed in the **want** parameter are called **w_uri** and **w_type**, respectively; the **uris** array under **skills** of an ability to match is called **s_uris**; each element in the array is called **s_uri**. Matching is performed from top to bottom.
+**Figure 3** Matching rules when uri and type are specified in the want parameter
+ 
+
+
+To simplify the description, **uri** and **type** passed in the **want** parameter are called **w_uri** and **w_type**, respectively; the **uris** array under **skills** of an ability to match is called **s_uris**; each element in the array is called **s_uri**. Matching is performed from top to bottom.
-Figure 4 Matching rules of uri and type in the want parameter
+**Figure 4** Matching rules of uri and type in the want parameter

@@ -128,7 +130,9 @@ To simplify the description, **uri** in the passed **want** parameter is called
- If **host** of **s_uri** is unspecified and **scheme** of **w_uri** and **scheme** of **s_uri** are the same, the matching is successful. Otherwise, the matching fails.
-- If **path**, **pathStartWith**, and **pathRegex** of **s_uri** are unspecified and **w_uri** and **s_uri** are the same, the matching is successful. Otherwise, the matching fails.
+- If **port** of **s_uri** is unspecified and the combination of **scheme** and **host** of **w_uri** is the same as the combination of **scheme** and **host** of **s_uri**, the matching is successful. Otherwise, the matching fails.
+
+- If **path**, **pathStartWith**, and **pathRegex** of **s_uri** are unspecified and the combination of **scheme**, **host**, and **port** of **w_uri** is the same as the combination of **scheme**, **host**, and **port** of **s_uri**, the matching is successful. Otherwise, the matching fails.
- If **path** of **s_uri** is specified and the **full path expressions** of **w_uri** and **s_uri** are the same, the matching is successful. Otherwise, the matching of **pathStartWith** continues.
@@ -139,12 +143,17 @@ To simplify the description, **uri** in the passed **want** parameter is called
> **NOTE**
>
> The **scheme**, **host**, **port**, **path**, **pathStartWith**, and **pathRegex** attributes of **uris** under **skills** of an ability are concatenated. If **path**, **pathStartWith**, and **pathRegex** are declared in sequence, **uris** can be concatenated into the following expressions:
->
+>
> - **Full path expression**: `scheme://host:port/path`
->
+>
> - **Prefix expression**: `scheme://host:port/pathStartWith`
->
+>
> - **Regular expression**: `scheme://host:port/pathRegex`
+>
+> - **Prefix URI expression**: When only **scheme**, a combination of **scheme** and **host**, or a combination of **scheme**, **host**, and **port** is configured in the configuration file, the matching is successful if a URI prefixed with the configured expression is passed in.
+> * `scheme://`
+> * `scheme://host`
+> * `scheme://host:port`
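+
+For example, if the **uris** array under **skills** declares only **scheme** and **host** (the values below are hypothetical):
+
+```json
+"uris": [
+  {
+    "scheme": "https",
+    "host": "www.example.com"
+  }
+]
+```
+
+then URIs such as `https://www.example.com` and `https://www.example.com/path` passed in the **want** parameter match this configuration, whereas `https://other.com` does not.
+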
### Matching Rules of type
diff --git a/en/application-dev/application-models/extensionability-overview.md b/en/application-dev/application-models/extensionability-overview.md
index 8b3197383e17810cfee7c044611cf2286f4a987d..a287fe9ac7a590bb8675a0ae0f459463ade4ff1b 100644
--- a/en/application-dev/application-models/extensionability-overview.md
+++ b/en/application-dev/application-models/extensionability-overview.md
@@ -9,7 +9,7 @@ An [ExtensionAbilityType](../reference/apis/js-apis-bundleManager.md#extensionab
- [FormExtensionAbility](../reference/apis/js-apis-app-form-formExtensionAbility.md): ExtensionAbility component of the form type, which provides APIs related to widgets.
-- [WorkSchedulerExtensionAbility](../reference/apis/js-apis-resourceschedule-workScheduler.md): ExtensionAbility component of the work_scheduler type, which provides callbacks for Work Scheduler tasks.
+- [WorkSchedulerExtensionAbility](../reference/apis/js-apis-WorkSchedulerExtensionAbility.md): ExtensionAbility component of the work_scheduler type, which provides callbacks for Work Scheduler tasks.
- [InputMethodExtensionAbility](../reference/apis/js-apis-inputmethod.md): ExtensionAbility component of the input_method type, which provides an input method framework that can be used to hide the keyboard, obtain the list of installed input methods, display the dialog box for input method selection, and more.
@@ -21,10 +21,16 @@ An [ExtensionAbilityType](../reference/apis/js-apis-bundleManager.md#extensionab
- [StaticSubscriberExtensionAbility](../reference/apis/js-apis-application-staticSubscriberExtensionAbility.md): ExtensionAbility component of the static_subscriber type, which provides APIs for static broadcast.
-- [WindowExtensionAbility](../reference/apis/js-apis-application-windowExtensionAbility.md): ExtensionAbility component of the window type, which allows system applications to display UIs of other applications.
+- [WindowExtensionAbility](../reference/apis/js-apis-application-windowExtensionAbility.md): ExtensionAbility component of the window type, which allows a system application to be embedded in and displayed over another application.
- [EnterpriseAdminExtensionAbility](../reference/apis/js-apis-EnterpriseAdminExtensionAbility.md): ExtensionAbility component of the enterprise_admin type, which provides APIs for processing enterprise management events, such as application installation events on devices and events indicating too many incorrect screen-lock password attempts.
+> **NOTE**
+> 1. Third-party applications cannot implement ServiceExtensionAbility, DataShareExtensionAbility, StaticSubscriberExtensionAbility, or WindowExtensionAbility.
+>
+> 2. To implement transaction processing in the background for a third-party application, use background tasks rather than ServiceExtensionAbility. For details, see [Background Task](../task-management/background-task-overview.md).
+>
+> 3. Third-party applications can use other types of ExtensionAbility components that have been defined.
## Using ExtensionAbility of the Specified Type
diff --git a/en/application-dev/application-models/figures/ability-startup-with-implicit-want1.png b/en/application-dev/application-models/figures/ability-startup-with-implicit-want1.png
new file mode 100644
index 0000000000000000000000000000000000000000..3f871f4816dfcf60a5c30e39b6d0ead2f8eb711e
Binary files /dev/null and b/en/application-dev/application-models/figures/ability-startup-with-implicit-want1.png differ
diff --git a/en/application-dev/application-models/figures/ability-startup-with-implicit-want2.png b/en/application-dev/application-models/figures/ability-startup-with-implicit-want2.png
new file mode 100644
index 0000000000000000000000000000000000000000..4f1656a3c20e472e260e8e125c42b47c11a35abb
Binary files /dev/null and b/en/application-dev/application-models/figures/ability-startup-with-implicit-want2.png differ
diff --git a/en/application-dev/application-models/figures/common-event.png b/en/application-dev/application-models/figures/common-event.png
index 24b51ff8718ae504ba69c1e12656d4daad797a62..fe2591d12d5f2c570d7be942d33ec330a6eb6c98 100644
Binary files a/en/application-dev/application-models/figures/common-event.png and b/en/application-dev/application-models/figures/common-event.png differ
diff --git a/en/application-dev/application-models/figures/mission-chain3.png b/en/application-dev/application-models/figures/mission-chain3.png
index e02c135ad4a90f99bb65bdccd821d29990b9536e..0357874ea633a490da800ef5baa2e70d53ce6a2d 100644
Binary files a/en/application-dev/application-models/figures/mission-chain3.png and b/en/application-dev/application-models/figures/mission-chain3.png differ
diff --git a/en/application-dev/application-models/figures/mission-list-recent.png b/en/application-dev/application-models/figures/mission-list-recent.png
new file mode 100644
index 0000000000000000000000000000000000000000..bfc35532ad4907fd3a1bfcb61110ed393ea19d1c
Binary files /dev/null and b/en/application-dev/application-models/figures/mission-list-recent.png differ
diff --git a/en/application-dev/application-models/figures/mission-set-task-snapshot-icon.png b/en/application-dev/application-models/figures/mission-set-task-snapshot-icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..9d1ba2503f4e1a5d3b2aafdd93923c3f6c411998
Binary files /dev/null and b/en/application-dev/application-models/figures/mission-set-task-snapshot-icon.png differ
diff --git a/en/application-dev/application-models/figures/mission-set-task-snapshot-label.png b/en/application-dev/application-models/figures/mission-set-task-snapshot-label.png
new file mode 100644
index 0000000000000000000000000000000000000000..c8348685cc0fd521186aa10e8d04495422fc0206
Binary files /dev/null and b/en/application-dev/application-models/figures/mission-set-task-snapshot-label.png differ
diff --git a/en/application-dev/application-models/figures/stage-want1.png b/en/application-dev/application-models/figures/stage-want1.png
deleted file mode 100644
index 558f0a8588d7785eaad1402e68d6ba60c3118f27..0000000000000000000000000000000000000000
Binary files a/en/application-dev/application-models/figures/stage-want1.png and /dev/null differ
diff --git a/en/application-dev/application-models/figures/stage-want2.png b/en/application-dev/application-models/figures/stage-want2.png
deleted file mode 100644
index 72829adade52ee11419d726f19e218ec4de15220..0000000000000000000000000000000000000000
Binary files a/en/application-dev/application-models/figures/stage-want2.png and /dev/null differ
diff --git a/en/application-dev/application-models/figures/start-uiability-floating-window.png b/en/application-dev/application-models/figures/start-uiability-floating-window.png
new file mode 100644
index 0000000000000000000000000000000000000000..8626c3704f3e60c8efb3d6b6ea0468a7c2958a4f
Binary files /dev/null and b/en/application-dev/application-models/figures/start-uiability-floating-window.png differ
diff --git a/en/application-dev/application-models/figures/uiability-intra-device-interaction.png b/en/application-dev/application-models/figures/uiability-intra-device-interaction.png
index 92292f2c6ef4c9cbd06da2a523f27b571a957e2b..344cf05e96c539ca73fdb9282625a1d1cb8584e7 100644
Binary files a/en/application-dev/application-models/figures/uiability-intra-device-interaction.png and b/en/application-dev/application-models/figures/uiability-intra-device-interaction.png differ
diff --git a/en/application-dev/application-models/figures/want-uri-type1.png b/en/application-dev/application-models/figures/want-uri-type1.png
index e0fe40d1a3cd40b72379bd947aaf2e3977021b32..ed53694a9608e8529c5e4633fca42b041bc7ab76 100644
Binary files a/en/application-dev/application-models/figures/want-uri-type1.png and b/en/application-dev/application-models/figures/want-uri-type1.png differ
diff --git a/en/application-dev/application-models/hop-cross-device-migration.md b/en/application-dev/application-models/hop-cross-device-migration.md
index 6d30435a819da49855cf9ae818bac419a1c0b614..c51e82e15f4e14f4b42b25e656543a01d84406fb 100644
--- a/en/application-dev/application-models/hop-cross-device-migration.md
+++ b/en/application-dev/application-models/hop-cross-device-migration.md
@@ -1,9 +1,9 @@
-# Cross-Device Migration
+# Cross-Device Migration (for System Applications Only)
## When to Use
-Cross-device migration is available only for system applications. The main task is to migrate the current task (including the page control status) of an application to the target device so that the task can continue on it. Cross-device migration supports the following functionalities:
+The main task of cross-device migration is to migrate the current task (including the page control status) of an application to the target device so that the task can continue on it. Cross-device migration supports the following functionalities:
- Storage and restoration of custom data
@@ -47,25 +47,16 @@ The table below describes the main APIs used for cross-device migration. For det
## How to Develop
-1. Configure the data synchronization permission in the **module.json5** file. The sample code is as follows:
-
- ```json
- {
- "module": {
- "requestPermissions":[
- {
- "name" : "ohos.permission.DISTRIBUTED_DATASYNC",
- }
- ]
- }
- }
- ```
+1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
-2. Configure the fields related to cross-device migration in the configuration file.
- - Configure the application to support migration.
-
+2. Display a dialog box to ask authorization from the user when the application is started for the first time. For details, see [Requesting User Authorization](../security/accesstoken-guidelines.md#requesting-user-authorization).
- Set the **continuable** field in the **module.json5** file to **true**. The default value is **false**. If this parameter is set to **false**, the application cannot be continued on the target device.
+3. Configure the fields related to cross-device migration in the configuration file.
+
+ Configure the application to support migration.
+ Set the **continuable** field in the **module.json5** file to **true**. The default value is **false**. If this parameter is set to **false**, the application cannot be continued on the target device.
+
+
```json
{
"module": {
@@ -80,47 +71,31 @@ The table below describes the main APIs used for cross-device migration. For det
}
```
- - Configure the application launch type. For details, see [UIAbility Component Launch Type](uiability-launch-type.md).
-
-3. Request the data synchronization permission. The sample code for displaying a dialog box to request the permission is as follows:
-
- ```ts
- requestPermission() {
- let context = this.context
- let permissions: Array = ['ohos.permission.DISTRIBUTED_DATASYNC']
- context.requestPermissionsFromUser(permissions).then((data) => {
- console.info("Succeed to request permission from user with data: "+ JSON.stringify(data))
- }).catch((error) => {
- console.info("Failed to request permission from user with error: "+ JSON.stringify(error))
- })
- }
- ```
+ Configure the application launch type. For details, see [UIAbility Component Launch Type](uiability-launch-type.md).
4. Implement [onContinue()](../reference/apis/js-apis-app-ability-uiAbility.md#abilityoncontinue) in the UIAbility of the initiator.
-
[onContinue()](../reference/apis/js-apis-app-ability-uiAbility.md#abilityoncontinue) is called on the initiator. You can save the data in this method to implement application compatibility check and migration decision.
-
- Saving migrated data: You can save the data to be migrated in key-value pairs in **wantParam**.
- Checking application compatibility: You can obtain the version number of the target application from **wantParam** and check the compatibility between the target application and the current application.
- Making a migration decision: You can determine whether to support the migration based on the return value of **onContinue()**. For details about the return value, see [Available APIs](#available-apis).
- The sample code is as follows:
-
- ```ts
- import UIAbility from '@ohos.app.ability.UIAbility';
- import AbilityConstant from '@ohos.app.ability.AbilityConstant';
-
- onContinue(wantParam : {[key: string]: any}) {
- console.info(`onContinue version = ${wantParam.version}, targetDevice: ${wantParam.targetDevice}`)
- let workInput = AppStorage.Get('ContinueWork');
- // Set the user input data into wantParam.
- wantParam["work"] = workInput // set user input data into want params
- console.info(`onContinue input = ${wantParam["input"]}`);
- return AbilityConstant.OnContinueResult.AGREE
- }
- ```
+ The sample code is as follows:
+
+ ```ts
+ import UIAbility from '@ohos.app.ability.UIAbility';
+ import AbilityConstant from '@ohos.app.ability.AbilityConstant';
+
+   onContinue(wantParam : {[key: string]: any}) {
+     console.info(`onContinue version = ${wantParam.version}, targetDevice: ${wantParam.targetDevice}`);
+     let workInput = AppStorage.Get('ContinueWork');
+     // Set the user input data into wantParam.
+     wantParam["work"] = workInput;
+     console.info(`onContinue work = ${wantParam["work"]}`);
+     return AbilityConstant.OnContinueResult.AGREE;
+   }
+ ```
5. Implement **onCreate()** and **onNewWant()** in the UIAbility of the target application to implement data restoration.
- Implementation example of **onCreate** in the multi-instance scenario
diff --git a/en/application-dev/application-models/hop-multi-device-collaboration.md b/en/application-dev/application-models/hop-multi-device-collaboration.md
index fe22c3b33db46b5a353295582a5cc6a27f690d20..adbaef7a927f61bc04b7b9066264f8367ef90d76 100644
--- a/en/application-dev/application-models/hop-multi-device-collaboration.md
+++ b/en/application-dev/application-models/hop-multi-device-collaboration.md
@@ -1,9 +1,9 @@
-# Multi-device Collaboration
+# Multi-device Collaboration (for System Applications Only)
## When to Use
-Multi-device coordination is available only for system applications. It involves the following scenarios:
+Multi-device collaboration involves the following scenarios:
- [Starting UIAbility and ServiceExtensionAbility Across Devices (No Data Returned)](#starting-uiability-and-serviceextensionability-across-devices-no-data-returned)
@@ -11,7 +11,7 @@ Multi-device coordination is available only for system applications. It involves
- [Connecting to ServiceExtensionAbility Across Devices](#connecting-to-serviceextensionability-across-devices)
-- [Using Cross-Device Ability Call](#using-cross-device-ability-call)
+- [Using Cross-Device Call](#using-cross-device-call)
## Multi-Device Collaboration Process
@@ -47,24 +47,12 @@ On device A, touch the **Start** button provided by the initiator application to
### How to Develop
-1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
+1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
-2. Request the data synchronization permission. The sample code for displaying a dialog box to request the permission is as follows:
-
- ```ts
- requestPermission() {
- let context = this.context;
- let permissions: Array = ['ohos.permission.DISTRIBUTED_DATASYNC'];
- context.requestPermissionsFromUser(permissions).then((data) => {
- console.info("Succeed to request permission from user with data: "+ JSON.stringify(data));
- }).catch((error) => {
- console.info("Failed to request permission from user with error: "+ JSON.stringify(error));
- })
- }
- ```
+2. Display a dialog box to ask authorization from the user when the application is started for the first time. For details, see [Requesting User Authorization](../security/accesstoken-guidelines.md#requesting-user-authorization).
3. Obtain the device ID of the target device.
-
+
```ts
import deviceManager from '@ohos.distributedHardware.deviceManager';
@@ -94,7 +82,7 @@ On device A, touch the **Start** button provided by the initiator application to
```
4. Set the target component parameters, and call [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) to start UIAbility or ServiceExtensionAbility.
-
+
```ts
let want = {
deviceId: getRemoteDeviceId(),
@@ -102,7 +90,7 @@ On device A, touch the **Start** button provided by the initiator application to
abilityName: 'FuncAbility',
moduleName: 'module1', // moduleName is optional.
}
- // context is the ability-level context of the initiator UIAbility.
+ // context is the AbilityContext of the initiator UIAbility.
this.context.startAbility(want).then(() => {
// ...
}).catch((err) => {
@@ -118,35 +106,23 @@ On device A, touch the **Start** button provided by the initiator application to
### Available APIs
-**Table 2** APIs for starting an ability across devices and returning the result data
+**Table 2** APIs for starting a UIAbility across devices and returning the result data
| API| Description|
| -------- | -------- |
| startAbilityForResult(want: Want, callback: AsyncCallback<AbilityResult>): void; | Starts a UIAbility. This API uses an asynchronous callback to return the result when the UIAbility is terminated.|
-| terminateSelfWithResult(parameter: AbilityResult, callback: AsyncCallback<void>): void;| Terminates this UIAbility. This API uses an asynchronous callback to return the ability result information. It is used together with **startAbilityForResult**.|
-| terminateSelfWithResult(parameter: AbilityResult): Promise<void>; | Terminates this UIAbility. This API uses a promise to return the ability result information. It is used together with **startAbilityForResult**.|
+| terminateSelfWithResult(parameter: AbilityResult, callback: AsyncCallback<void>): void;| Terminates this UIAbility. This API uses an asynchronous callback to return the result information. It is used together with **startAbilityForResult**.|
+| terminateSelfWithResult(parameter: AbilityResult): Promise<void>; | Terminates this UIAbility. This API uses a promise to return the result information. It is used together with **startAbilityForResult**.|
### How to Develop
-1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
+1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
-2. Request the data synchronization permission. The sample code for displaying a dialog box to request the permission is as follows:
-
- ```ts
- requestPermission() {
- let context = this.context;
- let permissions: Array = ['ohos.permission.DISTRIBUTED_DATASYNC'];
- context.requestPermissionsFromUser(permissions).then((data) => {
- console.info("Succeed to request permission from user with data: "+ JSON.stringify(data));
- }).catch((error) => {
- console.info("Failed to request permission from user with error: "+ JSON.stringify(error));
- })
- }
- ```
+2. Display a dialog box to ask authorization from the user when the application is started for the first time. For details, see [Requesting User Authorization](../security/accesstoken-guidelines.md#requesting-user-authorization).
3. Set the target component parameters on the initiator, and call **startAbilityForResult()** to start the target UIAbility. **data** in the asynchronous callback is used to receive the information returned by the target UIAbility to the initiator UIAbility after the target UIAbility terminates itself. For details about how to implement **getRemoteDeviceId()**, see [Starting UIAbility and ServiceExtensionAbility Across Devices (No Data Returned)](#starting-uiability-and-serviceextensionability-across-devices-no-data-returned).
-
+
```ts
let want = {
deviceId: getRemoteDeviceId(),
@@ -154,7 +130,7 @@ On device A, touch the **Start** button provided by the initiator application to
abilityName: 'FuncAbility',
moduleName: 'module1', // moduleName is optional.
}
- // context is the ability-level context of the initiator UIAbility.
+ // context is the AbilityContext of the initiator UIAbility.
this.context.startAbilityForResult(want).then((data) => {
// ...
}).catch((err) => {
@@ -163,7 +139,7 @@ On device A, touch the **Start** button provided by the initiator application to
```
4. After the UIAbility task at the target device is complete, call **terminateSelfWithResult()** to return the data to the initiator UIAbility.
-
+
```ts
const RESULT_CODE: number = 1001;
let abilityResult = {
@@ -174,20 +150,20 @@ On device A, touch the **Start** button provided by the initiator application to
moduleName: 'module1',
},
}
- // context is the ability-level context of the target UIAbility.
+ // context is the AbilityContext of the target UIAbility.
this.context.terminateSelfWithResult(abilityResult, (err) => {
// ...
});
```
5. The initiator UIAbility receives the information returned by the target UIAbility and processes the information.
-
+
```ts
const RESULT_CODE: number = 1001;
// ...
- // context is the ability-level context of the initiator UIAbility.
+ // context is the UIAbilityContext of the initiator UIAbility.
this.context.startAbilityForResult(want).then((data) => {
if (data?.resultCode === RESULT_CODE) {
// Parse the information returned by the target UIAbility.
@@ -218,21 +194,9 @@ A system application can connect to a service on another device by calling [conn
### How to Develop
-1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
-
-2. Request the data synchronization permission. The sample code for displaying a dialog box to request the permission is as follows:
-
- ```ts
- requestPermission() {
- let context = this.context;
- let permissions: Array = ['ohos.permission.DISTRIBUTED_DATASYNC'];
- context.requestPermissionsFromUser(permissions).then((data) => {
- console.info("Succeed to request permission from user with data: "+ JSON.stringify(data));
- }).catch((error) => {
- console.info("Failed to request permission from user with error: "+ JSON.stringify(error));
- })
- }
- ```
+1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
+
+2. Display a dialog box to ask authorization from the user when the application is started for the first time. For details, see [Requesting User Authorization](../security/accesstoken-guidelines.md#requesting-user-authorization).
3. (Optional) [Implement a background service](serviceextensionability.md#implementing-a-background-service). Perform this operation only if no background service is available.
@@ -292,7 +256,7 @@ A system application can connect to a service on another device by calling [conn
For details about how to implement **getRemoteDeviceId()**, see [Starting UIAbility and ServiceExtensionAbility Across Devices (No Data Returned)](#starting-uiability-and-serviceextensionability-across-devices-no-data-returned).
5. Disconnect the connection. Use **disconnectServiceExtensionAbility()** to disconnect from the background service.
-
+
```ts
let connectionId = 1 // ID returned when the service is connected through connectServiceExtensionAbility.
this.context.disconnectServiceExtensionAbility(connectionId).then((data) => {
@@ -303,111 +267,97 @@ A system application can connect to a service on another device by calling [conn
```
-## Using Cross-Device Ability Call
+## Using Cross-Device Call
-The basic principle of cross-device ability call is the same as that of intra-device ability call. For details, see [Using Ability Call to Implement UIAbility Interaction](uiability-intra-device-interaction.md#using-ability-call-to-implement-uiability-interaction).
+The basic principle of cross-device call is the same as that of intra-device call. For details, see [Using Call to Implement UIAbility Interaction (for System Applications Only)](uiability-intra-device-interaction.md#using-call-to-implement-uiability-interaction-for-system-applications-only).
-The following describes how to implement multi-device collaboration through cross-device ability call.
+The following describes how to implement multi-device collaboration through cross-device call.
### Available APIs
-**Table 4** Ability call APIs
+**Table 4** Call APIs
| API| Description|
| -------- | -------- |
| startAbilityByCall(want: Want): Promise<Caller>; | Starts a UIAbility in the foreground or background and obtains the caller object for communicating with the UIAbility.|
-| on(method: string, callback: CalleeCallBack): void | Callback invoked when the callee ability registers a method.|
-| off(method: string): void | Callback invoked when the callee ability deregisters a method.|
-| call(method: string, data: rpc.Sequenceable): Promise<void> | Sends agreed sequenceable data to the callee ability.|
-| callWithResult(method: string, data: rpc.Sequenceable): Promise<rpc.MessageParcel> | Sends agreed sequenceable data to the callee ability and obtains the agreed sequenceable data returned by the callee ability.|
+| on(method: string, callback: CalleeCallBack): void | Callback invoked when the CalleeAbility registers a method.|
+| off(method: string): void | Callback invoked when the CalleeAbility deregisters a method.|
+| call(method: string, data: rpc.Parcelable): Promise<void> | Sends agreed parcelable data to the CalleeAbility.|
+| callWithResult(method: string, data: rpc.Parcelable): Promise<rpc.MessageSequence>| Sends agreed parcelable data to the CalleeAbility and obtains the agreed parcelable data returned by the CalleeAbility.|
| release(): void | Releases the caller object.|
-| on(type: "release", callback: OnReleaseCallback): void | Callback invoked when the caller object is released.|
+| on(type: "release", callback: OnReleaseCallback): void | Callback invoked when the caller object is released.|
### How to Develop
-1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
-
-2. Request the data synchronization permission. The sample code for displaying a dialog box to request the permission is as follows:
-
- ```ts
- requestPermission() {
- let context = this.context;
- let permissions: Array = ['ohos.permission.DISTRIBUTED_DATASYNC'];
- context.requestPermissionsFromUser(permissions).then((data) => {
- console.info("Succeed to request permission from user with data: "+ JSON.stringify(data));
- }).catch((error) => {
- console.info("Failed to request permission from user with error: "+ JSON.stringify(error));
- })
- }
- ```
+1. Request the **ohos.permission.DISTRIBUTED_DATASYNC** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
-3. Create the callee ability.
+2. Display a dialog box to ask authorization from the user when the application is started for the first time. For details, see [Requesting User Authorization](../security/accesstoken-guidelines.md#requesting-user-authorization).
+
+3. Create the CalleeAbility.
- For the callee ability, implement the callback to receive data and the methods to marshal and unmarshal data. When data needs to be received, use **on()** to register a listener. When data does not need to be received, use **off()** to deregister the listener.
+ For the CalleeAbility, implement the callback to receive data and the methods to marshal and unmarshal data. When data needs to be received, use **on()** to register a listener. When data does not need to be received, use **off()** to deregister the listener.
- 1. Configure the launch type of the UIAbility.
+ 1. Configure the launch type of the UIAbility.
- Set **launchType** of the callee ability to **singleton** in the **module.json5** file.
+ Set **launchType** of the CalleeAbility to **singleton** in the **module.json5** file.
- | JSON Field| Description|
- | -------- | -------- |
- | "launchType"| Ability launch type. Set this parameter to **singleton**.|
+ | JSON Field| Description|
+ | -------- | -------- |
+ | "launchType"| UIAbility launch type. Set this parameter to **singleton**.|
- An example of the UIAbility configuration is as follows:
+ An example of the UIAbility configuration is as follows:
-
- ```json
- "abilities":[{
- "name": ".CalleeAbility",
- "srcEntrance": "./ets/CalleeAbility/CalleeAbility.ts",
- "launchType": "singleton",
- "description": "$string:CalleeAbility_desc",
- "icon": "$media:icon",
- "label": "$string:CalleeAbility_label",
- "visible": true
- }]
- ```
+
+ ```json
+ "abilities":[{
+ "name": ".CalleeAbility",
+ "srcEntrance": "./ets/CalleeAbility/CalleeAbility.ts",
+ "launchType": "singleton",
+ "description": "$string:CalleeAbility_desc",
+ "icon": "$media:icon",
+ "label": "$string:CalleeAbility_label",
+ "visible": true
+ }]
+ ```
+ 2. Import the **UIAbility** module.
+
+ ```ts
+ import Ability from '@ohos.app.ability.UIAbility';
+ ```
+ 3. Define the agreed parcelable data.
- 2. Import the **UIAbility** module.
-
- ```ts
- import Ability from '@ohos.app.ability.UIAbility';
- ```
-
- 3. Define the agreed sequenceable data.
-
- The data formats sent and received by the caller and callee abilities must be consistent. In the following example, the data formats are number and string.
-
- ```ts
- export default class MySequenceable {
- num: number = 0;
- str: string = "";
-
- constructor(num, string) {
- this.num = num;
- this.str = string;
- }
-
- marshalling(messageParcel) {
- messageParcel.writeInt(this.num);
- messageParcel.writeString(this.str);
- return true;
- }
-
- unmarshalling(messageParcel) {
- this.num = messageParcel.readInt();
- this.str = messageParcel.readString();
- return true;
- }
- }
- ```
+ The data formats sent and received by the CallerAbility and CalleeAbility must be consistent. In the following example, the data formats are number and string.
- 4. Implement **Callee.on** and **Callee.off**.
-
- In the following example, the **MSG_SEND_METHOD** listener is registered in **onCreate()** of the ability and deregistered in **onDestroy()**. After receiving sequenceable data, the application processes the data and returns the data result. You need to implement processing based on service requirements.
-
+
+ ```ts
+ export default class MyParcelable {
+ num: number = 0;
+ str: string = "";
+
+ constructor(num, string) {
+ this.num = num;
+ this.str = string;
+ }
+
+ marshalling(messageSequence) {
+ messageSequence.writeInt(this.num);
+ messageSequence.writeString(this.str);
+ return true;
+ }
+
+ unmarshalling(messageSequence) {
+ this.num = messageSequence.readInt();
+ this.str = messageSequence.readString();
+ return true;
+ }
+ }
+ ```
+ 4. Implement **Callee.on** and **Callee.off**.
+
+ In the following example, the **MSG_SEND_METHOD** listener is registered in **onCreate()** of the UIAbility and deregistered in **onDestroy()**. After receiving parcelable data, the application processes the data and returns the data result. You need to implement processing based on service requirements.
+
```ts
const TAG: string = '[CalleeAbility]';
const MSG_SEND_METHOD: string = 'CallSendMsg';
@@ -415,14 +365,14 @@ The following describes how to implement multi-device collaboration through cros
function sendMsgCallback(data) {
console.info('CalleeSortFunc called');
- // Obtain the sequenceable data sent by the caller ability.
- let receivedData = new MySequenceable(0, '');
- data.readSequenceable(receivedData);
+ // Obtain the parcelable data sent by the CallerAbility.
+ let receivedData = new MyParcelable(0, '');
+ data.readParcelable(receivedData);
console.info(`receiveData[${receivedData.num}, ${receivedData.str}]`);
// Process the data.
- // Return the sequenceable data result to the caller ability.
- return new MySequenceable(receivedData.num + 1, `send ${receivedData.str} succeed`);
+ // Return the parcelable data result to the CallerAbility.
+ return new MyParcelable(receivedData.num + 1, `send ${receivedData.str} succeed`);
}
export default class CalleeAbility extends Ability {
@@ -436,25 +386,24 @@ The following describes how to implement multi-device collaboration through cros
onDestroy() {
try {
- this.callee.off(MSG_SEND_METHOD);
+ this.callee.off(MSG_SEND_METHOD);
} catch (error) {
console.error(TAG, `${MSG_SEND_METHOD} unregister failed with error ${JSON.stringify(error)}`);
}
}
}
```
-
-4. Obtain the caller object and access the callee ability.
+
+4. Obtain the caller object and access the CalleeAbility.
1. Import the **UIAbility** module.
```ts
import Ability from '@ohos.app.ability.UIAbility';
```
-
2. Obtain the caller object.
-
- The **context** attribute of the ability implements **startAbilityByCall** to obtain the caller object for communication. The following example uses **this.context** to obtain the **context** attribute of the ability, uses **startAbilityByCall** to start the callee ability, obtain the caller object, and register the **onRelease** listener of the caller ability. You need to implement processing based on service requirements.
-
+
+   The **context** attribute of the UIAbility provides **startAbilityByCall** for obtaining the caller object for communication. The following example uses **this.context** to obtain the **context** attribute of the UIAbility, uses **startAbilityByCall** to start the CalleeAbility, obtains the caller object, and registers the **onRelease** and **onRemoteStateChange** listeners of the CallerAbility. You need to implement processing based on service requirements.
+
```ts
async onButtonGetRemoteCaller() {
@@ -469,11 +418,19 @@ The following describes how to implement multi-device collaboration through cros
if (data != null) {
caller = data;
console.info('get remote caller success');
- // Register the onRelease() listener of the caller ability.
+ // Register the onRelease listener of the CallerAbility.
caller.onRelease((msg) => {
console.info(`remote caller onRelease is called ${msg}`);
})
console.info('remote caller register OnRelease succeed');
+ // Register the onRemoteStateChange listener of the CallerAbility.
+ try {
+ caller.onRemoteStateChange((str) => {
+ console.log('Remote state changed ' + str);
+ });
+ } catch (error) {
+        console.log(`Caller.onRemoteStateChange catch error, error.code: ${JSON.stringify(error.code)}, error.message: ${JSON.stringify(error.message)}`);
+ }
}
}).catch((error) => {
console.error(`get remote caller failed with ${error}`);
@@ -483,21 +440,21 @@ The following describes how to implement multi-device collaboration through cros
For details about how to implement **getRemoteDeviceId()**, see [Starting UIAbility and ServiceExtensionAbility Across Devices (No Data Returned)](#starting-uiability-and-serviceextensionability-across-devices-no-data-returned).
-5. Sends agreed sequenceable data to the callee ability.
- 1. The sequenceable data can be sent to the callee ability with or without a return value. The method and sequenceable data must be consistent with those of the callee ability. The following example describes how to send data to the callee ability.
+5. Send agreed parcelable data to the CalleeAbility.
+ 1. The parcelable data can be sent to the CalleeAbility with or without a return value. The method and parcelable data must be consistent with those of the CalleeAbility. The following example describes how to send data to the CalleeAbility.
```ts
const MSG_SEND_METHOD: string = 'CallSendMsg';
async onButtonCall() {
try {
- let msg = new MySequenceable(1, 'origin_Msg');
+ let msg = new MyParcelable(1, 'origin_Msg');
await this.caller.call(MSG_SEND_METHOD, msg);
} catch (error) {
console.info(`caller call failed with ${error}`);
}
}
```
- 2. In the following, **CallWithResult** is used to send data **originMsg** to the callee ability and assign the data processed by the **CallSendMsg** method to **backMsg**.
+    2. In the following example, **callWithResult()** is used to send data **originMsg** to the CalleeAbility and assign the data processed by the **CallSendMsg** method to **backMsg**.
```ts
const MSG_SEND_METHOD: string = 'CallSendMsg';
@@ -505,12 +462,12 @@ The following describes how to implement multi-device collaboration through cros
backMsg: string = '';
async onButtonCallWithResult(originMsg, backMsg) {
try {
- let msg = new MySequenceable(1, originMsg);
+ let msg = new MyParcelable(1, originMsg);
const data = await this.caller.callWithResult(MSG_SEND_METHOD, msg);
console.info('caller callWithResult succeed');
- let result = new MySequenceable(0, '');
- data.readSequenceable(result);
+ let result = new MyParcelable(0, '');
+ data.readParcelable(result);
backMsg(result.str);
console.info(`caller result is [${result.num}, ${result.str}]`);
} catch (error) {
diff --git a/en/application-dev/application-models/inputmethodextentionability.md b/en/application-dev/application-models/inputmethodextentionability.md
new file mode 100644
index 0000000000000000000000000000000000000000..8a7856f402bf30b1610521e3cf05dda7145c3509
--- /dev/null
+++ b/en/application-dev/application-models/inputmethodextentionability.md
@@ -0,0 +1,366 @@
+# InputMethodExtensionAbility Development
+
+[InputMethodExtensionAbility](../reference/apis/js-apis-inputmethod-extension-ability.md) is an ExtensionAbility component of the inputMethod type that provides extension capabilities for the input method framework.
+
+InputMethodExtensionAbility can be started or connected by other application components to process transactions in the background based on the request of the caller.
+
+
+InputMethodExtensionAbility provides related capabilities through the [InputMethodExtensionContext](../reference/apis/js-apis-inputmethod-extension-context.md).
+
+
+## Implementing an Input Method Application
+
+InputMethodExtensionAbility provides the **onCreate()** and **onDestroy()** callbacks, as described below. Override them as required.
+
+- **onCreate**
+
+ This callback is triggered when a service is created for the first time. You can perform initialization operations, for example, registering a common event listener.
+
+ > **NOTE**
+ >
+ > If a service has been created, starting it again does not trigger the **onCreate()** callback.
+
+- **onDestroy**
+
+ This callback is triggered when the service is no longer used and the instance is ready for destruction. You can clear resources in this callback, for example, deregister the listener.
+
+
+## How to Develop
+
+To implement an input method application, manually create an InputMethodExtensionAbility component in DevEco Studio. The procedure is as follows:
+
+In the **ets** directory of the target module, right-click and choose **New** > **Extension Ability** > **InputMethod** to create a minimum template of InputMethodExtensionAbility.
+
+> **NOTE**
+>
+> When compiling the input method application, use the signature at the system_basic level. Otherwise, the application will not be able to start the keyboard.
+
+The minimum template implements an input method application with the most basic features, such as starting the keyboard, entering text, and deleting input. You can diversify the feature set of the application by, for example, adding the feature to hide the keyboard.
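+
+For example, a hypothetical "hide" key could reuse the **KeyboardController** instance saved in the **inputStart** callback (see **KeyboardController.ts** below); a sketch:
+
+```ts
+// Hide the soft keyboard; globalThis.keyboardController is set in the 'inputStart' callback.
+globalThis.keyboardController.hide().then(() => {
+  console.info('keyboard hidden');
+});
+```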
+
+The minimum template contains four files: **KeyboardController.ts**, **InputMethodService.ts**, **Index.ets**, and **KeyboardKeyData.ts**. The file directory is as follows:
+
+```
+/src/main/
+├── ets/inputmethodextability
+│ └──model/KeyboardController.ts # Shows the keyboard.
+│ └──InputMethodService.ts # Customizes a class that inherits from InputMethodExtensionAbility and adds the required lifecycle callbacks.
+│ └──pages
+│ └── Index.ets # Draws the keyboard and adds the input and deletion features.
+│ └── KeyboardKeyData.ts # Defines keyboard attributes.
+├── resources/base/profile/main_pages.json
+```
+
+## File Introduction
+
+1. **InputMethodService.ts** file:
+
+ In this file, add the dependency package for importing InputMethodExtensionAbility. Customize a class that inherits from InputMethodExtensionAbility and add the required lifecycle callbacks.
+
+ ```ts
+ import InputMethodExtensionAbility from '@ohos.InputMethodExtensionAbility';
+ import { KeyboardController } from './model/KeyboardController'
+
+ export default class InputDemoService extends InputMethodExtensionAbility {
+ private keyboardController: KeyboardController;
+
+ onCreate(want) {
+ this.keyboardController = new KeyboardController(this.context);
+ this.keyboardController.onCreate(); // Initialize the window and register an event listener for the input method framework.
+ }
+
+ onDestroy() {
+ console.log("onDestroy.");
+ this.context.destroy();
+ }
+ }
+ ```
+
+2. **KeyboardController.ts** file:
+
+ ```ts
+ import inputMethodEngine from '@ohos.inputMethodEngine';
+ import display from '@ohos.display';
+ import windowManager from '@ohos.window';
+
+ // Call the getInputMethodAbility API to obtain an instance, and then call the other APIs of the input method framework based on the instance.
+ globalThis.inputAbility = inputMethodEngine.getInputMethodAbility();
+
+ export class KeyboardController {
+ mContext; // Save the context attribute in InputMethodExtensionAbility.
+ WINDOW_TYPE_INPUT_METHOD_FLOAT = 2105; // Define the window type. The value 2105 indicates the input method window type, which is used to create an input method application window.
+ windowName = 'inputApp';
+ private windowHeight: number = 0;
+ private windowWidth: number = 0;
+ private nonBarPosition: number = 0;
+ private isWindowShowing: boolean = false;
+
+ constructor(context) {
+ this.mContext = context;
+ }
+
+ public onCreate(): void
+ {
+ this.initWindow(); // Initialize the window.
+ this.registerListener(); // Register an event listener for the input method framework.
+ }
+
+ public onDestroy(): void // Destroy the instance.
+ {
+ this.unRegisterListener(); // Deregister the event listener.
+ let win = windowManager.findWindow(this.windowName);
+ win.destroyWindow(); // Destroy the window.
+ this.mContext.terminateSelf(); // Terminate the InputMethodExtensionAbility service.
+ }
+
+ private initWindow(): void // Initialize the window.
+ {
+ let dis = display.getDefaultDisplaySync();
+ let dWidth = dis.width;
+ let dHeight = dis.height;
+ let keyHeightRate = 0.47;
+ let keyHeight = dHeight * keyHeightRate;
+ this.windowWidth = dWidth;
+ this.windowHeight = keyHeight;
+ this.nonBarPosition = dHeight - keyHeight;
+
+ let config = {
+ name: this.windowName,
+ windowType: this.WINDOW_TYPE_INPUT_METHOD_FLOAT,
+ ctx: this.mContext
+ }
+ windowManager.createWindow(config).then((win) => { // Create a window of the specified type.
+ win.resize(dWidth, keyHeight).then(() => {
+ win.moveWindowTo(0, this.nonBarPosition).then(() => {
+ win.setUIContent('pages/InputMethodExtAbility/Index').then(() => {
+ });
+ });
+ });
+ });
+ }
+
+ private registerListener(): void
+ {
+ this.registerInputListener(); // Register an event listener for the input method framework service.
+    globalThis.inputAbility.on('keyboardShow', () => { // Register an event listener for keyboard showing.
+ if (this.isWindowShowing) {
+ return;
+ }
+ this.isWindowShowing = true;
+ this.showHighWindow(); // Show the window.
+ });
+ ...
+ // Register a listener for keyboard hiding.
+ }
+
+ private registerInputListener() { // Register a listener for the enabling and disabling events of the input method framework service.
+ globalThis.inputAbility.on('inputStart', (kbController, textInputClient) => {
+ globalThis.textInputClient = textInputClient; // This is an input method client instance, based on which you can call the functional APIs that the input method framework provides for the input method application.
+ globalThis.keyboardController = kbController;
+ })
+ globalThis.inputAbility.on('inputStop', (imeId) => {
+ if (imeId == "Bundle name/Ability name") {
+ this.onDestroy();
+ }
+ });
+ }
+
+ private unRegisterListener(): void
+ {
+ globalThis.inputAbility.off('inputStart');
+ globalThis.inputAbility.off('inputStop', () => {});
+ globalThis.inputAbility.off('keyboardShow');
+ }
+
+ private showHighWindow() {
+ let win = windowManager.findWindow(this.windowName)
+ win.resize(this.windowWidth, this.windowHeight).then(() => {
+ win.moveWindowTo(0, this.nonBarPosition).then(() => {
+ win.showWindow().then(() => {
+ this.isWindowShowing = false;
+ })
+ })
+ })
+ }
+ }
+ ```
+
+3. **KeyboardKeyData.ts** file:
+
+   In this file, you can define the content displayed on the soft keyboard.
+
+ ```ts
+ export interface sourceListType {
+ content: string,
+ }
+
+ export let numberSourceListData: sourceListType[] = [
+ {
+ content: '1'
+ },
+ {
+ content: '2'
+ },
+ {
+ content: '3'
+ },
+ {
+ content: '4'
+ },
+ {
+ content: '5'
+ },
+ {
+ content: '6'
+ },
+ {
+ content: '7'
+ },
+ {
+ content: '8'
+ },
+ {
+ content: '9'
+ },
+ {
+ content: '0'
+ }
+ ]
+ ```
+
+4. **Index.ets** file:
+
+ This file describes the functions of keys. For example, the number keys print numbers in the text box, and the delete key deletes what's entered.
+
+ Add the path to this file to the **src** field in the **resources/base/profile/main_pages.json** file.
+
+ ```ets
+    import { numberSourceListData, sourceListType } from './KeyboardKeyData'
+
+ @Component
+ struct keyItem {
+ private keyValue: sourceListType
+ @State keyBgc: string = "#fff"
+ @State keyFontColor: string = "#000"
+
+ build() {
+ Column() {
+ Flex({ direction: FlexDirection.Column,
+ alignItems: ItemAlign.Center, justifyContent: FlexAlign.Center }) {
+ Text(this.keyValue.content).fontSize(20).fontColor(this.keyFontColor)
+ }
+ }
+ .backgroundColor(this.keyBgc)
+ .borderRadius(6)
+ .width("8%")
+ .height("65%")
+ .onTouch((event: TouchEvent) => {
+ if (event.type === TouchType.Down) {
+ globalThis.textInputClient.insertText(this.keyValue.content);
+ }
+ })
+ }
+ }
+
+ // Component used for deletion.
+ @Component
+ export struct deleteItem {
+ @State keyBgc: string = "#fff"
+ @State keyFontColor: string = "#000"
+
+ build() {
+ Column() {
+ Flex({ direction: FlexDirection.Column,
+ alignItems: ItemAlign.Center, justifyContent: FlexAlign.Center }) {
+ Text("Delete").fontSize(20).fontColor(this.keyFontColor)
+ }
+ }
+ .backgroundColor(this.keyBgc)
+ .width("13%")
+ .borderRadius(6)
+ .onTouch((event: TouchEvent) => {
+ if (event.type === TouchType.Down) {
+ globalThis.textInputClient.deleteForward(1);
+ }
+ })
+ }
+ }
+
+ // Numeric keyboard
+ @Component
+ struct numberMenu {
+ private numberList: sourceListType[]
+
+ build() {
+ Flex({ direction: FlexDirection.Column, alignItems: ItemAlign.Center, justifyContent: FlexAlign.SpaceEvenly }) {
+ Flex({ justifyContent: FlexAlign.SpaceBetween }) {
+ ForEach(this.numberList, (item: sourceListType) => { // First row on the numeric keyboard
+ keyItem({ keyValue: item })
+ }, (item: sourceListType) => item.content);
+ }
+ .padding({ top: "2%" })
+ .width("96%")
+ .height("25%")
+
+ Flex({ justifyContent: FlexAlign.SpaceBetween }) {
+ deleteItem()
+ }
+ .width("96%")
+ .height("25%")
+ }
+ }
+ }
+
+ @Entry
+ @Component
+ struct Index {
+ private numberList: sourceListType[] = numberSourceListData
+
+ build() {
+ Stack() {
+ Flex({
+ direction: FlexDirection.Column,
+ alignItems: ItemAlign.Center,
+ justifyContent: FlexAlign.End
+ }) {
+ Flex({
+ direction: FlexDirection.Column,
+ alignItems: ItemAlign.Center,
+ justifyContent: FlexAlign.SpaceBetween
+ }) {
+ numberMenu({
+ numberList: this.numberList
+ })
+ }
+ .align(Alignment.End)
+ .width("100%")
+ .height("75%")
+ }
+ .height("100%").align(Alignment.End).backgroundColor("#cdd0d7")
+ }
+ .position({ x: 0, y: 0 }).zIndex(99999)
+ }
+ }
+ ```
+
+ Register the InputMethodExtensionAbility in the [module.json5 file](../quick-start/module-configuration-file.md) corresponding to the target module. Set **type** to **"inputMethod"** and **srcEntrance** to the code path of the InputMethodExtensionAbility component.
+
+ ```ts
+ {
+ "module": {
+ // ...
+ "extensionAbilities": [
+ {
+ "description": "inputMethod",
+ "icon": "$media:icon",
+ "name": "InputMethodExtAbility",
+ "srcEntrance": "./ets/inputmethodextability/InputMethodService.ts",
+ "type": "inputMethod",
+        "visible": true
+ }
+ ]
+ }
+ }
+ ```
+
+
+
diff --git a/en/application-dev/application-models/itc-with-worker.md b/en/application-dev/application-models/itc-with-worker.md
index 8cbe53eeea067ae1875a8ff4b73bc4cde7bdd629..996ab941b0244571dff6116a45ab5e2165cf1184 100644
--- a/en/application-dev/application-models/itc-with-worker.md
+++ b/en/application-dev/application-models/itc-with-worker.md
@@ -18,7 +18,7 @@ To develop the Worker mode, perform the following steps:
}
```
-2. Create the **worker.js** file based on the configuration in **build-profile.json5**.
+2. Create the **worker.ts** file based on the configuration in **build-profile.json5**.
```ts
import worker from '@ohos.worker';
@@ -58,7 +58,7 @@ To develop the Worker mode, perform the following steps:
```ts
import worker from '@ohos.worker';
- let wk = new worker.ThreadWorker("../workers/worker.js");
+ let wk = new worker.ThreadWorker("../workers/worker.ts");
// Send a message to the worker thread.
wk.postMessage("message from main thread.")
@@ -74,6 +74,6 @@ To develop the Worker mode, perform the following steps:
> **NOTE**
>
-> - If the relative path of **worker.ts** configured in **build-profile.json5** is **./src/main/ets/workers/worker.ts**, pass in the path **entry/ets/workers/worker.ts** when creating a worker thread in the stage model, and pass in the path **../workers/worker.js** when creating a worker thread in the FA model.
+> - If the relative path of **worker.ts** configured in **build-profile.json5** is **./src/main/ets/workers/worker.ts**, pass in the path **entry/ets/workers/worker.ts** when creating a worker thread in the stage model, and pass in the path **../workers/worker.ts** when creating a worker thread in the FA model.
>
> - For details about the data types supported between the main thread and worker thread, see [Sequenceable Data Types](../reference/apis/js-apis-worker.md#sequenceable-data-types).
diff --git a/en/application-dev/application-models/mission-management-overview.md b/en/application-dev/application-models/mission-management-overview.md
index 3346e8105deef0dce6dc785b7e88b10e2a4ce3e1..ba55ebb136ebffca0294bf69013f2f2ab4392e7f 100644
--- a/en/application-dev/application-models/mission-management-overview.md
+++ b/en/application-dev/application-models/mission-management-overview.md
@@ -30,102 +30,100 @@ Missions are managed by system applications (such as home screen), rather than t
A UIAbility instance corresponds to an independent mission. Therefore, when an application calls [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) to start a UIAbility, a mission is created.
-
-To call [missionManager](../reference/apis/js-apis-application-missionManager.md) to manage missions, the home screen application must request the **ohos.permission.MANAGE_MISSIONS** permission. For details about the configuration, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
-
-
-You can use **missionManager** to manage missions, for example, listening for mission changes, obtaining mission information or snapshots, and clearing, locking, or unlocking missions. The sample code is as follows:
-
-
-
-```ts
-import missionManager from '@ohos.app.ability.missionManager'
-
-let listener = {
- // Listen for mission creation.
- onMissionCreated: function (mission) {
- console.info("--------onMissionCreated-------")
- },
- // Listen for mission destruction.
- onMissionDestroyed: function (mission) {
- console.info("--------onMissionDestroyed-------")
- },
- // Listen for mission snapshot changes.
- onMissionSnapshotChanged: function (mission) {
- console.info("--------onMissionSnapshotChanged-------")
- },
- // Listen for switching the mission to the foreground.
- onMissionMovedToFront: function (mission) {
- console.info("--------onMissionMovedToFront-------")
- },
- // Listen for mission icon changes.
- onMissionIconUpdated: function (mission, icon) {
- console.info("--------onMissionIconUpdated-------")
- },
- // Listen for mission name changes.
- onMissionLabelUpdated: function (mission) {
- console.info("--------onMissionLabelUpdated-------")
- },
- // Listen for mission closure events.
- onMissionClosed: function (mission) {
- console.info("--------onMissionClosed-------")
- }
-};
-
-// 1. Register a mission change listener.
-let listenerId = missionManager.on('mission', listener);
-
-// 2. Obtain the latest 20 missions in the system.
-missionManager.getMissionInfos("", 20, (error, missions) => {
- console.info("getMissionInfos is called, error.code = " + error.code);
- console.info("size = " + missions.length);
- console.info("missions = " + JSON.stringify(missions));
-});
-
-// 3. Obtain the detailed information about a mission.
-let missionId = 11; // The mission ID 11 is only an example.
-let mission = missionManager.getMissionInfo("", missionId).catch(function (err) {
- console.info(err);
-});
-
-// 4. Obtain the mission snapshot.
-missionManager.getMissionSnapShot("", missionId, (error, snapshot) => {
- console.info("getMissionSnapShot is called, error.code = " + error.code);
- console.info("bundleName = " + snapshot.ability.bundleName);
-})
-
-// 5. Obtain the low-resolution mission snapshot.
-missionManager.getLowResolutionMissionSnapShot("", missionId, (error, snapshot) => {
- console.info("getLowResolutionMissionSnapShot is called, error.code = " + error.code);
- console.info("bundleName = " + snapshot.ability.bundleName);
-})
-
-// 6. Lock or unlock the mission.
-missionManager.lockMission(missionId).then(() => {
- console.info("lockMission is called ");
-});
-
-missionManager.unlockMission(missionId).then(() => {
- console.info("unlockMission is called ");
-});
-
-// 7. Switch the mission to the foreground.
-missionManager.moveMissionToFront(missionId).then(() => {
- console.info("moveMissionToFront is called ");
-});
-
-// 8. Clear a single mission.
-missionManager.clearMission(missionId).then(() => {
- console.info("clearMission is called ");
-});
-
-// 9. Clear all missions.
-missionManager.clearAllMissions().catch(function (err) {
- console.info(err);
-});
-
-// 10. Deregister the mission change listener.
-missionManager.off('mission', listenerId, (error) => {
- console.info("unregisterMissionListener");
-})
-```
+To call [missionManager](../reference/apis/js-apis-application-missionManager.md) to manage missions, the home screen application must request the **ohos.permission.MANAGE_MISSIONS** permission. For details about the configuration, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
+
+You can use **missionManager** to manage missions, for example, listening for mission changes, obtaining mission information or snapshots, and clearing, locking, or unlocking missions.
+
+ ```ts
+ import missionManager from '@ohos.app.ability.missionManager'
+
+ let listener = {
+ // Listen for mission creation.
+ onMissionCreated: function (mission) {
+ console.info("--------onMissionCreated-------")
+ },
+ // Listen for mission destruction.
+ onMissionDestroyed: function (mission) {
+ console.info("--------onMissionDestroyed-------")
+ },
+ // Listen for mission snapshot changes.
+ onMissionSnapshotChanged: function (mission) {
+ console.info("--------onMissionSnapshotChanged-------")
+ },
+ // Listen for switching the mission to the foreground.
+ onMissionMovedToFront: function (mission) {
+ console.info("--------onMissionMovedToFront-------")
+ },
+ // Listen for mission icon changes.
+ onMissionIconUpdated: function (mission, icon) {
+ console.info("--------onMissionIconUpdated-------")
+ },
+ // Listen for mission name changes.
+ onMissionLabelUpdated: function (mission) {
+ console.info("--------onMissionLabelUpdated-------")
+ },
+ // Listen for mission closure events.
+ onMissionClosed: function (mission) {
+ console.info("--------onMissionClosed-------")
+ }
+ };
+
+ // 1. Register a mission change listener.
+ let listenerId = missionManager.on('mission', listener);
+
+ // 2. Obtain the latest 20 missions in the system.
+ missionManager.getMissionInfos("", 20, (error, missions) => {
+ console.info("getMissionInfos is called, error.code = " + error.code);
+ console.info("size = " + missions.length);
+ console.info("missions = " + JSON.stringify(missions));
+ });
+
+ // 3. Obtain the detailed information about a mission.
+ let missionId = 11; // The mission ID 11 is only an example.
+ let mission = missionManager.getMissionInfo("", missionId).catch(function (err) {
+ console.info(err);
+ });
+
+ // 4. Obtain the mission snapshot.
+ missionManager.getMissionSnapShot("", missionId, (error, snapshot) => {
+ console.info("getMissionSnapShot is called, error.code = " + error.code);
+ console.info("bundleName = " + snapshot.ability.bundleName);
+ })
+
+ // 5. Obtain the low-resolution mission snapshot.
+ missionManager.getLowResolutionMissionSnapShot("", missionId, (error, snapshot) => {
+ console.info("getLowResolutionMissionSnapShot is called, error.code = " + error.code);
+ console.info("bundleName = " + snapshot.ability.bundleName);
+ })
+
+ // 6. Lock or unlock the mission.
+ missionManager.lockMission(missionId).then(() => {
+ console.info("lockMission is called ");
+ });
+
+ missionManager.unlockMission(missionId).then(() => {
+ console.info("unlockMission is called ");
+ });
+
+ // 7. Switch the mission to the foreground.
+ missionManager.moveMissionToFront(missionId).then(() => {
+ console.info("moveMissionToFront is called ");
+ });
+
+ // 8. Clear a single mission.
+ missionManager.clearMission(missionId).then(() => {
+ console.info("clearMission is called ");
+ });
+
+ // 9. Clear all missions.
+ missionManager.clearAllMissions().catch(function (err) {
+ console.info(err);
+ });
+
+ // 10. Deregister the mission change listener.
+ missionManager.off('mission', listenerId, (error) => {
+ console.info("unregisterMissionListener");
+ })
+ ```
+
+
diff --git a/en/application-dev/application-models/mission-set-icon-name-for-task-snapshot.md b/en/application-dev/application-models/mission-set-icon-name-for-task-snapshot.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fdc03477c0552f523a0ee9c40c6fa5b8d7c4363
--- /dev/null
+++ b/en/application-dev/application-models/mission-set-icon-name-for-task-snapshot.md
@@ -0,0 +1,51 @@
+# Setting the Icon and Name of a Mission Snapshot
+
+Setting a unique icon and name for each mission snapshot of an application helps you better manage the missions and functions of the application.
+
+By default, the **icon** and **label** fields in the [abilities tag](../quick-start/module-configuration-file.md#abilities) of the [module.json5 file](../quick-start/module-configuration-file.md) are used to set the icon and label.
+
+Figure 1 Mission snapshot of a UIAbility
+
+
+
+You can also use [UIAbilityContext.setMissionIcon()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextsetmissionicon) and [UIAbilityContext.setMissionLabel()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextsetmissionlabel) to customize the icon and name for a mission snapshot. For example, for a UIAbility instance with the launch type set to **standard**, you can configure the icon and name for each mission snapshot based on different functions.
+
+This document describes the following operations:
+
+- [Setting a Mission Snapshot Icon (for System Applications Only)](#setting-a-mission-snapshot-icon-for-system-applications-only)
+- [Setting a Mission Snapshot Name](#setting-a-mission-snapshot-name)
+
+## Setting a Mission Snapshot Icon (for System Applications Only)
+
+Call [UIAbilityContext.setMissionIcon()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextsetmissionicon) to set the icon of a mission snapshot. The icon is an object of the [PixelMap](../reference/apis/js-apis-image.md#pixelmap7) type. For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability).
+```ts
+let imagePixelMap: PixelMap = undefined; // Obtain the PixelMap information.
+
+this.context.setMissionIcon(imagePixelMap, (err) => {
+  if (err) {
+    console.error(`setMissionIcon failed, code is ${err.code}, message is ${err.message}`);
+  }
+})
+```
+
+The display effect is shown below.
+
+Figure 2 Mission snapshot icon
+
+
+
+## Setting a Mission Snapshot Name
+
+Call [UIAbilityContext.setMissionLabel()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextsetmissionlabel) to set the name of a mission snapshot.
+
+```ts
+this.context.setMissionLabel('test').then(() => {
+ console.info('setMissionLabel succeeded.');
+}).catch((err) => {
+ console.error(`setMissionLabel failed, code is ${err.code}, message is ${err.message}`);
+});
+```
+
+The display effect is shown below.
+
+Figure 3 Mission snapshot name
+
+
\ No newline at end of file
diff --git a/en/application-dev/application-models/request-permissions.md b/en/application-dev/application-models/request-permissions.md
index 670860d87dbb56adceb02f4ca350c24b61260d30..a29b793c9949d1080c8322681ba27cf6495f29ac 100644
--- a/en/application-dev/application-models/request-permissions.md
+++ b/en/application-dev/application-models/request-permissions.md
@@ -9,37 +9,7 @@ During application development, you must declare the required permission in the
To declare a permission in **config.json**, add **reqPermissions** under **module** and list the permission.
+For example, to request the permission to access the calendar, perform the following steps:
-For example, to declare the permission to access the calendar, request the **ohos.permission.READ_CALENDAR** permission. For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
-
-
-The sample code in the **config.json** file is as follows:
-
-```json
-{
- "module": {
- // ...
- "reqPermissions": [
- {
- "name": "ohos.permission.READ_CALENDAR"
- // ...
- }
- ]
- }
-}
-```
-
-
-Request the permission from uses in the form of a dialog box:
-
-```ts
-import featureAbility from '@ohos.ability.featureAbility';
-
-let context = featureAbility.getContext();
-let permissions: Array = ['ohos.permission.READ_CALENDAR']
-context.requestPermissionsFromUser(permissions, 1).then((data) => {
- console.info("Succeed to request permission from user with data: " + JSON.stringify(data))
-}).catch((error) => {
- console.info("Failed to request permission from user with error: " + JSON.stringify(error))
-})
-```
+1. Request the **ohos.permission.READ_CALENDAR** permission. For details, see [Declaring Permissions in the Configuration File](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
+2. Display a dialog box to ask authorization from the user when the application is started for the first time. For details, see [Requesting User Authorization](../security/accesstoken-guidelines.md#requesting-user-authorization).
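+
+A minimal **config.json** sketch of the declaration in step 1, mirroring the structure described above:
+
+```json
+{
+  "module": {
+    "reqPermissions": [
+      {
+        "name": "ohos.permission.READ_CALENDAR"
+      }
+    ]
+  }
+}
+```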
diff --git a/en/application-dev/application-models/serviceextensionability.md b/en/application-dev/application-models/serviceextensionability.md
index d64d884b1e3021193f63445913886830218df6e1..9ff7a0ae5d6df7574da19565c81411236dba3dda 100644
--- a/en/application-dev/application-models/serviceextensionability.md
+++ b/en/application-dev/application-models/serviceextensionability.md
@@ -18,9 +18,9 @@ Each type of ExtensionAbility has its own context. ServiceExtensionAbility has [
This topic describes how to use ServiceExtensionAbility in the following scenarios:
-- [Implementing a Background Service](#implementing-a-background-service)
+- [Implementing a Background Service (for System Applications Only)](#implementing-a-background-service-for-system-applications-only)
-- [Starting a Background Service](#starting-a-background-service)
+- [Starting a Background Service (for System Applications Only)](#starting-a-background-service-for-system-applications-only)
- [Connecting to a Background Service](#connecting-to-a-background-service)
@@ -33,9 +33,9 @@ This topic describes how to use ServiceExtensionAbility in the following scenari
> - Third-party applications can connect to ServiceExtensionAbility provided by the system only when they gain focus in the foreground.
-## Implementing a Background Service
+## Implementing a Background Service (for System Applications Only)
-This feature applies only to system applications. [ServiceExtensionAbility](../reference/apis/js-apis-app-ability-serviceExtensionAbility.md) provides the callbacks **onCreate()**, **onRequest()**, **onConnect()**, **onDisconnect()**, and **onDestory()**. Override them as required. The following figure shows the lifecycle of ServiceExtensionAbility.
+[ServiceExtensionAbility](../reference/apis/js-apis-app-ability-serviceExtensionAbility.md) provides the callbacks **onCreate()**, **onRequest()**, **onConnect()**, **onDisconnect()**, and **onDestroy()**. Override them as required. The following figure shows the lifecycle of ServiceExtensionAbility.
**Figure 1** ServiceExtensionAbility lifecycle

@@ -164,9 +164,9 @@ To implement a background service, manually create a ServiceExtensionAbility com
```
-## Starting a Background Service
+## Starting a Background Service (for System Applications Only)
-This feature applies only to system applications. A system application uses the [startServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextstartserviceextensionability) method to start a background service. The [onRequest()](../reference/apis/js-apis-app-ability-serviceExtensionAbility.md#serviceextensionabilityonrequest) callback is invoked, and the **Want** object passed by the caller is received through the callback. After the background service is started, its lifecycle is independent of that of the client. In other words, even if the client is destroyed, the background service can still run. Therefore, the background service must be stopped by calling [terminateSelf()](../reference/apis/js-apis-inner-application-serviceExtensionContext.md#serviceextensioncontextterminateself) when its work is complete. Alternatively, another component can call [stopServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextstopserviceextensionability) to stop the background service.
+A system application uses the [startServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextstartserviceextensionability) method to start a background service. The [onRequest()](../reference/apis/js-apis-app-ability-serviceExtensionAbility.md#serviceextensionabilityonrequest) callback is invoked, and the **Want** object passed by the caller is received through the callback. After the background service is started, its lifecycle is independent of that of the client. In other words, even if the client is destroyed, the background service can still run. Therefore, the background service must be stopped by calling [terminateSelf()](../reference/apis/js-apis-inner-application-serviceExtensionContext.md#serviceextensioncontextterminateself) when its work is complete. Alternatively, another component can call [stopServiceExtensionAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextstopserviceextensionability) to stop the background service.
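+
+A sketch of that flow from a system application, assuming **this.context** is a UIAbilityContext and the names in **want** are placeholders:
+
+```ts
+let want = {
+  deviceId: '',
+  bundleName: 'com.example.myapplication',
+  abilityName: 'ServiceExtAbility'
+};
+// Start the background service; its lifecycle is now independent of this client.
+this.context.startServiceExtensionAbility(want).then(() => {
+  console.info('startServiceExtensionAbility succeed');
+}).catch((err) => {
+  console.error(`startServiceExtensionAbility failed: ${JSON.stringify(err)}`);
+});
+
+// Stop the service when its work is complete.
+this.context.stopServiceExtensionAbility(want).then(() => {
+  console.info('stopServiceExtensionAbility succeed');
+});
+```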
> **NOTE**
>
diff --git a/en/application-dev/application-models/stage-model-development-overview.md b/en/application-dev/application-models/stage-model-development-overview.md
index d7f8123a379fc7950820e531a14f45dfca68f961..451649bdb1a63147b79f8c7e2d4523d6c651c548 100644
--- a/en/application-dev/application-models/stage-model-development-overview.md
+++ b/en/application-dev/application-models/stage-model-development-overview.md
@@ -10,7 +10,7 @@ The following figure shows the basic concepts used in the stage model.
- [UIAbility component](uiability-overview.md) and [ExtensionAbility component](extensionability-overview.md)
- The stage model provides two types of application components: UIAbility and ExtensionAbility. Both have specific classes and support object-oriented development. They are the specific implementation of the abstract ability concept on the stage model. They are also units scheduled by the Ability Manager Service (AMS), which schedules their lifecycles as well.
+ The stage model provides two types of application components: UIAbility and ExtensionAbility. Both have specific classes and support object-oriented development.
- UIAbility has the UI and is mainly used for user interaction. For example, with UIAbility, the Gallery application can display images in the liquid layout. After a user selects an image, it uses a new UI to display the image details. The user can touch the **Back** button to return to the liquid layout. The lifecycle of the UIAbility component contains the creation, destruction, foreground, and background states. Display-related states are exposed through WindowStage events.
@@ -22,6 +22,7 @@ The following figure shows the basic concepts used in the stage model.
- [Context](application-context-stage.md)
In the stage model, Context and its derived classes provide a variety of resources and capabilities that can be called during the runtime. The UIAbility component and ExtensionAbility derived classes have different Context classes. These classes, which all inherit from the base class Context, provide different capabilities.
+
- [AbilityStage](abilitystage.md)
Each HAP of the Entry or Feature type has an AbilityStage class instance during the runtime. When the code in the HAP is loaded to the process for the first time, the system creates an AbilityStage class instance first. Each UIAbility class defined in the HAP is associated with this class instance after instantiation. Through this class instance, you can obtain the runtime information of the UIAbility instances in the HAP.
diff --git a/en/application-dev/application-models/start-remote-pageability.md b/en/application-dev/application-models/start-remote-pageability.md
index 4e998a15d23d298bfdb402bd18ea0db2a9f819eb..36ee305b49698c1f6e6cf216174f77212f1d53e4 100644
--- a/en/application-dev/application-models/start-remote-pageability.md
+++ b/en/application-dev/application-models/start-remote-pageability.md
@@ -1,7 +1,7 @@
-# Starting a Remote PageAbility
+# Starting a Remote PageAbility (for System Applications Only)
-This feature applies only to system applications. The **startAbility()** method in the **featureAbility** class is used to start a remote PageAbility.
+The **startAbility()** method in the **featureAbility** class is used to start a remote PageAbility.
In addition to **'\@ohos.ability.featureAbility'**, you must import **'\@ohos.distributedHardware.deviceManager'**, which provides account-independent distributed device networking capabilities. Then you can use **getTrustedDeviceListSync** of the **DeviceManager** module to obtain the remote device ID and pass the remote device ID in the **want** parameter for starting the remote PageAbility.
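+
+A sketch of this flow, assuming the FA model with placeholder bundle and ability names (the device ID comes from **getTrustedDeviceListSync()**, as described above):
+
+```ts
+import featureAbility from '@ohos.ability.featureAbility';
+
+async function startRemotePageAbility(remoteDeviceId: string) {
+  await featureAbility.startAbility({
+    want: {
+      deviceId: remoteDeviceId, // Remote device ID obtained through the DeviceManager module.
+      bundleName: 'com.example.myapplication',
+      abilityName: 'com.example.myapplication.MainAbility'
+    }
+  });
+  console.info('start remote PageAbility succeed');
+}
+```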
diff --git a/en/application-dev/application-models/static-subscriber-extension-ability.md b/en/application-dev/application-models/static-subscriber-extension-ability.md
deleted file mode 100644
index ae6d9a80b7ab6c693d06e7bfe8bfb11b4db94ab8..0000000000000000000000000000000000000000
--- a/en/application-dev/application-models/static-subscriber-extension-ability.md
+++ /dev/null
@@ -1,107 +0,0 @@
-# StaticSubscriberExtensionAbility Development
-
-## Scenario Description
-
-The common event service provides two subscription modes: dynamic and static. In dynamic subscription mode, a subscriber calls an API during the running period to subscribe to common events. For details, see [Subscribing to Common Events](common-event-subscription.md). In static subscription mode, no common event subscription API is called. A common event is subscribed by configuring a declaration file and implementing a class that inherits from **StaticSubscriberExtensionAbility**. A static subscriber is started once it receives a target event (for example, a power-on event) published by the system or application. At the same time, the **onReceiveEvent** callback is triggered, in which you can implement the service logic. **The static subscriber APIs are system APIs and can be used only by system applications that have passed the system-level power consumption review.**
-
-
-
-## How to Develop
-
-1. Prerequisites
-
- The application must meet the following requirements:
-
- The application is a system application.
-
- The application is developed using the full SDK.
-
- The application's power consumption has passed the system-level power consumption review. If you want to use static subscription in the debugging phase, add the bundle name of your application to the system configuration file **/etc/static_subscriber_config.json**.
-
-
-
-2. Declaring a Static Subscriber
-
- To declare a static subscriber, create an ExtensionAbility, which is derived from the **StaticSubscriberExtensionAbility** class, in the project. The sample code is as follows:
-
- ```ts
- import StaticSubscriberExtensionAbility from '@ohos.application.StaticSubscriberExtensionAbility'
-
- export default class StaticSubscriber extends StaticSubscriberExtensionAbility {
- onReceiveEvent(event) {
- console.log('onReceiveEvent, event:' + event.event);
- }
- }
- ```
-
- You can implement service logic in the **onReceiveEvent** callback.
-
-
-
-3. Project Configuration for a Static Subscriber
-
- After writing the static subscriber code, configure the subscriber in the **module.json5** file. The configuration format is as follows:
-
- ```ts
- {
- "module": {
- ......
- "extensionAbilities": [
- {
- "name": "StaticSubscriber",
- "srcEntrance": "./ets/StaticSubscriber/StaticSubscriber.ts",
- "description": "$string:StaticSubscriber_desc",
- "icon": "$media:icon",
- "label": "$string:StaticSubscriber_label",
- "type": "staticSubscriber",
- "visible": true,
- "metadata": [
- {
- "name": "ohos.extension.staticSubscriber",
- "resource": "$profile:subscribe"
- }
- ]
- }
- ]
- ......
- }
- }
- ```
-
- Pay attention to the following fields in the JSON file:
-
- **srcEntrance**: entry file path of the ExtensionAbility, that is, the file path of the static subscriber declared in Step 2.
-
- **type**: ExtensionAbility type. For a static subscriber, set this field to **staticSubscriber**.
-
- **metadata**: level-2 configuration file information of the ExtensionAbility. The configuration information varies according to the ExtensionAbility type. Therefore, you must use different config files to indicate the specific configuration. The **metadata** field contains two keywords: **name** and **resource**. The **name** field indicates the ExtensionAbility type name. For a static subscriber, declare the name as **ohos.extension.staticSubscriber** for successful identification. The **resource** field indicates the path that stores the ExtensionAbility configuration, which is customizable. In this example, the path is **resources/base/profile/subscribe.json**.
-
- A level-2 configuration file pointed to by **metadata** must be in the following format:
-
- ```ts
- {
- "commonEvents": [
- {
- "name": "xxx",
- "permission": "xxx",
- "events":[
- "xxx"
- ]
- }
- ]
- }
- ```
-
- If the level-2 configuration file is not declared in this format, the file cannot be identified. The fields are described as follows:
-
- **name**: name of the ExtensionAbility, which must be the same as the name of **extensionAbility** declared in **module.json5**.
-
- **permission**: permission required by the publisher. If a publisher without the required permission attempts to publish an event, the event is regarded as invalid and will not be published.
-
- **events**: list of subscribed target events
-
-
-
-## Samples
-
-For details about how to develop StaticSubscriberExtensionAbility, see [StaticSubscriber (ArkTS, API version 9, Full SDK)](https://gitee.com/openharmony/applications_app_samples/tree/master/ability/StaticSubscriber).
diff --git a/en/application-dev/application-models/subscribe-system-environment-variable-changes.md b/en/application-dev/application-models/subscribe-system-environment-variable-changes.md
new file mode 100644
index 0000000000000000000000000000000000000000..c231f483e9bcd8f83faf49d40007730d0f854de5
--- /dev/null
+++ b/en/application-dev/application-models/subscribe-system-environment-variable-changes.md
@@ -0,0 +1,172 @@
+# Subscribing to System Environment Variable Changes
+
+System environment variables are system settings (for example, the system language or screen orientation) of a device that may change during the running of an application.
+
+By subscribing to the changes of system environment variables, the application can detect the changes in a timely manner and process the changes accordingly, providing better user experience. For example, when the system language changes, the application can display the UI in the new language; when the user rotates the device to landscape or portrait mode, the application can re-arrange the UI to adapt to the new screen orientation and size.
+
+The system environment variable changes are usually triggered by options in **Settings** or icons in **Control Panel**. For details about the system environment variables that support subscription, see [Configuration](../reference/apis/js-apis-app-ability-configuration.md).
+
+In OpenHarmony, you can subscribe to system environment variable changes in the following ways:
+
+- [Using ApplicationContext for Subscription](#using-applicationcontext-for-subscription)
+- [Using AbilityStage for Subscription](#using-abilitystage-for-subscription)
+- [Using UIAbility for Subscription](#using-uiability-for-subscription)
+- [Using ExtensionAbility for Subscription](#using-extensionability-for-subscription)
+
+## Using ApplicationContext for Subscription
+
+[ApplicationContext](../reference/apis/js-apis-inner-application-applicationContext.md) provides an API for registering a callback function to subscribe to the system environment variable changes. It also provides an API for deregistration so you can release related resources when they are no longer needed.
+
+1. Call **ApplicationContext.on(type: 'environment', callback: EnvironmentCallback)** to subscribe to changes in system environment variables. The code snippet below is used to subscribe to system language changes on a page.
+
+ ```ts
+ import common from '@ohos.app.ability.common';
+
+ @Entry
+ @Component
+ struct Index {
+ private context = getContext(this) as common.UIAbilityContext;
+ private callbackId: number; // ID of the subscription for system environment variable changes.
+
+ subscribeConfigurationUpdate() {
+ let systemLanguage: string = this.context.config.language; // Obtain the system language in use.
+
+ // 1. Obtain an ApplicationContext object.
+ let applicationContext = this.context.getApplicationContext();
+
+ // 2. Subscribe to system environment variable changes through ApplicationContext.
+ let environmentCallback = {
+ onConfigurationUpdated(newConfig) {
+ console.info(`onConfigurationUpdated systemLanguage is ${systemLanguage}, newConfig: ${JSON.stringify(newConfig)}`);
+
+        if (systemLanguage !== newConfig.language) {
+ console.info(`systemLanguage from ${systemLanguage} changed to ${newConfig.language}`);
+ systemLanguage = newConfig.language; // Save the new system language as the system language in use, which will be used for comparison.
+ }
+ },
+ onMemoryLevel(level) {
+ console.info(`onMemoryLevel level: ${level}`);
+ }
+ }
+
+ this.callbackId = applicationContext.on('environment', environmentCallback);
+ }
+
+ // Page display.
+ build() {
+ // ...
+ }
+ }
+ ```
+
+2. Call **ApplicationContext.off(type: 'environment', callbackId: number)** to release the resources.
+
+ ```ts
+ import common from '@ohos.app.ability.common';
+
+ @Entry
+ @Component
+ struct Index {
+ private context = getContext(this) as common.UIAbilityContext;
+ private callbackId: number; // ID of the subscription for system environment variable changes.
+
+ unsubscribeConfigurationUpdate() {
+ let applicationContext = this.context.getApplicationContext();
+ applicationContext.off('environment', this.callbackId);
+ }
+
+ // Page display.
+ build() {
+ // ...
+ }
+ }
+ ```
+
+## Using AbilityStage for Subscription
+
+The AbilityStage component provides the [AbilityStage.onConfigurationUpdate()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonconfigurationupdate) callback for subscribing to system environment variable changes. This callback is invoked when a system environment variable changes. In this callback, the latest system environment configuration is obtained through the [Configuration](../reference/apis/js-apis-app-ability-configuration.md) object.
+
+> **NOTE**
+>
+> - AbilityStage is not automatically generated in the default project of DevEco Studio. For details about how to create an AbilityStage file, see [AbilityStage Component Container](abilitystage.md).
+> - The callback used to subscribe to system environment variable changes has the same lifecycle as the [AbilityStage](../reference/apis/js-apis-app-ability-abilityStage.md) instance and will be destroyed when the instance is destroyed.
+
+The code snippet below uses the [AbilityStage.onConfigurationUpdate()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonconfigurationupdate) callback to subscribe to the system language changes.
+
+```ts
+import AbilityStage from '@ohos.app.ability.AbilityStage';
+
+let systemLanguage: string; // System language in use.
+
+export default class MyAbilityStage extends AbilityStage {
+ onCreate() {
+ systemLanguage = this.context.config.language; // Obtain the system language in use when the AbilityStage instance is loaded for the first time.
+ console.info(`systemLanguage is ${systemLanguage} `);
+ }
+
+ onConfigurationUpdate(newConfig) {
+ console.info(`onConfigurationUpdated systemLanguage is ${systemLanguage}, newConfig: ${JSON.stringify(newConfig)}`);
+
+ if (systemLanguage !== newConfig.language) {
+ console.info(`systemLanguage from ${systemLanguage} changed to ${newConfig.language}`);
+ systemLanguage = newConfig.language; // Save the new system language as the system language in use, which will be used for comparison.
+ }
+ }
+}
+```
+
+## Using UIAbility for Subscription
+
+The UIAbility component provides the **UIAbility.onConfigurationUpdate()** callback for subscribing to system environment variable changes. This callback is invoked when a system environment variable changes. In this callback, the latest system environment configuration is obtained through the [Configuration](../reference/apis/js-apis-app-ability-configuration.md) object, without restarting the UIAbility.
+
+> **NOTE**
+>
+> The callback used to subscribe to system environment variable changes has the same lifecycle as the UIAbility instance and will be destroyed when the instance is destroyed.
+
+The code snippet below uses the **onConfigurationUpdate()** callback to subscribe to the system language changes.
+
+```ts
+import UIAbility from '@ohos.app.ability.UIAbility';
+
+let systemLanguage: string; // System language in use.
+
+export default class EntryAbility extends UIAbility {
+ onCreate(want, launchParam) {
+ systemLanguage = this.context.config.language; // Obtain the system language in use when the UIAbility instance is loaded for the first time.
+ console.info(`systemLanguage is ${systemLanguage} `);
+ }
+
+ onConfigurationUpdate(newConfig) {
+ console.info(`onConfigurationUpdated systemLanguage is ${systemLanguage}, newConfig: ${JSON.stringify(newConfig)}`);
+
+ if (systemLanguage !== newConfig.language) {
+ console.info(`systemLanguage from ${systemLanguage} changed to ${newConfig.language}`);
+ systemLanguage = newConfig.language; // Save the new system language as the system language in use, which will be used for comparison.
+ }
+ }
+
+ // ...
+}
+```
+
+## Using ExtensionAbility for Subscription
+
+The ExtensionAbility component provides the **onConfigurationUpdate()** callback for subscribing to system environment variable changes. This callback is invoked when a system environment variable changes. In this callback, the latest system environment configuration is obtained through the [Configuration](../reference/apis/js-apis-app-ability-configuration.md) object.
+
+> **NOTE**
+>
+> The callback used to subscribe to system environment variable changes has the same lifecycle as the ExtensionAbility instance and will be destroyed when the instance is destroyed.
+
+The code snippet below uses FormExtensionAbility as an example to describe how to use the **onConfigurationUpdate()** callback to subscribe to system environment variable changes.
+
+```ts
+import FormExtensionAbility from '@ohos.app.form.FormExtensionAbility';
+
+export default class EntryFormAbility extends FormExtensionAbility {
+ onConfigurationUpdate(newConfig) {
+ console.info(`newConfig is ${JSON.stringify(newConfig)}`);
+ }
+
+ // ...
+}
+```
diff --git a/en/application-dev/application-models/uiability-data-sync-with-ui.md b/en/application-dev/application-models/uiability-data-sync-with-ui.md
index 9ed8c8d6f3b307ef44097f1ff67e6dcf472f91a5..e5e5b7b54f2ad8e98048a08599360806e3ea578d 100644
--- a/en/application-dev/application-models/uiability-data-sync-with-ui.md
+++ b/en/application-dev/application-models/uiability-data-sync-with-ui.md
@@ -80,7 +80,7 @@ Before using the APIs provided by **EventHub**, you must obtain an **EventHub**
4. After **event1** is used, you can call [eventHub.off()](../reference/apis/js-apis-inner-application-eventHub.md#eventhuboff) to unsubscribe from the event.
```ts
- // context is the ability-level context of the UIAbility instance.
+ // context is the AbilityContext of the UIAbility instance.
this.context.eventHub.off('event1');
```
@@ -240,10 +240,6 @@ The following provides an example to describe the object overwritten problem in
struct Index {
onPageShow() {
let ctx = globalThis.context; // Obtain the context from globalThis and use it.
- let permissions = ['com.example.permission']
- ctx.requestPermissionsFromUser(permissions,(result) => {
- // ...
- });
}
// Page display.
build() {
@@ -251,7 +247,7 @@ The following provides an example to describe the object overwritten problem in
}
}
```
-
+
3. In the UIAbilityB file, [UIAbilityContext](../reference/apis/js-apis-inner-application-uiAbilityContext.md) is stored in **globalThis** and has the same name as that in the UIAbilityA file.
```ts
@@ -274,10 +270,6 @@ The following provides an example to describe the object overwritten problem in
struct Index {
onPageShow() {
let ctx = globalThis.context; // Obtain the context from globalThis and use it.
- let permissions = ['com.example.permission']
- ctx.requestPermissionsFromUser(permissions,(result) => {
- console.info('requestPermissionsFromUser result:' + JSON.stringify(result));
- });
}
// Page display.
build() {
@@ -285,7 +277,7 @@ The following provides an example to describe the object overwritten problem in
}
}
```
-
+
5. Switch the UIAbilityB instance to the background and switch the UIAbilityA instance to the foreground. In this case, UIAbilityA will not enter the **onCreate()** lifecycle again.
```ts
@@ -307,10 +299,6 @@ The following provides an example to describe the object overwritten problem in
struct Index {
onPageShow() {
let ctx = globalThis.context; // The context in globalThis is the context of UIAbilityB.
- let permissions=['com.example.permission'];
- ctx.requestPermissionsFromUser(permissions,(result) => { // Using this object causes a process breakdown.
- console.info('requestPermissionsFromUser result:' + JSON.stringify(result));
- });
}
// Page display.
build() {
diff --git a/en/application-dev/application-models/uiability-intra-device-interaction.md b/en/application-dev/application-models/uiability-intra-device-interaction.md
index ac3c18e36de67e66e496a92da2269c063503ce7e..c8ba4ec50fa9b67805685468f0b3ab5cdada2377 100644
--- a/en/application-dev/application-models/uiability-intra-device-interaction.md
+++ b/en/application-dev/application-models/uiability-intra-device-interaction.md
@@ -15,9 +15,11 @@ This topic describes the UIAbility interaction modes in the following scenarios.
- [Starting UIAbility of Another Application and Obtaining the Return Result](#starting-uiability-of-another-application-and-obtaining-the-return-result)
+- [Starting UIAbility with Window Mode Specified (for System Applications Only)](#starting-uiability-with-window-mode-specified-for-system-applications-only)
+
- [Starting a Specified Page of UIAbility](#starting-a-specified-page-of-uiability)
-- [Using Ability Call to Implement UIAbility Interaction](#using-ability-call-to-implement-uiability-interaction)
+- [Using Call to Implement UIAbility Interaction (for System Applications Only)](#using-call-to-implement-uiability-interaction-for-system-applications-only)
## Starting UIAbility in the Same Application
@@ -29,45 +31,52 @@ Assume that your application has two UIAbility components: EntryAbility and Func
1. In EntryAbility, call [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) to start UIAbility. The [want](../reference/apis/js-apis-app-ability-want.md) parameter is the entry parameter for starting the UIAbility instance. In the **want** parameter, **bundleName** indicates the bundle name of the application to start; **abilityName** indicates the name of the UIAbility to start; **moduleName** is required only when the target UIAbility belongs to a different module; **parameters** is used to carry custom information. For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability).
```ts
+ let context = ...; // UIAbilityContext
let wantInfo = {
- deviceId: '', // An empty deviceId indicates the local device.
- bundleName: 'com.example.myapplication',
- abilityName: 'FuncAbility',
- moduleName: 'module1', // moduleName is optional.
- parameters: {// Custom information.
- info: 'From the Index page of EntryAbility',
- },
+ deviceId: '', // An empty deviceId indicates the local device.
+ bundleName: 'com.example.myapplication',
+ abilityName: 'FuncAbility',
+ moduleName: 'module1', // moduleName is optional.
+ parameters: {// Custom information.
+ info: 'From the Index page of EntryAbility',
+ },
}
- // context is the ability-level context of the initiator UIAbility.
- this.context.startAbility(wantInfo).then(() => {
- // ...
+ // context is the UIAbilityContext of the initiator UIAbility.
+ context.startAbility(wantInfo).then(() => {
+ // ...
}).catch((err) => {
- // ...
+ // ...
})
```
-
-2. Use the FuncAbility lifecycle callback to receive the parameters passed from EntryAbility.
+
+2. In FuncAbility, use [onCreate()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityoncreate) or [onNewWant()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityonnewwant) to receive the parameters passed in by EntryAbility.
```ts
import UIAbility from '@ohos.app.ability.UIAbility';
import window from '@ohos.window';
export default class FuncAbility extends UIAbility {
- onCreate(want, launchParam) {
- // Receive the parameters passed by the caller UIAbility.
- let funcAbilityWant = want;
- let info = funcAbilityWant?.parameters?.info;
- // ...
- }
+ onCreate(want, launchParam) {
+ // Receive the parameters passed by the initiator UIAbility.
+ let funcAbilityWant = want;
+ let info = funcAbilityWant?.parameters?.info;
+ // ...
+ }
}
```
+ > **NOTE**
+ >
+   > In the FuncAbility instance that is started, you can obtain the PID and bundle name of the initiator UIAbility from **parameters** in the received **want** parameter.
+
3. To stop the **UIAbility** instance after the FuncAbility service is complete, call [terminateSelf()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateself) in FuncAbility.
```ts
- // context is the ability-level context of the UIAbility instance to stop.
- this.context.terminateSelf((err) => {
- // ...
+ let context = ...; // UIAbilityContext
+
+ // context is the UIAbilityContext of the UIAbility instance to stop.
+ context.terminateSelf((err) => {
+ // ...
});
```
@@ -85,67 +94,70 @@ When starting FuncAbility from EntryAbility, you want the result to be returned
1. In EntryAbility, call [startAbilityForResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult) to start FuncAbility. Use **data** in the asynchronous callback to receive information returned after FuncAbility stops itself. For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability).
```ts
+ let context = ...; // UIAbilityContext
let wantInfo = {
- deviceId: '', // An empty deviceId indicates the local device.
- bundleName: 'com.example.myapplication',
- abilityName: 'FuncAbility',
- moduleName: 'module1', // moduleName is optional.
- parameters: {// Custom information.
- info: 'From the Index page of EntryAbility',
- },
+ deviceId: '', // An empty deviceId indicates the local device.
+ bundleName: 'com.example.myapplication',
+ abilityName: 'FuncAbility',
+ moduleName: 'module1', // moduleName is optional.
+ parameters: {// Custom information.
+ info: 'From the Index page of EntryAbility',
+ },
}
- // context is the ability-level context of the initiator UIAbility.
- this.context.startAbilityForResult(wantInfo).then((data) => {
- // ...
+ // context is the UIAbilityContext of the initiator UIAbility.
+ context.startAbilityForResult(wantInfo).then((data) => {
+ // ...
}).catch((err) => {
- // ...
+ // ...
})
```
-
+
2. Call [terminateSelfWithResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult) to stop FuncAbility. Use the input parameter **abilityResult** to carry the information that FuncAbility needs to return to EntryAbility.
```ts
+ let context = ...; // UIAbilityContext
const RESULT_CODE: number = 1001;
let abilityResult = {
- resultCode: RESULT_CODE,
- want: {
- bundleName: 'com.example.myapplication',
- abilityName: 'FuncAbility',
- moduleName: 'module1',
- parameters: {
- info: 'From the Index page of FuncAbility',
- },
+ resultCode: RESULT_CODE,
+ want: {
+ bundleName: 'com.example.myapplication',
+ abilityName: 'FuncAbility',
+ moduleName: 'module1',
+ parameters: {
+ info: 'From the Index page of FuncAbility',
},
+ },
}
- // context is the ability-level context of the callee UIAbility.
- this.context.terminateSelfWithResult(abilityResult, (err) => {
- // ...
+  // context is the UIAbilityContext of the target UIAbility.
+ context.terminateSelfWithResult(abilityResult, (err) => {
+ // ...
});
```
-
+
3. After FuncAbility stops itself, EntryAbility uses [startAbilityForResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult) to receive the information returned by FuncAbility. The value of **RESULT_CODE** must be the same as the preceding value.
```ts
+ let context = ...; // UIAbilityContext
const RESULT_CODE: number = 1001;
// ...
- // context is the ability-level context of the initiator UIAbility.
- this.context.startAbilityForResult(want).then((data) => {
- if (data?.resultCode === RESULT_CODE) {
- // Parse the information returned by the callee UIAbility.
- let info = data.want?.parameters?.info;
- // ...
- }
- }).catch((err) => {
+ // context is the UIAbilityContext of the initiator UIAbility.
+ context.startAbilityForResult(wantInfo).then((data) => {
+ if (data?.resultCode === RESULT_CODE) {
+ // Parse the information returned by the target UIAbility.
+ let info = data.want?.parameters?.info;
// ...
+ }
+ }).catch((err) => {
+ // ...
})
```
## Starting UIAbility of Another Application
-Generally, the user only needs to do a common operation (for example, selecting a document application to view the document content) to start the UIAbility of another application. The [implicit Want launch mode](want-overview.md#types-of-want) is recommended. The system identifies a matched UIAbility and starts it based on the **want** parameter of the caller.
+Generally, the user starts the UIAbility of another application through a simple operation (for example, selecting a document application to view a document). The [implicit Want launch mode](want-overview.md#types-of-want) is recommended: the system identifies a matched UIAbility and starts it based on the **want** parameter of the initiator UIAbility.
There are two ways to start **UIAbility**: [explicit and implicit](want-overview.md).
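+
+As a rough illustration (all field values below are placeholders), an explicit **want** names the target directly, whereas an implicit **want** only describes the intended operation and lets the system match a UIAbility:
+
+```ts
+// Explicit want: names the bundle and ability to start.
+let explicitWant = {
+  bundleName: 'com.example.myapplication',
+  abilityName: 'FuncAbility',
+};
+
+// Implicit want: describes the operation; the system matches installed
+// UIAbility components whose skills declare this action.
+let implicitWant = {
+  action: 'ohos.want.action.viewData',
+  entities: ['entity.system.default'],
+};
+```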
@@ -181,36 +193,38 @@ This section describes how to start the UIAbility of another application through
}
```
-2. Include **entities** and **actions** of the caller's **want** parameter into **entities** and **actions** under **skills** of the target UIAbility. After the system matches the UIAbility that meets the **entities** and **actions** information, a dialog box is displayed, showing the list of matched UIAbility instances for users to select. For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability).
+2. Include **entities** and **actions** of the initiator UIAbility's **want** parameter into **entities** and **actions** under **skills** of the target UIAbility. After the system matches the UIAbility that meets the **entities** and **actions** information, a dialog box is displayed, showing the list of matched UIAbility instances for users to select. For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability).
```ts
+ let context = ...; // UIAbilityContext
let wantInfo = {
- deviceId: '', // An empty deviceId indicates the local device.
- // Uncomment the line below if you want to implicitly query data only in the specific bundle.
- // bundleName: 'com.example.myapplication',
- action: 'ohos.want.action.viewData',
- // entities can be omitted.
- entities: ['entity.system.default'],
+ deviceId: '', // An empty deviceId indicates the local device.
+ // Uncomment the line below if you want to implicitly query data only in the specific bundle.
+ // bundleName: 'com.example.myapplication',
+ action: 'ohos.want.action.viewData',
+ // entities can be omitted.
+ entities: ['entity.system.default'],
}
- // context is the ability-level context of the initiator UIAbility.
- this.context.startAbility(wantInfo).then(() => {
- // ...
+ // context is the UIAbilityContext of the initiator UIAbility.
+ context.startAbility(wantInfo).then(() => {
+ // ...
}).catch((err) => {
- // ...
+ // ...
})
```
-
- The following figure shows the effect. When you click **Open PDF**, a dialog box is displayed for you to select.
-
- 
+
+ The following figure shows the effect. When you click **Open PDF**, a dialog box is displayed for you to select.
+ 
3. To stop the **UIAbility** instance after the document application is used, call [terminateSelf()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateself).
```ts
- // context is the ability-level context of the UIAbility instance to stop.
- this.context.terminateSelf((err) => {
- // ...
+ let context = ...; // UIAbilityContext
+
+  // context is the UIAbilityContext of the UIAbility instance to stop.
+ context.terminateSelf((err) => {
+ // ...
});
```
@@ -245,68 +259,121 @@ If you want to obtain the return result when using implicit Want to start the UI
}
```
-2. Call [startAbilityForResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult) to start the UIAbility of the payment application. Include **entities** and **actions** of the caller's **want** parameter into **entities** and **actions** under **skills** of the target UIAbility. Use **data** in the asynchronous callback to receive the information returned to the caller after the payment UIAbility stops itself. After the system matches the UIAbility that meets the **entities** and **actions** information, a dialog box is displayed, showing the list of matched UIAbility instances for users to select.
+2. Call [startAbilityForResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult) to start the UIAbility of the payment application. Include **entities** and **actions** of the initiator UIAbility's **want** parameter into **entities** and **actions** under **skills** of the target UIAbility. Use **data** in the asynchronous callback to receive the information returned to the initiator UIAbility after the payment UIAbility stops itself. After the system matches the UIAbility that meets the **entities** and **actions** information, a dialog box is displayed, showing the list of matched UIAbility instances for users to select.
```ts
+ let context = ...; // UIAbilityContext
let wantInfo = {
- deviceId: '', // An empty deviceId indicates the local device.
- // Uncomment the line below if you want to implicitly query data only in the specific bundle.
- // bundleName: 'com.example.myapplication',
- action: 'ohos.want.action.editData',
- // entities can be omitted.
- entities: ['entity.system.default'],
+ deviceId: '', // An empty deviceId indicates the local device.
+ // Uncomment the line below if you want to implicitly query data only in the specific bundle.
+ // bundleName: 'com.example.myapplication',
+ action: 'ohos.want.action.editData',
+ // entities can be omitted.
+ entities: ['entity.system.default'],
}
- // context is the ability-level context of the initiator UIAbility.
- this.context.startAbilityForResult(wantInfo).then((data) => {
- // ...
+ // context is the UIAbilityContext of the initiator UIAbility.
+ context.startAbilityForResult(wantInfo).then((data) => {
+ // ...
}).catch((err) => {
- // ...
+ // ...
})
```
-
+
3. After the payment is finished, call [terminateSelfWithResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult) to stop the payment UIAbility and return the **abilityResult** parameter.
```ts
+ let context = ...; // UIAbilityContext
const RESULT_CODE: number = 1001;
let abilityResult = {
- resultCode: RESULT_CODE,
- want: {
- bundleName: 'com.example.myapplication',
- abilityName: 'EntryAbility',
- moduleName: 'entry',
- parameters: {
- payResult: 'OKay',
- },
+ resultCode: RESULT_CODE,
+ want: {
+ bundleName: 'com.example.myapplication',
+ abilityName: 'EntryAbility',
+ moduleName: 'entry',
+ parameters: {
+ payResult: 'OKay',
},
+ },
}
- // context is the ability-level context of the callee UIAbility.
- this.context.terminateSelfWithResult(abilityResult, (err) => {
- // ...
+  // context is the UIAbilityContext of the target UIAbility.
+ context.terminateSelfWithResult(abilityResult, (err) => {
+ // ...
});
```
-
+
4. Receive the information returned by the payment application in the callback of the [startAbilityForResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult) method. The value of **RESULT_CODE** must be the same as that returned by [terminateSelfWithResult()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextterminateselfwithresult).
```ts
+ let context = ...; // UIAbilityContext
const RESULT_CODE: number = 1001;
let want = {
// Want parameter information.
};
- // context is the ability-level context of the initiator UIAbility.
- this.context.startAbilityForResult(want).then((data) => {
- if (data?.resultCode === RESULT_CODE) {
- // Parse the information returned by the callee UIAbility.
- let payResult = data.want?.parameters?.payResult;
- // ...
- }
- }).catch((err) => {
+ // context is the UIAbilityContext of the initiator UIAbility.
+ context.startAbilityForResult(want).then((data) => {
+ if (data?.resultCode === RESULT_CODE) {
+ // Parse the information returned by the target UIAbility.
+ let payResult = data.want?.parameters?.payResult;
// ...
+ }
+ }).catch((err) => {
+ // ...
})
```
+## Starting UIAbility with Window Mode Specified (for System Applications Only)
+
+By specifying the window mode when starting the UIAbility of an application, you determine how the application is displayed: in full-screen, floating window, or split-screen mode.
+
+In full-screen mode, an application occupies the entire screen after being started. Users cannot view other windows or applications. This mode is suitable for an application that requires users to focus on a specific task or UI.
+
+In floating window mode, an application is displayed on the screen as a floating window after being started. Users can easily switch to other windows or applications. This mode is suitable for an application that requires users to process multiple tasks at the same time.
+
+In split-screen mode, two applications occupy the entire screen, with one on the left or in the upper part of the screen and the other on the right or in the lower part. This mode helps users improve multi-task processing efficiency.
+
+The window mode is specified by the **windowMode** field in the [StartOptions](../reference/apis/js-apis-app-ability-startOptions.md) parameter of [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability).
+
+> **NOTE**
+>
+> 1. If the **windowMode** field is not specified, the UIAbility is started in the default window mode.
+> 2. To ensure that the application can be displayed in the required window mode, check the **supportWindowMode** field in the [abilities](../quick-start/module-configuration-file.md#abilities) tag in the [module.json5 file](../quick-start/module-configuration-file.md) of the UIAbility and make sure the specified window mode is supported.
+
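+As a sketch (the ability name and mode list are examples, not taken from this guide), the **supportWindowMode** field in the [module.json5 file](../quick-start/module-configuration-file.md) might look like this:
+
+```json
+{
+  "module": {
+    "abilities": [
+      {
+        "name": "FuncAbility",
+        "supportWindowMode": ["fullscreen", "split", "floating"]
+      }
+    ]
+  }
+}
+```
+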
+The following uses the floating window mode as an example to describe how to start the FuncAbility from the EntryAbility page.
+
+1. Add the [StartOptions](../reference/apis/js-apis-app-ability-startOptions.md) parameter in [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability).
+2. Set the **windowMode** field in the [StartOptions](../reference/apis/js-apis-app-ability-startOptions.md) parameter to **WINDOW_MODE_FLOATING**, indicating that the UIAbility will be displayed in a floating window.
+
+For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability).
+
+```ts
+import AbilityConstant from '@ohos.app.ability.AbilityConstant';
+
+let context = ...; // UIAbilityContext
+let wantInfo = {
+ deviceId: '', // An empty deviceId indicates the local device.
+ bundleName: 'com.example.myapplication',
+ abilityName: 'FuncAbility',
+ moduleName: 'module1', // moduleName is optional.
+ parameters: {// Custom information.
+ info: 'From the Index page of EntryAbility',
+ },
+}
+let options = {
+ windowMode: AbilityConstant.WindowMode.WINDOW_MODE_FLOATING
+}
+// context is the UIAbilityContext of the initiator UIAbility.
+context.startAbility(wantInfo, options).then(() => {
+ // ...
+}).catch((err) => {
+ // ...
+})
+```
+
+The display effect is shown below.
+
## Starting a Specified Page of UIAbility
@@ -315,10 +382,11 @@ A UIAbility component can have multiple pages. When it is started in different s
### Specifying a Startup Page
-When the caller UIAbility starts another UIAbility, it usually needs to redirect to a specified page. For example, FuncAbility contains two pages: Index (corresponding to the home page) and Second (corresponding to function A page). You can configure the specified page URL in the **want** parameter by adding a custom parameter to **parameters** in **want**. For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability).
+When the initiator UIAbility starts another UIAbility, it usually needs to redirect to a specified page. For example, FuncAbility contains two pages: Index (corresponding to the home page) and Second (corresponding to function A page). You can configure the specified page URL in the **want** parameter by adding a custom parameter to **parameters** in **want**. For details about how to obtain the context, see [Obtaining the Context of UIAbility](uiability-usage.md#obtaining-the-context-of-uiability).
```ts
+let context = ...; // UIAbilityContext
let wantInfo = {
deviceId: '', // An empty deviceId indicates the local device.
bundleName: 'com.example.myapplication',
@@ -328,8 +396,8 @@ let wantInfo = {
router: 'funcA',
},
}
-// context is the ability-level context of the initiator UIAbility.
-this.context.startAbility(wantInfo).then(() => {
+// context is the UIAbilityContext of the initiator UIAbility.
+context.startAbility(wantInfo).then(() => {
// ...
}).catch((err) => {
// ...
@@ -347,25 +415,25 @@ import UIAbility from '@ohos.app.ability.UIAbility'
import Window from '@ohos.window'
export default class FuncAbility extends UIAbility {
- funcAbilityWant;
-
- onCreate(want, launchParam) {
- // Receive the parameters passed by the caller UIAbility.
- this.funcAbilityWant = want;
- }
-
- onWindowStageCreate(windowStage: Window.WindowStage) {
- // Main window is created. Set a main page for this ability.
- let url = 'pages/Index';
- if (this.funcAbilityWant?.parameters?.router) {
- if (this.funcAbilityWant.parameters.router === 'funA') {
- url = 'pages/Second';
- }
- }
- windowStage.loadContent(url, (err, data) => {
- // ...
- });
+ funcAbilityWant;
+
+ onCreate(want, launchParam) {
+ // Receive the parameters passed by the initiator UIAbility.
+ this.funcAbilityWant = want;
+ }
+
+ onWindowStageCreate(windowStage: Window.WindowStage) {
+ // Main window is created. Set a main page for this UIAbility.
+ let url = 'pages/Index';
+ if (this.funcAbilityWant?.parameters?.router) {
+      if (this.funcAbilityWant.parameters.router === 'funcA') {
+ url = 'pages/Second';
+ }
}
+ windowStage.loadContent(url, (err, data) => {
+ // ...
+ });
+ }
}
```
@@ -384,11 +452,11 @@ In summary, when a UIAbility instance of application A has been created and the
import UIAbility from '@ohos.app.ability.UIAbility'
export default class FuncAbility extends UIAbility {
- onNewWant(want, launchParam) {
- // Receive the parameters passed by the caller UIAbility.
- globalThis.funcAbilityWant = want;
- // ...
- }
+ onNewWant(want, launchParam) {
+ // Receive the parameters passed by the initiator UIAbility.
+ globalThis.funcAbilityWant = want;
+ // ...
+ }
}
```
@@ -419,215 +487,200 @@ In summary, when a UIAbility instance of application A has been created and the
> **NOTE**
>
-> When the [launch type of the callee UIAbility](uiability-launch-type.md) is set to **standard**, a new instance is created each time the callee UIAbility is started. In this case, the [onNewWant()](../reference/apis/js-apis-app-ability-uiAbility.md#abilityonnewwant) callback will not be invoked.
+> When the [launch type of the target UIAbility](uiability-launch-type.md) is set to **standard**, a new instance is created each time the target UIAbility is started. In this case, the [onNewWant()](../reference/apis/js-apis-app-ability-uiAbility.md#abilityonnewwant) callback will not be invoked.
-## Using Ability Call to Implement UIAbility Interaction
+## Using Call to Implement UIAbility Interaction (for System Applications Only)
-This feature applies only to system applications. Ability call is an extension of the UIAbility capability. It enables the UIAbility to be invoked by and communicate with external systems. The UIAbility invoked can be either started in the foreground or created and run in the background. You can use the ability call to implement data sharing between two UIAbility instances (caller ability and callee ability) through IPC.
+Call is an extension of the UIAbility capability. It enables the UIAbility to be invoked by and communicate with external systems. The UIAbility invoked can be either started in the foreground or created and run in the background. You can use the call to implement data sharing between two UIAbility instances (CallerAbility and CalleeAbility) through IPC.
-The core API used for the ability call is **startAbilityByCall**, which differs from **startAbility** in the following ways:
+The core API used for the call is **startAbilityByCall**, which differs from **startAbility** in the following ways:
-- **startAbilityByCall** supports ability launch in the foreground and background, whereas **startAbility** supports ability launch in the foreground only.
+- **startAbilityByCall** supports UIAbility launch in the foreground and background, whereas **startAbility** supports UIAbility launch in the foreground only.
-- The caller ability can use the caller object returned by **startAbilityByCall** to communicate with the callee ability, but **startAbility** does not provide the communication capability.
+- The CallerAbility can use the caller object returned by **startAbilityByCall** to communicate with the CalleeAbility, but **startAbility** does not provide the communication capability.
-Ability call is usually used in the following scenarios:
+Call is usually used in the following scenarios:
-- Communicating with the callee ability
+- Communicating with the CalleeAbility
-- Starting the callee ability in the background
+- Starting the CalleeAbility in the background
-**Table 1** Terms used in the ability call
+**Table 1** Terms used in the call
| **Term**| Description|
| -------- | -------- |
-| CallerAbility | UIAbility that triggers the ability call.|
-| CalleeAbility | UIAbility invoked by the ability call.|
-| Caller | Object returned by **startAbilityByCall** and used by the caller ability to communicate with the callee ability.|
-| Callee | Object held by the callee ability to communicate with the caller ability.|
+| CallerAbility| UIAbility that triggers the call.|
+| CalleeAbility | UIAbility invoked by the call.|
+| Caller | Object returned by **startAbilityByCall** and used by the CallerAbility to communicate with the CalleeAbility.|
+| Callee | Object held by the CalleeAbility to communicate with the CallerAbility.|
-The following figure shows the ability call process.
+The following figure shows the call process.
-Figure 1 Ability call process
+ Figure 1 Call process
-
+ 
-- The caller ability uses **startAbilityByCall** to obtain a caller object and uses **call()** of the caller object to send data to the callee ability.
+- The CallerAbility uses **startAbilityByCall** to obtain a caller object and uses **call()** of the caller object to send data to the CalleeAbility.
-- The callee ability, which holds a **Callee** object, uses **on()** of the **Callee** object to register a callback. This callback is invoked when the callee ability receives data from the caller ability.
+- The CalleeAbility, which holds a **Callee** object, uses **on()** of the **Callee** object to register a callback. This callback is invoked when the CalleeAbility receives data from the CallerAbility.
> **NOTE**
-> 1. Currently, only system applications can use the ability call.
+> 1. Currently, only system applications can use the call.
>
-> 2. The launch type of the callee ability must be **singleton**.
+> 2. The launch type of the CalleeAbility must be **singleton**.
>
-> 3. Both local (intra-device) and cross-device ability calls are supported. The following describes how to initiate a local call. For details about how to initiate a cross-device ability call, see [Using Cross-Device Ability Call](hop-multi-device-collaboration.md#using-cross-device-ability-call).
+> 3. Both local (intra-device) and cross-device calls are supported. The following describes how to initiate a local call. For details about how to initiate a cross-device call, see [Using Cross-Device Call](hop-multi-device-collaboration.md#using-cross-device-call).
### Available APIs
-The following table describes the main APIs used for the ability call. For details, see [AbilityContext](../reference/apis/js-apis-app-ability-uiAbility.md#caller).
+The following table describes the main APIs used for the call. For details, see [AbilityContext](../reference/apis/js-apis-app-ability-uiAbility.md#caller).
-**Table 2** Ability call APIs
+ **Table 2** Call APIs
| API| Description|
| -------- | -------- |
| startAbilityByCall(want: Want): Promise<Caller> | Starts a UIAbility in the foreground (through the **want** configuration) or background (default) and obtains the caller object for communication with the UIAbility. For details, see [AbilityContext](../reference/apis/js-apis-inner-application-uiAbilityContext.md#abilitycontextstartabilitybycall) or [ServiceExtensionContext](../reference/apis/js-apis-inner-application-serviceExtensionContext.md#serviceextensioncontextstartabilitybycall).|
-| on(method: string, callback: CalleeCallBack): void | Callback invoked when the callee ability registers a method.|
-| off(method: string): void | Callback invoked when the callee ability deregisters a method.|
-| call(method: string, data: rpc.Sequenceable): Promise<void> | Sends agreed sequenceable data to the callee ability.|
-| callWithResult(method: string, data: rpc.Sequenceable): Promise<rpc.MessageParcel> | Sends agreed sequenceable data to the callee ability and obtains the agreed sequenceable data returned by the callee ability.|
+| on(method: string, callback: CalleeCallBack): void | Callback invoked when the CalleeAbility registers a method.|
+| off(method: string): void | Callback invoked when the CalleeAbility deregisters a method.|
+| call(method: string, data: rpc.Parcelable): Promise<void> | Sends agreed parcelable data to the CalleeAbility.|
+| callWithResult(method: string, data: rpc.Parcelable): Promise<rpc.MessageSequence> | Sends agreed parcelable data to the CalleeAbility and obtains the agreed parcelable data returned by the CalleeAbility.|
| release(): void | Releases the caller object.|
| on(type: "release", callback: OnReleaseCallback): void | Callback invoked when the caller object is released.|
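+
+As a quick orientation before the full steps, the sketch below shows the two send APIs from the table. It assumes a **caller** object already returned by **startAbilityByCall** and the agreed parcelable class **MyParcelable** defined later in this topic:
+
+```ts
+async function sendToCallee(caller) {
+  // Fire-and-forget: send the agreed parcelable data to the CalleeAbility.
+  await caller.call('CallSendMsg', new MyParcelable(1, 'origin_Msg'));
+
+  // Round trip: send data and read the agreed parcelable reply.
+  let reply = await caller.callWithResult('CallSendMsg', new MyParcelable(2, 'query'));
+  let result = new MyParcelable(0, '');
+  reply.readParcelable(result);
+  console.info(`result[${result.num}, ${result.str}]`);
+}
+```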
-The implementation of using the ability call for UIAbility interaction involves two parts.
+The implementation of using the call for UIAbility interaction involves two parts.
-- [Creating a Callee Ability](#creating-a-callee-ability)
+- [Creating a CalleeAbility](#creating-a-calleeability)
-- [Accessing the Callee Ability](#accessing-the-callee-ability)
+- [Accessing the CalleeAbility](#accessing-the-calleeability)
-### Creating a Callee Ability
+### Creating a CalleeAbility
-For the callee ability, implement the callback to receive data and the methods to marshal and unmarshal data. When data needs to be received, use **on()** to register a listener. When data does not need to be received, use **off()** to deregister the listener.
+For the CalleeAbility, implement the callback to receive data and the methods to marshal and unmarshal data. When data needs to be received, use **on()** to register a listener. When data does not need to be received, use **off()** to deregister the listener.
-1. Configure the ability launch type.
+1. Configure the launch type of the UIAbility.
+
+ For example, set the launch type of the CalleeAbility to **singleton**. For details, see [UIAbility Component Launch Type](uiability-launch-type.md).
- Set **launchType** of the callee ability to **singleton** in the **module.json5** file.
-
- | JSON Field| Description|
- | -------- | -------- |
- | "launchType" | Ability launch type. Set this parameter to **singleton**.|
-
- An example of the ability configuration is as follows:
-
- ```json
- "abilities":[{
- "name": ".CalleeAbility",
- "srcEntrance": "./ets/CalleeAbility/CalleeAbility.ts",
- "launchType": "singleton",
- "description": "$string:CalleeAbility_desc",
- "icon": "$media:icon",
- "label": "$string:CalleeAbility_label",
- "visible": true
- }]
- ```
-
2. Import the **UIAbility** module.
-
+
```ts
- import Ability from '@ohos.app.ability.UIAbility';
+ import UIAbility from '@ohos.app.ability.UIAbility';
```
-3. Define the agreed sequenceable data.
+3. Define the agreed parcelable data.
- The data formats sent and received by the caller and callee abilities must be consistent. In the following example, the data formats are number and string.
+ The data formats sent and received by the CallerAbility and CalleeAbility must be consistent. In the following example, the data formats are number and string.
+
```ts
- export default class MySequenceable {
- num: number = 0
- str: string = ""
+ export default class MyParcelable {
+ num: number = 0;
+ str: string = '';
- constructor(num, string) {
- this.num = num
- this.str = string
- }
+ constructor(num, string) {
+ this.num = num;
+ this.str = string;
+ }
- marshalling(messageParcel) {
- messageParcel.writeInt(this.num)
- messageParcel.writeString(this.str)
- return true
- }
+ marshalling(messageSequence) {
+ messageSequence.writeInt(this.num);
+ messageSequence.writeString(this.str);
+      return true;
+ }
- unmarshalling(messageParcel) {
- this.num = messageParcel.readInt()
- this.str = messageParcel.readString()
- return true
- }
+ unmarshalling(messageSequence) {
+ this.num = messageSequence.readInt();
+ this.str = messageSequence.readString();
+ return true;
+ }
}
```
4. Implement **Callee.on** and **Callee.off**.
- The time to register a listener for the callee ability depends on your application. The data sent and received before the listener is registered and that after the listener is deregistered are not processed. In the following example, the **MSG_SEND_METHOD** listener is registered in **onCreate** of the ability and deregistered in **onDestroy**. After receiving sequenceable data, the application processes the data and returns the data result. You need to implement processing based on service requirements. The sample code is as follows:
+ The time to register a listener for the CalleeAbility depends on your application. The data sent and received before the listener is registered and that after the listener is deregistered are not processed. In the following example, the **MSG_SEND_METHOD** listener is registered in **onCreate** of the UIAbility and deregistered in **onDestroy**. After receiving parcelable data, the application processes the data and returns the data result. You need to implement processing based on service requirements. The sample code is as follows:
+
```ts
const TAG: string = '[CalleeAbility]';
const MSG_SEND_METHOD: string = 'CallSendMsg';
function sendMsgCallback(data) {
- console.info('CalleeSortFunc called');
+ console.info('CalleeSortFunc called');
- // Obtain the sequenceable data sent by the caller ability.
- let receivedData = new MySequenceable(0, '');
- data.readSequenceable(receivedData);
- console.info(`receiveData[${receivedData.num}, ${receivedData.str}]`);
+ // Obtain the parcelable data sent by the CallerAbility.
+ let receivedData = new MyParcelable(0, '');
+ data.readParcelable(receivedData);
+ console.info(`receiveData[${receivedData.num}, ${receivedData.str}]`);
- // Process the data.
- // Return the sequenceable data result to the caller ability.
- return new MySequenceable(receivedData.num + 1, `send ${receivedData.str} succeed`);
+ // Process the data.
+ // Return the parcelable data result to the CallerAbility.
+ return new MyParcelable(receivedData.num + 1, `send ${receivedData.str} succeed`);
}
- export default class CalleeAbility extends Ability {
- onCreate(want, launchParam) {
- try {
- this.callee.on(MSG_SEND_METHOD, sendMsgCallback);
- } catch (error) {
- console.info(`${MSG_SEND_METHOD} register failed with error ${JSON.stringify(error)}`);
- }
+ export default class CalleeAbility extends UIAbility {
+ onCreate(want, launchParam) {
+ try {
+ this.callee.on(MSG_SEND_METHOD, sendMsgCallback);
+ } catch (error) {
+ console.info(`${MSG_SEND_METHOD} register failed with error ${JSON.stringify(error)}`);
}
+ }
- onDestroy() {
- try {
- this.callee.off(MSG_SEND_METHOD);
- } catch (error) {
- console.error(TAG, `${MSG_SEND_METHOD} unregister failed with error ${JSON.stringify(error)}`);
- }
+ onDestroy() {
+ try {
+ this.callee.off(MSG_SEND_METHOD);
+ } catch (error) {
+ console.error(TAG, `${MSG_SEND_METHOD} unregister failed with error ${JSON.stringify(error)}`);
}
+ }
}
```
-### Accessing the Callee Ability
+### Accessing the CalleeAbility
1. Import the **UIAbility** module.
```ts
- import Ability from '@ohos.app.ability.UIAbility';
+ import UIAbility from '@ohos.app.ability.UIAbility';
```
2. Obtain the caller interface.
-
- The **context** attribute of the ability implements **startAbilityByCall** to obtain the caller object for communication. The following example uses **this.context** to obtain the **context** attribute of the ability, uses **startAbilityByCall** to start the callee ability, obtain the caller object, and register the **onRelease** listener of the caller ability. You need to implement processing based on service requirements.
+   The **UIAbilityContext** attribute implements **startAbilityByCall** to obtain the caller object for communication. The following example uses **this.context** to obtain the **UIAbilityContext**, calls **startAbilityByCall** to start the CalleeAbility and obtain the caller object, and registers the **onRelease** listener of the CallerAbility. You need to implement processing based on service requirements.
+
```ts
- // Register the onRelease() listener of the caller ability.
+ // Register the onRelease() listener of the CallerAbility.
private regOnRelease(caller) {
- try {
- caller.on("release", (msg) => {
- console.info(`caller onRelease is called ${msg}`);
- })
- console.info('caller register OnRelease succeed');
- } catch (error) {
- console.info(`caller register OnRelease failed with ${error}`);
- }
+ try {
+ caller.on('release', (msg) => {
+ console.info(`caller onRelease is called ${msg}`);
+ })
+ console.info('caller register OnRelease succeed');
+ } catch (error) {
+ console.info(`caller register OnRelease failed with ${error}`);
+ }
}
async onButtonGetCaller() {
- try {
- this.caller = await context.startAbilityByCall({
- bundleName: 'com.samples.CallApplication',
- abilityName: 'CalleeAbility'
- })
- if (this.caller === undefined) {
- console.info('get caller failed')
- return
- }
- console.info('get caller success')
- this.regOnRelease(this.caller)
- } catch (error) {
- console.info(`get caller failed with ${error}`)
+ try {
+      this.caller = await this.context.startAbilityByCall({
+ bundleName: 'com.samples.CallApplication',
+ abilityName: 'CalleeAbility'
+ })
+ if (this.caller === undefined) {
+ console.info('get caller failed')
+ return
}
+ console.info('get caller success')
+ this.regOnRelease(this.caller)
+ } catch (error) {
+ console.info(`get caller failed with ${error}`)
+ }
}
```
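+
+When the communication is finished, release the caller object so that related resources can be reclaimed. A minimal sketch, assuming **this.caller** was obtained as above:
+
+```ts
+releaseCall() {
+  try {
+    this.caller.release();
+    this.caller = undefined;
+    console.info('caller release succeed');
+  } catch (error) {
+    console.info(`caller release failed with ${error}`);
+  }
+}
+```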
diff --git a/en/application-dev/application-models/uiability-launch-type.md b/en/application-dev/application-models/uiability-launch-type.md
index 70c212ed46e769dbdf4e0c1fd347403c463f6004..6442ffc4359254bbed7d7da85ec4b753f0f3eea4 100644
--- a/en/application-dev/application-models/uiability-launch-type.md
+++ b/en/application-dev/application-models/uiability-launch-type.md
@@ -19,7 +19,7 @@ Each time [startAbility()](../reference/apis/js-apis-inner-application-uiAbility
**Figure 1** Demonstration effect in singleton mode
-
+
> **NOTE**
>
@@ -47,9 +47,9 @@ To use the singleton mode, set **launchType** in the [module.json5 configuration
In standard mode, each time [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) is called, a new UIAbility instance of this type is created in the application process. Multiple UIAbility instances of this type are displayed in **Recents**.
-**Figure 2** Demonstration effect in standard mode
+**Figure 2** Demonstration effect in standard mode
-
+
To use the standard mode, set **launchType** in the [module.json5 configuration file](../quick-start/module-configuration-file.md) to **standard**.
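+
+A sketch of the corresponding **module.json5** fragment (the ability name is an example):
+
+```json
+{
+  "module": {
+    "abilities": [
+      {
+        "name": "EntryAbility",
+        "launchType": "standard"
+      }
+    ]
+  }
+}
+```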
@@ -75,12 +75,12 @@ The **specified** mode is used in some special scenarios. For example, in a docu
**Figure 3** Demonstration effect in specified mode
-
+
-For example, there are EntryAbility and SpecifiedAbility, and the launch type of SpecifiedAbility is set to **specified**. You are required to start SpecifiedAbility from EntryAbility.
+For example, there are two UIAbility components: EntryAbility and SpecifiedAbility (with the launch type **specified**). You are required to start SpecifiedAbility from EntryAbility.
+
+1. In SpecifiedAbility, set the **launchType** field in the [module.json5 file](../quick-start/module-configuration-file.md) to **specified**.
-1. In SpecifiedAbility, set the **launchType** field in the [module.json5 configuration file](../quick-start/module-configuration-file.md) to **specified**.
-
```json
{
"module": {
@@ -95,9 +95,8 @@ For example, there are EntryAbility and SpecifiedAbility, and the launch type of
}
```
-2. Before a UIAbility instance is created, you can create a unique string key for the instance. The key is bound to the UIAbility instance when it is created. Each time [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) is called, the application is asked which UIAbility instance is used to respond to the [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) request.
- In EntryAbility, add a custom parameter, for example, **instanceKey**, to the [want](want-overview.md) parameter in [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) to distinguish the UIAbility instances.
-
+2. Create a unique string key for the instance. Each time [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) is called, the application, based on the key, identifies the UIAbility instance used to respond to the request. In EntryAbility, add a custom parameter, for example, **instanceKey**, to the **want** parameter in [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) to distinguish the UIAbility instance.
+
```ts
// Configure an independent key for each UIAbility instance.
// For example, in the document usage scenario, use the document path as the key.
@@ -114,17 +113,18 @@ For example, there are EntryAbility and SpecifiedAbility, and the launch type of
instanceKey: getInstance(),
},
}
- // context is the ability-level context of the initiator UIAbility.
+ // context is the UIAbilityContext of the initiator UIAbility.
this.context.startAbility(want).then(() => {
// ...
}).catch((err) => {
// ...
})
```
-
-3. During running, the internal service of UIAbility determines whether to create multiple instances. If the key is matched, the UIAbility instance bound to the key is started. Otherwise, a new UIAbility instance is created.
- The launch type of SpecifiedAbility is set to **specified**. Before SpecifiedAbility is started, the [onAcceptWant()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonacceptwant) callback of the corresponding AbilityStage instance is invoked to parse the input **want** parameter and obtain the custom parameter **instanceKey**. A string key identifier is returned through the [onAcceptWant()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonacceptwant) callback of the AbilityStage instance. [If the returned key corresponds to a started UIAbility instance](mission-management-launch-type.md#fig14520125175314), that UIAbility instance is switched to the foreground and gains focus again. Otherwise, a new instance is created and started.
-
+
+3. Because the launch type of SpecifiedAbility is set to **specified**, before SpecifiedAbility is started, the [onAcceptWant()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonacceptwant) callback of the corresponding AbilityStage instance is invoked to obtain the key of the UIAbility. If a UIAbility instance matching the key exists, the system starts that instance and invokes its [onNewWant()](../reference/apis/js-apis-app-ability-uiAbility.md#abilityonnewwant) callback. Otherwise, the system creates a new UIAbility instance and invokes its [onCreate()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityoncreate) and [onWindowStageCreate()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityonwindowstagecreate) callbacks.
+
+   In the sample code, the [onAcceptWant()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonacceptwant) callback parses the **want** parameter to obtain the custom parameter **instanceKey**. The service logic returns a key string based on the **instanceKey** parameter to identify the UIAbility instance. If the returned key maps to a started UIAbility instance, the system brings that instance back to the foreground and gives it focus. Otherwise, the system creates a new UIAbility instance and starts it.
+
```ts
import AbilityStage from '@ohos.app.ability.AbilityStage';
@@ -133,7 +133,7 @@ For example, there are EntryAbility and SpecifiedAbility, and the launch type of
// In the AbilityStage instance of the callee, a key value corresponding to a UIAbility instance is returned for UIAbility whose launch type is specified.
// In this example, SpecifiedAbility of module1 is returned.
if (want.abilityName === 'SpecifiedAbility') {
- // The returned string key is a custom string.
+ // The returned key string is a custom string.
return `SpecifiedAbilityInstance_${want.parameters.instanceKey}`;
}
@@ -141,22 +141,17 @@ For example, there are EntryAbility and SpecifiedAbility, and the launch type of
}
}
```
-
+
> **NOTE**
>
> 1. Assume that the application already has a UIAbility instance created, and the launch type of the UIAbility instance is set to **specified**. If [startAbility()](../reference/apis/js-apis-inner-application-uiAbilityContext.md#uiabilitycontextstartability) is called again to start the UIAbility instance, and the [onAcceptWant()](../reference/apis/js-apis-app-ability-abilityStage.md#abilitystageonacceptwant) callback of [AbilityStage](../reference/apis/js-apis-app-ability-abilityStage.md) matches a created UIAbility instance, the original UIAbility instance is started, and no new UIAbility instance is created. In this case, the [onNewWant()](../reference/apis/js-apis-app-ability-uiAbility.md#abilityonnewwant) callback is invoked, but the [onCreate()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityoncreate) and [onWindowStageCreate()](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityonwindowstagecreate) callbacks are not.
> 2. AbilityStage is not automatically generated in the default project of DevEco Studio. For details about how to create an AbilityStage file, see [AbilityStage Component Container](abilitystage.md).
-
- For example, in the document application, different key values are bound to different document instances. Each time a document is created, a new key value (for example, file path) is passed, and a new UIAbility instance is created when UIAbility is started in AbilityStage. However, when you open an existing document, the same UIAbility instance is started again in AbilityStage.
-
-The following steps are used as an example.
+ For example, in the document application, different keys are bound to different document instances. Each time a document is created, a new key (for example, file path) is passed, and a new UIAbility instance is created when UIAbility is started in AbilityStage. However, when you open an existing document, the same UIAbility instance is started again in AbilityStage.
+
+ The following steps are used as an example.
+
1. Open file A. A UIAbility instance, for example, UIAbility instance 1, is started.
-
2. Close the process of file A in **Recents**. UIAbility instance 1 is destroyed. Return to the home screen and open file A again. A new UIAbility instance is started, for example, UIAbility instance 2.
-
3. Return to the home screen and open file B. A new UIAbility instance is started, for example, UIAbility instance 3.
-
- 4. Return to the home screen and open file A again. UIAbility instance 2 is started.
-
-
\ No newline at end of file
+ 4. Return to the home screen and open file A again. UIAbility instance 2 is started, because the system automatically matches the key in the **want** against the keys of the existing UIAbility instances and starts the instance with a matching key. In this example, UIAbility instance 2 has the same key as file A, so the system brings UIAbility instance 2 back to the foreground and gives it focus instead of creating a new instance, as shown in the sketch below.
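+
+A minimal sketch of the reused instance's side (illustrative only; it assumes the SpecifiedAbility and the custom **instanceKey** parameter from the sample above): when the system reuses a matched instance, only **onNewWant()** is invoked, so the new **want** must be handled there.
+
+```ts
+import UIAbility from '@ohos.app.ability.UIAbility';
+
+export default class SpecifiedAbility extends UIAbility {
+  onNewWant(want, launchParam) {
+    // Invoked instead of onCreate() when an existing instance is matched.
+    // Refresh the page content based on the new want, for example, reload
+    // the document identified by the custom instanceKey parameter.
+    console.info(`onNewWant, instanceKey: ${want.parameters.instanceKey}`);
+  }
+}
+```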
diff --git a/en/application-dev/application-models/uiability-overview.md b/en/application-dev/application-models/uiability-overview.md
index 14cb5c4652749c97dd6e50c4232b6f65fb6feaab..cd059b9555bfd80c02c9bce66f4c50cd58fff568 100644
--- a/en/application-dev/application-models/uiability-overview.md
+++ b/en/application-dev/application-models/uiability-overview.md
@@ -3,10 +3,27 @@
## Overview
-UIAbility has the UI and is mainly used for user interaction.
+UIAbility is a type of application component that provides the UI for user interaction.
-UIAbility is the basic unit scheduled by the system and provides a window for applications to draw UIs. A UIAbility component can implement a functional module through multiple pages. Each UIAbility component instance corresponds to a mission in **Recents**.
+The following design philosophy is behind UIAbility:
+1. Native support for [cross-device migration](hop-cross-device-migration.md) and [multi-device collaboration](hop-multi-device-collaboration.md) at the application component level
+
+2. Support for multiple device types and window forms
+
+For details, see [Interpretation of the Application Model](application-model-description.md).
+
+The UIAbility division principles and suggestions are as follows:
+
+UIAbility is the basic unit scheduled by the system and provides a window for applications to draw UIs. An application can contain one or more UIAbility components. For example, for a payment application, you can use two UIAbility components to carry the entry and payment functionalities.
+
+Each UIAbility component instance is displayed as a mission in Recents.
+
+You can develop a single UIAbility or multiple UIAbilities for your application based on service requirements.
+
+- If you want only one mission to be displayed in Recents, use one UIAbility and multiple pages.
+
+- If you want multiple missions to be displayed in Recents or multiple windows to be opened simultaneously, use multiple UIAbilities.
## Privacy Statement Configuration
@@ -32,8 +49,3 @@ To enable an application to properly use a UIAbility component, declare the UIAb
}
}
```
-
-> **NOTE**
->
-> For the ability composition, see [Adding an Ability to a Module](https://developer.harmonyos.com/en/docs/documentation/doc-guides-V3/ohos-adding-ability-0000001218280664-V3).
-
diff --git a/en/application-dev/application-models/windowextensionability.md b/en/application-dev/application-models/windowextensionability.md
new file mode 100644
index 0000000000000000000000000000000000000000..069897ad02435070ac470f5d2d3d528c76b417e8
--- /dev/null
+++ b/en/application-dev/application-models/windowextensionability.md
@@ -0,0 +1,112 @@
+# WindowExtensionAbility
+
+[WindowExtensionAbility](../reference/apis/js-apis-application-windowExtensionAbility.md) is a type of ExtensionAbility component that allows a system application to be embedded in and displayed over another application.
+
+
+The WindowExtensionAbility component must be used together with the [AbilityComponent](../reference/arkui-ts/ts-container-ability-component.md) to process services of the started application. WindowExtensionAbility runs in connection mode, and a system application must use the AbilityComponent to start the WindowExtensionAbility component.
+
+Each ExtensionAbility has its own context. For WindowExtensionAbility, the context is [WindowExtensionContext](../reference/apis/js-apis-inner-application-windowExtensionContext.md).
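+
+As a minimal sketch of using this context (the bundle name and ability name below are hypothetical placeholders), a WindowExtensionAbility can start another ability through **this.context**:
+
+```ts
+import Extension from '@ohos.application.WindowExtensionAbility'
+
+export default class WindowExtAbility extends Extension {
+  onConnect(want) {
+    // this.context is a WindowExtensionContext and can start another ability.
+    this.context.startAbility({
+      bundleName: 'com.example.demo',  // hypothetical bundle name
+      abilityName: 'EntryAbility'      // hypothetical ability name
+    }, (err) => {
+      console.info('startAbility result: ' + JSON.stringify(err));
+    });
+  }
+}
+```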
+
+> **NOTE**
+>
+> **WindowExtensionAbility** is a system API. To embed a third-party application in another application and display it over the application, switch to the full SDK by following the instructions provided in [Guide to Switching to Full SDK](../../application-dev/quick-start/full-sdk-switch-guide.md).
+>
+
+
+## Setting an Embedded Ability (for System Applications Only)
+
+The **WindowExtensionAbility** class provides **onConnect()**, **onDisconnect()**, and **onWindowReady()** lifecycle callbacks, which can be overridden.
+
+- The **onWindowReady()** callback is invoked when a window is created for the ability.
+
+- The **onConnect()** callback is invoked when the AbilityComponent corresponding to the window connects to the ability.
+
+- The **onDisconnect()** callback is invoked when the AbilityComponent disconnects from the ability.
+
+
+**How to Develop**
+
+To implement an embedded application, manually create a WindowExtensionAbility in DevEco Studio as follows:
+
+1. In the **ets** directory of the **Module** project, right-click and choose **New > Directory** to create a directory named **WindowExtAbility**.
+
+2. Right-click the **WindowExtAbility** directory, and choose **New > TypeScript File** to create a file named **WindowExtAbility.ts**.
+
+3. Open the **WindowExtAbility.ts** file and import the dependency package of **WindowExtensionAbility**. Customize a class that inherits from **WindowExtensionAbility** and implement the **onWindowReady()**, **onConnect()**, and **onDisconnect()** lifecycle callbacks.
+
+ ```ts
+ import Extension from '@ohos.application.WindowExtensionAbility'
+
+ export default class WindowExtAbility extends Extension {
+ onWindowReady(window) {
+ window.loadContent('WindowExtAbility/pages/index1').then(() => {
+ window.getProperties().then((pro) => {
+ console.log("WindowExtension " + JSON.stringify(pro));
+ })
+ window.show();
+ })
+ }
+
+ onConnect(want) {
+ console.info('JSWindowExtension onConnect ' + want.abilityName);
+ }
+
+ onDisconnect(want) {
+ console.info('JSWindowExtension onDisconnect ' + want.abilityName);
+ }
+ }
+ ```
+
+4. Register the WindowExtensionAbility in the [module.json5 file](../quick-start/module-configuration-file.md) corresponding to the **Module** project. Set **type** to **"window"** and **srcEntrance** to the code path of the ExtensionAbility component.
+
+ ```json
+   {
+     "module": {
+       "extensionAbilities": [
+         {
+           "name": "WindowExtAbility",
+           "srcEntrance": "./ets/WindowExtAbility/WindowExtAbility.ts",
+           "icon": "$media:icon",
+           "description": "WindowExtension",
+           "type": "window",
+           "visible": true
+         }
+       ]
+     }
+   }
+ ```
+
+
+## Starting an Embedded Ability (for System Applications Only)
+
+System applications can load the created WindowExtensionAbility through the AbilityComponent.
+
+**How to Develop**
+
+1. To connect to an embedded application, add the AbilityComponent to the corresponding pages in the DevEco Studio project.
+
+2. Set **bundleName** and **abilityName** in the AbilityComponent.
+
+3. Set the width and height. The sample code is as follows:
+
+```ts
+@Entry
+@Component
+struct Index {
+ @State message: string = 'Hello World'
+
+ build() {
+ Row() {
+ Column() {
+ AbilityComponent({ abilityName: "WindowExtAbility", bundleName: "com.example.WindowExtAbility"})
+ .width(500)
+ .height(500)
+ }
+ .width('100%')
+ }
+ .height('100%')
+ .backgroundColor(0x64BB5c)
+ }
+}
+```
diff --git a/en/application-dev/application-test/arkxtest-guidelines.md b/en/application-dev/application-test/arkxtest-guidelines.md
index bd82cae45fb4c673f014bcc13cfc02beb3853a2e..64edba5e9f4d4ebbd6b7bfbff44c4b01c8a67d4d 100644
--- a/en/application-dev/application-test/arkxtest-guidelines.md
+++ b/en/application-dev/application-test/arkxtest-guidelines.md
@@ -108,7 +108,7 @@ You write a UI test script based on the unit test framework, adding the invoking
In this example, the UI test script is written based on the preceding unit test script. First, add the dependency package, as shown below:
```js
-import {UiDriver,BY,UiComponent,MatchPattern} from '@ohos.uitest'
+import {Driver,ON,Component,MatchPattern} from '@ohos.uitest'
```
Then, write specific test code. Specifically, implement the click action on the started application page and add checkpoint check cases.
@@ -131,16 +131,16 @@ export default function abilityTest() {
expect(Ability.context.abilityInfo.name).assertEqual('EntryAbility');
})
//ui test code
- //init uidriver
- var driver = await UiDriver.create();
+ //init driver
+ var driver = await Driver.create();
await driver.delayMs(1000);
- //find button by text 'Next'
- var button = await driver.findComponent(BY.text('Next'));
+ //find button on text 'Next'
+ var button = await driver.findComponent(ON.text('Next'));
//click button
await button.click();
await driver.delayMs(1000);
//check text
- await driver.assertComponentExist(BY.text('after click'));
+ await driver.assertComponentExist(ON.text('after click'));
await driver.pressBack();
done();
})
@@ -195,14 +195,15 @@ The framework supports multiple test case execution modes, which are triggered b
| itName | Test case to be executed. | {itName} | -s itName testAttributeIt |
| timeout | Timeout interval for executing a test case. | Positive integer (unit: ms). If no value is set, the default value 5000 is used. | -s timeout 15000 |
| breakOnError | Whether to enable break-on-error mode. When this mode is enabled, the test execution process exits if a test assertion error or any other error occurs.| **true**/**false** (default value) | -s breakOnError true |
+| random | Whether to execute test cases in random sequence.| **true**/**false** (default value) | -s random true |
| testType | Type of the test case to be executed. | function, performance, power, reliability, security, global, compatibility, user, standard, safety, resilience| -s testType function |
| level | Level of the test case to be executed. | 0, 1, 2, 3, 4 | -s level 0 |
-| size | Size of the test case to be executed. | small, medium, large | -s size small |
+| size | Size of the test case to be executed. | small, medium, large | -s size small |
| stress | Number of times that the test case is executed. | Positive integer | -s stress 1000 |
**Running Commands**
-> Configure hdc-related environment variables, and then perform the following:
+> Before running commands in the CLI, make sure hdc-related environment variables have been configured.
- Open the CLI.
- Run the **aa test** commands.
diff --git a/en/application-dev/application-test/figures/Execute.PNG b/en/application-dev/application-test/figures/Execute.PNG
index ba96bdfdaf430249f3506153a45c6fe439eda5cc..0260b7983a13851dc1ef8e45928f952eb509a7d8 100644
Binary files a/en/application-dev/application-test/figures/Execute.PNG and b/en/application-dev/application-test/figures/Execute.PNG differ
diff --git a/en/application-dev/connectivity/Readme-EN.md b/en/application-dev/connectivity/Readme-EN.md
index 578e2a3c56c8a1f6cce377eb39ef9a7756d74491..7176cb8fb438cbe8beec5b36bdd290c0b01bbd1f 100755
--- a/en/application-dev/connectivity/Readme-EN.md
+++ b/en/application-dev/connectivity/Readme-EN.md
@@ -5,6 +5,10 @@
- [HTTP Data Request](http-request.md)
- [WebSocket Connection](websocket-connection.md)
- [Socket Connection](socket-connection.md)
+ - [Network Policy Management](net-policy-management.md)
+ - [Network Sharing](net-sharing.md)
+ - [Ethernet Connection](net-ethernet.md)
+ - [Network Connection Management](net-connection-manager.md)
- IPC & RPC
- [IPC & RPC Overview](ipc-rpc-overview.md)
- [IPC & RPC Development](ipc-rpc-development-guideline.md)
diff --git a/en/application-dev/connectivity/http-request.md b/en/application-dev/connectivity/http-request.md
index da1a7e1c517f284037a41a88e2167b6d1d2406aa..39ada2bc9b21b8e5d157806f5164c02219c65296 100644
--- a/en/application-dev/connectivity/http-request.md
+++ b/en/application-dev/connectivity/http-request.md
@@ -1,6 +1,6 @@
# HTTP Data Request
-## Use Cases
+## When to Use
An application can initiate a data request over HTTP. Common HTTP methods include **GET**, **POST**, **OPTIONS**, **HEAD**, **PUT**, **DELETE**, **TRACE**, and **CONNECT**.
@@ -14,40 +14,49 @@ For details about how to apply for permissions, see [Access Control Development]
The following table provides only a simple description of the related APIs. For details, see [API Reference](../reference/apis/js-apis-http.md).
-| API | Description |
-| ----------------------------------------- | --------------------------------------------------------- |
-| createHttp() | Creates an HTTP request. |
-| request() | Initiates an HTTP request to a given URL. |
-| destroy() | Destroys an HTTP request. |
+| API | Description |
+| ----------------------------------------- | ----------------------------------- |
+| createHttp() | Creates an HTTP request. |
+| request() | Initiates an HTTP request to a given URL. |
+| request2()<sup>10+</sup> | Initiates an HTTP network request based on the URL and returns a streaming response.|
+| destroy() | Destroys an HTTP request. |
| on(type: 'headersReceive') | Registers an observer for HTTP Response Header events. |
-| off(type: 'headersReceive') | Unregisters the observer for HTTP Response Header events. |
+| off(type: 'headersReceive') | Unregisters the observer for HTTP Response Header events.|
+| once\('headersReceive'\)<sup>8+</sup> | Registers a one-time observer for HTTP Response Header events.|
+| on\('dataReceive'\)<sup>10+</sup> | Registers an observer for events indicating receiving of HTTP streaming responses. |
+| off\('dataReceive'\)<sup>10+</sup> | Unregisters the observer for events indicating receiving of HTTP streaming responses. |
+| on\('dataEnd'\)<sup>10+</sup> | Registers an observer for events indicating completion of receiving HTTP streaming responses. |
+| off\('dataEnd'\)<sup>10+</sup> | Unregisters the observer for events indicating completion of receiving HTTP streaming responses.|
+| on\('dataProgress'\)<sup>10+</sup> | Registers an observer for events indicating progress of receiving HTTP streaming responses. |
+| off\('dataProgress'\)<sup>10+</sup> | Unregisters the observer for events indicating progress of receiving HTTP streaming responses.|
-## How to Develop
+## How to Develop request APIs
-1. Import the required HTTP module.
-2. Create an **HttpRequest** object.
-3. (Optional) Listen for HTTP Response Header events.
-4. Initiate an HTTP request to a given URL.
-5. (Optional) Process the HTTP Response Header event and the return result of the HTTP request.
+1. Import the **http** namespace from **@ohos.net.http.d.ts**.
+2. Call **createHttp()** to create an **HttpRequest** object.
+3. Call **httpRequest.on()** to subscribe to HTTP response header events. This API returns a response earlier than the request. You can subscribe to HTTP response header events based on service requirements.
+4. Call **httpRequest.request()** to initiate a network request. You need to pass in the URL and optional parameters of the HTTP request.
+5. Parse the returned result based on service requirements.
+6. Call **off()** to unsubscribe from HTTP response header events.
+7. Call **httpRequest.destroy()** to release resources after the request is processed.
```js
+// Import the http namespace.
import http from '@ohos.net.http';
-// Each HttpRequest corresponds to an HttpRequestTask object and cannot be reused.
+// Each httpRequest corresponds to an HTTP request task and cannot be reused.
let httpRequest = http.createHttp();
-
-// Subscribe to the HTTP response header, which is returned earlier than HttpRequest. You can subscribe to HTTP Response Header events based on service requirements.
-// on('headerReceive', AsyncCallback) will be replaced by on('headersReceive', Callback) in API version 8. 8+
+// This API is used to listen for the HTTP Response Header event, which is returned earlier than the result of the HTTP request. It is up to you whether to listen for HTTP Response Header events.
+// on('headerReceive', AsyncCallback) is replaced by on('headersReceive', Callback) since API version 8.
httpRequest.on('headersReceive', (header) => {
console.info('header: ' + JSON.stringify(header));
});
-
httpRequest.request(
- // Set the URL of the HTTP request. You need to define the URL. Set the parameters of the request in extraData.
+ // Customize EXAMPLE_URL on your own. Request parameters can be carried in extraData; whether to also append parameters to the URL is up to you.
"EXAMPLE_URL",
{
method: http.RequestMethod.POST, // Optional. The default value is http.RequestMethod.GET.
- // You can add the header field based on service requirements.
+ // You can add header fields based on service requirements.
header: {
'Content-Type': 'application/json'
},
@@ -55,21 +64,105 @@ httpRequest.request(
extraData: {
"data": "data to send",
},
- connectTimeout: 60000, // Optional. The default value is 60000, in ms.
+ expectDataType: http.HttpDataType.STRING, // Optional. This field specifies the type of the return data.
+ usingCache: true, // Optional. The default value is true.
+ priority: 1, // Optional. The default value is 1.
+ connectTimeout: 60000, // Optional. The default value is 60000, in ms.
readTimeout: 60000, // Optional. The default value is 60000, in ms.
+ usingProtocol: http.HttpProtocol.HTTP1_1, // Optional. The default protocol type is automatically specified by the system.
+ usingProxy: false, // Optional. By default, network proxy is not used. This field is supported since API 10.
}, (err, data) => {
if (!err) {
- // data.result contains the HTTP response. Parse the response based on service requirements.
- console.info('Result:' + data.result);
- console.info('code:' + data.responseCode);
- // data.header contains the HTTP response header. Parse the content based on service requirements.
+ // data.result carries the HTTP response. Parse the response based on service requirements.
+ console.info('Result:' + JSON.stringify(data.result));
+ console.info('code:' + JSON.stringify(data.responseCode));
+ // data.header carries the HTTP response header. Parse the content based on service requirements.
console.info('header:' + JSON.stringify(data.header));
- console.info('cookies:' + data.cookies); // 8+
+ console.info('cookies:' + JSON.stringify(data.cookies)); // 8+
} else {
console.info('error:' + JSON.stringify(err));
- // Call the destroy() method to destroy the request if it is no longer needed.
+ // Unsubscribe from HTTP Response Header events.
+ httpRequest.off('headersReceive');
+ // Call the destroy() method to release resources after HttpRequest is complete.
httpRequest.destroy();
}
}
);
```
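+
+**request()** also supports the promise mode; the following is a minimal sketch (EXAMPLE_URL is a placeholder to be replaced with a real URL):
+
+```ts
+import http from '@ohos.net.http';
+
+let promiseRequest = http.createHttp();
+// When no callback is passed, request() returns a promise that resolves to the HTTP response.
+promiseRequest.request("EXAMPLE_URL", { method: http.RequestMethod.GET })
+  .then((data) => {
+    console.info('Result:' + JSON.stringify(data.result));
+    console.info('code:' + JSON.stringify(data.responseCode));
+  })
+  .catch((err) => {
+    console.info('error:' + JSON.stringify(err));
+  })
+  .finally(() => {
+    // Release resources after the request is processed.
+    promiseRequest.destroy();
+  });
+```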
+
+## How to Develop request2 APIs
+
+1. Import the **http** namespace from **@ohos.net.http.d.ts**.
+2. Call **createHttp()** to create an **HttpRequest** object.
+3. Depending on your need, call **on()** of the **HttpRequest** object to subscribe to HTTP response header events as well as events indicating receiving of HTTP streaming responses, progress of receiving HTTP streaming responses, and completion of receiving HTTP streaming responses.
+4. Call **request2()** to initiate a network request. You need to pass in the URL and optional parameters of the HTTP request.
+5. Parse the returned response code as needed.
+6. Call **off()** of the **HttpRequest** object to unsubscribe from the related events.
+7. Call **httpRequest.destroy()** to release resources after the request is processed.
+
+```js
+// Import the http namespace.
+import http from '@ohos.net.http'
+
+// Each httpRequest corresponds to an HTTP request task and cannot be reused.
+let httpRequest = http.createHttp();
+// Subscribe to HTTP response header events.
+httpRequest.on('headersReceive', (header) => {
+ console.info('header: ' + JSON.stringify(header));
+});
+// Subscribe to events indicating receiving of HTTP streaming responses.
+let res = '';
+httpRequest.on('dataReceive', (data) => {
+ res += data;
+ console.info('res: ' + res);
+});
+// Subscribe to events indicating completion of receiving HTTP streaming responses.
+httpRequest.on('dataEnd', () => {
+ console.info('No more data in response, data receive end');
+});
+// Subscribe to events indicating progress of receiving HTTP streaming responses.
+httpRequest.on('dataProgress', (data) => {
+ console.log("dataProgress receiveSize:" + data.receiveSize+ ", totalSize:" + data.totalSize);
+});
+
+httpRequest.request2(
+ // Customize EXAMPLE_URL on your own. Request parameters can be carried in extraData; whether to also append parameters to the URL is up to you.
+ "EXAMPLE_URL",
+ {
+ method: http.RequestMethod.POST, // Optional. The default value is http.RequestMethod.GET.
+ // You can add header fields based on service requirements.
+ header: {
+ 'Content-Type': 'application/json'
+ },
+ // This field is used to transfer data when the POST request is used.
+ extraData: {
+ "data": "data to send",
+ },
+ expectDataType: http.HttpDataType.STRING, // Optional. This field specifies the type of the return data.
+ usingCache: true, // Optional. The default value is true.
+ priority: 1, // Optional. The default value is 1.
+ connectTimeout: 60000, // Optional. The default value is 60000, in ms.
+ readTimeout: 60000, // Optional. The default value is 60000, in ms. If a large amount of data needs to be transmitted, you are advised to set this parameter to a larger value to ensure normal data transmission.
+ usingProtocol: http.HttpProtocol.HTTP1_1, // Optional. The default protocol type is automatically specified by the system.
+ }, (err, data) => {
+ console.info('error:' + JSON.stringify(err));
+ console.info('ResponseCode :' + JSON.stringify(data));
+ // Unsubscribe from HTTP Response Header events.
+ httpRequest.off('headersReceive');
+ // Unregister the observer for events indicating receiving of HTTP streaming responses.
+ httpRequest.off('dataReceive');
+ // Unregister the observer for events indicating progress of receiving HTTP streaming responses.
+ httpRequest.off('dataProgress');
+ // Unregister the observer for events indicating completion of receiving HTTP streaming responses.
+ httpRequest.off('dataEnd');
+ // Call the destroy() method to release resources after HttpRequest is complete.
+ httpRequest.destroy();
+ }
+);
+
+```
+
+## Samples
+The following samples are provided to help you better understand how to develop the HTTP data request feature:
+- [HTTP Data Request (ArkTS) (API9)](https://gitee.com/openharmony/applications_app_samples/tree/master/code/BasicFeature/Connectivity/Http)
+- [HTTP Communication (ArkTS) (API9)](https://gitee.com/openharmony/codelabs/tree/master/NetworkManagement/SmartChatEtsOH)
diff --git a/en/application-dev/connectivity/ipc-rpc-development-guideline.md b/en/application-dev/connectivity/ipc-rpc-development-guideline.md
index 5512d7a016754c94174fe269d5ed58424a218fb6..89bff0d4a168c74309f6bc711a3725fd4c9aad1b 100644
--- a/en/application-dev/connectivity/ipc-rpc-development-guideline.md
+++ b/en/application-dev/connectivity/ipc-rpc-development-guideline.md
@@ -1,4 +1,4 @@
-# IPC & RPC Development Guidelines
+# IPC & RPC Development
## When to Use
diff --git a/en/application-dev/connectivity/net-connection-manager.md b/en/application-dev/connectivity/net-connection-manager.md
new file mode 100644
index 0000000000000000000000000000000000000000..1eddb3b5bbe47cb4d02123986647955d0492629e
--- /dev/null
+++ b/en/application-dev/connectivity/net-connection-manager.md
@@ -0,0 +1,246 @@
+# Network Connection Management
+
+## Introduction
+The Network Connection Management module provides basic network management capabilities, including management of Wi-Fi/cellular/Ethernet connection priorities, network quality evaluation, subscription to network connection status changes, query of network connection information, and DNS resolution.
+
+> **NOTE**
+> To maximize the application running efficiency, most APIs are called asynchronously in callback or promise mode. The following code examples use the callback mode. For details about the APIs, see the [Network Connection Management API Reference](../reference/apis/js-apis-net-connection.md).
+
+## Basic Concepts
+- Producer: a provider of data networks, such as Wi-Fi, cellular, and Ethernet.
+- Consumer: a user of data networks, for example, an application or a system service.
+- Network probe: a mechanism used to detect the network availability to prevent the switch from an available network to an unavailable network. The probe type can be binding network detection, DNS detection, HTTP detection, or HTTPS detection.
+- Network selection: a mechanism used to select the optimal network when multiple networks coexist. It is triggered when the network status, network information, or network quality evaluation score changes.
+
+## Constraints
+- Programming language: C++ and JS
+- System: Linux kernel
+- The initial APIs of this module are supported since API version 8. Newly added APIs will be marked with a superscript to indicate their earliest API version.
+
+## When to Use
+Typical application scenarios of network connection management are as follows:
+- Subscribing to status changes of the specified network
+- Obtaining the list of all registered networks
+- Querying network connection information based on the data network
+- Resolving the domain name of a network to obtain all IP addresses
+
+The following describes the development procedure specific to each application scenario.
+## Available APIs
+For the complete list of APIs and example code, see [Network Connection Management](../reference/apis/js-apis-net-connection.md).
+
+| Type| API| Description|
+| ---- | ---- | ---- |
+| ohos.net.connection | function getDefaultNet(callback: AsyncCallback\<NetHandle\>): void; |Creates a **NetHandle** object that contains the **netId** of the default network. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection | function getGlobalHttpProxy<sup>10+</sup>(callback: AsyncCallback\<HttpProxy\>): void;| Obtains the global HTTP proxy for the network. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection | function setGlobalHttpProxy<sup>10+</sup>(httpProxy: HttpProxy, callback: AsyncCallback\<void\>): void;| Sets the global HTTP proxy for the network. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection | function getAppNet<sup>9+</sup>(callback: AsyncCallback\<NetHandle\>): void;| Obtains a **NetHandle** object that contains the **netId** of the network bound to the application. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection | function setAppNet<sup>9+</sup>(netHandle: NetHandle, callback: AsyncCallback\<void\>): void;| Binds an application to the specified network. The application can access the external network only through this network. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection | function getDefaultNetSync<sup>9+</sup>(): NetHandle; |Obtains the default active data network in synchronous mode. You can use **getNetCapabilities** to obtain information such as the network type and capabilities.|
+| ohos.net.connection | function hasDefaultNet(callback: AsyncCallback\<boolean\>): void; |Checks whether the default network is available. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection | function getAllNets(callback: AsyncCallback\<Array\<NetHandle\>\>): void;| Obtains the list of **NetHandle** objects of the connected networks. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection | function getConnectionProperties(netHandle: NetHandle, callback: AsyncCallback\<ConnectionProperties\>): void; |Obtains link information of the default network. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection | function getNetCapabilities(netHandle: NetHandle, callback: AsyncCallback\<NetCapabilities\>): void; |Obtains the capability set of the default network. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection | function isDefaultNetMetered<sup>9+</sup>(callback: AsyncCallback\<boolean\>): void; |Checks whether the data traffic usage on the current network is metered. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection | function reportNetConnected(netHandle: NetHandle, callback: AsyncCallback\<void\>): void;| Reports a **netAvailable** event to NetManager. If this API is called, the application considers that its network status (ohos.net.connection.NetCap.NET_CAPABILITY_VALIDATED) is inconsistent with that of NetManager. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection | function reportNetDisconnected(netHandle: NetHandle, callback: AsyncCallback\<void\>): void;| Reports a **netUnavailable** event to NetManager. If this API is called, the application considers that its network status (ohos.net.connection.NetCap.NET_CAPABILITY_VALIDATED) is inconsistent with that of NetManager. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection | function getAddressesByName(host: string, callback: AsyncCallback\<Array\<NetAddress\>\>): void; |Obtains all IP addresses of the specified network by resolving the domain name. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection | function enableAirplaneMode(callback: AsyncCallback\<void\>): void; | Enables the airplane mode. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection | function disableAirplaneMode(callback: AsyncCallback\<void\>): void;| Disables the airplane mode. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection | function createNetConnection(netSpecifier?: NetSpecifier, timeout?: number): NetConnection; | Creates a **NetConnection** object. **netSpecifier** specifies the network, and **timeout** specifies the timeout interval in ms. **timeout** is configurable only when **netSpecifier** is specified. If neither of them is present, the default network is used.|
+| ohos.net.connection.NetHandle | bindSocket(socketParam: TCPSocket \| UDPSocket, callback: AsyncCallback\<void\>): void; | Binds a **TCPSocket** or **UDPSocket** to the current network. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection.NetHandle | getAddressesByName(host: string, callback: AsyncCallback\<Array\<NetAddress\>\>): void; |Obtains all IP addresses of the default network by resolving the domain name. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection.NetHandle | getAddressByName(host: string, callback: AsyncCallback\<NetAddress\>): void; |Obtains an IP address of the specified network by resolving the domain name. This API uses an asynchronous callback to return the result.|
+| ohos.net.connection.NetConnection | on(type: 'netAvailable', callback: Callback\<NetHandle\>): void; |Subscribes to **netAvailable** events.|
+| ohos.net.connection.NetConnection | on(type: 'netCapabilitiesChange', callback: Callback\<{ netHandle: NetHandle, netCap: NetCapabilities }\>): void; |Subscribes to **netCapabilitiesChange** events.|
+| ohos.net.connection.NetConnection | on(type: 'netConnectionPropertiesChange', callback: Callback\<{ netHandle: NetHandle, connectionProperties: ConnectionProperties }\>): void; |Subscribes to **netConnectionPropertiesChange** events.|
+| ohos.net.connection.NetConnection | on(type: 'netBlockStatusChange', callback: Callback\<{ netHandle: NetHandle, blocked: boolean }\>): void; |Subscribes to **netBlockStatusChange** events.|
+| ohos.net.connection.NetConnection | on(type: 'netLost', callback: Callback\<NetHandle\>): void; |Subscribes to **netLost** events.|
+| ohos.net.connection.NetConnection | on(type: 'netUnavailable', callback: Callback\<void\>): void; |Subscribes to **netUnavailable** events.|
+| ohos.net.connection.NetConnection | register(callback: AsyncCallback\<void\>): void; |Registers an observer for the default network or the network specified in **createNetConnection**.|
+| ohos.net.connection.NetConnection | unregister(callback: AsyncCallback\<void\>): void; |Unregisters the observer for the default network or the network specified in **createNetConnection**.|
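+
+The following minimal sketch illustrates **bindSocket()** from the table above; the local bind address (a system-assigned port on 0.0.0.0) and the use of the default network are assumptions of this example, and error handling is reduced to logging:
+
+```ts
+import connection from '@ohos.net.connection';
+import socket from '@ohos.net.socket';
+
+// Create a TCP socket and bind it to a local address first.
+let tcp = socket.constructTCPSocketInstance();
+tcp.bind({ address: '0.0.0.0', port: 0, family: 1 }, (bindErr) => {
+  if (bindErr) {
+    console.log('bind error: ' + JSON.stringify(bindErr));
+    return;
+  }
+  // Bind the socket to the default network so that its traffic goes through that network.
+  connection.getDefaultNet((netErr, netHandle) => {
+    if (netErr) {
+      console.log('getDefaultNet error: ' + JSON.stringify(netErr));
+      return;
+    }
+    netHandle.bindSocket(tcp, (err) => {
+      console.log('bindSocket result: ' + JSON.stringify(err));
+    });
+  });
+});
+```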
+
+## Subscribing to Status Changes of the Specified Network
+
+1. Import the connection namespace from **@ohos.net.connection.d.ts**.
+
+2. Call **createNetConnection()** to create a **NetConnection** object. You can specify the network type, capability, and timeout interval. If you do not specify parameters, the default values will be used.
+
+3. Call **conn.on()** to subscribe to the target event. You must pass in **type** and **callback**.
+
+4. Call **conn.register()** to subscribe to network status changes of the specified network.
+
+5. When the network is available, the callback will be invoked to return the **netAvailable** event. When the network is unavailable, the callback will be invoked to return the **netUnavailable** event.
+
+6. Call **conn.unregister()** to unsubscribe from the network status changes if required.
+
+```js
+ // Import the connection namespace.
+ import connection from '@ohos.net.connection'
+
+ let netCap = {
+ // Assume that the default network is Wi-Fi. If you need to create a cellular network connection, set the network type to CELLULAR.
+ bearerTypes: [connection.NetBearType.BEARER_CELLULAR],
+ // Set the network capability to INTERNET.
+ networkCap: [connection.NetCap.NET_CAPABILITY_INTERNET],
+ };
+ let netSpec = {
+ netCapabilities: netCap,
+ };
+
+ // Set the timeout value to 10s. The default value is 0.
+ let timeout = 10 * 1000;
+
+ // Create a NetConnection object.
+ let conn = connection.createNetConnection(netSpec, timeout);
+
+  // Listen to network status change events. If the network is available, a netAvailable event is reported.
+  conn.on('netAvailable', (data) => {
+    console.log("net is available, netId is " + data.netId);
+  });
+
+  // Listen to network status change events. If the network is unavailable, a netUnavailable event is reported.
+  conn.on('netUnavailable', (data) => {
+    console.log("net is unavailable, netId is " + data.netId);
+  });
+
+ // Register an observer for network status changes.
+ conn.register((err, data) => {});
+
+ // Unregister the observer for network status changes.
+ conn.unregister((err, data) => {});
+```
+
+## Obtaining the List of All Registered Networks
+
+### How to Develop
+
+1. Import the connection namespace from **@ohos.net.connection.d.ts**.
+
+2. Call **getAllNets** to obtain the list of all connected networks.
+
+```js
+ // Import the connection namespace.
+ import connection from '@ohos.net.connection'
+
+ // Obtain the list of all connected networks.
+ connection.getAllNets((err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data));
+ if (data) {
+ this.netList = data;
+ }
+ })
+```
+
+## Querying Network Capability Information and Connection Information of Specified Data Network
+
+### How to Develop
+
+1. Import the connection namespace from **@ohos.net.connection.d.ts**.
+
+2. Call **getDefaultNet** to obtain the default data network via **NetHandle** or call **getAllNets** to obtain the list of all connected networks via **Array\<NetHandle\>**.
+
+3. Call **getNetCapabilities** to obtain the network capability information of the data network specified by **NetHandle**. The capability information includes information such as the network type (cellular, Wi-Fi, or Ethernet network) and the specific network capabilities.
+
+4. Call **getConnectionProperties** to obtain the connection information of the data network specified by **NetHandle**.
+
+```js
+ // Import the connection namespace.
+ import connection from '@ohos.net.connection'
+
+ // Call getDefaultNet to obtain the default data network specified by **NetHandle**.
+ connection.getDefaultNet((err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data));
+ if (data) {
+ this.netHandle = data;
+ }
+ })
+
+ // Obtain the network capability information of the data network specified by **NetHandle**. The capability information includes information such as the network type and specific network capabilities.
+ connection.getNetCapabilities(this.netHandle, (err, data) => {
+ console.log(JSON.stringify(err));
+
+ // Obtain the network type via bearerTypes.
+ for (let item of data.bearerTypes) {
+ if (item == 0) {
+ // Cellular network
+ console.log(JSON.stringify("BEARER_CELLULAR"));
+ } else if (item == 1) {
+ // Wi-Fi network
+ console.log(JSON.stringify("BEARER_WIFI"));
+ } else if (item == 3) {
+ // Ethernet network
+ console.log(JSON.stringify("BEARER_ETHERNET"));
+ }
+ }
+
+ // Obtain the specific network capabilities via networkCap.
+ for (let item of data.networkCap) {
+ if (item == 0) {
+ // The network can connect to the carrier's Multimedia Messaging Service Center (MMSC) to send and receive multimedia messages.
+ console.log(JSON.stringify("NET_CAPABILITY_MMS"));
+ } else if (item == 11) {
+ // The network traffic is not metered.
+ console.log(JSON.stringify("NET_CAPABILITY_NOT_METERED"));
+ } else if (item == 12) {
+ // The network has the Internet access capability, which is set by the network provider.
+ console.log(JSON.stringify("NET_CAPABILITY_INTERNET"));
+ } else if (item == 15) {
+ // The network does not use a Virtual Private Network (VPN).
+ console.log(JSON.stringify("NET_CAPABILITY_NOT_VPN"));
+ } else if (item == 16) {
+ // The Internet access capability of the network is successfully verified by the connection management module.
+ console.log(JSON.stringify("NET_CAPABILITY_VALIDATED"));
+ }
+ }
+ })
+
+ // Obtain the connection information of the data network specified by NetHandle. Connection information includes link and route information.
+ connection.getConnectionProperties(this.netHandle, (err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data));
+ })
+
+ // Call getAllNets to obtain the list of all connected networks via Array.
+ connection.getAllNets((err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data));
+ if (data) {
+ this.netList = data;
+ }
+ })
+
+ for (let item of this.netList) {
+ // Obtain the network capability information of the network specified by each netHandle on the network list cyclically.
+ connection.getNetCapabilities(item, (err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data));
+ })
+
+ // Obtain the connection information of the network specified by each netHandle on the network list cyclically.
+ connection.getConnectionProperties(item, (err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data));
+ })
+ }
+```
+
+## Resolving the Domain Name of a Network to Obtain All IP Addresses
+
+### How to Develop
+
+1. Import the connection namespace from **@ohos.net.connection.d.ts**.
+
+2. Call **getAddressesByName** to use the default network to resolve the host name to obtain the list of all IP addresses.
+
+```js
+ // Import the connection namespace.
+ import connection from '@ohos.net.connection'
+
+ // Use the default network to resolve the host name to obtain the list of all IP addresses.
+ connection.getAddressesByName(this.host, (err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data));
+ })
+```
diff --git a/en/application-dev/connectivity/net-ethernet.md b/en/application-dev/connectivity/net-ethernet.md
new file mode 100644
index 0000000000000000000000000000000000000000..85c4ef4fc15f4c2228eb8351ddb5cd730ff5fe94
--- /dev/null
+++ b/en/application-dev/connectivity/net-ethernet.md
@@ -0,0 +1,140 @@
+# Ethernet Connection
+
+## Introduction
+The Ethernet Connection module allows a device to access the Internet through a network cable.
+After a device is connected to the Ethernet through a network cable, the device can obtain a series of network attributes, such as the dynamically allocated IP address, subnet mask, gateway, and DNS. You can manually configure and obtain the network attributes of the device in static mode.
+
+> **NOTE**
+> To maximize the application running efficiency, most APIs are called asynchronously in callback or promise mode. The following code examples use the callback mode. For details about the APIs, see the [Ethernet Connection API Reference](../reference/apis/js-apis-net-ethernet.md).
+
+## Constraints
+- Programming language: C++ and JS
+- System: Linux kernel
+- The initial APIs of this module are supported since API version 9. Newly added APIs will be marked with a superscript to indicate their earliest API version.
+
+## When to Use
+Typical application scenarios of Ethernet connection are as follows:
+- Dynamically assigning a series of network attributes, such as the IP address, subnet mask, gateway, and DNS, in DHCP mode to enable network access.
+- Configuring a series of network attributes, such as the IP address, subnet mask, gateway, and DNS, in static mode to enable network access.
+
+The following describes the development procedure specific to each application scenario.
+
+## Available APIs
+For the complete list of APIs and example code, see [Ethernet Connection](../reference/apis/js-apis-net-ethernet.md).
+
+| Type| API| Description|
+| ---- | ---- | ---- |
+| ohos.net.ethernet | function setIfaceConfig(iface: string, ic: InterfaceConfiguration, callback: AsyncCallback\<void\>): void | Configures the network attributes of the specified Ethernet network. This API uses an asynchronous callback to return the result.|
+| ohos.net.ethernet | function getIfaceConfig(iface: string, callback: AsyncCallback\<InterfaceConfiguration\>): void | Obtains the network attributes of the specified Ethernet network. This API uses an asynchronous callback to return the result.|
+| ohos.net.ethernet | function isIfaceActive(iface: string, callback: AsyncCallback\<number\>): void | Checks whether the specified network port is active. This API uses an asynchronous callback to return the result.|
+| ohos.net.ethernet | function getAllActiveIfaces(callback: AsyncCallback\<Array\<string\>\>): void; | Obtains the list of all active network ports. This API uses an asynchronous callback to return the result.|
+
+## Ethernet Connection – DHCP Mode
+
+### How to Develop
+
+1. Use a network cable to connect the device to a network port.
+2. Import the **ethernet** namespace from **@ohos.net.ethernet**.
+3. Call **getAllActiveIfaces** to obtain the list of all active network ports, for example, **eth0** and **eth1**.
+4. Call **isIfaceActive** in user mode to check whether the **eth0** port is active.
+5. Call **getIfaceConfig** in user mode to obtain the network attributes of the **eth0** port. By default, an unconfigured Ethernet network uses the DHCP mode, in which the network attributes are automatically assigned.
+
+```js
+ // Import the ethernet namespace from @ohos.net.ethernet.
+ import ethernet from '@ohos.net.ethernet'
+
+ // Call getAllActiveIfaces to obtain the list of all active network ports.
+ ethernet.getAllActiveIfaces((error, data) => {
+ if (error) {
+ console.log("getAllActiveIfaces callback error = " + error);
+ } else {
+ console.log("getAllActiveIfaces callback data.length = " + data.length);
+ for (let i = 0; i < data.length; i++) {
+ console.log("getAllActiveIfaces callback = " + data[i]);
+ }
+ }
+ });
+
+ // Call isIfaceActive to check whether the specified network port is active.
+ ethernet.isIfaceActive("eth0", (error, data) => {
+ if (error) {
+ console.log("isIfaceActive callback error = " + error);
+ } else {
+ console.log("isIfaceActive callback = " + data);
+ }
+ });
+
+ // Call getIfaceConfig to obtain the network attributes of the specified Ethernet network.
+ ethernet.getIfaceConfig("eth0", (error, data) => {
+ if (error) {
+ console.log("getIfaceConfig callback error = " + error);
+ } else {
+ console.log("getIfaceConfig callback mode = " + data.mode);
+ console.log("getIfaceConfig callback ipAddr = " + data.ipAddr);
+ console.log("getIfaceConfig callback routeAddr = " + data.routeAddr);
+ console.log("getIfaceConfig callback gateAddr = " + data.gateAddr);
+ console.log("getIfaceConfig callback maskAddr = " + data.maskAddr);
+ console.log("getIfaceConfig callback dns0Addr = " + data.dns0Addr);
+ console.log("getIfaceConfig callback dns1Addr = " + data.dns1Addr);
+ }
+ });
+```
+## Ethernet Connection – Static Mode
+
+### How to Develop
+
+1. Use a network cable to connect the device to a network port.
+2. Import the **ethernet** namespace from **@ohos.net.ethernet**.
+3. Call **getAllActiveIfaces** in user mode to obtain the list of all active network ports, for example, **eth0** and **eth1**.
+4. Call **isIfaceActive** in user mode to check whether the **eth0** port is active.
+5. Call **setIfaceConfig** in user mode to set the **eth0** port to the static mode, in which you need to manually assign the network attributes (including the IP address, subnet mask, gateway, and DNS).
+6. Call **getIfaceConfig** in user mode to obtain the static network attributes of the **eth0** port.
+
+```js
+ // Import the ethernet namespace from @ohos.net.ethernet.
+ import ethernet from '@ohos.net.ethernet'
+
+ // Call getAllActiveIfaces to obtain the list of all active network ports.
+ ethernet.getAllActiveIfaces((error, data) => {
+ if (error) {
+ console.log("getAllActiveIfaces callback error = " + error);
+ } else {
+ console.log("getAllActiveIfaces callback data.length = " + data.length);
+ for (let i = 0; i < data.length; i++) {
+ console.log("getAllActiveIfaces callback = " + data[i]);
+ }
+ }
+ });
+
+ // Call isIfaceActive to check whether the specified network port is active.
+ ethernet.isIfaceActive("eth0", (error, data) => {
+ if (error) {
+ console.log("isIfaceActive callback error = " + error);
+ } else {
+ console.log("isIfaceActive callback = " + data);
+ }
+ });
+
+ // Call setIfaceConfig to configure the network attributes of the specified Ethernet network.
+ ethernet.setIfaceConfig("eth0", {mode:ethernet.STATIC,ipAddr:"192.168.xx.xx", routeAddr:"192.168.xx.xx",
+ gateAddr:"192.168.xx.xx", maskAddr:"255.255.xx.xx", dnsAddr0:"1.1.xx.xx", dnsAddr1:"2.2.xx.xx"},(error) => {
+ if (error) {
+ console.log("setIfaceConfig callback error = " + error);
+ } else {
+ console.log("setIfaceConfig callback ok ");
+ }
+ });
+
+ // Call getIfaceConfig to obtain the network attributes of the specified Ethernet network.
+ ethernet.getIfaceConfig("eth0", (error, data) => {
+ if (error) {
+ console.log("getIfaceConfig callback error = " + error);
+ } else {
+ console.log("getIfaceConfig callback mode = " + data.mode);
+ console.log("getIfaceConfig callback ipAddr = " + data.ipAddr);
+ console.log("getIfaceConfig callback routeAddr = " + data.routeAddr);
+ console.log("getIfaceConfig callback gateAddr = " + data.gateAddr);
+ console.log("getIfaceConfig callback maskAddr = " + data.maskAddr);
+ console.log("getIfaceConfig callback dns0Addr = " + data.dns0Addr);
+ console.log("getIfaceConfig callback dns1Addr = " + data.dns1Addr);
+ }
+ });
+```
diff --git a/en/application-dev/connectivity/net-mgmt-overview.md b/en/application-dev/connectivity/net-mgmt-overview.md
index 3c8eeb552b811344396afcc6e5316e5daa24ee8b..0ad30c35cc9b4d5e90b2c8fe90cac7ca2e413a57 100644
--- a/en/application-dev/connectivity/net-mgmt-overview.md
+++ b/en/application-dev/connectivity/net-mgmt-overview.md
@@ -2,15 +2,19 @@
Network management functions include:
-- [HTTP Data Request](http-request.md): Initiates a data request through HTTP.
-- [WebSocket Connection](websocket-connection.md): Establishes a bidirectional connection between the server and client through WebSocket.
-- [Socket Connection](socket-connection.md): Transmits data through Socket.
+- [HTTP data request](http-request.md): Initiates a data request through HTTP.
+- [WebSocket connection](websocket-connection.md): Establishes a bidirectional connection between the server and client through WebSocket.
+- [Socket connection](socket-connection.md): Transmits data through Socket.
+- [Network policy management](net-policy-management.md): Restricts network capabilities by setting network policies, including cellular network policy, sleep/power-saving mode policy, and background network policy, and resets network policies as needed.
+- [Network sharing](net-sharing.md): Shares a device's Internet connection with other connected devices by means of Wi-Fi hotspot, Bluetooth, and USB sharing, and queries the network sharing state and shared mobile data volume.
+- [Ethernet connection](net-ethernet.md): Provides wired network capabilities, which allow you to set the IP address, subnet mask, gateway, and Domain Name System (DNS) server of a wired network.
+- [Network connection management](net-connection-manager.md): Provides basic network management capabilities, including management of Wi-Fi/cellular/Ethernet connection priorities, network quality evaluation, subscription to network connection status changes, query of network connection information, and DNS resolution.
## Constraints
To use the functions of the network management module, you must obtain the permissions listed in the following table.
-| Permission | Description |
+| Permission | Description |
| -------------------------------- | -------------------------------------- |
| ohos.permission.GET_NETWORK_INFO | Allows an application to obtain the network connection information. |
| ohos.permission.SET_NETWORK_INFO | Allows an application to modify the network connection state. |
diff --git a/en/application-dev/connectivity/net-policy-management.md b/en/application-dev/connectivity/net-policy-management.md
new file mode 100644
index 0000000000000000000000000000000000000000..6450bd671e565fdffafb7eeed499e123893a45a3
--- /dev/null
+++ b/en/application-dev/connectivity/net-policy-management.md
@@ -0,0 +1,402 @@
+# Network Policy Management
+
+## Introduction
+
+The Network Policy Management module allows you to restrict network capabilities by setting network policies, including cellular network policy, sleep/power-saving mode policy, and background network policy, and to reset network policies as needed.
+
+> **NOTE**
+> To maximize the application running efficiency, most APIs are called asynchronously in callback or promise mode. The following code examples use the callback mode. For details about the APIs, see the [Network Policy Management API Reference](../reference/apis/js-apis-net-policy.md).
+
+## Basic Concepts
+
+- Sleep mode: A mode in which the system shuts down some idle components and peripherals to enter the low-power mode and restricts some applications from accessing the network.
+- Power-saving mode: A mode in which the system disables certain functions and features to save power. When this mode is enabled, the system performance deteriorates and some applications are restricted from accessing the network.
+- Traffic-saving mode: A mode in which the system restricts background applications that use the metering network. It is equivalent to the background network policy.
+- Cellular network: A mobile communication network.
+- Metering network: A mobile, WLAN, or Ethernet network with a preconfigured traffic quota.
+
+## Constraints
+
+- Programming language: C++ and JS
+- System: Linux kernel
+- The initial APIs of this module are supported since API version 9. Newly added APIs will be marked with a superscript to indicate their earliest API version.
+
+## When to Use
+
+Typical application scenarios of network policy management are as follows:
+
+- Managing the metering network policy: Set the metering network quota and obtain the configured metering network policy.
+- Managing network access for an application in the background: Set and obtain the status of the background network restriction switch, and check whether the application indicated by the specified UID can access the network in the background.
+- Managing the metering network access policy: Set and obtain the policy for the application indicated by the specified UID to access the metering network, and obtain the UIDs of the applications for which the policy is configured.
+- Restoring network policies
+- Checking whether an application indicated by the specified UID can access a metering or non-metering network
+- Adding a UID to or removing a UID from the sleep mode allowlist, and obtaining the sleep mode allowlist
+- Adding a UID to or removing a UID from the power-saving mode allowlist, and obtaining the power-saving mode allowlist
+- Updating the network notification policy
+
+The following describes the development procedure specific to each application scenario.
+
+## Available APIs
+
+For the complete list of APIs and example code, see [Network Policy Management](../reference/apis/js-apis-net-policy.md).
+
+| Type| API| Description|
+| ---- | ---- | ---- |
+| ohos.net.policy | function setBackgroundPolicy(isAllowed: boolean, callback: AsyncCallback\<void\>): void |Sets a background network policy. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function isBackgroundAllowed(callback: AsyncCallback\<boolean\>): void; |Obtains the background network policy. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function setPolicyByUid(uid: number, policy: NetUidPolicy, callback: AsyncCallback\<void\>): void; |Sets an application-specific network policy by **uid**. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function getPolicyByUid(uid: number, callback: AsyncCallback\<NetUidPolicy\>): void;| Obtains an application-specific network policy by **uid**. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function getUidsByPolicy(policy: NetUidPolicy, callback: AsyncCallback\<Array\<number\>\>): void; | Obtains the UID array of applications configured with a certain application-specific network policy. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function getNetQuotaPolicies(callback: AsyncCallback\<Array\<NetQuotaPolicy\>\>): void; |Obtains the network quota policies. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function setNetQuotaPolicies(quotaPolicies: Array\<NetQuotaPolicy\>, callback: AsyncCallback\<void\>): void; |Sets an array of network quota policies. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function restoreAllPolicies(iccid: string, callback: AsyncCallback\<void\>): void; | Restores all the policies (cellular network, background network, firewall, and application-specific network policies) for the given SIM card. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function isUidNetAllowed(uid: number, isMetered: boolean, callback: AsyncCallback\<boolean\>): void; | Checks whether an application is allowed to access metering or non-metering networks. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function isUidNetAllowed(uid: number, iface: string, callback: AsyncCallback\<boolean\>): void; | Checks whether an application is allowed to access the given network. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function setDeviceIdleAllowList(uid: number, isAllowed: boolean, callback: AsyncCallback\<void\>): void; | Sets whether to add an application to the device idle allowlist. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function getDeviceIdleAllowList(callback: AsyncCallback\<Array\<number\>\>): void; | Obtains the UID array of applications that are on the device idle allowlist. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function getBackgroundPolicyByUid(uid: number, callback: AsyncCallback\<NetBackgroundPolicy\>): void; | Obtains the background network policies configured for the given application. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function resetPolicies(iccid: string, callback: AsyncCallback\<void\>): void; | Restores all the policies (cellular network, background network, firewall, and application-specific network policies) for the given SIM card. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function updateRemindPolicy(netType: NetBearType, iccid: string, remindType: RemindType, callback: AsyncCallback\<void\>): void; | Updates a reminder policy. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function setPowerSaveAllowList(uid: number, isAllowed: boolean, callback: AsyncCallback\<void\>): void; | Sets whether to add an application to the power-saving allowlist. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function getPowerSaveAllowList(callback: AsyncCallback\<Array\<number\>\>): void; | Obtains the UID array of applications that are on the power-saving allowlist. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function on(type: "netUidPolicyChange", callback: Callback\<{ uid: number, policy: NetUidPolicy }\>): void; | Subscribes to policy changes. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function off(type: "netUidPolicyChange", callback: Callback\<{ uid: number, policy: NetUidPolicy }\>): void; | Unsubscribes from policy changes. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function on(type: "netUidRuleChange", callback: Callback\<{ uid: number, rule: NetUidRule }\>): void; | Subscribes to rule changes. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function off(type: "netUidRuleChange", callback: Callback\<{ uid: number, rule: NetUidRule }\>): void; | Unsubscribes from rule changes. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function on(type: "netMeteredIfacesChange", callback: Callback\<Array\<string\>\>): void; | Subscribes to metered **iface** changes. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function off(type: "netMeteredIfacesChange", callback: Callback\<Array\<string\>\>): void; | Unsubscribes from metered **iface** changes. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function on(type: "netQuotaPolicyChange", callback: Callback\<Array\<NetQuotaPolicy\>\>): void; | Subscribes to network quota policy changes. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function off(type: "netQuotaPolicyChange", callback: Callback\<Array\<NetQuotaPolicy\>\>): void; | Unsubscribes from network quota policy changes. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function on(type: "netBackgroundPolicyChange", callback: Callback\<boolean\>): void; | Subscribes to background network policy changes. This API uses an asynchronous callback to return the result.|
+| ohos.net.policy | function off(type: "netBackgroundPolicyChange", callback: Callback\<boolean\>): void; | Unsubscribes from background network policy changes. This API uses an asynchronous callback to return the result.|
+
+## Managing the Metering Network Policy
+
+### How to Develop
+
+1. Import the **policy** namespace from **@ohos.net.policy.d.ts**.
+
+2. Call **setNetQuotaPolicies** to configure the metering network policy.
+
+3. Call **getNetQuotaPolicies** to obtain the configured metering network policy.
+
+```js
+ // Import the policy namespace.
+ import policy from '@ohos.net.policy';
+
+ addNetQuotaPolicy(){
+ let param = {
+ // For details about the value of netType, see [NetBearType](../reference/apis/js-apis-net-connection.md#netbeartype).
+ netType:Number.parseInt(this.netType),
+
+ // Integrated circuit card identifier (ICCID) of the SIM card on the metering cellular network. It is not available for an Ethernet or Wi-Fi network.
+ iccid:this.iccid,
+
+ // Used together with ICCID on the metering cellular network. It is used independently on an Ethernet or Wi-Fi network.
+ ident:this.ident,
+
+ // Metering start time, for example, M1, D1, and Y1.
+ periodDuration:this.periodDuration,
+
+ // Set the traffic threshold for generating an alarm to an integer greater than 0.
+ warningBytes:Number.parseInt(this.warningBytes),
+
+ // Set the traffic quota to an integer greater than 0.
+ limitBytes:Number.parseInt(this.limitBytes),
+
+ // Specify whether the network is a metering network. The value true means a metering network and false means a non-metering network.
+    metered:Boolean(Number.parseInt(this.metered)),
+
+ // For details about the action triggered after the traffic limit is reached, see [LimitAction](../reference/apis/js-apis-net-policy.md#limitaction).
+ limitAction:Number.parseInt(this.limitAction)
+ };
+ this.netQuotaPolicyList.push(param);
+ },
+
+ // Subscribe to metered iface changes.
+ policy.on('netMeteredIfacesChange', (data) => {
+ this.log('on netMeteredIfacesChange: ' + JSON.stringify(data));
+ });
+
+ // Subscribe to metering network policy changes.
+ policy.on('netQuotaPolicyChange', (data) => {
+ this.log('on netQuotaPolicyChange: ' + JSON.stringify(data));
+ });
+
+ // Call setNetQuotaPolicies to configure the metering network policy.
+ setNetQuotaPolicies(){
+ this.dialogType = DialogType.HIDE;
+ policy.setNetQuotaPolicies(this.netQuotaPolicyList, (err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data));
+ });
+ },
+
+ // Call getNetQuotaPolicies to obtain the configured metering network policy.
+ getNetQuotaPolicies(){
+ policy.getNetQuotaPolicies((err, data) => {
+ this.callBack(err, data);
+ if(data){
+ this.netQuotaPolicyList = data;
+ }
+ });
+ },
+
+ // Unsubscribe from metered iface changes.
+ policy.off('netMeteredIfacesChange', (data) => {
+ this.log('off netMeteredIfacesChange: ' + JSON.stringify(data));
+ });
+
+ // Unsubscribe from metering network policy changes.
+ policy.off('netQuotaPolicyChange', (data) => {
+ this.log('off netQuotaPolicyChange: ' + JSON.stringify(data));
+ });
+```
+
+## Managing Network Access for an Application in the Background
+
+### How to Develop
+
+1. Import the **policy** namespace from **@ohos.net.policy.d.ts**.
+
+2. Call **setBackgroundAllowed** to enable or disable the background network restriction switch.
+
+3. Call **isBackgroundAllowed** to check whether the background network restriction switch is enabled or disabled.
+
+4. Call **getBackgroundPolicyByUid** to check whether the application indicated by the specified UID can access the network in the background.
+
+```js
+ // Import the policy namespace.
+ import policy from '@ohos.net.policy'
+
+ // Subscribe to background network policy changes.
+ policy.on('netBackgroundPolicyChange', (data) => {
+ this.log('on netBackgroundPolicyChange: ' + JSON.stringify(data));
+ });
+
+ // Call setBackgroundAllowed to enable or disable the background network restriction switch.
+ setBackgroundAllowed() {
+ policy.setBackgroundAllowed(Boolean(Number.parseInt(this.isBoolean)), (err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data))
+ });
+ },
+
+ // Call isBackgroundAllowed to check whether the background network restriction switch is enabled or disabled.
+ isBackgroundAllowed() {
+ policy.isBackgroundAllowed((err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data))
+ });
+ },
+
+  // Call getBackgroundPolicyByUid to check whether the application indicated by the specified UID can access the network in the background.
+ getBackgroundPolicyByUid() {
+ policy.getBackgroundPolicyByUid(Number.parseInt(this.firstParam), (err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data))
+ });
+ },
+
+ // Unsubscribe from background network policy changes.
+ policy.off('netBackgroundPolicyChange', (data) => {
+ this.log('off netBackgroundPolicyChange: ' + JSON.stringify(data));
+ });
+```
+
+## Managing the Metering Network Access Policy
+
+### How to Develop
+
+1. Import the **policy** namespace from **@ohos.net.policy.d.ts**.
+
+2. Call **setPolicyByUid** to set whether the application indicated by the specified UID can access the network in the background.
+
+3. Call **getPolicyByUid** to obtain the metering network access policy configured for the application indicated by the specified UID.
+
+4. Call **getUidsByPolicy** to obtain the UIDs of the applications for which the metering network access policy is configured.
+
+```js
+ // Import the policy namespace.
+ import policy from '@ohos.net.policy'
+
+ // Subscribe to policy changes of the application indicated by the specified UID.
+ policy.on('netUidPolicyChange', (data) => {
+ this.log('on netUidPolicyChange: ' + JSON.stringify(data));
+ });
+
+ // Subscribe to rule changes of the application indicated by the specified UID.
+ policy.on('netUidRuleChange', (data) => {
+ this.log('on netUidRuleChange: ' + JSON.stringify(data));
+ });
+
+ // Call setPolicyByUid to set whether the application indicated by the specified UID can access the network in the background.
+ setPolicyByUid() {
+ let param = {
+ uid: Number.parseInt(this.firstParam), policy: Number.parseInt(this.currentNetUidPolicy)
+ }
+ policy.setPolicyByUid(Number.parseInt(this.firstParam), Number.parseInt(this.currentNetUidPolicy), (err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data))
+ });
+ },
+
+  // Call getPolicyByUid to obtain the metering network access policy configured for the application indicated by the specified UID.
+ getPolicyByUid() {
+ policy.getPolicyByUid(Number.parseInt(this.firstParam), (err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data))
+ });
+ },
+
+ // Call getUidsByPolicy to obtain the UIDs of the applications for which the metering network access policy is configured.
+ getUidsByPolicy(){
+ policy.getUidsByPolicy(Number.parseInt(this.currentNetUidPolicy), (err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data))
+ });
+ },
+
+ // Unsubscribe from policy changes of the application indicated by the specified UID.
+ policy.off('netUidPolicyChange', (data) => {
+ this.log('off netUidPolicyChange: ' + JSON.stringify(data));
+ });
+
+ // Unsubscribe from rule changes of the application indicated by the specified UID.
+ policy.off('netUidRuleChange', (data) => {
+ this.log('off netUidRuleChange: ' + JSON.stringify(data));
+ });
+
+```
+
+## Restoring Network Policies
+
+### How to Develop
+
+1. Import the **policy** namespace from **@ohos.net.policy.d.ts**.
+
+2. Call **restoreAllPolicies** to restore all network policies.
+
+```js
+ // Import the policy namespace.
+ import policy from '@ohos.net.policy'
+
+ // Call restoreAllPolicies to restore all network policies.
+ restoreAllPolicies(){
+ policy.restoreAllPolicies(this.firstParam, (err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data))
+ });
+ },
+```
+
+## Checking Access to a Metering or Non-metering Network
+
+### How to Develop
+
+1. Import the **policy** namespace from **@ohos.net.policy.d.ts**.
+
+2. Call **isUidNetAllowed** to check whether the UID can access the metering or non-metering network.
+
+```js
+ // Import the policy namespace.
+ import policy from '@ohos.net.policy'
+
+ // Call isUidNetAllowed to check whether the application indicated by the specified UID can access the metering or non-metering network.
+ isUidNetAllowedIsMetered(){
+ let param = {
+ uid: Number.parseInt(this.firstParam), isMetered: Boolean(Number.parseInt(this.isBoolean))
+ }
+ policy.isUidNetAllowed(Number.parseInt(this.firstParam), Boolean(Number.parseInt(this.isBoolean)), (err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data))
+ });
+ },
+```
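+
+The API table above also lists an **isUidNetAllowed** overload that takes a network interface name instead of a metering flag. The sketch below follows the same callback pattern; the interface name used here is only an illustrative value.
+
+```js
+  // Import the policy namespace.
+  import policy from '@ohos.net.policy'
+
+  // Call isUidNetAllowed to check whether the application indicated by the specified UID can access the given iface.
+  isUidNetAllowedByIface(){
+    // 'wlan0' is a hypothetical interface name used for illustration.
+    policy.isUidNetAllowed(Number.parseInt(this.firstParam), 'wlan0', (err, data) => {
+      console.log(JSON.stringify(err));
+      console.log(JSON.stringify(data))
+    });
+  },
+```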
+
+## Managing the Sleep Mode Allowlist
+
+### How to Develop
+
+1. Import the **policy** namespace from **@ohos.net.policy.d.ts**.
+
+2. Call **setDeviceIdleAllowList** to add a UID to or remove a UID from the sleep mode allowlist.
+
+3. Call **getDeviceIdleAllowList** to obtain the UIDs added to the sleep mode allowlist.
+
+```js
+ // Import the policy namespace.
+ import policy from '@ohos.net.policy'
+
+ // Call setDeviceIdleAllowList to add a UID to or remove a UID from the sleep mode allowlist.
+ setDeviceIdleAllowList(){
+ let param = {
+ uid: Number.parseInt(this.firstParam), isAllowed: Boolean(Number.parseInt(this.isBoolean))
+ }
+ policy.setDeviceIdleAllowList(Number.parseInt(this.firstParam), Boolean(Number.parseInt(this.isBoolean)), (err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data))
+ });
+ },
+
+ // Call getDeviceIdleAllowList to obtain the UIDs added to the sleep mode allowlist.
+ getDeviceIdleAllowList(){
+ policy.getDeviceIdleAllowList((err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data))
+ });
+ },
+```
+
+## Managing the Power-saving Mode Allowlist
+
+### How to Develop
+
+1. Import the **policy** namespace from **@ohos.net.policy.d.ts**.
+2. Call **setPowerSaveAllowList** to add a UID to or remove a UID from the power-saving mode allowlist.
+3. Call **getPowerSaveAllowList** to obtain the UIDs added to the power-saving mode allowlist.
+
+```js
+ // Import the policy namespace.
+ import policy from '@ohos.net.policy'
+
+ // Call setPowerSaveAllowList to add a UID to or remove a UID from the power-saving mode allowlist.
+ setPowerSaveAllowList(){
+ let param = {
+ uid: Number.parseInt(this.firstParam), isAllowed: Boolean(Number.parseInt(this.isBoolean))
+ }
+ policy.setPowerSaveAllowList(Number.parseInt(this.firstParam), Boolean(Number.parseInt(this.isBoolean)), (err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data))
+ });
+ },
+
+ // Call getPowerSaveAllowList to obtain the UIDs added to the power-saving mode allowlist.
+ getPowerSaveAllowList(){
+ policy.getPowerSaveAllowList((err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data))
+ });
+ },
+```
+
+## Updating the Network Notification Policy
+
+### How to Develop
+
+1. Import the **policy** namespace from **@ohos.net.policy.d.ts**.
+
+2. Call **updateRemindPolicy** to update the network notification policy.
+
+```js
+ // Import the policy namespace.
+ import policy from '@ohos.net.policy'
+
+ // Call updateRemindPolicy to update the network notification policy.
+ updateRemindPolicy() {
+ let param = {
+ netType: Number.parseInt(this.netType), iccid: this.firstParam, remindType: this.currentRemindType
+ }
+ policy.updateRemindPolicy(Number.parseInt(this.netType), this.firstParam, Number.parseInt(this.currentRemindType), (err, data) => {
+ console.log(JSON.stringify(err));
+ console.log(JSON.stringify(data))
+ });
+ },
+```
diff --git a/en/application-dev/connectivity/net-sharing.md b/en/application-dev/connectivity/net-sharing.md
new file mode 100644
index 0000000000000000000000000000000000000000..d5bc9cf2f8817723f0f23d666c45997a6735f706
--- /dev/null
+++ b/en/application-dev/connectivity/net-sharing.md
@@ -0,0 +1,130 @@
+# Network Sharing
+
+## Introduction
+The Network Sharing module allows you to share your device's Internet connection with other connected devices by means of Wi-Fi hotspot, Bluetooth, and USB sharing. It also allows you to query the network sharing state and shared mobile data volume.
+
+> **NOTE**
+> To maximize the application running efficiency, most APIs are called asynchronously, in callback or promise mode. The following code examples use the callback mode. For details about the APIs, see [Network Sharing API Reference](../reference/apis/js-apis-net-sharing.md).
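+
+For comparison, the following sketch calls one of these APIs in promise mode, assuming the promise overload of **isSharingSupported** described in the API reference:
+
+```js
+  import sharing from '@ohos.net.sharing'
+
+  // The promise resolves to a boolean indicating whether the system supports network sharing.
+  sharing.isSharingSupported().then(supported => {
+    console.log('isSharingSupported: ' + supported);
+  }).catch(error => {
+    console.log(JSON.stringify(error));
+  });
+```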
+
+## Basic Concepts
+- Wi-Fi sharing: Shares the network through a Wi-Fi hotspot.
+- Bluetooth sharing: Shares the network through Bluetooth.
+- USB tethering: Shares the network over a USB connection.
+
+## Constraints
+- Programming languages: C++ and JS
+- System: Linux kernel
+- The initial APIs of this module are supported since API version 9. Newly added APIs will be marked with a superscript to indicate their earliest API version.
+
+## When to Use
+Typical network sharing scenarios are as follows:
+- Enabling network sharing
+- Disabling network sharing
+- Obtaining the data traffic of the shared network
+
+The following describes the development procedure specific to each application scenario.
+
+## Available APIs
+For the complete list of APIs and example code, see [Network Sharing](../reference/apis/js-apis-net-sharing.md).
+
+| Type| API| Description|
+| ---- | ---- | ---- |
+| ohos.net.sharing | function isSharingSupported(callback: AsyncCallback\<boolean>): void; | Checks whether the system supports network sharing. This API uses an asynchronous callback to return the result.|
+| ohos.net.sharing | function isSharing(callback: AsyncCallback\<boolean>): void; | Checks whether network sharing is active. This API uses an asynchronous callback to return the result.|
+| ohos.net.sharing | function startSharing(type: SharingIfaceType, callback: AsyncCallback\<void>): void; | Starts network sharing. This API uses an asynchronous callback to return the result.|
+| ohos.net.sharing | function stopSharing(type: SharingIfaceType, callback: AsyncCallback\<void>): void; | Stops network sharing. This API uses an asynchronous callback to return the result.|
+| ohos.net.sharing | function getStatsRxBytes(callback: AsyncCallback\<number>): void; | Obtains the received data traffic during network sharing, in KB. This API uses an asynchronous callback to return the result.|
+| ohos.net.sharing | function getStatsTxBytes(callback: AsyncCallback\<number>): void; | Obtains the sent data traffic during network sharing, in KB. This API uses an asynchronous callback to return the result.|
+| ohos.net.sharing | function getStatsTotalBytes(callback: AsyncCallback\<number>): void; | Obtains the total data traffic during network sharing, in KB. This API uses an asynchronous callback to return the result.|
+| ohos.net.sharing | function getSharingIfaces(state: SharingIfaceState, callback: AsyncCallback\<Array\<string>>): void; | Obtains the names of network interface cards (NICs) in the specified network sharing state. This API uses an asynchronous callback to return the result.|
+| ohos.net.sharing | function getSharingState(type: SharingIfaceType, callback: AsyncCallback\<SharingIfaceState>): void; | Obtains the network sharing state of the specified type. This API uses an asynchronous callback to return the result.|
+| ohos.net.sharing | function getSharableRegexes(type: SharingIfaceType, callback: AsyncCallback\<Array\<string>>): void; | Obtains regular expressions of NICs of a specified type. This API uses an asynchronous callback to return the result.|
+| ohos.net.sharing | function on(type: 'sharingStateChange', callback: Callback\<boolean>): void; | Subscribes to network sharing state changes.|
+| ohos.net.sharing | function off(type: 'sharingStateChange', callback?: Callback\<boolean>): void; | Unsubscribes from network sharing state changes.|
+| ohos.net.sharing | function on(type: 'interfaceSharingStateChange', callback: Callback\<{ type: SharingIfaceType, iface: string, state: SharingIfaceState }>): void; | Subscribes to network sharing state changes of the specified NIC.|
+| ohos.net.sharing | function off(type: 'interfaceSharingStateChange', callback?: Callback\<{ type: SharingIfaceType, iface: string, state: SharingIfaceState }>): void; | Unsubscribes from network sharing state changes of the specified NIC.|
+| ohos.net.sharing | function on(type: 'sharingUpstreamChange', callback: Callback\<NetHandle>): void; | Subscribes to upstream NIC changes.|
+| ohos.net.sharing | function off(type: 'sharingUpstreamChange', callback?: Callback\<NetHandle>): void; | Unsubscribes from upstream NIC changes.|
+
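+The state and NIC query APIs in the table follow the same callback pattern. A minimal sketch using **getSharingState** and **getSharableRegexes**, with Wi-Fi as the example sharing type:
+
+```js
+  import sharing from '@ohos.net.sharing'
+
+  // Obtain the network sharing state of the Wi-Fi type.
+  sharing.getSharingState(sharing.SharingIfaceType.SHARING_WIFI, (error, data) => {
+    console.log(JSON.stringify(error));
+    console.log(JSON.stringify(data));
+  });
+
+  // Obtain regular expressions of NICs of the Wi-Fi type.
+  sharing.getSharableRegexes(sharing.SharingIfaceType.SHARING_WIFI, (error, data) => {
+    console.log(JSON.stringify(error));
+    console.log(JSON.stringify(data));
+  });
+```
+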
+## Enabling Network Sharing
+
+### How to Develop
+
+1. Import the **sharing** namespace from **@ohos.net.sharing**.
+2. Subscribe to network sharing state changes.
+3. Call **startSharing** to start network sharing of the specified type.
+4. The callback returns the result of starting network sharing.
+
+```js
+ // Import the sharing namespace from @ohos.net.sharing.
+ import sharing from '@ohos.net.sharing'
+
+ // Subscribe to network sharing state changes.
+  sharing.on('sharingStateChange', (data) => {
+    console.log('on sharingStateChange data: ' + JSON.stringify(data));
+  });
+
+ // Call startSharing to start network sharing of the specified type.
+ sharing.startSharing(sharing.SharingIfaceType.SHARING_WIFI, (error) => {
+ console.log(JSON.stringify(error));
+ });
+```
+
+## Disabling Network Sharing
+
+### How to Develop
+
+1. Import the **sharing** namespace from **@ohos.net.sharing**.
+2. Subscribe to network sharing state changes.
+3. Call **stopSharing** to stop network sharing of the specified type.
+4. The callback returns the result of stopping network sharing.
+
+```js
+ // Import the sharing namespace from @ohos.net.sharing.
+ import sharing from '@ohos.net.sharing'
+
+ // Subscribe to network sharing state changes.
+  sharing.on('sharingStateChange', (data) => {
+    console.log('on sharingStateChange data: ' + JSON.stringify(data));
+  });
+
+ // Call stopSharing to stop network sharing of the specified type.
+ sharing.stopSharing(sharing.SharingIfaceType.SHARING_WIFI, (error) => {
+ console.log(JSON.stringify(error));
+ });
+```
+
+## Obtaining the Data Traffic of the Shared Network
+
+### How to Develop
+
+1. Import the **sharing** namespace from **@ohos.net.sharing**.
+2. Call **startSharing** to start network sharing of the specified type.
+3. Call **getStatsTotalBytes** to obtain the data traffic generated during data sharing.
+4. Call **stopSharing** to stop network sharing of the specified type and clear the data volume of network sharing.
+
+```js
+ // Import the sharing namespace from @ohos.net.sharing.
+ import sharing from '@ohos.net.sharing'
+
+ // Call startSharing to start network sharing of the specified type.
+ sharing.startSharing(sharing.SharingIfaceType.SHARING_WIFI, (error) => {
+ console.log(JSON.stringify(error));
+ });
+
+ // Call getStatsTotalBytes to obtain the data traffic generated during data sharing.
+ sharing.getStatsTotalBytes((error, data) => {
+ console.log(JSON.stringify(error));
+ console.log(JSON.stringify(data));
+ });
+
+ // Call stopSharing to stop network sharing of the specified type and clear the data volume of network sharing.
+ sharing.stopSharing(sharing.SharingIfaceType.SHARING_WIFI, (error) => {
+ console.log(JSON.stringify(error));
+ });
+
+ // Call getStatsTotalBytes again. The data volume of network sharing has been cleared.
+ sharing.getStatsTotalBytes((error, data) => {
+ console.log(JSON.stringify(error));
+ console.log(JSON.stringify(data));
+ });
+```
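+
+The received and sent data volumes can also be queried separately, following the same callback pattern as above:
+
+```js
+  // Call getStatsRxBytes to obtain the received data traffic during network sharing, in KB.
+  sharing.getStatsRxBytes((error, data) => {
+    console.log(JSON.stringify(error));
+    console.log(JSON.stringify(data));
+  });
+
+  // Call getStatsTxBytes to obtain the sent data traffic during network sharing, in KB.
+  sharing.getStatsTxBytes((error, data) => {
+    console.log(JSON.stringify(error));
+    console.log(JSON.stringify(data));
+  });
+```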
diff --git a/en/application-dev/connectivity/socket-connection.md b/en/application-dev/connectivity/socket-connection.md
index da5bea318e2b60da5641b9cf01ee73c926802c16..5cae73b2a5c84f280aea80e299605ee80ac2553a 100644
--- a/en/application-dev/connectivity/socket-connection.md
+++ b/en/application-dev/connectivity/socket-connection.md
@@ -1,46 +1,83 @@
# Socket Connection
+## Introduction
-## Use Cases
+The Socket Connection module allows an application to transmit data over a Socket connection through the TCP, UDP, or TLS protocol.
-Your application can transmit data through Socket connections. Currently, the TCP and UDP protocols are supported.
+## Basic Concepts
+- Socket: An abstraction of endpoints for bidirectional communication between application processes running on different hosts in a network.
+- TCP: Transmission Control Protocol, which is a byte stream–based transport layer communication protocol that is connection-oriented and reliable.
+- UDP: User Datagram Protocol, which is a simple datagram-oriented transport layer communication protocol.
+- TLS: Transport Layer Security, which is a protocol that ensures the data confidentiality and integrity between communication programs.
+
+## When to Use
+
+Applications transmit data over TCP, UDP, or TLS Socket connections. The main application scenarios are as follows:
+
+- Implementing data transmission over TCP/UDP Socket connections
+- Implementing encrypted data transmission over TLS Socket connections
## Available APIs
-The Socket connection function is mainly implemented by the Socket module. The following table describes the related APIs.
+For the complete list of APIs and example code, see [Socket Connection](../reference/apis/js-apis-socket.md).
-| API| Description |
+Socket connection functions are mainly implemented by the **socket** module. The following table describes the related APIs.
+
+| API| Description|
| -------- | -------- |
-| constructUDPSocketInstance() | Creates a **UDPSocket** object. |
-| constructTCPSocketInstance() | Creates a **TCPSocket** object. |
-| bind() | Binds the IP address and port number. |
+| constructUDPSocketInstance() | Creates a **UDPSocket** object.|
+| constructTCPSocketInstance() | Creates a **TCPSocket** object.|
+| bind() | Binds the IP address and port number.|
| send() | Sends data.|
-| close() | Closes a Socket connection. |
-| getState() | Obtains the Socket connection status. |
-| connect() | Connects to the specified IP address and port. This function is supported only for TCP. |
-| getRemoteAddress() | Obtains the peer address of the Socket connection. This function is supported only for TCP. The **connect** API must have been called before you use this API. |
-| on(type: 'message') | Enables listening for **message** events of the Socket connection. |
-| off(type: 'message') | Disables listening for **message** events of the Socket connection. |
-| on(type: 'close') | Enables listening for **close** events of the Socket connection. |
-| off(type: 'close') | Disables listening for **close** events of the Socket connection. |
-| on(type: 'error') | Enables listening for **error** events of the Socket connection. |
-| off(type: 'error') | Disables listening for **error** events of the Socket connection. |
-| on(type: 'listening') | Enables listening for **listening** events of the UDPSocket connection. |
-| off(type: 'listening') | Disables listening for **listening** events of the UDPSocket connection. |
-| on(type: 'connect') | Enables listening for **connect** events of the TCPSocket connection. |
-| off(type: 'connect') | Disables listening for **connect** events of the TCPSocket connection. |
+| close() | Closes a Socket connection.|
+| getState() | Obtains the Socket connection status.|
+| connect() | Connects to the specified IP address and port. This function is supported only for TCP.|
+| getRemoteAddress() | Obtains the peer address of the Socket connection. This function is supported only for TCP. The **connect** API must have been called before you use this API.|
+| on(type: 'message') | Subscribes to **message** events of the Socket connection.|
+| off(type: 'message') | Unsubscribes from **message** events of the Socket connection.|
+| on(type: 'close') | Subscribes to **close** events of the Socket connection.|
+| off(type: 'close') | Unsubscribes from **close** events of the Socket connection.|
+| on(type: 'error') | Subscribes to **error** events of the Socket connection.|
+| off(type: 'error') | Unsubscribes from **error** events of the Socket connection.|
+| on(type: 'listening') | Subscribes to **listening** events of the UDP Socket connection. |
+| off(type: 'listening') | Unsubscribes from **listening** events of the UDP Socket connection. |
+| on(type: 'connect') | Subscribes to **connect** events of the TCP Socket connection. |
+| off(type: 'connect') | Unsubscribes from **connect** events of the TCP Socket connection.|
+
+TLS Socket connection functions are mainly provided by the **tls_socket** module. The following table describes the related APIs.
+
+| API| Description|
+| -------- | -------- |
+| constructTLSSocketInstance() | Creates a **TLSSocket** object.|
+| bind() | Binds the IP address and port number.|
+| close() | Closes a Socket connection.|
+| connect() | Sets up a connection to the specified IP address and port number.|
+| getCertificate() | Obtains an object representing the local certificate.|
+| getCipherSuite() | Obtains a list containing information about the negotiated cipher suite.|
+| getProtocol() | Obtains a string containing the SSL/TLS protocol version negotiated for the current connection.|
+| getRemoteAddress() | Obtains the peer address of the TLS Socket connection.|
+| getRemoteCertificate() | Obtains an object representing a peer certificate.|
+| getSignatureAlgorithms() | Obtains a list containing signature algorithms shared between the server and client, in descending order of priority.|
+| getState() | Obtains the TLS Socket connection status.|
+| off(type: 'close') | Unsubscribes from **close** events of the TLS Socket connection.|
+| off(type: 'error') | Unsubscribes from **error** events of the TLS Socket connection.|
+| off(type: 'message') | Unsubscribes from **message** events of the TLS Socket connection.|
+| on(type: 'close') | Subscribes to **close** events of the TLS Socket connection.|
+| on(type: 'error') | Subscribes to **error** events of the TLS Socket connection.|
+| on(type: 'message') | Subscribes to **message** events of the TLS Socket connection.|
+| send() | Sends data.|
+| setExtraOptions() | Sets other properties of the TLS Socket connection.|
-## How to Develop
+## Transmitting Data over TCP/UDP Socket Connections
-The implementation is similar for UDPSocket and TCPSocket. The following uses the TCPSocket as an example.
+The implementation is similar for UDP Socket and TCP Socket connections. The following uses data transmission over a TCP Socket connection as an example.
-1. Import the required Socket module.
+1. Import the required **socket** module.
2. Create a **TCPSocket** object.
-3. (Optional) Enable listening for TCPSocket events.
+3. (Optional) Subscribe to TCP Socket connection events.
4. Bind the IP address and port number. The port number can be specified or randomly allocated by the system.
@@ -48,15 +85,15 @@ The implementation is similar for UDPSocket and TCPSocket. The following uses th
6. Send data.
-7. Enable the TCPSocket connection to be automatically closed after use.
-
+7. Enable the TCP Socket connection to be automatically closed after use.
+
```js
import socket from '@ohos.net.socket'
-
+
// Create a TCPSocket object.
let tcp = socket.constructTCPSocketInstance();
-
- // Enable listening for TCPSocket events.
+
+ // Subscribe to TCP Socket connection events.
tcp.on('message', value => {
console.log("on message")
let buffer = value.message
@@ -73,7 +110,7 @@ The implementation is similar for UDPSocket and TCPSocket. The following uses th
tcp.on('close', () => {
console.log("on close")
});
-
+
// Bind the local IP address and port number.
let bindAddress = {
address: '192.168.xx.xx',
@@ -86,6 +123,7 @@ The implementation is similar for UDPSocket and TCPSocket. The following uses th
return;
}
console.log('bind success');
+
// Set up a connection to the specified IP address and port number.
let connectAddress = {
address: '192.168.xx.xx',
@@ -100,6 +138,7 @@ The implementation is similar for UDPSocket and TCPSocket. The following uses th
return;
}
console.log('connect success');
+
// Send data.
tcp.send({
data: 'Hello, server!'
@@ -112,7 +151,8 @@ The implementation is similar for UDPSocket and TCPSocket. The following uses th
})
});
});
- // Enable the TCPSocket connection to be automatically closed after use. Then, disable listening for TCPSocket events.
+
+ // Enable the TCP Socket connection to be automatically closed after use. Then, disable listening for TCP Socket connection events.
setTimeout(() => {
tcp.close((err) => {
console.log('close socket.')
@@ -122,3 +162,160 @@ The implementation is similar for UDPSocket and TCPSocket. The following uses th
tcp.off('close');
}, 30 * 1000);
```
+
+## Implementing Encrypted Data Transmission over TLS Socket Connections
+
+### How to Develop
+
+TLS Socket connection process on the client:
+
+1. Import the required **socket** module.
+
+2. Create a **TLSSocket** object.
+
+3. (Optional) Subscribe to TLS Socket connection events.
+
+4. Bind the local IP address and port number.
+
+5. Set the communication parameters. For two-way authentication, provide the client key, digital certificate, and CA certificate; for one-way authentication, provide the CA certificate only.
+
+6. Set up a connection and send data.
+
+7. Enable the TLS Socket connection to be automatically closed after use.
+
+```js
+ import socket from '@ohos.net.socket'
+
+ // Create a TLS Socket connection (for two-way authentication).
+ let tlsTwoWay = socket.constructTLSSocketInstance();
+
+ // Subscribe to TLS Socket connection events.
+ tlsTwoWay.on('message', value => {
+ console.log("on message")
+ let buffer = value.message
+ let dataView = new DataView(buffer)
+ let str = ""
+ for (let i = 0; i < dataView.byteLength; ++i) {
+ str += String.fromCharCode(dataView.getUint8(i))
+ }
+ console.log("on connect received:" + str)
+ });
+ tlsTwoWay.on('connect', () => {
+ console.log("on connect")
+ });
+ tlsTwoWay.on('close', () => {
+ console.log("on close")
+ });
+
+ // Bind the local IP address and port number.
+ tlsTwoWay.bind({address: '192.168.xxx.xxx', port: xxxx, family: 1}, err => {
+ if (err) {
+ console.log('bind fail');
+ return;
+ }
+ console.log('bind success');
+ });
+
+ // Set the communication parameters.
+ let options = {
+ ALPNProtocols: ["spdy/1", "http/1.1"],
+
+ // Set up a connection to the specified IP address and port number.
+ address: {
+ address: "192.168.xx.xxx",
+ port: xxxx, // Port
+ family: 1,
+ },
+
+ // Set the parameters used for authentication during communication.
+ secureOptions: {
+ key: "xxxx", // Key
+ cert: "xxxx", // Digital certificate
+ ca: ["xxxx"], // CA certificate
+ passwd: "xxxx", // Password for generating the key
+ protocols: [socket.Protocol.TLSv12], // Communication protocol
+ useRemoteCipherPrefer: true, // Whether to preferentially use the peer cipher suite
+ signatureAlgorithms: "rsa_pss_rsae_sha256:ECDSA+SHA256", // Signature algorithm
+ cipherSuite: "AES256-SHA256", // Cipher suite
+ },
+ };
+
+ // Set up a connection.
+ tlsTwoWay.connect(options, (err, data) => {
+ console.error(err);
+ console.log(data);
+ });
+
+  // Enable the TLS Socket connection to be automatically closed after use. Then, disable listening for TLS Socket connection events.
+ tlsTwoWay.close((err) => {
+ if (err) {
+ console.log("close callback error = " + err);
+ } else {
+ console.log("close success");
+ }
+ tlsTwoWay.off('message');
+ tlsTwoWay.off('connect');
+ tlsTwoWay.off('close');
+ });
+
+ // Create a TLS Socket connection (for one-way authentication).
+ let tlsOneWay = socket.constructTLSSocketInstance(); // One way authentication
+
+ // Subscribe to TLS Socket connection events.
+  tlsOneWay.on('message', value => {
+    console.log("on message")
+    let buffer = value.message
+    let dataView = new DataView(buffer)
+    let str = ""
+    for (let i = 0; i < dataView.byteLength; ++i) {
+      str += String.fromCharCode(dataView.getUint8(i))
+    }
+    console.log("on connect received:" + str)
+  });
+  tlsOneWay.on('connect', () => {
+    console.log("on connect")
+  });
+  tlsOneWay.on('close', () => {
+    console.log("on close")
+  });
+
+ // Bind the local IP address and port number.
+ tlsOneWay.bind({address: '192.168.xxx.xxx', port: xxxx, family: 1}, err => {
+ if (err) {
+ console.log('bind fail');
+ return;
+ }
+ console.log('bind success');
+ });
+
+ // Set the communication parameters.
+ let oneWayOptions = {
+ address: {
+ address: "192.168.xxx.xxx",
+ port: xxxx,
+ family: 1,
+ },
+ secureOptions: {
+ ca: ["xxxx","xxxx"], // CA certificate
+ cipherSuite: "AES256-SHA256", // Cipher suite
+ },
+ };
+
+ // Set up a connection.
+ tlsOneWay.connect(oneWayOptions, (err, data) => {
+ console.error(err);
+ console.log(data);
+ });
+
+  // Enable the TLS Socket connection to be automatically closed after use. Then, disable listening for TLS Socket connection events.
+  tlsOneWay.close((err) => {
+    if (err) {
+      console.log("close callback error = " + err);
+    } else {
+      console.log("close success");
+    }
+    tlsOneWay.off('message');
+    tlsOneWay.off('connect');
+    tlsOneWay.off('close');
+  });
+```
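+
+After a TLS Socket connection is set up, the query APIs listed in the table above can be used to inspect the negotiated parameters. A minimal sketch, assuming the callback overloads of **getProtocol** and **getCipherSuite** described in the API reference:
+
+```js
+  // Obtain the SSL/TLS protocol version negotiated for the current connection.
+  tlsTwoWay.getProtocol((err, data) => {
+    console.log(JSON.stringify(err));
+    console.log(JSON.stringify(data));
+  });
+
+  // Obtain the negotiated cipher suite.
+  tlsTwoWay.getCipherSuite((err, data) => {
+    console.log(JSON.stringify(err));
+    console.log(JSON.stringify(data));
+  });
+```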
diff --git a/en/application-dev/database/database-datashare-guidelines.md b/en/application-dev/database/database-datashare-guidelines.md
index 1f25dccf2a36f3bbedb5728291e8e11b3292476e..580811158051b5b6d5d2137f4b14654a46e891b9 100644
--- a/en/application-dev/database/database-datashare-guidelines.md
+++ b/en/application-dev/database/database-datashare-guidelines.md
@@ -34,7 +34,7 @@ There are two roles in **DataShare**:
- Data provider: adds, deletes, modifies, and queries data, opens files, and shares data.
- Data consumer: accesses the data provided by the provider using **DataShareHelper**.
-### Data Provider Application Development (Only for System Applications)
+### Data Provider Application Development (for System Applications Only)
[DataShareExtensionAbility](../reference/apis/js-apis-application-dataShareExtensionAbility.md) provides the following APIs. You can override these APIs as required.
diff --git a/en/application-dev/database/database-mdds-guidelines.md b/en/application-dev/database/database-mdds-guidelines.md
index b72874536b968593cbb7a3c8d5fd865eb1720b35..b84b668ce377b03561f7f7fdd3cdd6eb5fb0d796 100644
--- a/en/application-dev/database/database-mdds-guidelines.md
+++ b/en/application-dev/database/database-mdds-guidelines.md
@@ -13,7 +13,7 @@ For details about the APIs, see [Distributed KV Store](../reference/apis/js-apis
| API | Description |
| ------------------------------------------------------------ | ------------------------------------------------------------ |
-| createKVManager(config: KVManagerConfig, callback: AsyncCallback<KVManager>): void
createKVManager(config: KVManagerConfig): Promise<KVManager> | Creates a **KvManager** object for database management. |
+| createKVManager(config: KVManagerConfig): KVManager | Creates a **KvManager** object for database management. |
| getKVStore<T extends KVStore>(storeId: string, options: Options, callback: AsyncCallback<T>): void
getKVStore<T extends KVStore>(storeId: string, options: Options): Promise<T> | Creates and obtains a KV store.|
| put(key: string, value: Uint8Array\|string\|number\|boolean, callback: AsyncCallback<void>): void
put(key: string, value: Uint8Array\|string\|number\|boolean): Promise<void> | Inserts and updates data. |
| delete(key: string, callback: AsyncCallback<void>): void
delete(key: string): Promise<void> | Deletes data. |
@@ -117,16 +117,10 @@ The following uses a single KV store as an example to describe the development p
bundleName: 'com.example.datamanagertest',
context:context,
}
- distributedKVStore.createKVManager(kvManagerConfig, function (err, manager) {
- if (err) {
- console.error(`Failed to create KVManager. code is ${err.code},message is ${err.message}`);
- return;
- }
- console.log('Created KVManager successfully');
- kvManager = manager;
- });
+ kvManager = distributedKVStore.createKVManager(kvManagerConfig);
+ console.log("Created KVManager successfully");
} catch (e) {
- console.error(`An unexpected error occurred.code is ${e.code},message is ${e.message}`);
+ console.error(`Failed to create KVManager. Code is ${e.code}, message is ${e.message}`);
}
```
@@ -150,14 +144,14 @@ The following uses a single KV store as an example to describe the development p
};
kvManager.getKVStore('storeId', options, function (err, store) {
if (err) {
- console.error(`Failed to get KVStore: code is ${err.code},message is ${err.message}`);
+ console.error(`Failed to get KVStore: code is ${err.code}, message is ${err.message}`);
return;
}
console.log('Obtained KVStore successfully');
kvStore = store;
});
} catch (e) {
- console.error(`An unexpected error occurred.code is ${e.code},message is ${e.message}`);
+ console.error(`An unexpected error occurred. Code is ${e.code}, message is ${e.message}`);
}
```
@@ -175,7 +169,7 @@ The following uses a single KV store as an example to describe the development p
console.log(`dataChange callback call data: ${data}`);
});
}catch(e){
- console.error(`An unexpected error occured.code is ${e.code},message is ${e.message}`);
+ console.error(`An unexpected error occurred. Code is ${e.code}, message is ${e.message}`);
}
```
@@ -192,13 +186,13 @@ The following uses a single KV store as an example to describe the development p
try {
kvStore.put(KEY_TEST_STRING_ELEMENT, VALUE_TEST_STRING_ELEMENT, function (err,data) {
if (err != undefined) {
- console.error(`Failed to put.code is ${err.code},message is ${err.message}`);
+ console.error(`Failed to put data. Code is ${err.code}, message is ${err.message}`);
return;
}
- console.log('Put data successfully');
+ console.log("Put data successfully");
});
}catch (e) {
- console.error(`An unexpected error occurred.code is ${e.code},message is ${e.message}`);
+ console.error(`An unexpected error occurred. Code is ${e.code}, message is ${e.message}`);
}
```
@@ -215,20 +209,20 @@ The following uses a single KV store as an example to describe the development p
try {
kvStore.put(KEY_TEST_STRING_ELEMENT, VALUE_TEST_STRING_ELEMENT, function (err,data) {
if (err != undefined) {
- console.error(`Failed to put.code is ${err.code},message is ${err.message}`);
+ console.error(`Failed to put data. Code is ${err.code}, message is ${err.message}`);
return;
}
- console.log('Put data successfully');
+ console.log("Put data successfully");
kvStore.get(KEY_TEST_STRING_ELEMENT, function (err,data) {
if (err != undefined) {
- console.error(`Failed to get data.code is ${err.code},message is ${err.message}`);
+ console.error(`Failed to obtain data. Code is ${err.code}, message is ${err.message}`);
return;
}
console.log(`Obtained data successfully:${data}`);
});
});
}catch (e) {
- console.error(`Failed to get.code is ${e.code},message is ${e.message}`);
+ console.error(`Failed to obtain data. Code is ${e.code}, message is ${e.message}`);
}
```
@@ -262,7 +256,7 @@ The following uses a single KV store as an example to describe the development p
// 1000 indicates that the maximum delay is 1000 ms.
kvStore.sync(deviceIds, distributedKVStore.SyncMode.PUSH_ONLY, 1000);
} catch (e) {
- console.error(`An unexpected error occurred. code is ${e.code},message is ${e.message}`);
+ console.error(`An unexpected error occurred. Code is ${e.code}, message is ${e.message}`);
}
}
});
diff --git a/en/application-dev/database/database-preference-guidelines.md b/en/application-dev/database/database-preference-guidelines.md
index e5c9faa1477565541a94076e2fb568e69b2f5cf6..724e273675061c4b6969fb3fcd6f6cbdd984a15f 100644
--- a/en/application-dev/database/database-preference-guidelines.md
+++ b/en/application-dev/database/database-preference-guidelines.md
@@ -114,21 +114,19 @@ You can use the following APIs to delete a **Preferences** instance or data file
```ts
// Obtain the context.
import UIAbility from '@ohos.app.ability.UIAbility';
- let context = null;
let preferences = null;
export default class EntryAbility extends UIAbility {
- onWindowStageCreate(windowStage){
- context = this.context;
+ onWindowStageCreate(windowStage) {
+ let promise = data_preferences.getPreferences(this.context, 'mystore');
+ promise.then((pref) => {
+ preferences = pref;
+ }).catch((err) => {
+      console.error("Failed to get the preferences. Cause: " + JSON.stringify(err));
+ })
}
}
- let promise = data_preferences.getPreferences(context, 'mystore');
- promise.then((pref) => {
- preferences = pref;
- }).catch((err) => {
- console.info("Failed to get the preferences.");
- })
```
3. Write data.
diff --git a/en/application-dev/device/Readme-EN.md b/en/application-dev/device/Readme-EN.md
index abf5154a8caa1473367960eea7b9118598ce706a..6ce8d1b16951d5fb739d97c102cb8d3be3f628d7 100644
--- a/en/application-dev/device/Readme-EN.md
+++ b/en/application-dev/device/Readme-EN.md
@@ -1,19 +1,21 @@
-# Device
+# Device Management
+- USB Service
+ - [USB Service Overview](usb-overview.md)
+ - [USB Service Development](usb-guidelines.md)
- Location
- [Location Service Development](location-guidelines.md)
-- Multimodal Input
- - [Input Device Development](inputdevice-guidelines.md)
- - [Mouse Pointer Development](pointerstyle-guidelines.md)
- Sensor
- [Sensor Overview](sensor-overview.md)
- [Sensor Development](sensor-guidelines.md)
+- Vibrator
+ - [Vibrator Overview](vibrator-overview.md)
+ - [Vibrator Development](vibrator-guidelines.md)
+- Multimodal Input
+ - [Input Device Development](inputdevice-guidelines.md)
+ - [Mouse Pointer Development](pointerstyle-guidelines.md)
- Update Service
- [Sample Server Overview](sample-server-overview.md)
- [Sample Server Development](sample-server-guidelines.md)
-- USB Service
- - [USB Service Overview](usb-overview.md)
- - [USB Service Development](usb-guidelines.md)
-- Vibrator
- - [Vibrator Overview](vibrator-overview.md)
- - [Vibrator Development](vibrator-guidelines.md)
\ No newline at end of file
+- Stationary
+ - [Stationary Development](stationary-guidelines.md)
diff --git a/en/application-dev/device/inputdevice-guidelines.md b/en/application-dev/device/inputdevice-guidelines.md
index da6eef71d750b74e01d1ea8a9eaaf49b1bf598cb..c15955d9b01239605d0ce1afa9bfe5f693b22940 100644
--- a/en/application-dev/device/inputdevice-guidelines.md
+++ b/en/application-dev/device/inputdevice-guidelines.md
@@ -29,7 +29,6 @@ When a user enters text, the input method determines whether to launch the virtu
1. Call the **getDeviceList** API to obtain the list of connected input devices. Call the **getKeyboardType** API to traverse all connected devices to check whether a physical keyboard exists. If a physical keyboard exists, mark the physical keyboard as connected. This step ensures that your application detects all inserted input devices before listening for device hot swap events.
2. Call the **on** API to listen for device hot swap events. If a physical keyboard is inserted, mark the physical keyboard as connected. If a physical keyboard is removed, mark the physical keyboard as disconnected.
-3. When a user enters text, check whether a physical keyboard is connected. If a physical keyboard is not connected, launch the virtual keyboard.
```js
@@ -65,6 +64,4 @@ try {
} catch (error) {
console.log(`Execute failed, error: ${JSON.stringify(error, [`code`, `message`])}`);
}
- // 3. Determine whether to launch the virtual keyboard based on the value of isPhysicalKeyboardExist.
- // TODO
```
diff --git a/en/application-dev/device/pointerstyle-guidelines.md b/en/application-dev/device/pointerstyle-guidelines.md
index cecab92b282e2da7a3bb966bcedeefa84768f22e..bcc09093eed4440a0c5e62c5d4cfe37a3f954c87 100644
--- a/en/application-dev/device/pointerstyle-guidelines.md
+++ b/en/application-dev/device/pointerstyle-guidelines.md
@@ -15,11 +15,11 @@ import pointer from '@ohos.multimodalInput.pointer';
The following table lists the common APIs for mouse pointer management. For details about the APIs, see [ohos.multimodalInput.pointer](../reference/apis/js-apis-pointer.md).
| Instance | API | Description |
-| ------- | ------------------------------------------------------------ | --------------------------------------------------------------- |
-| pointer | function isPointerVisible(callback: AsyncCallback\): void; | Checks the visible status of the mouse pointer. |
-| pointer | function setPointerVisible(visible: boolean, callback: AsyncCallback\): void; | Sets the visible status of the mouse pointer. This setting takes effect for the mouse pointer globally. |
+| ------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| pointer | function isPointerVisible(callback: AsyncCallback\): void; | Checks the visible status of the mouse pointer. |
+| pointer | function setPointerVisible(visible: boolean, callback: AsyncCallback\): void; | Sets the visible status of the mouse pointer. This setting takes effect for the mouse pointer globally.|
| pointer | function setPointerStyle(windowId: number, pointerStyle: PointerStyle, callback: AsyncCallback\): void; | Sets the mouse pointer style. This setting takes effect for the mouse pointer style of a specified window. |
-| pointer | function getPointerStyle(windowId: number, callback: AsyncCallback\): void; | Obtains the mouse pointer style. |
+| pointer | function getPointerStyle(windowId: number, callback: AsyncCallback\): void; | Obtains the mouse pointer style. |
## Hiding the Mouse Pointer
@@ -77,43 +77,48 @@ When designing a color picker, you can have the mouse pointer switched to the co
5. Set the mouse pointer to the default style.
```js
+import pointer from '@ohos.multimodalInput.pointer';
import window from '@ohos.window';
// 1. Enable the color pickup function.
// 2. Obtain the window ID.
-window.getTopWindow((error, windowClass) => {
- windowClass.getProperties((error, data) => {
- var windowId = data.id;
- if (windowId < 0) {
- console.log(`Invalid windowId`);
- return;
- }
- try {
- // 3. Set the mouse pointer to the color picker style.
- pointer.setPointerStyle(windowId, pointer.PointerStyle.COLOR_SUCKER).then(() => {
- console.log(`Successfully set mouse pointer style`);
- });
- } catch (error) {
- console.log(`Failed to set the pointer style, error=${JSON.stringify(error)}, msg=${JSON.stringify(message)}`);
- }
- });
+window.getLastWindow(this.context, (error, windowClass) => {
+ if (error.code) {
+ console.error('Failed to obtain the top window. Cause: ' + JSON.stringify(error));
+ return;
+ }
+ var windowId = windowClass.getWindowProperties().id;
+ if (windowId < 0) {
+ console.log(`Invalid windowId`);
+ return;
+ }
+ try {
+ // 3. Set the mouse pointer to the color picker style.
+ pointer.setPointerStyle(windowId, pointer.PointerStyle.COLOR_SUCKER).then(() => {
+ console.log(`Successfully set mouse pointer style`);
+ });
+ } catch (error) {
+    console.log(`Failed to set the pointer style, error=${JSON.stringify(error)}, msg=${JSON.stringify(error.message)}`);
+ }
});
// 4. End color pickup.
-window.getTopWindow((error, windowClass) => {
- windowClass.getProperties((error, data) => {
- var windowId = data.id;
- if (windowId < 0) {
- console.log(`Invalid windowId`);
- return;
- }
- try {
- // 5. Set the mouse pointer to the default style.
- pointer.setPointerStyle(windowId, pointer.PointerStyle.DEFAULT).then(() => {
- console.log(`Successfully set mouse pointer style`);
- });
- } catch (error) {
- console.log(`Failed to set the pointer style, error=${JSON.stringify(error)}, msg=${JSON.stringify(message)}`);
- }
- });
+window.getLastWindow(this.context, (error, windowClass) => {
+ if (error.code) {
+ console.error('Failed to obtain the top window. Cause: ' + JSON.stringify(error));
+ return;
+ }
+ var windowId = windowClass.getWindowProperties().id;
+ if (windowId < 0) {
+ console.log(`Invalid windowId`);
+ return;
+ }
+ try {
+ // 5. Set the mouse pointer to the default style.
+ pointer.setPointerStyle(windowId, pointer.PointerStyle.DEFAULT).then(() => {
+ console.log(`Successfully set mouse pointer style`);
+ });
+ } catch (error) {
+    console.log(`Failed to set the pointer style, error=${JSON.stringify(error)}, msg=${JSON.stringify(error.message)}`);
+ }
});
```
diff --git a/en/application-dev/device/stationary-guidelines.md b/en/application-dev/device/stationary-guidelines.md
new file mode 100644
index 0000000000000000000000000000000000000000..9f6693027a29f48c2c434b842df74beb5209f319
--- /dev/null
+++ b/en/application-dev/device/stationary-guidelines.md
@@ -0,0 +1,84 @@
+# Stationary Development
+
+
+## When to Use
+
+An application can call the **Stationary** module to obtain the device status, for example, whether the device is absolutely or relatively still.
+
+For details about the APIs, see [Stationary](../reference/apis/js-apis-stationary.md).
+
+## Device Status Type Parameters
+
+| Name| Description|
+| -------- | -------- |
+| still | Absolutely still.|
+| relativeStill | Relatively still.|
+
+## Parameters for Subscribing to Device Status Events
+
+| Name | Value | Description |
+| ------------------------------ | ---- | ---------------------------------------- |
+| ENTER                          | 1    | Event reported when the device enters the specified status. |
+| EXIT                           | 2    | Event reported when the device exits the specified status.|
+| ENTER_EXIT                     | 3    | Event reported when the device enters or exits the specified status.|
+
+## Returned Device Status Parameters
+
+| Name | Value | Description |
+| ------------------------------ | ---- | ---------------------------------------- |
+| ENTER                          | 1    | The device has entered the specified status. |
+| EXIT                           | 2    | The device has exited the specified status.|
+
+## Available APIs
+
+| Module | Name | Description |
+| ------------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| ohos.stationary | on(activity: ActivityType, event: ActivityEvent, reportLatencyNs: number, callback: Callback<ActivityResponse>): void | Subscribes to the device status. This API uses an asynchronous callback to return the result.|
+| ohos.stationary | once(activity: ActivityType, callback: Callback<ActivityResponse>): void | Obtains the device status. This API uses an asynchronous callback to return the result.|
+| ohos.stationary | off(activity: ActivityType, event: ActivityEvent, callback?: Callback<ActivityResponse>): void | Unsubscribes from the device status. |
+
+## Constraints
+
+The device must support the acceleration sensor.
+
+## How to Develop
+
+1. Subscribe to the event indicating entering the absolute still state, with the event reported at an interval of 1 second.
+
+ ```js
+ import stationary from '@ohos.stationary';
+ var reportLatencyNs = 1000000000;
+ try {
+ stationary.on('still', stationary.ActivityEvent.ENTER, reportLatencyNs, (data) => {
+ console.log('data='+ JSON.stringify(data));
+ })
+ } catch (err) {
+ console.error('errCode: ' + err.code + ' ,msg: ' + err.message);
+ }
+ ```
+
+2. Obtain the event indicating entering the absolute still state.
+
+ ```js
+ import stationary from '@ohos.stationary';
+ try {
+ stationary.once('still', (data) => {
+ console.log('data='+ JSON.stringify(data));
+ })
+ } catch (err) {
+ console.error('errCode: ' + err.code + ' ,msg: ' + err.message);
+ }
+ ```
+
+3. Unsubscribe from the event indicating entering the absolute still state.
+
+ ```js
+ import stationary from '@ohos.stationary';
+ try {
+ stationary.off('still', stationary.ActivityEvent.ENTER, (data) => {
+ console.log('data='+ JSON.stringify(data));
+ })
+ } catch (err) {
+ console.error('errCode: ' + err.code + ' ,msg: ' + err.message);
+ }
+ ```
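+
+The relatively still state is subscribed to in the same way; only the activity type differs. A sketch based on the **relativeStill** type listed above:
+
+```js
+import stationary from '@ohos.stationary';
+var reportLatencyNs = 1000000000;
+try {
+   stationary.on('relativeStill', stationary.ActivityEvent.ENTER, reportLatencyNs, (data) => {
+      console.log('data='+ JSON.stringify(data));
+   })
+} catch (err) {
+   console.error('errCode: ' + err.code + ' ,msg: ' + err.message);
+}
+```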
diff --git a/en/application-dev/device/usb-guidelines.md b/en/application-dev/device/usb-guidelines.md
index c4f5131536a1f0b55ae973bdf7cdf04d2b8f0980..68c8c3de013e75d56854bf0cf0e3a71aca9eb261 100644
--- a/en/application-dev/device/usb-guidelines.md
+++ b/en/application-dev/device/usb-guidelines.md
@@ -130,8 +130,6 @@ You can set a USB device as the USB host to connect to other USB devices for dat
usb.bulkTransfer(pipe, inEndpoint, dataUint8Array, 15000).then(dataLength => {
if (dataLength >= 0) {
console.info("usb readData result Length : " + dataLength);
- let resultStr = this.ab2str(dataUint8Array); // Convert uint8 data into a string.
- console.info("usb readData buffer : " + resultStr);
} else {
console.info("usb readData failed : " + dataLength);
}
diff --git a/en/application-dev/device/vibrator-guidelines.md b/en/application-dev/device/vibrator-guidelines.md
index 36b08b2acd96e2d65fc14a936f1c3e9c9dd31a88..c028f5be4890c476bab762cfc8b0f0d12d9fdda8 100644
--- a/en/application-dev/device/vibrator-guidelines.md
+++ b/en/application-dev/device/vibrator-guidelines.md
@@ -16,6 +16,10 @@ For details about the APIs, see [Vibrator](../reference/apis/js-apis-vibrator.md
| ohos.vibrator | startVibration(effect: VibrateEffect, attribute: VibrateAttribute, callback: AsyncCallback<void>): void | Starts vibration with the specified effect and attribute. This API uses an asynchronous callback to return the result.|
| ohos.vibrator | stopVibration(stopMode: VibratorStopMode): Promise<void> | Stops vibration in the specified mode. This API uses a promise to return the result. |
| ohos.vibrator | stopVibration(stopMode: VibratorStopMode, callback: AsyncCallback<void>): void | Stops vibration in the specified mode. This API uses an asynchronous callback to return the result. |
+| ohos.vibrator | stopVibration(): Promise<void> | Stops vibration in all modes. This API uses a promise to return the result. |
+| ohos.vibrator | stopVibration(callback: AsyncCallback<void>): void | Stops vibration in all modes. This API uses an asynchronous callback to return the result. |
+| ohos.vibrator | isSupportEffect(effectId: string): Promise<boolean> | Checks whether the passed effect ID is supported. This API uses a promise to return the result. The value **true** means that the passed effect ID is supported, and **false** means the opposite. |
+| ohos.vibrator | isSupportEffect(effectId: string, callback: AsyncCallback<boolean>): void | Checks whether the passed effect ID is supported. This API uses an asynchronous callback to return the result. The value **true** means that the passed effect ID is supported, and **false** means the opposite. |
## How to Develop
@@ -27,7 +31,7 @@ For details about the APIs, see [Vibrator](../reference/apis/js-apis-vibrator.md
```js
import vibrator from '@ohos.vibrator';
try {
- vibrator.startVibration({
+ vibrator.startVibration({ // To use startVibration, you must configure the ohos.permission.VIBRATE permission.
type: 'time',
duration: 1000,
}, {
@@ -50,7 +54,7 @@ For details about the APIs, see [Vibrator](../reference/apis/js-apis-vibrator.md
```js
import vibrator from '@ohos.vibrator';
try {
- // Stop vibration in VIBRATOR_STOP_MODE_TIME mode.
+ // Stop vibration in VIBRATOR_STOP_MODE_TIME mode. To use stopVibration, you must configure the ohos.permission.VIBRATE permission.
vibrator.stopVibration(vibrator.VibratorStopMode.VIBRATOR_STOP_MODE_TIME, function (error) {
if (error) {
console.log('error.code' + error.code + 'error.message' + error.message);
@@ -62,3 +66,72 @@ For details about the APIs, see [Vibrator](../reference/apis/js-apis-vibrator.md
console.info('errCode: ' + err.code + ' ,msg: ' + err.message);
}
```
+
+4. Stop vibration in all modes.
+
+ ```js
+ import vibrator from '@ohos.vibrator';
+ // To use startVibration and stopVibration, you must configure the ohos.permission.VIBRATE permission.
+ try {
+ vibrator.startVibration({
+ type: 'time',
+ duration: 1000,
+ }, {
+ id: 0,
+ usage: 'alarm'
+ }, (error) => {
+ if (error) {
+                console.error('vibrate fail, error.code: ' + error.code + ', error.message: ' + error.message);
+ return;
+ }
+ console.log('Callback returned to indicate a successful vibration.');
+ });
+ // Stop vibration in all modes.
+ vibrator.stopVibration(function (error) {
+ if (error) {
+                console.log('error.code: ' + error.code + ', error.message: ' + error.message);
+                return;
+            }
+            console.log('Callback returned to indicate a successful stop.');
+ })
+ } catch (error) {
+        console.error('errCode: ' + error.code + ', msg: ' + error.message);
+ }
+ ```
+
+5. Check whether the passed effect ID is supported.
+
+ ```js
+ import vibrator from '@ohos.vibrator';
+ try {
+ // Check whether 'haptic.clock.timer' is supported.
+ vibrator.isSupportEffect('haptic.clock.timer', function (err, state) {
+ if (err) {
+ console.error('isSupportEffect failed, error:' + JSON.stringify(err));
+ return;
+ }
+ console.log('The effectId is ' + (state ? 'supported' : 'unsupported'));
+ if (state) {
+ try {
+ vibrator.startVibration({ // To use startVibration, you must configure the ohos.permission.VIBRATE permission.
+ type: 'preset',
+ effectId: 'haptic.clock.timer',
+ count: 1,
+ }, {
+ usage: 'unknown'
+ }, (error) => {
+                        if (error) {
+ console.error('haptic.clock.timer vibrator error:' + JSON.stringify(error));
+ } else {
+ console.log('haptic.clock.timer vibrator success');
+ }
+ });
+ } catch (error) {
+                    console.error('An exception occurred. Error: ' + JSON.stringify(error));
+ }
+ }
+ })
+ } catch (error) {
+        console.error('An exception occurred. Error: ' + JSON.stringify(error));
+ }
+ ```
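+
+The promise-style variants of **stopVibration** and **isSupportEffect** listed in the API table can be used in the same scenarios. The following is a minimal sketch, assuming the same **'haptic.clock.timer'** effect ID and the ohos.permission.VIBRATE permission:
+
+```js
+import vibrator from '@ohos.vibrator';
+// Check whether the effect is supported, then stop vibration in all modes, using promises.
+vibrator.isSupportEffect('haptic.clock.timer').then((state) => {
+    console.log('The effectId is ' + (state ? 'supported' : 'unsupported'));
+    return vibrator.stopVibration(); // Stop vibration in all modes.
+}).then(() => {
+    console.log('Promise returned to indicate a successful stop.');
+}).catch((error) => {
+    console.error('Promise error: ' + JSON.stringify(error));
+});
+```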
diff --git a/en/application-dev/dfx/Readme-EN.md b/en/application-dev/dfx/Readme-EN.md
index 4ed700a49b94ef0c296ec27eaf9c5cde96575234..c6b449f197ba66d8bd6f4021abffe6ce31a69028 100644
--- a/en/application-dev/dfx/Readme-EN.md
+++ b/en/application-dev/dfx/Readme-EN.md
@@ -1,13 +1,9 @@
# DFX
-- Application Event Logging
- - [Development of Application Event Logging](hiappevent-guidelines.md)
-- Distributed Call Chain Tracing
- - [Development of Distributed Call Chain Tracing](hitracechain-guidelines.md)
-- HiLog
- - [HiLog Development](hilog-guidelines.md)
-- Performance Tracing
- - [Development of Performance Tracing](hitracemeter-guidelines.md)
+- [Development of Application Event Logging](hiappevent-guidelines.md)
+- [Development of Performance Tracing](hitracemeter-guidelines.md)
+- [Development of Distributed Call Chain Tracing](hitracechain-guidelines.md)
+- [HiLog Development (Native)](hilog-guidelines.md)
- Error Management
- [Development of Error Manager](errormanager-guidelines.md)
- - [Development of Application Recovery](apprecovery-guidelines.md)
+ - [Development of Application Recovery](apprecovery-guidelines.md)
\ No newline at end of file
diff --git a/en/application-dev/dfx/apprecovery-guidelines.md b/en/application-dev/dfx/apprecovery-guidelines.md
index 67176a77ff4cacda15de76a3390275f59f3a6aa4..5c297d3357d83feb0b6c1465f67ec2b085ade7b3 100644
--- a/en/application-dev/dfx/apprecovery-guidelines.md
+++ b/en/application-dev/dfx/apprecovery-guidelines.md
@@ -1,62 +1,80 @@
-# Development of Application Recovery
+# Application Recovery Development
## When to Use
-During application running, some unexpected behaviors are inevitable. For example, unprocessed exceptions and errors are thrown, and the call or running constraints of the framework are violated.
+During application running, some unexpected behaviors are inevitable. For example, unprocessed exceptions and errors are thrown, and the call or running constraints of the recovery framework are violated.
-By default, the processes will exit as exception handling. However, if user data is generated during application use, process exits may interrupt user operations and cause data loss.
-In this way, application recovery APIs may help you save temporary data, restart an application after it exits, and restore its status and data, which deliver a better user experience.
+Process exit is treated as the default exception handling method. However, if user data is generated during application use, process exit may interrupt user operations and cause data loss.
+Application recovery helps to restore the application state and save temporary data upon next startup in the case of an abnormal process exit, thus providing a more consistent user experience. The application state includes two parts: the page stack of the ability and the data saved in **onSaveState**.
-Currently, the APIs support only the development of an application that adopts the stage model, single process, and single ability.
+In API version 9, application recovery is supported only for a single ability of an application developed using the stage model. Application state saving and automatic restart are performed when a JsError occurs.
+
+In API version 10, application recovery is also supported for multiple abilities of an application developed using the stage model. Application state saving and restoration are performed when an AppFreeze occurs. If an application is killed in control mode, its state will be restored upon next startup.
## Available APIs
-The application recovery APIs are provided by the **appRecovery** module, which can be imported via **import**. For details, please refer to [Development Example](#development-example). This document describes behaviors of APIs in API version 9, and the content will update with changes.
+The application recovery APIs are provided by the **appRecovery** module, which can be imported via **import**. For details, see [Development Example](#development-example).
### Available APIs
-| API | Description |
-| ------------------------------------------------------------ | ------------------------------------------------------------ |
-| enableAppRecovery(restart?: RestartFlag, saveOccasion?: SaveOccasionFlag, saveMode?: SaveModeFlag) : void; | Enables the application recovery function. |
-| saveAppState(): boolean; | Saves the ability status of an application. |
-| restartApp(): void; | Restarts the current process. If there is saved ability status, it will be passed to the **want** parameter's **wantParam** attribute of the **onCreate** lifecycle callback of the ability.|
+| API | Description |
+| ------------------------------------------------------------ | ---------------------------------------------------- |
+| enableAppRecovery(restart?: RestartFlag, saveOccasion?: SaveOccasionFlag, saveMode?: SaveModeFlag) : void;9+ | Enables application recovery. After this API is called, the first ability that is displayed when the application is started from the initiator can be restored.|
+| saveAppState(): boolean;9+ | Saves the state of the ability that supports recovery in the current application.|
+| restartApp(): void;9+ | Restarts the current process and starts the ability specified by **setRestartWant**. If no ability is specified, a foreground ability that supports recovery is restarted.|
+| saveAppState(context?: UIAbilityContext): boolean;10+ | Saves the ability state specified by **Context**.|
+| setRestartWant(want: Want): void;10+ | Sets the ability to restart when **restartApp** is actively called and **RestartFlag** is not **NO_RESTART**. The ability must be a **UIAbility** in the same bundle.|
+
+The preceding APIs are used in troubleshooting scenarios and do not throw exceptions. The following are some notes on API usage:
+
+**enableAppRecovery**: This API should be called during application initialization. For example, you can call this API in **onCreate** of **AbilityStage**. For details, see [Parameter Description](../reference/apis/js-apis-app-ability-appRecovery.md).
+
+**saveAppState**: After this API is called, the recovery framework invokes **onSaveState** for all abilities that support recovery in the current process. If you choose to save data in **onSaveState**, the related data and ability page stack are persistently stored in the local cache of the application. To save data of the specified ability, you need to specify the context corresponding to that ability.
+
+**setRestartWant**: This API specifies the ability to be restarted by **appRecovery**.
-The APIs are used for troubleshooting and do not return any exception. Therefore, you need to be familiar with when they are used.
+**restartApp**: After this API is called, the recovery framework kills the current process and restarts the ability specified by **setRestartWant**, with **APP_RECOVERY** set as the startup cause. In API version 9, or in scenarios where an ability is not specified by **setRestartWant**, the last foreground ability that supports recovery is started. If no foreground ability supports recovery, the application crashes. If a saved state is available for the restarted ability, the saved state is passed as the **wantParam** attribute in the **want** parameter of the ability's **onCreate** callback.
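+
+A minimal API-version-10 sketch of these calls, assuming a hypothetical **EntryAbility** and a placeholder bundle name:
+
+```ts
+import appRecovery from '@ohos.app.ability.appRecovery';
+
+// Specify which UIAbility to start on the next restartApp() call.
+appRecovery.setRestartWant({
+    bundleName: 'com.example.myapplication', // placeholder bundle name
+    abilityName: 'EntryAbility' // must be a UIAbility in the same bundle
+});
+appRecovery.saveAppState(); // Save the state of abilities that support recovery.
+appRecovery.restartApp(); // Kill the current process and start EntryAbility.
+```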
-**enableAppRecovery**: This API should be called during application initialization. For example, you can call this API in **onCreate** of **AbilityStage**. For details, please refer to the [parameter description](../reference/apis/js-apis-app-ability-appRecovery.md).
+### Application State Management
+Since API version 10, application recovery is not limited to automatic restart in the case of an exception. Therefore, you need to understand when the application will load the saved state.
+If the last exit of an application is not initiated by a user and a saved state is available for recovery, the startup reason is set to **APP_RECOVERY** when the application is started by the user next time, and the recovery state of the application is cleared.
+The application recovery status flag is set when **saveAppState** is actively or passively called. The flag is cleared when the application exits normally or the saved state is consumed. (A normal exit is usually triggered by pressing the back key or clearing recent tasks.)
-**saveAppState**: After this API is called, the framework calls back **onSaveState** of the ability. If data saving is agreed to in this method, relevant data and the page stack of the ability are persisted to the local cache of the application.
+
-**restartApp**: After this API is called, the framework kills the current application process and restarts the ability in the foreground, with **APP_RECOVERY** specified as the startup cause.
+### Application State Saving and Restore
+API version 10 or later supports saving of the application state when an application is suspended. If a JsError occurs, **onSaveState** is called in the main thread. If an AppFreeze occurs, however, the main thread may be suspended, and therefore **onSaveState** is called in a non-main thread. The following figure shows the main service flow.
-### Framework Fault Management Process
+
+When the application is suspended, the callback is not executed in the JS thread. Therefore, in the code of the **onSaveState** callback, you are advised not to use imported dynamic native libraries or access **thread_local** objects created by the main thread.
+
+### Framework Fault Management
Fault management is an important way for applications to deliver a better user experience. The application framework offers three methods for application fault management: fault listening, fault rectification, and fault query.
-- Fault listening refers to the process of registering [ErrorObserver](../reference/apis/js-apis-application-errorManager.md#errorobserver) via [errorManager](../reference/apis/js-apis-application-errorManager.md), listening for fault occurrence, and notifying the fault listener.
+- Fault listening refers to the process of registering an [ErrorObserver](../reference/apis/js-apis-inner-application-errorObserver.md) via [errorManager](../reference/apis/js-apis-app-ability-errorManager.md), listening for faults, and notifying the listener of the faults.
-- Fault rectification refers to [appRecovery](../reference/apis/js-apis-app-ability-appRecovery.md) and restarts an application to restore its status previous to a fault.
+- Fault rectification refers to the process of restoring the application state and data through [appRecovery](../reference/apis/js-apis-app-ability-appRecovery.md).
-- Fault query indicates that [faultLogger](../reference/apis/js-apis-faultLogger.md) obtains the fault information using its query API.
+- Fault query is the process of calling APIs of [faultLogger](../reference/apis/js-apis-faultLogger.md) to obtain the fault information.
-The figure below does not illustrate the time when [faultLogger](../reference/apis/js-apis-faultLogger.md) is called. You can refer to [LastExitReason](../reference/apis/js-apis-app-ability-abilityConstant.md#abilityconstantlastexitreason) passed during application initialization to determine whether to call [faultLogger](../reference/apis/js-apis-faultLogger.md) to query the information about the last fault.
+The figure below does not illustrate the time when [faultLogger](../reference/apis/js-apis-faultLogger.md) is called. You can refer to the [LastExitReason](../reference/apis/js-apis-app-ability-abilityConstant.md#abilityconstantlastexitreason) passed during application initialization to determine whether to call [faultLogger](../reference/apis/js-apis-faultLogger.md) to query information about the previous fault.

+It is recommended that you call [errorManager](../reference/apis/js-apis-app-ability-errorManager.md) to handle the exception. After the processing is complete, you can call the **saveAppState** API and restart the application.
+If you do not register [ErrorObserver](../reference/apis/js-apis-inner-application-errorObserver.md) or enable application recovery, the application process will exit according to the default processing logic of the system. Users can restart the application from the home screen.
+If you have enabled application recovery, the recovery framework first checks whether application state saving is supported and enabled. If so, the recovery framework invokes [onSaveState](../reference/apis/js-apis-app-ability-uiAbility.md#uiabilityonsavestate) of the [Ability](../reference/apis/js-apis-app-ability-uiAbility.md). Finally, the application is restarted.
-It is recommended that you call [errorManager](../reference/apis/js-apis-application-errorManager.md) to process the exception. After the processing is complete, you can call the status saving API and restart the application.
-If you do not register [ErrorObserver](../reference/apis/js-apis-application-errorManager.md#errorobserver) or enable application recovery, the application process will exit according to the default processing logic of the system. Users can restart the application from the home screen.
-If you have enabled application recovery, the framework first checks whether a fault allows for ability status saving and whether you have configured ability status saving. If so, [onSaveState](../reference/apis/js-apis-application-ability.md#abilityonsavestate) of [Ability](../reference/apis/js-apis-application-ability.md#ability) is called back. Finally, the application is restarted.
-
-### Scenarios Supported by Application Fault Management APIs
+### Supported Application Recovery Scenarios
Common fault types include JavaScript application crash, application freezing, and C++ application crash. Generally, an application is closed when a crash occurs. Application freezing occurs when the application does not respond. The fault type can be ignored for the upper layer of an application. The recovery framework implements fault management in different scenarios based on the fault type.
-| Fault | Fault Listening| Status Saving| Automatic Restart| Log Query|
-| ------------------------------------------------------------ | -------- | -------- | -------- | -------- |
-| [JS_CRASH](../reference/apis/js-apis-faultLogger.md#faulttype) | Supported | Supported | Supported | Supported |
-| [APP_FREEZE](../reference/apis/js-apis-faultLogger.md#faulttype) | Not supported | Not supported | Supported | Supported |
-| [CPP_CRASH](../reference/apis/js-apis-faultLogger.md#faulttype) | Not supported | Not supported | Not supported | Supported |
+| Fault | Fault Listening | State Saving| Automatic Restart| Log Query|
+| ----------|--------- |--------- |--------- |--------- |
+| [JS_CRASH](../reference/apis/js-apis-faultLogger.md#faulttype) | Supported|Supported|Supported|Supported|
+| [APP_FREEZE](../reference/apis/js-apis-faultLogger.md#faulttype) | Not supported|Supported|Supported|Supported|
+| [CPP_CRASH](../reference/apis/js-apis-faultLogger.md#faulttype) | Not supported|Not supported|Not supported|Supported|
-**Status Saving** in the table header means status saving when a fault occurs. To protect user data as much as possible in the application freezing fault, you can adopt either the periodic or automatic way, and the latter will save user data when an ability is switched to the background.
+**State Saving** in the table header means saving of the application state when a fault occurs. To protect user data as much as possible when an AppFreeze occurs, you can save the state either periodically or automatically; in the latter case, user data is saved when an ability is switched to the background.
@@ -78,11 +96,23 @@ export default class MyAbilityStage extends AbilityStage {
appRecovery.SaveModeFlag.SAVE_WITH_FILE);
}
}
+```
+### Enabling Application Recovery for the Specified Abilities
+Generally, the ability configuration list is named **module.json5**.
+```json
+{
+ "abilities": [
+ {
+ "name": "EntryAbility",
+            "recoverable": true
+        }]
+}
+
```
### Saving and Restoring Data
-After enabling **appRecovery**, you can use this function by either actively or passively saving the status and restoring data in the ability.
+After enabling **appRecovery**, you can use this function by either actively or passively saving the application state and restoring data in the ability.
The following is an example of **EntryAbility**:
#### Importing the Service Package
@@ -93,14 +123,14 @@ import appRecovery from '@ohos.app.ability.appRecovery';
import AbilityConstant from '@ohos.app.ability.AbilityConstant';
```
-#### Actively Saving Status and Restoring Data
+#### Actively Saving the Application State and Restoring Data
-- Define and register the [ErrorObserver](../reference/apis/js-apis-application-errorManager.md#errorobserver) callback.
+- Define and register the [ErrorObserver](../reference/apis/js-apis-inner-application-errorObserver.md) callback. For details about its usage, see [errorManager](../reference/apis/js-apis-app-ability-errorManager.md).
```ts
var registerId = -1;
var callback = {
- onUnhandledException: function (errMsg) {
+ onUnhandledException(errMsg) {
console.log(errMsg);
appRecovery.saveAppState();
appRecovery.restartApp();
@@ -108,11 +138,11 @@ import AbilityConstant from '@ohos.app.ability.AbilityConstant';
}
onWindowStageCreate(windowStage) {
- // Main window is created. Set a main page for this ability.
+ // Main window is created, set main page for this ability
console.log("[Demo] EntryAbility onWindowStageCreate")
globalThis.registerObserver = (() => {
- registerId = errorManager.registerErrorObserver(callback);
+ registerId = errorManager.on('error', callback);
})
windowStage.loadContent("pages/index", null);
@@ -125,16 +155,16 @@ After the callback triggers **appRecovery.saveAppState()**, **onSaveState(state,
```ts
onSaveState(state, wantParams) {
- // Save application data.
+ // Ability has called to save app data
console.log("[Demo] EntryAbility onSaveState")
wantParams["myData"] = "my1234567";
- return AbilityConstant.onSaveResult.ALL_AGREE;
+ return AbilityConstant.OnSaveResult.ALL_AGREE;
}
```
- Restore data.
-After the callback triggers **appRecovery.restartApp()**, the application is restarted. After the restart, **onCreate(want, launchParam)** of **EntryAbility** is called, and the saved data is in **parameters** of **want**.
+After the callback triggers **appRecovery.restartApp()**, the application is restarted. After the restart, **onCreate(want, launchParam)** of **EntryAbility** is called, and the saved data is stored in **parameters** of **want**.
```ts
storage: LocalStorage
@@ -150,24 +180,24 @@ onCreate(want, launchParam) {
}
```
-- Deregister **ErrorObserver callback**.
+- Unregister the **ErrorObserver** callback.
```ts
onWindowStageDestroy() {
- // Main window is destroyed to release UI resources.
+ // Main window is destroyed, release UI related resources
console.log("[Demo] EntryAbility onWindowStageDestroy")
globalThis.unRegisterObserver = (() => {
- errorManager.unregisterErrorObserver(registerId, (result) => {
- console.log("[Demo] result " + result.code + ";" + result.message)
+ errorManager.off('error', registerId, (err) => {
+ console.error("[Demo] err:", err);
});
})
}
```
-#### Passively Saving Status and Restoring Data
+#### Passively Saving the Application State and Restoring Data
-This is triggered by the recovery framework. You do not need to register **ErrorObserver callback**. You only need to implement **onSaveState** of the ability for status saving and **onCreate** of the ability for data restoration.
+This is triggered by the recovery framework. You do not need to register an **ErrorObserver** callback. You only need to implement **onSaveState** for application state saving and **onCreate** for data restore.
```ts
export default class EntryAbility extends Ability {
@@ -184,10 +214,10 @@ export default class EntryAbility extends Ability {
}
onSaveState(state, wantParams) {
- // Save application data.
+ // Ability has called to save app data
console.log("[Demo] EntryAbility onSaveState")
wantParams["myData"] = "my1234567";
- return AbilityConstant.onSaveResult.ALL_AGREE;
+ return AbilityConstant.OnSaveResult.ALL_AGREE;
}
}
```
diff --git a/en/application-dev/dfx/errormanager-guidelines.md b/en/application-dev/dfx/errormanager-guidelines.md
index 667339c3b3dbaa101cfbda8eeacbc8f11c2fd03d..4679cfcfc78893590fe73eab770e49fc68a1a828 100644
--- a/en/application-dev/dfx/errormanager-guidelines.md
+++ b/en/application-dev/dfx/errormanager-guidelines.md
@@ -12,11 +12,11 @@ Application error management APIs are provided by the **errorManager** module. F
| API | Description |
| ------------------------------------------------------------ | ---------------------------------------------------- |
-| registerErrorObserver(observer: ErrorObserver): number | Registers an observer for application errors. A callback will be invoked when an application error is detected. This API works in a synchronous manner. The return value is the SN of the registered observer.|
-| unregisterErrorObserver(observerId: number, callback: AsyncCallback\<void\>): void | Unregisters an observer in callback mode. The number passed to this API is the SN of the registered observer. |
-| unregisterErrorObserver(observerId: number): Promise\<void\> | Unregisters an observer in promise mode. The number passed to this API is the SN of the registered observer. |
+| on(type: "error", observer: ErrorObserver): number | Registers an observer for application errors. A callback will be invoked when an application error is detected. This API works in a synchronous manner. The return value is the SN of the registered observer.|
+| off(type: "error", observerId: number, callback: AsyncCallback\<void\>): void | Unregisters an observer in callback mode. The number passed to this API is the SN of the registered observer. |
+| off(type: "error", observerId: number): Promise\<void\> | Unregisters an observer in promise mode. The number passed to this API is the SN of the registered observer. |
-When an asynchronous callback is used, the return value can be processed directly in the callback. If a promise is used, the return value can also be processed in the promise in a similar way. For details about the result codes, see [Result Codes for Unregistering an Observer](#result-codes-for-unregistering-an-observer).
+When an asynchronous callback is used, the return value can be processed directly in the callback. If a promise is used, the return value can also be processed in the promise in a similar way. For details about the result codes, see [Result Codes for Unregistering an Observer](#result-codes-for-unregistering-an-observer).
**Table 2** Description of the ErrorObserver API
@@ -39,22 +39,23 @@ When an asynchronous callback is used, the return value can be processed directl
import UIAbility from '@ohos.app.ability.UIAbility';
import errorManager from '@ohos.app.ability.errorManager';
-var registerId = -1;
-var callback = {
+let registerId = -1;
+let callback = {
onUnhandledException: function (errMsg) {
console.log(errMsg);
}
}
-export default class EntryAbility extends Ability {
+
+export default class EntryAbility extends UIAbility {
onCreate(want, launchParam) {
console.log("[Demo] EntryAbility onCreate")
- registerId = errorManager.registerErrorObserver(callback);
+ registerId = errorManager.on("error", callback);
globalThis.abilityWant = want;
}
onDestroy() {
console.log("[Demo] EntryAbility onDestroy")
- errorManager.unregisterErrorObserver(registerId, (result) => {
+ errorManager.off("error", registerId, (result) => {
console.log("[Demo] result " + result.code + ";" + result.message)
});
}
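+
+    // Note: the promise-mode off() from Table 1 can be used instead. A sketch, assuming the same registerId:
+    // errorManager.off("error", registerId)
+    //     .then(() => console.log("[Demo] observer unregistered"))
+    //     .catch((err) => console.error("[Demo] err: " + JSON.stringify(err)));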
diff --git a/en/application-dev/dfx/figures/application_recovery_from_freezing.png b/en/application-dev/dfx/figures/application_recovery_from_freezing.png
new file mode 100644
index 0000000000000000000000000000000000000000..968b4cefc5e898209cdae117c7f9f667bc9fbd64
Binary files /dev/null and b/en/application-dev/dfx/figures/application_recovery_from_freezing.png differ
diff --git a/en/application-dev/dfx/figures/application_recovery_status_management.png b/en/application-dev/dfx/figures/application_recovery_status_management.png
new file mode 100644
index 0000000000000000000000000000000000000000..762504c1d1027be87233e589988be2091640191b
Binary files /dev/null and b/en/application-dev/dfx/figures/application_recovery_status_management.png differ
diff --git a/en/application-dev/dfx/figures/fault_rectification.png b/en/application-dev/dfx/figures/fault_rectification.png
index 67aa40592f7bcad23e216222e898c1f1327a4efb..a178b2691616d406d2668806ffcd4f89c8ca82a3 100644
Binary files a/en/application-dev/dfx/figures/fault_rectification.png and b/en/application-dev/dfx/figures/fault_rectification.png differ
diff --git a/en/application-dev/dfx/hiappevent-guidelines.md b/en/application-dev/dfx/hiappevent-guidelines.md
index 9991e10c731d9130a7c1f52154e18ac19bce336d..569b16d587af811d32e425a534ab4dc0df6a4be6 100644
--- a/en/application-dev/dfx/hiappevent-guidelines.md
+++ b/en/application-dev/dfx/hiappevent-guidelines.md
@@ -45,7 +45,7 @@ The following table provides only a brief description of related APIs. For detai
The following example illustrates how to log and subscribe to button click events of users.
-1. Create an eTS application project. In the displayed **Project** window, choose **entry** > **src** > **main** > **ets** > **entryability** > **EntryAbility.ts**, and double-click **EntryAbility.ts**. Then, add an event watcher to subscribe to button click events. The complete sample code is as follows:
+1. Create an ArkTS application project. In the displayed **Project** window, choose **entry** > **src** > **main** > **ets** > **entryability** > **EntryAbility.ts**, and double-click **EntryAbility.ts**. Then, add an event watcher to subscribe to button click events. The complete sample code is as follows:
```js
import hilog from '@ohos.hilog';
@@ -146,9 +146,3 @@ The following example illustrates how to log and subscribe to button click event
HiAppEvent eventPkg.size=124
HiAppEvent eventPkg.info={"domain_":"button","name_":"click","type_":4,"time_":1670268234523,"tz_":"+0800","pid_":3295,"tid_":3309,"click_time":100}
```
-
-## Samples
-
-The following sample is provided to help you better understand how to develop the application event logging feature:
-
-- [`JsDotTest`: Event Logging (JS) (API8)](https://gitee.com/openharmony/applications_app_samples/tree/master/DFX/JsDotTest)
diff --git a/en/application-dev/faqs/Readme-EN.md b/en/application-dev/faqs/Readme-EN.md
index 7eb9cad6b546996a47e92cd01b03f783a1f4a6d2..63535a32ae16eca13b03d20b4bce93569e2fe1d0 100644
--- a/en/application-dev/faqs/Readme-EN.md
+++ b/en/application-dev/faqs/Readme-EN.md
@@ -18,5 +18,4 @@
- [Native API Usage](faqs-native.md)
- [Usage of Third- and Fourth-Party Libraries](faqs-third-party-library.md)
- [IDE Usage](faqs-ide.md)
-- [hdc_std Command Usage](faqs-hdc-std.md)
- [Development Board](faqs-development-board.md)
\ No newline at end of file
diff --git a/en/application-dev/faqs/faqs-bundle.md b/en/application-dev/faqs/faqs-bundle.md
index 61a5277c6d4a1493d0281fdd66b88a99a07141ae..fda41c42bccc357d6b8800ce3f5401e1e2abbceb 100644
--- a/en/application-dev/faqs/faqs-bundle.md
+++ b/en/application-dev/faqs/faqs-bundle.md
@@ -14,7 +14,7 @@ Applicable to: OpenHarmony SDK 3.2.3.5, stage model of API version 9
Obtain the bundle name through **context.abilityInfo.bundleName**.
-Reference: [AbilityContext](../reference/apis/js-apis-ability-context.md) and [AbilityInfo](../reference/apis/js-apis-bundle-AbilityInfo.md)
+Reference: [AbilityInfo](../reference/apis/js-apis-bundle-AbilityInfo.md)
## How do I obtain an application icon?
diff --git a/en/application-dev/faqs/faqs-device-management.md b/en/application-dev/faqs/faqs-device-management.md
index dd836eb11abfbee3979f5a604eb5aa734d0d9112..ea71edd6c9940437e197be35e60a6638c73ae88d 100644
--- a/en/application-dev/faqs/faqs-device-management.md
+++ b/en/application-dev/faqs/faqs-device-management.md
@@ -2,23 +2,21 @@
## How do I obtain the DPI of a device?
-Applicable to: OpenHarmony SDK 3.2.2.5, stage model of API version 9
-
-Import the **\@ohos.display** module and call the **getDefaultDisplay** API.
+Applicable to: OpenHarmony 3.2 Beta5, stage model of API version 9
-Example:
+Import the **@ohos.display** module and call the **getDefaultDisplaySync** API.
+**Example**
```
import display from '@ohos.display';
-display.getDefaultDisplay((err, data) => {
- if (err.code) {
- console.error('Test Failed to obtain the default display object. Code: ' + JSON.stringify(err));
- return;
- }
- console.info('Test Succeeded in obtaining the default display object. Data:' + JSON.stringify(data));
- console.info('Test densityDPI:' + JSON.stringify(data.densityDPI));
-});
+let displayClass = null;
+try {
+ displayClass = display.getDefaultDisplaySync();
+    console.info('Test densityDPI:' + JSON.stringify(displayClass.densityDPI));
+} catch (exception) {
+ console.error('Failed to obtain the default display object. Code: ' + JSON.stringify(exception));
+}
```
## How do I obtain the type of the device where the application is running?
diff --git a/en/application-dev/faqs/faqs-hdc-std.md b/en/application-dev/faqs/faqs-hdc-std.md
deleted file mode 100644
index 60f93da61d7d78a4e148b65c0e30d379b1e1206d..0000000000000000000000000000000000000000
--- a/en/application-dev/faqs/faqs-hdc-std.md
+++ /dev/null
@@ -1,87 +0,0 @@
-# hdc_std Command Usage
-
-## Common Log Commands
-
-Applicable to: OpenHarmony SDK 3.2.2.5
-
-Clearing logs: hdc_std shell hilog -r
-
-Increasing the buffer size to 20 MB: hdc_std shell hilog -G 20M
-
-Capturing logs: hdc_std shell hilog > log.txt
-
-## What should I do to avoid log flow control?
-
-Applicable to: OpenHarmony SDK 3.2.5.3, stage model of API version 9
-
-- Disabling log flow control: hdc_std shell hilog -Q pidoff
-
-- Disabling the privacy flag: hdc_std shell hilog -p off
-
-- Increasing the log buffer to 200 MB: hdc_std shell hilog -G 200M
-
-- Enabling the log function of the specific domain (that is, disabling the global log function): hdc_std shell hilog –b D –D 0xd0xxxxx
-
-After performing the preceding operations, restart the DevEco Studio.
-
-## What should I do if the HAP installed on the development board through the IDE cannot be opened?
-
-Applicable to: OpenHarmony SDK 3.2.5.3, stage model of API version 9
-
-Check whether the SDK version is consistent with the system version on the development board. You are advised to use the SDK version and system version that are released on the same day.
-
-## How do I upload files using the hdc command?
-
-Applicable to: OpenHarmony SDK 3.2.2.5
-
-Run the **hdc_std file send** command.
-
-## How do I prevent the screen of the RK3568 development board from turning off?
-
-Applicable to: OpenHarmony SDK 3.2.5.3, stage model of API version 9
-
-Run the **hdc_std shell "power-shell setmode 602"** command.
-
-## How do I start an ability using the hdc command?
-
-Applicable to: OpenHarmony SDK 3.2.5.3, stage model of API version 9
-
-Run the **hdc\_std shell aa start -a AbilityName -b bundleName -m moduleName** command.
-
-## How do I change the read and write permissions on a file directory on the development board?
-
-Applicable to: OpenHarmony SDK 3.2.5.6, stage model of API version 9
-
-Run the **hdc\_std shell mount -o remount,rw /** command.
-
-## What should I do if the error message "Unknown file option -r" is displayed when hdc_std file recv is run?
-
-Applicable to: OpenHarmony SDK 3.2.5.6, stage model of API version 9
-
-1. Use the the hdc tool in the device image or SDK of the same version.
-
-2. Remove any Chinese characters or spaces from the directory specified for the hdc tool.
-
-## How do I uninstall an application using the hdc command?
-
-Applicable to: OpenHarmony SDK 3.2.2.5
-
-Run the **hdc\_std uninstall [-k] [package_name]** command.
-
-## How do I check whether the system is 32-bit or 64-bit?
-
-Applicable to: OpenHarmony SDK 3.2.5.5
-
-Run the **hdc\_std shell getconf LONG_BIT** command.
-
-If **64** is returned, the system is a 64-bit one. Otherwise, the system is a 32-bit one.
-
-## How do I view the component tree structure?
-
-Applicable to: OpenHarmony SDK 3.2.5.5
-
-1. Run the **hdc\_std shell** command to launch the CLI.
-
-2. Run the **aa dump -a** command to find **abilityID**.
-
-3. Run the **aa dump -i [abilityID] -c -render** command to view the component tree.
diff --git a/en/application-dev/faqs/faqs-language.md b/en/application-dev/faqs/faqs-language.md
index 22a450b4c8e37dc85a28c2ea3b972b03d6ea16ae..6d3ded94a76155feae22d761bdb63422e07f0316 100644
--- a/en/application-dev/faqs/faqs-language.md
+++ b/en/application-dev/faqs/faqs-language.md
@@ -251,7 +251,6 @@ Applicable to: OpenHarmony SDK 3.2.5.5, stage model of API version 9
To listen for in-depth changes of **@State** decorated variables, you can use **@Observed** and **@ObjectLink** decorators.
-Reference: [@Observed and @ObjectLink](../quick-start/arkts-state-mgmt-page-level.md#observed-and-objectlink)
## How do I implement character string encoding and decoding?
diff --git a/en/application-dev/faqs/faqs-web-arkts.md b/en/application-dev/faqs/faqs-web-arkts.md
index be2d58f82d54c9b95596ad3e767954fb7acfceca..6fe2c75a4bf0bc9b1d2f73929a34dc618c503d5b 100644
--- a/en/application-dev/faqs/faqs-web-arkts.md
+++ b/en/application-dev/faqs/faqs-web-arkts.md
@@ -76,4 +76,4 @@ Applicable to: OpenHarmony SDK 3.2.7.5, stage model of API version 9
4. Use message port 0 on the application side to send messages to message port 1 on the HTML side.
-Reference: [Web](../reference/arkui-ts/ts-basic-components-web.md#postmessage9)
+Reference: [Web](../reference/arkui-ts/ts-basic-components-web.md)
diff --git a/en/application-dev/file-management/medialibrary-filepath-guidelines.md b/en/application-dev/file-management/medialibrary-filepath-guidelines.md
index 4c7e2ecd4db6723a66930e624bd4b36b556330d1..1e310ef9312499bb131affb620ac7758e5033778 100644
--- a/en/application-dev/file-management/medialibrary-filepath-guidelines.md
+++ b/en/application-dev/file-management/medialibrary-filepath-guidelines.md
@@ -136,7 +136,7 @@ async function copySandbox2Public() {
console.error('file asset get failed, message = ' + err);
}
let fdPub = await fileAsset.open('rw');
- let fdSand = await fs.open(sandboxDirPath + 'testFile.txt', OpenMode.READ_WRITE);
+ let fdSand = await fs.open(sandboxDirPath + 'testFile.txt', fs.OpenMode.READ_WRITE);
await fs.copyFile(fdSand.fd, fdPub);
await fileAsset.close(fdPub);
await fs.close(fdSand.fd);
@@ -174,7 +174,7 @@ async function example() {
const context = getContext(this);
let media = mediaLibrary.getMediaLibrary(context);
const path = await media.getPublicDirectory(DIR_DOCUMENTS);
- media.createAsset(mediaType, "testFile.text", path).then((asset) => {
+ media.createAsset(mediaType, "testFile.txt", path).then((asset) => {
console.info("createAsset successfully:" + JSON.stringify(asset));
}).catch((err) => {
console.error("createAsset failed with error: " + err);
diff --git a/en/application-dev/file-management/medialibrary-resource-guidelines.md b/en/application-dev/file-management/medialibrary-resource-guidelines.md
index 7d120ec9a4fa9fd38ba92be97ee7fdd5a6f33816..b5691ad2e7505f0b1df617dd94c933b09de350e7 100644
--- a/en/application-dev/file-management/medialibrary-resource-guidelines.md
+++ b/en/application-dev/file-management/medialibrary-resource-guidelines.md
@@ -42,14 +42,11 @@ async function example() {
const context = getContext(this);
let media = mediaLibrary.getMediaLibrary(context);
const fetchFileResult = await media.getFileAssets(option);
- fetchFileResult.getFirstObject().then((fileAsset) => {
+ fetchFileResult.getFirstObject().then(async (fileAsset) => {
console.log('getFirstObject.displayName : ' + fileAsset.displayName);
for (let i = 1; i < fetchFileResult.getCount(); i++) {
- fetchFileResult.getNextObject().then((fileAsset) => {
- console.info('fileAsset.displayName ' + i + ': ' + fileAsset.displayName);
- }).catch((err) => {
- console.error('Failed to get next object: ' + err);
- });
+ let fileAsset = await fetchFileResult.getNextObject();
+ console.info('fileAsset.displayName ' + i + ': ' + fileAsset.displayName);
}
}).catch((err) => {
console.error('Failed to get first object: ' + err);
@@ -75,14 +72,11 @@ async function example() {
const context = getContext(this);
let media = mediaLibrary.getMediaLibrary(context);
const fetchFileResult = await media.getFileAssets(option);
- fetchFileResult.getFirstObject().then((fileAsset) => {
+ fetchFileResult.getFirstObject().then(async (fileAsset) => {
console.info('getFirstObject.displayName : ' + fileAsset.displayName);
for (let i = 1; i < fetchFileResult.getCount(); i++) {
- fetchFileResult.getNextObject().then((fileAsset) => {
- console.info('fileAsset.displayName ' + i + ': ' + fileAsset.displayName);
- }).catch((err) => {
- console.error('Failed to get next object: ' + err);
- });
+ let fileAsset = await fetchFileResult.getNextObject();
+ console.info('fileAsset.displayName ' + i + ': ' + fileAsset.displayName);
}
}).catch((err) => {
console.error('Failed to get first object: ' + err);
@@ -108,14 +102,11 @@ async function example() {
const context = getContext(this);
let media = mediaLibrary.getMediaLibrary(context);
const fetchFileResult = await media.getFileAssets(option);
- fetchFileResult.getFirstObject().then((fileAsset) => {
+ fetchFileResult.getFirstObject().then(async (fileAsset) => {
console.info('getFirstObject.displayName : ' + fileAsset.displayName);
for (let i = 1; i < fetchFileResult.getCount(); i++) {
- fetchFileResult.getNextObject().then((fileAsset) => {
- console.info('fileAsset.displayName ' + i + ': ' + fileAsset.displayName);
- }).catch((err) => {
- console.error('Failed to get next object: ' + err);
- });
+ let fileAsset = await fetchFileResult.getNextObject();
+ console.info('fileAsset.displayName ' + i + ': ' + fileAsset.displayName);
}
}).catch((err) => {
console.error('Failed to get first object: ' + err);
@@ -156,7 +147,7 @@ async function example() {
## Obtaining Images and Videos in an Album
You can obtain media assets in an album in either of the following ways:
-- Call [MediaLibrary.getFileAssets](../reference/apis/js-apis-medialibrary.md#getfileassets7-1) with an album specified, as described in [Querying Media Assets with the Specfied Album Name](#querying-media-assets-with-the-specified-album-name).
+- Call [MediaLibrary.getFileAssets](../reference/apis/js-apis-medialibrary.md#getfileassets7-1) with an album specified, as described in [Querying Media Assets with the Specified Album Name](#querying-media-assets-with-the-specified-album-name).
- Call [Album.getFileAssets](../reference/apis/js-apis-medialibrary.md#getfileassets7-3) to obtain an **Album** instance, so as to obtain the media assets in it.
**Prerequisites**
diff --git a/en/application-dev/internationalization/i18n-guidelines.md b/en/application-dev/internationalization/i18n-guidelines.md
index 8218f2561376c4119f66be0175c5c9ea16c7d024..e78bdb6437b26b8a30ee23f9fdec380087297b33 100644
--- a/en/application-dev/internationalization/i18n-guidelines.md
+++ b/en/application-dev/internationalization/i18n-guidelines.md
@@ -6,7 +6,7 @@ The [intl](intl-guidelines.md) module provides basic i18n capabilities through t
## Obtaining and Setting i18n Information
-The system provides APIs to configure information such as the system language, preferred language, country or region, 24-hour clock, and local digit switch.
+The following table lists the APIs used to configure information such as the system language, preferred language, country or region, 24-hour clock, and use of local digits.
### Available APIs
@@ -30,15 +30,15 @@ The system provides APIs to configure information such as the system language, p
| System | getPreferredLanguageList()9+ | Obtains the preferred language list. |
| System | getFirstPreferredLanguage()9+ | Obtains the first language in the preferred language list. |
| System | getAppPreferredLanguage()9+ | Obtains the preferred language of an application. |
-| System | setUsingLocalDigit(flag: boolean)9+ | Sets whether to enable the local digit switch. |
-| System | getUsingLocalDigit()9+ | Checks whether the local digit switch is turned on. |
+| System | setUsingLocalDigit(flag: boolean)9+ | Specifies whether to enable use of local digits. |
+| System | getUsingLocalDigit()9+ | Checks whether use of local digits is enabled. |
| | isRTL(locale:string):boolean9+ | Checks whether the locale uses a right-to-left (RTL) language.|
### How to Develop
1. Import the **i18n** module.
```js
- import I18n from '@ohos.i18n'
+ import I18n from '@ohos.i18n';
```
2. Obtain and set the system language.
@@ -51,7 +51,7 @@ The system provides APIs to configure information such as the system language, p
I18n.System.setSystemLanguage("en"); // Set the system language to en.
let language = I18n.System.getSystemLanguage(); // language = "en"
} catch(error) {
- console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`)
+ console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`);
}
```
@@ -65,7 +65,7 @@ The system provides APIs to configure information such as the system language, p
I18n.System.setSystemRegion("CN"); // Set the system country to CN.
let region = I18n.System.getSystemRegion(); // region = "CN"
} catch(error) {
- console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`)
+ console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`);
}
```
@@ -79,7 +79,7 @@ The system provides APIs to configure information such as the system language, p
I18n.System.setSystemLocale("zh-Hans-CN"); // Set the system locale to zh-Hans-CN.
let locale = I18n.System.getSystemLocale(); // locale = "zh-Hans-CN"
} catch(error) {
- console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`)
+ console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`);
}
```
@@ -92,7 +92,7 @@ The system provides APIs to configure information such as the system language, p
let rtl = I18n.isRTL("zh-CN"); // rtl = false
rtl = I18n.isRTL("ar"); // rtl = true
} catch(error) {
- console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`)
+ console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`);
}
```
@@ -106,7 +106,7 @@ The system provides APIs to configure information such as the system language, p
I18n.System.set24HourClock(true);
let hourClock = I18n.System.is24HourClock(); // hourClock = true
} catch(error) {
- console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`)
+ console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`);
}
```
@@ -121,7 +121,7 @@ The system provides APIs to configure information such as the system language, p
let sentenceCase = false;
let localizedLanguage = I18n.System.getDisplayLanguage(language, locale, sentenceCase); // localizedLanguage = "English"
} catch(error) {
- console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`)
+ console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`);
}
```
@@ -136,7 +136,7 @@ The system provides APIs to configure information such as the system language, p
let sentenceCase = false;
let localizedCountry = I18n.System.getDisplayCountry(country, locale, sentenceCase); // localizedCountry = "U.S."
} catch(error) {
- console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`)
+ console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`);
}
```
@@ -150,7 +150,7 @@ The system provides APIs to configure information such as the system language, p
let languageList = I18n.System.getSystemLanguages(); // languageList = ["en-Latn-US", "zh-Hans"]
let countryList = I18n.System.getSystemCountries("zh"); // countryList = ["ZW", "YT", ..., "CN", "DE"], 240 countries and regions in total
} catch(error) {
- console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`)
+ console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`);
}
```
@@ -162,7 +162,7 @@ The system provides APIs to configure information such as the system language, p
try {
let isSuggest = I18n.System.isSuggested("zh", "CN"); // isSuggest = true
} catch(error) {
- console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`)
+ console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`);
}
```
@@ -182,7 +182,7 @@ The system provides APIs to configure information such as the system language, p
let firstPreferredLanguage = I18n.System.getFirstPreferredLanguage(); // firstPreferredLanguage = "en-GB"
let appPreferredLanguage = I18n.System.getAppPreferredLanguage(); // Set the preferred language of the application to en-GB if the application contains en-GB resources.
} catch(error) {
- console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`)
+ console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`);
}
```
@@ -190,14 +190,14 @@ The system provides APIs to configure information such as the system language, p
Call **setUsingLocalDigit** to enable the local digit switch. (This is a system API and can be called only by system applications with the UPDATE_CONFIGURATION permission.)
Call **getUsingLocalDigit** to check whether the local digit switch is enabled.
- Currently, the local digit switch applies only to the following languages: "ar", "as", "bn", "fa", "mr", "my", "ne", and "ur".
+ Currently, use of local digits is supported only for the following languages: **ar**, **as**, **bn**, **fa**, **mr**, **my**, **ne**, **ur**.
```js
try {
I18n.System.setUsingLocalDigit(true); // Enable the local digit switch.
let status = I18n.System.getUsingLocalDigit(); // status = true
} catch(error) {
- console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`)
+ console.error(`call i18n.System interface failed, error code: ${error.code}, message: ${error.message}`);
}
```
@@ -220,14 +220,14 @@ try {
| Calendar | getMinimalDaysInFirstWeek():number8+ | Obtains the minimum number of days in the first week of a year. |
| Calendar | setMinimalDaysInFirstWeek(value:number): void8+ | Sets the minimum number of days in the first week of a year. |
| Calendar | getDisplayName(locale:string):string8+ | Obtains the localized display of the **Calendar** object. |
-| Calendar | isWeekend(date?:Date):boolean8+ | Checks whether the specified date in this **Calendar** object is a weekend. |
+| Calendar | isWeekend(date?:Date):boolean8+ | Checks whether a given date is a weekend in the calendar. |
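+
+A minimal **isWeekend** sketch, assuming a hypothetical Gregorian calendar for the **zh-Hans** locale:
+
+```js
+import I18n from '@ohos.i18n';
+let calendar = I18n.getCalendar('zh-Hans', 'gregory'); // assumed locale and calendar type
+let weekend = calendar.isWeekend(new Date(2023, 0, 1)); // January 1, 2023 is a Sunday: true
+```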
### How to Develop
1. Import the **i18n** module.
```js
- import I18n from '@ohos.i18n'
+ import I18n from '@ohos.i18n';
```
2. Instantiate a **Calendar** object.
@@ -254,7 +254,7 @@ try {
Call **set** to set the year, month, day, hour, minute, and second for the **Calendar** object.
```js
- calendar.set(2021, 12, 21, 6, 0, 0)
+ calendar.set(2021, 12, 21, 6, 0, 0);
```
5. Set and obtain the time zone for the **Calendar** object.
@@ -317,7 +317,7 @@ try {
1. Import the **i18n** module.
```js
- import I18n from '@ohos.i18n'
+ import I18n from '@ohos.i18n';
```
2. Instantiate a **PhoneNumberFormat** object.
@@ -359,7 +359,7 @@ The **I18NUtil** class provides an API to implement measurement conversion.
1. Import the **i18n** module.
```js
- import I18n from '@ohos.i18n'
+ import I18n from '@ohos.i18n';
```
2. Convert a measurement unit.
@@ -393,7 +393,7 @@ The **I18NUtil** class provides an API to implement measurement conversion.
1. Import the **i18n** module.
```js
- import I18n from '@ohos.i18n'
+ import I18n from '@ohos.i18n';
```
2. Instantiates an **IndexUtil** object.
@@ -418,7 +418,7 @@ The **I18NUtil** class provides an API to implement measurement conversion.
Call **addLocale** to add the alphabet index of a new locale to the current index list.
```js
- indexUtil.addLocale("ar")
+ indexUtil.addLocale("ar");
```
5. Obtain the index of a string.
@@ -454,7 +454,7 @@ When a text is displayed in more than one line, use [BreakIterator8](../referenc
1. Import the **i18n** module.
```js
- import I18n from '@ohos.i18n'
+ import I18n from '@ohos.i18n';
```
2. Instantiate a **BreakIterator** object.
@@ -462,7 +462,7 @@ When a text is displayed in more than one line, use [BreakIterator8](../referenc
Call **getLineInstance** to instantiate a **BreakIterator** object.
```js
- let locale = "en-US"
+ let locale = "en-US";
let breakIterator = I18n.getLineInstance(locale);
```
@@ -531,7 +531,7 @@ When a text is displayed in more than one line, use [BreakIterator8](../referenc
1. Import the **i18n** module.
```js
- import I18n from '@ohos.i18n'
+ import I18n from '@ohos.i18n';
```
2. Instantiate the **TimeZone** object, and obtain the time zone information.
@@ -592,7 +592,7 @@ Call [Transliterator](../reference/apis/js-apis-i18n.md#transliterator9) APIs to
1. Import the **i18n** module.
```js
- import I18n from '@ohos.i18n'
+ import I18n from '@ohos.i18n';
```
2. Obtains the transliterator ID list.
@@ -637,7 +637,7 @@ Call [Transliterator](../reference/apis/js-apis-i18n.md#transliterator9) APIs to
1. Import the **i18n** module.
```js
- import I18n from '@ohos.i18n'
+ import I18n from '@ohos.i18n';
```
2. Check the input character has a certain attribute.
@@ -719,7 +719,7 @@ Call [Transliterator](../reference/apis/js-apis-i18n.md#transliterator9) APIs to
1. Import the **i18n** module.
```js
- import I18n from '@ohos.i18n'
+ import I18n from '@ohos.i18n';
```
2. Check the sequence of year, month, and day in a date.
diff --git a/en/application-dev/internationalization/intl-guidelines.md b/en/application-dev/internationalization/intl-guidelines.md
index 609af84500cecb0ce5bda8409216b6957182885f..fcac5325292b27f349f6c3dcadb627dca2dd0c03 100644
--- a/en/application-dev/internationalization/intl-guidelines.md
+++ b/en/application-dev/internationalization/intl-guidelines.md
@@ -2,7 +2,7 @@
The **intl** module provides basic i18n capabilities, such as time and date formatting, number formatting, and string sorting, through the standard i18n APIs defined in ECMA 402. For more details about APIs and their usage, see [intl](../reference/apis/js-apis-intl.md).
-The [I18N](i18n-guidelines.md) module provides enhanced I18N capabilities through supplementary interfaces that are not defined in ECMA 402. It works with the Intl module to provide a complete suite of I18N capabilities.
+The [i18n](../reference/apis/js-apis-i18n.md) module provides enhanced I18N capabilities through supplementary interfaces that are not defined in ECMA 402. It works with the Intl module to provide a complete suite of I18N capabilities.
## Setting Locale Information
@@ -25,7 +25,7 @@ The [I18N](i18n-guidelines.md) module provides enhanced I18N capabilities throug
Importing an incorrect bundle can lead to unexpected API behavior.
```js
- import Intl from '@ohos.intl'
+ import Intl from '@ohos.intl';
```
2. Instantiates a **Locale** object.
@@ -100,7 +100,7 @@ The [I18N](i18n-guidelines.md) module provides enhanced I18N capabilities throug
Importing an incorrect bundle can lead to unexpected API behavior.
```js
- import Intl from '@ohos.intl'
+ import Intl from '@ohos.intl';
```
2. Instantiate a **DateTimeFormat** object.
@@ -111,7 +111,7 @@ The [I18N](i18n-guidelines.md) module provides enhanced I18N capabilities throug
let dateTimeFormat = new Intl.DateTimeFormat();
```
- Alternatively, use your own locale and formatting parameters to create a **DateTimeFormat** object. Formatting parameters are optional. For a full list of formatting parameters, see [DateTimeOptions](../reference/apis/js-apis-intl.md#datetimeoptions).
+ Alternatively, use your own locale and formatting parameters to create a **DateTimeFormat** object. Formatting parameters are optional. For a full list of formatting parameters, see [DateTimeOptions](../reference/apis/js-apis-intl.md#datetimeoptions9).
```js
let options = {dateStyle: "full", timeStyle: "full"};
@@ -150,7 +150,7 @@ The [I18N](i18n-guidelines.md) module provides enhanced I18N capabilities throug
let resolvedOptions = dateTimeFormat.resolvedOptions(); // resolvedOptions = {"locale": "zh-CN", "calendar": "gregorian", "dateStyle":"full", "timeStyle":"full", "timeZone": "CST"}
```
-## Number Formatting
+## Formatting Numbers
[NumberFormat](../reference/apis/js-apis-intl.md#numberformat) provides APIs to implement the number formatting specific to a locale.
@@ -170,7 +170,7 @@ The [I18N](i18n-guidelines.md) module provides enhanced I18N capabilities throug
Importing an incorrect bundle can lead to unexpected API behavior.
```js
- import Intl from '@ohos.intl'
+ import Intl from '@ohos.intl';
```
2. Instantiate a **NumberFormat** object.
@@ -181,7 +181,7 @@ The [I18N](i18n-guidelines.md) module provides enhanced I18N capabilities throug
let numberFormat = new Intl.NumberFormat();
```
- Alternatively, use your own locale and formatting parameters to create a **NumberFormat** object. Formatting parameters are optional. For a full list of formatting parameters, see [NumberOptions](../reference/apis/js-apis-intl.md#numberoptions).
+ Alternatively, use your own locale and formatting parameters to create a **NumberFormat** object. Formatting parameters are optional. For a full list of formatting parameters, see [NumberOptions](../reference/apis/js-apis-intl.md#numberoptions9).
```js
let options = {compactDisplay: "short", notation: "compact"};
@@ -195,7 +195,7 @@ The [I18N](i18n-guidelines.md) module provides enhanced I18N capabilities throug
```js
let options = {compactDisplay: "short", notation: "compact"};
let numberFormat = new Intl.NumberFormat("zh-CN", options);
- let number = 1234.5678
+ let number = 1234.5678;
let formatResult = numberFormat.format(number); // formatResult = "1235"
```
@@ -209,7 +209,7 @@ The [I18N](i18n-guidelines.md) module provides enhanced I18N capabilities throug
let resolvedOptions = numberFormat.resolvedOptions(); // resolvedOptions = {"locale": "zh-CN", "compactDisplay": "short", "notation": "compact", "numberingSystem": "Latn"}
```
-## String Sorting
+## Sorting Strings
Users in different regions have different requirements for string sorting. [Collator](../reference/apis/js-apis-intl.md#collator8) provides APIs to sort character strings specific to a locale.
@@ -229,7 +229,7 @@ Users in different regions have different requirements for string sorting. [Coll
Importing an incorrect bundle can lead to unexpected API behavior.
```js
- import Intl from '@ohos.intl'
+ import Intl from '@ohos.intl';
```
2. Instantiate a **Collator** object.
@@ -240,7 +240,7 @@ Users in different regions have different requirements for string sorting. [Coll
let collator = new Intl.Collator();
```
- Alternatively, use your own locale and formatting parameters to create a **Collator** object. For a full list of parameters, see [CollatorOptions](../reference/apis/js-apis-intl.md#collatoroptions8).
+ Alternatively, use your own locale and formatting parameters to create a **Collator** object. For a full list of parameters, see [CollatorOptions](../reference/apis/js-apis-intl.md#collatoroptions9).
 The **sensitivity** parameter specifies the levels of difference considered in string comparison. The value **base** indicates that only base characters are compared, not accents or capitalization. For example, 'a' != 'b', 'a' == 'á', 'a' == 'A'. The value **accent** indicates that accents are considered, but not capitalization. For example, 'a' != 'b', 'a' != 'á', 'a' == 'A'. The value **case** indicates that capitalization is considered, but not accents. For example, 'a' != 'b', 'a' == 'á', 'a' != 'A'. The value **variant** indicates that both accents and capitalization are considered. For example, 'a' != 'b', 'a' != 'á', 'a' != 'A'.
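 For example, a minimal sketch (illustrative strings, relying on the **Intl** import from step 1) of how **sensitivity** affects comparison:
 ```js
 let sensitivityCollator = new Intl.Collator("en", {sensitivity: "base"});
 let compareResult = sensitivityCollator.compare("a", "á"); // compareResult = 0: equal at the base level.
 ```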
```js
@@ -290,7 +290,7 @@ According to grammars in certain languages, the singular or plural form of a nou
Importing an incorrect bundle can lead to unexpected API behavior.
```js
- import Intl from '@ohos.intl'
+ import Intl from '@ohos.intl';
```
2. Instantiate a **PluralRules** object.
@@ -301,7 +301,7 @@ According to grammars in certain languages, the singular or plural form of a nou
let pluralRules = new Intl.PluralRules();
```
- Alternatively, use your own locale and formatting parameters to create a **PluralRules** object. For a full list of parameters, see [PluralRulesOptions](../reference/apis/js-apis-intl.md#pluralrulesoptions8).
+ Alternatively, use your own locale and formatting parameters to create a **PluralRules** object. For a full list of parameters, see [PluralRulesOptions](../reference/apis/js-apis-intl.md#pluralrulesoptions9).
```js
let pluralRules = new Intl.PluralRules("zh-CN", {localeMatcher: "best fit", type: "cardinal"});
@@ -313,11 +313,11 @@ According to grammars in certain languages, the singular or plural form of a nou
```js
let pluralRules = new Intl.PluralRules("zh-CN", {localeMatcher: "best fit", type: "cardinal"});
- let number = 1234.5678
+ let number = 1234.5678;
let categoryResult = pluralRules.select(number); // categoryResult = "other"
```
-## Formatting Relative Time
+## Formatting the Relative Time
[RelativeTimeFormat](../reference/apis/js-apis-intl.md#relativetimeformat8) provides APIs to format the relative time for a specific locale.
@@ -338,7 +338,7 @@ According to grammars in certain languages, the singular or plural form of a nou
Importing an incorrect bundle can lead to unexpected API behavior.
```js
- import Intl from '@ohos.intl'
+ import Intl from '@ohos.intl';
```
2. Instantiate a **RelativeTimeFormat** object.
@@ -349,7 +349,7 @@ According to grammars in certain languages, the singular or plural form of a nou
let relativeTimeFormat = new Intl.RelativeTimeFormat();
```
- Alternatively, use your own locale and formatting parameters to create a **RelativeTimeFormat** object. Formatting parameters are optional. For a full list of formatting parameters, see [RelativeTimeFormatInputOptions](../reference/apis/js-apis-intl.md#relativetimeformatinputoptions8).
+ Alternatively, use your own locale and formatting parameters to create a **RelativeTimeFormat** object. Formatting parameters are optional. For a full list of formatting parameters, see [RelativeTimeFormatInputOptions](../reference/apis/js-apis-intl.md#relativetimeformatinputoptions9).
```js
let relativeTimeFormat = new Intl.RelativeTimeFormat("zh-CN", {numeric: "always", style: "long"});
@@ -362,7 +362,7 @@ According to grammars in certain languages, the singular or plural form of a nou
```js
let relativeTimeFormat = new Intl.RelativeTimeFormat("zh-CN", {numeric: "always", style: "long"});
let number = 2;
- let unit = "year"
+ let unit = "year";
let formatResult = relativeTimeFormat.format(number, unit); // 2 years later
```
@@ -373,7 +373,7 @@ According to grammars in certain languages, the singular or plural form of a nou
```js
let relativeTimeFormat = new Intl.RelativeTimeFormat("zh-CN", {numeric: "always", style: "long"});
let number = 2;
- let unit = "year"
+ let unit = "year";
let formatPartsResult = relativeTimeFormat.formatToParts(number, unit); // formatPartsResult = [{"type": "integer", "value": "2", "unit": "year"}, {"type":"literal", "value": "years later"}]
```
@@ -385,11 +385,3 @@ According to grammars in certain languages, the singular or plural form of a nou
let relativeTimeFormat = new Intl.RelativeTimeFormat("zh-CN", {numeric: "always", style: "long"});
let options = relativeTimeFormat.resolvedOptions(); // options = {"locale": "zh-CN", "style": "long", "numeric": "always", "numberingSystem": "latn"}
```
-
-## Samples
-
-The following sample is provided to help you better understand how to develop internationalization capabilities:
-
--[`International`: Internationalization (JS) (API8)](https://gitee.com/openharmony/applications_app_samples/tree/master/UI/International)
-
--[`International`: Internationalization (ArkTS) (API8) (Full SDK)](https://gitee.com/openharmony/applications_app_samples/tree/master/common/International)
diff --git a/en/application-dev/media/Readme-EN.md b/en/application-dev/media/Readme-EN.md
index 926a2718a48dcefd217e503932f9d9f997d1275e..f3a233ca129527db112459ab5110df49b8e1052d 100755
--- a/en/application-dev/media/Readme-EN.md
+++ b/en/application-dev/media/Readme-EN.md
@@ -1,29 +1,60 @@
# Media
+- [Media Application Overview](media-application-overview.md)
- Audio and Video
- - [Audio Overview](audio-overview.md)
- - [Audio Rendering Development](audio-renderer.md)
- - [Audio Stream Management Development](audio-stream-manager.md)
- - [Audio Capture Development](audio-capturer.md)
- - [OpenSL ES Audio Playback Development](opensles-playback.md)
- - [OpenSL ES Audio Recording Development](opensles-capture.md)
- - [Audio Interruption Mode Development](audio-interruptmode.md)
- - [Volume Management Development](audio-volume-manager.md)
- - [Audio Routing and Device Management Development](audio-routing-manager.md)
- - [AVPlayer Development (Recommended)](avplayer-playback.md)
- - [AVRecorder Development (Recommended)](avrecorder.md)
- - [Audio Playback Development](audio-playback.md)
- - [Audio Recording Development](audio-recorder.md)
- - [Video Playback Development](video-playback.md)
- - [Video Recording Development](video-recorder.md)
-
-- AVSession
+ - [Audio and Video Overview](av-overview.md)
+ - [AVPlayer and AVRecorder](avplayer-avrecorder-overview.md)
+ - Audio Playback
+ - [Audio Playback Overview](audio-playback-overview.md)
+ - [Using AVPlayer for Audio Playback](using-avplayer-for-playback.md)
+ - [Using AudioRenderer for Audio Playback](using-audiorenderer-for-playback.md)
+ - [Using OpenSL ES for Audio Playback](using-opensl-es-for-playback.md)
+ - [Using TonePlayer for Audio Playback (for System Applications Only)](using-toneplayer-for-playback.md)
+ - [Audio Playback Concurrency Policy](audio-playback-concurrency.md)
+ - [Volume Management](volume-management.md)
+ - [Audio Playback Stream Management](audio-playback-stream-management.md)
+ - [Audio Output Device Management](audio-output-device-management.md)
+ - [Distributed Audio Playback (for System Applications Only)](distributed-audio-playback.md)
+ - Audio Recording
+ - [Audio Recording Overview](audio-recording-overview.md)
+ - [Using AVRecorder for Audio Recording](using-avrecorder-for-recording.md)
+ - [Using AudioCapturer for Audio Recording](using-audiocapturer-for-recording.md)
+ - [Using OpenSL ES for Audio Recording](using-opensl-es-for-recording.md)
+ - [Microphone Management](mic-management.md)
+ - [Audio Recording Stream Management](audio-recording-stream-management.md)
+ - [Audio Input Device Management](audio-input-device-management.md)
+ - Audio Call
+ - [Audio Call Overview](audio-call-overview.md)
+ - [Developing Audio Call](audio-call-development.md)
+ - [Video Playback](video-playback.md)
+ - [Video Recording](video-recording.md)
+- AVSession (for System Applications Only)
- [AVSession Overview](avsession-overview.md)
- - [AVSession Development](avsession-guidelines.md)
-
+ - Local AVSession
+ - [Local AVSession Overview](local-avsession-overview.md)
+ - [AVSession Provider](using-avsession-developer.md)
+ - [AVSession Controller](using-avsession-controller.md)
+ - Distributed AVSession
+ - [Distributed AVSession Overview](distributed-avsession-overview.md)
+ - [Using Distributed AVSession](using-distributed-avsession.md)
+- Camera (for System Applications Only)
+ - [Camera Overview](camera-overview.md)
+ - Camera Development
+ - [Camera Development Preparations](camera-preparation.md)
+ - [Device Input Management](camera-device-input.md)
+ - [Session Management](camera-session-management.md)
+ - [Camera Preview](camera-preview.md)
+ - [Camera Photographing](camera-shooting.md)
+ - [Video Recording](camera-recording.md)
+ - [Camera Metadata](camera-metadata.md)
+ - Best Practices
+ - [Camera Photographing Sample](camera-shooting-case.md)
+ - [Video Recording Sample](camera-recording-case.md)
- Image
- - [Image Development](image.md)
-
-- Camera
- - [Camera Development](camera.md)
- - [Distributed Camera Development](remote-camera.md)
+ - [Image Overview](image-overview.md)
+ - [Image Decoding](image-decoding.md)
+ - Image Processing
+ - [Image Transformation](image-transformation.md)
+ - [Pixel Map Operation](image-pixelmap-operation.md)
+ - [Image Encoding](image-encoding.md)
+ - [Image Tool](image-tool.md)
diff --git a/en/application-dev/media/audio-call-development.md b/en/application-dev/media/audio-call-development.md
new file mode 100644
index 0000000000000000000000000000000000000000..8234c837c2ce985c2a1a7dc91c7e0002fb3d4a69
--- /dev/null
+++ b/en/application-dev/media/audio-call-development.md
@@ -0,0 +1,259 @@
+# Developing Audio Call
+
+During an audio call, audio output (playing the peer voice) and audio input (recording the local voice) are carried out simultaneously. You can use the AudioRenderer to implement audio output and the AudioCapturer to implement audio input.
+
+Before starting or stopping the audio call service, the application needs to check the [audio scene](audio-call-overview.md#audio-scene) and [ringer mode](audio-call-overview.md#ringer-mode) to adopt proper audio management and prompt policies, as shown in the sketch below.
+
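+A minimal sketch (the helper name is illustrative) of checking both before starting the call service:
+
+```ts
+import audio from '@ohos.multimedia.audio';
+
+async function checkCallEnvironment() {
+  let audioManager = audio.getAudioManager();
+  let scene = await audioManager.getAudioScene(); // For a VoIP call, expect AUDIO_SCENE_VOICE_CHAT.
+  let groupManager = await audioManager.getVolumeManager().getVolumeGroupManager(audio.DEFAULT_VOLUME_GROUP_ID);
+  let ringerMode = await groupManager.getRingerMode(); // Decide whether to ring or vibrate for an incoming call.
+  console.info(`audio scene: ${scene}, ringer mode: ${ringerMode}`);
+}
+```
+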
+The sample code below demonstrates the basic process of using the AudioRenderer and AudioCapturer to implement the audio call service. It does not cover call data transmission. In actual development, the peer call data received over the network must be decoded before playback; the sample code reads an audio file instead. Likewise, the local call data must be encoded and packetized before being sent to the peer over the network; the sample code writes an audio file instead.
+
+## Using AudioRenderer to Play the Peer Voice
+
+This process is similar to the process of [using AudioRenderer to develop audio playback](using-audiorenderer-for-playback.md). The key differences lie in the **audioRendererInfo** parameter and audio data source. In the **audioRendererInfo** parameter used for audio calling, **content** must be set to **CONTENT_TYPE_SPEECH**, and **usage** must be set to **STREAM_USAGE_VOICE_COMMUNICATION**.
+
+```ts
+import audio from '@ohos.multimedia.audio';
+import fs from '@ohos.file.fs';
+const TAG = 'VoiceCallDemoForAudioRenderer';
+// The process is similar to the process of using AudioRenderer to develop audio playback. The key differences lie in the audioRendererInfo parameter and audio data source.
+export default class VoiceCallDemoForAudioRenderer {
+ private renderModel = undefined;
+ private audioStreamInfo = {
+ samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, // Sampling rate.
+ channels: audio.AudioChannel.CHANNEL_2, // Channel.
+ sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // Sampling format.
+ encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // Encoding format.
+ }
+ private audioRendererInfo = {
+ // Parameters corresponding to the call scenario need to be used.
+ content: audio.ContentType.CONTENT_TYPE_SPEECH, // Audio content type: speech.
+ usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION, // Audio stream usage type: voice communication.
+ rendererFlags: 0 // AudioRenderer flag. The default value is 0.
+ }
+ private audioRendererOptions = {
+ streamInfo: this.audioStreamInfo,
+ rendererInfo: this.audioRendererInfo
+ }
+ // Create an AudioRenderer instance, and set the events to listen for.
+ init() {
+ audio.createAudioRenderer(this.audioRendererOptions, (err, renderer) => { // Create an AudioRenderer instance.
+ if (!err) {
+ console.info(`${TAG}: creating AudioRenderer success`);
+ this.renderModel = renderer;
+ this.renderModel.on('stateChange', (state) => { // Set the events to listen for. A callback is invoked when the AudioRenderer is switched to the specified state.
+ if (state == audio.AudioState.STATE_PREPARED) {
+ console.info('audio renderer state is: STATE_PREPARED');
+ }
+ if (state == audio.AudioState.STATE_RUNNING) {
+ console.info('audio renderer state is: STATE_RUNNING');
+ }
+ });
+ this.renderModel.on('markReach', 1000, (position) => { // Subscribe to the markReach event. A callback is triggered when the number of rendered frames reaches 1000.
+ if (position == 1000) {
+ console.info('ON Triggered successfully');
+ }
+ });
+ } else {
+ console.error(`${TAG}: creating AudioRenderer failed, error: ${err.message}`);
+ }
+ });
+ }
+ // Start audio rendering.
+ async start() {
+ let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
+ if (stateGroup.indexOf(this.renderModel.state) === -1) { // Rendering can be started only when the AudioRenderer is in the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state.
+ console.error(TAG + 'start failed');
+ return;
+ }
+ await this.renderModel.start(); // Start rendering.
+ const bufferSize = await this.renderModel.getBufferSize();
+ // The process of reading audio file data is used as an example. In actual audio call development, audio data transmitted from the peer needs to be read.
+ let context = getContext(this);
+ let path = context.filesDir;
+
+ const filePath = path + '/voice_call_data.wav'; // Sandbox path. The actual path is /data/storage/el2/base/haps/entry/files/voice_call_data.wav.
+ let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
+ let stat = await fs.stat(filePath);
+ let buf = new ArrayBuffer(bufferSize);
+ let len = stat.size % bufferSize === 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);
+ for (let i = 0; i < len; i++) {
+ let options = {
+ offset: i * bufferSize,
+ length: bufferSize
+ };
+ let readsize = await fs.read(file.fd, buf, options);
+ // buf indicates the audio data to be written to the buffer. Before calling AudioRenderer.write(), you can preprocess the audio data for personalized playback. The AudioRenderer reads the audio data written to the buffer for rendering.
+ let writeSize = await new Promise((resolve, reject) => {
+ this.renderModel.write(buf, (err, writeSize) => {
+ if (err) {
+ reject(err);
+ } else {
+ resolve(writeSize);
+ }
+ });
+ });
+ if (this.renderModel.state === audio.AudioState.STATE_RELEASED) { // The rendering stops if the AudioRenderer is in the STATE_RELEASED state.
+ fs.close(file);
+ await this.renderModel.stop();
+ }
+ if (this.renderModel.state === audio.AudioState.STATE_RUNNING) {
+ if (i === len - 1) { // Stop rendering when the file has been read to the end.
+ fs.close(file);
+ await this.renderModel.stop();
+ }
+ }
+ }
+ }
+ // Pause the rendering.
+ async pause() {
+ // Rendering can be paused only when the AudioRenderer is in the STATE_RUNNING state.
+ if (this.renderModel.state !== audio.AudioState.STATE_RUNNING) {
+ console.info('Renderer is not running');
+ return;
+ }
+ await this.renderModel.pause(); // Pause rendering.
+ if (this.renderModel.state === audio.AudioState.STATE_PAUSED) {
+ console.info('Renderer is paused.');
+ } else {
+ console.error('Pausing renderer failed.');
+ }
+ }
+ // Stop rendering.
+ async stop() {
+ // Rendering can be stopped only when the AudioRenderer is in the STATE_RUNNING or STATE_PAUSED state.
+ if (this.renderModel.state !== audio.AudioState.STATE_RUNNING && this.renderModel.state !== audio.AudioState.STATE_PAUSED) {
+ console.info('Renderer is not running or paused.');
+ return;
+ }
+ await this.renderModel.stop(); // Stop rendering.
+ if (this.renderModel.state === audio.AudioState.STATE_STOPPED) {
+ console.info('Renderer stopped.');
+ } else {
+ console.error('Stopping renderer failed.');
+ }
+ }
+ // Release the instance.
+ async release() {
+ // The AudioRenderer can be released only when it is not in the STATE_RELEASED state.
+ if (this.renderModel.state === audio.AudioState.STATE_RELEASED) {
+ console.info('Renderer already released');
+ return;
+ }
+ await this.renderModel.release(); // Release the instance.
+ if (this.renderModel.state === audio.AudioState.STATE_RELEASED) {
+ console.info('Renderer released');
+ } else {
+ console.error('Renderer release failed.');
+ }
+ }
+}
+```
+
+## Using AudioCapturer to Record the Local Voice
+
+This process is similar to the process of [using AudioCapturer to develop audio recording](using-audiocapturer-for-recording.md). The key differences lie in the **audioCapturerInfo** parameter and audio data stream direction. In the **audioCapturerInfo** parameter used for audio calling, **source** must be set to **SOURCE_TYPE_VOICE_COMMUNICATION**.
+
+```ts
+import audio from '@ohos.multimedia.audio';
+import fs from '@ohos.file.fs';
+const TAG = 'VoiceCallDemoForAudioCapturer';
+// The process is similar to the process of using AudioCapturer to develop audio recording. The key differences lie in the audioCapturerInfo parameter and audio data stream direction.
+export default class VoiceCallDemoForAudioCapturer {
+ private audioCapturer = undefined;
+ private audioStreamInfo = {
+ samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, // Sampling rate.
+ channels: audio.AudioChannel.CHANNEL_1, // Channel.
+ sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // Sampling format.
+ encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // Encoding format.
+ }
+ private audioCapturerInfo = {
+ // Parameters corresponding to the call scenario need to be used.
+ source: audio.SourceType.SOURCE_TYPE_VOICE_COMMUNICATION, // Audio source type: voice communication.
+ capturerFlags: 0 // AudioCapturer flag. The default value is 0.
+ }
+ private audioCapturerOptions = {
+ streamInfo: this.audioStreamInfo,
+ capturerInfo: this.audioCapturerInfo
+ }
+ // Create an AudioCapturer instance, and set the events to listen for.
+ init() {
+ audio.createAudioCapturer(this.audioCapturerOptions, (err, capturer) => { // Create an AudioCapturer instance.
+ if (err) {
+ console.error(`Invoke createAudioCapturer failed, code is ${err.code}, message is ${err.message}`);
+ return;
+ }
+ console.info(`${TAG}: create AudioCapturer success`);
+ this.audioCapturer = capturer;
+ this.audioCapturer.on('markReach', 1000, (position) => { // Subscribe to the markReach event. A callback is triggered when the number of captured frames reaches 1000.
+ if (position === 1000) {
+ console.info('ON Triggered successfully');
+ }
+ });
+ this.audioCapturer.on('periodReach', 2000, (position) => { // Subscribe to the periodReach event. A callback is triggered when the number of captured frames reaches 2000.
+ if (position === 2000) {
+ console.info('ON Triggered successfully');
+ }
+ });
+ });
+ }
+ // Start audio recording.
+ async start() {
+ let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
+ if (stateGroup.indexOf(this.audioCapturer.state) === -1) { // Recording can be started only when the AudioCapturer is in the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state.
+ console.error(`${TAG}: start failed`);
+ return;
+ }
+ await this.audioCapturer.start(); // Start recording.
+ // The following describes how to write audio data to a file. In actual audio call development, the local audio data needs to be encoded and packed, and then sent to the peer through the network.
+ let context = getContext(this);
+ const path = context.filesDir + '/voice_call_data.wav'; // Path for storing the recorded audio file.
+ let file = fs.openSync(path, fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE); // Open the file for read/write, and create it if it does not exist.
+ let fd = file.fd;
+ let numBuffersToCapture = 150; // Write data 150 times.
+ let count = 0;
+ while (numBuffersToCapture) {
+ let bufferSize = await this.audioCapturer.getBufferSize();
+ let buffer = await this.audioCapturer.read(bufferSize, true);
+ let options = {
+ offset: count * bufferSize,
+ length: bufferSize
+ };
+ if (buffer === undefined) {
+ console.error(`${TAG}: read buffer failed`);
+ } else {
+ let number = fs.writeSync(fd, buffer, options);
+ console.info(`${TAG}: write data: ${number}`);
+ }
+ numBuffersToCapture--;
+ count++;
+ }
+ }
+ // Stop recording.
+ async stop() {
+ // The AudioCapturer can be stopped only when it is in STATE_RUNNING or STATE_PAUSED state.
+ if (this.audioCapturer.state !== audio.AudioState.STATE_RUNNING && this.audioCapturer.state !== audio.AudioState.STATE_PAUSED) {
+ console.info('Capturer is not running or paused');
+ return;
+ }
+ await this.audioCapturer.stop(); // Stop recording.
+ if (this.audioCapturer.state === audio.AudioState.STATE_STOPPED) {
+ console.info('Capturer stopped');
+ } else {
+ console.error('Capturer stop failed');
+ }
+ }
+ // Release the instance.
+ async release() {
+ // The AudioCapturer can be released only when it is not in the STATE_RELEASED or STATE_NEW state.
+ if (this.audioCapturer.state === audio.AudioState.STATE_RELEASED || this.audioCapturer.state === audio.AudioState.STATE_NEW) {
+ console.info('Capturer already released');
+ return;
+ }
+ await this.audioCapturer.release(); // Release the instance.
+ if (this.audioCapturer.state == audio.AudioState.STATE_RELEASED) {
+ console.info('Capturer released');
+ } else {
+ console.error('Capturer release failed');
+ }
+ }
+}
+```
diff --git a/en/application-dev/media/audio-call-overview.md b/en/application-dev/media/audio-call-overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..1462198c201203da3eecc902de556c005ad3aae9
--- /dev/null
+++ b/en/application-dev/media/audio-call-overview.md
@@ -0,0 +1,49 @@
+# Audio Call Development
+
+Typically, audio calls are classified into VoIP calls and cellular calls.
+
+- Voice over Internet Protocol (VoIP) is a technology that enables voice calls over a broadband Internet connection. During a VoIP call, call information is packed into data packets and transmitted over the network. VoIP calls therefore demand high network quality, and the call quality is closely tied to the network connection speed.
+
+- Cellular call refers to the traditional telephony service provided by carriers. Currently, APIs for developing cellular calling are available only for system applications.
+
+When developing the audio call service, you must use a proper audio processing policy based on the [audio scene](#audio-scene) and [ringer mode](#ringer-mode).
+
+## Audio Scene
+
+When an application uses the audio call service, the system switches to the call-related audio scene (specified by [AudioScene](../reference/apis/js-apis-audio.md#audioscene8)). The system has preset multiple audio scenes, including ringing, cellular call, and voice chat, and uses a scene-specific policy to process audio.
+
+For example, in the cellular call audio scene, the system prioritizes voice clarity. To deliver a crystal clear voice during calls, the system uses the 3A algorithm to preprocess audio data, suppress echoes, eliminate background noise, and adjust the volume range. The 3A algorithm refers to three audio processing algorithms: Acoustic Echo Cancellation (AEC), Active Noise Control (ANC), and Automatic Gain Control (AGC).
+
+Currently, the following audio scenes are preset:
+
+- **AUDIO_SCENE_DEFAULT**: default audio scene, which can be used in all scenarios except audio calls.
+
+- **AUDIO_SCENE_RINGING**: ringing audio scene, which is used when a call is coming in and is open only to system applications.
+
+- **AUDIO_SCENE_PHONE_CALL**: cellular call audio scene, which is used for cellular calls and is open only to system applications.
+
+- **AUDIO_SCENE_VOICE_CHAT**: voice chat scene, which is used for VoIP calls.
+
+The application can call **getAudioScene** in the [AudioManager](../reference/apis/js-apis-audio.md#audiomanager) class to obtain the audio scene in use. Before starting or stopping the audio call service, the application can call this API to check whether the system has switched to the suitable audio scene.
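+
+For example, a minimal sketch of querying the audio scene in use:
+
+```ts
+import audio from '@ohos.multimedia.audio';
+
+let audioManager = audio.getAudioManager();
+audioManager.getAudioScene().then((audioScene) => {
+  // Check whether the system is already in the expected call scene, for example, AUDIO_SCENE_VOICE_CHAT.
+  console.info(`Current audio scene: ${audioScene}`);
+});
+```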
+
+## Ringer Mode
+
+When an audio call comes in, the application notifies the user by playing a ringtone or vibrating, depending on the setting of [AudioRingMode](../reference/apis/js-apis-audio.md#audioringmode).
+
+The system has preset the following ringer modes:
+
+- **RINGER_MODE_SILENT**: silent mode, in which no sound is played when a call is coming in.
+
+- **RINGER_MODE_VIBRATE**: vibration mode, in which no sound is played but the device vibrates when a call is coming in.
+
+- **RINGER_MODE_NORMAL**: normal mode, in which a ringtone is played when a call is coming in.
+
+The application can call **getRingerMode** in the [AudioVolumeGroupManager](../reference/apis/js-apis-audio.md#audiovolumegroupmanager9) class to obtain the ringer mode in use, so that it can choose a proper policy to notify the user.
+
+If the application wants to be informed of ringer mode changes in time, it can call **on('ringerModeChange')** in the **AudioVolumeGroupManager** class to listen for the changes. When the ringer mode changes, it will receive a notification and can make adjustments accordingly.
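+
+A minimal sketch of querying the ringer mode and listening for its changes (assuming the local volume group identified by **audio.DEFAULT_VOLUME_GROUP_ID**):
+
+```ts
+import audio from '@ohos.multimedia.audio';
+
+async function watchRingerMode() {
+  let audioVolumeManager = audio.getAudioManager().getVolumeManager();
+  let groupManager = await audioVolumeManager.getVolumeGroupManager(audio.DEFAULT_VOLUME_GROUP_ID);
+  let ringerMode = await groupManager.getRingerMode(); // Ringer mode in use.
+  console.info(`Current ringer mode: ${ringerMode}`);
+  groupManager.on('ringerModeChange', (mode) => { // Notified whenever the ringer mode changes.
+    console.info(`Ringer mode changed to: ${mode}`);
+  });
+}
+```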
+
+## Audio Device Switching During a Call
+
+When a call comes in, the system selects an appropriate audio device based on the default priority. The application can switch the call to another audio device as required.
+
+The audio devices that can be used for the audio call are specified by [CommunicationDeviceType](../reference/apis/js-apis-audio.md#communicationdevicetype9). The application can call **isCommunicationDeviceActive** in the [AudioRoutingManager](../reference/apis/js-apis-audio.md#audioroutingmanager9) class to check whether a communication device is active. It can also call **setCommunicationDevice** in the **AudioRoutingManager** class to set a communication device to the active state so that the device can be used for the call.
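+
+A minimal sketch of switching the call to the speaker (device names as defined in **CommunicationDeviceType**):
+
+```ts
+import audio from '@ohos.multimedia.audio';
+
+async function switchCallToSpeaker() {
+  let audioRoutingManager = audio.getAudioManager().getRoutingManager();
+  // Activate the speaker for the ongoing call, then verify its state.
+  await audioRoutingManager.setCommunicationDevice(audio.CommunicationDeviceType.SPEAKER, true);
+  let isActive = await audioRoutingManager.isCommunicationDeviceActive(audio.CommunicationDeviceType.SPEAKER);
+  console.info(`Speaker active for communication: ${isActive}`);
+}
+```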
diff --git a/en/application-dev/media/audio-capturer.md b/en/application-dev/media/audio-capturer.md
deleted file mode 100644
index 8371b6248d71f48e9088da849dc36c3edb2be3cf..0000000000000000000000000000000000000000
--- a/en/application-dev/media/audio-capturer.md
+++ /dev/null
@@ -1,254 +0,0 @@
-# Audio Capture Development
-
-## Introduction
-
-You can use the APIs provided by **AudioCapturer** to record raw audio files, thereby implementing audio data collection.
-
-**Status check**: During application development, you are advised to use **on('stateChange')** to subscribe to state changes of the **AudioCapturer** instance. This is because some operations can be performed only when the audio capturer is in a given state. If the application performs an operation when the audio capturer is not in the given state, the system may throw an exception or generate other undefined behavior.
-
-## Working Principles
-
-This following figure shows the audio capturer state transitions.
-
-**Figure 1** Audio capturer state transitions
-
-
-
-- **PREPARED**: The audio capturer enters this state by calling **create()**.
-- **RUNNING**: The audio capturer enters this state by calling **start()** when it is in the **PREPARED** state or by calling **start()** when it is in the **STOPPED** state.
-- **STOPPED**: The audio capturer in the **RUNNING** state can call **stop()** to stop playing audio data.
-- **RELEASED**: The audio capturer in the **PREPARED** or **STOPPED** state can use **release()** to release all occupied hardware and software resources. It will not transit to any other state after it enters the **RELEASED** state.
-
-## Constraints
-
-Before developing the audio data collection feature, configure the **ohos.permission.MICROPHONE** permission for your application. For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
-
-## How to Develop
-
-For details about the APIs, see [AudioCapturer in Audio Management](../reference/apis/js-apis-audio.md#audiocapturer8).
-
-1. Use **createAudioCapturer()** to create an **AudioCapturer** instance.
-
- Set parameters of the **AudioCapturer** instance in **audioCapturerOptions**. This instance is used to capture audio, control and obtain the recording state, and register a callback for notification.
-
- ```js
- import audio from '@ohos.multimedia.audio';
-
- let audioStreamInfo = {
- samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
- channels: audio.AudioChannel.CHANNEL_1,
- sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
- encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
- }
-
- let audioCapturerInfo = {
- source: audio.SourceType.SOURCE_TYPE_MIC,
- capturerFlags: 0 // 0 is the extended flag bit of the audio capturer. The default value is 0.
- }
-
- let audioCapturerOptions = {
- streamInfo: audioStreamInfo,
- capturerInfo: audioCapturerInfo
- }
-
- let audioCapturer = await audio.createAudioCapturer(audioCapturerOptions);
- console.log('AudioRecLog: Create audio capturer success.');
- ```
-
-2. Use **start()** to start audio recording.
-
- The capturer state will be **STATE_RUNNING** once the audio capturer is started. The application can then begin reading buffers.
-
- ```js
- import audio from '@ohos.multimedia.audio';
-
- async function startCapturer() {
- let state = audioCapturer.state;
- // The audio capturer should be in the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state after being started.
- if (state != audio.AudioState.STATE_PREPARED || state != audio.AudioState.STATE_PAUSED ||
- state != audio.AudioState.STATE_STOPPED) {
- console.info('Capturer is not in a correct state to start');
- return;
- }
- await audioCapturer.start();
-
- state = audioCapturer.state;
- if (state == audio.AudioState.STATE_RUNNING) {
- console.info('AudioRecLog: Capturer started');
- } else {
- console.error('AudioRecLog: Capturer start failed');
- }
- }
- ```
-
-3. Read the captured audio data and convert it to a byte stream. Call **read()** repeatedly to read the data until the application stops the recording.
-
- The following example shows how to write recorded data into a file.
-
- ```js
- import fs from '@ohos.file.fs';
-
- let state = audioCapturer.state;
- // The read operation can be performed only when the state is STATE_RUNNING.
- if (state != audio.AudioState.STATE_RUNNING) {
- console.info('Capturer is not in a correct state to read');
- return;
- }
-
- const path = '/data/data/.pulse_dir/capture_js.wav'; // Path for storing the collected audio file.
- let file = fs.openSync(filePath, 0o2);
- let fd = file.fd;
- if (file !== null) {
- console.info('AudioRecLog: file created');
- } else {
- console.info('AudioRecLog: file create : FAILED');
- return;
- }
-
- if (fd !== null) {
- console.info('AudioRecLog: file fd opened in append mode');
- }
-
- let numBuffersToCapture = 150; // Write data for 150 times.
- let count = 0;
- while (numBuffersToCapture) {
- let bufferSize = await audioCapturer.getBufferSize();
- let buffer = await audioCapturer.read(bufferSize, true);
- let options = {
- offset: count * this.bufferSize,
- length: this.bufferSize
- }
- if (typeof(buffer) == undefined) {
- console.info('AudioRecLog: read buffer failed');
- } else {
- let number = fs.writeSync(fd, buffer, options);
- console.info(`AudioRecLog: data written: ${number}`);
- }
- numBuffersToCapture--;
- count++;
- }
- ```
-
-4. Once the recording is complete, call **stop()** to stop the recording.
-
- ```js
- async function StopCapturer() {
- let state = audioCapturer.state;
- // The audio capturer can be stopped only when it is in STATE_RUNNING or STATE_PAUSED state.
- if (state != audio.AudioState.STATE_RUNNING && state != audio.AudioState.STATE_PAUSED) {
- console.info('AudioRecLog: Capturer is not running or paused');
- return;
- }
-
- await audioCapturer.stop();
-
- state = audioCapturer.state;
- if (state == audio.AudioState.STATE_STOPPED) {
- console.info('AudioRecLog: Capturer stopped');
- } else {
- console.error('AudioRecLog: Capturer stop failed');
- }
- }
- ```
-
-5. After the task is complete, call **release()** to release related resources.
-
- ```js
- async function releaseCapturer() {
- let state = audioCapturer.state;
- // The audio capturer can be released only when it is not in the STATE_RELEASED or STATE_NEW state.
- if (state == audio.AudioState.STATE_RELEASED || state == audio.AudioState.STATE_NEW) {
- console.info('AudioRecLog: Capturer already released');
- return;
- }
-
- await audioCapturer.release();
-
- state = audioCapturer.state;
- if (state == audio.AudioState.STATE_RELEASED) {
- console.info('AudioRecLog: Capturer released');
- } else {
- console.info('AudioRecLog: Capturer release failed');
- }
- }
- ```
-
-6. (Optional) Obtain the audio capturer information.
-
- You can use the following code to obtain the audio capturer information:
-
- ```js
- // Obtain the audio capturer state.
- let state = audioCapturer.state;
-
- // Obtain the audio capturer information.
- let audioCapturerInfo : audio.AuduioCapturerInfo = await audioCapturer.getCapturerInfo();
-
- // Obtain the audio stream information.
- let audioStreamInfo : audio.AudioStreamInfo = await audioCapturer.getStreamInfo();
-
- // Obtain the audio stream ID.
- let audioStreamId : number = await audioCapturer.getAudioStreamId();
-
- // Obtain the Unix timestamp, in nanoseconds.
- let audioTime : number = await audioCapturer.getAudioTime();
-
- // Obtain a proper minimum buffer size.
- let bufferSize : number = await audioCapturer.getBufferSize();
- ```
-
-7. (Optional) Use **on('markReach')** to subscribe to the mark reached event, and use **off('markReach')** to unsubscribe from the event.
-
- After the mark reached event is subscribed to, when the number of frames collected by the audio capturer reaches the specified value, a callback is triggered and the specified value is returned.
-
- ```js
- audioCapturer.on('markReach', (reachNumber) => {
- console.info('Mark reach event Received');
- console.info(`The Capturer reached frame: ${reachNumber}`);
- });
-
- audioCapturer.off('markReach'); // Unsubscribe from the mark reached event. This event will no longer be listened for.
- ```
-
-8. (Optional) Use **on('periodReach')** to subscribe to the period reached event, and use **off('periodReach')** to unsubscribe from the event.
-
- After the period reached event is subscribed to, each time the number of frames collected by the audio capturer reaches the specified value, a callback is triggered and the specified value is returned.
-
- ```js
- audioCapturer.on('periodReach', (reachNumber) => {
- console.info('Period reach event Received');
- console.info(`In this period, the Capturer reached frame: ${reachNumber}`);
- });
-
- audioCapturer.off('periodReach'); // Unsubscribe from the period reached event. This event will no longer be listened for.
- ```
-
-9. If your application needs to perform some operations when the audio capturer state is updated, it can subscribe to the state change event. When the audio capturer state is updated, the application receives a callback containing the event type.
-
- ```js
- audioCapturer.on('stateChange', (state) => {
- console.info(`AudioCapturerLog: Changed State to : ${state}`)
- switch (state) {
- case audio.AudioState.STATE_PREPARED:
- console.info('--------CHANGE IN AUDIO STATE----------PREPARED--------------');
- console.info('Audio State is : Prepared');
- break;
- case audio.AudioState.STATE_RUNNING:
- console.info('--------CHANGE IN AUDIO STATE----------RUNNING--------------');
- console.info('Audio State is : Running');
- break;
- case audio.AudioState.STATE_STOPPED:
- console.info('--------CHANGE IN AUDIO STATE----------STOPPED--------------');
- console.info('Audio State is : stopped');
- break;
- case audio.AudioState.STATE_RELEASED:
- console.info('--------CHANGE IN AUDIO STATE----------RELEASED--------------');
- console.info('Audio State is : released');
- break;
- default:
- console.info('--------CHANGE IN AUDIO STATE----------INVALID--------------');
- console.info('Audio State is : invalid');
- break;
- }
- });
- ```
diff --git a/en/application-dev/media/audio-input-device-management.md b/en/application-dev/media/audio-input-device-management.md
new file mode 100644
index 0000000000000000000000000000000000000000..ebdadfaad7a9316cf055d3216ac3a94a1b052a33
--- /dev/null
+++ b/en/application-dev/media/audio-input-device-management.md
@@ -0,0 +1,88 @@
+# Audio Input Device Management
+
+If a device is connected to multiple audio input devices, you can use **AudioRoutingManager** to specify an audio input device to record audio. For details, see [AudioRoutingManager](../reference/apis/js-apis-audio.md#audioroutingmanager9).
+
+## Creating an AudioRoutingManager Instance
+
+Before using **AudioRoutingManager** to manage audio devices, import the audio module and create an **AudioManager** instance.
+
+```ts
+import audio from '@ohos.multimedia.audio'; // Import the audio module.
+
+let audioManager = audio.getAudioManager(); // Create an AudioManager instance.
+
+let audioRoutingManager = audioManager.getRoutingManager(); // Call an API of AudioManager to create an AudioRoutingManager instance.
+```
+
+## Supported Audio Input Device Types
+
+The table below lists the supported audio input devices.
+
+| Name| Value| Description|
+| -------- | -------- | -------- |
+| WIRED_HEADSET | 3 | Wired headset with a microphone.|
+| BLUETOOTH_SCO | 7 | Bluetooth device using Synchronous Connection Oriented (SCO) links.|
+| MIC | 15 | Microphone.|
+| USB_HEADSET | 22 | USB Type-C headset.|
+
+## Obtaining Input Device Information
+
+Use **getDevices()** to obtain information about all the input devices.
+
+```ts
+audioRoutingManager.getDevices(audio.DeviceFlag.INPUT_DEVICES_FLAG).then((data) => {
+ console.info('Promise returned to indicate that the device list is obtained.');
+});
+```
+
+## Listening for Device Connection State Changes
+
+Set a listener to listen for changes of the device connection state. When a device is connected or disconnected, a callback is triggered.
+
+```ts
+// Listen for connection state changes of audio devices.
+audioRoutingManager.on('deviceChange', audio.DeviceFlag.INPUT_DEVICES_FLAG, (deviceChanged) => {
+ console.info('device change type: ' + deviceChanged.type); // Device connection state change. The value 0 means that the device is connected and 1 means that the device is disconnected.
+ console.info('device descriptor size : ' + deviceChanged.deviceDescriptors.length);
+ console.info('device change descriptor: ' + deviceChanged.deviceDescriptors[0].deviceRole); // Device role.
+ console.info('device change descriptor: ' + deviceChanged.deviceDescriptors[0].deviceType); // Device type.
+});
+
+// Cancel the listener for the connection state changes of audio devices.
+audioRoutingManager.off('deviceChange', (deviceChanged) => {
+ console.info('Should be no callback.');
+});
+```
+
+## Selecting an Audio Input Device (for System Applications Only)
+
+Currently, only one input device can be selected, and the device ID is used as the unique identifier. For details about audio device descriptors, see [AudioDeviceDescriptors](../reference/apis/js-apis-audio.md#audiodevicedescriptors).
+
+> **NOTE**
+>
+> The user can connect to a group of audio devices (for example, a pair of Bluetooth headsets), but the system treats them as one device (a group of devices that share the same device ID).
+
+```ts
+let inputAudioDeviceDescriptor = [{
+ deviceRole : audio.DeviceRole.INPUT_DEVICE,
+ deviceType : audio.DeviceType.EARPIECE,
+ id : 1,
+ name : "",
+ address : "",
+ sampleRates : [44100],
+ channelCounts : [2],
+ channelMasks : [0],
+ networkId : audio.LOCAL_NETWORK_ID,
+ interruptGroupId : 1,
+ volumeGroupId : 1,
+}];
+
+async function selectInputDevice() {
+ audioRoutingManager.selectInputDevice(inputAudioDeviceDescriptor).then(() => {
+ console.info('Invoke selectInputDevice succeeded.');
+ }).catch((err) => {
+ console.error(`Invoke selectInputDevice failed, code is ${err.code}, message is ${err.message}`);
+ });
+}
+```
diff --git a/en/application-dev/media/audio-interruptmode.md b/en/application-dev/media/audio-interruptmode.md
deleted file mode 100644
index 48a53bf5d5990ac88aae1271466a6aa36d52ac98..0000000000000000000000000000000000000000
--- a/en/application-dev/media/audio-interruptmode.md
+++ /dev/null
@@ -1,55 +0,0 @@
-# Audio Interruption Mode Development
-
-## Introduction
-The audio interruption mode is used to control the playback of multiple audio streams.
-
-Audio applications can set the audio interruption mode to independent or shared under **AudioRenderer**.
-
-In shared mode, multiple audio streams share one session ID. In independent mode, each audio stream has an independent session ID.
-
-**Asynchronous operation**: To prevent the UI thread from being blocked, most **AudioRenderer** calls are asynchronous. Each API provides the callback and promise functions. The following examples use the promise functions.
-
-## How to Develop
-
-For details about the APIs, see [AudioRenderer in Audio Management](../reference/apis/js-apis-audio.md#audiorenderer8).
-
-1. Use **createAudioRenderer()** to create an **AudioRenderer** instance.
-
- Set parameters of the **AudioRenderer** instance in **audioRendererOptions**.
-
- This instance is used to render audio, control and obtain the rendering status, and register a callback for notification.
-
-```js
- import audio from '@ohos.multimedia.audio';
-
- var audioStreamInfo = {
- samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
- channels: audio.AudioChannel.CHANNEL_1,
- sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
- encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
- }
-
- var audioRendererInfo = {
- content: audio.ContentType.CONTENT_TYPE_SPEECH,
- usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION,
- rendererFlags: 1
- }
-
- var audioRendererOptions = {
- streamInfo: audioStreamInfo,
- rendererInfo: audioRendererInfo
- }
-
-let audioRenderer = await audio.createAudioRenderer(audioRendererOptions);
- ```
-
-2. Set the audio interruption mode.
-
- After the **AudioRenderer** instance is initialized, you can set the audio interruption mode.
-
- ```js
- var mode_ = audio.InterruptMode.SHARE_MODE;
- await this.audioRenderer.setInterruptMode(mode_).then(() => {
- console.log('[JSAR] [SetInterruptMode] Setting: '+ (mode_ == 0? " share mode":"independent mode") + "success");
- });
- ```
diff --git a/en/application-dev/media/audio-output-device-management.md b/en/application-dev/media/audio-output-device-management.md
new file mode 100644
index 0000000000000000000000000000000000000000..ad20276c60ce7e535f99778e18d04e4e50e29dc6
--- /dev/null
+++ b/en/application-dev/media/audio-output-device-management.md
@@ -0,0 +1,90 @@
+# Audio Output Device Management
+
+If a device is connected to multiple audio output devices, you can use **AudioRoutingManager** to specify an audio output device to play audio. For details, see [AudioRoutingManager](../reference/apis/js-apis-audio.md#audioroutingmanager9).
+
+## Creating an AudioRoutingManager Instance
+
+Before using **AudioRoutingManager** to manage audio devices, import the audio module and create an **AudioManager** instance.
+
+```ts
+import audio from '@ohos.multimedia.audio'; // Import the audio module.
+
+let audioManager = audio.getAudioManager(); // Create an AudioManager instance.
+
+let audioRoutingManager = audioManager.getRoutingManager(); // Call an API of AudioManager to create an AudioRoutingManager instance.
+```
+
+## Supported Audio Output Device Types
+
+The table below lists the supported audio output devices.
+
+| Name| Value| Description|
+| -------- | -------- | -------- |
+| EARPIECE | 1 | Earpiece.|
+| SPEAKER | 2 | Speaker.|
+| WIRED_HEADSET | 3 | Wired headset with a microphone.|
+| WIRED_HEADPHONES | 4 | Wired headset without microphone.|
+| BLUETOOTH_SCO | 7 | Bluetooth device using Synchronous Connection Oriented (SCO) links.|
+| BLUETOOTH_A2DP | 8 | Bluetooth device using Advanced Audio Distribution Profile (A2DP) links.|
+| USB_HEADSET | 22 | USB Type-C headset.|
+
+## Obtaining Output Device Information
+
+Use **getDevices()** to obtain information about all the output devices.
+
+```ts
+audioRoutingManager.getDevices(audio.DeviceFlag.OUTPUT_DEVICES_FLAG).then((data) => {
+ console.info('Promise returned to indicate that the device list is obtained.');
+});
+```
+
+## Listening for Device Connection State Changes
+
+Set a listener to listen for changes of the device connection state. When a device is connected or disconnected, a callback is triggered.
+
+```ts
+// Listen for connection state changes of audio devices.
+audioRoutingManager.on('deviceChange', audio.DeviceFlag.OUTPUT_DEVICES_FLAG, (deviceChanged) => {
+ console.info('device change type: ' + deviceChanged.type); // Device connection state change. The value 0 means that the device is connected and 1 means that the device is disconnected.
+ console.info('device descriptor size : ' + deviceChanged.deviceDescriptors.length);
+ console.info('device change descriptor: ' + deviceChanged.deviceDescriptors[0].deviceRole); // Device role.
+ console.info('device change descriptor: ' + deviceChanged.deviceDescriptors[0].deviceType); // Device type.
+});
+
+// Cancel the listener for the connection state changes of audio devices.
+audioRoutingManager.off('deviceChange', (deviceChanged) => {
+ console.info('Should be no callback.');
+});
+```
+
+## Selecting an Audio Output Device (for System Applications Only)
+
+Currently, only one output device can be selected, and the device ID is used as the unique identifier. For details about audio device descriptors, see [AudioDeviceDescriptors](../reference/apis/js-apis-audio.md#audiodevicedescriptors).
+
+> **NOTE**
+>
+> The user can connect to a group of audio devices (for example, a pair of Bluetooth headsets), but the system treats them as one device (a group of devices that share the same device ID).
+
+```ts
+let outputAudioDeviceDescriptor = [{
+ deviceRole : audio.DeviceRole.OUTPUT_DEVICE,
+ deviceType : audio.DeviceType.SPEAKER,
+ id : 1,
+ name : "",
+ address : "",
+ sampleRates : [44100],
+ channelCounts : [2],
+ channelMasks : [0],
+ networkId : audio.LOCAL_NETWORK_ID,
+ interruptGroupId : 1,
+ volumeGroupId : 1,
+}];
+
+async function selectOutputDevice(){
+ audioRoutingManager.selectOutputDevice(outputAudioDeviceDescriptor).then(() => {
+ console.info('Invoke selectOutputDevice succeeded.');
+ }).catch((err) => {
+ console.error(`Invoke selectOutputDevice failed, code is ${err.code}, message is ${err.message}`);
+ });
+}
+```
diff --git a/en/application-dev/media/audio-overview.md b/en/application-dev/media/audio-overview.md
deleted file mode 100755
index e1fd93eab8238b8ae55c9ce3dff2e807a1585a00..0000000000000000000000000000000000000000
--- a/en/application-dev/media/audio-overview.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Audio Overview
-
-You can use APIs provided by the audio module to implement audio-related features, including audio playback and volume management.
-
-## Basic Concepts
-
-- **Sampling**
- Sampling is a process to obtain discrete-time signals by extracting samples from analog signals in a continuous time domain at a specific interval.
-
-- **Sampling rate**
- Sampling rate is the number of samples extracted from a continuous signal per second to form a discrete signal. It is measured in Hz. Generally, human hearing range is from 20 Hz to 20 kHz. Common audio sampling rates include 8 kHz, 11.025 kHz, 22.05 kHz, 16 kHz, 37.8 kHz, 44.1 kHz, 48 kHz, 96 kHz, and 192 kHz.
-
-- **Channel**
- Channels refer to different spatial positions where independent audio signals are recorded or played. The number of channels is the number of audio sources used during audio recording, or the number of speakers used for audio playback.
-
-- **Audio frame**
- Audio data is in stream form. For the convenience of audio algorithm processing and transmission, it is generally agreed that a data amount in a unit of 2.5 to 60 milliseconds is one audio frame. This unit is called sampling time, and its length is specific to codecs and the application requirements.
-
-- **PCM**
- Pulse code modulation (PCM) is a method used to digitally represent sampled analog signals. It converts continuous-time analog signals into discrete-time digital signal samples.
diff --git a/en/application-dev/media/audio-playback-concurrency.md b/en/application-dev/media/audio-playback-concurrency.md
new file mode 100644
index 0000000000000000000000000000000000000000..0b36594f6bef62c7ba7588bc8977af67609a6c9d
--- /dev/null
+++ b/en/application-dev/media/audio-playback-concurrency.md
@@ -0,0 +1,119 @@
+# Audio Playback Concurrency Policy
+
+## Audio Interruption Policy
+
+If multiple audio streams are played at the same time, the user may find the mixed sound unpleasant or even jarring. To address this issue, OpenHarmony presets the audio interruption policy so that only the audio stream holding the audio focus can be played.
+
+When an application attempts to play audio, the system requests audio focus for the audio stream. The stream can be played only if it gains the focus; if the request is rejected, it cannot be played. If the stream is later interrupted by another stream, it loses the focus and its playback is paused. The system performs all these actions automatically, and no additional operations are required from the application. However, to keep the application state consistent with the system and ensure a good user experience, the application is advised to [listen for the audio interruption event](#listening-for-the-audio-interruption-event) and perform the corresponding processing when receiving such an event (specified by [InterruptEvent](../reference/apis/js-apis-audio.md#interruptevent9)).
+
+OpenHarmony presets two [audio interruption modes](#audio-interruption-mode) to specify whether audio concurrency is controlled by the application or system. You can choose a mode for each of the audio streams created by the same application.
+
+The audio interruption policy determines the operations (for example, pause, resume, duck, or unduck) to be performed on the audio stream. These operations can be performed by the system or application. To distinguish the party that performs the operations, the [audio interruption type](#audio-interruption-type) is introduced, and two audio interruption types are preset.
+
+### Audio Interruption Mode
+
+Two audio interruption modes, specified by [InterruptMode](../reference/apis/js-apis-audio.md#interruptmode9), are preset in the audio interruption policy:
+
+- **SHARED_MODE**: Multiple audio streams created by an application share one audio focus. The concurrency rules between these audio streams are determined by the application, without the use of the audio interruption policy. However, if another application needs to play audio while one of these audio streams is being played, the audio interruption policy is triggered.
+
+- **INDEPENDENT_MODE**: Each audio stream created by an application has an independent audio focus. When multiple audio streams are played concurrently, the audio interruption policy is triggered.
+
+The application can select an audio interruption mode as required. By default, the **SHARED_MODE** is used.
+
+You can set the audio interruption mode in either of the following ways:
+
+- If you [use the AVPlayer to develop audio playback](using-avplayer-for-playback.md), set the [audioInterruptMode](../reference/apis/js-apis-media.md#avplayer9) attribute of the AVPlayer to set the audio interruption mode.
+
+- If you [use the AudioRenderer to develop audio playback](using-audiorenderer-for-playback.md), call [setInterruptMode](../reference/apis/js-apis-audio.md#setinterruptmode9) of the AudioRenderer to set the audio interruption mode, as shown in the sketch after this list.
+
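+A minimal sketch (assuming an existing **AudioRenderer** instance is passed in) of switching a stream to the independent mode:
+
+```ts
+import audio from '@ohos.multimedia.audio';
+
+async function useIndependentMode(audioRenderer: audio.AudioRenderer) {
+  // Each audio stream of this renderer holds its own audio focus.
+  await audioRenderer.setInterruptMode(audio.InterruptMode.INDEPENDENT_MODE);
+}
+```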
+
+### Audio Interruption Type
+
+The audio interruption policy (containing two audio interruption modes) determines the operation to be performed on each audio stream. These operations can be carried out by the system or application. To distinguish the executors, the audio interruption type, specified by [InterruptForceType](../reference/apis/js-apis-audio.md#interruptforcetype9), is introduced.
+
+- **INTERRUPT_FORCE**: The operation is performed by the system. The system forcibly interrupts audio playback.
+
+- **INTERRUPT_SHARE**: The operation is performed by the application. The application can take action or ignore the event as required.
+
+For the pause operation, the **INTERRUPT_FORCE** type is always used and cannot be changed by the application. For other operations, such as resume, the **INTERRUPT_SHARE** type is used, and the application can choose to perform the operation or ignore it. The application can obtain the audio interruption type from the member variable **forceType** in the audio interruption event.
+
+During audio playback, the system automatically requests, holds, and releases the focus for the audio stream. When audio interruption occurs, the system forcibly pauses or stops playing or ducks the volume down for the audio stream, and sends an audio interruption event callback to the application. To maintain state consistency between the application and the system and ensure good user experience, it is recommended that the application [listen for the audio interruption event](#listening-for-the-audio-interruption-event) and perform processing when receiving such an event.
+
+For operations that cannot be forcibly performed by the system (for example, resume), the system sends the audio interruption event containing **INTERRUPT_SHARE**, and the application can choose to take action or ignore the event.
+
+## Listening for the Audio Interruption Event
+
+Your application is advised to listen for the audio interruption event when playing audio. When audio interruption occurs, the system performs processing on the audio stream according to the preset policy, and sends the audio interruption event to the application.
+
+Upon receipt of the event, the application carries out processing based on the event content to ensure that its state is consistent with the expected effect.
+
+You can use either of the following methods to listen for the audio interruption event:
+
+- If you [use the AVPlayer to develop audio playback](using-avplayer-for-playback.md), call [on('audioInterrupt')](../reference/apis/js-apis-media.md#onaudiointerrupt9) of the AVPlayer to listen for the event.
+
+- If you [use the AudioRenderer to develop audio playback](using-audiorenderer-for-playback.md), call [on('audioInterrupt')](../reference/apis/js-apis-audio.md#onaudiointerrupt9) of the AudioRenderer to listen for the event.
+
+ To deliver an optimal user experience, the application needs to perform processing based on the event content. The following uses the AudioRenderer as an example to describe the recommended application processing. (The recommended processing is similar if the AVPlayer is used to develop audio playback.) You can customize the code to implement your own audio playback functionality or application processing based on service requirements.
+
+```ts
+import audio from '@ohos.multimedia.audio';
+
+let isPlay: boolean; // An identifier specifying whether the audio stream is being played. In actual development, this parameter corresponds to the module related to the audio playback state.
+let isDucked: boolean; // An identifier specifying whether the volume is ducked. In actual development, this parameter corresponds to the module related to the audio volume.
+let started: boolean; // An identifier specifying whether the start operation is successful.
+
+async function onAudioInterrupt() {
+ // The AudioRenderer is used as an example to describe how to develop audio playback. The audioRenderer variable is the AudioRenderer instance created for playback.
+ audioRenderer.on('audioInterrupt', async(interruptEvent) => {
+ // When an audio interruption event occurs, the audioRenderer receives the interruptEvent callback and performs processing based on the content in the callback.
+ // The audioRenderer reads the value of interruptEvent.forceType to see whether the system has forcibly performed the operation.
+ // The audioRenderer then reads the value of interruptEvent.hintType and performs corresponding processing.
+ if (interruptEvent.forceType === audio.InterruptForceType.INTERRUPT_FORCE) {
+ // If the value of interruptEvent.forceType is INTERRUPT_FORCE, the system has performed audio-related processing, and the application needs to update its state and make adjustments accordingly.
+ switch (interruptEvent.hintType) {
+ case audio.InterruptHint.INTERRUPT_HINT_PAUSE:
+ // The system has paused the audio stream (the focus is temporarily lost). To ensure state consistency, the application needs to switch to the audio paused state.
+          // Temporarily losing the focus: After the other audio stream releases the focus, the current audio stream will receive the audio interruption event corresponding to resume (INTERRUPT_HINT_RESUME), and the application can then resume the playback.
+ isPlay = false; // A simplified processing indicating several operations for switching the application to the audio paused state.
+ break;
+ case audio.InterruptHint.INTERRUPT_HINT_STOP:
+ // The system has stopped the audio stream (the focus is permanently lost). To ensure state consistency, the application needs to switch to the audio paused state.
+ // Permanently losing the focus: No audio interruption event will be received. The user must manually trigger the operation to resume playback.
+ isPlay = false; // A simplified processing indicating several operations for switching the application to the audio paused state.
+ break;
+ case audio.InterruptHint.INTERRUPT_HINT_DUCK:
+ // The system has ducked the volume down (20% of the normal volume by default). To ensure state consistency, the application needs to switch to the volume decreased state.
+ // If the application does not want to play at a lower volume, it can select another processing mode, for example, proactively pausing the playback.
+ isDucked = true; // A simplified processing indicating several operations for switching the application to the volume decreased state.
+ break;
+ case audio.InterruptHint.INTERRUPT_HINT_UNDUCK:
+ // The system has restored the audio volume to normal. To ensure state consistency, the application needs to switch to the normal volume state.
+ isDucked = false; // A simplified processing indicating several operations for switching the application to the normal volume state.
+ break;
+ default:
+ break;
+ }
+ } else if (interruptEvent.forceType === audio.InterruptForceType.INTERRUPT_SHARE) {
+ // If the value of interruptEvent.forceType is INTERRUPT_SHARE, the application can take action or ignore as required.
+ switch (interruptEvent.hintType) {
+ case audio.InterruptHint.INTERRUPT_HINT_RESUME:
+ // The paused audio stream can be played. It is recommended that the application continue to play the audio stream and switch to the audio playing state.
+ // If the application does not want to continue the playback, it can ignore the event.
+ // To continue the playback, the application needs to call start(), and use the identifier variable started to record the execution result of start().
+          try {
+            await audioRenderer.start();
+            started = true; // Calling start() is successful.
+          } catch (err) {
+            started = false; // Calling start() fails.
+          }
+ // If calling start() is successful, the application needs to switch to the audio playing state.
+ if (started) {
+ isPlay = true; // A simplified processing indicating several operations for switching the application to the audio playing state.
+ } else {
+ // Resuming the audio playback fails.
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ });
+}
+```
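+
+If you use the AVPlayer instead, the listening code is similar. The following is a minimal sketch, not the full recommended handling: it assumes that **avPlayer** is an AVPlayer instance created via **media.createAVPlayer()**, and the handler body is reduced to a log. In a real application, apply the same **forceType**/**hintType** logic shown above.
+
+```ts
+import media from '@ohos.multimedia.media';
+import audio from '@ohos.multimedia.audio';
+
+// A minimal sketch: register the audio interruption listener on an AVPlayer instance.
+function listenOnAVPlayer(avPlayer: media.AVPlayer) {
+  avPlayer.on('audioInterrupt', (interruptEvent: audio.InterruptEvent) => {
+    // Apply the same forceType/hintType handling as in the AudioRenderer example above.
+    console.info(`forceType: ${interruptEvent.forceType}, hintType: ${interruptEvent.hintType}`);
+  });
+}
+```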
diff --git a/en/application-dev/media/audio-playback-overview.md b/en/application-dev/media/audio-playback-overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..d17970d6de9b8b238db74d971ad5f58c605462eb
--- /dev/null
+++ b/en/application-dev/media/audio-playback-overview.md
@@ -0,0 +1,25 @@
+# Audio Playback Development
+
+## Selecting an Audio Playback Development Mode
+
+OpenHarmony provides multiple classes for you to develop audio playback applications. You can select them based on the audio data formats, audio sources, audio usage scenarios, and even the programming language you use. Selecting a suitable class helps reduce your development workload and enables your application to deliver a better playback experience.
+
+- [AVPlayer](using-avplayer-for-playback.md): provides ArkTS and JS APIs to implement audio and video playback. It also supports parsing streaming media and local assets, decapsulating media assets, decoding audio, and outputting audio. It can play audio files in MP3 and M4A formats, but not in PCM format. (A minimal usage sketch follows this list.)
+
+- [AudioRenderer](using-audiorenderer-for-playback.md): provides ArkTS and JS APIs to implement audio output. It supports only the PCM format and requires applications to continuously write audio data. Applications can preprocess the data, for example, by setting the sampling rate and bit width of the audio files, before writing it. This class can be used to develop more professional and diverse playback applications. To use this class, you must have basic audio processing knowledge.
+
+- [OpenSLES](using-opensl-es-for-playback.md): provides a set of standard, cross-platform, yet unique native audio APIs. It supports audio output in PCM format and is applicable to playback applications that are ported from other embedded platforms or that implement audio output at the native layer.
+
+- [TonePlayer](using-toneplayer-for-playback.md): provides ArkTS and JS APIs to implement the playback of dialing tones and ringback tones. It plays only content selected from a fixed range of tone types and does not require media assets or audio data as input. This class applies to specific scenarios where dialing tones and ringback tones are played, and it is available only to system applications.
+
+- Applications often need to use short sound effects, such as the camera shutter, key press, and game shooting sound effects. Currently, only the **AVPlayer** class can implement audio file playback. More APIs will be provided to support this scenario in later versions.
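+
+As a quick taste of the AVPlayer mode, the sketch below plays a local audio file. It is a minimal sketch under assumptions: the fd URL is hypothetical and must point to a real audio asset on the device, and error handling is omitted. For the full flow, see [AVPlayer](using-avplayer-for-playback.md).
+
+```ts
+import media from '@ohos.multimedia.media';
+
+// A minimal sketch: create an AVPlayer, drive its state machine, and play a local file.
+async function quickPlayDemo() {
+  let avPlayer = await media.createAVPlayer();
+  avPlayer.on('stateChange', async (state) => {
+    if (state === 'initialized') {
+      await avPlayer.prepare(); // Moves the player to the 'prepared' state.
+    } else if (state === 'prepared') {
+      await avPlayer.play();
+    }
+  });
+  avPlayer.url = 'fd://100'; // Hypothetical fd; open a real audio asset and use its file descriptor.
+}
+```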
+
+## Precautions for Developing Audio Playback Applications
+
+To enable your application to continue playing audio in the background or when the screen is off, the application must meet the following conditions:
+
+1. The application is registered with the system for unified management through the **AVSession** APIs. Otherwise, the playback will be forcibly stopped when the application switches to the background. For details, see [AVSession Development](avsession-overview.md).
+
+2. The application must request a continuous task to prevent itself from being suspended (see the sketch at the end of this section). For details, see [Continuous Task Development](../task-management/continuous-task-dev-guide.md).
+
+If the playback is interrupted when the application switches to the background, you can check the log to see whether the application has requested a continuous task: if it has, no **pause id** log is generated; otherwise, a **pause id** log is generated.
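+
+The following is a minimal sketch of requesting a continuous task for audio playback. It is illustrative only: the bundle name and ability name are placeholders, and **context** is assumed to be the UIAbility context of the playback page.
+
+```ts
+import backgroundTaskManager from '@ohos.resourceschedule.backgroundTaskManager';
+import wantAgent from '@ohos.app.ability.wantAgent';
+
+async function requestContinuousTask(context) {
+  let wantAgentInfo = {
+    wants: [{ bundleName: 'com.example.player', abilityName: 'EntryAbility' }], // Placeholder names.
+    operationType: wantAgent.OperationType.START_ABILITY,
+    requestCode: 0
+  };
+  let agent = await wantAgent.getWantAgent(wantAgentInfo);
+  // AUDIO_PLAYBACK keeps the application from being suspended while it plays in the background.
+  await backgroundTaskManager.startBackgroundRunning(context,
+    backgroundTaskManager.BackgroundMode.AUDIO_PLAYBACK, agent);
+}
+```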
diff --git a/en/application-dev/media/audio-playback-stream-management.md b/en/application-dev/media/audio-playback-stream-management.md
new file mode 100644
index 0000000000000000000000000000000000000000..c6cf398b8403b3f799a1db20716021c91ca6e078
--- /dev/null
+++ b/en/application-dev/media/audio-playback-stream-management.md
@@ -0,0 +1,120 @@
+# Audio Playback Stream Management
+
+An audio playback application must be aware of audio stream state changes and perform corresponding operations. For example, when detecting that an audio stream is being played or paused, the application must update the display of the **Play** button accordingly.
+
+## Reading or Listening for Audio Stream State Changes in the Application
+
+Create an AudioRenderer by referring to [Using AudioRenderer for Audio Playback](using-audiorenderer-for-playback.md) or [audio.createAudioRenderer](../reference/apis/js-apis-audio.md#audiocreateaudiorenderer8). Then obtain the audio stream state changes in either of the following ways:
+
+- Check the [state](../reference/apis/js-apis-audio.md#attributes) of the AudioRenderer.
+
+ ```ts
+ let audioRendererState = audioRenderer.state;
+  console.info(`Current state is: ${audioRendererState}`);
+ ```
+
+- Register **stateChange** to listen for state changes of the AudioRenderer.
+
+ ```ts
+ audioRenderer.on('stateChange', (rendererState) => {
+ console.info(`State change to: ${rendererState}`)
+ });
+ ```
+
+The application then performs an operation, for example, changing the display of the **Play** button, by comparing the obtained state with [AudioState](../reference/apis/js-apis-audio.md#audiostate8).
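+
+For example, a minimal sketch that keeps the **Play** button in sync (the UI update itself is hypothetical application logic):
+
+```ts
+audioRenderer.on('stateChange', (rendererState) => {
+  if (rendererState === audio.AudioState.STATE_RUNNING) {
+    // Playback is ongoing; for example, switch the button to its "Pause" appearance.
+  } else if (rendererState === audio.AudioState.STATE_PAUSED || rendererState === audio.AudioState.STATE_STOPPED) {
+    // Playback is not ongoing; for example, switch the button to its "Play" appearance.
+  }
+});
+```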
+
+## Reading or Listening for Changes in All Audio Streams
+
+If an application needs to obtain the change information about all audio streams, it can use **AudioStreamManager** to read or listen for the changes of all audio streams.
+
+> **NOTE**
+>
+> Audio stream change information marked as a system API can be viewed only by system applications.
+
+The figure below shows the call relationship of audio stream management.
+
+
+
+During application development, first use **getStreamManager()** to create an **AudioStreamManager** instance. Then call **on('audioRendererChange')** to listen for audio stream changes and receive a notification when the audio stream state or device changes. To cancel listening for these changes, call **off('audioRendererChange')**. You can also call **getCurrentAudioRendererInfoArray()** to obtain information such as the unique ID of the playback stream, the UID of the playback stream client, and the stream status.
+
+For details about the APIs, see [AudioStreamManager](../reference/apis/js-apis-audio.md#audiostreammanager9).
+
+## How to Develop
+
+1. Create an **AudioStreamManager** instance.
+
+ Before using **AudioStreamManager** APIs, you must use **getStreamManager()** to create an **AudioStreamManager** instance.
+
+ ```ts
+ import audio from '@ohos.multimedia.audio';
+ let audioManager = audio.getAudioManager();
+ let audioStreamManager = audioManager.getStreamManager();
+ ```
+
+2. Use **on('audioRendererChange')** to listen for audio playback stream changes. If the application needs to receive a notification when the audio playback stream state or device changes, it can subscribe to this event.
+
+ ```ts
+ audioStreamManager.on('audioRendererChange', (AudioRendererChangeInfoArray) => {
+ for (let i = 0; i < AudioRendererChangeInfoArray.length; i++) {
+ let AudioRendererChangeInfo = AudioRendererChangeInfoArray[i];
+ console.info(`## RendererChange on is called for ${i} ##`);
+ console.info(`StreamId for ${i} is: ${AudioRendererChangeInfo.streamId}`);
+ console.info(`Content ${i} is: ${AudioRendererChangeInfo.rendererInfo.content}`);
+ console.info(`Stream ${i} is: ${AudioRendererChangeInfo.rendererInfo.usage}`);
+ console.info(`Flag ${i} is: ${AudioRendererChangeInfo.rendererInfo.rendererFlags}`);
+      for (let j = 0; j < AudioRendererChangeInfo.deviceDescriptors.length; j++) {
+ console.info(`Id: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].id}`);
+ console.info(`Type: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].deviceType}`);
+ console.info(`Role: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].deviceRole}`);
+ console.info(`Name: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].name}`);
+ console.info(`Address: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].address}`);
+ console.info(`SampleRates: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].sampleRates[0]}`);
+ console.info(`ChannelCount ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].channelCounts[0]}`);
+ console.info(`ChannelMask: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].channelMasks}`);
+ }
+ }
+ });
+ ```
+
+3. (Optional) Use **off('audioRendererChange')** to cancel listening for audio playback stream changes.
+
+ ```ts
+ audioStreamManager.off('audioRendererChange');
+ console.info('RendererChange Off is called ');
+ ```
+
+4. (Optional) Call **getCurrentAudioRendererInfoArray()** to obtain the information about all audio playback streams.
+
+   This API can be used to obtain the unique ID of the audio playback stream, the UID of the audio playback client, the audio status, and other information about the AudioRenderer.
+ > **NOTE**
+ >
+  > Before listening for state changes of all audio streams, the application must request the **ohos.permission.USE_BLUETOOTH** [permission](../security/accesstoken-guidelines.md) so that the device name and device address (Bluetooth-related attributes) can be displayed correctly.
+
+ ```ts
+ async function getCurrentAudioRendererInfoArray(){
+ await audioStreamManager.getCurrentAudioRendererInfoArray().then( function (AudioRendererChangeInfoArray) {
+ console.info(`getCurrentAudioRendererInfoArray Get Promise is called `);
+ if (AudioRendererChangeInfoArray != null) {
+ for (let i = 0; i < AudioRendererChangeInfoArray.length; i++) {
+ let AudioRendererChangeInfo = AudioRendererChangeInfoArray[i];
+ console.info(`StreamId for ${i} is: ${AudioRendererChangeInfo.streamId}`);
+ console.info(`Content ${i} is: ${AudioRendererChangeInfo.rendererInfo.content}`);
+ console.info(`Stream ${i} is: ${AudioRendererChangeInfo.rendererInfo.usage}`);
+ console.info(`Flag ${i} is: ${AudioRendererChangeInfo.rendererInfo.rendererFlags}`);
+          for (let j = 0; j < AudioRendererChangeInfo.deviceDescriptors.length; j++) {
+ console.info(`Id: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].id}`);
+ console.info(`Type: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].deviceType}`);
+ console.info(`Role: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].deviceRole}`);
+ console.info(`Name: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].name}`);
+ console.info(`Address: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].address}`);
+ console.info(`SampleRates: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].sampleRates[0]}`);
+ console.info(`ChannelCount ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].channelCounts[0]}`);
+ console.info(`ChannelMask: ${i} : ${AudioRendererChangeInfo.deviceDescriptors[j].channelMasks}`);
+ }
+ }
+ }
+ }).catch((err) => {
+ console.error(`Invoke getCurrentAudioRendererInfoArray failed, code is ${err.code}, message is ${err.message}`);
+ });
+ }
+ ```
diff --git a/en/application-dev/media/audio-playback.md b/en/application-dev/media/audio-playback.md
deleted file mode 100644
index 1c7953d32b8ecee4c0ff34e82ab8d13947ac9271..0000000000000000000000000000000000000000
--- a/en/application-dev/media/audio-playback.md
+++ /dev/null
@@ -1,243 +0,0 @@
-# Audio Playback Development
-
-## Introduction
-
-You can use audio playback APIs to convert audio data into audible analog signals and play the signals using output devices. You can also manage playback tasks. For example, you can control the playback and volume, obtain track information, and release resources.
-
-## Working Principles
-
-The following figures show the audio playback state transition and the interaction with external modules for audio playback.
-
-**Figure 1** Audio playback state transition
-
-
-
-**NOTE**: If the status is **Idle**, setting the **src** attribute does not change the status. In addition, after the **src** attribute is set successfully, you must call **reset()** before setting it to another value.
-
-
-
-**Figure 2** Interaction with external modules for audio playback
-
-
-
-**NOTE**: When a third-party application calls the JS interface provided by the JS interface layer to implement a feature, the framework layer invokes the audio component through the media service of the native framework and outputs the audio data decoded by the software to the audio HDI of the hardware interface layer to implement audio playback.
-
-## How to Develop
-
-For details about the APIs, see [AudioPlayer in the Media API](../reference/apis/js-apis-media.md#audioplayer).
-
-> **NOTE**
->
-> The method for obtaining the path in the FA model is different from that in the stage model. For details about how to obtain the path, see [Application Sandbox Path Guidelines](../reference/apis/js-apis-fileio.md#guidelines).
-
-### Full-Process Scenario
-
-The full audio playback process includes creating an instance, setting the URI, playing audio, seeking to the playback position, setting the volume, pausing playback, obtaining track information, stopping playback, resetting the player, and releasing resources.
-
-For details about the **src** types supported by **AudioPlayer**, see the [src attribute](../reference/apis/js-apis-media.md#audioplayer_attributes).
-
-```js
-import media from '@ohos.multimedia.media'
-import fs from '@ohos.file.fs'
-
-// Print the stream track information.
-function printfDescription(obj) {
- for (let item in obj) {
- let property = obj[item];
- console.info('audio key is ' + item);
- console.info('audio value is ' + property);
- }
-}
-
-// Set the player callbacks.
-function setCallBack(audioPlayer) {
- audioPlayer.on('dataLoad', () => { // Set the 'dataLoad' event callback, which is triggered when the src attribute is set successfully.
- console.info('audio set source success');
- audioPlayer.play(); // The play() API can be invoked only after the 'dataLoad' event callback is complete. The 'play' event callback is then triggered.
- });
- audioPlayer.on('play', () => { // Set the 'play' event callback.
- console.info('audio play success');
- audioPlayer.pause(); // Trigger the 'pause' event callback and pause the playback.
- });
- audioPlayer.on('pause', () => { // Set the 'pause' event callback.
- console.info('audio pause success');
- audioPlayer.seek(5000); // Trigger the 'timeUpdate' event callback, and seek to 5000 ms for playback.
- });
- audioPlayer.on('stop', () => { // Set the 'stop' event callback.
- console.info('audio stop success');
- audioPlayer.reset(); // Trigger the 'reset' event callback, and reconfigure the src attribute to switch to the next song.
- });
- audioPlayer.on('reset', () => { // Set the 'reset' event callback.
- console.info('audio reset success');
- audioPlayer.release(); // Release the AudioPlayer instance.
- audioPlayer = undefined;
- });
- audioPlayer.on('timeUpdate', (seekDoneTime) => { // Set the 'timeUpdate' event callback.
- if (typeof(seekDoneTime) == 'undefined') {
- console.info('audio seek fail');
- return;
- }
- console.info('audio seek success, and seek time is ' + seekDoneTime);
- audioPlayer.setVolume(0.5); // Trigger the 'volumeChange' event callback.
- });
- audioPlayer.on('volumeChange', () => { // Set the 'volumeChange' event callback.
- console.info('audio volumeChange success');
- audioPlayer.getTrackDescription((error, arrlist) => { // Obtain the audio track information in callback mode.
- if (typeof (arrlist) != 'undefined') {
- for (let i = 0; i < arrlist.length; i++) {
- printfDescription(arrlist[i]);
- }
- } else {
- console.log(`audio getTrackDescription fail, error:${error.message}`);
- }
- audioPlayer.stop(); // Trigger the 'stop' event callback to stop the playback.
- });
- });
- audioPlayer.on('finish', () => { // Set the 'finish' event callback, which is triggered when the playback is complete.
- console.info('audio play finish');
- });
- audioPlayer.on('error', (error) => { // Set the 'error' event callback.
- console.info(`audio error called, errName is ${error.name}`);
- console.info(`audio error called, errCode is ${error.code}`);
- console.info(`audio error called, errMessage is ${error.message}`);
- });
-}
-
-async function audioPlayerDemo() {
- // 1. Create an AudioPlayer instance.
- let audioPlayer = media.createAudioPlayer();
- setCallBack(audioPlayer); // Set the event callbacks.
- // 2. Set the URI of the audio file.
- let fdPath = 'fd://'
- let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements.
- // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\01.mp3 /data/app/el2/100/base/ohos.acts.multimedia.audio.audioplayer/haps/entry/files" command.
- let path = pathDir + '/01.mp3'
- let file = await fs.open(path);
- fdPath = fdPath + '' + file.fd;
- audioPlayer.src = fdPath; // Set the src attribute and trigger the 'dataLoad' event callback.
-}
-```
-
-### Normal Playback Scenario
-
-```js
-import media from '@ohos.multimedia.media'
-import fs from '@ohos.file.fs'
-
-export class AudioDemo {
- // Set the player callbacks.
- setCallBack(audioPlayer) {
- audioPlayer.on('dataLoad', () => { // Set the 'dataLoad' event callback, which is triggered when the src attribute is set successfully.
- console.info('audio set source success');
- audioPlayer.play(); // Call the play() API to start the playback and trigger the 'play' event callback.
- });
- audioPlayer.on('play', () => { // Set the 'play' event callback.
- console.info('audio play success');
- });
- audioPlayer.on('finish', () => { // Set the 'finish' event callback, which is triggered when the playback is complete.
- console.info('audio play finish');
- audioPlayer.release(); // Release the AudioPlayer instance.
- audioPlayer = undefined;
- });
- }
-
- async audioPlayerDemo() {
- let audioPlayer = media.createAudioPlayer(); // Create an AudioPlayer instance.
- this.setCallBack(audioPlayer); // Set the event callbacks.
- let fdPath = 'fd://'
- let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements.
- // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\01.mp3 /data/app/el2/100/base/ohos.acts.multimedia.audio.audioplayer/haps/entry/files" command.
- let path = pathDir + '/01.mp3'
- let file = await fs.open(path);
- fdPath = fdPath + '' + file.fd;
- audioPlayer.src = fdPath; // Set the src attribute and trigger the 'dataLoad' event callback.
- }
-}
-```
-
-### Switching to the Next Song
-
-```js
-import media from '@ohos.multimedia.media'
-import fs from '@ohos.file.fs'
-
-export class AudioDemo {
-// Set the player callbacks.
- private isNextMusic = false;
- setCallBack(audioPlayer) {
- audioPlayer.on('dataLoad', () => { // Set the 'dataLoad' event callback, which is triggered when the src attribute is set successfully.
- console.info('audio set source success');
- audioPlayer.play(); // Call the play() API to start the playback and trigger the 'play' event callback.
- });
- audioPlayer.on('play', () => { // Set the 'play' event callback.
- console.info('audio play success');
- audioPlayer.reset(); // Call the reset() API and trigger the 'reset' event callback.
- });
- audioPlayer.on('reset', () => { // Set the 'reset' event callback.
- console.info('audio play success');
- if (!this.isNextMusic) { // When isNextMusic is false, changing songs is implemented.
- this.nextMusic(audioPlayer); // Changing songs is implemented.
- } else {
- audioPlayer.release(); // Release the AudioPlayer instance.
- audioPlayer = undefined;
- }
- });
- }
-
- async nextMusic(audioPlayer) {
- this.isNextMusic = true;
- let nextFdPath = 'fd://'
- let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements.
- // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\02.mp3 /data/app/el2/100/base/ohos.acts.multimedia.audio.audioplayer/haps/entry/files" command.
- let nextpath = pathDir + '/02.mp3'
- let nextFile = await fs.open(nextpath);
- nextFdPath = nextFdPath + '' + nextFile.fd;
- audioPlayer.src = nextFdPath; // Set the src attribute and trigger the 'dataLoad' event callback.
- }
-
- async audioPlayerDemo() {
- let audioPlayer = media.createAudioPlayer(); // Create an AudioPlayer instance.
- this.setCallBack(audioPlayer); // Set the event callbacks.
- let fdPath = 'fd://'
- let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements.
- // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\01.mp3 /data/app/el2/100/base/ohos.acts.multimedia.audio.audioplayer/haps/entry/files" command.
- let path = pathDir + '/01.mp3'
- let file = await fs.open(path);
- fdPath = fdPath + '' + file.fd;
- audioPlayer.src = fdPath; // Set the src attribute and trigger the 'dataLoad' event callback.
- }
-}
-```
-
-### Looping a Song
-
-```js
-import media from '@ohos.multimedia.media'
-import fs from '@ohos.file.fs'
-
-export class AudioDemo {
- // Set the player callbacks.
- setCallBack(audioPlayer) {
- audioPlayer.on('dataLoad', () => { // Set the 'dataLoad' event callback, which is triggered when the src attribute is set successfully.
- console.info('audio set source success');
- audioPlayer.loop = true; // Set the loop playback attribute.
- audioPlayer.play(); // Call the play() API to start the playback and trigger the 'play' event callback.
- });
- audioPlayer.on('play', () => { // Set the 'play' event callback to start loop playback.
- console.info('audio play success');
- });
- }
-
- async audioPlayerDemo() {
- let audioPlayer = media.createAudioPlayer(); // Create an AudioPlayer instance.
- this.setCallBack(audioPlayer); // Set the event callbacks.
- let fdPath = 'fd://'
- let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements.
- // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\01.mp3 /data/app/el2/100/base/ohos.acts.multimedia.audio.audioplayer/haps/entry/files" command.
- let path = pathDir + '/01.mp3'
- let file = await fs.open(path);
- fdPath = fdPath + '' + file.fd;
- audioPlayer.src = fdPath; // Set the src attribute and trigger the 'dataLoad' event callback.
- }
-}
-```
diff --git a/en/application-dev/media/audio-recorder.md b/en/application-dev/media/audio-recorder.md
deleted file mode 100644
index 78650a61d0a803811394e623ab0bc46155438ba9..0000000000000000000000000000000000000000
--- a/en/application-dev/media/audio-recorder.md
+++ /dev/null
@@ -1,197 +0,0 @@
-# Audio Recording Development
-
-## Introduction
-
-During audio recording, audio signals are captured, encoded, and saved to files. You can specify parameters such as the sampling rate, number of audio channels, encoding format, encapsulation format, and output file path for audio recording.
-
-## Working Principles
-
-The following figures show the audio recording state transition and the interaction with external modules for audio recording.
-
-**Figure 1** Audio recording state transition
-
-
-
-
-
-**Figure 2** Interaction with external modules for audio recording
-
-
-
-**NOTE**: When a third-party recording application or recorder calls the JS interface provided by the JS interface layer to implement a feature, the framework layer invokes the audio component through the media service of the native framework to obtain the audio data captured through the audio HDI. The framework layer then encodes the audio data through software and saves the encoded and encapsulated audio data to a file to implement audio recording.
-
-## Constraints
-
-Before developing audio recording, configure the **ohos.permission.MICROPHONE** permission for your application. For details about the configuration, see [Permission Application Guide](../security/accesstoken-guidelines.md).
-
-## How to Develop
-
-For details about the APIs, see [AudioRecorder in the Media API](../reference/apis/js-apis-media.md#audiorecorder).
-
-### Full-Process Scenario
-
-The full audio recording process includes creating an instance, setting recording parameters, starting, pausing, resuming, and stopping recording, and releasing resources.
-
-```js
-import media from '@ohos.multimedia.media'
-import mediaLibrary from '@ohos.multimedia.mediaLibrary'
-export class AudioRecorderDemo {
- private testFdNumber; // Used to save the FD address.
-
- // Set the callbacks related to audio recording.
- setCallBack(audioRecorder) {
- audioRecorder.on('prepare', () => { // Set the prepare event callback.
- console.log('prepare success');
- audioRecorder.start(); // Call the start API to start recording and trigger the start event callback.
- });
- audioRecorder.on('start', () => { // Set the start event callback.
- console.log('audio recorder start success');
- audioRecorder.pause(); // Call the pause API to pause recording and trigger the pause event callback.
- });
- audioRecorder.on('pause', () => { // Set the pause event callback.
- console.log('audio recorder pause success');
- audioRecorder.resume(); // Call the resume API to resume recording and trigger the resume event callback.
- });
- audioRecorder.on('resume', () => { // Set the resume event callback.
- console.log('audio recorder resume success');
- audioRecorder.stop(); // Call the stop API to stop recording and trigger the stop event callback.
- });
- audioRecorder.on('stop', () => { // Set the stop event callback.
- console.log('audio recorder stop success');
- audioRecorder.reset(); // Call the reset API to reset the recorder and trigger the reset event callback.
- });
- audioRecorder.on('reset', () => { // Set the reset event callback.
- console.log('audio recorder reset success');
- audioRecorder.release(); // Call the release API to release resources and trigger the release event callback.
- });
- audioRecorder.on('release', () => { // Set the release event callback.
- console.log('audio recorder release success');
- audioRecorder = undefined;
- });
- audioRecorder.on('error', (error) => { // Set the error event callback.
- console.info(`audio error called, errName is ${error.name}`);
- console.info(`audio error called, errCode is ${error.code}`);
- console.info(`audio error called, errMessage is ${error.message}`);
- });
- }
-
- // pathName indicates the passed recording file name, for example, 01.mp3. The generated file address is /storage/media/100/local/files/Video/01.mp3.
- // To use the media library, declare the following permissions: ohos.permission.MEDIA_LOCATION, ohos.permission.WRITE_MEDIA, and ohos.permission.READ_MEDIA.
- async getFd(pathName) {
- let displayName = pathName;
- const mediaTest = mediaLibrary.getMediaLibrary();
- let fileKeyObj = mediaLibrary.FileKey;
- let mediaType = mediaLibrary.MediaType.VIDEO;
- let publicPath = await mediaTest.getPublicDirectory(mediaLibrary.DirectoryType.DIR_VIDEO);
- let dataUri = await mediaTest.createAsset(mediaType, displayName, publicPath);
- if (dataUri != undefined) {
- let args = dataUri.id.toString();
- let fetchOp = {
- selections : fileKeyObj.ID + "=?",
- selectionArgs : [args],
- }
- let fetchFileResult = await mediaTest.getFileAssets(fetchOp);
- let fileAsset = await fetchFileResult.getAllObject();
- let fdNumber = await fileAsset[0].open('Rw');
- this.testFdNumber = "fd://" + fdNumber.toString();
- }
- }
-
- async audioRecorderDemo() {
- // 1. Create an AudioRecorder instance.
- let audioRecorder = media.createAudioRecorder();
- // 2. Set the callbacks.
- this.setCallBack(audioRecorder);
- await this.getFd('01.mp3'); // Call the getFd method to obtain the FD address of the file to be recorded.
- // 3. Set the recording parameters.
- let audioRecorderConfig = {
- audioEncodeBitRate : 22050,
- audioSampleRate : 22050,
- numberOfChannels : 2,
- uri : this.testFdNumber, // testFdNumber is generated by getFd.
- location : { latitude : 30, longitude : 130},
- audioEncoderMime : media.CodecMimeType.AUDIO_AAC,
- fileFormat : media.ContainerFormatType.CFT_MPEG_4A,
- }
- audioRecorder.prepare(audioRecorderConfig); // Call the prepare method to trigger the prepare event callback.
- }
-}
-```
-
-### Normal Recording Scenario
-
-Unlike the full-process scenario, the normal recording scenario does not include the process of pausing and resuming recording.
-
-```js
-import media from '@ohos.multimedia.media'
-import mediaLibrary from '@ohos.multimedia.mediaLibrary'
-export class AudioRecorderDemo {
- private testFdNumber; // Used to save the FD address.
-
- // Set the callbacks related to audio recording.
- setCallBack(audioRecorder) {
- audioRecorder.on('prepare', () => { // Set the prepare event callback.
- console.log('prepare success');
- audioRecorder.start(); // Call the start API to start recording and trigger the start event callback.
- });
- audioRecorder.on('start', () => { // Set the start event callback.
- console.log('audio recorder start success');
- audioRecorder.stop(); // Call the stop API to stop recording and trigger the stop event callback.
- });
- audioRecorder.on('stop', () => { // Set the stop event callback.
- console.log('audio recorder stop success');
- audioRecorder.release(); // Call the release API to release resources and trigger the release event callback.
- });
- audioRecorder.on('release', () => { // Set the release event callback.
- console.log('audio recorder release success');
- audioRecorder = undefined;
- });
- audioRecorder.on('error', (error) => { // Set the error event callback.
- console.info(`audio error called, errName is ${error.name}`);
- console.info(`audio error called, errCode is ${error.code}`);
- console.info(`audio error called, errMessage is ${error.message}`);
- });
- }
-
- // pathName indicates the passed recording file name, for example, 01.mp3. The generated file address is /storage/media/100/local/files/Video/01.mp3.
- // To use the media library, declare the following permissions: ohos.permission.MEDIA_LOCATION, ohos.permission.WRITE_MEDIA, and ohos.permission.READ_MEDIA.
- async getFd(pathName) {
- let displayName = pathName;
- const mediaTest = mediaLibrary.getMediaLibrary();
- let fileKeyObj = mediaLibrary.FileKey;
- let mediaType = mediaLibrary.MediaType.VIDEO;
- let publicPath = await mediaTest.getPublicDirectory(mediaLibrary.DirectoryType.DIR_VIDEO);
- let dataUri = await mediaTest.createAsset(mediaType, displayName, publicPath);
- if (dataUri != undefined) {
- let args = dataUri.id.toString();
- let fetchOp = {
- selections : fileKeyObj.ID + "=?",
- selectionArgs : [args],
- }
- let fetchFileResult = await mediaTest.getFileAssets(fetchOp);
- let fileAsset = await fetchFileResult.getAllObject();
- let fdNumber = await fileAsset[0].open('Rw');
- this.testFdNumber = "fd://" + fdNumber.toString();
- }
- }
-
- async audioRecorderDemo() {
- // 1. Create an AudioRecorder instance.
- let audioRecorder = media.createAudioRecorder();
- // 2. Set the callbacks.
- this.setCallBack(audioRecorder);
- await this.getFd('01.mp3'); // Call the getFd method to obtain the FD address of the file to be recorded.
- // 3. Set the recording parameters.
- let audioRecorderConfig = {
- audioEncodeBitRate : 22050,
- audioSampleRate : 22050,
- numberOfChannels : 2,
- uri : this.testFdNumber, // testFdNumber is generated by getFd.
- location : { latitude : 30, longitude : 130},
- audioEncoderMime : media.CodecMimeType.AUDIO_AAC,
- fileFormat : media.ContainerFormatType.CFT_MPEG_4A,
- }
- audioRecorder.prepare(audioRecorderConfig); // Call the prepare method to trigger the prepare event callback.
- }
-}
-```
diff --git a/en/application-dev/media/audio-recording-overview.md b/en/application-dev/media/audio-recording-overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..698255fddd78d98f9e635b16b3db94e6980bd4a0
--- /dev/null
+++ b/en/application-dev/media/audio-recording-overview.md
@@ -0,0 +1,17 @@
+# Audio Recording Development
+
+## Selecting an Audio Recording Development Mode
+
+OpenHarmony provides multiple classes for you to develop audio recording applications. You can select them based on the recording output formats, audio usage scenarios, and even the programming language you use. Selecting a suitable class helps reduce your development workload and enables your application to deliver a better recording experience.
+
+- [AVRecorder](using-avrecorder-for-recording.md): provides ArkTS and JS APIs to implement audio and video recording. It also supports audio input, audio encoding, and media encapsulation. It can directly drive device hardware, such as the microphone, to record and generate M4A audio files. (A minimal usage sketch follows this list.)
+
+- [AudioCapturer](using-audiocapturer-for-recording.md): provides ArkTS and JS APIs to implement audio input. It supports only the PCM format and requires applications to continuously read audio data. Applications can process the data after it is read. This class can be used to develop more professional and diverse recording applications. To use this class, you must have basic audio processing knowledge.
+
+- [OpenSLES](using-opensl-es-for-recording.md): provides a set of standard, cross-platform, yet unique native audio APIs. It supports audio input in PCM format and is applicable to recording applications that are ported from other embedded platforms or that implement audio input at the native layer.
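+
+As a quick taste of the AVRecorder mode, the sketch below prepares and starts a microphone recording. It is a minimal sketch under assumptions: the fd URL is hypothetical and must point to a writable file, the **ohos.permission.MICROPHONE** permission has been granted, and error handling is omitted. For the full flow, see [AVRecorder](using-avrecorder-for-recording.md).
+
+```ts
+import media from '@ohos.multimedia.media';
+
+// A minimal sketch: create an AVRecorder, prepare it for AAC/M4A recording, and start recording.
+async function quickRecordDemo() {
+  let avRecorder = await media.createAVRecorder();
+  await avRecorder.prepare({
+    audioSourceType: media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC,
+    profile: {
+      audioBitrate: 100000,
+      audioChannels: 2,
+      audioCodec: media.CodecMimeType.AUDIO_AAC,
+      audioSampleRate: 48000,
+      fileFormat: media.ContainerFormatType.CFT_MPEG_4A
+    },
+    url: 'fd://100' // Hypothetical fd; open a writable file and use its descriptor.
+  });
+  await avRecorder.start();
+}
+```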
+
+## Precautions for Developing Audio Recording Applications
+
+The application must request the **ohos.permission.MICROPHONE** permission from the user before invoking the microphone to record audio.
+
+For details about how to request the permission, see [Permission Application Guide](../security/accesstoken-guidelines.md). For details about how to use and manage microphones, see [Microphone Management](mic-management.md).
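+
+A minimal sketch of the permission request, assuming the permission is already declared in the module configuration and **context** is the UIAbility context:
+
+```ts
+import abilityAccessCtrl from '@ohos.abilityAccessCtrl';
+
+async function requestMicrophonePermission(context) {
+  let atManager = abilityAccessCtrl.createAtManager();
+  let result = await atManager.requestPermissionsFromUser(context, ['ohos.permission.MICROPHONE']);
+  // authResults[0] is 0 if the user grants the permission.
+  console.info(`MICROPHONE grant result: ${result.authResults[0]}`);
+}
+```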
diff --git a/en/application-dev/media/audio-recording-stream-management.md b/en/application-dev/media/audio-recording-stream-management.md
new file mode 100644
index 0000000000000000000000000000000000000000..8161d1bd5bbe5fbc55560ab557570baaaa99976a
--- /dev/null
+++ b/en/application-dev/media/audio-recording-stream-management.md
@@ -0,0 +1,118 @@
+# Audio Recording Stream Management
+
+An audio recording application must be aware of audio stream state changes and perform corresponding operations. For example, when detecting that the user stops recording, the application must notify the user that the recording has finished.
+
+## Reading or Listening for Audio Stream State Changes in the Application
+
+Create an AudioCapturer by referring to [Using AudioCapturer for Audio Recording](using-audiocapturer-for-recording.md) or [audio.createAudioCapturer](../reference/apis/js-apis-audio.md#audiocreateaudiocapturer8). Then obtain the audio stream state changes in either of the following ways:
+
+- Check the [state](../reference/apis/js-apis-audio.md#attributes) of the AudioCapturer.
+
+ ```ts
+ let audioCapturerState = audioCapturer.state;
+  console.info(`Current state is: ${audioCapturerState}`);
+ ```
+
+- Register **stateChange** to listen for state changes of the AudioCapturer.
+
+ ```ts
+ audioCapturer.on('stateChange', (capturerState) => {
+ console.info(`State change to: ${capturerState}`)
+ });
+ ```
+
+The application then performs an operation, for example, displaying a message indicating the end of the recording, by comparing the obtained state with [AudioState](../reference/apis/js-apis-audio.md#audiostate8).
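+
+For example, a minimal sketch that notifies the user when the recording stops (the notification itself is hypothetical application logic):
+
+```ts
+audioCapturer.on('stateChange', (capturerState) => {
+  if (capturerState === audio.AudioState.STATE_STOPPED) {
+    // Recording has finished; for example, show a "recording saved" message.
+  }
+});
+```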
+
+## Reading or Listening for Changes in All Audio Streams
+
+If an application needs to obtain the change information about all audio streams, it can use **AudioStreamManager** to read or listen for the changes of all audio streams.
+
+> **NOTE**
+>
+> Audio stream change information marked as a system API can be viewed only by system applications.
+
+The figure below shows the call relationship of audio stream management.
+
+
+
+During application development, first use **getStreamManager()** to create an **AudioStreamManager** instance. Then call **on('audioCapturerChange')** to listen for audio stream changes and receive a notification when the audio stream state or device changes. To cancel listening for these changes, call **off('audioCapturerChange')**. You can also call **getCurrentAudioCapturerInfoArray()** to obtain information such as the unique ID of the recording stream, the UID of the recording stream client, and the stream status.
+
+For details about the APIs, see [AudioStreamManager](../reference/apis/js-apis-audio.md#audiostreammanager9).
+
+
+## How to Develop
+
+1. Create an **AudioStreamManager** instance.
+
+ Before using **AudioStreamManager** APIs, you must use **getStreamManager()** to create an **AudioStreamManager** instance.
+
+ ```ts
+ import audio from '@ohos.multimedia.audio';
+ let audioManager = audio.getAudioManager();
+ let audioStreamManager = audioManager.getStreamManager();
+ ```
+
+2. Use **on('audioCapturerChange')** to listen for audio recording stream changes. If the application needs to receive a notification when the audio recording stream state or device changes, it can subscribe to this event.
+
+ ```ts
+ audioStreamManager.on('audioCapturerChange', (AudioCapturerChangeInfoArray) => {
+ for (let i = 0; i < AudioCapturerChangeInfoArray.length; i++) {
+ console.info(`## CapChange on is called for element ${i} ##`);
+ console.info(`StreamId for ${i} is: ${AudioCapturerChangeInfoArray[i].streamId}`);
+ console.info(`Source for ${i} is: ${AudioCapturerChangeInfoArray[i].capturerInfo.source}`);
+ console.info(`Flag ${i} is: ${AudioCapturerChangeInfoArray[i].capturerInfo.capturerFlags}`);
+ let devDescriptor = AudioCapturerChangeInfoArray[i].deviceDescriptors;
+ for (let j = 0; j < AudioCapturerChangeInfoArray[i].deviceDescriptors.length; j++) {
+ console.info(`Id: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].id}`);
+ console.info(`Type: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceType}`);
+ console.info(`Role: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceRole}`);
+ console.info(`Name: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].name}`);
+ console.info(`Address: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].address}`);
+ console.info(`SampleRates: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].sampleRates[0]}`);
+ console.info(`ChannelCounts ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelCounts[0]}`);
+ console.info(`ChannelMask: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelMasks}`);
+ }
+ }
+ });
+ ```
+
+3. (Optional) Use **off('audioCapturerChange')** to cancel listening for audio recording stream changes.
+
+ ```ts
+ audioStreamManager.off('audioCapturerChange');
+ console.info('CapturerChange Off is called');
+ ```
+
+4. (Optional) Call **getCurrentAudioCapturerInfoArray()** to obtain information about the current audio recording stream.
+
+ This API can be used to obtain the unique ID of the audio recording stream, UID of the audio recording client, audio status, and other information about the AudioCapturer.
+ > **NOTE**
+ >
+  > Before listening for state changes of all audio streams, the application must request the **ohos.permission.USE_BLUETOOTH** [permission](../security/accesstoken-guidelines.md) so that the device name and device address (Bluetooth-related attributes) can be displayed correctly.
+
+ ```ts
+ async function getCurrentAudioCapturerInfoArray(){
+ await audioStreamManager.getCurrentAudioCapturerInfoArray().then( function (AudioCapturerChangeInfoArray) {
+ console.info('getCurrentAudioCapturerInfoArray Get Promise Called ');
+ if (AudioCapturerChangeInfoArray != null) {
+ for (let i = 0; i < AudioCapturerChangeInfoArray.length; i++) {
+ console.info(`StreamId for ${i} is: ${AudioCapturerChangeInfoArray[i].streamId}`);
+ console.info(`Source for ${i} is: ${AudioCapturerChangeInfoArray[i].capturerInfo.source}`);
+ console.info(`Flag ${i} is: ${AudioCapturerChangeInfoArray[i].capturerInfo.capturerFlags}`);
+ for (let j = 0; j < AudioCapturerChangeInfoArray[i].deviceDescriptors.length; j++) {
+ console.info(`Id: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].id}`);
+ console.info(`Type: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceType}`);
+ console.info(`Role: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceRole}`);
+ console.info(`Name: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].name}`);
+ console.info(`Address: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].address}`);
+ console.info(`SampleRates: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].sampleRates[0]}`);
+ console.info(`ChannelCounts ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelCounts[0]}`);
+ console.info(`ChannelMask: ${i} : ${AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelMasks}`);
+ }
+ }
+ }
+ }).catch((err) => {
+ console.error(`Invoke getCurrentAudioCapturerInfoArray failed, code is ${err.code}, message is ${err.message}`);
+ });
+ }
+ ```
diff --git a/en/application-dev/media/audio-renderer.md b/en/application-dev/media/audio-renderer.md
deleted file mode 100644
index 4a39544e7483b68d0bc15b00d643c8403dbded46..0000000000000000000000000000000000000000
--- a/en/application-dev/media/audio-renderer.md
+++ /dev/null
@@ -1,538 +0,0 @@
-# Audio Rendering Development
-
-## Introduction
-
-**AudioRenderer** provides APIs for rendering audio files and controlling playback. It also supports audio interruption. You can use the APIs provided by **AudioRenderer** to play audio files in output devices and manage playback tasks.
-Before calling the APIs, be familiar with the following terms:
-
-- **Audio interruption**: When an audio stream with a higher priority needs to be played, the audio renderer interrupts the stream with a lower priority. For example, if a call comes in when the user is listening to music, the music playback, which is the lower priority stream, is paused.
-- **Status check**: During application development, you are advised to use **on('stateChange')** to subscribe to state changes of the **AudioRenderer** instance. This is because some operations can be performed only when the audio renderer is in a given state. If the application performs an operation when the audio renderer is not in the given state, the system may throw an exception or generate other undefined behavior.
-- **Asynchronous operation**: To prevent the UI thread from being blocked, most **AudioRenderer** calls are asynchronous. Each API provides the callback and promise functions. The following examples use the promise functions. For more information, see [AudioRenderer in Audio Management](../reference/apis/js-apis-audio.md#audiorenderer8).
-- **Audio interruption mode**: OpenHarmony provides two audio interruption modes: **shared mode** and **independent mode**. In shared mode, all **AudioRenderer** instances created by the same application share one focus object, and there is no focus transfer inside the application. Therefore, no callback will be triggered. In independent mode, each **AudioRenderer** instance has an independent focus object, and focus transfer is triggered by focus preemption. When focus transfer occurs, the **AudioRenderer** instance that is having the focus receives a notification through the callback. By default, the shared mode is used. You can call **setInterruptMode()** to switch to the independent mode.
-
-## Working Principles
-
-The following figure shows the audio renderer state transitions.
-
-**Figure 1** Audio renderer state transitions
-
-
-
-- **PREPARED**: The audio renderer enters this state by calling **create()**.
-
-- **RUNNING**: The audio renderer enters this state by calling **start()** when it is in the **PREPARED** state or by calling **start()** when it is in the **STOPPED** state.
-
-- **PAUSED**: The audio renderer enters this state by calling **pause()** when it is in the **RUNNING** state. When the audio playback is paused, it can call **start()** to resume the playback.
-
-- **STOPPED**: The audio renderer enters this state by calling **stop()** when it is in the **PAUSED** or **RUNNING** state.
-
-- **RELEASED**: The audio renderer enters this state by calling **release()** when it is in the **PREPARED**, **PAUSED**, or **STOPPED** state. In this state, the audio renderer releases all occupied hardware and software resources and will not transit to any other state.
-
-## How to Develop
-
-For details about the APIs, see [AudioRenderer in Audio Management](../reference/apis/js-apis-audio.md#audiorenderer8).
-
-1. Use **createAudioRenderer()** to create an **AudioRenderer** instance.
-
- Set parameters of the **AudioRenderer** instance in **audioRendererOptions**. This instance is used to render audio, control and obtain the rendering status, and register a callback for notification.
-
- ```js
- import audio from '@ohos.multimedia.audio';
-
- let audioStreamInfo = {
- samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
- channels: audio.AudioChannel.CHANNEL_1,
- sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
- encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
- }
- let audioRendererInfo = {
- content: audio.ContentType.CONTENT_TYPE_SPEECH,
- usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION,
- rendererFlags: 0 // 0 is the extended flag bit of the audio renderer. The default value is 0.
- }
- let audioRendererOptions = {
- streamInfo: audioStreamInfo,
- rendererInfo: audioRendererInfo
- }
-
- let audioRenderer = await audio.createAudioRenderer(audioRendererOptions);
- console.log("Create audio renderer success.");
- ```
-
-2. Use **start()** to start audio rendering.
-
- ```js
- async function startRenderer() {
- let state = audioRenderer.state;
- // The audio renderer should be in the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state when start() is called.
- if (state != audio.AudioState.STATE_PREPARED && state != audio.AudioState.STATE_PAUSED &&
- state != audio.AudioState.STATE_STOPPED) {
- console.info('Renderer is not in a correct state to start');
- return;
- }
-
- await audioRenderer.start();
-
- state = audioRenderer.state;
- if (state == audio.AudioState.STATE_RUNNING) {
- console.info('Renderer started');
- } else {
- console.error('Renderer start failed');
- }
- }
- ```
- The renderer state will be **STATE_RUNNING** once the audio renderer is started. The application can then begin reading buffers.
-
-
-3. Call **write()** to write data to the buffer.
-
- Read the audio data to be played to the buffer. Call **write()** repeatedly to write the data to the buffer.
-
- ```js
- import fs from '@ohos.file.fs';
- import audio from '@ohos.multimedia.audio';
-
- async function writeBuffer(buf) {
- // The write operation can be performed only when the state is STATE_RUNNING.
- if (audioRenderer.state != audio.AudioState.STATE_RUNNING) {
- console.error('Renderer is not running, do not write');
- return;
- }
- let writtenbytes = await audioRenderer.write(buf);
- console.info(`Actual written bytes: ${writtenbytes} `);
- if (writtenbytes < 0) {
- console.error('Write buffer failed. check the state of renderer');
- }
- }
-
- // Set a proper buffer size for the audio renderer. You can also select a buffer of another size.
- const bufferSize = await audioRenderer.getBufferSize();
- let dir = globalThis.fileDir; // You must use the sandbox path.
- const filePath = dir + '/file_example_WAV_2MG.wav'; // The file to render is in the following path: /data/storage/el2/base/haps/entry/files/file_example_WAV_2MG.wav
- console.info(`file filePath: ${ filePath}`);
-
- let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
- let stat = await fs.stat(filePath); // Music file information.
- let buf = new ArrayBuffer(bufferSize);
- let len = stat.size % this.bufferSize == 0 ? Math.floor(stat.size / this.bufferSize) : Math.floor(stat.size / this.bufferSize + 1);
- for (let i = 0;i < len; i++) {
- let options = {
- offset: i * this.bufferSize,
- length: this.bufferSize
- }
- let readsize = await fs.read(file.fd, buf, options)
- let writeSize = await new Promise((resolve,reject)=>{
- this.audioRenderer.write(buf,(err,writeSize)=>{
- if(err){
- reject(err)
- }else{
- resolve(writeSize)
- }
- })
- })
- }
-
- fs.close(file)
- await audioRenderer.stop(); // Stop rendering.
- await audioRenderer.release(); // Releases the resources.
- ```
-
-4. (Optional) Call **pause()** or **stop()** to pause or stop rendering.
-
- ```js
- async function pauseRenderer() {
- let state = audioRenderer.state;
- // The audio renderer can be paused only when it is in the STATE_RUNNING state.
- if (state != audio.AudioState.STATE_RUNNING) {
- console.info('Renderer is not running');
- return;
- }
-
- await audioRenderer.pause();
-
- state = audioRenderer.state;
- if (state == audio.AudioState.STATE_PAUSED) {
- console.info('Renderer paused');
- } else {
- console.error('Renderer pause failed');
- }
- }
-
- async function stopRenderer() {
- let state = audioRenderer.state;
- // The audio renderer can be stopped only when it is in STATE_RUNNING or STATE_PAUSED state.
- if (state != audio.AudioState.STATE_RUNNING && state != audio.AudioState.STATE_PAUSED) {
- console.info('Renderer is not running or paused');
- return;
- }
-
- await audioRenderer.stop();
-
- state = audioRenderer.state;
- if (state == audio.AudioState.STATE_STOPPED) {
- console.info('Renderer stopped');
- } else {
- console.error('Renderer stop failed');
- }
- }
- ```
-
-5. (Optional) Call **drain()** to clear the buffer.
-
- ```js
- async function drainRenderer() {
- let state = audioRenderer.state;
- // drain() can be used only when the audio renderer is in the STATE_RUNNING state.
- if (state != audio.AudioState.STATE_RUNNING) {
- console.info('Renderer is not running');
- return;
- }
-
- await audioRenderer.drain();
- state = audioRenderer.state;
- }
- ```
-
-6. After the task is complete, call **release()** to release related resources.
-
- **AudioRenderer** uses a large number of system resources. Therefore, ensure that the resources are released after the task is complete.
-
- ```js
- async function releaseRenderer() {
- let state = audioRenderer.state;
- // The audio renderer can be released only when it is not in the STATE_RELEASED or STATE_NEW state.
- if (state == audio.AudioState.STATE_RELEASED || state == audio.AudioState.STATE_NEW) {
- console.info('Renderer already released');
- return;
- }
- await audioRenderer.release();
-
- state = audioRenderer.state;
- if (state == audio.AudioState.STATE_RELEASED) {
- console.info('Renderer released');
- } else {
- console.info('Renderer release failed');
- }
- }
- ```
-
-7. (Optional) Obtain the audio renderer information.
-
- You can use the following code to obtain the audio renderer information:
-
- ```js
- // Obtain the audio renderer state.
- let state = audioRenderer.state;
-
- // Obtain the audio renderer information.
- let audioRendererInfo : audio.AudioRendererInfo = await audioRenderer.getRendererInfo();
-
- // Obtain the audio stream information.
- let audioStreamInfo : audio.AudioStreamInfo = await audioRenderer.getStreamInfo();
-
- // Obtain the audio stream ID.
- let audioStreamId : number = await audioRenderer.getAudioStreamId();
-
- // Obtain the Unix timestamp, in nanoseconds.
- let audioTime : number = await audioRenderer.getAudioTime();
-
- // Obtain a proper minimum buffer size.
- let bufferSize : number = await audioRenderer.getBufferSize();
-
- // Obtain the audio renderer rate.
- let renderRate : audio.AudioRendererRate = await audioRenderer.getRenderRate();
- ```
-
-8. (Optional) Set the audio renderer information.
-
- You can use the following code to set the audio renderer information:
-
- ```js
- // Set the audio renderer rate to RENDER_RATE_NORMAL.
- let renderRate : audio.AudioRendererRate = audio.AudioRendererRate.RENDER_RATE_NORMAL;
- await audioRenderer.setRenderRate(renderRate);
-
- // Set the interruption mode of the audio renderer to SHARE_MODE.
- let interruptMode : audio.InterruptMode = audio.InterruptMode.SHARE_MODE;
- await audioRenderer.setInterruptMode(interruptMode);
-
- // Set the volume of the stream to 0.5.
- let volume : number = 0.5;
- await audioRenderer.setVolume(volume);
- ```
-
-9. (Optional) Use **on('audioInterrupt')** to subscribe to the audio interruption event, and use **off('audioInterrupt')** to unsubscribe from the event.
-
- Audio interruption means that Stream A will be interrupted when Stream B with a higher or equal priority requests to become active and use the output device.
-
- In some cases, the audio renderer performs forcible operations such as pausing and ducking, and notifies the application through **InterruptEvent**. In other cases, the application can choose to act on the **InterruptEvent** or ignore it.
-
- In the case of audio interruption, the application may encounter write failures. To avoid such failures, interruption-unaware applications can use **audioRenderer.state** to check the audio renderer state before writing audio data. The applications can obtain more details by subscribing to the audio interruption events. For details, see [InterruptEvent](../reference/apis/js-apis-audio.md#interruptevent9).
-
-   It should be noted that the audio interruption event subscription of the **AudioRenderer** module is slightly different from **on('interrupt')** in [AudioManager](../reference/apis/js-apis-audio.md#audiomanager). The **on('interrupt')** and **off('interrupt')** APIs are deprecated since API version 9. In the **AudioRenderer** module, you only need to call **on('audioInterrupt')** to listen for focus change events. When the **AudioRenderer** instance created by the application performs actions such as start, stop, and pause, it requests the focus, which triggers focus transfer; the related **AudioRenderer** instance then receives a notification through the callback. For scenarios other than **AudioRenderer**, such as frequency modulation (FM) and voice wakeup, the application does not create an instance. In such cases, the application can call **on('interrupt')** in **AudioManager** to receive a focus change notification.
-
- ```js
- audioRenderer.on('audioInterrupt', (interruptEvent) => {
- console.info('InterruptEvent Received');
- console.info(`InterruptType: ${interruptEvent.eventType}`);
- console.info(`InterruptForceType: ${interruptEvent.forceType}`);
-      console.info(`InterruptHint: ${interruptEvent.hintType}`);
-
- if (interruptEvent.forceType == audio.InterruptForceType.INTERRUPT_FORCE) {
- switch (interruptEvent.hintType) {
- // Forcible pausing initiated by the audio framework. To prevent data loss, stop the write operation.
- case audio.InterruptHint.INTERRUPT_HINT_PAUSE:
- isPlay = false;
- break;
- // Forcible stopping initiated by the audio framework. To prevent data loss, stop the write operation.
- case audio.InterruptHint.INTERRUPT_HINT_STOP:
- isPlay = false;
- break;
- // Forcible ducking initiated by the audio framework.
- case audio.InterruptHint.INTERRUPT_HINT_DUCK:
- break;
-        // Unducking initiated by the audio framework.
- case audio.InterruptHint.INTERRUPT_HINT_UNDUCK:
- break;
- }
- } else if (interruptEvent.forceType == audio.InterruptForceType.INTERRUPT_SHARE) {
- switch (interruptEvent.hintType) {
- // Notify the application that the rendering starts.
- case audio.InterruptHint.INTERRUPT_HINT_RESUME:
- startRenderer();
- break;
- // Notify the application that the audio stream is interrupted. The application then determines whether to continue. (In this example, the application pauses the rendering.)
- case audio.InterruptHint.INTERRUPT_HINT_PAUSE:
- isPlay = false;
- pauseRenderer();
- break;
- }
- }
- });
-
- audioRenderer.off('audioInterrupt'); // Unsubscribe from the audio interruption event. This event will no longer be listened for.
- ```
-
-10. (Optional) Use **on('markReach')** to subscribe to the mark reached event, and use **off('markReach')** to unsubscribe from the event.
-
- After the mark reached event is subscribed to, when the number of frames rendered by the audio renderer reaches the specified value, a callback is triggered and the specified value is returned.
-
- ```js
- audioRenderer.on('markReach', (reachNumber) => {
- console.info('Mark reach event Received');
- console.info(`The renderer reached frame: ${reachNumber}`);
- });
-
- audioRenderer.off('markReach'); // Unsubscribe from the mark reached event. This event will no longer be listened for.
- ```
-
-11. (Optional) Use **on('periodReach')** to subscribe to the period reached event, and use **off('periodReach')** to unsubscribe from the event.
-
- After the period reached event is subscribed to, each time the number of frames rendered by the audio renderer reaches the specified value, a callback is triggered and the specified value is returned.
-
- ```js
- audioRenderer.on('periodReach', (reachNumber) => {
- console.info('Period reach event Received');
- console.info(`In this period, the renderer reached frame: ${reachNumber} `);
- });
-
- audioRenderer.off('periodReach'); // Unsubscribe from the period reached event. This event will no longer be listened for.
- ```
-
-12. (Optional) Use **on('stateChange')** to subscribe to audio renderer state changes.
-
- After the **stateChange** event is subscribed to, when the audio renderer state changes, a callback is triggered and the audio renderer state is returned.
-
- ```js
- audioRenderer.on('stateChange', (audioState) => {
- console.info('State change event Received');
- console.info(`Current renderer state is: ${audioState}`);
- });
- ```
-
-13. (Optional) Handle exceptions of **on()**.
-
-    If the event string or the parameter type passed to **on()** is incorrect, an exception is thrown. In this case, use **try...catch** to capture the exception.
-
- ```js
- try {
- audioRenderer.on('invalidInput', () => { // The string is invalid.
- })
- } catch (err) {
-      console.error(`Call on function error, ${err}`); // The application throws exception 401.
- }
- try {
- audioRenderer.on(1, () => { // The type of the input parameter is incorrect.
- })
- } catch (err) {
-      console.error(`Call on function error, ${err}`); // The application throws exception 6800101.
- }
- ```
-
-14. (Optional) Refer to the complete example of **on('audioInterrupt')**.
-
-    Create **audioRenderer1** and **audioRenderer2** in an application, set both to the independent interruption mode, and call **on('audioInterrupt')** to subscribe to audio interruption events. At the beginning, **audioRenderer1** has the focus. When **audioRenderer2** attempts to obtain the focus, **audioRenderer1** receives a focus transfer notification, and the related log information is printed. If the shared mode is used, no such log is printed while the application is running.
-
- ```js
- async runningAudioRender1(){
- let audioStreamInfo = {
- samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000,
- channels: audio.AudioChannel.CHANNEL_1,
- sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S32LE,
- encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
- }
- let audioRendererInfo = {
- content: audio.ContentType.CONTENT_TYPE_MUSIC,
- usage: audio.StreamUsage.STREAM_USAGE_MEDIA,
- rendererFlags: 0 // 0 is the extended flag bit of the audio renderer. The default value is 0.
- }
- let audioRendererOptions = {
- streamInfo: audioStreamInfo,
- rendererInfo: audioRendererInfo
- }
-
- // 1.1 Create an instance.
- audioRenderer1 = await audio.createAudioRenderer(audioRendererOptions);
- console.info("Create audio renderer 1 success.");
-
- // 1.2 Set the independent mode.
-    audioRenderer1.setInterruptMode(audio.InterruptMode.INDEPENDENT_MODE).then(data => {
- console.info('audioRenderer1 setInterruptMode Success!');
- }).catch((err) => {
- console.error(`audioRenderer1 setInterruptMode Fail: ${err}`);
- });
-
- // 1.3 Set the listener.
- audioRenderer1.on('audioInterrupt', async(interruptEvent) => {
- console.info(`audioRenderer1 on audioInterrupt : ${JSON.stringify(interruptEvent)}`)
- });
-
- // 1.4 Start rendering.
- await audioRenderer1.start();
- console.info('startAudioRender1 success');
-
- // 1.5 Obtain the buffer size, which is the proper minimum buffer size of the audio renderer. You can also select a buffer of another size.
- const bufferSize = await audioRenderer1.getBufferSize();
- console.info(`audio bufferSize: ${bufferSize}`);
-
- // 1.6 Obtain the original audio data file.
- let dir = globalThis.fileDir; // You must use the sandbox path.
- const path1 = dir + '/music001_48000_32_1.wav'; // The file to render is in the following path: /data/storage/el2/base/haps/entry/files/music001_48000_32_1.wav
-    console.info(`audioRender1 file path: ${path1}`);
- let file1 = fs.openSync(path1, fs.OpenMode.READ_ONLY);
- let stat = await fs.stat(path1); // Music file information.
- let buf = new ArrayBuffer(bufferSize);
-    let len = stat.size % bufferSize == 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);
-
-    // 1.7 Render the original audio data in the buffer by using audioRenderer1.
-    for (let i = 0; i < len; i++) {
-      let options = {
-        offset: i * bufferSize,
-        length: bufferSize
-      }
-      let readSize = await fs.read(file1.fd, buf, options);
-      let writeSize = await new Promise((resolve, reject) => {
-        audioRenderer1.write(buf, (err, writeSize) => {
-          if (err) {
-            reject(err)
-          } else {
-            resolve(writeSize)
-          }
-        })
-      })
-    }
-    fs.close(file1);
-    await audioRenderer1.stop(); // Stop rendering.
-    await audioRenderer1.release(); // Release the resources.
- }
-
- async runningAudioRender2(){
- let audioStreamInfo = {
- samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000,
- channels: audio.AudioChannel.CHANNEL_1,
- sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S32LE,
- encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
- }
- let audioRendererInfo = {
- content: audio.ContentType.CONTENT_TYPE_MUSIC,
- usage: audio.StreamUsage.STREAM_USAGE_MEDIA,
- rendererFlags: 0 // 0 is the extended flag bit of the audio renderer. The default value is 0.
- }
- let audioRendererOptions = {
- streamInfo: audioStreamInfo,
- rendererInfo: audioRendererInfo
- }
-
- // 2.1 Create another instance.
- audioRenderer2 = await audio.createAudioRenderer(audioRendererOptions);
- console.info("Create audio renderer 2 success.");
-
- // 2.2 Set the independent mode.
-    audioRenderer2.setInterruptMode(audio.InterruptMode.INDEPENDENT_MODE).then(data => {
- console.info('audioRenderer2 setInterruptMode Success!');
- }).catch((err) => {
- console.error(`audioRenderer2 setInterruptMode Fail: ${err}`);
- });
-
- // 2.3 Set the listener.
- audioRenderer2.on('audioInterrupt', async(interruptEvent) => {
- console.info(`audioRenderer2 on audioInterrupt : ${JSON.stringify(interruptEvent)}`)
- });
-
- // 2.4 Start rendering.
- await audioRenderer2.start();
- console.info('startAudioRender2 success');
-
- // 2.5 Obtain the buffer size.
- const bufferSize = await audioRenderer2.getBufferSize();
- console.info(`audio bufferSize: ${bufferSize}`);
-
- // 2.6 Read the original audio data file.
- let dir = globalThis.fileDir; // You must use the sandbox path.
- const path2 = dir + '/music002_48000_32_1.wav'; // The file to render is in the following path: /data/storage/el2/base/haps/entry/files/music002_48000_32_1.wav
-    console.info(`audioRender2 file path: ${path2}`);
- let file2 = fs.openSync(path2, fs.OpenMode.READ_ONLY);
- let stat = await fs.stat(path2); // Music file information.
- let buf = new ArrayBuffer(bufferSize);
-    let len = stat.size % bufferSize == 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);
-
-    // 2.7 Render the original audio data in the buffer by using audioRenderer2.
-    for (let i = 0; i < len; i++) {
-      let options = {
-        offset: i * bufferSize,
-        length: bufferSize
-      }
-      let readSize = await fs.read(file2.fd, buf, options);
-      let writeSize = await new Promise((resolve, reject) => {
-        audioRenderer2.write(buf, (err, writeSize) => {
-          if (err) {
-            reject(err)
-          } else {
-            resolve(writeSize)
-          }
-        })
-      })
-    }
-    fs.close(file2);
-    await audioRenderer2.stop(); // Stop rendering.
-    await audioRenderer2.release(); // Release the resources.
- }
-
- async writeBuffer(buf, audioRender) {
- let writtenbytes;
- await audioRender.write(buf).then((value) => {
- writtenbytes = value;
- console.info(`Actual written bytes: ${writtenbytes} `);
- });
- if (typeof(writtenbytes) != 'number' || writtenbytes < 0) {
- console.error('get Write buffer failed. check the state of renderer');
- }
- }
-
- // Integrated invoking entry.
- async test(){
- await runningAudioRender1();
- await runningAudioRender2();
- }
-
- ```
diff --git a/en/application-dev/media/audio-routing-manager.md b/en/application-dev/media/audio-routing-manager.md
deleted file mode 100644
index 55febdca0fad968d946601fce4faed99bc148dd2..0000000000000000000000000000000000000000
--- a/en/application-dev/media/audio-routing-manager.md
+++ /dev/null
@@ -1,111 +0,0 @@
-# Audio Routing and Device Management Development
-
-## Overview
-
-The **AudioRoutingManager** module provides APIs for audio routing and device management. You can use the APIs to obtain the current input and output audio devices, listen for connection status changes of audio devices, and activate communication devices.
-
-## Working Principles
-
-The figure below shows the common APIs provided by the **AudioRoutingManager** module.
-
-**Figure 1** Common APIs of AudioRoutingManager
-
-
-
-You can use these APIs to obtain the device list, subscribe to or unsubscribe from device connection status changes, activate communication devices, and obtain their activation status. For details, see [Audio Management](../reference/apis/js-apis-audio.md).
-
-
-## How to Develop
-
-For details about the APIs, see [AudioRoutingManager in Audio Management](../reference/apis/js-apis-audio.md#audioroutingmanager9).
-
-1. Obtain an **AudioRoutingManager** instance.
-
- Before using an API in **AudioRoutingManager**, you must use **getRoutingManager()** to obtain an **AudioRoutingManager** instance.
-
- ```js
- import audio from '@ohos.multimedia.audio';
- async loadAudioRoutingManager() {
- var audioRoutingManager = await audio.getAudioManager().getRoutingManager();
- console.info('audioRoutingManager------create-------success.');
- }
-
- ```
-
-2. (Optional) Obtain the device list and subscribe to device connection status changes.
-
- To obtain the device list (such as input, output, distributed input, and distributed output devices) or listen for connection status changes of audio devices, refer to the following code:
-
- ```js
- import audio from '@ohos.multimedia.audio';
- // Obtain an AudioRoutingManager instance.
- async loadAudioRoutingManager() {
- var audioRoutingManager = await audio.getAudioManager().getRoutingManager();
- console.info('audioRoutingManager------create-------success.');
- }
- // Obtain information about all audio devices. (You can set DeviceFlag as required.)
- async getDevices() {
- await loadAudioRoutingManager();
- await audioRoutingManager.getDevices(audio.DeviceFlag.ALL_DEVICES_FLAG).then((data) => {
- console.info(`getDevices success and data is: ${JSON.stringify(data)}.`);
- });
- }
- // Subscribe to connection status changes of audio devices.
- async onDeviceChange() {
- await loadAudioRoutingManager();
- await audioRoutingManager.on('deviceChange', audio.DeviceFlag.ALL_DEVICES_FLAG, (deviceChanged) => {
- console.info('on device change type : ' + deviceChanged.type);
- console.info('on device descriptor size : ' + deviceChanged.deviceDescriptors.length);
- console.info('on device change descriptor : ' + deviceChanged.deviceDescriptors[0].deviceRole);
- console.info('on device change descriptor : ' + deviceChanged.deviceDescriptors[0].deviceType);
- });
- }
- // Unsubscribe from the connection status changes of audio devices.
- async offDeviceChange() {
- await loadAudioRoutingManager();
- await audioRoutingManager.off('deviceChange', (deviceChanged) => {
- console.info('off device change type : ' + deviceChanged.type);
- console.info('off device descriptor size : ' + deviceChanged.deviceDescriptors.length);
- console.info('off device change descriptor : ' + deviceChanged.deviceDescriptors[0].deviceRole);
- console.info('off device change descriptor : ' + deviceChanged.deviceDescriptors[0].deviceType);
- });
- }
- // Complete process: Call APIs to obtain all devices and subscribe to device changes, then manually change the connection status of a device (for example, wired headset), and finally call APIs to obtain all devices and unsubscribe from the device changes.
- async test(){
- await getDevices();
-      await onDeviceChange();
- // Manually disconnect or connect devices.
- await getDevices();
- await offDeviceChange();
- }
- ```
-
-3. (Optional) Activate a communication device and obtain its activation status.
-
- ```js
- import audio from '@ohos.multimedia.audio';
- // Obtain an AudioRoutingManager instance.
- async loadAudioRoutingManager() {
- var audioRoutingManager = await audio.getAudioManager().getRoutingManager();
- console.info('audioRoutingManager------create-------success.');
- }
- // Activate a communication device.
- async setCommunicationDevice() {
- await loadAudioRoutingManager();
- await audioRoutingManager.setCommunicationDevice(audio.CommunicationDeviceType.SPEAKER, true).then(() => {
- console.info('setCommunicationDevice true is success.');
- });
- }
- // Obtain the activation status of the communication device.
- async isCommunicationDeviceActive() {
- await loadAudioRoutingManager();
- await audioRoutingManager.isCommunicationDeviceActive(audio.CommunicationDeviceType.SPEAKER).then((value) => {
- console.info(`CommunicationDevice state is: ${value}.`);
- });
- }
- // Complete process: Activate a device and obtain the activation status.
- async test(){
- await setCommunicationDevice();
- await isCommunicationDeviceActive();
- }
- ```
diff --git a/en/application-dev/media/audio-stream-manager.md b/en/application-dev/media/audio-stream-manager.md
deleted file mode 100644
index 44ec37cd11f3666131214e5e908a1ce761fea111..0000000000000000000000000000000000000000
--- a/en/application-dev/media/audio-stream-manager.md
+++ /dev/null
@@ -1,164 +0,0 @@
-# Audio Stream Management Development
-
-## Introduction
-
-You can use **AudioStreamManager** to manage audio streams.
-
-## Working Principles
-
-The following figure shows the calling relationship of **AudioStreamManager** APIs.
-
-**Figure 1** AudioStreamManager API calling relationship
-
-
-
-**NOTE**: During application development, use **getStreamManager()** to create an **AudioStreamManager** instance. Then, you can call **on('audioRendererChange')** or **on('audioCapturerChange')** to listen for status, client, and audio attribute changes of the audio playback or recording application. To cancel the listening for these changes, call **off('audioRendererChange')** or **off('audioCapturerChange')**. You can call **getCurrentAudioRendererInfoArray()** to obtain information about the audio playback application, such as the unique audio stream ID, UID of the audio playback client, and audio status. Similarly, you can call **getCurrentAudioCapturerInfoArray()** to obtain information about the audio recording application.
-
-## How to Develop
-
-For details about the APIs, see [AudioStreamManager](../reference/apis/js-apis-audio.md#audiostreammanager9).
-
-1. Create an **AudioStreamManager** instance.
-
- Before using **AudioStreamManager** APIs, you must use **getStreamManager()** to create an **AudioStreamManager** instance.
-
- ```js
- var audioManager = audio.getAudioManager();
- var audioStreamManager = audioManager.getStreamManager();
- ```
-
-2. (Optional) Call **on('audioRendererChange')** to listen for audio renderer changes.
-
- If an application needs to receive notifications when the audio playback application status, audio playback client, or audio attribute changes, it can subscribe to this event. For more events that can be subscribed to, see [Audio Management](../reference/apis/js-apis-audio.md).
-
- ```js
- audioStreamManager.on('audioRendererChange', (AudioRendererChangeInfoArray) => {
- for (let i = 0; i < AudioRendererChangeInfoArray.length; i++) {
-      let AudioRendererChangeInfo = AudioRendererChangeInfoArray[i];
- console.info('## RendererChange on is called for ' + i + ' ##');
- console.info('StreamId for ' + i + ' is:' + AudioRendererChangeInfo.streamId);
- console.info('ClientUid for ' + i + ' is:' + AudioRendererChangeInfo.clientUid);
- console.info('Content for ' + i + ' is:' + AudioRendererChangeInfo.rendererInfo.content);
- console.info('Stream for ' + i + ' is:' + AudioRendererChangeInfo.rendererInfo.usage);
- console.info('Flag ' + i + ' is:' + AudioRendererChangeInfo.rendererInfo.rendererFlags);
- console.info('State for ' + i + ' is:' + AudioRendererChangeInfo.rendererState);
- var devDescriptor = AudioRendererChangeInfo.deviceDescriptors;
- for (let j = 0; j < AudioRendererChangeInfo.deviceDescriptors.length; j++) {
- console.info('Id:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].id);
- console.info('Type:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].deviceType);
- console.info('Role:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].deviceRole);
- console.info('Name:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].name);
- console.info('Address:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].address);
- console.info('SampleRates:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].sampleRates[0]);
- console.info('ChannelCounts' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].channelCounts[0]);
- console.info('ChannelMask:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].channelMasks);
- }
- }
- });
- ```
-
-3. (Optional) Call **off('audioRendererChange')** to cancel listening for audio renderer changes.
-
- ```js
- audioStreamManager.off('audioRendererChange');
- console.info('######### RendererChange Off is called #########');
- ```
-
-4. (Optional) Call **on('audioCapturerChange')** to listen for audio capturer changes.
-
- If an application needs to receive notifications when the audio recording application status, audio recording client, or audio attribute changes, it can subscribe to this event. For more events that can be subscribed to, see [Audio Management](../reference/apis/js-apis-audio.md).
-
- ```js
- audioStreamManager.on('audioCapturerChange', (AudioCapturerChangeInfoArray) => {
- for (let i = 0; i < AudioCapturerChangeInfoArray.length; i++) {
- console.info(' ## audioCapturerChange on is called for element ' + i + ' ##');
- console.info('StreamId for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].streamId);
- console.info('ClientUid for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].clientUid);
- console.info('Source for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].capturerInfo.source);
- console.info('Flag ' + i + 'is:' + AudioCapturerChangeInfoArray[i].capturerInfo.capturerFlags);
- console.info('State for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].capturerState);
- for (let j = 0; j < AudioCapturerChangeInfoArray[i].deviceDescriptors.length; j++) {
- console.info('Id:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].id);
- console.info('Type:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceType);
- console.info('Role:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceRole);
- console.info('Name:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].name);
- console.info('Address:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].address);
- console.info('SampleRates:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].sampleRates[0]);
- console.info('ChannelCounts' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelCounts[0]);
- console.info('ChannelMask:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelMasks);
- }
- }
- });
- ```
-
-5. (Optional) Call **off('audioCapturerChange')** to cancel listening for audio capturer changes.
-
- ```js
- audioStreamManager.off('audioCapturerChange');
- console.info('######### CapturerChange Off is called #########');
- ```
-
-6. (Optional) Call **getCurrentAudioRendererInfoArray()** to obtain information about the current audio renderer.
-
-   This API can be used to obtain the unique ID of the audio stream, UID of the audio playback client, audio status, and other information about the audio player. Before calling this API, a third-party application must have the **ohos.permission.USE_BLUETOOTH** permission configured for the device name and device address to be displayed correctly.
-
- ```js
- await audioStreamManager.getCurrentAudioRendererInfoArray().then( function (AudioRendererChangeInfoArray) {
- console.info('######### Get Promise is called ##########');
- if (AudioRendererChangeInfoArray != null) {
- for (let i = 0; i < AudioRendererChangeInfoArray.length; i++) {
-        let AudioRendererChangeInfo = AudioRendererChangeInfoArray[i];
- console.info('StreamId for ' + i +' is:' + AudioRendererChangeInfo.streamId);
- console.info('ClientUid for ' + i + ' is:' + AudioRendererChangeInfo.clientUid);
- console.info('Content ' + i + ' is:' + AudioRendererChangeInfo.rendererInfo.content);
- console.info('Stream' + i +' is:' + AudioRendererChangeInfo.rendererInfo.usage);
- console.info('Flag' + i + ' is:' + AudioRendererChangeInfo.rendererInfo.rendererFlags);
- console.info('State for ' + i + ' is:' + AudioRendererChangeInfo.rendererState);
- var devDescriptor = AudioRendererChangeInfo.deviceDescriptors;
- for (let j = 0; j < AudioRendererChangeInfo.deviceDescriptors.length; j++) {
- console.info('Id:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].id);
- console.info('Type:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].deviceType);
- console.info('Role:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].deviceRole);
- console.info('Name:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].name);
- console.info('Address:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].address);
- console.info('SampleRates:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].sampleRates[0]);
- console.info('ChannelCounts' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].channelCounts[0]);
- console.info('ChannelMask:' + i + ':' + AudioRendererChangeInfo.deviceDescriptors[j].channelMasks);
- }
- }
- }
- }).catch((err) => {
- console.log('getCurrentAudioRendererInfoArray :ERROR: ' + err.message);
- });
- ```
-
-7. (Optional) Call **getCurrentAudioCapturerInfoArray()** to obtain information about the current audio capturer.
-
-   This API can be used to obtain the unique ID of the audio stream, UID of the audio recording client, audio status, and other information about the audio capturer. Before calling this API, a third-party application must have the **ohos.permission.USE_BLUETOOTH** permission configured for the device name and device address to be displayed correctly.
-
- ```js
- await audioStreamManager.getCurrentAudioCapturerInfoArray().then( function (AudioCapturerChangeInfoArray) {
- console.info('getCurrentAudioCapturerInfoArray: **** Get Promise Called ****');
- if (AudioCapturerChangeInfoArray != null) {
- for (let i = 0; i < AudioCapturerChangeInfoArray.length; i++) {
- console.info('StreamId for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].streamId);
- console.info('ClientUid for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].clientUid);
- console.info('Source for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].capturerInfo.source);
- console.info('Flag ' + i + 'is:' + AudioCapturerChangeInfoArray[i].capturerInfo.capturerFlags);
- console.info('State for ' + i + 'is:' + AudioCapturerChangeInfoArray[i].capturerState);
- var devDescriptor = AudioCapturerChangeInfoArray[i].deviceDescriptors;
- for (let j = 0; j < AudioCapturerChangeInfoArray[i].deviceDescriptors.length; j++) {
- console.info('Id:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].id);
- console.info('Type:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceType);
- console.info('Role:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].deviceRole);
- console.info('Name:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].name)
- console.info('Address:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].address);
- console.info('SampleRates:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].sampleRates[0]);
- console.info('ChannelCounts' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelCounts[0]);
- console.info('ChannelMask:' + i + ':' + AudioCapturerChangeInfoArray[i].deviceDescriptors[j].channelMasks);
- }
- }
- }
- }).catch((err) => {
- console.log('getCurrentAudioCapturerInfoArray :ERROR: ' + err.message);
- });
- ```
diff --git a/en/application-dev/media/audio-volume-manager.md b/en/application-dev/media/audio-volume-manager.md
deleted file mode 100644
index 28ed3dcbc8709609d092a96065a70996b4f487b5..0000000000000000000000000000000000000000
--- a/en/application-dev/media/audio-volume-manager.md
+++ /dev/null
@@ -1,126 +0,0 @@
-# Volume Management Development
-
-## Overview
-
-The **AudioVolumeManager** module provides APIs for volume management. You can use the APIs to obtain the volume of a stream, listen for ringer mode changes, and mute a microphone.
-
-## Working Principles
-
-The figure below shows the common APIs provided by the **AudioVolumeManager** module.
-
-**Figure 1** Common APIs of AudioVolumeManager
-
-
-
-**AudioVolumeManager** provides the APIs for subscribing to system volume changes and obtaining the audio volume group manager (an **AudioVolumeGroupManager** instance). Before calling any API in **AudioVolumeGroupManager**, you must call **getVolumeGroupManager** to obtain an **AudioVolumeGroupManager** instance. You can use the APIs provided by **AudioVolumeGroupManager** to obtain the volume of a stream, mute a microphone, and listen for microphone state changes. For details, see [Audio Management](../reference/apis/js-apis-audio.md).
-
-## Constraints
-
-Before developing a microphone management application, configure the permission **ohos.permission.MICROPHONE** for the application. To set the microphone state, configure the permission **ohos.permission.MANAGE_AUDIO_CONFIG** (a system permission). For details, see [Permission Application Guide](../security/accesstoken-guidelines.md#declaring-permissions-in-the-configuration-file).
-
-## How to Develop
-
-For details about the APIs, see [AudioVolumeManager in Audio Management](../reference/apis/js-apis-audio.md#audiovolumemanager9)
-
-1. Obtain an **AudioVolumeGroupManager** instance.
-
-   Before using an API in **AudioVolumeGroupManager**, you must use **getVolumeGroupManager()** to obtain an **AudioVolumeGroupManager** instance.
-
- ```js
- import audio from '@ohos.multimedia.audio';
- async loadVolumeGroupManager() {
- const groupid = audio.DEFAULT_VOLUME_GROUP_ID;
- var audioVolumeGroupManager = await audio.getAudioManager().getVolumeManager().getVolumeGroupManager(groupid);
-      console.info('audioVolumeGroupManager create success.');
- }
-
- ```
-
-2. (Optional) Obtain the volume information and ringer mode.
-
- To obtain the volume information of an audio stream (such as the ringtone, voice call, media, and voice assistant) or obtain the ringer mode (silent, vibration, or normal) of the current device, refer to the code below. For more details, see [Audio Management](../reference/apis/js-apis-audio.md).
-
- ```js
- import audio from '@ohos.multimedia.audio';
- async loadVolumeGroupManager() {
- const groupid = audio.DEFAULT_VOLUME_GROUP_ID;
- var audioVolumeGroupManager = await audio.getAudioManager().getVolumeManager().getVolumeGroupManager(groupid);
- console.info('audioVolumeGroupManager create success.');
- }
-
- // Obtain the volume of a stream. The value ranges from 0 to 15.
- async getVolume() {
- await loadVolumeGroupManager();
- await audioVolumeGroupManager.getVolume(audio.AudioVolumeType.MEDIA).then((value) => {
- console.info(`getVolume success and volume is: ${value}.`);
- });
- }
- // Obtain the minimum volume of a stream.
- async getMinVolume() {
- await loadVolumeGroupManager();
- await audioVolumeGroupManager.getMinVolume(audio.AudioVolumeType.MEDIA).then((value) => {
- console.info(`getMinVolume success and volume is: ${value}.`);
- });
- }
- // Obtain the maximum volume of a stream.
- async getMaxVolume() {
- await loadVolumeGroupManager();
- await audioVolumeGroupManager.getMaxVolume(audio.AudioVolumeType.MEDIA).then((value) => {
- console.info(`getMaxVolume success and volume is: ${value}.`);
- });
- }
- // Obtain the ringer mode in use: silent (0) | vibrate (1) | normal (2).
- async getRingerMode() {
- await loadVolumeGroupManager();
- await audioVolumeGroupManager.getRingerMode().then((value) => {
- console.info(`getRingerMode success and RingerMode is: ${value}.`);
- });
- }
- ```
-
-3. (Optional) Obtain and set the microphone state, and subscribe to microphone state changes.
-
- To obtain and set the microphone state or subscribe to microphone state changes, refer to the following code:
-
- ```js
- import audio from '@ohos.multimedia.audio';
- async loadVolumeGroupManager() {
- const groupid = audio.DEFAULT_VOLUME_GROUP_ID;
- var audioVolumeGroupManager = await audio.getAudioManager().getVolumeManager().getVolumeGroupManager(groupid);
- console.info('audioVolumeGroupManager create success.');
- }
-
- async on() { // Subscribe to microphone state changes.
- await loadVolumeGroupManager();
-      await audioVolumeGroupManager.on('micStateChange', (micStateChange) => {
- console.info(`Current microphone status is: ${micStateChange.mute} `);
- });
- }
-
-    async isMicrophoneMute() { // Check whether the microphone is muted.
-      await loadVolumeGroupManager();
-      await audioVolumeGroupManager.isMicrophoneMute().then((value) => {
- console.info(`isMicrophoneMute is: ${value}.`);
- });
- }
-
- async setMicrophoneMuteTrue() { // Mute the microphone.
- await loadVolumeGroupManager();
-      await audioVolumeGroupManager.setMicrophoneMute(true).then(() => {
- console.info('setMicrophoneMute to mute.');
- });
- }
-
- async setMicrophoneMuteFalse() { // Unmute the microphone.
- await loadVolumeGroupManager();
-      await audioVolumeGroupManager.setMicrophoneMute(false).then(() => {
- console.info('setMicrophoneMute to not mute.');
- });
- }
- async test(){ // Complete process: Subscribe to microphone state changes, obtain the microphone state, mute the microphone, obtain the microphone state, and then unmute the microphone.
- await on();
- await isMicrophoneMute();
- await setMicrophoneMuteTrue();
- await isMicrophoneMute();
- await setMicrophoneMuteFalse();
- }
- ```
diff --git a/en/application-dev/media/av-overview.md b/en/application-dev/media/av-overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..eb0ea76dbfa90a3d3e3dd13e98ecf40876714310
--- /dev/null
+++ b/en/application-dev/media/av-overview.md
@@ -0,0 +1,66 @@
+# Audio and Video Overview
+
+You will learn how to use the audio and video APIs provided by the multimedia subsystem to develop a wide range of audio and video playback and recording scenarios. For example, you can use the **TonePlayer** class to implement simple prompt tones so that a short tone is played when a new message arrives, or use the **AVPlayer** class to develop a music player that can loop a piece of music.
+
+For every functionality provided by the multimedia subsystem, you will learn multiple implementation modes, each of which corresponds to a specific usage scenario. You will also learn the sub-functionalities in these scenarios. For example, in the **Audio Playback** chapter, you will learn audio concurrency policies, volume management, and output device processing methods. All these will help you develop an application with more comprehensive features.
+
+This development guide applies only to audio and video playback and recording, which are implemented by the [@ohos.multimedia.audio](../reference/apis/js-apis-audio.md) and [@ohos.multimedia.media](../reference/apis/js-apis-media.md) modules. The UI, image processing, media storage, and other related capabilities are not covered.
+
+## Development Description
+
+Before developing an audio feature, especially before implementing audio data processing, you are advised to understand the following acoustic concepts. This will help you understand how the OpenHarmony APIs control the audio module and how to develop audio and video applications that are easier to use and deliver better experience.
+
+- Audio quantization process: sampling > quantization > encoding
+
+- Concepts related to audio quantization: analog signal, digital signal, sampling rate, audio channel, sample format, bit width, bit rate, common encoding formats (such as AAC, MP3, PCM, and WMA), and common encapsulation formats (such as WAV, MPA, FLAC, AAC, and OGG)
+
+Before developing features related to audio and video playback, you are advised to understand the following concepts:
+
+- Playback process: network protocol > container format > audio and video codec > graphics/audio rendering
+- Network protocols: HLS, HTTP, HTTPS, and more
+- Container formats: MP4, MKV, MPEG-TS, WebM, and more
+- Encoding formats: H.263/H.264/H.265, MPEG4/MPEG2, and more
+
+## Introduction to Audio Streams
+
+An audio stream is an independent audio data processing unit that has a specific audio format and audio usage scenario information. The audio stream can be used in playback and recording scenarios, and supports independent volume adjustment and audio device routing.
+
+The basic audio stream information is defined by [AudioStreamInfo](../reference/apis/js-apis-audio.md#audiostreaminfo8), which includes the sampling, audio channel, bit width, and encoding information. It describes the basic attributes of audio data and is mandatory for creating an audio playback or recording stream. To enable the audio module to correctly process audio data, the configured basic information must match the transmitted audio data.
+
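+A minimal sketch of such a configuration is shown below; the parameter values are illustrative assumptions and must match the audio data that is actually transmitted.
+
+```js
+import audio from '@ohos.multimedia.audio';
+
+// Example basic stream information: 48 kHz, mono, 16-bit little-endian PCM.
+let audioStreamInfo = {
+  samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000,
+  channels: audio.AudioChannel.CHANNEL_1,
+  sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
+  encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
+};
+```
+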
+### Audio Stream Usage Scenario Information
+
+In addition to the basic information (which describes only the audio data), an audio stream carries usage scenario information. This is because audio streams differ in volume, device routing, and concurrency policy. The system chooses an appropriate processing policy for an audio stream based on the usage scenario information, thereby delivering the optimal user experience. A code sketch of both scenarios is shown after the list below.
+
+- Playback scenario
+
+  Information about the audio playback scenario is defined by [StreamUsage](../reference/apis/js-apis-audio.md#streamusage) and [ContentType](../reference/apis/js-apis-audio.md#contenttype).
+
+  - **StreamUsage** specifies the usage type of an audio stream, for example, media, voice communication, voice assistant, notification, and ringtone.
+
+  - **ContentType** specifies the content type of the data in an audio stream, for example, speech, music, movie, notification tone, and ringtone.
+
+- Recording scenario
+
+  Information about the audio recording scenario is defined by [SourceType](../reference/apis/js-apis-audio.md#sourcetype8).
+
+  **SourceType** specifies the recording source type of an audio stream, including the mic source, voice recognition source, and voice communication source.
+
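+As a sketch, the scenario information for both cases might be declared as follows; the specific types chosen here are illustrative assumptions:
+
+```js
+import audio from '@ohos.multimedia.audio';
+
+// Playback scenario: a music stream used for media.
+let audioRendererInfo = {
+  content: audio.ContentType.CONTENT_TYPE_MUSIC,
+  usage: audio.StreamUsage.STREAM_USAGE_MEDIA,
+  rendererFlags: 0 // Extended flag bit of the audio renderer. The default value is 0.
+};
+
+// Recording scenario: the mic source.
+let audioCapturerInfo = {
+  source: audio.SourceType.SOURCE_TYPE_MIC,
+  capturerFlags: 0 // Extended flag bit of the audio capturer. The default value is 0.
+};
+```
+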
+## Supported Audio Formats
+
+The audio module APIs, including AudioRenderer, AudioCapturer, TonePlayer, and OpenSL ES, support audio data in PCM format.
+
+Be familiar with the following audio format specifications:
+
+- The common audio sampling rates are supported: 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 64000, and 96000, in units of Hz. For details, see [AudioSamplingRate](../reference/apis/js-apis-audio.md#audiosamplingrate8).
+
+  The supported sampling rates vary according to the device type.
+
+- Mono and stereo are supported. For details, see [AudioChannel](../reference/apis/js-apis-audio.md#audiochannel8).
+
+- The following sample formats are supported: U8 (unsigned 8-bit integer), S16LE (signed 16-bit integer, little endian), S24LE (signed 24-bit integer, little endian), S32LE (signed 32-bit integer, little endian), and F32LE (signed 32-bit floating-point number, little endian). For details, see [AudioSampleFormat](../reference/apis/js-apis-audio.md#audiosampleformat8).
+
+  Due to system restrictions, only some devices support the sample formats S24LE, S32LE, and F32LE.
+
+  Little endian means that the least significant byte of the data is stored at the smallest memory address and the most significant byte at the largest, so the bit weight of the data increases with the memory address.
+
+The audio and video formats supported by the APIs of the media module are described in [AVPlayer and AVRecorder](avplayer-avrecorder-overview.md).
diff --git a/en/application-dev/media/avplayer-avrecorder-overview.md b/en/application-dev/media/avplayer-avrecorder-overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..051ca3b66ce1839046a2e783a8c274c304625045
--- /dev/null
+++ b/en/application-dev/media/avplayer-avrecorder-overview.md
@@ -0,0 +1,148 @@
+# AVPlayer and AVRecorder
+
+The media module provides the [AVPlayer](#avplayer) and [AVRecorder](#avrecorder) classes to implement audio and video playback and recording.
+
+## AVPlayer
+
+The AVPlayer decodes audio and video media assets (such as MP4, MP3, MKV, and MPEG-TS) into renderable images and audible analog audio signals, and plays the audio and video through output devices.
+
+The AVPlayer provides the integrated playback capability. This means that your application only needs to provide streaming media sources to implement media playback. It does not need to parse or decode data.
+
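+For example, a minimal playback sketch only needs to create an **AVPlayer** instance, hand it a media source, and drive it through its state changes. The URL below is a hypothetical placeholder:
+
+```js
+import media from '@ohos.multimedia.media';
+
+async function play() {
+  let avPlayer = await media.createAVPlayer();
+  avPlayer.on('stateChange', async (state) => {
+    if (state === 'initialized') {
+      await avPlayer.prepare(); // The player framework parses and decodes the source.
+    } else if (state === 'prepared') {
+      avPlayer.play(); // Start playback.
+    }
+  });
+  avPlayer.url = 'https://example.com/sample.mp3'; // Hypothetical media asset.
+}
+```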
+
+### Audio Playback
+
+The figure below shows the interaction when the **AVPlayer** class is used to develop a music application.
+
+**Figure 1** Interaction with external modules for audio playback
+
+
+
+When a music application calls the **AVPlayer** APIs at the JS interface layer to implement audio playback, the player framework at the framework layer parses the media asset into audio data streams (in PCM format). The audio data streams are then decoded by software and output to the audio framework. The audio framework outputs the audio data streams to the audio HDI for rendering. A complete audio playback process requires the cooperation of the application, player framework, audio framework, and audio HDI.
+
+In Figure 1, the numbers indicate the process where data is transferred to external modules.
+
+1. The music application transfers the media asset to the **AVPlayer** instance.
+
+2. The player framework outputs the audio PCM data streams to the audio framework, which then outputs the data streams to the audio HDI.
+
+### Video Playback
+
+The figure below shows the interaction when the **AVPlayer** class is used to develop a video application.
+
+**Figure 2** Interaction with external modules for video playback
+
+
+
+When the video application calls the **AVPlayer** APIs at the JS interface layer to implement audio and video playback, the player framework at the framework layer parses the media asset into separate audio data streams and video data streams. The audio data streams are then decoded by software and output to the audio framework. The audio framework outputs the audio data streams to the audio HDI at the hardware interface layer to implement audio playback. The video data streams are then decoded by hardware (recommended) or software and output to the graphic framework. The graphic framework outputs the video data streams to the display HDI at the hardware interface layer to implement graphics rendering.
+
+A complete video playback process requires the cooperation of the application, XComponent, player framework, graphic framework, audio framework, display HDI, and audio HDI.
+
+In Figure 2, the numbers indicate the process where data is transferred to external modules.
+
+1. The application obtains a window surface ID from the XComponent. For details about how to obtain the window surface ID, see [XComponent](../reference/arkui-ts/ts-basic-components-xcomponent.md).
+
+2. The application transfers the media asset and surface ID to the **AVPlayer** instance.
+
+3. The player framework outputs the video elementary streams (ESs) to the decoding HDI to obtain video frames (NV12/NV21/RGBA).
+
+4. The player framework outputs the audio PCM data streams to the audio framework, which then outputs the data streams to the audio HDI.
+
+5. The player framework outputs the video frames (NV12/NV21/RGBA) to the graphic framework, which then outputs the video frames to the display HDI.
+
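+Steps 1 and 2 are the only application-side work. A minimal sketch is shown below; the surface ID is assumed to have been obtained from an XComponent, and the URL is a hypothetical placeholder:
+
+```js
+import media from '@ohos.multimedia.media';
+
+async function playVideo(surfaceId) { // surfaceId comes from an XComponent (step 1).
+  let avPlayer = await media.createAVPlayer();
+  avPlayer.on('stateChange', async (state) => {
+    if (state === 'initialized') {
+      avPlayer.surfaceId = surfaceId; // Bind the window surface for video rendering (step 2).
+      await avPlayer.prepare();
+    } else if (state === 'prepared') {
+      avPlayer.play();
+    }
+  });
+  avPlayer.url = 'https://example.com/sample.mp4'; // Hypothetical media asset (step 2).
+}
+```
+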
+### Supported Formats and Protocols
+
+Audio and video containers and codecs are domains specific to content creators. You are advised to use mainstream playback formats rather than custom ones, so as to avoid playback failures, frame freezing, and artifacts. If an incompatible asset is played, only that playback is affected; the system itself remains stable, and the application can exit the playback.
+
+The table below lists the supported protocols.
+
+| Scenario| Description|
+| -------- | -------- |
+| Local VOD| The file descriptor is supported, but the file path is not.|
+| Network VOD| HTTP, HTTPS, and HLS are supported.|
+
+The table below lists the supported audio playback formats.
+
+| Audio Container Format| Description|
+| -------- | -------- |
+| M4A| Audio format: AAC|
+| AAC| Audio format: AAC|
+| MP3| Audio format: MP3|
+| OGG| Audio format: VORBIS |
+| WAV| Audio format: PCM |
+
+> **NOTE**
+>
+> The supported video formats are further classified into mandatory and optional ones. All vendors must support mandatory ones and can determine whether to implement optional ones based on their service requirements. You are advised to perform compatibility processing to ensure that all the application functions are compatible on different platforms.
+
+| Video Format| Mandatory or Not|
+| -------- | -------- |
+| H.264 | Yes|
+| MPEG-2 | No|
+| MPEG-4 | No|
+| H.263 | No|
+| VP8 | No|
+
+The table below lists the supported playback formats and mainstream resolutions.
+
+| Video Container Format| Description| Resolution|
+| -------- | -------- | -------- |
+| MP4| Video formats: H.264, MPEG-2, MPEG-4, and H.263<br>Audio formats: AAC and MP3| Mainstream resolutions, such as 4K, 1080p, 720p, 480p, and 270p|
+| MKV| Video formats: H.264, MPEG-2, MPEG-4, and H.263<br>Audio formats: AAC and MP3| Mainstream resolutions, such as 4K, 1080p, 720p, 480p, and 270p|
+| TS| Video formats: H.264, MPEG-2, and MPEG-4<br>Audio formats: AAC and MP3| Mainstream resolutions, such as 4K, 1080p, 720p, 480p, and 270p|
+| WebM| Video format: VP8<br>Audio format: VORBIS| Mainstream resolutions, such as 4K, 1080p, 720p, 480p, and 270p|
+
+## AVRecorder
+
+The AVRecorder captures audio signals, receives video signals, encodes the audio and video signals, and saves them to files. With the AVRecorder, you can easily implement audio and video recording, including starting, pausing, resuming, and stopping recording, and releasing resources. You can also specify parameters such as the encoding format, encapsulation format, and file path for recording.
+
+**Figure 3** Interaction with external modules for video recording
+
+
+
+- Audio recording: When an application calls the **AVRecorder** APIs at the JS interface layer to implement audio recording, the player framework at the framework layer invokes the audio framework to capture audio data through the audio HDI. The audio data is then encoded by software and saved into a file.
+
+- Video recording: When an application calls the **AVRecorder** APIs at the JS interface layer to implement video recording, the camera framework is first invoked to capture image data. Through the video encoding HDI, the camera framework sends the data to the player framework at the framework layer. The player framework encodes the image data through the video HDI and saves the encoded image data into a file.
+
+With the AVRecorder, you can implement pure audio recording, pure video recording, and audio and video recording.
+
+In Figure 3, the numbers indicate the process where data is transferred to external modules.
+
+1. The application obtains a surface ID from the player framework through the **AVRecorder** instance.
+
+2. The application sets the surface ID for the camera framework, which obtains the surface corresponding to the surface ID. The camera framework captures image data through the video HDI and sends the data to the player framework at the framework layer.
+
+3. The camera framework transfers the video data to the player framework through the surface.
+
+4. The player framework encodes video data through the video HDI.
+
+5. The player framework sets the audio parameters for the audio framework and obtains the audio data from the audio framework.
+
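+From the application's perspective, this pipeline is configured through a recorder profile. A minimal audio-recording sketch is shown below; the profile values and the output file descriptor are illustrative assumptions:
+
+```js
+import media from '@ohos.multimedia.media';
+
+async function recordAudio() {
+  let avRecorder = await media.createAVRecorder();
+  let avConfig = {
+    audioSourceType: media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC, // The mic audio source.
+    profile: {
+      audioBitrate: 48000, // Example bit rate.
+      audioChannels: 2,
+      audioCodec: media.CodecMimeType.AUDIO_AAC, // audio/mp4a-latm.
+      audioSampleRate: 48000,
+      fileFormat: media.ContainerFormatType.CFT_MPEG_4A // M4A container.
+    },
+    url: 'fd://35' // Hypothetical file descriptor of the output file.
+  };
+  await avRecorder.prepare(avConfig);
+  await avRecorder.start();
+}
+```
+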
+### Supported Formats
+
+The table below lists the supported audio sources.
+
+| Type| Description|
+| -------- | -------- |
+| mic | The system microphone is used as the audio source input.|
+
+The table below lists the supported video sources.
+
+| Type| Description |
+| -------- | -------- |
+| surface_yuv | The input surface carries raw data.|
+| surface_es | The input surface carries ES data.|
+
+The table below lists the supported audio and video encoding formats.
+
+| Encoding Format| Description |
+| -------- | -------- |
+| audio/mp4a-latm | Audio encoding format MP4A-LATM.|
+| video/mp4v-es | Video encoding format MPEG-4.|
+| video/avc | Video encoding format AVC.|
+
+The table below lists the supported output file formats.
+
+| Format| Description |
+| -------- | -------- |
+| MP4| Video container format MP4.|
+| M4A| Audio container format M4A.|
diff --git a/en/application-dev/media/avplayer-playback.md b/en/application-dev/media/avplayer-playback.md
deleted file mode 100644
index 324dd43e6f73d46e5f0d264ae81ba36802ee6021..0000000000000000000000000000000000000000
--- a/en/application-dev/media/avplayer-playback.md
+++ /dev/null
@@ -1,477 +0,0 @@
-# AVPlayer Development
-
-## Introduction
-
-The AVPlayer converts audio or video resources into audible analog signals or renderable images and plays the signals or images using output devices. You can manage playback tasks on the AVPlayer. For example, you can control the playback (start/pause/stop/seek), set the volume, obtain track information, and release resources.
-
-## Working Principles
-
-The following figures show the [AVPlayer state](../reference/apis/js-apis-media.md#avplayerstate9) transition and interaction with external audio and video playback modules.
-
-**Figure 1** AVPlayer state transition
-
-
-
-**Figure 2** Interaction with external modules for audio playback
-
-
-
-**NOTE**: When an application calls the **AVPlayer** JS APIs at the JS interface layer to implement a feature, the framework layer parses the resources into audio data streams through the playback service of the player framework. The audio data streams are then decoded by software and output to the audio service of the audio framework. The audio framework outputs the audio data streams to the audio HDI at the hardware interface layer to implement audio playback. A complete audio playback process requires the cooperation of the application (application adaptation required), player framework, audio framework, and audio HDI (driver adaptation required).
-
-1. An application passes a URL into the **AVPlayer** JS API.
-2. The playback service outputs the audio PCM data streams to the audio service, and the audio service outputs the data streams to the audio HDI.
-
-
-**Figure 3** Interaction with external modules for video playback
-
-
-
-**NOTE**: When an application calls the **AVPlayer** JS APIs at the JS interface layer to implement a feature, the framework layer parses the resources into separate audio data streams and video data streams through the playback service of the player framework. The audio data streams are then decoded by software and output to the audio service of the audio framework. The audio framework outputs the audio data streams to the audio HDI at the hardware interface layer to implement audio playback. The video data streams are then decoded by hardware (recommended) or software and output to the renderer service of the graphic framework. The renderer service outputs the video data streams to the display HDI at the hardware interface layer. A complete video playback process requires the cooperation of the application (application adaptation required), XComponent, player framework, graphic framework, audio framework, display HDI (driver adaptation required), and audio HDI (driver adaptation required).
-
-1. An application obtains the surface ID from the XComponent. For details about the obtaining method, see [XComponent](../reference/arkui-ts/ts-basic-components-xcomponent.md).
-2. The application passes a URL and the surface ID into the **AVPlayer** JS API.
-3. The playback service outputs video elementary streams (ESs) to the codec HDI, which decodes the ESs to obtain video frames (NV12/NV21/RGBA).
-4. The playback service outputs the audio PCM data streams to the audio service, and the audio service outputs the data streams to the audio HDI.
-5. The playback service outputs video frames (NV12/NV21/RGBA) to the renderer service, and the renderer service outputs the video frames to the display HDI.
-
-## Compatibility
-
-Use mainstream playback formats and resolutions rather than custom ones to avoid playback failures, frame freezing, and artifacts. If an incompatible stream is played, only that playback is affected; the system itself remains stable, and the application can exit the playback.
-
-The table below lists the mainstream playback formats and resolutions.
-
-| Video Container Format| Description | Resolution |
-| :----------: | :-----------------------------------------------: | :--------------------------------: |
-| mp4 | Video format: H.264/MPEG-2/MPEG-4/H.263; audio format: AAC/MP3| Mainstream resolutions, such as 1080p, 720p, 480p, and 270p|
-| mkv | Video format: H.264/MPEG-2/MPEG-4/H.263; audio format: AAC/MP3| Mainstream resolutions, such as 1080p, 720p, 480p, and 270p|
-| ts | Video format: H.264/MPEG-2/MPEG-4; audio format: AAC/MP3 | Mainstream resolutions, such as 1080p, 720p, 480p, and 270p|
-| webm | Video format: VP8; audio format: VORBIS | Mainstream resolutions, such as 1080p, 720p, 480p, and 270p|
-
-| Audio Container Format | Description |
-| :----------: | :----------: |
-| m4a | Audio format: AAC|
-| aac | Audio format: AAC|
-| mp3 | Audio format: MP3|
-| ogg | Audio format: VORBIS |
-| wav | Audio format: PCM |
-
-## How to Develop
-
-For details about the APIs, see the [AVPlayer APIs in the Media Class](../reference/apis/js-apis-media.md#avplayer9).
-
-### Full-Process Scenario
-
-The full playback process includes creating an instance, setting resources, setting a video window, preparing for playback, controlling playback, and resetting or releasing the resources. (During the preparation, you can obtain track information, volume, speed, focus mode, and zoom mode, and set bit rates. To control the playback, you can start, pause, and stop the playback, seek to a playback position, and set the volume.)
-
-1. Call [createAVPlayer()](../reference/apis/js-apis-media.md#mediacreateavplayer9) to create an **AVPlayer** instance. The AVPlayer is initialized to the [idle](#avplayer_state) state.
-
-2. Set the events to listen for, which will be used in the full-process scenario.
-
-3. Set the resource [URL](../reference/apis/js-apis-media.md#avplayer_attributes). When the AVPlayer enters the [initialized](#avplayer_state) state, you can set the [surface ID](../reference/apis/js-apis-media.md#avplayer_attributes) for the video window. For details about the supported specifications, see [AVPlayer Attributes](../reference/apis/js-apis-media.md#avplayer_attributes).
-
-4. Call [prepare()](../reference/apis/js-apis-media.md#avplayer_prepare) to switch the AVPlayer to the [prepared](#avplayer_state) state.
-
-5. Perform video playback control. For example, you can call [play()](../reference/apis/js-apis-media.md#avplayer_play), [pause()](../reference/apis/js-apis-media.md#avplayer_pause), [seek()](../reference/apis/js-apis-media.md#avplayer_seek), and [stop()](../reference/apis/js-apis-media.md#avplayer_stop) to control the playback.
-
-6. Call [reset()](../reference/apis/js-apis-media.md#avplayer_reset) to reset resources. The AVPlayer enters the [idle](#avplayer_state) state again, and you can change the resource [URL](../reference/apis/js-apis-media.md#avplayer_attributes).
-
-7. Call [release()](../reference/apis/js-apis-media.md#avplayer_release) to release the instance. The AVPlayer enters the [released](#avplayer_state) state and exits the playback.
-
-> **NOTE**
->
-> When the AVPlayer is in the prepared, playing, paused, or completed state, the playback engine is working and a large amount of system running memory is occupied. If your application does not need to use the AVPlayer, call **reset()** or **release()** to release the resources.
-
-### Listening Events
-
-| Event Type | Description |
-| ------------------------------------------------- | ------------------------------------------------------------ |
-| stateChange | Mandatory; used to listen for player state changes. |
-| error | Mandatory; used to listen for player error information. |
-| durationUpdate | Used to listen for progress bar updates to refresh the resource duration. |
-| timeUpdate | Used to listen for the current position of the progress bar to refresh the current time. |
-| seekDone | Used to listen for the completion status of the **seek()** request. |
-| speedDone | Used to listen for the completion status of the **setSpeed()** request. |
-| volumeChange | Used to listen for the completion status of the **setVolume()** request. |
-| bitrateDone | Used to listen for the completion status of the **setBitrate()** request, which is used for HTTP Live Streaming (HLS) streams. |
-| availableBitrates | Used to listen for available bit rates of HLS resources. The available bit rates are provided for **setBitrate()**. |
-| bufferingUpdate | Used to listen for network playback buffer information. |
-| startRenderFrame | Used to listen for the rendering time of the first frame during video playback. |
-| videoSizeChange | Used to listen for the width and height of video playback and adjust the window size and ratio.|
-| audioInterrupt | Used to listen for audio interruption during video playback. This event is used together with the **audioInterruptMode** attribute.|
-
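-The table marks **stateChange** and **error** as mandatory, but the samples in this section register only **stateChange**. A minimal sketch of the **error** listener, assuming an existing **avPlayer** instance, is shown below.
-
-```js
-// Minimal sketch: register the mandatory error listener on an existing AVPlayer instance.
-avPlayer.on('error', (err) => {
-  console.error('AVPlayer error, code is ' + err.code + ', message is ' + err.message)
-  avPlayer.reset() // Reset the player so that it can recover from the error state.
-})
-```
-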
-### Full-Process Scenario API Example
-
-```js
-import media from '@ohos.multimedia.media'
-import audio from '@ohos.multimedia.audio';
-import fs from '@ohos.file.fs'
-
-const TAG = 'AVPlayerDemo:'
-export class AVPlayerDemo {
- private count:number = 0
- private avPlayer
- private surfaceID:string // The surfaceID parameter is used for screen display. Its value is obtained through the XComponent API.
-
- // Set AVPlayer callback functions.
- setAVPlayerCallback() {
- // Callback function for state changes.
- this.avPlayer.on('stateChange', async (state, reason) => {
- switch (state) {
- case 'idle': // This state is reported upon a successful callback of reset().
- console.info(TAG + 'state idle called')
- this.avPlayer.release() // Release the AVPlayer instance.
- break;
- case 'initialized': // This state is reported when the AVPlayer sets the playback source.
- console.info(TAG + 'state initialized called ')
- this.avPlayer.surfaceId = this.surfaceID // Set the image to be displayed. This setting is not required when a pure audio resource is to be played.
- this.avPlayer.prepare().then(() => {
-            console.info(TAG + 'prepare success');
-          }, (err) => {
-            console.error(TAG + 'prepare failed, error message is :' + err.message)
- })
- break;
- case 'prepared': // This state is reported upon a successful callback of prepare().
- console.info(TAG + 'state prepared called')
- this.avPlayer.play() // Call play() to start playback.
- break;
- case 'playing': // This state is reported upon a successful callback of play().
- console.info(TAG + 'state playing called')
- if (this.count == 0) {
- this.avPlayer.pause() // Call pause() to pause the playback.
- } else {
- this.avPlayer.seek(10000, media.SeekMode.SEEK_PREV_SYNC) // Seek to 10 seconds. The seekDone callback is triggered.
- }
- break;
- case 'paused': // This state is reported upon a successful callback of pause().
- console.info(TAG + 'state paused called')
- if (this.count == 0) {
- this.count++
- this.avPlayer.play() // Call play() to continue the playback.
- }
- break;
- case 'completed': // This state is reported upon the completion of the playback.
- console.info(TAG + 'state completed called')
- this.avPlayer.stop() // Call stop() to stop the playback.
- break;
- case 'stopped': // This state is reported upon a successful callback of stop().
- console.info(TAG + 'state stopped called')
- this.avPlayer.reset() // Call reset() to initialize the AVPlayer state.
- break;
- case 'released':
- console.info(TAG + 'state released called')
- break;
- case 'error':
- console.info(TAG + 'state error called')
- break;
- default:
-          console.info(TAG + 'unknown state :' + state)
- break;
- }
- })
- // Callback function for time updates.
- this.avPlayer.on('timeUpdate', (time:number) => {
- console.info(TAG + 'timeUpdate success,and new time is :' + time)
- })
- // Callback function for volume updates.
- this.avPlayer.on('volumeChange', (vol:number) => {
- console.info(TAG + 'volumeChange success,and new volume is :' + vol)
- this.avPlayer.setSpeed(media.AVPlayerSpeed.SPEED_FORWARD_2_00_X) // Double the playback speed. The speedDone callback is triggered.
- })
- // Callback function for the video playback completion event.
- this.avPlayer.on('endOfStream', () => {
- console.info(TAG + 'endOfStream success')
- })
- // Callback function for the seek operation.
- this.avPlayer.on('seekDone', (seekDoneTime:number) => {
- console.info(TAG + 'seekDone success,and seek time is:' + seekDoneTime)
- this.avPlayer.setVolume(0.5) // Set the volume to 0.5. The volumeChange callback is triggered.
- })
- // Callback function for the speed setting operation.
- this.avPlayer.on('speedDone', (speed:number) => {
- console.info(TAG + 'speedDone success,and speed value is:' + speed)
- })
- // Callback function for successful bit rate setting.
- this.avPlayer.on('bitrateDone', (bitrate:number) => {
- console.info(TAG + 'bitrateDone success,and bitrate value is:' + bitrate)
- })
- // Callback function for buffering updates.
- this.avPlayer.on('bufferingUpdate', (infoType: media.BufferingInfoType, value: number) => {
- console.info(TAG + 'bufferingUpdate success,and infoType value is:' + infoType + ', value is :' + value)
- })
- // Callback function invoked when frame rendering starts.
- this.avPlayer.on('startRenderFrame', () => {
- console.info(TAG + 'startRenderFrame success')
- })
- // Callback function for video width and height changes.
- this.avPlayer.on('videoSizeChange', (width: number, height: number) => {
- console.info(TAG + 'videoSizeChange success,and width is:' + width + ', height is :' + height)
- })
- // Callback function for the audio interruption event.
- this.avPlayer.on('audioInterrupt', (info: audio.InterruptEvent) => {
- console.info(TAG + 'audioInterrupt success,and InterruptEvent info is:' + info)
- })
- // Callback function to report the available bit rates of HLS.
-    this.avPlayer.on('availableBitrates', (bitrates: Array<number>) => {
- console.info(TAG + 'availableBitrates success,and availableBitrates length is:' + bitrates.length)
- })
- }
-
- async avPlayerDemo() {
- // Create an AVPlayer instance.
- this.avPlayer = await media.createAVPlayer()
- let fdPath = 'fd://'
- let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements.
- // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\H264_AAC.mp4 /data/app/el2/100/base/ohos.acts.multimedia.media.avplayer/haps/entry/files" command.
- let path = pathDir + '/H264_AAC.mp4'
- let file = await fs.open(path)
- fdPath = fdPath + '' + file.fd
- this.avPlayer.url = fdPath
- }
-}
-```
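-
-The samples in this section assume that **surfaceID** has already been obtained from an **XComponent**. A minimal ArkTS sketch, in which the component ID and member names are illustrative, is shown below.
-
-```js
-// Illustrative ArkTS sketch: obtain the surface ID from an XComponent of type 'surface'.
-@Entry
-@Component
-struct PlayerPage {
-  private xController: XComponentController = new XComponentController()
-  private surfaceID: string = ''
-
-  build() {
-    Column() {
-      XComponent({ id: 'playerXComponent', type: 'surface', controller: this.xController })
-        .onLoad(() => {
-          // The returned ID is the value to assign to avPlayer.surfaceId.
-          this.surfaceID = this.xController.getXComponentSurfaceId()
-        })
-        .width('100%')
-        .height('100%')
-    }
-  }
-}
-```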
-
-### Normal Playback Scenario
-
-```js
-import media from '@ohos.multimedia.media'
-import fs from '@ohos.file.fs'
-
-const TAG = 'AVPlayerDemo:'
-export class AVPlayerDemo {
- private avPlayer
- private surfaceID:string // The surfaceID parameter is used for screen display. Its value is obtained through the XComponent API.
-
- // Set AVPlayer callback functions.
- setAVPlayerCallback() {
- // Callback function for state changes.
- this.avPlayer.on('stateChange', async (state, reason) => {
- switch (state) {
- case 'idle': // This state is reported upon a successful callback of reset().
- console.info(TAG + 'state idle called')
- break;
- case 'initialized': // This state is reported when the AVPlayer sets the playback source.
- console.info(TAG + 'state initialized called ')
- this.avPlayer.surfaceId = this.surfaceID // Set the image to be displayed. This setting is not required when a pure audio resource is to be played.
- this.avPlayer.prepare().then(() => {
-            console.info(TAG + 'prepare success');
-          }, (err) => {
-            console.error(TAG + 'prepare failed, error message is :' + err.message)
- })
- break;
- case 'prepared': // This state is reported upon a successful callback of prepare().
- console.info(TAG + 'state prepared called')
- this.avPlayer.play() // Call play() to start playback.
- break;
- case 'playing': // This state is reported upon a successful callback of play().
- console.info(TAG + 'state playing called')
- break;
- case 'paused': // This state is reported upon a successful callback of pause().
- console.info(TAG + 'state paused called')
- break;
- case 'completed': // This state is reported upon the completion of the playback.
- console.info(TAG + 'state completed called')
- this.avPlayer.stop() // Call stop() to stop the playback.
- break;
- case 'stopped': // This state is reported upon a successful callback of stop().
- console.info(TAG + 'state stopped called')
-          this.avPlayer.release() // Call release() to release the AVPlayer instance.
- break;
- case 'released':
- console.info(TAG + 'state released called')
- break;
- case 'error':
- console.info(TAG + 'state error called')
- break;
- default:
-          console.info(TAG + 'unknown state :' + state)
- break;
- }
- })
- }
-
- async avPlayerDemo() {
- // Create an AVPlayer instance.
- this.avPlayer = await media.createAVPlayer()
- let fdPath = 'fd://'
- let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements.
- // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\H264_AAC.mp4 /data/app/el2/100/base/ohos.acts.multimedia.media.avplayer/haps/entry/files" command.
- let path = pathDir + '/H264_AAC.mp4'
- let file = await fs.open(path)
- fdPath = fdPath + '' + file.fd
- this.avPlayer.url = fdPath
- }
-}
-```
-
-### Looping a Song
-
-```js
-import media from '@ohos.multimedia.media'
-import fs from '@ohos.file.fs'
-
-const TAG = 'AVPlayerDemo:'
-export class AVPlayerDemo {
- private count:number = 0
- private avPlayer
- private surfaceID:string // The surfaceID parameter is used for screen display. Its value is obtained through the XComponent API.
-
- // Set AVPlayer callback functions.
- setAVPlayerCallback() {
- // Callback function for state changes.
- this.avPlayer.on('stateChange', async (state, reason) => {
- switch (state) {
- case 'idle': // This state is reported upon a successful callback of reset().
- console.info(TAG + 'state idle called')
- break;
- case 'initialized': // This state is reported when the AVPlayer sets the playback source.
- console.info(TAG + 'state initialized called ')
- this.avPlayer.surfaceId = this.surfaceID // Set the image to be displayed. This setting is not required when a pure audio resource is to be played.
- this.avPlayer.prepare().then(() => {
-            console.info(TAG + 'prepare success');
-          }, (err) => {
-            console.error(TAG + 'prepare failed, error message is :' + err.message)
- })
- break;
- case 'prepared': // This state is reported upon a successful callback of prepare().
- console.info(TAG + 'state prepared called')
- this.avPlayer.loop = true // Set the AVPlayer to loop a single item. The endOfStream callback is triggered when the previous round of the playback is complete.
- this.avPlayer.play() // Call play() to start playback.
- break;
- case 'playing': // This state is reported upon a successful callback of play().
- console.info(TAG + 'state playing called')
- break;
- case 'paused': // This state is reported upon a successful callback of pause().
- console.info(TAG + 'state paused called')
- break;
- case 'completed': // This state is reported upon the completion of the playback.
- console.info(TAG + 'state completed called')
-          // The completed state is reported only after loop playback is canceled in the endOfStream callback below and the current round of playback finishes.
- this.avPlayer.stop() // Call stop() to stop the playback.
- break;
- case 'stopped': // This state is reported upon a successful callback of stop().
- console.info(TAG + 'state stopped called')
-          this.avPlayer.release() // Call release() to release the AVPlayer instance.
- break;
- case 'released':
- console.info(TAG + 'state released called')
- break;
- case 'error':
- console.info(TAG + 'state error called')
- break;
- default:
-          console.info(TAG + 'unknown state :' + state)
- break;
- }
- })
- // Callback function for the video playback completion event.
- this.avPlayer.on('endOfStream', () => {
- console.info(TAG + 'endOfStream success')
- if (this.count == 1) {
- this.avPlayer.loop = false // Cancel loop playback.
- } else {
- this.count++
- }
- })
- }
-
- async avPlayerDemo() {
- // Create an AVPlayer instance.
- this.avPlayer = await media.createAVPlayer()
- let fdPath = 'fd://'
- let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements.
- // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\H264_AAC.mp4 /data/app/el2/100/base/ohos.acts.multimedia.media.avplayer/haps/entry/files" command.
- let path = pathDir + '/H264_AAC.mp4'
- let file = await fs.open(path)
- fdPath = fdPath + '' + file.fd
- this.avPlayer.url = fdPath
- }
-}
-```
-
-### Switching to the Next Video Clip
-
-```js
-import media from '@ohos.multimedia.media'
-import fs from '@ohos.file.fs'
-
-const TAG = 'AVPlayerDemo:'
-export class AVPlayerDemo {
- private count:number = 0
- private avPlayer
- private surfaceID:string // The surfaceID parameter is used for screen display. Its value is obtained through the XComponent API.
-
- async nextVideo() {
- let fdPath = 'fd://'
- let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements.
- // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\H264_MP3.mp4 /data/app/el2/100/base/ohos.acts.multimedia.media.avplayer/haps/entry/files" command.
- let path = pathDir + '/H264_MP3.mp4'
- let file = await fs.open(path)
- fdPath = fdPath + '' + file.fd
- this.avPlayer.url = fdPath // The initialized state is reported again.
- }
-
- // Set AVPlayer callback functions.
- setAVPlayerCallback() {
- // Callback function for state changes.
- this.avPlayer.on('stateChange', async (state, reason) => {
- switch (state) {
- case 'idle': // This state is reported upon a successful callback of reset().
- console.info(TAG + 'state idle called')
- await this.nextVideo() // Switch to the next video.
- break;
- case 'initialized': // This state is reported when the AVPlayer sets the playback source.
- console.info(TAG + 'state initialized called ')
- this.avPlayer.surfaceId = this.surfaceID // Set the image to be displayed. This setting is not required when a pure audio resource is to be played.
- this.avPlayer.prepare().then(() => {
-            console.info(TAG + 'prepare success');
-          }, (err) => {
-            console.error(TAG + 'prepare failed, error message is :' + err.message)
- })
- break;
- case 'prepared': // This state is reported upon a successful callback of prepare().
- console.info(TAG + 'state prepared called')
- this.avPlayer.play() // Call play() to start playback.
- break;
- case 'playing': // This state is reported upon a successful callback of play().
- console.info(TAG + 'state playing called')
- break;
- case 'paused': // This state is reported upon a successful callback of pause().
- console.info(TAG + 'state paused called')
- break;
- case 'completed': // This state is reported upon the completion of the playback.
- console.info(TAG + 'state completed called')
- if (this.count == 0) {
- this.count++
- this.avPlayer.reset() // Call reset() to prepare for switching to the next video.
- } else {
- this.avPlayer.release() // Release the AVPlayer instance when the new video finishes playing.
- }
- break;
- case 'stopped': // This state is reported upon a successful callback of stop().
- console.info(TAG + 'state stopped called')
- break;
- case 'released':
- console.info(TAG + 'state released called')
- break;
- case 'error':
- console.info(TAG + 'state error called')
- break;
- default:
-          console.info(TAG + 'unknown state :' + state)
- break;
- }
- })
- }
-
- async avPlayerDemo() {
- // Create an AVPlayer instance.
- this.avPlayer = await media.createAVPlayer()
- let fdPath = 'fd://'
- let pathDir = "/data/storage/el2/base/haps/entry/files" // The path used here is an example. Obtain the path based on project requirements.
- // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\H264_AAC.mp4 /data/app/el2/100/base/ohos.acts.multimedia.media.avplayer/haps/entry/files" command.
- let path = pathDir + '/H264_AAC.mp4'
- let file = await fs.open(path)
- fdPath = fdPath + '' + file.fd
- this.avPlayer.url = fdPath
- }
-}
-```
diff --git a/en/application-dev/media/avrecorder.md b/en/application-dev/media/avrecorder.md
deleted file mode 100644
index 9214df032d7d060cabe9900e8a0d5ab6e7aa12f9..0000000000000000000000000000000000000000
--- a/en/application-dev/media/avrecorder.md
+++ /dev/null
@@ -1,488 +0,0 @@
-# AVRecorder Development
-
-## Introduction
-
-The AVRecorder captures audio signals, receives video signals, encodes audio and video signals, and saves them to files. With the AVRecorder, you can easily implement audio and video recording, including starting, pausing, resuming, and stopping recording, and releasing resources. You can also specify parameters such as the encoding format, encapsulation format, and file path for recording.
-
-## Working Principles
-
-The following figures show the AVRecorder state transition and the interaction with external modules for audio and video recording.
-
-**Figure 1** AVRecorder state transition
-
-**Figure 2** Interaction between external modules for audio and video recording
-
-**NOTE**: During audio recording, the framework layer calls the audio subsystem through the media service of the native framework to capture audio data through the audio HDI, encodes and encapsulates the data in software, and saves it to a file. During video recording, the camera subsystem captures image data through the video HDI, and the media service encodes the image data through the video encoding HDI and encapsulates it into a file. With the AVRecorder, you can implement pure audio recording, pure video recording, and combined audio and video recording.
-
-## Constraints
-
-Before developing the recording feature, configure permissions for your application. If audio recording is involved, obtain the permission **ohos.permission.MICROPHONE** by following the instructions provided in [Permission Application Guide](../security/accesstoken-guidelines.md).
-
-To use the camera to record videos, the camera module is required. For details about how to use the APIs and obtain permissions, see [Camera Management](../reference/apis/js-apis-camera.md).
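-
-For a user-granted permission such as **ohos.permission.MICROPHONE**, declaring it is not enough; the application must also request the grant at runtime. A minimal sketch, assuming a stage-model ability context, is shown below.
-
-```js
-import abilityAccessCtrl from '@ohos.abilityAccessCtrl';
-
-// Minimal sketch (assumes a stage-model ability context): request the microphone permission at runtime.
-async function requestMicrophonePermission(context) {
-  let atManager = abilityAccessCtrl.createAtManager();
-  try {
-    let result = await atManager.requestPermissionsFromUser(context, ['ohos.permission.MICROPHONE']);
-    // An entry of 0 in authResults means that the corresponding permission was granted.
-    return result.authResults.every((status) => status === 0);
-  } catch (err) {
-    console.error('requestPermissionsFromUser failed, message is ' + err.message);
-    return false;
-  }
-}
-```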
-
-## How to Develop
-
-For details about the AVRecorder APIs, see the [AVRecorder APIs in the Media Class](../reference/apis/js-apis-media.md#avrecorder9).
-
-For details about the processes related to the media library, see [Media Library Management](../reference/apis/js-apis-medialibrary.md).
-
-For details about the camera-related process, see [Camera Management](../reference/apis/js-apis-camera.md).
-
-### Full-Process Scenario of Audio and Video Recording
-
-The full audio and video recording process includes creating an instance, setting recording parameters, obtaining the input surface, starting, pausing, resuming, and stopping recording, and releasing resources.
-
-The value range that can be set for the audio recording parameters is restricted by the codec performance of the device and the performance of the audio subsystem.
-
-The value range that can be set for the video recording parameters is restricted by the codec performance of the device and the performance of the camera subsystem.
-
-```js
-import media from '@ohos.multimedia.media'
-import camera from '@ohos.multimedia.camera'
-import mediaLibrary from '@ohos.multimedia.mediaLibrary'
-
-export class AVRecorderDemo {
- private testFdNumber; // Used to save the File Descriptor (FD) address.
-
- // Obtain the FD corresponding to fileName of the recorded file. The media library capability is required. To use the media library, configure the following permissions: ohos.permission.MEDIA_LOCATION, ohos.permission.WRITE_MEDIA, and ohos.permission.READ_MEDIA.
- async getFd(fileName) {
-    // For details about the implementation mode, see the media library documentation.
-    // fdNumber below stands for the FD obtained through the media library.
-    this.testFdNumber = "fd://" + fdNumber.toString(); // e.g. fd://54
- }
-
- // Error callback triggered in the case of an error in the promise mode.
- failureCallback(error) {
- console.info('error happened, error message is ' + error.message);
- }
-
- // Error callback triggered in the case of an exception in the promise mode.
- catchCallback(error) {
- console.info('catch error happened, error message is ' + error.message);
- }
-
- async AVRecorderDemo() {
- let AVRecorder; // Assign a value to the empty AVRecorder instance upon a successful call of createAVRecorder().
- let surfaceID; // The surface ID is obtained by calling getInputSurface and transferred to the videoOutput object of the camera.
- await this.getFd('01.mp4');
-
- // Configure the parameters related to audio and video recording based on those supported by the hardware device.
- let avProfile = {
- audioBitrate : 48000,
- audioChannels : 2,
- audioCodec : media.CodecMimeType.AUDIO_AAC,
- audioSampleRate : 48000,
- fileFormat : media.ContainerFormatType.CFT_MPEG_4,
- videoBitrate : 2000000,
- videoCodec : media.CodecMimeType.VIDEO_MPEG4,
- videoFrameWidth : 640,
- videoFrameHeight : 480,
- videoFrameRate : 30
- }
- let avConfig = {
- audioSourceType : media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC,
- videoSourceType : media.VideoSourceType.VIDEO_SOURCE_TYPE_SURFACE_YUV,
- profile : avProfile,
-      url : this.testFdNumber, // Set by getFd(); the FD URL of the output file.
- rotation : 0,
- location : { latitude : 30, longitude : 130 }
- }
-
- // Create an AVRecorder instance.
- await media.createAVRecorder().then((recorder) => {
- console.info('case createAVRecorder called');
- if (typeof (recorder) != 'undefined') {
- AVRecorder = recorder;
- console.info('createAVRecorder success');
- } else {
- console.info('createAVRecorder failed');
- }
- }, this.failureCallback).catch(this.catchCallback);
-
- // After the instance is created, use the on('stateChange') and on('error') callbacks to listen for state changes and errors.
- AVRecorder.on('stateChange', async (state, reason) => {
- console.info('case state has changed, new state is :' + state);
- switch (state) {
-        // You can set the desired behavior in different states as required.
-        case 'idle':
-          // This state is reported upon a successful call of reset() or create().
- break;
- case 'prepared':
- // This state is reported upon a successful call of prepare().
- break;
- case 'started':
- // This state is reported upon a successful call of start().
- break;
- case 'paused':
- // This state is reported upon a successful call of pause().
- break;
- case 'stopped':
- // This state is reported upon a successful call of stop().
- break;
- case 'released':
- // This state is reported upon a successful call of release().
- break;
- case 'error':
- // The error state indicates that an error occurs at the bottom layer. You must rectify the fault and create an AVRecorder instance again.
- break;
- default:
- console.info('case state is unknown');
- }
- });
- AVRecorder.on('error', (err) => {
- // Listen for non-interface errors.
- console.info('case avRecorder.on(error) called, errMessage is ' + err.message);
- });
-
- // Call prepare() to prepare for recording. The bottom layer determines whether to record audio, video, or audio and video based on the input parameters of prepare().
- await AVRecorder.prepare(avConfig).then(() => {
- console.info('prepare success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // If video recording is involved, call getInputSurface to obtain the input surface and pass the returned surface ID to the related camera API.
- await AVRecorder.getInputSurface().then((surface) => {
- console.info('getInputSurface success');
- surfaceID = surface; // The surfaceID is passed into createVideoOutput() of the camera as an input parameter.
- }, this.failureCallback).catch(this.catchCallback);
-
- // Video recording depends on camera-related APIs. The following operations can be performed only after the video output start API is invoked.
- // Start video recording.
- await AVRecorder.start().then(() => {
- console.info('start success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Pause video recording before the video output stop API of the camera is invoked.
- await AVRecorder.pause().then(() => {
- console.info('pause success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Resume video recording after the video output start API of the camera is invoked.
- await AVRecorder.resume().then(() => {
- console.info('resume success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Stop video recording after the video output stop API of the camera is invoked.
- await AVRecorder.stop().then(() => {
- console.info('stop success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Reset the recording configuration.
- await AVRecorder.reset().then(() => {
- console.info('reset success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Disable the listeners. The configured callbacks will be invalid after release() is invoked, even if you do not call off().
- AVRecorder.off('stateChange');
- AVRecorder.off('error');
-
- // Release the video recording resources and camera object resources.
- await AVRecorder.release().then(() => {
- console.info('release success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Set the AVRecorder instance to null.
- AVRecorder = undefined;
- surfaceID = undefined;
- }
-}
-```
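-
-The **getFd()** stub above leaves the media library interaction to the reader. One possible sketch, assuming that the media library permissions listed in the comment are granted and that an ability context has been saved globally, is shown below; the function name and context variable are illustrative.
-
-```js
-import mediaLibrary from '@ohos.multimedia.mediaLibrary'
-
-// Illustrative sketch: create a media asset and open it to obtain an FD such as fd://54.
-async function getVideoFd(fileName) {
-  const media = mediaLibrary.getMediaLibrary(globalThis.abilityContext); // Assumes a saved ability context.
-  const mediaType = mediaLibrary.MediaType.VIDEO;
-  const publicPath = await media.getPublicDirectory(mediaLibrary.DirectoryType.DIR_VIDEO);
-  const asset = await media.createAsset(mediaType, fileName, publicPath);
-  const fdNumber = await asset.open('rw'); // Open in read/write mode for recording.
-  return 'fd://' + fdNumber.toString();
-}
-```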
-
-### Full-Process Scenario of Pure Audio Recording
-
-The full audio recording process includes creating an instance, setting recording parameters, starting, pausing, resuming, and stopping recording, and releasing resources.
-
-The value range that can be set for the audio recording parameters is restricted by the codec performance of the device and the performance of the audio subsystem.
-
-```js
-import media from '@ohos.multimedia.media'
-import mediaLibrary from '@ohos.multimedia.mediaLibrary'
-
-export class AudioRecorderDemo {
- private testFdNumber; // Used to save the FD address.
-
- // Obtain the FD corresponding to fileName of the recorded file. The media library capability is required. To use the media library, configure the following permissions: ohos.permission.MEDIA_LOCATION, ohos.permission.WRITE_MEDIA, and ohos.permission.READ_MEDIA.
- async getFd(fileName) {
-    // For details about the implementation mode, see the media library documentation.
-    // fdNumber below stands for the FD obtained through the media library.
-    this.testFdNumber = "fd://" + fdNumber.toString(); // e.g. fd://54
- }
-
- // Error callback triggered in the case of an error in the promise mode.
- failureCallback(error) {
- console.info('error happened, error message is ' + error.message);
- }
-
- // Error callback triggered in the case of an exception in the promise mode.
- catchCallback(error) {
- console.info('catch error happened, error message is ' + error.message);
- }
-
- async audioRecorderDemo() {
- let audioRecorder; // Assign a value to the empty AudioRecorder instance upon a successful call of createAVRecorder().
- await this.getFd('01.m4a');
- // Configure the parameters related to audio recording.
- let audioProfile = {
- audioBitrate : 48000,
- audioChannels : 2,
- audioCodec : media.CodecMimeType.AUDIO_AAC,
- audioSampleRate : 48000,
- fileFormat : media.ContainerFormatType.CFT_MPEG_4,
- }
- let audioConfig = {
- audioSourceType : media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC,
- profile : audioProfile,
- url : this.testFdNumber,
- rotation : 0,
- location : { latitude : 30, longitude : 130 }
- }
-
- // Create an AudioRecorder instance.
- await media.createAVRecorder().then((recorder) => {
- console.info('case createAVRecorder called');
- if (typeof (recorder) != 'undefined') {
- audioRecorder = recorder;
- console.info('createAudioRecorder success');
- } else {
- console.info('createAudioRecorder failed');
- }
- }, this.failureCallback).catch(this.catchCallback);
-
- // After the instance is created, use the on('stateChange') and on('error') callbacks to listen for state changes and errors.
- audioRecorder.on('stateChange', async (state, reason) => {
- console.info('case state has changed, new state is :' + state);
- switch (state) {
-        // You can set the desired behavior in different states as required.
-        case 'idle':
-          // This state is reported upon a successful call of reset() or create().
- break;
- case 'prepared':
- // This state is reported upon a successful call of prepare().
- break;
- case 'started':
- // This state is reported upon a successful call of start().
- break;
- case 'paused':
- // This state is reported upon a successful call of pause().
- break;
- case 'stopped':
- // This state is reported upon a successful call of stop().
- break;
- case 'released':
- // This state is reported upon a successful call of release().
- break;
- case 'error':
- // The error state indicates that an error occurs at the bottom layer. You must rectify the fault and create an AudioRecorder instance again.
- break;
- default:
- console.info('case state is unknown');
- }
- });
- audioRecorder.on('error', (err) => {
- // Listen for non-interface errors.
- console.info('case avRecorder.on(error) called, errMessage is ' + err.message);
- });
-
- // Call prepare() to prepare for recording. The bottom layer determines whether to record audio, video, or audio and video based on the input parameters of prepare().
- await audioRecorder.prepare(audioConfig).then(() => {
- console.info('prepare success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Call start() to start audio recording.
- await audioRecorder.start().then(() => {
- console.info('start success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Call pause() to pause audio recording.
- await audioRecorder.pause().then(() => {
- console.info('pause success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Call resume() to resume audio recording.
- await audioRecorder.resume().then(() => {
- console.info('resume success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Call stop() to stop audio recording.
- await audioRecorder.stop().then(() => {
- console.info('stop success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Call reset() to reset the recording configuration.
- await audioRecorder.reset().then(() => {
- console.info('reset success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Disable the listeners. The configured callbacks will be invalid after release() is invoked, even if you do not call off().
-    audioRecorder.off('stateChange');
-    audioRecorder.off('error');
-
- // Call release() to release audio recording resources.
- await audioRecorder.release().then(() => {
- console.info('release success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Set the AudioRecorder instance to null.
- audioRecorder = undefined;
- }
-}
-
-```
-
-### Full-Process Scenario of Pure Video Recording
-
-The full video recording process includes creating an instance, setting recording parameters, obtaining the input surface, starting, pausing, resuming, and stopping recording, and releasing resources.
-
-The value range that can be set for the video recording parameters is restricted by the codec performance of the device and the performance of the camera subsystem.
-
-```js
-import media from '@ohos.multimedia.media'
-import camera from '@ohos.multimedia.camera'
-import mediaLibrary from '@ohos.multimedia.mediaLibrary'
-
-export class VideoRecorderDemo {
- private testFdNumber; // Used to save the FD address.
-
- // Obtain the FD corresponding to fileName of the recorded file. The media library capability is required. To use the media library, configure the following permissions: ohos.permission.MEDIA_LOCATION, ohos.permission.WRITE_MEDIA, and ohos.permission.READ_MEDIA.
- async getFd(fileName) {
-    // For details about the implementation mode, see the media library documentation.
-    // fdNumber below stands for the FD obtained through the media library.
-    this.testFdNumber = "fd://" + fdNumber.toString(); // e.g. fd://54
- }
-
- // Error callback triggered in the case of an error in the promise mode.
- failureCallback(error) {
- console.info('error happened, error message is ' + error.message);
- }
-
- // Error callback triggered in the case of an exception in the promise mode.
- catchCallback(error) {
- console.info('catch error happened, error message is ' + error.message);
- }
-
- async videoRecorderDemo() {
- let videoRecorder; // Assign a value to the empty VideoRecorder instance upon a successful call of createAVRecorder().
- let surfaceID; // The surface ID is obtained by calling getInputSurface and transferred to the videoOutput object of the camera.
- await this.getFd('01.mp4');
-
- // Configure the parameters related to pure video recording based on those supported by the hardware device.
- let videoProfile = {
- fileFormat : media.ContainerFormatType.CFT_MPEG_4,
- videoBitrate : 2000000,
- videoCodec : media.CodecMimeType.VIDEO_MPEG4,
- videoFrameWidth : 640,
- videoFrameHeight : 480,
- videoFrameRate : 30
- }
- let videoConfig = {
- videoSourceType : media.VideoSourceType.VIDEO_SOURCE_TYPE_SURFACE_YUV,
- profile : videoProfile,
-      url : this.testFdNumber, // Set by getFd(); the FD URL of the output file.
- rotation : 0,
- location : { latitude : 30, longitude : 130 }
- }
-
- // Create a VideoRecorder instance.
- await media.createAVRecorder().then((recorder) => {
- console.info('case createVideoRecorder called');
- if (typeof (recorder) != 'undefined') {
- videoRecorder = recorder;
- console.info('createVideoRecorder success');
- } else {
- console.info('createVideoRecorder failed');
- }
- }, this.failureCallback).catch(this.catchCallback);
-
- // After the instance is created, use the on('stateChange') and on('error') callbacks to listen for state changes and errors.
- videoRecorder.on('stateChange', async (state, reason) => {
- console.info('case state has changed, new state is :' + state);
- switch (state) {
-        // You can set the desired behavior in different states as required.
-        case 'idle':
-          // This state is reported upon a successful call of reset() or create().
- break;
- case 'prepared':
- // This state is reported upon a successful call of prepare().
- break;
- case 'started':
- // This state is reported upon a successful call of start().
- break;
- case 'paused':
- // This state is reported upon a successful call of pause().
- break;
- case 'stopped':
- // This state is reported upon a successful call of stop().
- break;
- case 'released':
- // This state is reported upon a successful call of release().
- break;
- case 'error':
- // The error state indicates that an error occurs at the bottom layer. You must rectify the fault and create a VideoRecorder instance again.
- break;
- default:
- console.info('case state is unknown');
- }
- });
- videoRecorder.on('error', (err) => {
- // Listen for non-interface errors.
- console.info('case avRecorder.on(error) called, errMessage is ' + err.message);
- });
-
- // Call prepare() to prepare for recording. The bottom layer determines whether to record audio, video, or audio and video based on the input parameters of prepare().
- await videoRecorder.prepare(videoConfig).then(() => {
- console.info('prepare success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // If video recording is involved, call getInputSurface to obtain the input surface and pass the returned surface ID to the related camera API.
- await videoRecorder.getInputSurface().then((surface) => {
- console.info('getInputSurface success');
- surfaceID = surface; // The surfaceID is passed into createVideoOutput() of the camera as an input parameter.
- }, this.failureCallback).catch(this.catchCallback);
-
- // Video recording depends on camera-related APIs. The following operations can be performed only after the video output start API is invoked.
- // Start video recording.
- await videoRecorder.start().then(() => {
- console.info('start success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Pause video recording before the video output stop API of the camera is invoked.
- await videoRecorder.pause().then(() => {
- console.info('pause success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Resume video recording after the video output start API of the camera is invoked.
- await videoRecorder.resume().then(() => {
- console.info('resume success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Stop video recording after the video output stop API of the camera is invoked.
- await videoRecorder.stop().then(() => {
- console.info('stop success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Reset the recording configuration.
- await videoRecorder.reset().then(() => {
- console.info('reset success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Disable the listeners. The configured callbacks will be invalid after release() is invoked, even if you do not call off().
- videoRecorder.off('stateChange');
- videoRecorder.off('error');
-
- // Release the video recording resources and camera object resources.
- await videoRecorder.release().then(() => {
- console.info('release success');
- }, this.failureCallback).catch(this.catchCallback);
-
- // Set the VideoRecorder instance to null.
- videoRecorder = undefined;
- surfaceID = undefined;
- }
-}
-```
-
-### AVRecorder App
-
-The AVRecorder app provides a complete audio and video recording process, which includes creating an instance, setting recording parameters, obtaining the input surface, starting, pausing, resuming, and stopping recording, and releasing resources.
-
-For details about the code, see [AVRecorderDemo](https://gitee.com/openharmony/multimedia_player_framework/tree/master/test/appdemo/AVRecorderDemo) in the multimedia_player_framework repository (implementation of media playback and recording).
diff --git a/en/application-dev/media/avsession-guidelines.md b/en/application-dev/media/avsession-guidelines.md
deleted file mode 100644
index 3d1ac479f0f358c42778e60a0d4b47edafe0a0cd..0000000000000000000000000000000000000000
--- a/en/application-dev/media/avsession-guidelines.md
+++ /dev/null
@@ -1,633 +0,0 @@
-# AVSession Development
-
-> **NOTE**
->
-> All APIs of the **AVSession** module are system APIs and can be called only by system applications.
-
-## Development for the Session Access End
-
-### Basic Concepts
-- **AVMetadata**: media data-related attributes, including the IDs of the current, previous, and next media assets, as well as the title, author, album, writer, and duration.
-- **AVSessionDescriptor**: descriptor about a media session, including the session ID, session type (audio/video), custom session name (**sessionTag**), and information about the corresponding application (**elementName**).
-- **AVPlaybackState**: information related to the media playback state, including the playback state, position, speed, buffered time, loop mode, and whether the media asset is favorited (**isFavorite**).
-
-### Available APIs
-The table below lists the APIs available for the development of the session access end. These APIs return results through either a callback or a promise; the callback-based versions are listed below, and their promise-based counterparts provide the same functions. For details, see [AVSession Management](../reference/apis/js-apis-avsession.md).
-
-Table 1 Common APIs for session access end development
-
-| API | Description |
-|----------------------------------------------------------------------------------|-------------|
-| createAVSession(context: Context, tag: string, type: AVSessionType, callback: AsyncCallback\<AVSession\>): void | Creates a session.|
-| setAVMetadata(data: AVMetadata, callback: AsyncCallback\<void\>): void | Sets session metadata. |
-| setAVPlaybackState(state: AVPlaybackState, callback: AsyncCallback\<void\>): void | Sets the playback state information. |
-| setLaunchAbility(ability: WantAgent, callback: AsyncCallback\<void\>): void | Sets the launcher ability.|
-| getController(callback: AsyncCallback\<AVSessionController\>): void | Obtains the controller of this session.|
-| getOutputDevice(callback: AsyncCallback\<OutputDeviceInfo\>): void | Obtains the output device information. |
-| activate(callback: AsyncCallback\<void\>): void | Activates this session. |
-| destroy(callback: AsyncCallback\<void\>): void | Destroys this session. |
-
-### How to Develop
-1. Import the modules.
-
-```js
-import avSession from '@ohos.multimedia.avsession';
-import wantAgent from '@ohos.app.ability.wantAgent';
-import featureAbility from '@ohos.ability.featureAbility';
-```
-
-2. Create and activate a session.
-```js
-// Define global variables.
-let mediaFavorite = false;
-let currentSession = null;
-let context = featureAbility.getContext();
-
-// Create an audio session.
-avSession.createAVSession(context, "AudioAppSample", 'audio').then((session) => {
- currentSession = session;
- currentSession.activate(); // Activate the session.
-}).catch((err) => {
- console.info(`createAVSession : ERROR : ${err.message}`);
-});
-```
-
-3. Set the session information, including:
-- Session metadata. In addition to the current media asset ID (mandatory), you can set the title, album, author, duration, and previous/next media asset ID. For details about the session metadata, see **AVMetadata** in the API document.
-- Launcher ability, which is implemented by calling an API of [WantAgent](../reference/apis/js-apis-app-ability-wantAgent.md). Generally, **WantAgent** is used to encapsulate want information.
-- Playback state information.
-```js
-// Set the session metadata.
-let metadata = {
- assetId: "121278",
- title: "lose yourself",
- artist: "Eminem",
- author: "ST",
- album: "Slim shady",
- writer: "ST",
- composer: "ST",
- duration: 2222,
- mediaImage: "https://www.example.com/example.jpg", // Set it based on your project requirements.
- subtitle: "8 Mile",
- description: "Rap",
- lyric: "https://www.example.com/example.lrc", // Set it based on your project requirements.
- previousAssetId: "121277",
- nextAssetId: "121279",
-};
-currentSession.setAVMetadata(metadata).then(() => {
- console.info('setAVMetadata successfully');
-}).catch((err) => {
- console.info(`setAVMetadata : ERROR : ${err.message}`);
-});
-```
-
-```js
-// Set the launcher ability.
-let wantAgentInfo = {
- wants: [
- {
- bundleName: "com.neu.setResultOnAbilityResultTest1",
- abilityName: "com.example.test.EntryAbility",
- }
- ],
- operationType: wantAgent.OperationType.START_ABILITIES,
- requestCode: 0,
- wantAgentFlags:[wantAgent.WantAgentFlags.UPDATE_PRESENT_FLAG]
-}
-
-wantAgent.getWantAgent(wantAgentInfo).then((agent) => {
- currentSession.setLaunchAbility(agent).then(() => {
- console.info('setLaunchAbility successfully');
- }).catch((err) => {
- console.info(`setLaunchAbility : ERROR : ${err.message}`);
- });
-});
-```
-
-```js
-// Set the playback state information.
-let PlaybackState = {
- state: avSession.PlaybackState.PLAYBACK_STATE_STOP,
- speed: 1.0,
- position:{elapsedTime: 0, updateTime: (new Date()).getTime()},
- bufferedTime: 1000,
- loopMode: avSession.LoopMode.LOOP_MODE_SEQUENCE,
- isFavorite: false,
-};
-currentSession.setAVPlaybackState(PlaybackState).then(() => {
- console.info('setAVPlaybackState successfully');
-}).catch((err) => {
- console.info(`setAVPlaybackState : ERROR : ${err.message}`);
-});
-```
-
-```js
-// Obtain the controller of this session.
-currentSession.getController().then((selfController) => {
- console.info('getController successfully');
-}).catch((err) => {
- console.info(`getController : ERROR : ${err.message}`);
-});
-```
-
-```js
-// Obtain the output device information.
-currentSession.getOutputDevice().then((outputInfo) => {
- console.info(`getOutputDevice successfully, deviceName : ${outputInfo.deviceName}`);
-}).catch((err) => {
- console.info(`getOutputDevice : ERROR : ${err.message}`);
-});
-```
-
-4. Subscribe to control command events.
-```js
-// Subscribe to the 'play' command event.
-currentSession.on('play', () => {
- console.log ("Call AudioPlayer.play.");
- // Set the playback state information.
- currentSession.setAVPlaybackState({state: avSession.PlaybackState.PLAYBACK_STATE_PLAY}).then(() => {
- console.info('setAVPlaybackState successfully');
- }).catch((err) => {
- console.info(`setAVPlaybackState : ERROR : ${err.message}`);
- });
-});
-
-
-// Subscribe to the 'pause' command event.
-currentSession.on('pause', () => {
- console.log ("Call AudioPlayer.pause.");
- // Set the playback state information.
- currentSession.setAVPlaybackState({state: avSession.PlaybackState.PLAYBACK_STATE_PAUSE}).then(() => {
- console.info('setAVPlaybackState successfully');
- }).catch((err) => {
- console.info(`setAVPlaybackState : ERROR : ${err.message}`);
- });
-});
-
-// Subscribe to the 'stop' command event.
-currentSession.on('stop', () => {
- console.log ("Call AudioPlayer.stop.");
- // Set the playback state information.
- currentSession.setAVPlaybackState({state: avSession.PlaybackState.PLAYBACK_STATE_STOP}).then(() => {
- console.info('setAVPlaybackState successfully');
- }).catch((err) => {
- console.info(`setAVPlaybackState : ERROR : ${err.message}`);
- });
-});
-
-// Subscribe to the 'playNext' command event.
-currentSession.on('playNext', () => {
- // When the media file is not ready, download and cache the media file, and set the 'PREPARE' state.
- currentSession.setAVPlaybackState({state: avSession.PlaybackState.PLAYBACK_STATE_PREPARE}).then(() => {
- console.info('setAVPlaybackState successfully');
- }).catch((err) => {
- console.info(`setAVPlaybackState : ERROR : ${err.message}`);
- });
- // The media file is obtained.
- currentSession.setAVMetadata({assetId: '58970105', title: 'See you tomorrow'}).then(() => {
- console.info('setAVMetadata successfully');
- }).catch((err) => {
- console.info(`setAVMetadata : ERROR : ${err.message}`);
- });
- console.log ("Call AudioPlayer.play.");
- // Set the playback state information.
- let time = (new Date()).getTime();
- currentSession.setAVPlaybackState({state: avSession.PlaybackState.PLAYBACK_STATE_PLAY, position: {elapsedTime: 0, updateTime: time}, bufferedTime:2000}).then(() => {
- console.info('setAVPlaybackState successfully');
- }).catch((err) => {
- console.info(`setAVPlaybackState : ERROR : ${err.message}`);
- });
-});
-
-// Subscribe to the 'fastForward' command event.
-currentSession.on('fastForward', () => {
- console.log("Call AudioPlayer for fast forwarding.");
- // Set the playback state information.
- currentSession.setAVPlaybackState({speed: 2.0}).then(() => {
- console.info('setAVPlaybackState successfully');
- }).catch((err) => {
- console.info(`setAVPlaybackState : ERROR : ${err.message}`);
- });
-});
-
-// Subscribe to the 'seek' command event.
-currentSession.on('seek', (time) => {
- console.log("Call AudioPlayer.seek.");
- // Set the playback state information.
-  currentSession.setAVPlaybackState({position: {elapsedTime: time, updateTime: (new Date()).getTime()}}).then(() => {
- console.info('setAVPlaybackState successfully');
- }).catch((err) => {
- console.info(`setAVPlaybackState : ERROR : ${err.message}`);
- });
-});
-
-// Subscribe to the 'setSpeed' command event.
-currentSession.on('setSpeed', (speed) => {
- console.log(`Call AudioPlayer to set the speed to ${speed}`);
- // Set the playback state information.
- currentSession.setAVPlaybackState({speed: speed}).then(() => {
- console.info('setAVPlaybackState successfully');
- }).catch((err) => {
- console.info(`setAVPlaybackState : ERROR : ${err.message}`);
- });
-});
-
-// Subscribe to the 'setLoopMode' command event.
-currentSession.on('setLoopMode', (mode) => {
- console.log(`The application switches to the loop mode ${mode}`);
- // Set the playback state information.
- currentSession.setAVPlaybackState({loopMode: mode}).then(() => {
- console.info('setAVPlaybackState successfully');
- }).catch((err) => {
- console.info(`setAVPlaybackState : ERROR : ${err.message}`);
- });
-});
-
-// Subscribe to the 'toggleFavorite' command event.
-currentSession.on('toggleFavorite', (assetId) => {
- console.log(`The application favorites ${assetId}.`);
-  // Toggle based on the last status.
-  let favorite = !mediaFavorite;
- currentSession.setAVPlaybackState({isFavorite: favorite}).then(() => {
- console.info('setAVPlaybackState successfully');
- }).catch((err) => {
- console.info(`setAVPlaybackState : ERROR : ${err.message}`);
- });
- mediaFavorite = favorite;
-});
-
-// Subscribe to the key event.
-currentSession.on('handleKeyEvent', (event) => {
- console.log(`User presses the key ${event.keyCode}`);
-});
-
-// Subscribe to output device changes.
-currentSession.on('outputDeviceChange', (device) => {
- console.log(`Output device changed to ${device.deviceName}`);
-});
-```
-
-5. Release resources.
-```js
-// Unsubscribe from the events.
-currentSession.off('play');
-currentSession.off('pause');
-currentSession.off('stop');
-currentSession.off('playNext');
-currentSession.off('playPrevious');
-currentSession.off('fastForward');
-currentSession.off('rewind');
-currentSession.off('seek');
-currentSession.off('setSpeed');
-currentSession.off('setLoopMode');
-currentSession.off('toggleFavorite');
-currentSession.off('handleKeyEvent');
-currentSession.off('outputDeviceChange');
-
-// Deactivate the session and destroy the object.
-currentSession.deactivate().then(() => {
- currentSession.destroy();
-});
-```
-
-### Verification
-Touch the play, pause, or next button on the media application. Check whether the media playback state changes accordingly.
-
-### FAQs
-
-1. Session Service Exception
-- Symptoms
-
- The session service is abnormal, and the application cannot obtain a response from the session service. For example, the session service is not running or the communication with the session service fails. The error message "Session service exception" is displayed.
-
-- Possible causes
-
- The session service is killed during session restart.
-
-- Solution
-
- (1) The system retries the operation automatically. If the error persists for 3 seconds or more, stop the operation on the session or controller.
-
- (2) Destroy the current session or session controller and re-create it. If the re-creation fails, stop the operation on the session.
-
-2. Session Does Not Exist
-- Symptoms
-
- Parameters are set for or commands are sent to the session that does not exist. The error message "The session does not exist" is displayed.
-
-- Possible causes
-
- The session has been destroyed, and no session record exists on the server.
-
-- Solution
-
- (1) If the error occurs on the application, re-create the session. If the error occurs on Media Controller, stop sending query or control commands to the session.
-
- (2) If the error occurs on the session service, query the current session record and pass the correct session ID when creating the controller.
-
-3. Session Not Activated
-- Symptoms
-
- A control command or event is sent to the session when it is not activated. The error message "The session not active" is displayed.
-
-- Possible causes
-
- The session is in the inactive state.
-
-- Solution
-
- Stop sending the command or event. Subscribe to the session activation status, and resume the sending when the session is activated.
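-
-  In code, this can mean caching the rejected command and resending it from the **activeStateChange** callback. A minimal sketch, assuming an existing **controller** instance, is shown below.
-
-```js
-// Minimal sketch (controller is assumed to exist): resend a cached control command once the session becomes active.
-let pendingCommand = {command: 'play'}; // A command that previously failed with "The session not active".
-controller.on('activeStateChange', (isActive) => {
-  if (isActive && pendingCommand != null) {
-    controller.sendControlCommand(pendingCommand).then(() => {
-      pendingCommand = null;
-    }).catch((err) => {
-      console.info(`sendControlCommand : ERROR : ${err.message}`);
-    });
-  }
-});
-```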
-
-## Development for the Session Control End (Media Controller)
-
-### Basic Concepts
-- Remote projection: A local media session is projected to a remote device. The local controller sends commands to control media playback on the remote device.
-- Sending key events: The controller controls media playback by sending key events.
-- Sending control commands: The controller controls media playback by sending control commands.
-- Sending system key events: A system application calls APIs to send system key events to control media playback.
-- Sending system control commands: A system application calls APIs to send system control commands to control media playback.
-
-### Available APIs
-
-The table below lists the APIs available for the development of the session control end. These APIs return results through either a callback or a promise; the callback-based versions are listed below, and their promise-based counterparts provide the same functions. For details, see [AVSession Management](../reference/apis/js-apis-avsession.md).
-
-Table 2 Common APIs for session control end development
-
-| API | Description |
-| ------------------------------------------------------------------------------------------------ | ----------------- |
-| getAllSessionDescriptors(callback: AsyncCallback\<Array\<Readonly\<AVSessionDescriptor\>\>\>): void | Obtains the descriptors of all sessions. |
-| createController(sessionId: string, callback: AsyncCallback\<AVSessionController\>): void | Creates a controller. |
-| sendAVKeyEvent(event: KeyEvent, callback: AsyncCallback\<void\>): void | Sends a key event. |
-| getLaunchAbility(callback: AsyncCallback\<WantAgent\>): void | Obtains the launcher ability. |
-| sendControlCommand(command: AVControlCommand, callback: AsyncCallback\<void\>): void | Sends a control command. |
-| sendSystemAVKeyEvent(event: KeyEvent, callback: AsyncCallback\<void\>): void | Sends a system key event. |
-| sendSystemControlCommand(command: AVControlCommand, callback: AsyncCallback\<void\>): void | Sends a system control command. |
-| castAudio(session: SessionToken \| 'all', audioDevices: Array\<audio.AudioDeviceDescriptor\>, callback: AsyncCallback\<void\>): void | Casts the media session to a remote device.|
-
-### How to Develop
-1. Import the modules.
-```js
-import avSession from '@ohos.multimedia.avsession';
-import {Action, KeyEvent} from '@ohos.multimodalInput.KeyEvent';
-import wantAgent from '@ohos.app.ability.wantAgent';
-import audio from '@ohos.multimedia.audio';
-```
-
-2. Obtain the session descriptors and create a controller.
-```js
-// Define global variables.
-let g_controller = new Array();
-let g_centerSupportCmd:Set<string> = new Set(['play', 'pause', 'playNext', 'playPrevious', 'fastForward', 'rewind', 'seek', 'setSpeed', 'setLoopMode', 'toggleFavorite']);
-let g_validCmd:Set<string> = new Set(); // Initialized empty; filled in the validCommandChange callback.
-
-// Obtain the session descriptors and create a controller.
-avSession.getAllSessionDescriptors().then((descriptors) => {
- descriptors.forEach((descriptor) => {
- avSession.createController(descriptor.sessionId).then((controller) => {
- g_controller.push(controller);
- }).catch((err) => {
- console.error('createController error');
- });
- });
-}).catch((err) => {
- console.error('getAllSessionDescriptors error');
-});
-
-// Subscribe to the 'sessionCreate' event and create a controller.
-avSession.on('sessionCreate', (session) => {
- // After a session is added, you must create a controller.
- avSession.createController(session.sessionId).then((controller) => {
- g_controller.push(controller);
- }).catch((err) => {
- console.info(`createController : ERROR : ${err.message}`);
- });
-});
-```
-
-3. Subscribe to the session state and service changes.
-```js
-// Subscribe to the 'activeStateChange' event.
-controller.on('activeStateChange', (isActive) => {
- if (isActive) {
- console.log ("The widget corresponding to the controller is highlighted.");
- } else {
- console.log("The widget corresponding to the controller is invalid.");
- }
-});
-
-// Subscribe to the 'sessionDestroy' event to enable Media Controller to get notified when the session dies.
-controller.on('sessionDestroy', () => {
- console.info('on sessionDestroy : SUCCESS ');
- controller.destroy().then(() => {
- console.info('destroy : SUCCESS ');
- }).catch((err) => {
- console.info(`destroy : ERROR :${err.message}`);
- });
-});
-
-// Subscribe to the 'sessionDestroy' event to enable the application to get notified when the session dies.
-avSession.on('sessionDestroy', (session) => {
- let index = g_controller.findIndex((controller) => {
- return controller.sessionId == session.sessionId;
- });
-  if (index != -1) { // findIndex returns -1 when no matching controller exists.
- g_controller[index].destroy();
- g_controller.splice(index, 1);
- }
-});
-
-// Subscribe to the 'topSessionChange' event.
-avSession.on('topSessionChange', (session) => {
- let index = g_controller.findIndex((controller) => {
- return controller.sessionId == session.sessionId;
- });
- // Place the session on the top.
- if (index != 0) {
- g_controller.sort((a, b) => {
- return a.sessionId == session.sessionId ? -1 : 0;
- });
- }
-});
-
-// Subscribe to the 'sessionServiceDie' event.
-avSession.on('sessionServiceDie', () => {
- // The server is abnormal, and the application clears resources.
- console.log("Server exception");
-})
-```
-
-4. Subscribe to media session information changes.
-```js
-// Subscribe to metadata changes.
-let metaFilter = ['assetId', 'title', 'description'];
-controller.on('metadataChange', metaFilter, (metadata) => {
- console.info(`on metadataChange assetId : ${metadata.assetId}`);
-});
-
-// Subscribe to playback state changes.
-let playbackFilter = ['state', 'speed', 'loopMode'];
-controller.on('playbackStateChange', playbackFilter, (playbackState) => {
- console.info(`on playbackStateChange state : ${playbackState.state}`);
-});
-
-// Subscribe to supported command changes.
-controller.on('validCommandChange', (cmds) => {
- console.info(`validCommandChange : SUCCESS : size : ${cmds.size}`);
- console.info(`validCommandChange : SUCCESS : cmds : ${cmds.values()}`);
- g_validCmd.clear();
- for (let c of g_centerSupportCmd) {
- if (cmds.has(c)) {
- g_validCmd.add(c);
- }
- }
-});
-
-// Subscribe to output device changes.
-controller.on('outputDeviceChange', (device) => {
- console.info(`on outputDeviceChange device isRemote : ${device.isRemote}`);
-});
-```
-
-5. Control the session behavior.
-```js
-// When the user touches the play button, the control command 'play' is sent to the session.
-if (g_validCmd.has('play')) {
- controller.sendControlCommand({command:'play'}).then(() => {
- console.info('sendControlCommand successfully');
- }).catch((err) => {
- console.info(`sendControlCommand : ERROR : ${err.message}`);
- });
-}
-
-// When the user selects the single loop mode, the corresponding control command is sent to the session.
-if (g_validCmd.has('setLoopMode')) {
- controller.sendControlCommand({command: 'setLoopMode', parameter: avSession.LoopMode.LOOP_MODE_SINGLE}).then(() => {
- console.info('sendControlCommand successfully');
- }).catch((err) => {
- console.info(`sendControlCommand : ERROR : ${err.message}`);
- });
-}
-
-// Send a key event.
-let keyItem = {code: 0x49, pressedTime: 123456789, deviceId: 0};
-let event = {action: 2, key: keyItem, keys: [keyItem]};
-controller.sendAVKeyEvent(event).then(() => {
- console.info('sendAVKeyEvent Successfully');
-}).catch((err) => {
- console.info(`sendAVKeyEvent : ERROR : ${err.message}`);
-});
-
-// The user touches the blank area on the widget to start the application.
-controller.getLaunchAbility().then((want) => {
- console.log("Starting the application in the foreground");
-}).catch((err) => {
- console.info(`getLaunchAbility : ERROR : ${err.message}`);
-});
-
-// Send the system key event, reusing the key event defined above.
-keyItem = {code: 0x49, pressedTime: 123456789, deviceId: 0};
-event = {action: 2, key: keyItem, keys: [keyItem]};
-avSession.sendSystemAVKeyEvent(event).then(() => {
- console.info('sendSystemAVKeyEvent Successfully');
-}).catch((err) => {
- console.info(`sendSystemAVKeyEvent : ERROR : ${err.message}`);
-});
-
-// Send a system control command to the top session.
-let avcommand = {command: 'toggleFavorite', parameter: "false"};
-avSession.sendSystemControlCommand(avcommand).then(() => {
- console.info('sendSystemControlCommand successfully');
-}).catch((err) => {
- console.info(`sendSystemControlCommand : ERROR : ${err.message}`);
-});
-
-// Cast the session to another device.
-let audioManager = audio.getAudioManager();
-let audioDevices;
-await audioManager.getDevices(audio.DeviceFlag.OUTPUT_DEVICES_FLAG).then((data) => {
- audioDevices = data;
- console.info('Promise returned to indicate that the device list is obtained.');
-}).catch((err) => {
- console.info(`getDevices : ERROR : ${err.message}`);
-});
-
-avSession.castAudio('all', audioDevices).then(() => {
-  console.info('castAudio : SUCCESS');
-}).catch((err) => {
-  console.error(`castAudio : ERROR : ${err.message}`);
-});
-```
-
-6. Release resources.
-```js
-// Unsubscribe from the events.
-controller.off('metadataChange');
-controller.off('playbackStateChange');
-controller.off('sessionDestroy');
-controller.off('activeStateChange');
-controller.off('validCommandChange');
-controller.off('outputDeviceChange');
-
-// Destroy the controller.
-controller.destroy().then(() => {
-  console.info('destroy : SUCCESS ');
-}).catch((err) => {
-  console.error(`destroy : ERROR : ${err.message}`);
-});
-```
-
-### Verification
-When you touch the play, pause, or next button in Media Controller, the playback state of the application changes accordingly.
-
-### FAQs
-1. Controller Does Not Exist
-- Symptoms
-
-  A control command or an event is sent to a controller that does not exist. The error message "The session controller does not exist" is displayed.
-
-- Possible causes
-
- The controller has been destroyed.
-
-- Solution
-
- Query the session record and create the corresponding controller.
-
-2. Remote Session Connection Failure
-- Symptoms
-
-  The communication between the local session and the remote session fails. The error message "The remote session connection failed" is displayed.
-
-- Possible causes
-
- The communication between devices is interrupted.
-
-- Solution
-
- Stop sending control commands to the session. Subscribe to output device changes, and resume the sending when the output device is changed.
-
-3. Invalid Session Command
-- Symptoms
-
- The control command or event sent to the session is not supported. The error message "Invalid session command" is displayed.
-
-- Possible causes
-
- The session does not support this command.
-
-- Solution
-
-  Stop sending the command or event. Query the commands supported by the session, and send a supported command.
-
-4. Too Many Commands or Events
-- Symptoms
-
- The session client sends too many messages or commands to the server in a period of time, causing the server to be overloaded. The error message "Command or event overload" is displayed.
-
-- Possible causes
-
- The server is overloaded with messages or events.
-
-- Solution
-
- Control the frequency of sending commands or events.
diff --git a/en/application-dev/media/avsession-overview.md b/en/application-dev/media/avsession-overview.md
index c46211765644330ac26c1154f181904c2db4c3d0..766e642eebc2ba861bf6aceca5f9ea702f99d74f 100644
--- a/en/application-dev/media/avsession-overview.md
+++ b/en/application-dev/media/avsession-overview.md
@@ -1,56 +1,50 @@
# AVSession Overview
-> **NOTE**
->
-> All APIs of the **AVSession** module are system APIs and can be called only by system applications.
+The Audio and Video Session (AVSession) service is used to manage the playback behavior of all audio and video applications in the system in a unified manner. For example, it ensures that only one audio application is in the playing state at a time.
-## Overview
+Audio and video applications access the AVSession service and send application data (for example, the song being played and its playback state) to it. Through a controller, the user can choose another application or device to continue the playback. If an application does not access the AVSession service, its playback will be forcibly interrupted when it switches to the background.
- AVSession, short for audio and video session, is also known as media session.
- - Application developers can use the APIs provided by the **AVSession** module to connect their audio and video applications to the system's Media Controller.
- - System developers can use the APIs provided by the **AVSession** module to display media information of system audio and video applications and carry out unified playback control.
+To implement background playback, you must request a continuous task to prevent the task from being suspended. For details, see [Continuous Task Development](../task-management/continuous-task-dev-guide.md).
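+
+As a reference, the snippet below is a minimal sketch of requesting such a continuous task through the **@ohos.resourceschedule.backgroundTaskManager** module. The bundle name, ability name, and the ability context passed in are placeholders that depend on your application.
+
+```ts
+import backgroundTaskManager from '@ohos.resourceschedule.backgroundTaskManager';
+import wantAgent from '@ohos.app.ability.wantAgent';
+
+async function startContinuousTask(context) {
+  // Hypothetical wantAgent pointing back to the playing ability.
+  let wantAgentInfo = {
+    wants: [{ bundleName: 'com.example.player', abilityName: 'MainAbility' }],
+    operationType: wantAgent.OperationType.START_ABILITY,
+    requestCode: 0
+  };
+  let agent = await wantAgent.getWantAgent(wantAgentInfo);
+  // Keep audio playback alive while the application is in the background.
+  await backgroundTaskManager.startBackgroundRunning(context,
+    backgroundTaskManager.BackgroundMode.AUDIO_PLAYBACK, agent);
+}
+```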
- You can implement the following features through the **AVSession** module:
+## Basic Concepts
- 1. Unified playback control entry
+Be familiar with the following basic concepts before development:
- If there are multiple audio and video applications on the device, users need to switch to and access different applications to control media playback. With AVSession, a unified playback control entry of the system (such as Media Controller) is used for playback control of these audio and video applications. No more switching is required.
+- AVSession
- 2. Better background application management
+ For AVSession, one end is the audio and video applications under control, and the other end is a controller (for example, Media Controller or AI Voice). AVSession provides a channel for information exchange between the application and controller.
- When an application running in the background automatically starts audio playback, it is difficult for users to locate the application. With AVSession, users can quickly find the application that plays the audio clip in Media Controller.
+- Provider
-## Basic Concepts
+ An audio and video application that accesses the AVSession service. After accessing AVSession, the audio and video application must provide the media information, for example, the name of the item to play and the playback state, to AVSession. Through AVSession, the application also receives control commands from the controller and responds accordingly.
-- AVSession
+- Controller
+
+ A system application that accesses AVSession to provide global control on audio and video playback behavior. Typical controllers on OpenHarmony devices are Media Controller and AI Voice. The following sections use Media Controller as an example of the controller. After accessing AVSession, the controller obtains the latest media information and sends control commands to the audio and video applications through AVSession.
- A channel used for information exchange between applications and Media Controller. For AVSession, one end is the media application under control, and the other end is Media Controller. Through AVSession, an application can transfer the media playback information to Media Controller and receive control commands from Media Controller.
-
- AVSessionController
- Object that controls media sessions and thereby controls the playback behavior of applications. Through AVSessionController, Media Controller can control the playback behavior of applications, obtain playback information, and send control commands. It can also monitor the playback state of applications to ensure synchronization of the media session information.
+  An object that controls the playback behavior of the provider. It obtains the playback information of the audio and video application and listens for playback changes in the application to keep the AVSession information synchronized between the application and controller. The controller is the holder of an **AVSessionController** object.
+
+- AVSessionManager
+
+ An object that provides the capability of managing sessions. It can create an **AVSession** object, create an **AVSessionController** object, send control commands, and listen for session state changes.
+
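+  On the controller side, these capabilities are exposed through the **@ohos.multimedia.avsession** module. The snippet below is a minimal sketch of listing the current sessions and creating a controller for each; it assumes a system application that holds the required permissions.
+
+  ```ts
+  import avSession from '@ohos.multimedia.avsession';
+
+  let controllers = [];
+  avSession.getAllSessionDescriptors().then((descriptors) => {
+    descriptors.forEach((descriptor) => {
+      avSession.createController(descriptor.sessionId).then((controller) => {
+        controllers.push(controller); // One controller per active session.
+      });
+    });
+  });
+  ```
+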
-- Media Controller
-
- Holder of AVSessionController. Through AVSessionController, Media Controller sends commands to control media playback of applications.
+## AVSession Interaction Process
-## Implementation Principle
+AVSessions are classified into local AVSessions and distributed AVSessions.
-The **AVSession** module provides two classes: **AVSession** and **AVSessionController**.
+
-**Figure 1** AVSession interaction
+- Local AVSession
-
+ Local AVSession establishes a connection between the provider and controller in the local device, so as to implement unified playback control and media information display for audio and video applications in the system.
-- Interaction between the application and Media Controller: First, an audio application creates an **AVSession** object and sets session information, including media metadata, launcher ability, and playback state information. Then, Media Controller creates an **AVSessionController** object to obtain session-related information and send the 'play' command to the audio application. Finally, the audio application responds to the command and updates the playback state.
+- Distributed AVSession
-- Distributed projection: When a connected device creates a local session, Media Controller or the audio application can select another device to be projected based on the device list, synchronize the local session to the remote device, and generate a controllable remote session. The remote session is controlled by sending control commands to the remote device's application through its AVSessionController.
+ Distributed AVSession establishes a connection between the provider and controller in the cross-device scenario, so as to implement cross-device playback control and media information display for audio and video applications in the system. For example, you can project the content played on device A to device B and perform playback control on device B.
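+
+  As a brief sketch, casting the current sessions to remote audio devices can be done through the **castAudio()** API of the avsession module; **audioDevices** is assumed to have been obtained from **audio.getAudioManager().getDevices()** beforehand:
+
+  ```ts
+  import avSession from '@ohos.multimedia.avsession';
+
+  // Cast all current sessions to the selected remote devices.
+  avSession.castAudio('all', audioDevices).then(() => {
+    console.info('castAudio : SUCCESS');
+  }).catch((err) => {
+    console.error(`castAudio : ERROR : ${err.message}`);
+  });
+  ```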
## Constraints
-- The playback information displayed in Media Controller is the media information proactively written by the media application to AVSession.
-- Media Controller controls the playback of a media application based on the responses of the media application to control commands.
-- AVSession can transmit media playback information and control commands. It does not display information or execute control commands.
-- Do not develop Media Controller for common applications. For common audio and video applications running on OpenHarmony, the default control end is Media Controller, which is a system application. You do not need to carry out additional development for Media Controller.
-- If you want to develop your own system running OpenHarmony, you can develop your own Media Controller.
-- For better background management of audio and video applications, the **AVSession** module enforces background control for applications. Only applications that have accessed AVSession can play audio in the background. Otherwise, the system forcibly pauses the playback when an application switches to the background.
+The AVSession service manages the playback behavior of all audio and video applications in the system. To continue the playback after switching to the background, the audio and video applications must access the AVSession service.
diff --git a/en/application-dev/media/camera-device-input.md b/en/application-dev/media/camera-device-input.md
new file mode 100644
index 0000000000000000000000000000000000000000..3702e16760c002010c50da236d4ef9c2af079e5e
--- /dev/null
+++ b/en/application-dev/media/camera-device-input.md
@@ -0,0 +1,82 @@
+# Device Input Management
+
+Before developing a camera application, you must create an independent camera object. The application invokes and controls the camera object to perform basic operations such as preview, photographing, and video recording.
+
+## How to Develop
+
+Read [Camera](../reference/apis/js-apis-camera.md) for the API reference.
+
+1. Import the camera module, which provides camera-related attributes and methods.
+
+ ```ts
+ import camera from '@ohos.multimedia.camera';
+ ```
+
+2. Call **getCameraManager()** to obtain a **CameraManager** object.
+
+ ```ts
+   let context: any = getContext(this);
+   let cameraManager = camera.getCameraManager(context);
+ ```
+
+ > **NOTE**
+ >
+ > If obtaining the object fails, the camera hardware may be occupied or unusable. If it is occupied, wait until it is released.
+
+3. Call **getSupportedCameras()** in the **CameraManager** class to obtain the list of cameras supported by the current device. The list stores the IDs of all cameras supported. If the list is not empty, each ID in the list can be used to create an independent camera object. Otherwise, no camera is available for the current device and subsequent operations cannot be performed.
+
+ ```ts
+ let cameraArray = cameraManager.getSupportedCameras();
+ if (cameraArray.length <= 0) {
+ console.error("cameraManager.getSupportedCameras error");
+ return;
+ }
+
+ for (let index = 0; index < cameraArray.length; index++) {
+ console.info('cameraId : ' + cameraArray[index].cameraId); // Obtain the camera ID.
+ console.info('cameraPosition : ' + cameraArray[index].cameraPosition); // Obtain the camera position.
+ console.info('cameraType : ' + cameraArray[index].cameraType); // Obtain the camera type.
+ console.info('connectionType : ' + cameraArray[index].connectionType); // Obtain the camera connection type.
+ }
+ ```
+
+4. Call **getSupportedOutputCapability()** to obtain all output streams supported by the current device, such as preview streams and photo streams. The output stream is in each **profile** field under **CameraOutputCapability**.
+
+ ```ts
+ // Create a camera input stream.
+ let cameraInput;
+ try {
+ cameraInput = cameraManager.createCameraInput(cameraArray[0]);
+ } catch (error) {
+ console.error('Failed to createCameraInput errorCode = ' + error.code);
+ }
+ // Listen for CameraInput errors.
+ let cameraDevice = cameraArray[0];
+ cameraInput.on('error', cameraDevice, (error) => {
+ console.info(`Camera input error code: ${error.code}`);
+ })
+ // Open the camera.
+ await cameraInput.open();
+ // Obtain the output stream capabilities supported by the camera.
+ let cameraOutputCapability = cameraManager.getSupportedOutputCapability(cameraArray[0]);
+ if (!cameraOutputCapability) {
+ console.error("cameraManager.getSupportedOutputCapability error");
+ return;
+ }
+ console.info("outputCapability: " + JSON.stringify(cameraOutputCapability));
+ ```
+
+
+## Status Listening
+
+During camera application development, you can listen for the camera status, including the appearance of a new camera, removal of a camera, and availability of a camera. The camera ID and camera status are used in the callback function. When a new camera appears, the new camera can be added to the supported camera list.
+
+Register the 'cameraStatus' event and return the listening result through a callback, which carries the **CameraStatusInfo** parameter. For details about the parameter, see [CameraStatusInfo](../reference/apis/js-apis-camera.md#camerastatusinfo).
+
+```ts
+cameraManager.on('cameraStatus', (cameraStatusInfo) => {
+ console.info(`camera: ${cameraStatusInfo.camera.cameraId}`);
+ console.info(`status: ${cameraStatusInfo.status}`);
+})
+```
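+
+For example, to keep the camera list current, the application can re-query the supported cameras whenever a status change is reported. A short sketch based on the APIs above:
+
+```ts
+cameraManager.on('cameraStatus', (cameraStatusInfo) => {
+  console.info(`camera: ${cameraStatusInfo.camera.cameraId}, status: ${cameraStatusInfo.status}`);
+  // Re-query so that a newly appeared camera is picked up.
+  let cameraArray = cameraManager.getSupportedCameras();
+  console.info('supported camera count: ' + cameraArray.length);
+})
+```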
diff --git a/en/application-dev/media/camera-metadata.md b/en/application-dev/media/camera-metadata.md
new file mode 100644
index 0000000000000000000000000000000000000000..8fdeff1df08f624374f2a2a5cee32b99b2c41e03
--- /dev/null
+++ b/en/application-dev/media/camera-metadata.md
@@ -0,0 +1,66 @@
+# Camera Metadata
+
+Metadata is the description and context of image information returned by the camera application. It provides detailed data for the image information, for example, coordinates of a viewfinder frame for identifying a portrait in a photo or a video.
+
+Metadata uses a tag (key) to find the corresponding data during the transfer of parameters and configurations, reducing memory copy operations.
+
+## How to Develop
+
+Read [Camera](../reference/apis/js-apis-camera.md) for the API reference.
+
+1. Obtain the metadata types supported by the current device from **supportedMetadataObjectTypes** in **CameraOutputCapability**, and then use **createMetadataOutput()** to create a metadata output stream.
+
+ ```ts
+ let metadataObjectTypes = cameraOutputCapability.supportedMetadataObjectTypes;
+ let metadataOutput;
+ try {
+ metadataOutput = cameraManager.createMetadataOutput(metadataObjectTypes);
+ } catch (error) {
+ // If the operation fails, error.code is returned and processed.
+     console.error(error.code);
+ }
+ ```
+
+2. Call **start()** to start outputting metadata. If the call fails, an error code is returned. For details, see [Camera Error Codes](../reference/apis/js-apis-camera.md#cameraerrorcode).
+
+ ```ts
+ metadataOutput.start().then(() => {
+ console.info('Callback returned with metadataOutput started.');
+ }).catch((err) => {
+     console.error('Failed to start the metadata output, error code: ' + err.code);
+ });
+ ```
+
+3. Call **stop()** to stop outputting metadata. If the call fails, an error code is returned. For details, see [Camera Error Codes](../reference/apis/js-apis-camera.md#cameraerrorcode).
+
+ ```ts
+ metadataOutput.stop().then(() => {
+ console.info('Callback returned with metadataOutput stopped.');
+ }).catch((err) => {
+     console.error('Failed to stop the metadata output, error code: ' + err.code);
+ });
+ ```
+
+## Status Listening
+
+During camera application development, you can listen for the status of metadata objects and output stream.
+
+- Register the 'metadataObjectsAvailable' event to listen for metadata objects that are available. When a valid metadata object is detected, the callback function returns the metadata. This event can be registered when a **MetadataOutput** object is created.
+
+ ```ts
+ metadataOutput.on('metadataObjectsAvailable', (metadataObjectArr) => {
+ console.info(`metadata output metadataObjectsAvailable`);
+ })
+ ```
+
+ > **NOTE**
+ >
+  > Currently, only **FACE_DETECTION** is available for the metadata type. The metadata object is the rectangle of the recognized face, including the x-axis coordinate and y-axis coordinate of the upper left corner of the rectangle as well as the width and height of the rectangle. A sketch of reading this rectangle is provided after this list.
+
+- Register the 'error' event to listen for metadata stream errors. The callback function returns an error code when an API is incorrectly used. For details about the error code types, see [Camera Error Codes](../reference/apis/js-apis-camera.md#cameraerrorcode).
+
+ ```ts
+ metadataOutput.on('error', (metadataOutputError) => {
+ console.info(`Metadata output error code: ${metadataOutputError.code}`);
+ })
+ ```
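+
+As a sketch of consuming the face-detection result described in the note above, assuming each metadata object carries a **boundingBox** rectangle with the members listed there (the field names are assumptions, not confirmed API):
+
+```ts
+metadataOutput.on('metadataObjectsAvailable', (metadataObjectArr) => {
+  for (let metadataObject of metadataObjectArr) {
+    // Assumed shape: the rectangle of the recognized face.
+    let rect = metadataObject.boundingBox;
+    console.info(`face rect: x=${rect.topLeftX}, y=${rect.topLeftY}, w=${rect.width}, h=${rect.height}`);
+  }
+})
+```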
diff --git a/en/application-dev/media/camera-overview.md b/en/application-dev/media/camera-overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..03445ee6979c28fb4084a2f3c8186d77f14e5b89
--- /dev/null
+++ b/en/application-dev/media/camera-overview.md
@@ -0,0 +1,27 @@
+# Camera Overview
+
+With the APIs provided by the camera module of the multimedia subsystem, you can develop a camera application. The application accesses and operates the camera hardware to implement basic operations, such as preview, photographing, and video recording. It can also perform more advanced operations, for example, controlling the flash and exposure time, and adjusting the focus and focal length.
+
+## Development Model
+
+The camera application invokes the camera hardware to collect and process image and video data, and output images and videos. It can work with multiple lenses (such as wide-angle, telephoto, and ToF lenses) in various service scenarios (such as different requirements on resolution, format, and effect).
+
+The figure below illustrates the working process of the camera module. The working process can be summarized into three parts: input device management, session management, and output management.
+
+- During input device management, the camera application invokes the camera hardware to collect data and uses the data as an input stream.
+
+- During session management, you can configure an input stream to determine the camera to be used. You can also set parameters, such as the flash, exposure time, focus, and focus adjustment, to implement different shooting effects in various service scenarios. The application can switch between sessions to meet service requirements in different scenarios.
+
+- During output management, you can configure an output stream, which can be a preview stream, photo stream, or video stream.
+
+**Figure 1** Camera working process
+
+
+For better application development, you are also advised to understand the camera development model.
+
+**Figure 2** Camera development model
+
+
+The camera application controls the camera hardware to implement basic operations such as image display (preview), photo saving (photographing), and video recording. During the implementation, the camera service controls the camera hardware to collect and output data, and transmits the data to a specific module for processing through a BufferQueue at the bottom-layer camera hardware device interface (HDI). You can ignore the BufferQueue during application development. It is used to send the data processed by the bottom layer to the upper layer for image display.
+
+For example, in a video recording scenario, the recording service creates a video surface and provides it to the camera service for data transmission. The camera service controls the camera device to collect video data and generate a video stream. After processing the collected data at the HDI layer, the camera service transmits the video stream to the recording service through the surface. The recording service processes the video stream and saves it as a video file. Now video recording is complete.
diff --git a/en/application-dev/media/camera-preparation.md b/en/application-dev/media/camera-preparation.md
new file mode 100644
index 0000000000000000000000000000000000000000..eb504af9a69f65473f27de59a45a17891357be7f
--- /dev/null
+++ b/en/application-dev/media/camera-preparation.md
@@ -0,0 +1,25 @@
+# Camera Development Preparations
+
+The main process of camera application development includes development preparations, device input management, session management, preview, photographing, and video recording.
+
+Before developing a camera application, you must request camera-related permissions (as described in the table below) to ensure that the application has the permission to access the camera hardware and other services. Before requesting the permission, ensure that the [basic principles for permission management](../security/accesstoken-overview.md#basic-principles-for-permission-management) are met.
+
+
+| Permission| Description| Authorization Mode|
+| -------- | -------- | -------- |
+| ohos.permission.CAMERA | Allows an application to use the camera to take photos and record videos.| user_grant |
+| ohos.permission.MICROPHONE | Allows an application to access the microphone. This permission is required only if the application is used to record audio.| user_grant |
+| ohos.permission.WRITE_MEDIA | Allows an application to read media files from and write media files into the user's external storage. This permission is optional.| user_grant |
+| ohos.permission.READ_MEDIA | Allows an application to read media files from the user's external storage. This permission is optional.| user_grant |
+| ohos.permission.MEDIA_LOCATION | Allows an application to access geographical locations in the user's media file. This permission is optional.| user_grant |
+
+
+After configuring the permissions in the **module.json5** file, the application must call [abilityAccessCtrl.requestPermissionsFromUser](../reference/apis/js-apis-abilityAccessCtrl.md#requestpermissionsfromuser9) to check whether the required permissions are granted. If not, request the permissions from the user by displaying a dialog box.
+
+
+For details about how to request and verify the permissions, see [Permission Application Guide](../security/accesstoken-guidelines.md).
+
+
+> **NOTE**
+>
+> Even if the user has granted a permission, the application must check for the permission before calling an API protected by the permission. It should not persist the permission granted status, because the user can revoke the permission through the system application **Settings**.
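+
+As a reference, the snippet below is a minimal sketch of such a check-and-request flow using **@ohos.abilityAccessCtrl**; the **context** parameter is assumed to be the ability context of your application.
+
+```ts
+import abilityAccessCtrl from '@ohos.abilityAccessCtrl';
+
+async function requestCameraPermission(context) {
+  let atManager = abilityAccessCtrl.createAtManager();
+  // Displays a dialog box for any permission that has not been granted yet.
+  let result = await atManager.requestPermissionsFromUser(context, ['ohos.permission.CAMERA']);
+  // In authResults, 0 means the permission is granted.
+  return result.authResults.every((status) => status === 0);
+}
+```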
diff --git a/en/application-dev/media/camera-preview.md b/en/application-dev/media/camera-preview.md
new file mode 100644
index 0000000000000000000000000000000000000000..e65f5dac8c96737b81b20703ce6ffa6fe7daa54b
--- /dev/null
+++ b/en/application-dev/media/camera-preview.md
@@ -0,0 +1,87 @@
+# Camera Preview
+
+Preview is the image you see after you start the camera application but before you take photos or record videos.
+
+## How to Develop
+
+Read [Camera](../reference/apis/js-apis-camera.md) for the API reference.
+
+1. Create a surface.
+
+   The **XComponent**, whose capabilities are provided by the UI module, offers the surface for preview streams. For details, see [XComponent](../reference/arkui-ts/ts-basic-components-xcomponent.md).
+
+ ```ts
+ // Create an XComponentController object.
+ mXComponentController: XComponentController = new XComponentController;
+ build() {
+ Flex() {
+ // Create an XComponent.
+ XComponent({
+ id: '',
+ type: 'surface',
+ libraryname: '',
+ controller: this.mXComponentController
+ })
+ .onLoad(() => {
+ // Set the surface width and height (1920 x 1080). For details about how to set the preview size, see the preview resolutions supported by the current device, which are obtained from previewProfilesArray.
+ this.mXComponentController.setXComponentSurfaceSize({surfaceWidth:1920,surfaceHeight:1080});
+ // Obtain the surface ID.
+ globalThis.surfaceId = this.mXComponentController.getXComponentSurfaceId();
+ })
+ .width('1920px')
+ .height('1080px')
+ }
+ }
+ ```
+
+2. Call **previewProfiles()** in the **CameraOutputCapability** class to obtain the preview capabilities supported by the current device, in the format of a **previewProfilesArray** array. Then call **createPreviewOutput()** to create a preview output stream, with the first parameter set to the first item in the **previewProfilesArray** array and the second parameter set to the surface ID obtained in step 1.
+
+ ```ts
+ let previewProfilesArray = cameraOutputCapability.previewProfiles;
+ let previewOutput;
+ try {
+ previewOutput = cameraManager.createPreviewOutput(previewProfilesArray[0], surfaceId);
+   } catch (error) {
+ console.error("Failed to create the PreviewOutput instance." + error);
+ }
+ ```
+
+3. Call **start()** to start outputting the preview stream. If the call fails, an error code is returned. For details, see [Camera Error Codes](../reference/apis/js-apis-camera.md#cameraerrorcode).
+
+ ```ts
+ previewOutput.start().then(() => {
+ console.info('Callback returned with previewOutput started.');
+ }).catch((err) => {
+     console.error('Failed to start the preview output, error code: ' + err.code);
+ });
+ ```
+
+
+## Status Listening
+
+During camera application development, you can listen for the preview output stream status, including preview stream start, preview stream end, and preview stream output errors.
+
+- Register the 'frameStart' event to listen for preview start events. This event can be registered when a **PreviewOutput** object is created and is triggered when the bottom layer starts exposure for the first time. The preview stream is started as long as a result is returned.
+
+ ```ts
+ previewOutput.on('frameStart', () => {
+ console.info('Preview frame started');
+ })
+ ```
+
+- Register the 'frameEnd' event to listen for preview end events. This event can be registered when a **PreviewOutput** object is created and is triggered when the last frame of preview ends. The preview stream ends as long as a result is returned.
+
+ ```ts
+ previewOutput.on('frameEnd', () => {
+ console.info('Preview frame ended');
+ })
+ ```
+
+- Register the 'error' event to listen for preview output errors. The callback function returns an error code when an API is incorrectly used. For details about the error code types, see [Camera Error Codes](../reference/apis/js-apis-camera.md#cameraerrorcode).
+
+ ```ts
+ previewOutput.on('error', (previewOutputError) => {
+ console.info(`Preview output error code: ${previewOutputError.code}`);
+ })
+ ```
diff --git a/en/application-dev/media/camera-recording-case.md b/en/application-dev/media/camera-recording-case.md
new file mode 100644
index 0000000000000000000000000000000000000000..4d284f7e675fe0693240bbb678391147926652e7
--- /dev/null
+++ b/en/application-dev/media/camera-recording-case.md
@@ -0,0 +1,247 @@
+# Video Recording Sample
+
+## Development Process
+
+After obtaining the output stream capabilities supported by the camera, create a video stream. The development process is as follows:
+
+
+
+
+## Sample Code
+
+```ts
+import camera from '@ohos.multimedia.camera'
+import media from '@ohos.multimedia.media'
+
+// Create a CameraManager instance.
+let context: any = getContext(this)
+let cameraManager = camera.getCameraManager(context)
+if (!cameraManager) {
+ console.error("camera.getCameraManager error")
+ return;
+}
+
+// Listen for camera status changes.
+cameraManager.on('cameraStatus', (cameraStatusInfo) => {
+ console.log(`camera : ${cameraStatusInfo.camera.cameraId}`);
+ console.log(`status: ${cameraStatusInfo.status}`);
+})
+
+// Obtain the camera list.
+let cameraArray = cameraManager.getSupportedCameras();
+if (cameraArray.length <= 0) {
+  console.error("cameraManager.getSupportedCameras error")
+  return;
+}
+
+// Obtain the output stream capabilities supported by the camera.
+let cameraOutputCap = cameraManager.getSupportedOutputCapability(cameraArray[0]);
+if (!cameraOutputCap) {
+ console.error("cameraManager.getSupportedOutputCapability error")
+ return;
+}
+console.log("outputCapability: " + JSON.stringify(cameraOutputCap));
+
+let previewProfilesArray = cameraOutputCap.previewProfiles;
+if (!previewProfilesArray) {
+ console.error("createOutput previewProfilesArray == null || undefined")
+}
+
+let photoProfilesArray = cameraOutputCap.photoProfiles;
+if (!photoProfilesArray) {
+ console.error("createOutput photoProfilesArray == null || undefined")
+}
+
+let videoProfilesArray = cameraOutputCap.videoProfiles;
+if (!videoProfilesArray) {
+ console.error("createOutput videoProfilesArray == null || undefined")
+}
+
+let metadataObjectTypesArray = cameraOutputCap.supportedMetadataObjectTypes;
+if (!metadataObjectTypesArray) {
+ console.error("createOutput metadataObjectTypesArray == null || undefined")
+}
+
+// Configure the parameters based on those supported by the hardware device.
+let AVRecorderProfile = {
+ audioBitrate : 48000,
+ audioChannels : 2,
+ audioCodec : media.CodecMimeType.AUDIO_AAC,
+ audioSampleRate : 48000,
+ fileFormat : media.ContainerFormatType.CFT_MPEG_4,
+ videoBitrate : 2000000,
+ videoCodec : media.CodecMimeType.VIDEO_MPEG4,
+ videoFrameWidth : 640,
+ videoFrameHeight : 480,
+ videoFrameRate : 30
+}
+let AVRecorderConfig = {
+ audioSourceType : media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC,
+ videoSourceType : media.VideoSourceType.VIDEO_SOURCE_TYPE_SURFACE_YUV,
+ profile : AVRecorderProfile,
+  url : 'fd://', // Before passing in a file descriptor to this parameter, the file must be created by the caller and granted with the read and write permissions. Example value: fd://45.
+ rotation: 0, // The value can be 0, 90, 180, or 270. If any other value is used, prepare() reports an error.
+ location : { latitude : 30, longitude : 130 }
+}
+
+let avRecorder
+media.createAVRecorder((error, recorder) => {
+ if (recorder != null) {
+ avRecorder = recorder;
+ console.log('createAVRecorder success');
+ } else {
+ console.log(`createAVRecorder fail, error:${error}`);
+ }
+});
+
+avRecorder.prepare(AVRecorderConfig, (err) => {
+ if (err == null) {
+ console.log('prepare success');
+ } else {
+ console.log('prepare failed and error is ' + err.message);
+ }
+})
+
+let videoSurfaceId = null; // The surfaceID is passed in to the camera API to create a VideoOutput instance.
+avRecorder.getInputSurface((err, surfaceId) => {
+ if (err == null) {
+ console.log('getInputSurface success');
+ videoSurfaceId = surfaceId;
+ } else {
+ console.log('getInputSurface failed and error is ' + err.message);
+ }
+});
+
+// Create a VideoOutput instance.
+let videoOutput
+try {
+ videoOutput = cameraManager.createVideoOutput(videoProfilesArray[0], videoSurfaceId)
+} catch (error) {
+ console.error('Failed to create the videoOutput instance. errorCode = ' + error.code);
+}
+
+// Listen for video output errors.
+videoOutput.on('error', (error) => {
+ console.log(`Preview output error code: ${error.code}`);
+})
+
+// Create a session.
+let captureSession
+try {
+ captureSession = cameraManager.createCaptureSession()
+} catch (error) {
+ console.error('Failed to create the CaptureSession instance. errorCode = ' + error.code);
+}
+
+// Listen for session errors.
+captureSession.on('error', (error) => {
+ console.log(`Capture session error code: ${error.code}`);
+})
+
+// Start configuration for the session.
+try {
+ captureSession.beginConfig()
+} catch (error) {
+ console.error('Failed to beginConfig. errorCode = ' + error.code);
+}
+
+// Create a camera input stream.
+let cameraInput
+try {
+ cameraInput = cameraManager.createCameraInput(cameraArray[0]);
+} catch (error) {
+ console.error('Failed to createCameraInput errorCode = ' + error.code);
+}
+
+// Listen for camera input errors.
+let cameraDevice = cameraArray[0];
+cameraInput.on('error', cameraDevice, (error) => {
+ console.log(`Camera input error code: ${error.code}`);
+})
+
+// Open the camera.
+await cameraInput.open();
+
+// Add the camera input stream to the session.
+try {
+ captureSession.addInput(cameraInput)
+} catch (error) {
+ console.error('Failed to addInput. errorCode = ' + error.code);
+}
+
+// Create a preview output stream. For details about the surfaceId parameter, see the XComponent. The preview stream is the surface provided by the XComponent.
+let previewOutput
+try {
+ previewOutput = cameraManager.createPreviewOutput(previewProfilesArray[0], surfaceId)
+} catch (error) {
+ console.error("Failed to create the PreviewOutput instance.")
+}
+
+// Add the preview input stream to the session.
+try {
+ captureSession.addOutput(previewOutput)
+} catch (error) {
+ console.error('Failed to addOutput(previewOutput). errorCode = ' + error.code);
+}
+
+// Add a video output stream to the session.
+try {
+ captureSession.addOutput(videoOutput)
+} catch (error) {
+ console.error('Failed to addOutput(videoOutput). errorCode = ' + error.code);
+}
+
+// Commit the session configuration.
+await captureSession.commitConfig()
+
+// Start the session.
+await captureSession.start().then(() => {
+ console.log('Promise returned to indicate the session start success.');
+})
+
+// Start the video output stream.
+videoOutput.start(async (err) => {
+ if (err) {
+    console.error(`Failed to start the video output ${err.message}`);
+ return;
+ }
+ console.log('Callback invoked to indicate the video output start success.');
+});
+
+// Start video recording.
+avRecorder.start().then(() => {
+ console.log('videoRecorder start success');
+})
+
+// Stop the video output stream.
+videoOutput.stop((err) => {
+ if (err) {
+    console.error(`Failed to stop the video output ${err.message}`);
+ return;
+ }
+ console.log('Callback invoked to indicate the video output stop success.');
+});
+
+// Stop video recording.
+avRecorder.stop().then(() => {
+ console.log('stop success');
+})
+
+// Stop the session.
+captureSession.stop()
+
+// Release the camera input stream.
+cameraInput.close()
+
+// Release the preview output stream.
+previewOutput.release()
+
+// Release the video output stream.
+videoOutput.release()
+
+// Release the session.
+captureSession.release()
+
+// Set the session to null.
+captureSession = null
+```
diff --git a/en/application-dev/media/camera-recording.md b/en/application-dev/media/camera-recording.md
new file mode 100644
index 0000000000000000000000000000000000000000..421ff990bf45b372dd39cd3346e29b636f292762
--- /dev/null
+++ b/en/application-dev/media/camera-recording.md
@@ -0,0 +1,155 @@
+# Video Recording
+
+Video recording is another important function of the camera application. Video recording is the process of cyclically capturing frames. For smooth videos, you can follow step 4 in [Camera Photographing](camera-shooting.md) to set the resolution, flash, focal length, photo quality, and rotation angle.
+
+## How to Develop
+
+Read [Camera](../reference/apis/js-apis-camera.md) for the API reference.
+
+1. Import the media module. The [APIs](../reference/apis/js-apis-media.md) provided by this module are used to obtain the surface ID and create a video output stream.
+
+ ```ts
+ import media from '@ohos.multimedia.media';
+ ```
+
+2. Create a surface.
+
+   Call **createAVRecorder()** of the media module to create an **AVRecorder** instance, and call **getInputSurface()** of the instance to obtain the surface ID, which is associated with the video output stream to process the data output by the stream.
+
+ ```ts
+   let avRecorder;
+   media.createAVRecorder((error, recorder) => {
+     if (recorder != null) {
+       avRecorder = recorder;
+       console.info('createAVRecorder success');
+     } else {
+       console.info(`createAVRecorder fail, error:${error}`);
+     }
+   });
+   // For details about AVRecorderConfig, see step 3.
+   avRecorder.prepare(AVRecorderConfig, (err) => {
+ if (err == null) {
+ console.log('prepare success');
+ } else {
+ console.log('prepare failed and error is ' + err.message);
+ }
+ })
+
+ let videoSurfaceId = null;
+   avRecorder.getInputSurface().then((surfaceId) => {
+ console.info('getInputSurface success');
+ videoSurfaceId = surfaceId;
+ }).catch((err) => {
+ console.info('getInputSurface failed and catch error is ' + err.message);
+ });
+ ```
+
+3. Create a video output stream.
+
+ Obtain the video output streams supported by the current device from **videoProfiles** in the **CameraOutputCapability** class. Then, define video recording parameters and use **createVideoOutput()** to create a video output stream.
+
+ ```ts
+ let videoProfilesArray = cameraOutputCapability.videoProfiles;
+ if (!videoProfilesArray) {
+ console.error("createOutput videoProfilesArray == null || undefined");
+ }
+
+ // Define video recording parameters.
+ let videoConfig = {
+ videoSourceType: media.VideoSourceType.VIDEO_SOURCE_TYPE_SURFACE_YUV,
+ profile: {
+ fileFormat: media.ContainerFormatType.CFT_MPEG_4, // Video file encapsulation format. Only MP4 is supported.
+ videoBitrate: 100000, // Video bit rate.
+ videoCodec: media.CodecMimeType.VIDEO_MPEG4, // Video file encoding format. Both MPEG-4 and AVC are supported.
+ videoFrameWidth: 640, // Video frame width.
+ videoFrameHeight: 480, // Video frame height.
+ videoFrameRate: 30 // Video frame rate.
+ },
+ url: 'fd://35',
+ rotation: 0
+ }
+ // Create an AVRecorder instance.
+ let avRecorder;
+ media.createAVRecorder((error, recorder) => {
+ if (recorder != null) {
+ avRecorder = recorder;
+ console.info('createAVRecorder success');
+ } else {
+ console.info(`createAVRecorder fail, error:${error}`);
+ }
+ });
+ // Set video recording parameters.
+ avRecorder.prepare(videoConfig);
+ // Create a VideoOutput instance.
+ let videoOutput;
+ try {
+ videoOutput = cameraManager.createVideoOutput(videoProfilesArray[0], videoSurfaceId);
+ } catch (error) {
+ console.error('Failed to create the videoOutput instance. errorCode = ' + error.code);
+ }
+ ```
+
+4. Start video recording.
+
+ Call **start()** of the **VideoOutput** instance to start the video output stream, and then call **start()** of the **AVRecorder** instance to start recording.
+
+   ```ts
+ videoOutput.start(async (err) => {
+ if (err) {
+       console.error(`Failed to start the video output ${err.message}`);
+ return;
+ }
+ console.info('Callback invoked to indicate the video output start success.');
+ });
+
+ avRecorder.start().then(() => {
+ console.info('avRecorder start success');
+   });
+ ```
+
+5. Stop video recording.
+
+ Call **stop()** of the **AVRecorder** instance to stop recording, and then call **stop()** of the **VideoOutput** instance to stop the video output stream.
+
+ ```ts
+   avRecorder.stop().then(() => {
+     console.info('stop success');
+   });
+
+ videoOutput.stop((err) => {
+ if (err) {
+       console.error(`Failed to stop the video output ${err.message}`);
+ return;
+ }
+ console.info('Callback invoked to indicate the video output stop success.');
+ });
+ ```
+
+
+## Status Listening
+
+During camera application development, you can listen for the status of the video output stream, including recording start, recording end, and recording stream output errors.
+
+- Register the 'frameStart' event to listen for recording start events. This event can be registered when a **VideoOutput** object is created and is triggered when the bottom layer starts exposure for recording for the first time. Video recording is started as long as a result is returned.
+
+ ```ts
+ videoOutput.on('frameStart', () => {
+ console.info('Video frame started');
+ })
+ ```
+
+- Register the 'frameEnd' event to listen for recording end events. This event can be registered when a **VideoOutput** object is created and is triggered when the last frame of recording ends. Video recording ends as long as a result is returned.
+
+ ```ts
+ videoOutput.on('frameEnd', () => {
+ console.info('Video frame ended');
+ })
+ ```
+
+- Register the 'error' event to listen for video output errors. The callback function returns an error code when an API is incorrectly used. For details about the error code types, see [Camera Error Codes](../reference/apis/js-apis-camera.md#cameraerrorcode).
+
+ ```ts
+ videoOutput.on('error', (error) => {
+ console.info(`Video output error code: ${error.code}`);
+ })
+ ```
diff --git a/en/application-dev/media/camera-session-management.md b/en/application-dev/media/camera-session-management.md
new file mode 100644
index 0000000000000000000000000000000000000000..1d0d2fcfe20428d33d72569cbf2212b830ad42e2
--- /dev/null
+++ b/en/application-dev/media/camera-session-management.md
@@ -0,0 +1,86 @@
+# Camera Session Management
+
+Before using the camera application for preview, photographing, video recording, and metadata, you must create a camera session.
+
+You can implement the following functions in the session:
+
+- Configure the camera input and output streams. This is mandatory for photographing.
+ Configuring an input stream is to add a device input, which means that the user selects a camera for photographing. Configuring an output stream is to select a data output mode. For example, to implement photographing, you must configure both the preview stream and photo stream as the output stream. The data of the preview stream is displayed on the XComponent, and that of the photo stream is saved to the Gallery application through the **ImageReceiver** API.
+
+- Perform more operations on the camera hardware. For example, add the flash and adjust the focal length. For details about the supported configurations and APIs, see [Camera API Reference](../reference/apis/js-apis-camera.md).
+
+- Control session switching. The application can switch the camera mode by removing and adding output streams. For example, to switch from photographing to video recording, the application must remove the photo output stream and add the video output stream.
+
+After the session configuration is complete, the application must commit the configuration and start the session before using the camera functionalities.
+
+## How to Develop
+
+1. Call **createCaptureSession()** in the **CameraManager** class to create a session.
+
+ ```ts
+ let captureSession;
+ try {
+ captureSession = cameraManager.createCaptureSession();
+ } catch (error) {
+ console.error('Failed to create the CaptureSession instance. errorCode = ' + error.code);
+ }
+ ```
+
+2. Call **beginConfig()** in the **CaptureSession** class to start configuration for the session.
+
+ ```ts
+ try {
+ captureSession.beginConfig();
+ } catch (error) {
+ console.error('Failed to beginConfig. errorCode = ' + error.code);
+ }
+ ```
+
+3. Configure the session. You can call **addInput()** and **addOutput()** in the **CaptureSession** class to add the input and output streams to the session, respectively. The code snippet below adds the preview stream **previewOutput** and the photo stream **photoOutput** as an example of implementing the photographing and preview mode.
+
+ After the configuration, call **commitConfig()** and **start()** in the **CaptureSession** class in sequence to commit the configuration and start the session.
+
+ ```ts
+ try {
+ captureSession.addInput(cameraInput);
+ } catch (error) {
+ console.error('Failed to addInput. errorCode = ' + error.code);
+ }
+ try {
+ captureSession.addOutput(previewOutput);
+ } catch (error) {
+ console.error('Failed to addOutput(previewOutput). errorCode = ' + error.code);
+ }
+ try {
+ captureSession.addOutput(photoOutput);
+ } catch (error) {
+ console.error('Failed to addOutput(photoOutput). errorCode = ' + error.code);
+ }
+   await captureSession.commitConfig();
+ await captureSession.start().then(() => {
+ console.info('Promise returned to indicate the session start success.');
+ })
+ ```
+
+4. Control the session. You can call **stop()** in the **CaptureSession** class to stop the session, and call **removeOutput()** and **addOutput()** in this class to switch to another session. The code snippet below removes the photo stream **photoOutput** and adds the video stream **videoOutput** as an example of switching from photographing to video recording.
+
+ ```ts
+ await captureSession.stop();
+ try {
+ captureSession.beginConfig();
+ } catch (error) {
+ console.error('Failed to beginConfig. errorCode = ' + error.code);
+ }
+ // Remove the photo output stream from the session.
+ try {
+ captureSession.removeOutput(photoOutput);
+ } catch (error) {
+ console.error('Failed to removeOutput(photoOutput). errorCode = ' + error.code);
+ }
+ // Add the video output stream to the session.
+ try {
+ captureSession.addOutput(videoOutput);
+ } catch (error) {
+ console.error('Failed to addOutput(videoOutput). errorCode = ' + error.code);
+ }
+ ```
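+
+   After the streams are switched, commit the new configuration and start the session again, in the same way as in step 3; a brief sketch:
+
+   ```ts
+   await captureSession.commitConfig();
+   await captureSession.start().then(() => {
+     console.info('Promise returned to indicate the session start success.');
+   })
+   ```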
diff --git a/en/application-dev/media/camera-shooting-case.md b/en/application-dev/media/camera-shooting-case.md
new file mode 100644
index 0000000000000000000000000000000000000000..da2588b10b844fd2a9432da909d1d387b8193d9f
--- /dev/null
+++ b/en/application-dev/media/camera-shooting-case.md
@@ -0,0 +1,239 @@
+# Camera Photographing Sample
+
+## Development Process
+
+After obtaining the output stream capabilities supported by the camera, create a photo stream. The development process is as follows:
+
+
+
+## Sample Code
+
+```ts
+import camera from '@ohos.multimedia.camera'
+import image from '@ohos.multimedia.image'
+import media from '@ohos.multimedia.media'
+
+// Create a CameraManager instance.
+let context: any = getContext(this)
+let cameraManager = camera.getCameraManager(context)
+if (!cameraManager) {
+ console.error("camera.getCameraManager error")
+ return;
+}
+// Listen for camera status changes.
+cameraManager.on('cameraStatus', (cameraStatusInfo) => {
+ console.info(`camera : ${cameraStatusInfo.camera.cameraId}`);
+ console.info(`status: ${cameraStatusInfo.status}`);
+})
+
+// Obtain the camera list.
+let cameraArray = cameraManager.getSupportedCameras();
+if (cameraArray.length <= 0) {
+ console.error("cameraManager.getSupportedCameras error")
+ return;
+}
+
+for (let index = 0; index < cameraArray.length; index++) {
+ console.info('cameraId : ' + cameraArray[index].cameraId); // Obtain the camera ID.
+ console.info('cameraPosition : ' + cameraArray[index].cameraPosition); // Obtain the camera position.
+ console.info('cameraType : ' + cameraArray[index].cameraType); // Obtain the camera type.
+ console.info('connectionType : ' + cameraArray[index].connectionType); // Obtain the camera connection type.
+}
+
+// Create a camera input stream.
+let cameraInput
+try {
+ cameraInput = cameraManager.createCameraInput(cameraArray[0]);
+} catch (error) {
+ console.error('Failed to createCameraInput errorCode = ' + error.code);
+}
+
+// Listen for camera input errors.
+let cameraDevice = cameraArray[0];
+cameraInput.on('error', cameraDevice, (error) => {
+ console.info(`Camera input error code: ${error.code}`);
+})
+
+// Open the camera.
+await cameraInput.open();
+
+// Obtain the output stream capabilities supported by the camera.
+let cameraOutputCap = cameraManager.getSupportedOutputCapability(cameraArray[0]);
+if (!cameraOutputCap) {
+ console.error("cameraManager.getSupportedOutputCapability error")
+ return;
+}
+console.info("outputCapability: " + JSON.stringify(cameraOutputCap));
+
+let previewProfilesArray = cameraOutputCap.previewProfiles;
+if (!previewProfilesArray) {
+ console.error("createOutput previewProfilesArray == null || undefined")
+}
+
+let photoProfilesArray = cameraOutputCap.photoProfiles;
+if (!photoProfilesArray) {
+ console.error("createOutput photoProfilesArray == null || undefined")
+}
+
+// Create a preview output stream. For details about the surfaceId parameter, see the XComponent. The preview stream is the surface provided by the XComponent.
+let previewOutput
+try {
+ previewOutput = cameraManager.createPreviewOutput(previewProfilesArray[0], surfaceId)
+} catch (error) {
+ console.error("Failed to create the PreviewOutput instance.")
+}
+
+// Listen for preview output errors.
+previewOutput.on('error', (error) => {
+ console.info(`Preview output error code: ${error.code}`);
+})
+
+// Create an ImageReceiver instance and set photographing parameters. The resolution must be one of the photographing resolutions supported by the current device, which can be obtained from photoProfilesArray.
+let imageReceiver = await image.createImageReceiver(1920, 1080, 4, 8)
+// Obtain the surface ID for displaying the photos.
+let photoSurfaceId = await imageReceiver.getReceivingSurfaceId()
+// Create a photo output stream.
+let photoOutput
+try {
+ photoOutput = cameraManager.createPhotoOutput(photoProfilesArray[0], photoSurfaceId)
+} catch (error) {
+ console.error('Failed to createPhotoOutput errorCode = ' + error.code);
+}
+// Create a session.
+let captureSession
+try {
+ captureSession = cameraManager.createCaptureSession()
+} catch (error) {
+ console.error('Failed to create the CaptureSession instance. errorCode = ' + error.code);
+}
+
+// Listen for session errors.
+captureSession.on('error', (error) => {
+ console.info(`Capture session error code: ${error.code}`);
+})
+
+// Start configuration for the session.
+try {
+ captureSession.beginConfig()
+} catch (error) {
+ console.error('Failed to beginConfig. errorCode = ' + error.code);
+}
+
+// Add the camera input stream to the session.
+try {
+ captureSession.addInput(cameraInput)
+} catch (error) {
+ console.error('Failed to addInput. errorCode = ' + error.code);
+}
+
+// Add the preview output stream to the session.
+try {
+ captureSession.addOutput(previewOutput)
+} catch (error) {
+ console.error('Failed to addOutput(previewOutput). errorCode = ' + error.code);
+}
+
+// Add the photo output stream to the session.
+try {
+ captureSession.addOutput(photoOutput)
+} catch (error) {
+ console.error('Failed to addOutput(photoOutput). errorCode = ' + error.code);
+}
+
+// Commit the session configuration.
+await captureSession.commitConfig()
+
+// Start the session.
+await captureSession.start().then(() => {
+ console.info('Promise returned to indicate the session start success.');
+})
+// Check whether the camera has flash.
+let flashStatus
+try {
+ flashStatus = captureSession.hasFlash()
+} catch (error) {
+ console.error('Failed to hasFlash. errorCode = ' + error.code);
+}
+console.info('Promise returned with the flash light support status:' + flashStatus);
+
+if (flashStatus) {
+ // Check whether the auto flash mode is supported.
+ let flashModeStatus
+ try {
+ let status = captureSession.isFlashModeSupported(camera.FlashMode.FLASH_MODE_AUTO)
+ flashModeStatus = status
+ } catch (error) {
+ console.error('Failed to check whether the flash mode is supported. errorCode = ' + error.code);
+ }
+ if(flashModeStatus) {
+ // Set the flash mode to auto.
+ try {
+ captureSession.setFlashMode(camera.FlashMode.FLASH_MODE_AUTO)
+ } catch (error) {
+ console.error('Failed to set the flash mode. errorCode = ' + error.code);
+ }
+ }
+}
+
+// Check whether the continuous auto focus is supported.
+let focusModeStatus
+try {
+ let status = captureSession.isFocusModeSupported(camera.FocusMode.FOCUS_MODE_CONTINUOUS_AUTO)
+ focusModeStatus = status
+} catch (error) {
+ console.error('Failed to check whether the focus mode is supported. errorCode = ' + error.code);
+}
+
+if (focusModeStatus) {
+ // Set the focus mode to continuous auto focus.
+ try {
+ captureSession.setFocusMode(camera.FocusMode.FOCUS_MODE_CONTINUOUS_AUTO)
+ } catch (error) {
+ console.error('Failed to set the focus mode. errorCode = ' + error.code);
+ }
+}
+
+// Obtain the zoom ratio range supported by the camera.
+let zoomRatioRange
+try {
+ zoomRatioRange = captureSession.getZoomRatioRange()
+} catch (error) {
+ console.error('Failed to get the zoom ratio range. errorCode = ' + error.code);
+}
+
+// Set a zoom ratio.
+try {
+ captureSession.setZoomRatio(zoomRatioRange[0])
+} catch (error) {
+ console.error('Failed to set the zoom ratio value. errorCode = ' + error.code);
+}
+let settings = {
+ quality: camera.QualityLevel.QUALITY_LEVEL_HIGH, // Set the photo quality to high.
+ rotation: camera.ImageRotation.ROTATION_0 // Set the rotation angle of the photo to 0.
+}
+// Use the current photographing settings to take photos.
+photoOutput.capture(settings, async (err) => {
+  if (err) {
+    console.error(`Failed to capture the photo ${err.message}`);
+    return;
+  }
+  console.info('Callback invoked to indicate the photo capture request success.');
+});
+// Stop the session.
+captureSession.stop()
+
+// Release the camera input stream.
+cameraInput.close()
+
+// Release the preview output stream.
+previewOutput.release()
+
+// Release the photo output stream.
+photoOutput.release()
+
+// Release the session.
+captureSession.release()
+
+// Set the session to null.
+captureSession = null
+```
diff --git a/en/application-dev/media/camera-shooting.md b/en/application-dev/media/camera-shooting.md
new file mode 100644
index 0000000000000000000000000000000000000000..9026267ebc0a6950ced6b5092ce88e8ed31d2e24
--- /dev/null
+++ b/en/application-dev/media/camera-shooting.md
@@ -0,0 +1,159 @@
+# Camera Photographing
+
+Photographing is an important function of the camera application. The camera module encapsulates the complex logic of the camera hardware and provides APIs for you to set photographing information such as the resolution, flash, focal length, photo quality, and rotation angle.
+
+## How to Develop
+
+Read [Camera](../reference/apis/js-apis-camera.md) for the API reference.
+
+1. Import the image module. The APIs provided by this module are used to obtain the surface ID and create a photo output stream.
+
+ ```ts
+ import image from '@ohos.multimedia.image';
+ ```
+
+2. Obtain the surface ID.
+
+ Call **createImageReceiver()** of the image module to create an **ImageReceiver** instance, and then call **getReceivingSurfaceId()** of the instance to obtain the surface ID. The surface ID is associated with the photo output stream and is used to process the data that the stream outputs.
+
+ ```ts
+ async function getImageReceiverSurfaceId() {
+   let receiver = image.createImageReceiver(640, 480, 4, 8);
+   console.info('before ImageReceiver check');
+   if (receiver !== undefined) {
+     console.info('ImageReceiver is ok');
+     // getReceivingSurfaceId() is asynchronous; wait for the surface ID so that it can be passed to createPhotoOutput() later.
+     let photoSurfaceId = await receiver.getReceivingSurfaceId();
+     console.info('ImageReceived id: ' + JSON.stringify(photoSurfaceId));
+     return photoSurfaceId;
+   } else {
+     console.info('ImageReceiver is not ok');
+     return undefined;
+   }
+ }
+ ```
+
+3. Create a photo output stream.
+
+ Obtain the photo output streams supported by the current device from **photoProfiles** in **CameraOutputCapability**, and then call **createPhotoOutput()** to pass in a supported photo profile and the surface ID obtained in step 2 to create a photo output stream.
+
+ ```ts
+ let photoProfilesArray = cameraOutputCapability.photoProfiles;
+ if (!photoProfilesArray) {
+ console.error("createOutput photoProfilesArray == null || undefined");
+ }
+ let photoOutput;
+ try {
+ photoOutput = cameraManager.createPhotoOutput(photoProfilesArray[0], photoSurfaceId);
+ } catch (error) {
+ console.error('Failed to createPhotoOutput errorCode = ' + error.code);
+ }
+ ```
+
+4. Set camera parameters.
+
+ You can set camera parameters to adjust photographing functions, including the flash, zoom ratio, and focal length.
+
+ ```ts
+ // Check whether the camera has flash.
+ let flashStatus;
+ try {
+ flashStatus = captureSession.hasFlash();
+ } catch (error) {
+ console.error('Failed to hasFlash. errorCode = ' + error.code);
+ }
+ console.info('Flash light support status: ' + flashStatus);
+ if (flashStatus) {
+ // Check whether the auto flash mode is supported.
+ let flashModeStatus;
+ try {
+ let status = captureSession.isFlashModeSupported(camera.FlashMode.FLASH_MODE_AUTO);
+ flashModeStatus = status;
+ } catch (error) {
+ console.error('Failed to check whether the flash mode is supported. errorCode = ' + error.code);
+ }
+ if (flashModeStatus) {
+ // Set the flash mode to auto.
+ try {
+ captureSession.setFlashMode(camera.FlashMode.FLASH_MODE_AUTO);
+ } catch (error) {
+ console.error('Failed to set the flash mode. errorCode = ' + error.code);
+ }
+ }
+ }
+ // Check whether the continuous auto focus is supported.
+ let focusModeStatus;
+ try {
+ let status = captureSession.isFocusModeSupported(camera.FocusMode.FOCUS_MODE_CONTINUOUS_AUTO);
+ focusModeStatus = status;
+ } catch (error) {
+ console.error('Failed to check whether the focus mode is supported. errorCode = ' + error.code);
+ }
+ if (focusModeStatus) {
+ // Set the focus mode to continuous auto focus.
+ try {
+ captureSession.setFocusMode(camera.FocusMode.FOCUS_MODE_CONTINUOUS_AUTO);
+ } catch (error) {
+ console.error('Failed to set the focus mode. errorCode = ' + error.code);
+ }
+ }
+ // Obtain the zoom ratio range supported by the camera.
+ let zoomRatioRange;
+ try {
+ zoomRatioRange = captureSession.getZoomRatioRange();
+ } catch (error) {
+ console.error('Failed to get the zoom ratio range. errorCode = ' + error.code);
+ }
+ // Set a zoom ratio.
+ try {
+ captureSession.setZoomRatio(zoomRatioRange[0]);
+ } catch (error) {
+ console.error('Failed to set the zoom ratio value. errorCode = ' + error.code);
+ }
+ ```
+
+5. Trigger photographing.
+
+ Call **capture()** in the **PhotoOutput** class to capture a photo. In this API, the first parameter specifies the settings (for example, photo quality and rotation angle) for photographing, and the second parameter is a callback function.
+
+ ```ts
+ let settings = {
+ quality: camera.QualityLevel.QUALITY_LEVEL_HIGH, // Set the photo quality to high.
+ rotation: camera.ImageRotation.ROTATION_0, // Set the rotation angle of the photo to 0.
+ location: captureLocation, // Set the geolocation information of the photo.
+ mirror: false // Disable mirroring (disabled by default).
+ };
+ photoOutput.capture(settings, async (err) => {
+ if (err) {
+ console.error(`Failed to capture the photo ${err.message}`);
+ return;
+ }
+ console.info('Callback invoked to indicate the photo capture request success.');
+ });
+ ```
+
+## Status Listening
+
+During camera application development, you can listen for the status of the photo output stream, including the start of the photo stream, the start and end of the photo frame, and the errors of the photo output stream.
+
+- Register the 'captureStart' event to listen for photographing start events. This event can be registered after a **PhotoOutput** object is created and is triggered when the underlying layer starts exposure for photographing. The capture ID is returned.
+
+ ```ts
+ photoOutput.on('captureStart', (captureId) => {
+ console.info(`photo capture started, captureId : ${captureId}`);
+ })
+ ```
+
+- Register the 'captureEnd' event to listen for photographing end events. This event can be registered when a **PhotoOutput** object is created and is triggered when the photographing is complete. [CaptureEndInfo](../reference/apis/js-apis-camera.md#captureendinfo) is returned.
+
+ ```ts
+ photoOutput.on('captureEnd', (captureEndInfo) => {
+ console.info(`photo capture end, captureId : ${captureEndInfo.captureId}`);
+ console.info(`frameCount : ${captureEndInfo.frameCount}`);
+ })
+ ```
+
+- Register the 'error' event to listen for photo output errors. The callback function returns an error code when an API is incorrectly used. For details about the error code types, see [Camera Error Codes](../reference/apis/js-apis-camera.md#cameraerrorcode).
+
+ ```ts
+ photoOutput.on('error', (error) => {
+ console.error(`Photo output error code: ${error.code}`);
+ })
+ ```
diff --git a/en/application-dev/media/camera.md b/en/application-dev/media/camera.md
deleted file mode 100644
index 0622db9c3ce6d962001b47ca6d2e6d1bc2aaff7c..0000000000000000000000000000000000000000
--- a/en/application-dev/media/camera.md
+++ /dev/null
@@ -1,511 +0,0 @@
-# Camera Development
-
-## When to Use
-
-With the APIs provided by the **Camera** module, you can access and operate camera devices and develop new functions. Common operations include preview, photographing, and video recording. You can also implement flash control, exposure time control, focus mode control, zoom control, and much more.
-
-Before calling camera APIs, be familiar with the following concepts:
-
-- **Static camera capabilities**: A series of parameters used to describe inherent capabilities of a camera, such as orientation and supported resolution.
-- **Physical camera**: An independent camera device. The physical camera ID is a string that uniquely identifies a physical camera.
-- **Asynchronous operation**: A non-blocking operation that allows other operations to execute before it completes. To prevent the UI thread from being blocked, some **Camera** calls are asynchronous. Each asynchronous API provides the callback and promise functions.
-
-## How to Develop
-
-### Available APIs
-
-For details about the APIs, see [Camera Management](../reference/apis/js-apis-camera.md).
-
-### Full-Process Scenario
-
-The full process includes applying for permissions, creating an instance, setting parameters, managing sessions, taking photos, recording videos, and releasing resources.
-
-#### Applying for Permissions
-
-You must apply for the permissions for your application to access the camera device and other functions. The following table lists camera-related permissions.
-
-| Permission| Attribute Value |
-| -------- | ------------------------------ |
-| Camera| ohos.permission.CAMERA |
-| Call recording| ohos.permission.MICROPHONE |
-| Storage| ohos.permission.WRITE_MEDIA |
-| Read| ohos.permission.READ_MEDIA |
-| Location| ohos.permission.MEDIA_LOCATION |
-
-The code snippet is as follows:
-
-```typescript
-const PERMISSIONS: Array = [
- 'ohos.permission.CAMERA',
- 'ohos.permission.MICROPHONE',
- 'ohos.permission.MEDIA_LOCATION',
- 'ohos.permission.READ_MEDIA',
- 'ohos.permission.WRITE_MEDIA'
-]
-
-function applyPermission() {
- console.info('[permission] get permission');
- globalThis.abilityContext.requestPermissionFromUser(PERMISSIONS)
- }
-```
-
-#### Creating an Instance
-
-You must create an independent **CameraManager** instance before performing camera operations. If this operation fails, the camera may be occupied or unusable. If the camera is occupied, wait until it is released. You can call **getSupportedCameras()** to obtain the list of cameras supported by the current device. The list stores all camera IDs of the current device. Each of these IDs can be used to create an independent **CameraManager** instance. If the list is empty, no camera is available for the current device and subsequent operations cannot be performed. The camera has preview, shooting, video recording, and metadata output streams. You can use **getSupportedOutputCapability()** to obtain the output stream capabilities of the camera and configure them in the **profile** field in **CameraOutputCapability**. The procedure for creating a **CameraManager** instance is as follows:
-
-```typescript
-import camera from '@ohos.multimedia.camera'
-import image from '@ohos.multimedia.image'
-import media from '@ohos.multimedia.media'
-
-// Create a CameraManager instance.
-context: any = getContext(this)
-let cameraManager = camera.getCameraManager(this.context)
-if (!cameraManager) {
- console.error("camera.getCameraManager error")
- return;
-}
-// Listen for camera state changes.
-cameraManager.on('cameraStatus', (cameraStatusInfo) => {
- console.log(`camera : ${cameraStatusInfo.camera.cameraId}`);
- console.log(`status: ${cameraStatusInfo.status}`);
-})
-
-// Obtain the camera list.
-let cameraArray = cameraManager.getSupportedCameras();
-if (cameraArray.length <= 0) {
- console.error("cameraManager.getSupportedCameras error")
- return;
-}
-
-for (let index = 0; index < cameraArray.length; index++) {
- console.log('cameraId : ' + cameraArray[index].cameraId); // Obtain the camera ID.
- console.log('cameraPosition : ' + cameraArray[index].cameraPosition); // Obtain the camera position.
- console.log('cameraType : ' + cameraArray[index].cameraType); // Obtain the camera type.
- console.log('connectionType : ' + cameraArray[index].connectionType); // Obtain the camera connection type.
-}
-
-// Create a camera input stream.
-let cameraInput
-try {
- cameraInput = cameraManager.createCameraInput(cameraArray[0]);
-} catch () {
- console.error('Failed to createCameraInput errorCode = ' + error.code);
-}
-
-// Listen for CameraInput errors.
-let cameraDevice = cameraArray[0];
-cameraInput.on('error', cameraDevice, (error) => {
- console.log(`Camera input error code: ${error.code}`);
-})
-
-// Open the camera.
-await cameraInput.open();
-
-// Obtain the output stream capabilities supported by the camera.
-let cameraOutputCap = cameraManager.getSupportedOutputCapability(cameraArray[0]);
-if (!cameraOutputCap) {
- console.error("cameraManager.getSupportedOutputCapability error")
- return;
-}
-console.info("outputCapability: " + JSON.stringify(cameraOutputCap));
-
-let previewProfilesArray = cameraOutputCap.previewProfiles;
-if (!previewProfilesArray) {
- console.error("createOutput previewProfilesArray == null || undefined")
-}
-
-let photoProfilesArray = cameraOutputCap.photoProfiles;
-if (!photoProfilesArray) {
- console.error("createOutput photoProfilesArray == null || undefined")
-}
-
-let videoProfilesArray = cameraOutputCap.videoProfiles;
-if (!videoProfilesArray) {
- console.error("createOutput videoProfilesArray == null || undefined")
-}
-
-let metadataObjectTypesArray = cameraOutputCap.supportedMetadataObjectTypes;
-if (!metadataObjectTypesArray) {
- console.error("createOutput metadataObjectTypesArray == null || undefined")
-}
-
-// Create a preview stream. For details about the surfaceId parameter, see the XComponent section. The preview stream is the surface provided by the XComponent.
-let previewOutput
-try {
- previewOutput = cameraManager.createPreviewOutput(previewProfilesArray[0], surfaceId)
-} catch (error) {
- console.error("Failed to create the PreviewOutput instance.")
-}
-
-// Listen for PreviewOutput errors.
-previewOutput.on('error', (error) => {
- console.log(`Preview output error code: ${error.code}`);
-})
-
-// Create an ImageReceiver instance and set photo parameters. Wherein, the resolution must be one of the photographing resolutions supported by the current device, which are obtained by photoProfilesArray.
-let imageReceiver = await image.createImageReceiver(1920, 1080, 4, 8)
-// Obtain the surface ID for displaying the photos.
-let photoSurfaceId = await imageReceiver.getReceivingSurfaceId()
-// Create a photographing output stream.
-let photoOutput
-try {
- photoOutput = cameraManager.createPhotoOutput(photoProfilesArray[0], photoSurfaceId)
-} catch (error) {
- console.error('Failed to createPhotoOutput errorCode = ' + error.code);
-}
-
-// Define video recording parameters.
-let videoConfig = {
- audioSourceType: 1,
- videoSourceType: 1,
- profile: {
- audioBitrate: 48000,
- audioChannels: 2,
- audioCodec: 'audio/mp4v-es',
- audioSampleRate: 48000,
- durationTime: 1000,
- fileFormat: 'mp4',
- videoBitrate: 48000,
- videoCodec: 'video/mp4v-es',
- videoFrameWidth: 640,
- videoFrameHeight: 480,
- videoFrameRate: 30
- },
- url: 'file:///data/media/01.mp4',
- orientationHint: 0,
- maxSize: 100,
- maxDuration: 500,
- rotation: 0
-}
-
-// Create a video recording output stream.
-let videoRecorder
-media.createVideoRecorder().then((recorder) => {
- console.log('createVideoRecorder called')
- videoRecorder = recorder
-})
-// Set video recording parameters.
-videoRecorder.prepare(videoConfig)
-// Obtain the surface ID for video recording.
-let videoSurfaceId
-videoRecorder.getInputSurface().then((id) => {
- console.log('getInputSurface called')
- videoSurfaceId = id
-})
-
-// Create a VideoOutput instance.
-let videoOutput
-try {
- videoOutput = cameraManager.createVideoOutput(videoProfilesArray[0], videoSurfaceId)
-} catch (error) {
- console.error('Failed to create the videoOutput instance. errorCode = ' + error.code);
-}
-
-// Listen for VideoOutput errors.
-videoOutput.on('error', (error) => {
- console.log(`Preview output error code: ${error.code}`);
-})
-```
-Surfaces must be created in advance for the preview, shooting, and video recording stream. The preview stream is the surface provided by the **XComponent**, the shooting stream is the surface provided by **ImageReceiver**, and the video recording stream is the surface provided by **VideoRecorder**.
-
-**XComponent**
-
-```typescript
-mXComponentController: XComponentController = new XComponentController // Create an XComponentController.
-
-build() {
- Flex() {
- XComponent({ // Create an XComponent.
- id: '',
- type: 'surface',
- libraryname: '',
- controller: this.mXComponentController
- })
- .onload(() => { // Set the onload callback.
- // Set the surface width and height (1920 x 1080). For details about how to set the preview size, see the preview resolutions supported by the current device, which are obtained by previewProfilesArray.
- this.mXComponentController.setXComponentSurfaceSize({surfaceWidth:1920,surfaceHeight:1080})
- // Obtain the surface ID.
- globalThis.surfaceId = mXComponentController.getXComponentSurfaceId()
- })
- .width('1920px') // Set the width of the XComponent.
- .height('1080px') // Set the height of the XComponent.
- }
-}
-```
-
-**ImageReceiver**
-
-```typescript
-function getImageReceiverSurfaceId() {
- let receiver = image.createImageReceiver(640, 480, 4, 8)
- console.log(TAG + 'before ImageReceiver check')
- if (receiver !== undefined) {
- console.log('ImageReceiver is ok')
- surfaceId1 = receiver.getReceivingSurfaceId()
- console.log('ImageReceived id: ' + JSON.stringify(surfaceId1))
- } else {
- console.log('ImageReceiver is not ok')
- }
- }
-```
-
-**VideoRecorder**
-
-```typescript
-function getVideoRecorderSurface() {
- await getFd('CameraManager.mp4');
- mVideoConfig.url = mFdPath;
- media.createVideoRecorder((err, recorder) => {
- console.info('Entering create video receiver')
- mVideoRecorder = recorder
- console.info('videoRecorder is :' + JSON.stringify(mVideoRecorder))
- console.info('videoRecorder.prepare called.')
- mVideoRecorder.prepare(mVideoConfig, (err) => {
- console.info('videoRecorder.prepare success.')
- mVideoRecorder.getInputSurface((err, id) => {
- console.info('getInputSurface called')
- mVideoSurface = id
- console.info('getInputSurface surfaceId: ' + JSON.stringify(mVideoSurface))
- })
- })
- })
- }
-```
-
-#### Managing Sessions
-
-##### Creating a Session
-
-```typescript
-// Create a session.
-let captureSession
-try {
- captureSession = cameraManager.createCaptureSession()
-} catch (error) {
- console.error('Failed to create the CaptureSession instance. errorCode = ' + error.code);
-}
-
-// Listen for session errors.
-captureSession.on('error', (error) => {
- console.log(`Capture session error code: ${error.code}`);
-})
-
-// Start configuration for the session.
-try {
- captureSession.beginConfig()
-} catch (error) {
- console.error('Failed to beginConfig. errorCode = ' + error.code);
-}
-
-// Add the camera input stream to the session.
-try {
- captureSession.addInput(cameraInput)
-} catch (error) {
- console.error('Failed to addInput. errorCode = ' + error.code);
-}
-
-// Add the preview input stream to the session.
-try {
- captureSession.addOutput(previewOutput)
-} catch (error) {
- console.error('Failed to addOutput(previewOutput). errorCode = ' + error.code);
-}
-
-// Add the photographing output stream to the session.
-try {
- captureSession.addOutput(photoOutput)
-} catch (error) {
- console.error('Failed to addOutput(photoOutput). errorCode = ' + error.code);
-}
-
-// Commit the session configuration.
-await captureSession.commitConfig()
-
-// Start the session.
-await captureSession.start().then(() => {
- console.log('Promise returned to indicate the session start success.');
-})
-```
-
-##### Switching a Session
-
-```typescript
-// Stop the session.
-await captureSession.stop()
-
-// Start configuration for the session.
-try {
- captureSession.beginConfig()
-} catch (error) {
- console.error('Failed to beginConfig. errorCode = ' + error.code);
-}
-
-// Remove the photographing output stream from the session.
-try {
- captureSession.removeOutput(photoOutput)
-} catch (error) {
- console.error('Failed to removeOutput(photoOutput). errorCode = ' + error.code);
-}
-
-// Add a video recording output stream to the session.
-try {
- captureSession.addOutput(videoOutput)
-} catch (error) {
- console.error('Failed to addOutput(videoOutput). errorCode = ' + error.code);
-}
-
-// Commit the session configuration.
-await captureSession.commitConfig()
-
-// Start the session.
-await captureSession.start().then(() => {
- console.log('Promise returned to indicate the session start success.');
-})
-```
-
-#### Setting Parameters
-
-```typescript
-// Check whether the camera has flash.
-let flashStatus
-try {
- flashStatus = captureSession.hasFlash()
-} catch (error) {
- console.error('Failed to hasFlash. errorCode = ' + error.code);
-}
-console.log('Promise returned with the flash light support status:' + flashStatus);
-
-if (flashStatus) {
- // Check whether the auto flash mode is supported.
- let flashModeStatus
- try {
- let status = captureSession.isFlashModeSupported(camera.FlashMode.FLASH_MODE_AUTO)
- flashModeStatus = status
- } catch (error) {
- console.error('Failed to check whether the flash mode is supported. errorCode = ' + error.code);
- }
- if(flashModeStatus) {
- // Set the flash mode to auto.
- try {
- captureSession.setFlashMode(camera.FlashMode.FLASH_MODE_AUTO)
- } catch (error) {
- console.error('Failed to set the flash mode. errorCode = ' + error.code);
- }
- }
-}
-
-// Check whether the continuous auto focus is supported.
-let focusModeStatus
-try {
- let status = captureSession.isFocusModeSupported(camera.FocusMode.FOCUS_MODE_CONTINUOUS_AUTO)
- focusModeStatus = status
-} catch (error) {
- console.error('Failed to check whether the focus mode is supported. errorCode = ' + error.code);
-}
-
-if (focusModeStatus) {
- // Set the focus mode to continuous auto focus.
- try {
- captureSession.setFocusMode(camera.FocusMode.FOCUS_MODE_CONTINUOUS_AUTO)
- } catch (error) {
- console.error('Failed to set the focus mode. errorCode = ' + error.code);
- }
-}
-
-// Obtain the zoom ratio range supported by the camera.
-let zoomRatioRange
-try {
- zoomRatioRange = captureSession.getZoomRatioRange()
-} catch (error) {
- console.error('Failed to get the zoom ratio range. errorCode = ' + error.code);
-}
-
-// Set a zoom ratio.
-try {
- captureSession.setZoomRatio(zoomRatioRange[0])
-} catch (error) {
- console.error('Failed to set the zoom ratio value. errorCode = ' + error.code);
-}
-```
-
-#### Taking Photos
-
-```typescript
-let settings = {
- quality: camera.QualityLevel.QUALITY_LEVEL_HIGH, // Set the image quality to high.
- rotation: camera.ImageRotation.ROTATION_0 // Set the image rotation angle to 0.
-}
-// Use the current photographing settings to take photos.
-photoOutput.capture(settings, async (err) => {
- if (err) {
- console.error('Failed to capture the photo ${err.message}');
- return;
- }
- console.log('Callback invoked to indicate the photo capture request success.');
-});
-```
-
-#### Recording Videos
-
-```typescript
-// Start the video recording output stream.
-videoOutput.start(async (err) => {
- if (err) {
- console.error('Failed to start the video output ${err.message}');
- return;
- }
- console.log('Callback invoked to indicate the video output start success.');
-});
-
-// Start video recording.
-videoRecorder.start().then(() => {
- console.info('videoRecorder start success');
-}
-
-// Stop video recording.
-videoRecorder.stop().then(() => {
- console.info('stop success');
-}
-
-// Stop the video recording output stream.
-videoOutput.stop((err) => {
- if (err) {
- console.error('Failed to stop the video output ${err.message}');
- return;
- }
- console.log('Callback invoked to indicate the video output stop success.');
-});
-```
-
-For details about the APIs used for saving photos, see [Image Processing](image.md#using-imagereceiver).
-
-#### Releasing Resources
-
-```typescript
-// Stop the session.
-captureSession.stop()
-
-// Release the camera input stream.
-cameraInput.close()
-
-// Release the preview output stream.
-previewOutput.release()
-
-// Release the photographing output stream.
-photoOutput.release()
-
-// Release the video recording output stream.
-videoOutput.release()
-
-// Release the session.
-captureSession.release()
-
-// Set the session to null.
-captureSession = null
-```
-
-## Process Flowchart
-
-The following figure shows the process of using the camera.
-
diff --git a/en/application-dev/media/distributed-audio-playback.md b/en/application-dev/media/distributed-audio-playback.md
new file mode 100644
index 0000000000000000000000000000000000000000..c56420de740e545168d009b5c743f2790146c475
--- /dev/null
+++ b/en/application-dev/media/distributed-audio-playback.md
@@ -0,0 +1,101 @@
+# Distributed Audio Playback (for System Applications Only)
+
+Distributed audio playback enables an application to continue audio playback on another device in the same network.
+
+You can use distributed audio playback to transfer all audio streams or the specified audio stream being played on the current device to a remote device.
+
+## How to Develop
+
+Before continuing audio playback on another device in the same network, you must obtain the device list on the network and listen for device connection state changes. For details, see [Audio Output Device Management](audio-output-device-management.md).
+
+When obtaining the device list on the network, you can specify **DeviceFlag** to filter the list for the required devices.
+
+| Name| Description|
+| -------- | -------- |
+| NONE_DEVICES_FLAG<sup>9+</sup> | None. This is a system API.|
+| OUTPUT_DEVICES_FLAG | Local output device.|
+| INPUT_DEVICES_FLAG | Local input device.|
+| ALL_DEVICES_FLAG | Local input and output device.|
+| DISTRIBUTED_OUTPUT_DEVICES_FLAG<sup>9+</sup> | Remote output device. This is a system API.|
+| DISTRIBUTED_INPUT_DEVICES_FLAG<sup>9+</sup> | Remote input device. This is a system API.|
+| ALL_DISTRIBUTED_DEVICES_FLAG<sup>9+</sup> | Remote input and output device. This is a system API.|
+
+For details about the API reference, see [AudioRoutingManager](../reference/apis/js-apis-audio.md#audioroutingmanager9).
+
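+For example, the following is a minimal sketch of filtering the device list for remote output devices. It assumes that the **AudioRoutingManager** instance is obtained as described in the device management guide and that the application holds the system permissions required by the distributed flags.
+
+```ts
+import audio from '@ohos.multimedia.audio';
+
+let audioManager = audio.getAudioManager();
+let audioRoutingManager = audioManager.getRoutingManager();
+
+// Query only the remote output devices on the network (system API).
+audioRoutingManager.getDevices(audio.DeviceFlag.DISTRIBUTED_OUTPUT_DEVICES_FLAG).then((deviceInfos) => {
+  console.info('Remote output devices: ' + JSON.stringify(deviceInfos));
+}).catch((err) => {
+  console.error(`Failed to get devices, code is ${err.code}, message is ${err.message}`);
+});
+```
+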
+### Continuing the Playing of All Audio Streams
+
+1. [Obtain the output device information](audio-output-device-management.md#obtaining-output-device-information).
+
+2. Create an **AudioDeviceDescriptor** instance to describe an audio output device.
+
+3. Call **selectOutputDevice** to select a remote device, on which all the audio streams will continue playing.
+
+```ts
+let outputAudioDeviceDescriptor = [{
+ deviceRole: audio.DeviceRole.OUTPUT_DEVICE,
+ deviceType: audio.DeviceType.SPEAKER,
+ id: 1,
+ name: "",
+ address: "",
+ sampleRates: [44100],
+ channelCounts: [2],
+ channelMasks: [0],
+ networkId: audio.LOCAL_NETWORK_ID, // For an actual remote device, use the networkId reported for that device.
+ interruptGroupId: 1,
+ volumeGroupId: 1,
+}];
+
+async function selectOutputDevice() {
+ audioRoutingManager.selectOutputDevice(outputAudioDeviceDescriptor, (err) => {
+ if (err) {
+ console.error(`Invoke selectOutputDevice failed, code is ${err.code}, message is ${err.message}`);
+ } else {
+ console.info('Invoke selectOutputDevice succeeded.');
+ }
+ });
+}
+```
+
+### Continuing the Playing of the Specified Audio Stream
+
+1. [Obtain the output device information](audio-output-device-management.md#obtaining-output-device-information).
+
+2. Create an **AudioRendererFilter** instance, with **uid** to specify an application and **rendererId** to specify an audio stream.
+
+3. Create an **AudioDeviceDescriptor** instance to describe an audio output device.
+
+4. Call **selectOutputDeviceByFilter** to select a remote device, on which the specified audio stream will continue playing.
+
+```ts
+let outputAudioRendererFilter = {
+  uid: 20010041,
+  rendererInfo: {
+    content: audio.ContentType.CONTENT_TYPE_MUSIC,
+    usage: audio.StreamUsage.STREAM_USAGE_MEDIA,
+    rendererFlags: 0
+  },
+  rendererId: 0
+};
+
+let outputAudioDeviceDescriptor = [{
+ deviceRole: audio.DeviceRole.OUTPUT_DEVICE,
+ deviceType: audio.DeviceType.SPEAKER,
+ id: 1,
+ name: "",
+ address: "",
+ sampleRates: [44100],
+ channelCounts: [2],
+ channelMasks: [0],
+ networkId: audio.LOCAL_NETWORK_ID, // For an actual remote device, use the networkId reported for that device.
+ interruptGroupId: 1,
+ volumeGroupId: 1,
+}];
+
+async function selectOutputDeviceByFilter() {
+ audioRoutingManager.selectOutputDeviceByFilter(outputAudioRendererFilter, outputAudioDeviceDescriptor, (err) => {
+ if (err) {
+ console.error(`Invoke selectOutputDeviceByFilter failed, code is ${err.code}, message is ${err.message}`);
+ } else {
+ console.info('Invoke selectOutputDeviceByFilter succeeded.');
+ }
+ });
+}
+```
diff --git a/en/application-dev/media/distributed-avsession-overview.md b/en/application-dev/media/distributed-avsession-overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..ff293ed7332d0a9c5e66632f91c943af42d28030
--- /dev/null
+++ b/en/application-dev/media/distributed-avsession-overview.md
@@ -0,0 +1,54 @@
+# Distributed AVSession Overview
+
+With distributed AVSession, OpenHarmony allows users to project locally played media to a distributed device for a better playback effect. For example, users can project audio played on a tablet to a smart speaker.
+
+After the user initiates a projection, the media information is synchronized to the distributed device in real time, and the user can control the playback (for example, previous, next, play, and pause) on the distributed device. From the perspective of the user, the playback control operation on the distributed device is the same as that on the local device.
+
+
+## Interaction Process
+
+After the local device is paired with a distributed device, the controller on the local device projects media to the distributed device through AVSessionManager, thereby implementing a distributed AVSession. The interaction process is shown below.
+
+![Distributed AVSession interaction process](figures/distributed-avsession-interaction-process.png)
+
+The AVSession service on the distributed device automatically creates an **AVSession** object for information synchronization with the local device. The information to synchronize includes the session information, control commands, and events.
+
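+As an illustration only, the hypothetical helper below sketches how a system controller application might trigger such a projection. It assumes the system APIs **castAudio()** in the avSession module and **DISTRIBUTED_OUTPUT_DEVICES_FLAG** in the audio module are available to the caller; the actual implementation of Media Controller may differ.
+
+```ts
+import avSession from '@ohos.multimedia.avsession';
+import audio from '@ohos.multimedia.audio';
+
+async function castAllAudioToRemoteDevice() {
+  let audioManager = audio.getAudioManager();
+  let audioRoutingManager = audioManager.getRoutingManager();
+  // Pick the remote output devices on the same network (system API).
+  let audioDevices = await audioRoutingManager.getDevices(audio.DeviceFlag.DISTRIBUTED_OUTPUT_DEVICES_FLAG);
+  // Project all sessions to the selected remote devices (system API).
+  await avSession.castAudio('all', audioDevices);
+  console.info('All audio sessions are cast to the remote device.');
+}
+```
+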
+## Distributed AVSession Process
+
+After the user triggers a projection, the remote device automatically creates an **AVSession** object that is associated with the one on the local device. The detailed process is as follows:
+
+1. After receiving an audio device switching command, the AVSession service on the local device synchronizes the session information to the distributed device.
+
+2. The controller (for example, Media Controller) on the distributed device detects the new **AVSession** object and creates an **AVSessionController** object for it.
+
+3. Through the **AVSessionController** object, the controller on the distributed device sends a control command to the **AVSession** object on the local device.
+
+4. Upon the receipt of the control command, the **AVSession** object on the local device triggers a callback to the local audio application.
+
+5. The **AVSession** object on the local device synchronizes the new session information to the controller on the distributed device in real time.
+
+6. When the remote device is disconnected, the audio stream is switched back to the local device and the playback is paused. (The audio module completes the switchback, and the AVSession service instructs the application to pause the playback.)
+
+## Distributed AVSession Scenarios
+
+There are two scenarios for projection implemented using the distributed AVSession:
+
+- System projection: The controller (for example, Media Controller) initiates a projection.
+
+  This type of projection takes effect for all applications. After a system projection, all audio on the local device is played from the distributed device by default.
+
+- Application projection: An audio and video application integrates the projection component to initiate a projection. (This scenario is not supported yet.)
+
+ This type of projection takes effect for a single application. After an application projection, audio of the application on the local device is played from the distributed device, and audio of other applications is still played from the local device.
+
+Projection preemption is supported. If application A initiates a projection to a remote device and then application B initiates a projection to the same device, then audio of application B is played on the remote device.
+
+## Relationship Between Distributed AVSession and Distributed Audio Playback
+
+The internal logic for the distributed AVSession to implement projection is as follows:
+
+- APIs related to [distributed audio playback](distributed-audio-playback.md) are called to project audio streams to the distributed device.
+
+- The distributed capability is used to project the session metadata to the distributed device for display.
+
+Projection implemented through the distributed AVSession not only plays audio on the distributed device but also displays the media information there, and it allows the user to control the playback on the distributed device.
diff --git a/en/application-dev/media/figures/audio-capturer-state.png b/en/application-dev/media/figures/audio-capturer-state.png
deleted file mode 100644
index 52b5556260dbf78c5e816b37013248a07e8dbbc6..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/audio-capturer-state.png and /dev/null differ
diff --git a/en/application-dev/media/figures/audio-playback-interaction-diagram.png b/en/application-dev/media/figures/audio-playback-interaction-diagram.png
new file mode 100644
index 0000000000000000000000000000000000000000..b96179b6b610463bc34d2515b145a57b29e574cb
Binary files /dev/null and b/en/application-dev/media/figures/audio-playback-interaction-diagram.png differ
diff --git a/en/application-dev/media/figures/audio-renderer-state.png b/en/application-dev/media/figures/audio-renderer-state.png
deleted file mode 100644
index 9ae30c2a9306dc85662405c36da9e11d07ed9a2a..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/audio-renderer-state.png and /dev/null differ
diff --git a/en/application-dev/media/figures/audio-stream-mgmt-invoking-relationship.png b/en/application-dev/media/figures/audio-stream-mgmt-invoking-relationship.png
new file mode 100644
index 0000000000000000000000000000000000000000..50ad902dd8b55a91a220e2705fea5674cd855ae6
Binary files /dev/null and b/en/application-dev/media/figures/audio-stream-mgmt-invoking-relationship.png differ
diff --git a/en/application-dev/media/figures/audiocapturer-status-change.png b/en/application-dev/media/figures/audiocapturer-status-change.png
new file mode 100644
index 0000000000000000000000000000000000000000..aadbc4fb6470b7cdc0f399ee5954a96c01a7f7c3
Binary files /dev/null and b/en/application-dev/media/figures/audiocapturer-status-change.png differ
diff --git a/en/application-dev/media/figures/audiorenderer-status-change.png b/en/application-dev/media/figures/audiorenderer-status-change.png
new file mode 100644
index 0000000000000000000000000000000000000000..a721044f7aeccfed0260176963d192cac40dd8a6
Binary files /dev/null and b/en/application-dev/media/figures/audiorenderer-status-change.png differ
diff --git a/en/application-dev/media/figures/avsession-interaction-process.png b/en/application-dev/media/figures/avsession-interaction-process.png
new file mode 100644
index 0000000000000000000000000000000000000000..2347599b7d118c45c2d2eb58708729f91c4dc801
Binary files /dev/null and b/en/application-dev/media/figures/avsession-interaction-process.png differ
diff --git a/en/application-dev/media/figures/bitmap-operation.png b/en/application-dev/media/figures/bitmap-operation.png
new file mode 100644
index 0000000000000000000000000000000000000000..c5107dbabd86fdc29863d5f25947b447d9c1deeb
Binary files /dev/null and b/en/application-dev/media/figures/bitmap-operation.png differ
diff --git a/en/application-dev/media/figures/camera-development-model.png b/en/application-dev/media/figures/camera-development-model.png
new file mode 100644
index 0000000000000000000000000000000000000000..fa97f369dda840cb474bc8fffbb7396b8a7b6508
Binary files /dev/null and b/en/application-dev/media/figures/camera-development-model.png differ
diff --git a/en/application-dev/media/figures/camera-workflow.png b/en/application-dev/media/figures/camera-workflow.png
new file mode 100644
index 0000000000000000000000000000000000000000..31a7e814724cf97a80a5cc8b88778334ccb352fb
Binary files /dev/null and b/en/application-dev/media/figures/camera-workflow.png differ
diff --git a/en/application-dev/media/figures/camera_framework_process.png b/en/application-dev/media/figures/camera_framework_process.png
deleted file mode 100644
index bf4b6806fb19e087318306dbc7f9a4b0576273cd..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/camera_framework_process.png and /dev/null differ
diff --git a/en/application-dev/media/figures/cropping.jpeg b/en/application-dev/media/figures/cropping.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..a564818815eb3fde13a40ef02d0811bd56803fb9
Binary files /dev/null and b/en/application-dev/media/figures/cropping.jpeg differ
diff --git a/en/application-dev/media/figures/distributed-avsession-interaction-process.png b/en/application-dev/media/figures/distributed-avsession-interaction-process.png
new file mode 100644
index 0000000000000000000000000000000000000000..d16e362db22857b2ddba3cdbf2142c3759f73fc8
Binary files /dev/null and b/en/application-dev/media/figures/distributed-avsession-interaction-process.png differ
diff --git a/en/application-dev/media/figures/en-us_image_audio_player.png b/en/application-dev/media/figures/en-us_image_audio_player.png
deleted file mode 100644
index 4edcec759e7b8507d605823f157ba9c6c1108fcd..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_audio_player.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_audio_recorder_state_machine.png b/en/application-dev/media/figures/en-us_image_audio_recorder_state_machine.png
deleted file mode 100644
index 8cd657cf19c48da5e52809bad387984f50d5a3c7..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_audio_recorder_state_machine.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_audio_recorder_zero.png b/en/application-dev/media/figures/en-us_image_audio_recorder_zero.png
deleted file mode 100644
index 7c33fcc1723fcdcc468bd3a6004de8b03b20100b..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_audio_recorder_zero.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_audio_routing_manager.png b/en/application-dev/media/figures/en-us_image_audio_routing_manager.png
deleted file mode 100644
index 710679f6cac0c30d06dffa97b0e80b3cebe80f79..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_audio_routing_manager.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_audio_state_machine.png b/en/application-dev/media/figures/en-us_image_audio_state_machine.png
deleted file mode 100644
index 22b7aeaa1db5b369d3daf44854d7f7f9a00f775b..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_audio_state_machine.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_audio_stream_manager.png b/en/application-dev/media/figures/en-us_image_audio_stream_manager.png
deleted file mode 100644
index 1f326d4bd0798dd5ecc0b55130904cbf87d2ea1f..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_audio_stream_manager.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_audio_volume_manager.png b/en/application-dev/media/figures/en-us_image_audio_volume_manager.png
deleted file mode 100644
index 0d47fbfacce9c1ff48811e1cf5d764231bdb596b..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_audio_volume_manager.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_avplayer_audio.png b/en/application-dev/media/figures/en-us_image_avplayer_audio.png
deleted file mode 100644
index b5eb9b02a977d0e4551a236c7cc8a154710f5517..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_avplayer_audio.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_avplayer_state_machine.png b/en/application-dev/media/figures/en-us_image_avplayer_state_machine.png
deleted file mode 100644
index aa8afdbcbf142fd745cee03fc422caec51cfe41b..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_avplayer_state_machine.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_avplayer_video.png b/en/application-dev/media/figures/en-us_image_avplayer_video.png
deleted file mode 100644
index 54525ebed1d1792f43156ffbeb1ffa37f56d8237..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_avplayer_video.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_avrecorder_module_interaction.png b/en/application-dev/media/figures/en-us_image_avrecorder_module_interaction.png
deleted file mode 100644
index 7d5da3bdc91fe8fb7be9f0b4054f934ec054b8e6..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_avrecorder_module_interaction.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_avrecorder_state_machine.png b/en/application-dev/media/figures/en-us_image_avrecorder_state_machine.png
deleted file mode 100644
index 7ffcb21f09365e9b072bdaf48f8b98d7d45a8aaa..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_avrecorder_state_machine.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_avsession.png b/en/application-dev/media/figures/en-us_image_avsession.png
deleted file mode 100644
index 3289bc4ca3c54eb3e99c9230c821380f8f7c0c5b..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_avsession.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_video_player.png b/en/application-dev/media/figures/en-us_image_video_player.png
deleted file mode 100644
index f9b4aabdc7215f22788d92c68ef353fafffda1c3..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_video_player.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_video_recorder_state_machine.png b/en/application-dev/media/figures/en-us_image_video_recorder_state_machine.png
deleted file mode 100644
index 3e81dcc18d1f47b6de087a7a88fd75b308ea51a0..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_video_recorder_state_machine.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_video_recorder_zero.png b/en/application-dev/media/figures/en-us_image_video_recorder_zero.png
deleted file mode 100644
index a7f7fa09392eb916132d891a84d62f31f0f27782..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_video_recorder_zero.png and /dev/null differ
diff --git a/en/application-dev/media/figures/en-us_image_video_state_machine.png b/en/application-dev/media/figures/en-us_image_video_state_machine.png
deleted file mode 100644
index c0595ed5120b632142d6da8841c9e45277b10f55..0000000000000000000000000000000000000000
Binary files a/en/application-dev/media/figures/en-us_image_video_state_machine.png and /dev/null differ
diff --git a/en/application-dev/media/figures/horizontal-flip.jpeg b/en/application-dev/media/figures/horizontal-flip.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..f43e4f6ab2adc68bf0f90eaf8177d36ee91f32ac
Binary files /dev/null and b/en/application-dev/media/figures/horizontal-flip.jpeg differ
diff --git a/en/application-dev/media/figures/image-development-process.png b/en/application-dev/media/figures/image-development-process.png
new file mode 100644
index 0000000000000000000000000000000000000000..47db9d3faf7f8bffc80f63995dc73d0ad32799e5
Binary files /dev/null and b/en/application-dev/media/figures/image-development-process.png differ
diff --git a/en/application-dev/media/figures/invoking-relationship-recording-stream-mgmt.png b/en/application-dev/media/figures/invoking-relationship-recording-stream-mgmt.png
new file mode 100644
index 0000000000000000000000000000000000000000..a1f404f67bf18d91c2cc42ab65d8c7c5f01518a8
Binary files /dev/null and b/en/application-dev/media/figures/invoking-relationship-recording-stream-mgmt.png differ
diff --git a/en/application-dev/media/figures/local-avsession-interaction-process.png b/en/application-dev/media/figures/local-avsession-interaction-process.png
new file mode 100644
index 0000000000000000000000000000000000000000..dfccf9c6874f26a7e030189191f34248b7230b1a
Binary files /dev/null and b/en/application-dev/media/figures/local-avsession-interaction-process.png differ
diff --git a/en/application-dev/media/figures/media-system-framework.png b/en/application-dev/media/figures/media-system-framework.png
new file mode 100644
index 0000000000000000000000000000000000000000..f1b92795c05db2caa6869acfba865f585a947c19
Binary files /dev/null and b/en/application-dev/media/figures/media-system-framework.png differ
diff --git a/en/application-dev/media/figures/offsets.jpeg b/en/application-dev/media/figures/offsets.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..ab4c87a69bae55a62feddc0ca61a0ef1081bf199
Binary files /dev/null and b/en/application-dev/media/figures/offsets.jpeg differ
diff --git a/en/application-dev/media/figures/original-drawing.jpeg b/en/application-dev/media/figures/original-drawing.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..01a0b0d7022dfc0130029154fec7321bc62dfe36
Binary files /dev/null and b/en/application-dev/media/figures/original-drawing.jpeg differ
diff --git a/en/application-dev/media/figures/photographing-development-process.png b/en/application-dev/media/figures/photographing-development-process.png
new file mode 100644
index 0000000000000000000000000000000000000000..b7ee61acfa63da55ef1389212e090da14a091a68
Binary files /dev/null and b/en/application-dev/media/figures/photographing-development-process.png differ
diff --git a/en/application-dev/media/figures/playback-status-change.png b/en/application-dev/media/figures/playback-status-change.png
new file mode 100644
index 0000000000000000000000000000000000000000..860764d3d15b93e544a6f27316584963acba2f0f
Binary files /dev/null and b/en/application-dev/media/figures/playback-status-change.png differ
diff --git a/en/application-dev/media/figures/recording-development-process.png b/en/application-dev/media/figures/recording-development-process.png
new file mode 100644
index 0000000000000000000000000000000000000000..c29043a1f8b9255664969b4e0b0a1ca971d4e1f7
Binary files /dev/null and b/en/application-dev/media/figures/recording-development-process.png differ
diff --git a/en/application-dev/media/figures/recording-status-change.png b/en/application-dev/media/figures/recording-status-change.png
new file mode 100644
index 0000000000000000000000000000000000000000..9f15af9c1992e34fa7d750d08fd0245b6cb3ba67
Binary files /dev/null and b/en/application-dev/media/figures/recording-status-change.png differ
diff --git a/en/application-dev/media/figures/rotate.jpeg b/en/application-dev/media/figures/rotate.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..5965abb46dc9648a3dfd9136e7cc0b5c5203e6a7
Binary files /dev/null and b/en/application-dev/media/figures/rotate.jpeg differ
diff --git a/en/application-dev/media/figures/transparency.png b/en/application-dev/media/figures/transparency.png
new file mode 100644
index 0000000000000000000000000000000000000000..b9b43939f0dad8ee40bf0b6b7e40ddf49d141c66
Binary files /dev/null and b/en/application-dev/media/figures/transparency.png differ
diff --git a/en/application-dev/media/figures/vertical-flip.jpeg b/en/application-dev/media/figures/vertical-flip.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..8ef368d6bb914815a90c8d82352cbd6fd9ab505c
Binary files /dev/null and b/en/application-dev/media/figures/vertical-flip.jpeg differ
diff --git a/en/application-dev/media/figures/video-playback-interaction-diagram.png b/en/application-dev/media/figures/video-playback-interaction-diagram.png
new file mode 100644
index 0000000000000000000000000000000000000000..93778e5fd397820e92b03f60a01076f251348ee6
Binary files /dev/null and b/en/application-dev/media/figures/video-playback-interaction-diagram.png differ
diff --git a/en/application-dev/media/figures/video-playback-status-change.png b/en/application-dev/media/figures/video-playback-status-change.png
new file mode 100644
index 0000000000000000000000000000000000000000..860764d3d15b93e544a6f27316584963acba2f0f
Binary files /dev/null and b/en/application-dev/media/figures/video-playback-status-change.png differ
diff --git a/en/application-dev/media/figures/video-recording-interaction-diagram.png b/en/application-dev/media/figures/video-recording-interaction-diagram.png
new file mode 100644
index 0000000000000000000000000000000000000000..3fbbffe30f5ab06ba0f0a9e6487c76cecd5546c4
Binary files /dev/null and b/en/application-dev/media/figures/video-recording-interaction-diagram.png differ
diff --git a/en/application-dev/media/figures/video-recording-status-change.png b/en/application-dev/media/figures/video-recording-status-change.png
new file mode 100644
index 0000000000000000000000000000000000000000..9f15af9c1992e34fa7d750d08fd0245b6cb3ba67
Binary files /dev/null and b/en/application-dev/media/figures/video-recording-status-change.png differ
diff --git a/en/application-dev/media/figures/zoom.jpeg b/en/application-dev/media/figures/zoom.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..977db6cfbc5b81f5396e4d81f8954a9f7d4168e4
Binary files /dev/null and b/en/application-dev/media/figures/zoom.jpeg differ
diff --git a/en/application-dev/media/image-decoding.md b/en/application-dev/media/image-decoding.md
new file mode 100644
index 0000000000000000000000000000000000000000..00665aa430fb0d2ab95007f29d39b8adc5c5433c
--- /dev/null
+++ b/en/application-dev/media/image-decoding.md
@@ -0,0 +1,143 @@
+# Image Decoding
+
+Image decoding refers to the process of decoding an archived image in a supported format into a [pixel map](image-overview.md) for image display or [processing](image-transformation.md). Currently, the following image formats are supported: JPEG, PNG, GIF, RAW, WebP, BMP, and SVG.
+
+## How to Develop
+
+Read [Image](../reference/apis/js-apis-image.md#imagesource) for APIs related to image decoding.
+
+1. Import the image module.
+
+ ```ts
+ import image from '@ohos.multimedia.image';
+ ```
+
+2. Obtain an image.
+ - Method 1: Obtain the sandbox path. For details about how to obtain the sandbox path, see [Obtaining the Application Development Path](../application-models/application-context-stage.md#obtaining-the-application-development-path). For details about the application sandbox and how to push files to the application sandbox, see [File Management](../file-management/app-sandbox-directory.md).
+
+ ```ts
+ // Code on the stage model
+ const context = getContext(this);
+ const filePath = context.cacheDir + '/test.jpg';
+ ```
+
+ ```ts
+ // Code on the FA model
+ import featureAbility from '@ohos.ability.featureAbility';
+
+ const context = featureAbility.getContext();
+ const filePath = context.getCacheDir() + "/test.jpg";
+ ```
+ - Method 2: Obtain the file descriptor of the image through the sandbox path. For details, see [file.fs API Reference](../reference/apis/js-apis-file-fs.md).
+ To use this method, you must import the \@ohos.file.fs module first.
+
+ ```ts
+ import fs from '@ohos.file.fs';
+ ```
+
+ Then call **fs.openSync()** to obtain the file descriptor.
+
+ ```ts
+ // Code on the stage model
+ const context = getContext(this);
+ const filePath = context.cacheDir + '/test.jpg';
+ const file = fs.openSync(filePath, fs.OpenMode.READ_WRITE);
+ const fd = file?.fd;
+ ```
+
+ ```ts
+ // Code on the FA model
+ import featureAbility from '@ohos.ability.featureAbility';
+
+ const context = featureAbility.getContext();
+ const filePath = context.getCacheDir() + "/test.jpg";
+ const file = fs.openSync(filePath, fs.OpenMode.READ_WRITE);
+ const fd = file?.fd;
+ ```
+ - Method 3: Obtain the array buffer of the resource file through the resource manager. For details, see [ResourceManager API Reference](../reference/apis/js-apis-resource-manager.md#getrawfilecontent9-1).
+
+ ```ts
+ // Code on the stage model
+ const context = getContext(this);
+ // Obtain a resource manager.
+ const resourceMgr = context.resourceManager;
+ ```
+
+ ```ts
+ // Code on the FA model
+ // Import the resourceManager module.
+ import resourceManager from '@ohos.resourceManager';
+ const resourceMgr = await resourceManager.getResourceManager();
+ ```
+
+ The method of obtaining the resource manager varies according to the application model. After obtaining the resource manager, call **resourceMgr.getRawFileContent()** to obtain the array buffer of the resource file.
+
+ ```ts
+ const fileData = await resourceMgr.getRawFileContent('test.jpg');
+ // Obtain the array buffer of the image.
+ const buffer = fileData.buffer;
+ ```
+
+3. Create an **ImageSource** instance.
+ - Method 1: Create an **ImageSource** instance using the sandbox path. The sandbox path can be obtained by using method 1 in step 2.
+
+ ```ts
+ // filePath indicates the obtained sandbox path.
+ const imageSource = image.createImageSource(filePath);
+ ```
+ - Method 2: Create an **ImageSource** instance using the file descriptor. The file descriptor can be obtained by using method 2 in step 2.
+
+ ```ts
+ // fd is the obtained file descriptor.
+ const imageSource = image.createImageSource(fd);
+ ```
+ - Method 3: Create an **ImageSource** instance using an array buffer. The array buffer can be obtained by using method 3 in step 2.
+
+ ```ts
+ const imageSource = image.createImageSource(buffer);
+ ```
+
+4. Set **DecodingOptions** and decode the image to obtain a pixel map.
+
+ ```ts
+ let decodingOptions = {
+   editable: true,
+   desiredPixelFormat: 3, // 3 indicates the RGBA_8888 pixel format.
+ };
+ // Create a pixel map based on the decoding options. Because editable is true, the pixel map can be processed later, for example, rotated, scaled, or cropped.
+ const pixelMap = await imageSource.createPixelMap(decodingOptions);
+ ```
+
+ After the decoding is complete and the pixel map is obtained, you can perform subsequent [image processing](image-transformation.md).
+
+## Sample Code - Decoding an Image in Resource Files
+
+1. Obtain a resource manager.
+
+ ```ts
+ const context = getContext(this);
+ // Obtain a resourceManager instance.
+ const resourceMgr = context.resourceManager;
+ ```
+
+2. Obtain the array buffer of the **test.jpg** file in the **rawfile** folder.
+
+ ```ts
+ const fileData = await resourceMgr.getRawFileContent('test.jpg');
+ // Obtain the array buffer of the image.
+ const buffer = fileData.buffer;
+ ```
+
+3. Create an **ImageSource** instance.
+
+ ```ts
+ const imageSource = image.createImageSource(buffer);
+ ```
+
+4. Create a **PixelMap** instance.
+
+ ```ts
+ const pixelMap = await imageSource.createPixelMap();
+ ```
+
+
\ No newline at end of file
diff --git a/en/application-dev/media/image-encoding.md b/en/application-dev/media/image-encoding.md
new file mode 100644
index 0000000000000000000000000000000000000000..96e23b6ba16c63bdaf282dbaf9abc01d95dd6221
--- /dev/null
+++ b/en/application-dev/media/image-encoding.md
@@ -0,0 +1,48 @@
+# Image Encoding
+
+Image encoding refers to the process of encoding a pixel map into an archived image in different formats (only in JPEG and WebP currently) for subsequent processing, such as storage and transmission.
+
+## How to Develop
+
+Read [Image](../reference/apis/js-apis-image.md#imagepacker) for APIs related to image encoding.
+
+1. Create an **ImagePacker** object.
+
+ ```ts
+ // Import the required module.
+ import image from '@ohos.multimedia.image';
+
+ const imagePackerApi = image.createImagePacker();
+ ```
+
+2. Set the encoding output stream and encoding parameters.
+
+ **format** indicates the image encoding format, and **quality** indicates the image quality. The value ranges from 0 to 100, and the value 100 indicates the optimal quality.
+
+ ```ts
+ let packOpts = { format:"image/jpeg", quality:98 };
+ ```
+
+3. [Create a PixelMap object or an ImageSource object](image-decoding.md).
+
+4. Encode the image and save the encoded image.
+
+ Method 1: Use the **PixelMap** object for encoding.
+
+ ```ts
+ imagePackerApi.packing(pixelMap, packOpts).then( data => {
+ // data is the file stream obtained after packing. You can write the file and save it to obtain an image.
+ }).catch(error => {
+ console.error('Failed to pack the image. And the error is: ' + error);
+ })
+ ```
+
+ Method 2: Use the **ImageSource** object for encoding.
+
+ ```ts
+ imagePackerApi.packing(imageSource, packOpts).then( data => {
+ // data is the file stream obtained after packing. You can write the file and save it to obtain an image.
+ }).catch(error => {
+ console.error('Failed to pack the image. And the error is: ' + error);
+ })
+ ```
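+
+   The packed **data** is an array buffer. The snippet below is a minimal sketch of saving it with the **@ohos.file.fs** module; the output file name **packed.jpg** is a placeholder chosen for illustration.
+
+   ```ts
+   import fs from '@ohos.file.fs';
+
+   imagePackerApi.packing(pixelMap, packOpts).then(data => {
+     // Write the encoded data to a file in the application sandbox.
+     const context = getContext(this);
+     const path = context.filesDir + '/packed.jpg'; // Hypothetical output path.
+     let file = fs.openSync(path, fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE);
+     fs.writeSync(file.fd, data);
+     fs.closeSync(file);
+   }).catch(error => {
+     console.error('Failed to pack the image. And the error is: ' + error);
+   })
+   ```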
diff --git a/en/application-dev/media/image-overview.md b/en/application-dev/media/image-overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..a88eb049166b845068a67eecec5a613435d124ab
--- /dev/null
+++ b/en/application-dev/media/image-overview.md
@@ -0,0 +1,40 @@
+# Image Overview
+
+Image development is the process of parsing, processing, and constructing image pixel data to achieve the required image effect. Image development mainly involves image decoding, processing, and encoding.
+
+Before image development, be familiar with the following basic concepts:
+
+- Image decoding
+
+ The operation of decoding an archived image in a supported format into a pixel map for image display or processing. Currently, the following image formats are supported: JPEG, PNG, GIF, RAW, WebP, BMP, and SVG.
+
+- Pixel map
+
+ A bitmap that is not compressed after being decoded. It is used for image display or processing.
+
+- Image processing
+
+ A series of operations on the pixel map, such as rotation, scaling, opacity setting, image information obtaining, and pixel data reading and writing.
+
+- Image encoding
+
+ The operation of encoding a pixel map into an archived image in different formats (only in JPEG and WebP currently) for subsequent processing, such as storage and transmission.
+
+The figure below illustrates the image development process.
+
+**Figure 1** Image development process
+
+
+1. Image retrieval: Obtain a raw image through the application sandbox.
+
+2. Instance creation: Create an **ImageSource** instance, which is the source class of decoded images and is used to obtain or modify image information.
+
+3. [Image decoding](image-decoding.md): Decode the image source to generate a pixel map.
+
+4. [Image processing](image-transformation.md): Process the pixel map by modifying the image attributes to implement image rotation, scaling, and cropping, and then use the [Image component](../ui/arkts-graphics-display.md) to display the image.
+
+5. [Image encoding](image-encoding.md): Use the **ImagePacker** class to compress and encode the pixel map or image source to generate a new image.
+
+In addition to the preceding basic image development capabilities, OpenHarmony provides the [image tool](image-tool.md) to ease your development.
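+
+For orientation, the sketch below strings these steps together in one condensed flow. It assumes an image that has already been read into an array buffer, and the decoding and packing parameters are illustrative only; each step is described in detail in the linked topics.
+
+```ts
+import image from '@ohos.multimedia.image';
+
+async function imagePipeline(buffer: ArrayBuffer): Promise<ArrayBuffer> {
+  // Decode: create an ImageSource and generate an editable pixel map.
+  const imageSource = image.createImageSource(buffer);
+  const pixelMap = await imageSource.createPixelMap({ editable: true });
+  // Process: rotate the pixel map clockwise by 90°.
+  await pixelMap.rotate(90);
+  // Encode: pack the pixel map into a JPEG.
+  const imagePacker = image.createImagePacker();
+  return await imagePacker.packing(pixelMap, { format: 'image/jpeg', quality: 98 });
+}
+```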
+
+
\ No newline at end of file
diff --git a/en/application-dev/media/image-pixelmap-operation.md b/en/application-dev/media/image-pixelmap-operation.md
new file mode 100644
index 0000000000000000000000000000000000000000..d9b17b2c4dc5e5911e921d19a46d1b3066af5100
--- /dev/null
+++ b/en/application-dev/media/image-pixelmap-operation.md
@@ -0,0 +1,60 @@
+# Pixel Map Operation
+
+To process a certain area in an image, you can perform pixel map operations, which are usually used to beautify the image.
+
+As shown in the figure below, the pixel data of a rectangle in an image is read, modified, and then written back to the corresponding area of the original image.
+
+**Figure 1** Pixel map operation
+
+
+## How to Develop
+
+Read [Image](../reference/apis/js-apis-image.md#pixelmap7) for APIs related to pixel map operations.
+
+1. Complete [image decoding](image-decoding.md#how-to-develop) and obtain a **PixelMap** object.
+
+2. Obtain information from the **PixelMap** object.
+
+ ```ts
+ // Obtain the total number of bytes of this pixel map.
+ let pixelBytesNumber = pixelMap.getPixelBytesNumber();
+ // Obtain the number of bytes per row of this pixel map.
+ let rowCount = pixelMap.getBytesNumberPerRow();
+ // Obtain the pixel density of the image. Pixel density refers to the number of pixels per inch of an image. A larger value of the pixel density indicates a finer image.
+ let getDensity = pixelMap.getDensity();
+ ```
+
+3. Read and modify the pixel data of the target area, and write the modified data back to the original image.
+
+ ```ts
+ // Scenario 1: Read the pixel data of the entire image and write the modified data to an array buffer.
+ const readBuffer = new ArrayBuffer(pixelBytesNumber);
+ pixelMap.readPixelsToBuffer(readBuffer).then(() => {
+ console.info('Succeeded in reading image pixel data.');
+ }).catch(error => {
+ console.error('Failed to read image pixel data. And the error is: ' + error);
+ })
+
+   // Scenario 2: Read the pixel data of a specified area. The read data is written to area.pixels.
+ const area = {
+ pixels: new ArrayBuffer(8),
+ offset: 0,
+ stride: 8,
+ region: { size: { height: 1, width: 2 }, x: 0, y: 0 }
+ }
+ pixelMap.readPixels(area).then(() => {
+ console.info('Succeeded in reading the image data in the area.');
+ }).catch(error => {
+ console.error('Failed to read the image data in the area. And the error is: ' + error);
+ })
+
+   // The read image data can be used independently (by creating a pixel map) or modified as required.
+   // Write area.pixels back to the specified area.
+   pixelMap.writePixels(area).then(() => {
+     console.info('Succeeded in writing pixelMap into the specified area.');
+   })
+
+   // Write the image data in the buffer to the pixel map.
+   const writeColor = new ArrayBuffer(96);
+   pixelMap.writeBufferToPixels(writeColor).then(() => {
+     console.info('Succeeded in writing data from the buffer to the pixel map.');
+   }).catch(error => {
+     console.error('Failed to write data from the buffer to the pixel map. And the error is: ' + error);
+   })
+ ```
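+
+   As an example of modifying the read data before writing it back, the sketch below inverts every byte in **area.pixels** (a simple color inversion for illustration only; note that it also inverts the alpha channel):
+
+   ```ts
+   let areaPixels = new Uint8Array(area.pixels);
+   for (let i = 0; i < areaPixels.length; i++) {
+     areaPixels[i] = 255 - areaPixels[i]; // Invert each byte.
+   }
+   // Then call pixelMap.writePixels(area) as shown above to apply the change.
+   ```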
diff --git a/en/application-dev/media/image-tool.md b/en/application-dev/media/image-tool.md
new file mode 100644
index 0000000000000000000000000000000000000000..16748ff0b56557005793cdbe2798477995412cdf
--- /dev/null
+++ b/en/application-dev/media/image-tool.md
@@ -0,0 +1,43 @@
+# Image Tool
+
+The image tool provides the capabilities of reading and editing Exchangeable Image File Format (EXIF) data of an image.
+
+EXIF is a file format dedicated for photos taken by digital cameras and is used to record attributes and shooting data of the photos. Currently, the image tool supports images in JPEG format only.
+
+Users may need to view or modify the EXIF data of photos in the Gallery application, for example, when the manual lens parameters of the camera are not automatically written as part of the EXIF data or the shooting time is incorrect due to camera power-off.
+
+Currently, OpenHarmony allows you to view and modify part of EXIF data. For details, see [EXIF](../reference/apis/js-apis-image.md#propertykey7).
+
+## How to Develop
+
+Read [Image](../reference/apis/js-apis-image.md#getimageproperty7) for APIs used to read and edit EXIF data.
+
+1. Obtain the image and create an **ImageSource** object.
+
+ ```ts
+ // Import the required module.
+ import image from '@ohos.multimedia.image';
+
+ // Obtain the sandbox path and create an ImageSource object.
+   const fd = ...; // Obtain the file descriptor of the image to be processed.
+ const imageSource = image.createImageSource(fd);
+ ```
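+
+   For illustration, one way to obtain the file descriptor is to open an image in the application sandbox with the **@ohos.file.fs** module. The sketch below assumes a sandbox file named **test.jpg**:
+
+   ```ts
+   import fs from '@ohos.file.fs';
+
+   // Open a JPEG in the application sandbox to obtain its file descriptor.
+   const context = getContext(this);
+   const filePath = context.filesDir + '/test.jpg';
+   const file = fs.openSync(filePath, fs.OpenMode.READ_WRITE);
+   const fd = file.fd;
+   ```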
+
+2. Read and edit EXIF data.
+
+ ```ts
+ // Read the EXIF data, where BitsPerSample indicates the number of bits per pixel.
+ imageSource.getImageProperty('BitsPerSample', (error, data) => {
+ if (error) {
+       console.error('Failed to get the value of the specified attribute key of the image. And the error is: ' + error);
+ } else {
+ console.info('Succeeded in getting the value of the specified attribute key of the image ' + data);
+ }
+ })
+
+ // Edit the EXIF data.
+   imageSource.modifyImageProperty('ImageWidth', '120').then(() => {
+     // getImageProperty is asynchronous; read the new value in the promise callback.
+     imageSource.getImageProperty('ImageWidth').then((width) => {
+       console.info('The new imageWidth is ' + width);
+     })
+   })
+ ```
diff --git a/en/application-dev/media/image-transformation.md b/en/application-dev/media/image-transformation.md
new file mode 100644
index 0000000000000000000000000000000000000000..8965d409dda0fa9271feebb34b3b936c4b624bc6
--- /dev/null
+++ b/en/application-dev/media/image-transformation.md
@@ -0,0 +1,93 @@
+# Image Transformation
+
+Image processing refers to a series of operations performed on the pixel map, such as obtaining image information, cropping, scaling, translating, rotating, flipping, setting opacity, and reading and writing pixel data. These operations can be classified into image transformation and [pixel map operation](image-pixelmap-operation.md). This topic describes the image transformation operations that you can perform.
+
+## How to Develop
+
+Read [Image](../reference/apis/js-apis-image.md#pixelmap7) for APIs related to image transformation.
+
+1. Complete [image decoding](image-decoding.md#how-to-develop) and obtain a **PixelMap** object.
+
+2. Obtain image information.
+
+   ```ts
+   // Obtain the image size.
+   pixelMap.getImageInfo().then(info => {
+     console.info('info.width = ' + info.size.width);
+     console.info('info.height = ' + info.size.height);
+   }).catch((err) => {
+     console.error("Failed to obtain the image pixel map information. And the error is: " + err);
+   });
+   ```
+
+3. Perform image transformation.
+
+ Original image:
+
+ 
+ - Crop the image.
+
+     ```ts
+ // x: x-axis coordinate of the start point for cropping (0).
+ // y: y-axis coordinate of the start point for cropping (0).
+ // height: height after cropping (400), cropping from top to bottom.
+ // width: width after cropping (400), cropping from left to right.
+ pixelMap.crop({x: 0, y: 0, size: { height: 400, width: 400 } });
+ ```
+
+ 
+
+ - Scale the image.
+
+     ```ts
+ // The width of the image after scaling is 0.5 of the original width.
+ // The height of the image after scaling is 0.5 of the original height.
+ pixelMap.scale(0.5, 0.5);
+ ```
+
+ 
+
+ - Translate the image.
+
+     ```ts
+ // Translate the image by 100 units downwards.
+ // Translate the image by 100 units to the right.
+ pixelMap.translate(100, 100);
+ ```
+
+ 
+
+ - Rotate the image.
+
+     ```ts
+     // Rotate the image clockwise by 90°.
+ pixelMap.rotate(90);
+ ```
+
+ 
+
+ - Flip the image.
+
+     ```ts
+ // Flip the image vertically.
+ pixelMap.flip(false, true);
+ ```
+
+ 
+
+
+     ```ts
+ // Flip the image horizontally.
+ pixelMap.flip(true, false);
+ ```
+
+ 
+
+ - Set the opacity of the image.
+
+     ```ts
+ // Set the opacity to 0.5.
+ pixelMap.opacity(0.5);
+ ```
+
+ 
diff --git a/en/application-dev/media/image.md b/en/application-dev/media/image.md
deleted file mode 100644
index fb4e648b56839ef76cb0e5277443605734d7ab6f..0000000000000000000000000000000000000000
--- a/en/application-dev/media/image.md
+++ /dev/null
@@ -1,283 +0,0 @@
-# Image Development
-
-## When to Use
-
-You can use image development APIs to decode images into pixel maps and encode the pixel maps into a supported format.
-
-## Available APIs
-
-For details about the APIs, see [Image Processing](../reference/apis/js-apis-image.md).
-
-## How to Develop
-
-### Full-Process Scenario
-
-The full process includes creating an instance, reading image information, reading and writing pixel maps, updating data, packaging pixels, and releasing resources.
-
-```js
-const color = new ArrayBuffer(96); // Create a buffer to store image pixel data.
-let opts = { alphaType: 0, editable: true, pixelFormat: 4, scaleMode: 1, size: { height: 2, width: 3 } } // Image pixel data.
-
-// Create a PixelMap object.
-image.createPixelMap(color, opts, (err, pixelmap) => {
- console.log('Succeeded in creating pixelmap.');
- // Failed to create the PixelMap object.
- if (err) {
- console.info('create pixelmap failed, err' + err);
- return
- }
-
- // Read pixels.
- const area = {
- pixels: new ArrayBuffer(8),
- offset: 0,
- stride: 8,
- region: { size: { height: 1, width: 2 }, x: 0, y: 0 }
- }
- pixelmap.readPixels(area,() => {
- let bufferArr = new Uint8Array(area.pixels);
- let res = true;
- for (let i = 0; i < bufferArr.length; i++) {
- console.info(' buffer ' + bufferArr[i]);
- if(res) {
- if(bufferArr[i] == 0) {
- res = false;
- console.log('readPixels end.');
- break;
- }
- }
- }
- })
-
- // Store pixels.
- const readBuffer = new ArrayBuffer(96);
- pixelmap.readPixelsToBuffer(readBuffer,() => {
- let bufferArr = new Uint8Array(readBuffer);
- let res = true;
- for (let i = 0; i < bufferArr.length; i++) {
- if(res) {
- if (bufferArr[i] !== 0) {
- res = false;
- console.log('readPixelsToBuffer end.');
- break;
- }
- }
- }
- })
-
- // Write pixels.
- pixelmap.writePixels(area,() => {
- const readArea = { pixels: new ArrayBuffer(20), offset: 0, stride: 8, region: { size: { height: 1, width: 2 }, x: 0, y: 0 }}
- pixelmap.readPixels(readArea,() => {
- let readArr = new Uint8Array(readArea.pixels);
- let res = true;
- for (let i = 0; i < readArr.length; i++) {
- if(res) {
- if (readArr[i] !== 0) {
- res = false;
- console.log('readPixels end.please check buffer');
- break;
- }
- }
- }
- })
- })
-
- const writeColor = new ArrayBuffer(96); // Pixel data of the image.
- // Write pixels to the buffer.
- pixelmap.writeBufferToPixels(writeColor).then(() => {
- const readBuffer = new ArrayBuffer(96);
- pixelmap.readPixelsToBuffer(readBuffer).then (() => {
- let bufferArr = new Uint8Array(readBuffer);
- let res = true;
- for (let i = 0; i < bufferArr.length; i++) {
- if(res) {
- if (bufferArr[i] !== i) {
- res = false;
- console.log('readPixels end.please check buffer');
- break;
- }
- }
- }
- })
- })
-
- // Obtain image information.
- pixelmap.getImageInfo((err, imageInfo) => {
- // Failed to obtain the image information.
- if (err || imageInfo == null) {
- console.info('getImageInfo failed, err' + err);
- return
- }
- if (imageInfo !== null) {
- console.log('Succeeded in getting imageInfo');
- }
- })
-
- // Release the PixelMap object.
- pixelmap.release(()=>{
- console.log('Succeeded in releasing pixelmap');
- })
-})
-
-// Create an image source (uri).
-let path = '/data/local/tmp/test.jpg';
-const imageSourceApi1 = image.createImageSource(path);
-
-// Create an image source (fd).
-let fd = 29;
-const imageSourceApi2 = image.createImageSource(fd);
-
-// Create an image source (data).
-const data = new ArrayBuffer(96);
-const imageSourceApi3 = image.createImageSource(data);
-
-// Release the image source.
-imageSourceApi3.release(() => {
- console.log('Succeeded in releasing imagesource');
-})
-
-// Encode the image.
-const imagePackerApi = image.createImagePacker();
-const imageSourceApi = image.createImageSource(0);
-let packOpts = { format:"image/jpeg", quality:98 };
-imagePackerApi.packing(imageSourceApi, packOpts, (err, data) => {
- if (err) {
- console.info('packing from imagePackerApi failed, err' + err);
- return
- }
- console.log('Succeeded in packing');
-})
-
-// Release the ImagePacker object.
-imagePackerApi.release();
-```
-
-### Decoding Scenario
-
-```js
-let path = '/data/local/tmp/test.jpg'; // Set the path for creating an image source.
-
-// Create an image source using a path.
-const imageSourceApi = image.createImageSource(path); // '/data/local/tmp/test.jpg'
-
-// Set parameters.
-let decodingOptions = {
- sampleSize:1, // Sampling size of the thumbnail.
- editable: true, // Whether the image can be edited.
- desiredSize:{ width:1, height:2}, // Desired output size of the image.
- rotateDegrees:10, // Rotation angle of the image.
- desiredPixelFormat:2, // Decoded pixel format.
- desiredRegion: { size: { height: 1, width: 2 }, x: 0, y: 0 }, // Region of the image to decode.
- index:0// Image sequence number.
- };
-
-// Create a pixel map in callback mode.
-imageSourceApi.createPixelMap(decodingOptions, (err, pixelmap) => {
- // Failed to create the PixelMap object.
- if (err) {
- console.info('create pixelmap failed, err' + err);
- return
- }
- console.log('Succeeded in creating pixelmap.');
-})
-
-// Create a pixel map in promise mode.
-imageSourceApi.createPixelMap().then(pixelmap => {
- console.log('Succeeded in creating pixelmap.');
-
- // Obtain the number of bytes in each line of pixels.
- let num = pixelmap.getBytesNumberPerRow();
-
- // Obtain the total number of pixel bytes.
- let pixelSize = pixelmap.getPixelBytesNumber();
-
- // Obtain the pixel map information.
- pixelmap.getImageInfo().then( imageInfo => {});
-
- // Release the PixelMap object.
- pixelmap.release(()=>{
- console.log('Succeeded in releasing pixelmap');
- })
-}).catch(error => {
- console.log('Failed in creating pixelmap.' + error);
-})
-```
-
-### Encoding Scenario
-
-```js
-let path = '/data/local/tmp/test.png' // Set the path for creating an image source.
-
-// Set the image source.
-const imageSourceApi = image.createImageSource(path); // '/data/local/tmp/test.png'
-
-// Print the error message if the image source fails to be created.
-if (imageSourceApi == null) {
- console.log('Failed in creating imageSource.');
-}
-
-// Create an image packer if the image source is successfully created.
-const imagePackerApi = image.createImagePacker();
-
-// Print the error information if the image packer fails to be created.
-if (imagePackerApi == null) {
- console.log('Failed in creating imagePacker.');
-}
-
-// Set encoding parameters if the image packer is successfully created.
-let packOpts = { format:"image/jpeg", // The supported encoding format is jpg.
- quality:98 } // Image quality, which ranges from 0 to 100.
-
-// Encode the image.
-imagePackerApi.packing(imageSourceApi, packOpts)
-.then( data => {
- console.log('Succeeded in packing');
-})
-
-// Release the image packer after the encoding is complete.
-imagePackerApi.release();
-
-// Obtain the image source information.
-imageSourceApi.getImageInfo((err, imageInfo) => {
- console.log('Succeeded in getting imageInfo');
-})
-
-const array = new ArrayBuffer(100); // Incremental data.
-// Update incremental data.
-imageSourceApi.updateData(array, false, 0, 10,(error, data)=> {})
-
-```
-
-### Using ImageReceiver
-
-Example scenario: The camera functions as the client to transmit image data to the server.
-
-```js
-public async init(surfaceId: any) {
-
- // (Server code) Create an ImageReceiver object.
- let receiver = image.createImageReceiver(8 * 1024, 8, image.ImageFormat.JPEG, 1);
-
- // Obtain the surface ID.
- receiver.getReceivingSurfaceId((err, surfaceId) => {
- // Failed to obtain the surface ID.
- if (err) {
- console.info('getReceivingSurfaceId failed, err' + err);
- return
- }
- console.info("receiver getReceivingSurfaceId success");
- });
- // Register a surface listener, which is triggered after the buffer of the surface is ready.
- receiver.on('imageArrival', () => {
- // Obtain the latest buffer of the surface.
- receiver.readNextImage((err, img) => {
- img.getComponent(4, (err, component) => {
- // Consume component.byteBuffer. For example, save the content in the buffer as an image.
- })
- })
- })
-
- // Call a Camera API to transfer the surface ID to the camera, which then obtains the surface based on the surface ID and generates a surface buffer.
-}
-```
diff --git a/en/application-dev/media/local-avsession-overview.md b/en/application-dev/media/local-avsession-overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..2ced0a180e3bed3a1adea4e4b3ff196721bc23a8
--- /dev/null
+++ b/en/application-dev/media/local-avsession-overview.md
@@ -0,0 +1,63 @@
+# Local AVSession Overview
+
+## Interaction Process
+
+For a local AVSession, the data sources are on the local device. The figure below illustrates the interaction process.
+
+
+
+This process involves two roles: provider and controller.
+
+In the local AVSession, the provider exchanges information with the controller through AVSessionManager.
+
+1. The provider creates an **AVSession** object through AVSessionManager.
+
+2. Through the **AVSession** object, the provider sets session metadata (such as the asset ID, title, and duration) and playback attributes (such as the playback state, speed, and position).
+
+3. The controller creates an **AVSessionController** object through AVSessionManager.
+
+4. Through the **AVSessionController** object, the controller listens for changes of the session metadata and playback attributes.
+
+5. Through the **AVSessionController** object, the controller sends control commands to the **AVSession** object.
+
+6. Through the **AVSession** object, the provider listens for control commands from the controller, such as play, playNext, fastForward, and setSpeed.
+
+## AVSessionManager
+
+AVSessionManager provides the capability of managing sessions. It can create an **AVSession** object, create an **AVSessionController** object, send control commands, and listen for session state changes.
+
+Unlike the **AVSession** and **AVSessionController** objects, AVSessionManager is not a specific object, but the root namespace of AVSessions. You can import AVSessionManager as follows:
+
+```ts
+import AVSessionManager from '@ohos.multimedia.avsession';
+```
+
+All the APIs in the root namespace can be used as APIs of AVSessionManager.
+
+The code snippet below shows how the provider creates an **AVSession** object by using AVSessionManager:
+
+```ts
+// Create an AVSession object.
+async createSession() {
+ let session: AVSessionManager.AVSession = await AVSessionManager.createAVSession(this.context, 'SESSION_NAME', 'audio');
+ console.info(`session create done : sessionId : ${session.sessionId}`);
+}
+```
+
+The code snippet below shows how the controller creates an **AVSessionController** object by using AVSessionManager:
+
+```ts
+// Create an AVSessionController object.
+async createController() {
+ // Obtain the descriptors of all live AVSession objects.
+  let descriptorsArray: Array<AVSessionManager.AVSessionDescriptor> = await AVSessionManager.getAllSessionDescriptors();
+ if (descriptorsArray.length > 0) {
+ // For demonstration, the session ID of the first descriptor is used to create the AVSessionController object.
+ let sessionId: string = descriptorsArray[0].sessionId;
+ let avSessionController: AVSessionManager.AVSessionController = await AVSessionManager.createController(sessionId);
+ console.info(`controller create done : sessionId : ${avSessionController.sessionId}`);
+ }
+}
+```
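+
+Building on the session created above, the provider could then set the metadata and respond to control commands. The snippet below is a minimal sketch; the asset ID, title, and duration are placeholder values.
+
+```ts
+// Set session metadata and listen for control commands.
+async setupSession(session: AVSessionManager.AVSession) {
+  await session.setAVMetadata({ assetId: 'ASSET_ID', title: 'TITLE', duration: 180000 });
+  await session.setAVPlaybackState({ state: AVSessionManager.PlaybackState.PLAYBACK_STATE_PLAY });
+  // Respond to the play command sent by the controller.
+  session.on('play', () => {
+    console.info('play command received');
+  });
+}
+```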
+
+For more information about AVSessionManager APIs, see [API Reference](../reference/apis/js-apis-avsession.md).
diff --git a/en/application-dev/media/media-application-overview.md b/en/application-dev/media/media-application-overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..d350482e61e7bc9659054b0426c10ce07da88045
--- /dev/null
+++ b/en/application-dev/media/media-application-overview.md
@@ -0,0 +1,19 @@
+# Media Application Development Overview
+
+## Multimedia Subsystem Architecture
+
+The multimedia subsystem provides the capability of processing users' visual and auditory information. For example, it can be used to collect, compress, store, decompress, and play audio and video information. Based on the type of media information to process, the media system is usually divided into four modules: audio, media, camera, and image.
+
+As shown in the figure below, the multimedia subsystem provides APIs for developing audio/video, camera, and gallery applications, and provides adaptation and acceleration for different hardware chips. In the middle part, it provides core media functionalities and management mechanisms in the form of services.
+
+**Figure 1** Overall framework of the multimedia subsystem
+
+
+
+- Audio module: provides interfaces and services for volume management, audio route management, and audio mixing management.
+
+- Media module: provides interfaces and services for audio and video decompression, playback, compression, and recording.
+
+- Camera module: provides interfaces and services for accurately controlling camera lenses and collecting visual information.
+
+- Image module: provides interfaces and services for image encoding, decoding, and processing.
diff --git a/en/application-dev/media/mic-management.md b/en/application-dev/media/mic-management.md
new file mode 100644
index 0000000000000000000000000000000000000000..952aeef3f3c607d3a2132eb6d1e0ab6bdd4490c9
--- /dev/null
+++ b/en/application-dev/media/mic-management.md
@@ -0,0 +1,114 @@
+# Microphone Management
+
+The microphone is used to record audio data. To deliver an optimal recording effect, you are advised to query the microphone state before starting recording and listen for state changes during recording.
+
+If the user mutes the microphone during audio recording, the recording process continues as normal and the size of the recorded file increases with the recording duration, but the audio data written into the file consists of zeros (silence).
+
+## How to Develop
+
+The **AudioVolumeGroupManager** class provides APIs for managing the microphone state. For details, see [API Reference](../reference/apis/js-apis-audio.md#audiovolumegroupmanager9).
+
+1. Create an **audioVolumeGroupManager** object.
+
+ ```ts
+ import audio from '@ohos.multimedia.audio';
+
+ let audioVolumeGroupManager;
+ async function loadVolumeGroupManager() { // Create an audioVolumeGroupManager object.
+ const groupid = audio.DEFAULT_VOLUME_GROUP_ID;
+ audioVolumeGroupManager = await audio.getAudioManager().getVolumeManager().getVolumeGroupManager(groupid);
+ console.info('audioVolumeGroupManager create success.');
+ }
+ ```
+
+2. Call **on('micStateChange')** to listen for microphone state changes. When the microphone state changes, the application will be notified of the change.
+
+ Currently, when multiple **AudioManager** instances are used in a single process, only the subscription of the last instance takes effect, and the subscription of other instances is overwritten (even if the last instance does not initiate a subscription). Therefore, you are advised to use a single **AudioManager** instance.
+
+
+ ```ts
+ async function on() { // Subscribe to microphone state changes.
+ audioVolumeGroupManager.on('micStateChange', (micStateChange) => {
+ console.info(`Current microphone status is: ${micStateChange.mute} `);
+ });
+ }
+ ```
+
+3. Call **isMicrophoneMute** to check whether the microphone is muted. If the returned value is **true**, the microphone is muted; otherwise, the microphone is not muted.
+
+ ```ts
+ async function isMicrophoneMute() { // Check whether the microphone is muted.
+ await audioVolumeGroupManager.isMicrophoneMute().then((value) => {
+ console.info(`isMicrophoneMute is: ${value}.`);
+ });
+ }
+ ```
+
+4. Call **setMicrophoneMute** to mute or unmute the microphone. To mute the microphone, pass in **true**. To unmute the microphone, pass in **false**.
+
+ ```ts
+ async function setMicrophoneMuteTrue() { // Pass in true to mute the microphone.
+ await audioVolumeGroupManager.setMicrophoneMute(true).then(() => {
+ console.info('setMicrophoneMute to mute.');
+ });
+ }
+ async function setMicrophoneMuteFalse() { // Pass in false to unmute the microphone.
+ await audioVolumeGroupManager.setMicrophoneMute(false).then(() => {
+ console.info('setMicrophoneMute to not mute.');
+ });
+ }
+ ```
+
+## Sample Code
+
+Refer to the sample code below to complete the process of muting and unmuting the microphone.
+
+```ts
+import audio from '@ohos.multimedia.audio';
+
+@Entry
+@Component
+struct AudioVolumeGroup {
+ private audioVolumeGroupManager: audio.AudioVolumeGroupManager;
+
+ async loadVolumeGroupManager() {
+ const groupid = audio.DEFAULT_VOLUME_GROUP_ID;
+ this.audioVolumeGroupManager = await audio.getAudioManager().getVolumeManager().getVolumeGroupManager(groupid);
+ console.info('audioVolumeGroupManager------create-------success.');
+ }
+
+ async on() { // Subscribe to microphone state changes.
+ await this.loadVolumeGroupManager();
+ this.audioVolumeGroupManager.on('micStateChange', (micStateChange) => {
+ console.info(`Current microphone status is: ${micStateChange.mute} `);
+ });
+ }
+ async isMicrophoneMute() { // Check whether the microphone is muted.
+ await this.audioVolumeGroupManager.isMicrophoneMute().then((value) => {
+ console.info(`isMicrophoneMute is: ${value}.`);
+ });
+ }
+ async setMicrophoneMuteTrue() { // Mute the microphone.
+ await this.loadVolumeGroupManager();
+ await this.audioVolumeGroupManager.setMicrophoneMute(true).then(() => {
+ console.info('setMicrophoneMute to mute.');
+ });
+ }
+ async setMicrophoneMuteFalse() { // Unmute the microphone.
+ await this.loadVolumeGroupManager();
+ await this.audioVolumeGroupManager.setMicrophoneMute(false).then(() => {
+ console.info('setMicrophoneMute to not mute.');
+ });
+ }
+ async test(){
+ await this.on();
+ await this.isMicrophoneMute();
+ await this.setMicrophoneMuteTrue();
+ await this.isMicrophoneMute();
+ await this.setMicrophoneMuteFalse();
+ await this.isMicrophoneMute();
+ await this.setMicrophoneMuteTrue();
+ await this.isMicrophoneMute();
+ }
+}
+```
diff --git a/en/application-dev/media/opensles-capture.md b/en/application-dev/media/opensles-capture.md
deleted file mode 100644
index 4e1775e178db20e01f15ee7a9b8f685a5c47b07b..0000000000000000000000000000000000000000
--- a/en/application-dev/media/opensles-capture.md
+++ /dev/null
@@ -1,151 +0,0 @@
-# OpenSL ES Audio Recording Development
-
-## Introduction
-
-You can use OpenSL ES to develop the audio recording function in OpenHarmony. Currently, only some [OpenSL ES APIs](https://gitee.com/openharmony/third_party_opensles/blob/master/api/1.0.1/OpenSLES.h) are implemented. If an API that has not been implemented is called, **SL_RESULT_FEATURE_UNSUPPORTED** will be returned.
-
-## How to Develop
-
-To use OpenSL ES to develop the audio recording function in OpenHarmony, perform the following steps:
-
-1. Add the header files.
-
- ```c++
- #include
- #include
- #include
- ```
-
-2. Use the **slCreateEngine** API to create and instantiate the **engine** instance.
-
- ```c++
- SLObjectItf engineObject = nullptr;
- slCreateEngine(&engineObject, 0, nullptr, 0, nullptr, nullptr);
- (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
- ```
-
-3. Obtain the **engineEngine** instance of the **SL_IID_ENGINE** interface.
-
- ```c++
- SLEngineItf engineItf = nullptr;
- result = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineItf);
- ```
-
-4. Configure the recorder information (including the input source **audiosource** and output source **audiosink**), and create a **pcmCapturerObject** instance.
-
- ```c++
- SLDataLocator_IODevice io_device = {
- SL_DATALOCATOR_IODEVICE,
- SL_IODEVICE_AUDIOINPUT,
- SL_DEFAULTDEVICEID_AUDIOINPUT,
- NULL
- };
-
- SLDataSource audioSource = {
- &io_device,
- NULL
- };
-
- SLDataLocator_BufferQueue buffer_queue = {
- SL_DATALOCATOR_BUFFERQUEUE,
- 3
- };
-
- // Configure the parameters based on the audio file format.
- SLDataFormat_PCM format_pcm = {
- SL_DATAFORMAT_PCM,
- OHOS::AudioStandard::AudioChannel::MONO,
- OHOS::AudioStandard::AudioSamplingRate::SAMPLE_RATE_44100,
- OHOS::AudioStandard::AudioSampleFormat::SAMPLE_S16LE,
- 0,
- 0,
- 0
- };
-
- SLDataSink audioSink = {
- &buffer_queue,
- &format_pcm
- };
-
- SLObjectItf pcmCapturerObject = nullptr;
- result = (*engineItf)->CreateAudioRecorder(engineItf, &pcmCapturerObject,
- &audioSource, &audioSink, 0, nullptr, nullptr);
- (*pcmCapturerObject)->Realize(pcmCapturerObject, SL_BOOLEAN_FALSE);
- ```
-
-5. Obtain the **recordItf** instance of the **SL_IID_RECORD** interface.
-
- ```c++
- SLRecordItf recordItf;
- (*pcmCapturerObject)->GetInterface(pcmCapturerObject, SL_IID_RECORD, &recordItf);
- ```
-
-6. Obtain the **bufferQueueItf** instance of the **SL_IID_OH_BUFFERQUEUE** interface.
-
- ```c++
- SLOHBufferQueueItf bufferQueueItf;
- (*pcmCapturerObject)->GetInterface(pcmCapturerObject, SL_IID_OH_BUFFERQUEUE, &bufferQueueItf);
- ```
-
-7. Register the **BufferQueueCallback** function.
-
- ```c++
- static void BufferQueueCallback(SLOHBufferQueueItf bufferQueueItf, void *pContext, SLuint32 size)
- {
- AUDIO_INFO_LOG("BufferQueueCallback");
- FILE *wavFile = (FILE *)pContext;
- if (wavFile != nullptr) {
- SLuint8 *buffer = nullptr;
- SLuint32 pSize = 0;
- (*bufferQueueItf)->GetBuffer(bufferQueueItf, &buffer, pSize);
- if (buffer != nullptr) {
- fwrite(buffer, 1, pSize, wavFile);
- (*bufferQueueItf)->Enqueue(bufferQueueItf, buffer, size);
- }
- }
-
- return;
- }
-
- // Set wavFile_ to the descriptor of the file to be recorded.
- (*bufferQueueItf)->RegisterCallback(bufferQueueItf, BufferQueueCallback, wavFile_);
- ```
-
-8. Start audio recording.
-
- ```c++
- static void CaptureStart(SLRecordItf recordItf, SLOHBufferQueueItf bufferQueueItf, FILE *wavFile)
- {
- AUDIO_INFO_LOG("CaptureStart");
- (*recordItf)->SetRecordState(recordItf, SL_RECORDSTATE_RECORDING);
- if (wavFile != nullptr) {
- SLuint8* buffer = nullptr;
- SLuint32 pSize = 0;
- (*bufferQueueItf)->GetBuffer(bufferQueueItf, &buffer, pSize);
- if (buffer != nullptr) {
- AUDIO_INFO_LOG("CaptureStart, enqueue buffer length: %{public}lu.", pSize);
- fwrite(buffer, 1, pSize, wavFile);
- (*bufferQueueItf)->Enqueue(bufferQueueItf, buffer, pSize);
- } else {
- AUDIO_INFO_LOG("CaptureStart, buffer is null or pSize: %{public}lu.", pSize);
- }
- }
-
- return;
- }
- ```
-
-9. Stop audio recording.
-
- ```c++
- static void CaptureStop(SLRecordItf recordItf)
- {
- AUDIO_INFO_LOG("Enter CaptureStop");
- fflush(wavFile_);
- (*recordItf)->SetRecordState(recordItf, SL_RECORDSTATE_STOPPED);
- (*pcmCapturerObject)->Destroy(pcmCapturerObject);
- fclose(wavFile_);
- wavFile_ = nullptr;
- return;
- }
- ```
diff --git a/en/application-dev/media/opensles-playback.md b/en/application-dev/media/opensles-playback.md
deleted file mode 100644
index fe89bc9553da3163e1e18ca43922ff99e13c1307..0000000000000000000000000000000000000000
--- a/en/application-dev/media/opensles-playback.md
+++ /dev/null
@@ -1,104 +0,0 @@
-# OpenSL ES Audio Playback Development
-
-## Introduction
-
-You can use OpenSL ES to develop the audio playback function in OpenHarmony. Currently, only some [OpenSL ES APIs](https://gitee.com/openharmony/third_party_opensles/blob/master/api/1.0.1/OpenSLES.h) are implemented. If an API that has not been implemented is called, **SL_RESULT_FEATURE_UNSUPPORTED** will be returned.
-
-## How to Develop
-
-To use OpenSL ES to develop the audio playback function in OpenHarmony, perform the following steps:
-
-1. Add the header files.
-
- ```c++
- #include
- #include
- #include
- ```
-
-2. Use the **slCreateEngine** API to obtain an **engine** instance.
-
- ```c++
- SLObjectItf engineObject = nullptr;
- slCreateEngine(&engineObject, 0, nullptr, 0, nullptr, nullptr);
- (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
- ```
-
-3. Obtain the **engineEngine** instance of the **SL_IID_ENGINE** interface.
-
- ```c++
- SLEngineItf engineEngine = nullptr;
- (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);
- ```
-
-4. Configure the player and create an **AudioPlayer** instance.
-
- ```c++
- SLDataLocator_BufferQueue slBufferQueue = {
- SL_DATALOCATOR_BUFFERQUEUE,
- 0
- };
-
- // Configure the parameters based on the audio file format.
- SLDataFormat_PCM pcmFormat = {
- SL_DATAFORMAT_PCM,
- 2,
- 48000,
- 16,
- 0,
- 0,
- 0
- };
- SLDataSource slSource = {&slBufferQueue, &pcmFormat};
-
- SLObjectItf pcmPlayerObject = nullptr;
- (*engineEngine)->CreateAudioPlayer(engineEngine, &pcmPlayerObject, &slSource, null, 0, nullptr, nullptr);
- (*pcmPlayerObject)->Realize(pcmPlayerObject, SL_BOOLEAN_FALSE);
- ```
-
-5. Obtain the **bufferQueueItf** instance of the **SL_IID_OH_BUFFERQUEUE** interface.
-
- ```c++
- SLOHBufferQueueItf bufferQueueItf;
- (*pcmPlayerObject)->GetInterface(pcmPlayerObject, SL_IID_OH_BUFFERQUEUE, &bufferQueueItf);
- ```
-
-6. Open an audio file and register the **BufferQueueCallback** function.
-
- ```c++
- FILE *wavFile_ = nullptr;
-
- static void BufferQueueCallback (SLOHBufferQueueItf bufferQueueItf, void *pContext, SLuint32 size)
- {
- FILE *wavFile = (FILE *)pContext;
- if (!feof(wavFile)) {
- SLuint8 *buffer = nullptr;
- SLuint32 pSize = 0;
- (*bufferQueueItf)->GetBuffer(bufferQueueItf, &buffer, pSize);
- // Read data from the file.
- fread(buffer, 1, size, wavFile);
- (*bufferQueueItf)->Enqueue(bufferQueueItf, buffer, size);
- }
- return;
- }
-
- // Set wavFile_ to the descriptor of the file to be played.
- wavFile_ = fopen(path, "rb");
- (*bufferQueueItf)->RegisterCallback(bufferQueueItf, BufferQueueCallback, wavFile_);
- ```
-
-7. Obtain the **playItf** instance of the **SL_PLAYSTATE_PLAYING** interface and start playback.
-
- ```c++
- SLPlayItf playItf = nullptr;
- (*pcmPlayerObject)->GetInterface(pcmPlayerObject, SL_IID_PLAY, &playItf);
- (*playItf)->SetPlayState(playItf, SL_PLAYSTATE_PLAYING);
- ```
-
-8. Stop audio playback.
-
- ```c++
- (*playItf)->SetPlayState(playItf, SL_PLAYSTATE_STOPPED);
- (*pcmPlayerObject)->Destroy(pcmPlayerObject);
- (*engineObject)->Destroy(engineObject);
- ```
diff --git a/en/application-dev/media/remote-camera.md b/en/application-dev/media/remote-camera.md
deleted file mode 100644
index d7bf710279c1504cd9703eca9af7cf5433cb3dac..0000000000000000000000000000000000000000
--- a/en/application-dev/media/remote-camera.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# Distributed Camera Development
-
-## When to Use
-
-You can call the APIs provided by the **Camera** module to develop a distributed camera that provides the basic camera functions such as shooting and video recording.
-
-## How to Develop
-Connect your calculator to a distributed device. Your calculator will call **getSupportedCameras()** to obtain the camera list and traverse the returned camera list to check **ConnectionType** of the **Camera** objects. If **ConnectionType** of a **Camera** object is **CAMERA_CONNECTION_REMOTE**, your calculator will use this object to create a **cameraInput** object. The subsequent call process is the same as that of the local camera development. For details about the local camera development, see [Camera Development](./camera.md).
-
-For details about the APIs, see [Camera Management](../reference/apis/js-apis-camera.md).
-
-### Connecting to a Distributed Camera
-
-Connect the calculator and the distributed device to the same LAN.
-
-Open the calculator and click the arrow icon in the upper right corner. A new window is displayed. Enter the verification code as prompted, and the calculator will be connected to the distributed device.
-
-### Creating an Instance
-
-```js
-import camera from '@ohos.multimedia.camera'
-import image from '@ohos.multimedia.image'
-import media from '@ohos.multimedia.media'
-import featureAbility from '@ohos.ability.featureAbility'
-
-// Create a CameraManager object.
-let cameraManager = camera.getCameraManager(globalThis.Context)
-if (!cameraManager) {
- console.error("camera.getCameraManager error")
- return;
-}
-
-// Register a callback to listen for camera status changes and obtain the updated camera status information.
-cameraManager.on('cameraStatus', (cameraStatusInfo) => {
- console.log('camera : ' + cameraStatusInfo.camera.cameraId);
- console.log('status: ' + cameraStatusInfo.status);
-})
-
-// Obtain the camera list.
-let remoteCamera
-let cameraArray = cameraManager.getSupportedCameras();
-if (cameraArray.length <= 0) {
- console.error("cameraManager.getSupportedCameras error")
- return;
-}
-
-for(let cameraIndex = 0; cameraIndex < cameraArray.length; cameraIndex++) {
- console.log('cameraId : ' + cameraArray[cameraIndex].cameraId) // Obtain the camera ID.
- console.log('cameraPosition : ' + cameraArray[cameraIndex].cameraPosition) // Obtain the camera position.
- console.log('cameraType : ' + cameraArray[cameraIndex].cameraType) // Obtain the camera type.
- console.log('connectionType : ' + cameraArray[cameraIndex].connectionType) // Obtain the camera connection type.
- if (cameraArray[cameraIndex].connectionType == CAMERA_CONNECTION_REMOTE) {
- remoteCamera = cameraArray[cameraIndex]
- }
-}
-
-// Create a camera input stream.
-let cameraInput
-try {
- cameraInput = cameraManager.createCameraInput(remoteCamera);
-} catch () {
- console.error('Failed to createCameraInput errorCode = ' + error.code);
-}
-```
-For details about the subsequent steps, see [Camera Development](./camera.md).
diff --git a/en/application-dev/media/using-audiocapturer-for-recording.md b/en/application-dev/media/using-audiocapturer-for-recording.md
new file mode 100644
index 0000000000000000000000000000000000000000..87d13fa3f749cb18ba1c9d61843b750a36a1bcad
--- /dev/null
+++ b/en/application-dev/media/using-audiocapturer-for-recording.md
@@ -0,0 +1,211 @@
+# Using AudioCapturer for Audio Recording
+
+The AudioCapturer is used to record Pulse Code Modulation (PCM) audio data. It is suitable if you have extensive audio development experience and want to implement more flexible recording features.
+
+## Development Guidelines
+
+The full recording process involves creating an **AudioCapturer** instance, configuring audio recording parameters, starting and stopping recording, and releasing the instance. In this topic, you will learn how to use the AudioCapturer to record audio data. Before development, you are advised to read [AudioCapturer](../reference/apis/js-apis-audio.md#audiocapturer8) for the API reference.
+
+The figure below shows the state changes of the AudioCapturer. After an **AudioCapturer** instance is created, different APIs can be called to switch the AudioCapturer to different states and trigger the required behavior. If an API is called when the AudioCapturer is not in the given state, the system may throw an exception or generate other undefined behavior. Therefore, you are advised to check the AudioCapturer state before triggering state transition.
+
+**Figure 1** AudioCapturer state transition
+
+
+You can call **on('stateChange')** to listen for state changes. For details about each state, see [AudioState](../reference/apis/js-apis-audio.md#audiostate8).
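+
+For example, the subscription could look like this (a minimal sketch, assuming an **audioCapturer** instance created as described in step 1 below):
+
+```ts
+// Subscribe to AudioCapturer state changes.
+audioCapturer.on('stateChange', (state) => {
+  console.info(`AudioCapturer state changed to ${state}`);
+});
+```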
+
+### How to Develop
+
+1. Set audio recording parameters and create an **AudioCapturer** instance. For details about the parameters, see [AudioCapturerOptions](../reference/apis/js-apis-audio.md#audiocaptureroptions8).
+
+ ```ts
+ import audio from '@ohos.multimedia.audio';
+
+ let audioStreamInfo = {
+ samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
+ channels: audio.AudioChannel.CHANNEL_2,
+ sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
+ encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
+ };
+
+ let audioCapturerInfo = {
+ source: audio.SourceType.SOURCE_TYPE_MIC,
+ capturerFlags: 0
+ };
+
+ let audioCapturerOptions = {
+ streamInfo: audioStreamInfo,
+ capturerInfo: audioCapturerInfo
+ };
+
+ audio.createAudioCapturer(audioCapturerOptions, (err, data) => {
+ if (err) {
+ console.error(`Invoke createAudioCapturer failed, code is ${err.code}, message is ${err.message}`);
+ } else {
+ console.info('Invoke createAudioCapturer succeeded.');
+ let audioCapturer = data;
+ }
+ });
+ ```
+
+2. Call **start()** to switch the AudioCapturer to the **running** state and start recording.
+
+ ```ts
+ audioCapturer.start((err) => {
+ if (err) {
+ console.error(`Capturer start failed, code is ${err.code}, message is ${err.message}`);
+ } else {
+ console.info('Capturer start success.');
+ }
+ });
+ ```
+
+3. Specify the recording file path and call **read()** to read the data in the buffer.
+
+ ```ts
+   import fs from '@ohos.file.fs';
+
+   // path is the sandbox path of the file that stores the recorded data.
+   let file = fs.openSync(path, 0o2 | 0o100); // 0o2: read/write; 0o100: create the file if it does not exist.
+   let bufferSize = await audioCapturer.getBufferSize();
+   let buffer = await audioCapturer.read(bufferSize, true);
+   fs.writeSync(file.fd, buffer);
+ ```
+
+4. Call **stop()** to stop recording.
+
+ ```ts
+ audioCapturer.stop((err) => {
+ if (err) {
+ console.error(`Capturer stop failed, code is ${err.code}, message is ${err.message}`);
+ } else {
+ console.info('Capturer stopped.');
+ }
+ });
+ ```
+
+5. Call **release()** to release the instance.
+
+ ```ts
+ audioCapturer.release((err) => {
+ if (err) {
+ console.error(`capturer release failed, code is ${err.code}, message is ${err.message}`);
+ } else {
+ console.info('capturer released.');
+ }
+ });
+ ```
+
+
+### Sample Code
+
+Refer to the sample code below to record audio using AudioCapturer.
+
+```ts
+import audio from '@ohos.multimedia.audio';
+import fs from '@ohos.file.fs';
+
+const TAG = 'AudioCapturerDemo';
+
+export default class AudioCapturerDemo {
+ private audioCapturer = undefined;
+ private audioStreamInfo = {
+ samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
+ channels: audio.AudioChannel.CHANNEL_1,
+ sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
+ encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
+ }
+ private audioCapturerInfo = {
+ source: audio.SourceType.SOURCE_TYPE_MIC, // Audio source type.
+ capturerFlags: 0 // Flag indicating an AudioCapturer.
+ }
+ private audioCapturerOptions = {
+ streamInfo: this.audioStreamInfo,
+ capturerInfo: this.audioCapturerInfo
+ }
+
+ // Create an AudioCapturer instance, and set the events to listen for.
+ init() {
+ audio.createAudioCapturer(this.audioCapturerOptions, (err, capturer) => { // Create an AudioCapturer instance.
+ if (err) {
+ console.error(`Invoke createAudioCapturer failed, code is ${err.code}, message is ${err.message}`);
+ return;
+ }
+
+ console.info(`${TAG}: create AudioCapturer success`);
+ this.audioCapturer = capturer;
+ this.audioCapturer.on('markReach', 1000, (position) => { // Subscribe to the markReach event. A callback is triggered when the number of captured frames reaches 1000.
+ if (position === 1000) {
+ console.info('ON Triggered successfully');
+ }
+ });
+ this.audioCapturer.on('periodReach', 2000, (position) => { // Subscribe to the periodReach event. A callback is triggered when the number of captured frames reaches 2000.
+ if (position === 2000) {
+ console.info('ON Triggered successfully');
+ }
+ });
+
+ });
+ }
+
+ // Start audio recording.
+ async start() {
+ let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
+ if (stateGroup.indexOf(this.audioCapturer.state) === -1) { // Recording can be started only when the AudioCapturer is in the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state.
+ console.error(`${TAG}: start failed`);
+ return;
+ }
+ await this.audioCapturer.start(); // Start recording.
+
+ let context = getContext(this);
+ const path = context.filesDir + '/test.wav'; // Path for storing the recorded audio file.
+
+ let file = fs.openSync(path, 0o2 | 0o100); // Create the file if it does not exist.
+ let fd = file.fd;
+ let numBuffersToCapture = 150; // Write data for 150 times.
+ let count = 0;
+ while (numBuffersToCapture) {
+ let bufferSize = await this.audioCapturer.getBufferSize();
+ let buffer = await this.audioCapturer.read(bufferSize, true);
+ let options = {
+ offset: count * bufferSize,
+ length: bufferSize
+ };
+ if (buffer === undefined) {
+ console.error(`${TAG}: read buffer failed`);
+ } else {
+ let number = fs.writeSync(fd, buffer, options);
+        console.info(`${TAG}: write data: ${number}`);
+ }
+ numBuffersToCapture--;
+ count++;
+ }
+ }
+
+ // Stop recording.
+ async stop() {
+ // The AudioCapturer can be stopped only when it is in the STATE_RUNNING or STATE_PAUSED state.
+ if (this.audioCapturer.state !== audio.AudioState.STATE_RUNNING && this.audioCapturer.state !== audio.AudioState.STATE_PAUSED) {
+ console.info('Capturer is not running or paused');
+ return;
+ }
+ await this.audioCapturer.stop(); // Stop recording.
+ if (this.audioCapturer.state === audio.AudioState.STATE_STOPPED) {
+ console.info('Capturer stopped');
+ } else {
+ console.error('Capturer stop failed');
+ }
+ }
+
+ // Release the instance.
+ async release() {
+ // The AudioCapturer can be released only when it is not in the STATE_RELEASED or STATE_NEW state.
+ if (this.audioCapturer.state === audio.AudioState.STATE_RELEASED || this.audioCapturer.state === audio.AudioState.STATE_NEW) {
+ console.info('Capturer already released');
+ return;
+ }
+ await this.audioCapturer.release(); // Release the instance.
+ if (this.audioCapturer.state == audio.AudioState.STATE_RELEASED) {
+ console.info('Capturer released');
+ } else {
+ console.error('Capturer release failed');
+ }
+ }
+}
+```
diff --git a/en/application-dev/media/using-audiorenderer-for-playback.md b/en/application-dev/media/using-audiorenderer-for-playback.md
new file mode 100644
index 0000000000000000000000000000000000000000..11934e669813fa7a89ceef43bd2c3795db6bad75
--- /dev/null
+++ b/en/application-dev/media/using-audiorenderer-for-playback.md
@@ -0,0 +1,268 @@
+# Using AudioRenderer for Audio Playback
+
+The AudioRenderer is used to play Pulse Code Modulation (PCM) audio data. Unlike the AVPlayer, the AudioRenderer can perform data preprocessing before audio input. Therefore, the AudioRenderer is more suitable if you have extensive audio development experience and want to implement more flexible playback features.
+
+## Development Guidelines
+
+The full rendering process involves creating an **AudioRenderer** instance, configuring audio rendering parameters, starting and stopping rendering, and releasing the instance. In this topic, you will learn how to use the AudioRenderer to render audio data. Before the development, you are advised to read [AudioRenderer](../reference/apis/js-apis-audio.md#audiorenderer8) for the API reference.
+
+The figure below shows the state changes of the AudioRenderer. After an **AudioRenderer** instance is created, different APIs can be called to switch the AudioRenderer to different states and trigger the required behavior. If an API is called when the AudioRenderer is not in the given state, the system may throw an exception or generate other undefined behavior. Therefore, you are advised to check the AudioRenderer state before triggering state transition.
+
+To prevent the UI thread from being blocked, most **AudioRenderer** calls are asynchronous. Each API provides the callback and promise functions. The following examples use the callback functions.
+
+**Figure 1** AudioRenderer state transition
+
+
+
+During application development, you are advised to use **on('stateChange')** to subscribe to state changes of the AudioRenderer. This is because some operations can be performed only when the AudioRenderer is in a given state. If the application performs an operation when the AudioRenderer is not in the given state, the system may throw an exception or generate other undefined behavior.
+
+- **prepared**: The AudioRenderer enters this state by calling **createAudioRenderer()**.
+
+- **running**: The AudioRenderer enters this state by calling **start()** when it is in the **prepared**, **paused**, or **stopped** state.
+
+- **paused**: The AudioRenderer enters this state by calling **pause()** when it is in the **running** state. When the audio playback is paused, the application can call **start()** to resume the playback.
+
+- **stopped**: The AudioRenderer enters this state by calling **stop()** when it is in the **paused** or **running** state.
+
+- **released**: The AudioRenderer enters this state by calling **release()** when it is in the **prepared**, **paused**, or **stopped** state. In this state, the AudioRenderer releases all occupied hardware and software resources and will not transit to any other state.
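+
+For example, the subscription could look like this (a minimal sketch, assuming an **audioRenderer** instance created as described in step 1 below):
+
+```ts
+// Subscribe to AudioRenderer state changes.
+audioRenderer.on('stateChange', (state) => {
+  if (state === audio.AudioState.STATE_RUNNING) {
+    console.info('AudioRenderer is running.');
+  }
+});
+```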
+
+### How to Develop
+
+1. Set audio rendering parameters and create an **AudioRenderer** instance. For details about the parameters, see [AudioRendererOptions](../reference/apis/js-apis-audio.md#audiorendereroptions8).
+
+ ```ts
+ import audio from '@ohos.multimedia.audio';
+
+ let audioStreamInfo = {
+ samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100,
+ channels: audio.AudioChannel.CHANNEL_1,
+ sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
+ encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
+ };
+
+ let audioRendererInfo = {
+ content: audio.ContentType.CONTENT_TYPE_SPEECH,
+ usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION,
+ rendererFlags: 0
+ };
+
+ let audioRendererOptions = {
+ streamInfo: audioStreamInfo,
+ rendererInfo: audioRendererInfo
+ };
+
+ audio.createAudioRenderer(audioRendererOptions, (err, data) => {
+ if (err) {
+ console.error(`Invoke createAudioRenderer failed, code is ${err.code}, message is ${err.message}`);
+ return;
+ } else {
+ console.info('Invoke createAudioRenderer succeeded.');
+ let audioRenderer = data;
+ }
+ });
+ ```
+
+2. Call **start()** to switch the AudioRenderer to the **running** state and start rendering.
+
+ ```ts
+ audioRenderer.start((err) => {
+ if (err) {
+ console.error(`Renderer start failed, code is ${err.code}, message is ${err.message}`);
+ } else {
+ console.info('Renderer start success.');
+ }
+ });
+ ```
+
+3. Specify the address of the file to render. Open the file and call **write()** to continuously write audio data to the buffer for rendering and playing. To implement personalized playback, process the audio data before writing it.
+
+ ```ts
+   import fs from '@ohos.file.fs';
+
+   // filePath is the sandbox path of the audio file to render.
+   const bufferSize = await audioRenderer.getBufferSize();
+   let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
+ let buf = new ArrayBuffer(bufferSize);
+ let readsize = await fs.read(file.fd, buf);
+ let writeSize = await new Promise((resolve, reject) => {
+ audioRenderer.write(buf, (err, writeSize) => {
+ if (err) {
+ reject(err);
+ } else {
+ resolve(writeSize);
+ }
+ });
+ });
+ ```
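+
+   As an example of such preprocessing, the sketch below halves the volume of the data in **buf** before it is written. It assumes the 16-bit sample format (SAMPLE_FORMAT_S16LE) configured in step 1 and is for illustration only.
+
+   ```ts
+   // Attenuate each 16-bit PCM sample by half (about -6 dB).
+   let samples = new Int16Array(buf);
+   for (let i = 0; i < samples.length; i++) {
+     samples[i] = samples[i] / 2;
+   }
+   ```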
+
+4. Call **stop()** to stop rendering.
+
+ ```ts
+ audioRenderer.stop((err) => {
+ if (err) {
+ console.error(`Renderer stop failed, code is ${err.code}, message is ${err.message}`);
+ } else {
+ console.info('Renderer stopped.');
+ }
+ });
+ ```
+
+5. Call **release()** to release the instance.
+
+ ```ts
+ audioRenderer.release((err) => {
+ if (err) {
+ console.error(`Renderer release failed, code is ${err.code}, message is ${err.message}`);
+ } else {
+ console.info('Renderer released.');
+ }
+ });
+ ```
+
+### Sample Code
+
+Refer to the sample code below to render an audio file using AudioRenderer.
+
+```ts
+import audio from '@ohos.multimedia.audio';
+import fs from '@ohos.file.fs';
+
+const TAG = 'AudioRendererDemo';
+
+export default class AudioRendererDemo {
+ private renderModel = undefined;
+ private audioStreamInfo = {
+ samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, // Sampling rate.
+ channels: audio.AudioChannel.CHANNEL_2, // Channel.
+ sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // Sampling format.
+ encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // Encoding format.
+ }
+ private audioRendererInfo = {
+ content: audio.ContentType.CONTENT_TYPE_MUSIC, // Media type.
+ usage: audio.StreamUsage.STREAM_USAGE_MEDIA, // Audio stream usage type.
+ rendererFlags: 0 // AudioRenderer flag.
+ }
+ private audioRendererOptions = {
+ streamInfo: this.audioStreamInfo,
+ rendererInfo: this.audioRendererInfo
+ }
+
+ // Create an AudioRenderer instance, and set the events to listen for.
+ init() {
+ audio.createAudioRenderer(this.audioRendererOptions, (err, renderer) => { // Create an AudioRenderer instance.
+ if (!err) {
+ console.info(`${TAG}: creating AudioRenderer success`);
+ this.renderModel = renderer;
+ this.renderModel.on('stateChange', (state) => { // Set the events to listen for. A callback is invoked when the AudioRenderer is switched to the specified state.
+ if (state == 1) {
+ console.info('audio renderer state is: STATE_PREPARED');
+ }
+ if (state == 2) {
+ console.info('audio renderer state is: STATE_RUNNING');
+ }
+ });
+ this.renderModel.on('markReach', 1000, (position) => { // Subscribe to the markReach event. A callback is triggered when the number of rendered frames reaches 1000.
+ if (position == 1000) {
+ console.info('ON Triggered successfully');
+ }
+ });
+ } else {
+        console.error(`${TAG}: creating AudioRenderer failed, error: ${err.message}`);
+ }
+ });
+ }
+
+ // Start audio rendering.
+ async start() {
+ let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
+ if (stateGroup.indexOf(this.renderModel.state) === -1) { // Rendering can be started only when the AudioRenderer is in the prepared, paused, or stopped state.
+ console.error(TAG + 'start failed');
+ return;
+ }
+ await this.renderModel.start(); // Start rendering.
+
+ const bufferSize = await this.renderModel.getBufferSize();
+ let context = getContext(this);
+ let path = context.filesDir;
+ const filePath = path + '/test.wav'; // Use the sandbox path to obtain the file. The actual file path is /data/storage/el2/base/haps/entry/files/test.wav.
+
+ let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
+ let stat = await fs.stat(filePath);
+ let buf = new ArrayBuffer(bufferSize);
+ let len = stat.size % bufferSize === 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);
+ for (let i = 0; i < len; i++) {
+ let options = {
+ offset: i * bufferSize,
+ length: bufferSize
+ };
+ let readsize = await fs.read(file.fd, buf, options);
+
+ // buf indicates the audio data to be written to the buffer. Before calling AudioRenderer.write(), you can preprocess the audio data for personalized playback. The AudioRenderer reads the audio data written to the buffer for rendering.
+
+ let writeSize = await new Promise((resolve, reject) => {
+ this.renderModel.write(buf, (err, writeSize) => {
+ if (err) {
+ reject(err);
+ } else {
+ resolve(writeSize);
+ }
+ });
+ });
+ if (this.renderModel.state === audio.AudioState.STATE_RELEASED) { // The rendering stops if the AudioRenderer is in the released state.
+ fs.close(file);
+ await this.renderModel.stop();
+ }
+ if (this.renderModel.state === audio.AudioState.STATE_RUNNING) {
+ if (i === len - 1) { // The rendering stops if the file finishes reading.
+ fs.close(file);
+ await this.renderModel.stop();
+ }
+ }
+ }
+ }
+
+ // Pause the rendering.
+ async pause() {
+ // Rendering can be paused only when the AudioRenderer is in the running state.
+ if (this.renderModel.state !== audio.AudioState.STATE_RUNNING) {
+ console.info('Renderer is not running');
+ return;
+ }
+ await this.renderModel.pause(); // Pause rendering.
+ if (this.renderModel.state === audio.AudioState.STATE_PAUSED) {
+ console.info('Renderer is paused.');
+ } else {
+ console.error('Pausing renderer failed.');
+ }
+ }
+
+ // Stop rendering.
+ async stop() {
+ // Rendering can be stopped only when the AudioRenderer is in the running or paused state.
+ if (this.renderModel.state !== audio.AudioState.STATE_RUNNING && this.renderModel.state !== audio.AudioState.STATE_PAUSED) {
+ console.info('Renderer is not running or paused.');
+ return;
+ }
+ await this.renderModel.stop(); // Stop rendering.
+ if (this.renderModel.state === audio.AudioState.STATE_STOPPED) {
+ console.info('Renderer stopped.');
+ } else {
+ console.error('Stopping renderer failed.');
+ }
+ }
+
+ // Release the instance.
+ async release() {
+ // The AudioRenderer can be released only when it is not in the released state.
+ if (this.renderModel.state === audio.AudioState.STATE_RELEASED) {
+ console.info('Renderer already released');
+ return;
+ }
+ await this.renderModel.release(); // Release the instance.
+ if (this.renderModel.state === audio.AudioState.STATE_RELEASED) {
+ console.info('Renderer released');
+ } else {
+ console.error('Renderer release failed.');
+ }
+ }
+}
+```
+
+When audio streams with the same or higher priority need to use the output device, the current audio playback will be interrupted. The application can respond to and handle the interruption event. For details about how to process concurrent audio playback, see [Audio Playback Concurrency Policies](audio-playback-concurrency.md).
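+
+For example, a minimal sketch of subscribing to the interruption event on an existing **audioRenderer** instance; the handling shown is illustrative, and the enum names come from **@ohos.multimedia.audio**:
+
+```ts
+audioRenderer.on('audioInterrupt', (interruptEvent) => {
+  if (interruptEvent.forceType === audio.InterruptForceType.INTERRUPT_FORCE) {
+    // The system has already changed the stream (for example, paused it); update the application state and UI.
+    if (interruptEvent.hintType === audio.InterruptHint.INTERRUPT_HINT_PAUSE) {
+      console.info('Audio playback was paused by the system.');
+    }
+  } else if (interruptEvent.hintType === audio.InterruptHint.INTERRUPT_HINT_RESUME) {
+    // The application may resume rendering on its own.
+    console.info('Audio playback can be resumed.');
+  }
+});
+```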
diff --git a/en/application-dev/media/using-avplayer-for-playback.md b/en/application-dev/media/using-avplayer-for-playback.md
new file mode 100644
index 0000000000000000000000000000000000000000..6cb6ab1e67ef0ae8a44e04fa915ad87bcc9ed024
--- /dev/null
+++ b/en/application-dev/media/using-avplayer-for-playback.md
@@ -0,0 +1,167 @@
+# Using AVPlayer for Audio Playback
+
+The AVPlayer is used to play raw media assets in an end-to-end manner. In this topic, you will learn how to use the AVPlayer to play a complete piece of music.
+
+If you want the application to continue playing the music in the background or when the screen is off, you must use the [AVSession](avsession-overview.md) and [continuous task](../task-management/continuous-task-dev-guide.md) to prevent the playback from being forcibly interrupted by the system.
+
+
+The full playback process includes creating an **AVPlayer** instance, setting the media asset to play, setting playback parameters (volume, speed, and focus mode), controlling playback (play, pause, seek, and stop), resetting the playback configuration, and releasing the instance.
+
+
+During application development, you can use the **state** attribute of the AVPlayer to obtain the AVPlayer state or call **on('stateChange')** to listen for state changes. If the application performs an operation when the AVPlayer is not in the given state, the system may throw an exception or generate other undefined behavior.
+
+
+**Figure 1** Playback state transition
+
+
+For details about the state, see [AVPlayerState](../reference/apis/js-apis-media.md#avplayerstate9). When the AVPlayer is in the **prepared**, **playing**, **paused**, or **completed** state, the playback engine is working and a large amount of RAM is occupied. If your application does not need to use the AVPlayer, call **reset()** or **release()** to release the instance.
+
+## How to Develop
+
+Read [AVPlayer](../reference/apis/js-apis-media.md#avplayer9) for the API reference.
+
+1. Call **createAVPlayer()** to create an **AVPlayer** instance. The AVPlayer enters the **idle** state.
+
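+   A minimal sketch, assuming the promise style used in the sample code below; the instance is kept in a variable for the later steps:
+
+   ```ts
+   import media from '@ohos.multimedia.media';
+
+   let avPlayer;
+   media.createAVPlayer().then((player) => {
+     avPlayer = player; // The new instance is in the idle state.
+   }, (err) => {
+     console.error(`Invoke createAVPlayer failed, code is ${err.code}, message is ${err.message}`);
+   });
+   ```
+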
+2. Set the events to listen for, which will be used in the full-process scenario. The table below lists the supported events.
+   | Event Type| Description|
+   | -------- | -------- |
+   | stateChange | Mandatory; used to listen for changes of the **state** attribute of the AVPlayer.|
+   | error | Mandatory; used to listen for AVPlayer errors.|
+   | durationUpdate | Used to listen for progress bar updates to refresh the media asset duration.|
+   | timeUpdate | Used to listen for the current position of the progress bar to refresh the current time.|
+   | seekDone | Used to listen for the completion status of the **seek()** request.<br>This event is reported when the AVPlayer seeks to the playback position specified in **seek()**.|
+   | speedDone | Used to listen for the completion status of the **setSpeed()** request.<br>This event is reported when the AVPlayer plays music at the speed specified in **setSpeed()**.|
+   | volumeChange | Used to listen for the completion status of the **setVolume()** request.<br>This event is reported when the AVPlayer plays music at the volume specified in **setVolume()**.|
+   | bufferingUpdate | Used to listen for network playback buffer information. This event reports the buffer percentage and playback progress.|
+   | audioInterrupt | Used to listen for audio interruption. This event is used together with the **audioInterruptMode** attribute.<br>This event is reported when the current audio playback is interrupted by another audio stream (for example, when a call comes in), so the application can process the event in time.|
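+
+   For example, a brief sketch of the two progress-related listeners (the mandatory **stateChange** and **error** listeners appear in the full sample below); the **avPlayer** variable is assumed from step 1:
+
+   ```ts
+   avPlayer.on('durationUpdate', (duration) => {
+     console.info(`AVPlayer duration update: ${duration}`);
+   });
+   avPlayer.on('timeUpdate', (time) => {
+     console.info(`AVPlayer time update: ${time}`);
+   });
+   ```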
+
+3. Set the media asset URL. The AVPlayer enters the **initialized** state.
+ > **NOTE**
+ >
+ > The URL in the code snippet below is for reference only. You need to check the media asset validity and set the URL based on service requirements.
+ >
+ > - If local files are used for playback, ensure that the files are available and the application sandbox path is used for access. For details about how to obtain the application sandbox path, see [Obtaining the Application Development Path](../application-models/application-context-stage.md#obtaining-the-application-development-path). For details about the application sandbox and how to push files to the application sandbox, see [File Management](../file-management/app-sandbox-directory.md).
+ >
+ > - If a network playback path is used, you must request the ohos.permission.INTERNET [permission](../security/accesstoken-guidelines.md).
+ >
+ > - You can also use **ResourceManager.getRawFd** to obtain the file descriptor of a file packed in the HAP file. For details, see [ResourceManager API Reference](../reference/apis/js-apis-resource-manager.md#getrawfd9).
+ >
+ > - The [playback formats and protocols](avplayer-avrecorder-overview.md#supported-formats-and-protocols) in use must be those supported by the system.
+
+4. Call **prepare()** to switch the AVPlayer to the **prepared** state. In this state, you can obtain the duration of the media asset to play and set the volume.
+
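+   A minimal sketch, assuming the **avPlayer** variable from step 1; **duration** and **setVolume()** become usable in the **prepared** state:
+
+   ```ts
+   avPlayer.prepare().then(() => {
+     console.info(`AVPlayer duration is ${avPlayer.duration}`);
+     avPlayer.setVolume(0.5); // Volume range: 0.00 to 1.00.
+   }, (err) => {
+     console.error(`Invoke prepare failed, code is ${err.code}, message is ${err.message}`);
+   });
+   ```
+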
+5. Call **play()**, **pause()**, **seek()**, and **stop()** to perform audio playback control as required.
+
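+   A brief sketch of the control calls, shown together only for brevity (real code drives them from UI events or state callbacks); the **avPlayer** variable is assumed from step 1:
+
+   ```ts
+   avPlayer.play();
+   avPlayer.seek(5000); // Seek to 5000 ms; completion is reported through the seekDone event.
+   avPlayer.pause();
+   avPlayer.stop();
+   ```
+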
+6. (Optional) Call **reset()** to reset the AVPlayer. The AVPlayer enters the **idle** state again and you can change the media asset URL.
+
+7. Call **release()** to switch the AVPlayer to the **released** state. Now your application exits the playback.
+
+## Sample Code
+
+Refer to the sample code below to play a complete piece of music.
+
+```ts
+import media from '@ohos.multimedia.media';
+import fs from '@ohos.file.fs';
+import common from '@ohos.app.ability.common';
+
+export class AVPlayerDemo {
+ private avPlayer;
+ private count: number = 0;
+
+ // Set AVPlayer callback functions.
+ setAVPlayerCallback() {
+ // Callback function for the seek operation.
+ this.avPlayer.on('seekDone', (seekDoneTime) => {
+ console.info(`AVPlayer seek succeeded, seek time is ${seekDoneTime}`);
+ })
+ // Callback function for errors. If an error occurs during the operation on the AVPlayer, reset() is called to reset the AVPlayer.
+ this.avPlayer.on('error', (err) => {
+ console.error(`Invoke avPlayer failed, code is ${err.code}, message is ${err.message}`);
+ this.avPlayer.reset(); // Call reset() to reset the AVPlayer, which enters the idle state.
+ })
+ // Callback function for state changes.
+ this.avPlayer.on('stateChange', async (state, reason) => {
+ switch (state) {
+ case 'idle': // This state is reported upon a successful callback of reset().
+ console.info('AVPlayer state idle called.');
+ this.avPlayer.release(); // Call release() to release the instance.
+ break;
+ case 'initialized': // This state is reported when the AVPlayer sets the playback source.
+          console.info('AVPlayer state initialized called.');
+ this.avPlayer.prepare().then(() => {
+ console.info('AVPlayer prepare succeeded.');
+ }, (err) => {
+ console.error(`Invoke prepare failed, code is ${err.code}, message is ${err.message}`);
+ });
+ break;
+ case 'prepared': // This state is reported upon a successful callback of prepare().
+ console.info('AVPlayer state prepared called.');
+ this.avPlayer.play(); // Call play() to start playback.
+ break;
+ case 'playing': // This state is reported upon a successful callback of play().
+ console.info('AVPlayer state playing called.');
+ if (this.count !== 0) {
+ console.info('AVPlayer start to seek.');
+            this.avPlayer.seek(this.avPlayer.duration); // Call seek() to seek to the end of the audio clip.
+ } else {
+ this.avPlayer.pause(); // Call pause() to pause the playback.
+ }
+ this.count++;
+ break;
+ case 'paused': // This state is reported upon a successful callback of pause().
+ console.info('AVPlayer state paused called.');
+ this.avPlayer.play(); // Call play() again to start playback.
+ break;
+ case 'completed': // This state is reported upon the completion of the playback.
+ console.info('AVPlayer state completed called.');
+ this.avPlayer.stop(); // Call stop() to stop the playback.
+ break;
+ case 'stopped': // This state is reported upon a successful callback of stop().
+ console.info('AVPlayer state stopped called.');
+ this.avPlayer.reset(); // Call reset() to reset the AVPlayer state.
+ break;
+ case 'released':
+ console.info('AVPlayer state released called.');
+ break;
+ default:
+ console.info('AVPlayer state unknown called.');
+ break;
+ }
+ })
+ }
+
+ // The following demo shows how to use the file system to open the sandbox address, obtain the media file address, and play the media file using the URL attribute.
+ async avPlayerUrlDemo() {
+ // Create an AVPlayer instance.
+ this.avPlayer = await media.createAVPlayer();
+ // Set a callback function for state changes.
+ this.setAVPlayerCallback();
+ let fdPath = 'fd://';
+ // Obtain the sandbox address filesDir through UIAbilityContext. The stage model is used as an example.
+ let context = getContext(this) as common.UIAbilityContext;
+ let pathDir = context.filesDir;
+ let path = pathDir + '/01.mp3';
+ // Open the corresponding file address to obtain the file descriptor and assign a value to the URL to trigger the reporting of the initialized state.
+ let file = await fs.open(path);
+    fdPath = fdPath + file.fd;
+ this.avPlayer.url = fdPath;
+ }
+
+ // The following demo shows how to use resourceManager to obtain the media file packed in the HAP file and play the media file by using the fdSrc attribute.
+ async avPlayerFdSrcDemo() {
+ // Create an AVPlayer instance.
+ this.avPlayer = await media.createAVPlayer();
+ // Set a callback function for state changes.
+ this.setAVPlayerCallback();
+ // Call getRawFd of the resourceManager member of UIAbilityContext to obtain the media asset URL.
+ // The return type is {fd,offset,length}, where fd indicates the file descriptor address of the HAP file, offset indicates the media asset offset, and length indicates the duration of the media asset to play.
+ let context = getContext(this) as common.UIAbilityContext;
+ let fileDescriptor = await context.resourceManager.getRawFd('01.mp3');
+ // Assign a value to fdSrc to trigger the reporting of the initialized state.
+ this.avPlayer.fdSrc = fileDescriptor;
+ }
+}
+```
+
+
\ No newline at end of file
diff --git a/en/application-dev/media/using-avrecorder-for-recording.md b/en/application-dev/media/using-avrecorder-for-recording.md
new file mode 100644
index 0000000000000000000000000000000000000000..71ab8557df470671088adfaa0473a6448d935881
--- /dev/null
+++ b/en/application-dev/media/using-avrecorder-for-recording.md
@@ -0,0 +1,182 @@
+# Using AVRecorder for Audio Recording
+
+You will learn how to use the AVRecorder to develop audio recording functionalities, including starting, pausing, resuming, and stopping recording.
+
+During application development, you can use the **state** attribute of the AVRecorder to obtain the AVRecorder state or call **on('stateChange')** to listen for state changes. Your code must meet the state machine requirements. For example, **pause()** is called only when the AVRecorder is in the **started** state, and **resume()** is called only when it is in the **paused** state.
+
+**Figure 1** Recording state transition
+
+
+
+For details about the state, see [AVRecorderState](../reference/apis/js-apis-media.md#avrecorderstate9).
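+
+A minimal sketch of the state checks this implies, assuming an existing **avRecorder** instance inside an async function (the full sample below performs the same checks):
+
+```ts
+if (avRecorder.state === 'started') {
+  await avRecorder.pause(); // pause() is valid only in the started state.
+}
+if (avRecorder.state === 'paused') {
+  await avRecorder.resume(); // resume() is valid only in the paused state.
+}
+```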
+
+
+## How to Develop
+
+Read [AVRecorder](../reference/apis/js-apis-media.md#avrecorder9) for the API reference.
+
+1. Create an **AVRecorder** instance. The AVRecorder enters the **idle** state.
+
+ ```ts
+ import media from '@ohos.multimedia.media';
+
+ let avRecorder = undefined;
+ media.createAVRecorder().then((recorder) => {
+ avRecorder = recorder;
+ }, (err) => {
+ console.error(`Invoke createAVRecorder failed, code is ${err.code}, message is ${err.message}`);
+ })
+ ```
+
+2. Set the events to listen for.
+ | Event Type| Description|
+ | -------- | -------- |
+ | stateChange | Mandatory; used to listen for changes of the **state** attribute of the AVRecorder.|
+ | error | Mandatory; used to listen for AVRecorder errors.|
+
+
+ ```ts
+ // Callback function for state changes.
+ avRecorder.on('stateChange', (state, reason) => {
+ console.log(`current state is ${state}`);
+ // You can add the action to be performed after the state is switched.
+ })
+
+ // Callback function for errors.
+ avRecorder.on('error', (err) => {
+ console.error(`avRecorder failed, code is ${err.code}, message is ${err.message}`);
+ })
+ ```
+
+3. Set audio recording parameters and call **prepare()**. The AVRecorder enters the **prepared** state.
+ > **NOTE**
+ >
+ > Pay attention to the following when configuring parameters:
+ >
+ > - In pure audio recording scenarios, set only audio-related parameters in **avConfig** of **prepare()**.
+ > If video-related parameters are configured, an error will be reported in subsequent steps. If video recording is required, follow the instructions provided in [Video Recording Development](video-recording.md).
+ >
+ > - The [recording formats](avplayer-avrecorder-overview.md#supported-formats) in use must be those supported by the system.
+ >
+ > - The recording output URL (URL in **avConfig** in the sample code) must be in the format of fd://xx (where xx indicates a file descriptor). You must call [ohos.file.fs](../reference/apis/js-apis-file-fs.md) to implement access to the application file. For details, see [Application File Access and Management](../file-management/app-file-access.md).
+
+
+ ```ts
+ let avProfile = {
+ audioBitrate: 100000, // Audio bit rate.
+ audioChannels: 2, // Number of audio channels.
+ audioCodec: media.CodecMimeType.AUDIO_AAC, // Audio encoding format. Currently, only AAC is supported.
+ audioSampleRate: 48000, // Audio sampling rate.
+ fileFormat: media.ContainerFormatType.CFT_MPEG_4A, // Encapsulation format. Currently, only M4A is supported.
+ }
+ let avConfig = {
+ audioSourceType: media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC, // Audio input source. In this example, the microphone is used.
+ profile: avProfile,
+ url: 'fd://35', // Obtain the file descriptor of the created audio file by referring to the sample code in Application File Access and Management.
+ }
+ avRecorder.prepare(avConfig).then(() => {
+ console.log('Invoke prepare succeeded.');
+ }, (err) => {
+ console.error(`Invoke prepare failed, code is ${err.code}, message is ${err.message}`);
+ })
+ ```
+
+4. Call **start()** to start recording. The AVRecorder enters the **started** state.
+
+5. Call **pause()** to pause recording. The AVRecorder enters the **paused** state.
+
+6. Call **resume()** to resume recording. The AVRecorder enters the **started** state again.
+
+7. Call **stop()** to stop recording. The AVRecorder enters the **stopped** state.
+
+8. Call **reset()** to reset the resources. The AVRecorder enters the **idle** state. In this case, you can reconfigure the recording parameters.
+
+9. Call **release()** to switch the AVRecorder to the **released** state. Now your application exits the recording.
+
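+A condensed sketch of steps 4 to 9, assuming an **avRecorder** instance that is already in the **prepared** state inside an async function (the full sample below adds the state checks):
+
+```ts
+await avRecorder.start();   // started
+await avRecorder.pause();   // paused
+await avRecorder.resume();  // started
+await avRecorder.stop();    // stopped
+await avRecorder.reset();   // idle
+await avRecorder.release(); // released
+```
+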
+
+## Sample Code
+
+Refer to the sample code below to complete the process of starting, pausing, resuming, and stopping recording.
+
+```ts
+import media from '@ohos.multimedia.media';
+
+export class AudioRecorderDemo {
+ private avRecorder;
+ private avProfile = {
+ audioBitrate: 100000, // Audio bit rate.
+ audioChannels: 2, // Number of audio channels.
+ audioCodec: media.CodecMimeType.AUDIO_AAC, // Audio encoding format. Currently, only AAC is supported.
+ audioSampleRate: 48000, // Audio sampling rate.
+ fileFormat: media.ContainerFormatType.CFT_MPEG_4A, // Encapsulation format. Currently, only M4A is supported.
+ };
+ private avConfig = {
+ audioSourceType: media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC, // Audio input source. In this example, the microphone is used.
+ profile: this.avProfile,
+ url: 'fd://35', // Create, read, and write a file by referring to the sample code in Application File Access and Management.
+ };
+
+ // Set AVRecorder callback functions.
+ setAudioRecorderCallback() {
+ // Callback function for state changes.
+ this.avRecorder.on('stateChange', (state, reason) => {
+ console.log(`AudioRecorder current state is ${state}`);
+ })
+ // Callback function for errors.
+ this.avRecorder.on('error', (err) => {
+ console.error(`AudioRecorder failed, code is ${err.code}, message is ${err.message}`);
+ })
+ }
+
+ // Process of starting recording.
+ async startRecordingProcess() {
+ // 1. Create an AVRecorder instance.
+ this.avRecorder = await media.createAVRecorder();
+ this.setAudioRecorderCallback();
+ // 2. Obtain the file descriptor of the recording file and assign it to the URL in avConfig. For details, see FilePicker.
+ // 3. Set recording parameters to complete the preparations.
+ await this.avRecorder.prepare(this.avConfig);
+ // 4. Start recording.
+ await this.avRecorder.start();
+ }
+
+ // Process of pausing recording.
+ async pauseRecordingProcess() {
+    if (this.avRecorder.state === 'started') { // pause() can be called only when the AVRecorder is in the started state.
+ await this.avRecorder.pause();
+ }
+ }
+
+ // Process of resuming recording.
+ async resumeRecordingProcess() {
+    if (this.avRecorder.state === 'paused') { // resume() can be called only when the AVRecorder is in the paused state.
+ await this.avRecorder.resume();
+ }
+ }
+
+ // Process of stopping recording.
+ async stopRecordingProcess() {
+ // 1. Stop recording.
+    if (this.avRecorder.state === 'started'
+      || this.avRecorder.state === 'paused') { // stop() can be called only when the AVRecorder is in the started or paused state.
+ await this.avRecorder.stop();
+ }
+ // 2. Reset the AVRecorder.
+ await this.avRecorder.reset();
+ // 3. Release the AVRecorder instance.
+ await this.avRecorder.release();
+ // 4. Close the file descriptor of the recording file.
+ }
+
+ // Complete sample code for starting, pausing, resuming, and stopping recording.
+ async audioRecorderDemo() {
+ await this.startRecordingProcess(); // Start recording.
+    // You can control the recording duration here, for example, by sleeping to block execution before the next call.
+ await this.pauseRecordingProcess(); // Pause recording.
+ await this.resumeRecordingProcess(); // Resume recording.
+ await this.stopRecordingProcess(); // Stop recording.
+ }
+}
+```
+
+
\ No newline at end of file
diff --git a/en/application-dev/media/using-avsession-controller.md b/en/application-dev/media/using-avsession-controller.md
new file mode 100644
index 0000000000000000000000000000000000000000..5e4b69d8b48f5acad64f120892062e66d67c6b12
--- /dev/null
+++ b/en/application-dev/media/using-avsession-controller.md
@@ -0,0 +1,244 @@
+# AVSession Controller
+
+The Media Controller preset in OpenHarmony functions as the controller to interact with audio and video applications, for example, obtaining and displaying media information and delivering control commands.
+
+You can develop a system application (for example, a new playback control center or voice assistant) as the controller to interact with audio and video applications in the system.
+
+## Basic Concepts
+
+- AVSessionDescriptor: session information, including the session ID, session type (audio/video), custom session name (**sessionTag**), information about the corresponding application (**elementName**), and whether the session is pinned on top (**isTopSession**).
+
+- Top session: the session with the highest priority in the system, for example, a session that is being played. Generally, the controller must hold an **AVSessionController** object to communicate with a session. However, the controller can directly communicate with the top session, for example, directly sending a control command or key event, without holding an **AVSessionController** object, as shown in the sketch below.
+
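+A minimal sketch of commanding the top session directly through the module-level API (promise style; the command literal is illustrative):
+
+```ts
+import AVSessionManager from '@ohos.multimedia.avsession';
+
+// Deliver a 'play' command to the top session without creating an AVSessionController object.
+let avCommand = {command:'play'};
+AVSessionManager.sendSystemControlCommand(avCommand).then(() => {
+  console.info('sendSystemControlCommand : SUCCESS');
+}).catch((err) => {
+  console.error(`sendSystemControlCommand : ERROR : ${err.message}`);
+});
+```
+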
+## Available APIs
+
+The table below lists the key APIs used by the controller. The APIs use either a callback or promise to return the result. The APIs listed below use a callback. They provide the same functions as their counterparts that use a promise.
+
+For details, see [AVSession Management](../reference/apis/js-apis-avsession.md).
+
+| API| Description|
+| -------- | -------- |
+| getAllSessionDescriptors(callback: AsyncCallback<Array<Readonly<AVSessionDescriptor>>>): void | Obtains the descriptors of all AVSessions in the system.|
+| createController(sessionId: string, callback: AsyncCallback<AVSessionController>): void | Creates an AVSessionController.|
+| getValidCommands(callback: AsyncCallback&lt;Array&lt;AVControlCommandType&gt;&gt;): void | Obtains valid commands supported by the AVSession.<br>Control commands listened for by an audio and video application when it accesses the AVSession are considered valid commands supported by the AVSession. For details, see [Provider of AVSession](using-avsession-developer.md).|
+| getLaunchAbility(callback: AsyncCallback&lt;WantAgent&gt;): void | Obtains the UIAbility that is configured in the AVSession and can be started.<br>The UIAbility configured here is started when a user operates the UI of the controller, for example, clicking a widget in Media Controller.|
+| sendAVKeyEvent(event: KeyEvent, callback: AsyncCallback<void>): void | Sends a key event to an AVSession through the AVSessionController object.|
+| sendSystemAVKeyEvent(event: KeyEvent, callback: AsyncCallback<void>): void | Sends a key event to the top session.|
+| sendControlCommand(command: AVControlCommand, callback: AsyncCallback<void>): void | Sends a control command to an AVSession through the AVSessionController object.|
+| sendSystemControlCommand(command: AVControlCommand, callback: AsyncCallback<void>): void | Sends a control command to the top session.|
+
+## How to Develop
+
+To enable a system application to access the AVSession service as a controller, proceed as follows:
+
+1. Obtain **AVSessionDescriptor** through AVSessionManager and create an **AVSessionController** object.
+ The controller may obtain all **AVSessionDescriptor**s in the current system, and create an **AVSessionController** object for each session, so as to perform unified playback control on all the audio and video applications.
+
+ ```ts
+ // Import the AVSessionManager module.
+ import AVSessionManager from '@ohos.multimedia.avsession';
+
+ // Define global variables.
+ let g_controller = new Array();
+   let g_centerSupportCmd: Set<string> = new Set(['play', 'pause', 'playNext', 'playPrevious', 'fastForward', 'rewind', 'seek', 'setSpeed', 'setLoopMode', 'toggleFavorite']);
+   let g_validCmd: Set<string> = new Set(); // Initialized empty; refreshed in the validCommandChange callback.
+ // Obtain the session descriptors and create an AVSessionController object.
+ AVSessionManager.getAllSessionDescriptors().then((descriptors) => {
+ descriptors.forEach((descriptor) => {
+ AVSessionManager.createController(descriptor.sessionId).then((controller) => {
+ g_controller.push(controller);
+ }).catch((err) => {
+ console.error(`createController : ERROR : ${err.message}`);
+ });
+ });
+ }).catch((err) => {
+ console.error(`getAllSessionDescriptors : ERROR : ${err.message}`);
+ });
+
+ ```
+
+2. Listen for the session state and service state events.
+
+ The following session state events are available:
+
+ - **sessionCreate**: triggered when a session is created.
+ - **sessionDestroy**: triggered when a session is destroyed.
+ - **topSessionChange**: triggered when the top session is changed.
+
+ The service state event **sessionServiceDie** is reported when the AVSession service is abnormal.
+
+ ```ts
+ // Subscribe to the 'sessionCreate' event and create an AVSessionController object.
+ AVSessionManager.on('sessionCreate', (session) => {
+ // After an AVSession is added, you must create an AVSessionController object.
+ AVSessionManager.createController(session.sessionId).then((controller) => {
+ g_controller.push(controller);
+ }).catch((err) => {
+       console.error(`createController : ERROR : ${err.message}`);
+ });
+ });
+
+ // Subscribe to the 'sessionDestroy' event to enable the application to get notified when the session dies.
+ AVSessionManager.on('sessionDestroy', (session) => {
+ let index = g_controller.findIndex((controller) => {
+ return controller.sessionId === session.sessionId;
+ });
+     if (index !== -1) { // findIndex() returns -1 if no matching controller exists.
+ g_controller[index].destroy();
+ g_controller.splice(index, 1);
+ }
+ });
+ // Subscribe to the 'topSessionChange' event.
+ AVSessionManager.on('topSessionChange', (session) => {
+ let index = g_controller.findIndex((controller) => {
+ return controller.sessionId === session.sessionId;
+ });
+ // Place the session on the top.
+ if (index !== 0) {
+ g_controller.sort((a, b) => {
+ return a.sessionId === session.sessionId ? -1 : 0;
+ });
+ }
+ });
+ // Subscribe to the 'sessionServiceDie' event.
+ AVSessionManager.on('sessionServiceDie', () => {
+ // The server is abnormal, and the application clears resources.
+ console.info("Server exception.");
+ })
+ ```
+
+3. Subscribe to media information changes and other session events.
+
+ The following media information change events are available:
+
+ - **metadataChange**: triggered when the session metadata changes.
+ - **playbackStateChange**: triggered when the playback state changes.
+ - **activeStateChange**: triggered when the activation state of the session changes.
+   - **validCommandChange**: triggered when the valid commands supported by the session change.
+ - **outputDeviceChange**: triggered when the output device changes.
+ - **sessionDestroy**: triggered when a session is destroyed.
+
+ The controller can listen for events as required.
+
+ ```ts
+ // Subscribe to the 'activeStateChange' event.
+ controller.on('activeStateChange', (isActive) => {
+ if (isActive) {
+ console.info("The widget corresponding to the controller is highlighted.");
+ } else {
+ console.info("The widget corresponding to the controller is invalid.");
+ }
+ });
+ // Subscribe to the 'sessionDestroy' event to enable the controller to get notified when the session dies.
+ controller.on('sessionDestroy', () => {
+ console.info('on sessionDestroy : SUCCESS ');
+ controller.destroy().then(() => {
+ console.info('destroy : SUCCESS ');
+ }).catch((err) => {
+       console.error(`destroy : ERROR : ${err.message}`);
+ });
+ });
+
+ // Subscribe to metadata changes.
+ let metaFilter = ['assetId', 'title', 'description'];
+ controller.on('metadataChange', metaFilter, (metadata) => {
+ console.info(`on metadataChange assetId : ${metadata.assetId}`);
+ });
+ // Subscribe to playback state changes.
+ let playbackFilter = ['state', 'speed', 'loopMode'];
+ controller.on('playbackStateChange', playbackFilter, (playbackState) => {
+ console.info(`on playbackStateChange state : ${playbackState.state}`);
+ });
+ // Subscribe to supported command changes.
+ controller.on('validCommandChange', (cmds) => {
+ console.info(`validCommandChange : SUCCESS : size : ${cmds.size}`);
+ console.info(`validCommandChange : SUCCESS : cmds : ${cmds.values()}`);
+ g_validCmd.clear();
+ for (let c of g_centerSupportCmd) {
+ if (cmds.has(c)) {
+ g_validCmd.add(c);
+ }
+ }
+ });
+ // Subscribe to output device changes.
+ controller.on('outputDeviceChange', (device) => {
+ console.info(`on outputDeviceChange device isRemote : ${device.isRemote}`);
+ });
+ ```
+
+4. Obtain the media information transferred by the provider for display on the UI, for example, displaying the track being played and the playback state in Media Controller.
+
+ ```ts
+ async getInfoFromSessionByController() {
+ // It is assumed that an AVSessionController object corresponding to the session already exists. For details about how to create an AVSessionController object, see the code snippet above.
+ let controller: AVSessionManager.AVSessionController = ALLREADY_HAVE_A_CONTROLLER;
+ // Obtain the session ID.
+ let sessionId: string = controller.sessionId;
+     console.info(`get sessionId by controller : ${sessionId}`);
+ // Obtain the activation state of the session.
+ let isActive: boolean = await controller.isActive();
+ console.info(`get activeState by controller : ${isActive}`);
+ // Obtain the media information of the session.
+ let metadata: AVSessionManager.AVMetadata = await controller.getAVMetadata();
+ console.info(`get media title by controller : ${metadata.title}`);
+ console.info(`get media artist by controller : ${metadata.artist}`);
+ // Obtain the playback information of the session.
+ let avPlaybackState: AVSessionManager.AVPlaybackState = await controller.getAVPlaybackState();
+ console.info(`get playbackState by controller : ${avPlaybackState.state}`);
+ console.info(`get favoriteState by controller : ${avPlaybackState.isFavorite}`);
+ }
+ ```
+
+5. Control the playback behavior, for example, sending a command to operate (play/pause/previous/next) the item being played in Media Controller.
+
+ After listening for the control command event, the audio and video application serving as the provider needs to implement the corresponding operation.
+
+
+ ```ts
+ async sendCommandToSessionByController() {
+ // It is assumed that an AVSessionController object corresponding to the session already exists. For details about how to create an AVSessionController object, see the code snippet above.
+ let controller: AVSessionManager.AVSessionController = ALLREADY_HAVE_A_CONTROLLER;
+ // Obtain the commands supported by the session.
+     let validCommandTypeArray: Array<AVSessionManager.AVControlCommandType> = await controller.getValidCommands();
+ console.info(`get validCommandArray by controller : length : ${validCommandTypeArray.length}`);
+ // Deliver the 'play' command.
+     // If the 'play' command is valid, deliver it. The session provider is expected to implement the corresponding playback action.
+ if (validCommandTypeArray.indexOf('play') >= 0) {
+ let avCommand: AVSessionManager.AVControlCommand = {command:'play'};
+ controller.sendControlCommand(avCommand);
+ }
+ // Deliver the 'pause' command.
+ if (validCommandTypeArray.indexOf('pause') >= 0) {
+ let avCommand: AVSessionManager.AVControlCommand = {command:'pause'};
+ controller.sendControlCommand(avCommand);
+ }
+ // Deliver the 'playPrevious' command.
+ if (validCommandTypeArray.indexOf('playPrevious') >= 0) {
+ let avCommand: AVSessionManager.AVControlCommand = {command:'playPrevious'};
+ controller.sendControlCommand(avCommand);
+ }
+ // Deliver the 'playNext' command.
+ if (validCommandTypeArray.indexOf('playNext') >= 0) {
+ let avCommand: AVSessionManager.AVControlCommand = {command:'playNext'};
+ controller.sendControlCommand(avCommand);
+ }
+ }
+ ```
+
+6. When the audio and video application exits, cancel the listener and release the resources.
+
+ ```ts
+ async destroyController() {
+ // It is assumed that an AVSessionController object corresponding to the session already exists. For details about how to create an AVSessionController object, see the code snippet above.
+ let controller: AVSessionManager.AVSessionController = ALLREADY_HAVE_A_CONTROLLER;
+
+ // Destroy the AVSessionController object. After being destroyed, it is no longer available.
+ controller.destroy(function (err) {
+ if (err) {
+         console.error(`Destroy controller ERROR : code: ${err.code}, message: ${err.message}`);
+ } else {
+ console.info('Destroy controller SUCCESS');
+ }
+ });
+ }
+ ```
diff --git a/en/application-dev/media/using-avsession-developer.md b/en/application-dev/media/using-avsession-developer.md
new file mode 100644
index 0000000000000000000000000000000000000000..07bd4bf1297f3afc5352d30e9acd674fe056f815
--- /dev/null
+++ b/en/application-dev/media/using-avsession-developer.md
@@ -0,0 +1,198 @@
+# AVSession Provider
+
+An audio and video application needs to access the AVSession service as a provider in order to display media information in the controller (for example, Media Controller) and respond to control commands delivered by the controller.
+
+## Basic Concepts
+
+- AVMetadata: media data-related attributes, including the IDs of the current media asset (**assetId**), previous media asset (**previousAssetId**), and next media asset (**nextAssetId**), as well as the title, author, album, writer, and duration.
+
+- AVPlaybackState: playback state attributes, including the playback state, position, speed, buffered time, loop mode, and whether the media asset is favorited (**isFavorite**).
+
+## Available APIs
+
+The table below lists the key APIs used by the provider. The APIs use either a callback or promise to return the result. The APIs listed below use a callback. They provide the same functions as their counterparts that use a promise.
+
+For details, see [AVSession Management](../reference/apis/js-apis-avsession.md).
+
+| API| Description|
+| -------- | -------- |
+| createAVSession(context: Context, tag: string, type: AVSessionType, callback: AsyncCallback&lt;AVSession&gt;): void | Creates an AVSession.<br>Only one AVSession can be created for a UIAbility.|
+| setAVMetadata(data: AVMetadata, callback: AsyncCallback<void>): void | Sets AVSession metadata.|
+| setAVPlaybackState(state: AVPlaybackState, callback: AsyncCallback<void>): void | Sets the AVSession playback state.|
+| setLaunchAbility(ability: WantAgent, callback: AsyncCallback&lt;void&gt;): void | Sets the UIAbility to be started when a user operates the UI of the controller.|
+| getController(callback: AsyncCallback<AVSessionController>): void | Obtains the controller of the AVSession.|
+| activate(callback: AsyncCallback<void>): void | Activates the AVSession.|
+| destroy(callback: AsyncCallback<void>): void | Destroys the AVSession.|
+
+## How to Develop
+
+To enable an audio and video application to access the AVSession service as a provider, proceed as follows:
+
+1. Call an API in the **AVSessionManager** class to create and activate an **AVSession** object.
+
+ ```ts
+ import AVSessionManager from '@ohos.multimedia.avsession'; // Import the AVSessionManager module.
+
+ // Create an AVSession object.
+ async createSession() {
+ let session: AVSessionManager.AVSession = await AVSessionManager.createAVSession(this.context, 'SESSION_NAME', 'audio');
+ session.activate();
+ console.info(`session create done : sessionId : ${session.sessionId}`);
+ }
+ ```
+
+2. Set AVSession information, which includes:
+ - AVMetadata
+ - AVPlaybackState
+
+ The controller will call an API in the **AVSessionController** class to obtain the information and display or process the information.
+
+ ```ts
+ async setSessionInfo() {
+     // It is assumed that an AVSession object has been created. For details about how to create an AVSession object, see the code snippet above.
+ let session: AVSessionManager.AVSession = ALLREADY_CREATE_A_SESSION;
+ // The player logic that triggers changes in the session metadata and playback state is omitted here.
+ // Set necessary session metadata.
+ let metadata: AVSessionManager.AVMetadata = {
+ assetId: "0",
+ title: "TITLE",
+ artist: "ARTIST"
+ };
+ session.setAVMetadata(metadata).then(() => {
+ console.info('SetAVMetadata successfully');
+ }).catch((err) => {
+       console.error(`SetAVMetadata BusinessError: code: ${err.code}, message: ${err.message}`);
+ });
+ // Set the playback state to paused and set isFavorite to false.
+ let playbackState: AVSessionManager.AVPlaybackState = {
+       state: AVSessionManager.PlaybackState.PLAYBACK_STATE_PAUSE,
+       isFavorite: false
+ };
+ session.setAVPlaybackState(playbackState, function (err) {
+ if (err) {
+         console.error(`SetAVPlaybackState BusinessError: code: ${err.code}, message: ${err.message}`);
+ } else {
+ console.info('SetAVPlaybackState successfully');
+ }
+ });
+ }
+ ```
+
+3. Set the UIAbility to be started by the controller. The UIAbility configured here is started when a user operates the UI of the controller, for example, clicking a widget in Media Controller.
+ The UIAbility is set through the **WantAgent** API. For details, see [WantAgent](../reference/apis/js-apis-app-ability-wantAgent.md).
+
+ ```ts
+ import WantAgent from "@ohos.app.ability.wantAgent";
+ ```
+
+ ```ts
+   // It is assumed that an AVSession object has been created. For details about how to create an AVSession object, see the code snippet above.
+ let session: AVSessionManager.AVSession = ALLREADY_CREATE_A_SESSION;
+   let wantAgentInfo = {
+ wants: [
+ {
+ bundleName: "com.example.musicdemo",
+ abilityName: "com.example.musicdemo.MainAbility"
+ }
+ ],
+ operationType: WantAgent.OperationType.START_ABILITIES,
+ requestCode: 0,
+ wantAgentFlags: [WantAgent.WantAgentFlags.UPDATE_PRESENT_FLAG]
+ }
+ WantAgent.getWantAgent(wantAgentInfo).then((agent) => {
+ session.setLaunchAbility(agent)
+ })
+ ```
+
+4. Listen for control commands delivered by the controller, for example, Media Controller.
+ > **NOTE**
+ >
+ > After the provider registers a listener for the control command event, the event will be reflected in **getValidCommands()** of the controller. In other words, the controller determines that the command is valid and triggers the corresponding event as required. To ensure that the control commands delivered by the controller can be executed normally, the provider should not use a null implementation for listening.
+
+ ```ts
+ async setListenerForMesFromController() {
+     // It is assumed that an AVSession object has been created. For details about how to create an AVSession object, see the code snippet above.
+ let session: AVSessionManager.AVSession = ALLREADY_CREATE_A_SESSION;
+ // Generally, logic processing on the player is implemented in the listener.
+ // After the processing is complete, use the setter to synchronize the playback information. For details, see the code snippet above.
+ session.on('play', () => {
+ console.info('on play , do play task');
+
+ // do some tasks ···
+ });
+ session.on('pause', () => {
+ console.info('on pause , do pause task');
+ // do some tasks ···
+ });
+ session.on('stop', () => {
+ console.info('on stop , do stop task');
+ // do some tasks ···
+ });
+ session.on('playNext', () => {
+ console.info('on playNext , do playNext task');
+ // do some tasks ···
+ });
+ session.on('playPrevious', () => {
+ console.info('on playPrevious , do playPrevious task');
+ // do some tasks ···
+ });
+ }
+ ```
+
+5. Obtain an **AVSessionController** object for this **AVSession** object for interaction.
+
+ ```ts
+ async createControllerFromSession() {
+     // It is assumed that an AVSession object has been created. For details about how to create an AVSession object, see the code snippet above.
+ let session: AVSessionManager.AVSession = ALLREADY_CREATE_A_SESSION;
+
+ // Obtain an AVSessionController object for this AVSession object.
+ let controller: AVSessionManager.AVSessionController = await session.getController();
+
+ // The AVSessionController object can interact with the AVSession object, for example, by delivering a control command.
+ let avCommand: AVSessionManager.AVControlCommand = {command:'play'};
+ controller.sendControlCommand(avCommand);
+
+ // Alternatively, listen for state changes.
+ controller.on('playbackStateChange', 'all', (state: AVSessionManager.AVPlaybackState) => {
+
+ // do some things
+ });
+
+ // The AVSessionController object can perform many operations. For details, see the description of the controller.
+ }
+ ```
+
+6. When the audio and video application exits and does not need to continue playback, cancel the listener and destroy the **AVSession** object.
+ The code snippet below is used for canceling the listener for control commands:
+
+ ```ts
+ async unregisterSessionListener() {
+     // It is assumed that an AVSession object has been created. For details about how to create an AVSession object, see the code snippet above.
+ let session: AVSessionManager.AVSession = ALLREADY_CREATE_A_SESSION;
+
+ // Cancel the listener of the AVSession object.
+ session.off('play');
+ session.off('pause');
+ session.off('stop');
+ session.off('playNext');
+ session.off('playPrevious');
+ }
+ ```
+
+ The code snippet below is used for destroying the AVSession object:
+
+ ```ts
+ async destroySession() {
+     // It is assumed that an AVSession object has been created. For details about how to create an AVSession object, see the code snippet above.
+ let session: AVSessionManager.AVSession = ALLREADY_CREATE_A_SESSION;
+ // Destroy the AVSession object.
+ session.destroy(function (err) {
+ if (err) {
+         console.error(`Destroy BusinessError: code: ${err.code}, message: ${err.message}`);
+ } else {
+ console.info('Destroy : SUCCESS ');
+ }
+ });
+ }
+ ```
diff --git a/en/application-dev/media/using-distributed-avsession.md b/en/application-dev/media/using-distributed-avsession.md
new file mode 100644
index 0000000000000000000000000000000000000000..c1835d661fdd2b57b7dce0f2507dbea748eaea7e
--- /dev/null
+++ b/en/application-dev/media/using-distributed-avsession.md
@@ -0,0 +1,55 @@
+# Using Distributed AVSession
+
+## Basic Concepts
+
+- Remote AVSession: an AVSession automatically created on the remote device by the AVSession service for synchronization with an AVSession on the local device.
+
+- Remote AVSessionController: an AVSessionController automatically created on the remote device after projection.
+
+## Available APIs
+
+The table below describes the key APIs used for remote projection with the distributed AVSession. The APIs use either a callback or promise to return the result. The APIs listed below use a callback. They provide the same functions as their counterparts that use a promise.
+
+For details, see [AVSession Management](../reference/apis/js-apis-avsession.md).
+
+| API| Description|
+| -------- | -------- |
+| castAudio(session: SessionToken \| 'all', audioDevices: Array<audio.AudioDeviceDescriptor>, callback: AsyncCallback<void>): void | Casts a session to a list of devices.|
+
+## How to Develop
+
+To enable a system application that accesses the AVSession service as the controller to use the distributed AVSession for projection, proceed as follows:
+
+1. Import the modules. Before projection, you must obtain the AudioDeviceDescriptor from the audio module. Therefore, import the audio module and AVSessionManager module.
+
+ ```ts
+ import AVSessionManager from '@ohos.multimedia.avsession';
+ import audio from '@ohos.multimedia.audio';
+ ```
+
+2. Use **castAudio** in the **AVSessionManager** class to project all sessions of the local device to another device.
+
+ ```ts
+ // Cast the sessions to another device.
+ let audioManager = audio.getAudioManager();
+ let audioRoutingManager = audioManager.getRoutingManager();
+ let audioDevices;
+ await audioRoutingManager.getDevices(audio.DeviceFlag.OUTPUT_DEVICES_FLAG).then((data) => {
+ audioDevices = data;
+ console.info('Promise returned to indicate that the device list is obtained.');
+   }).catch((err) => {
+     console.error(`getDevices : ERROR : ${err.message}`);
+   });
+
+   AVSessionManager.castAudio('all', audioDevices).then(() => {
+     console.info('castAudio : SUCCESS');
+   }).catch((err) => {
+     console.error(`castAudio : ERROR : ${err.message}`);
+   });
+ ```
+
+   After the system application on the local device initiates projection to a remote device, the AVSession framework instructs the AVSession service of the remote device to create a remote AVSession. When the AVSession on the local device changes (for example, the media information or playback state changes), the AVSession framework automatically synchronizes the change to the remote device.
+
+ The AVSession processing mechanism on the remote device is consistent with that on the local device. That is, the controller (for example, the Media Controller) on the remote device listens for the AVSession creation event, and creates a remote **AVSessionController** object to manage the remote AVSession. In addition, the control commands are automatically synchronized by the AVSession framework to the local device.
+
+ The provider (for example, an audio and video application) on the local device listens for control command events, so as to respond to the commands from the remote device in time.
diff --git a/en/application-dev/media/using-opensl-es-for-playback.md b/en/application-dev/media/using-opensl-es-for-playback.md
new file mode 100644
index 0000000000000000000000000000000000000000..c5dedbba659154a1893a471e5e9a3d33d33be20a
--- /dev/null
+++ b/en/application-dev/media/using-opensl-es-for-playback.md
@@ -0,0 +1,131 @@
+# Using OpenSL ES for Audio Playback
+
+OpenSL ES, short for Open Sound Library for Embedded Systems, is an embedded, cross-platform audio processing library that is free of charge. It provides high-performance and low-latency APIs for you to develop applications running on embedded mobile multimedia devices. OpenHarmony has implemented certain native APIs based on the [OpenSL ES](https://www.khronos.org/opensles/) 1.0.1 API specifications developed by the [Khronos Group](https://www.khronos.org/). You can use these APIs through **&lt;OpenSLES.h&gt;** and **&lt;OpenSLES_OpenHarmony.h&gt;**.
+
+## OpenSL ES on OpenHarmony
+
+Currently, OpenHarmony implements some of the [OpenSL ES APIs](https://gitee.com/openharmony/third_party_opensles/blob/master/api/1.0.1/OpenSLES.h) to provide basic audio playback functionalities.
+
+If an API that has not been implemented on OpenHarmony is called, **SL_RESULT_FEATURE_UNSUPPORTED** is returned.
+
+The following lists the OpenSL ES APIs that have been implemented on OpenHarmony. For details, see the [OpenSL ES](https://www.khronos.org/opensles/) specifications.
+
+- **Engine APIs implemented on OpenHarmony**
+ - SLresult (\*CreateAudioPlayer) (SLEngineItf self, SLObjectItf \* pPlayer, SLDataSource \*pAudioSrc, SLDataSink \*pAudioSnk, SLuint32 numInterfaces, const SLInterfaceID \* pInterfaceIds, const SLboolean \* pInterfaceRequired)
+ - SLresult (\*CreateAudioRecorder) (SLEngineItf self, SLObjectItf \* pRecorder, SLDataSource \*pAudioSrc, SLDataSink \*pAudioSnk, SLuint32 numInterfaces, const SLInterfaceID \* pInterfaceIds, const SLboolean \* pInterfaceRequired)
+ - SLresult (\*CreateOutputMix) (SLEngineItf self, SLObjectItf \* pMix, SLuint32 numInterfaces, const SLInterfaceID \* pInterfaceIds, const SLboolean \* pInterfaceRequired)
+
+- **Object APIs implemented on OpenHarmony**
+ - SLresult (\*Realize) (SLObjectItf self, SLboolean async)
+ - SLresult (\*GetState) (SLObjectItf self, SLuint32 \* pState)
+ - SLresult (\*GetInterface) (SLObjectItf self, const SLInterfaceID iid, void \* pInterface)
+ - void (\*Destroy) (SLObjectItf self)
+
+- **Playback APIs implemented on OpenHarmony**
+ - SLresult (\*SetPlayState) (SLPlayItf self, SLuint32 state)
+ - SLresult (\*GetPlayState) (SLPlayItf self, SLuint32 \*pState)
+
+- **Volume control APIs implemented on OpenHarmony**
+ - SLresult (\*SetVolumeLevel) (SLVolumeItf self, SLmillibel level)
+ - SLresult (\*GetVolumeLevel) (SLVolumeItf self, SLmillibel \*pLevel)
+ - SLresult (\*GetMaxVolumeLevel) (SLVolumeItf self, SLmillibel \*pMaxLevel)
+
+- **BufferQueue APIs implemented on OpenHarmony**
+
+  The APIs listed below can be used only after **&lt;OpenSLES_OpenHarmony.h&gt;** is introduced.
+  | API| Description|
+  | -------- | -------- |
+  | SLresult (\*Enqueue) (SLOHBufferQueueItf self, const void \*buffer, SLuint32 size) | Adds a buffer to the corresponding queue.<br>For an audio playback operation, this API adds the buffer with audio data to the **filledBufferQ_** queue. For an audio recording operation, this API adds the idle buffer after recording data storage to the **freeBufferQ_** queue.<br>The **self** parameter indicates the **BufferQueue** object that calls this API.<br>The **buffer** parameter indicates the pointer to the buffer with audio data or the pointer to the idle buffer after the recording data is stored.<br>The **size** parameter indicates the size of the buffer.|
+  | SLresult (\*Clear) (SLOHBufferQueueItf self) | Releases a **BufferQueue** object.<br>The **self** parameter indicates the **BufferQueue** object that calls this API.|
+  | SLresult (\*GetState) (SLOHBufferQueueItf self, SLOHBufferQueueState \*state) | Obtains the state of a **BufferQueue** object.<br>The **self** parameter indicates the **BufferQueue** object that calls this API.<br>The **state** parameter indicates the pointer to the state of the **BufferQueue** object.|
+  | SLresult (\*RegisterCallback) (SLOHBufferQueueItf self, SlOHBufferQueueCallback callback, void\* pContext) | Registers a callback.<br>The **self** parameter indicates the **BufferQueue** object that calls this API.<br>The **callback** parameter indicates the callback to be registered for the audio playback or recording operation.<br>The **pContext** parameter indicates the pointer to the audio file to be played for an audio playback operation or the pointer to the audio file to be recorded for an audio recording operation.|
+  | SLresult (\*GetBuffer) (SLOHBufferQueueItf self, SLuint8\*\* buffer, SLuint32\* size) | Obtains a buffer.<br>For an audio playback operation, this API obtains an idle buffer from the **freeBufferQ_** queue. For an audio recording operation, this API obtains the buffer that carries recording data from the **filledBufferQ_** queue.<br>The **self** parameter indicates the **BufferQueue** object that calls this API.<br>The **buffer** parameter indicates the double pointer to the idle buffer or the buffer carrying recording data.<br>The **size** parameter indicates the size of the buffer.|
+
+## Sample Code
+
+Refer to the sample code below to play an audio file.
+
+1. Add the header files.
+
+ ```c++
+   #include <OpenSLES.h>
+   #include <OpenSLES_OpenHarmony.h>
+   #include <OpenSLES_Platform.h>
+ ```
+
+2. Use the **slCreateEngine** API to obtain an **engine** instance.
+
+ ```c++
+ SLObjectItf engineObject = nullptr;
+ slCreateEngine(&engineObject, 0, nullptr, 0, nullptr, nullptr);
+ (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
+ ```
+
+3. Obtain the **engineEngine** instance of the **SL_IID_ENGINE** API.
+
+ ```c++
+ SLEngineItf engineEngine = nullptr;
+ (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);
+ ```
+
+4. Configure the player and create an **AudioPlayer** instance.
+
+ ```c++
+ SLDataLocator_BufferQueue slBufferQueue = {
+ SL_DATALOCATOR_BUFFERQUEUE,
+ 0
+ };
+
+ // Configure the parameters based on the audio file format.
+ SLDataFormat_PCM pcmFormat = {
+ SL_DATAFORMAT_PCM,
+ 2, // Number of channels.
+ SL_SAMPLINGRATE_48, // Sampling rate.
+ SL_PCMSAMPLEFORMAT_FIXED_16, // Audio sample format.
+ 0,
+ 0,
+ 0
+ };
+ SLDataSource slSource = {&slBufferQueue, &pcmFormat};
+ SLObjectItf pcmPlayerObject = nullptr;
+   (*engineEngine)->CreateAudioPlayer(engineEngine, &pcmPlayerObject, &slSource, nullptr, 0, nullptr, nullptr);
+ (*pcmPlayerObject)->Realize(pcmPlayerObject, SL_BOOLEAN_FALSE);
+ ```
+
+5. Obtain the **bufferQueueItf** instance of the **SL_IID_OH_BUFFERQUEUE** API.
+
+ ```c++
+ SLOHBufferQueueItf bufferQueueItf;
+ (*pcmPlayerObject)->GetInterface(pcmPlayerObject, SL_IID_OH_BUFFERQUEUE, &bufferQueueItf);
+ ```
+
+6. Open an audio file and register the **BufferQueueCallback** function.
+
+ ```c++
+ static void BufferQueueCallback (SLOHBufferQueueItf bufferQueueItf, void *pContext, SLuint32 size)
+ {
+ SLuint8 *buffer = nullptr;
+ SLuint32 pSize;
+ (*bufferQueueItf)->GetBuffer(bufferQueueItf, &buffer, &pSize);
+ // Write the audio data to be played to the buffer.
+ (*bufferQueueItf)->Enqueue(bufferQueueItf, buffer, size);
+ }
+   void *pContext = nullptr; // This callback can be used to obtain the custom context information passed in.
+ (*bufferQueueItf)->RegisterCallback(bufferQueueItf, BufferQueueCallback, pContext);
+ ```
+
+7. Obtain the **playItf** instance of the **SL_IID_PLAY** API and start playing.
+
+ ```c++
+ SLPlayItf playItf = nullptr;
+ (*pcmPlayerObject)->GetInterface(pcmPlayerObject, SL_IID_PLAY, &playItf);
+ (*playItf)->SetPlayState(playItf, SL_PLAYSTATE_PLAYING);
+ ```
+
+8. Stop playing.
+
+ ```c++
+ (*playItf)->SetPlayState(playItf, SL_PLAYSTATE_STOPPED);
+ (*pcmPlayerObject)->Destroy(pcmPlayerObject);
+ (*engineObject)->Destroy(engineObject);
+ ```
diff --git a/en/application-dev/media/using-opensl-es-for-recording.md b/en/application-dev/media/using-opensl-es-for-recording.md
new file mode 100644
index 0000000000000000000000000000000000000000..55a18fc561c0117d5aff5aaedb22c36f1b7706bf
--- /dev/null
+++ b/en/application-dev/media/using-opensl-es-for-recording.md
@@ -0,0 +1,148 @@
+# Using OpenSL ES for Audio Recording
+
+OpenSL ES, short for Open Sound Library for Embedded Systems, is an embedded, cross-platform audio processing library that is free of charge. It provides high-performance and low-latency APIs for you to develop applications running on embedded mobile multimedia devices. OpenHarmony has implemented certain native APIs based on the [OpenSL ES](https://www.khronos.org/opensles/) 1.0.1 API specifications developed by the [Khronos Group](https://www.khronos.org/). You can use these APIs through **&lt;OpenSLES.h&gt;** and **&lt;OpenSLES_OpenHarmony.h&gt;**.
+
+## OpenSL ES on OpenHarmony
+
+Currently, OpenHarmony implements some of the [OpenSL ES APIs](https://gitee.com/openharmony/third_party_opensles/blob/master/api/1.0.1/OpenSLES.h) to provide basic audio recording functionalities.
+
+If an API that has not been implemented on OpenHarmony is called, **SL_RESULT_FEATURE_UNSUPPORTED** is returned.
+
+The following lists the OpenSL ES APIs that have been implemented on OpenHarmony. For details, see the [OpenSL ES](https://www.khronos.org/opensles/) specifications.
+
+- **Engine APIs implemented on OpenHarmony**
+ - SLresult (\*CreateAudioPlayer) (SLEngineItf self, SLObjectItf \* pPlayer, SLDataSource \*pAudioSrc, SLDataSink \*pAudioSnk, SLuint32 numInterfaces, const SLInterfaceID \* pInterfaceIds, const SLboolean \* pInterfaceRequired)
+ - SLresult (\*CreateAudioRecorder) (SLEngineItf self, SLObjectItf \* pRecorder, SLDataSource \*pAudioSrc, SLDataSink \*pAudioSnk, SLuint32 numInterfaces, const SLInterfaceID \* pInterfaceIds, const SLboolean \* pInterfaceRequired)
+ - SLresult (\*CreateOutputMix) (SLEngineItf self, SLObjectItf \* pMix, SLuint32 numInterfaces, const SLInterfaceID \* pInterfaceIds, const SLboolean \* pInterfaceRequired)
+
+- **Object APIs implemented on OpenHarmony**
+ - SLresult (\*Realize) (SLObjectItf self, SLboolean async)
+ - SLresult (\*GetState) (SLObjectItf self, SLuint32 \* pState)
+ - SLresult (\*GetInterface) (SLObjectItf self, const SLInterfaceID iid, void \* pInterface)
+ - void (\*Destroy) (SLObjectItf self)
+
+- **Recorder APIs implemented on OpenHarmony**
+ - SLresult (\*SetRecordState) (SLRecordItf self, SLuint32 state)
+  - SLresult (\*GetRecordState) (SLRecordItf self, SLuint32 \*pState)
+
+- **BufferQueue APIs implemented on OpenHarmony**
+
+  The APIs listed below can be used only after `<OpenSLES_OpenHarmony.h>` is introduced.
+ | API| Description|
+ | -------- | -------- |
+  | SLresult (\*Enqueue) (SLOHBufferQueueItf self, const void \*buffer, SLuint32 size) | Adds a buffer to the corresponding queue.<br>For an audio playback operation, this API adds the buffer with audio data to the **filledBufferQ_** queue. For an audio recording operation, this API adds the idle buffer, after its recording data has been stored, to the **freeBufferQ_** queue.<br>The **self** parameter indicates the **BufferQueue** object that calls this API.<br>The **buffer** parameter indicates the pointer to the buffer with audio data or the pointer to the idle buffer after the recording data is stored.<br>The **size** parameter indicates the size of the buffer.|
+  | SLresult (\*Clear) (SLOHBufferQueueItf self) | Releases a **BufferQueue** object.<br>The **self** parameter indicates the **BufferQueue** object that calls this API.|
+  | SLresult (\*GetState) (SLOHBufferQueueItf self, SLOHBufferQueueState \*state) | Obtains the state of a **BufferQueue** object.<br>The **self** parameter indicates the **BufferQueue** object that calls this API.<br>The **state** parameter indicates the pointer to the state of the **BufferQueue** object.|
+  | SLresult (\*RegisterCallback) (SLOHBufferQueueItf self, SlOHBufferQueueCallback callback, void\* pContext) | Registers a callback.<br>The **self** parameter indicates the **BufferQueue** object that calls this API.<br>The **callback** parameter indicates the callback to be registered for the audio playback or recording operation.<br>The **pContext** parameter indicates the pointer to the audio file to be played for an audio playback operation or the pointer to the audio file to be recorded for an audio recording operation.|
+  | SLresult (\*GetBuffer) (SLOHBufferQueueItf self, SLuint8\*\* buffer, SLuint32\* size) | Obtains a buffer.<br>For an audio playback operation, this API obtains an idle buffer from the **freeBufferQ_** queue. For an audio recording operation, this API obtains the buffer that carries recording data from the **filledBufferQ_** queue.<br>The **self** parameter indicates the **BufferQueue** object that calls this API.<br>The **buffer** parameter indicates the double pointer to the idle buffer or the buffer carrying recording data.<br>The **size** parameter indicates the size of the buffer.|
+
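+The following is a minimal sketch (assuming a **bufferQueueItf** instance obtained as in step 6 of the sample code below) of querying the queue state and clearing the queue when a stream is torn down:
+
+```c++
+SLOHBufferQueueState state;
+(*bufferQueueItf)->GetState(bufferQueueItf, &state);
+// Inspect the state as needed, then clear the queue during teardown.
+(*bufferQueueItf)->Clear(bufferQueueItf);
+```
+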
+## Sample Code
+
+Refer to the sample code below to record an audio file.
+
+1. Add the header files.
+
+ ```c++
+   #include <OpenSLES.h>
+   #include <OpenSLES_OpenHarmony.h>
+   #include <OpenSLES_Platform.h>
+ ```
+
+2. Use the **slCreateEngine** API to create and instantiate an **engine** object.
+
+ ```c++
+ SLObjectItf engineObject = nullptr;
+ slCreateEngine(&engineObject, 0, nullptr, 0, nullptr, nullptr);
+ (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
+ ```
+
+3. Obtain the **engineItf** instance of the **SL_IID_ENGINE** API.
+
+ ```c++
+ SLEngineItf engineItf = nullptr;
+ (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineItf);
+ ```
+
+4. Configure the recorder information (including the input source **audioSource** and output sink **audioSink**), and create a **pcmCapturerObject** instance.
+
+ ```c++
+ SLDataLocator_IODevice io_device = {
+ SL_DATALOCATOR_IODEVICE,
+ SL_IODEVICE_AUDIOINPUT,
+ SL_DEFAULTDEVICEID_AUDIOINPUT,
+ NULL
+ };
+ SLDataSource audioSource = {
+ &io_device,
+ NULL
+ };
+ SLDataLocator_BufferQueue buffer_queue = {
+ SL_DATALOCATOR_BUFFERQUEUE,
+ 3
+ };
+ // Configure the parameters based on the audio file format.
+ SLDataFormat_PCM format_pcm = {
+ SL_DATAFORMAT_PCM, // Input audio format.
+ 1, // Mono channel.
+ SL_SAMPLINGRATE_44_1, // Sampling rate, 44100 Hz.
+ SL_PCMSAMPLEFORMAT_FIXED_16, // Audio sampling format, a signed 16-bit integer in little-endian format.
+       0, // Container size.
+       0, // Channel mask.
+       0  // Endianness.
+ };
+ SLDataSink audioSink = {
+ &buffer_queue,
+ &format_pcm
+ };
+
+ SLObjectItf pcmCapturerObject = nullptr;
+ (*engineItf)->CreateAudioRecorder(engineItf, &pcmCapturerObject,
+ &audioSource, &audioSink, 0, nullptr, nullptr);
+ (*pcmCapturerObject)->Realize(pcmCapturerObject, SL_BOOLEAN_FALSE);
+
+ ```
+
+5. Obtain the **recordItf** instance of the **SL_IID_RECORD** API.
+
+ ```c++
+   SLRecordItf recordItf = nullptr;
+ (*pcmCapturerObject)->GetInterface(pcmCapturerObject, SL_IID_RECORD, &recordItf);
+ ```
+
+6. Obtain the **bufferQueueItf** instance of the **SL_IID_OH_BUFFERQUEUE** API.
+
+ ```c++
+   SLOHBufferQueueItf bufferQueueItf = nullptr;
+ (*pcmCapturerObject)->GetInterface(pcmCapturerObject, SL_IID_OH_BUFFERQUEUE, &bufferQueueItf);
+ ```
+
+7. Register the **BufferQueueCallback** function.
+
+ ```c++
+ static void BufferQueueCallback(SLOHBufferQueueItf bufferQueueItf, void *pContext, SLuint32 size)
+ {
+ // Obtain the user information passed in during the registration from pContext.
+ SLuint8 *buffer = nullptr;
+ SLuint32 pSize = 0;
+ (*bufferQueueItf)->GetBuffer(bufferQueueItf, &buffer, &pSize);
+ if (buffer != nullptr) {
+ // The recording data can be read from the buffer for subsequent processing.
+ (*bufferQueueItf)->Enqueue(bufferQueueItf, buffer, size);
+ }
+ }
+   void *pContext = nullptr; // Custom context information to be passed to the callback; it can be retrieved inside the callback.
+ (*bufferQueueItf)->RegisterCallback(bufferQueueItf, BufferQueueCallback, pContext);
+ ```
+
+8. Start audio recording.
+
+ ```c++
+ (*recordItf)->SetRecordState(recordItf, SL_RECORDSTATE_RECORDING);
+ ```
+
+9. Stop audio recording.
+
+ ```c++
+ (*recordItf)->SetRecordState(recordItf, SL_RECORDSTATE_STOPPED);
+ (*pcmCapturerObject)->Destroy(pcmCapturerObject);
+ ```
diff --git a/en/application-dev/media/using-toneplayer-for-playback.md b/en/application-dev/media/using-toneplayer-for-playback.md
new file mode 100644
index 0000000000000000000000000000000000000000..11a528786b5bae712d8c4f07b9cad4ee29af2f48
--- /dev/null
+++ b/en/application-dev/media/using-toneplayer-for-playback.md
@@ -0,0 +1,140 @@
+# Using TonePlayer for Audio Playback (for System Applications Only)
+
+**TonePlayer**<sup>9+</sup> provides APIs for playing and managing Dual Tone Multi-Frequency (DTMF) tones, such as dial tones, ringback tones, supervisory tones, and proprietary tones. Its main task is to use a built-in algorithm to generate sine waves of different frequencies based on the specified [ToneType](../reference/apis/js-apis-audio.md#tonetype9) and mix them into a sound, which is then played through the [AudioRenderer](../reference/apis/js-apis-audio.md#audiorenderer8). The [AudioRenderer](../reference/apis/js-apis-audio.md#audiorenderer8) also manages the playback task. The full process includes loading the DTMF tone configuration, starting DTMF tone playback, stopping the playback, and releasing the resources associated with the **TonePlayer** object. For details about the APIs, see the [TonePlayer API Reference](../reference/apis/js-apis-audio.md#toneplayer9).
+
+
+## Supported Tone Types
+
+The table below lists the supported [ToneType](../reference/apis/js-apis-audio.md#tonetype9)s. You can call **load()** with **audio.ToneType.*type*** as a parameter to load the tone resource of the specified type.
+
+| Tone Type| Value| Description|
+| -------- | -------- | -------- |
+| TONE_TYPE_DIAL_0 | 0 | DTMF tone of key 0.|
+| TONE_TYPE_DIAL_1 | 1 | DTMF tone of key 1.|
+| TONE_TYPE_DIAL_2 | 2 | DTMF tone of key 2.|
+| TONE_TYPE_DIAL_3 | 3 | DTMF tone of key 3.|
+| TONE_TYPE_DIAL_4 | 4 | DTMF tone of key 4.|
+| TONE_TYPE_DIAL_5 | 5 | DTMF tone of key 5.|
+| TONE_TYPE_DIAL_6 | 6 | DTMF tone of key 6.|
+| TONE_TYPE_DIAL_7 | 7 | DTMF tone of key 7.|
+| TONE_TYPE_DIAL_8 | 8 | DTMF tone of key 8.|
+| TONE_TYPE_DIAL_9 | 9 | DTMF tone of key 9.|
+| TONE_TYPE_DIAL_S | 10 | DTMF tone of the star key (*).|
+| TONE_TYPE_DIAL_P | 11 | DTMF tone of the pound key (#).|
+| TONE_TYPE_DIAL_A | 12 | DTMF tone of key A.|
+| TONE_TYPE_DIAL_B | 13 | DTMF tone of key B.|
+| TONE_TYPE_DIAL_C | 14 | DTMF tone of key C.|
+| TONE_TYPE_DIAL_D | 15 | DTMF tone of key D.|
+| TONE_TYPE_COMMON_SUPERVISORY_DIAL | 100 | Supervisory tone - dial tone.|
+| TONE_TYPE_COMMON_SUPERVISORY_BUSY | 101 | Supervisory tone - busy.|
+| TONE_TYPE_COMMON_SUPERVISORY_CONGESTION | 102 | Supervisory tone - congestion.|
+| TONE_TYPE_COMMON_SUPERVISORY_RADIO_ACK | 103 | Supervisory tone - radio path acknowledgment.|
+| TONE_TYPE_COMMON_SUPERVISORY_RADIO_NOT_AVAILABLE | 104 | Supervisory tone - radio path not available.|
+| TONE_TYPE_COMMON_SUPERVISORY_CALL_WAITING | 106 | Supervisory tone - call waiting tone.|
+| TONE_TYPE_COMMON_SUPERVISORY_RINGTONE | 107 | Supervisory tone - ringing tone.|
+| TONE_TYPE_COMMON_PROPRIETARY_BEEP | 200 | Proprietary tone - beep tone.|
+| TONE_TYPE_COMMON_PROPRIETARY_ACK | 201 | Proprietary tone - ACK.|
+| TONE_TYPE_COMMON_PROPRIETARY_PROMPT | 203 | Proprietary tone - PROMPT.|
+| TONE_TYPE_COMMON_PROPRIETARY_DOUBLE_BEEP | 204 | Proprietary tone - double beep tone.|
+
+
+## How to Develop
+
+To implement audio playback with the TonePlayer, perform the following steps:
+
+1. Create a **TonePlayer** instance.
+
+ ```ts
+ import audio from '@ohos.multimedia.audio';
+   let audioRendererInfo = {
+       content: audio.ContentType.CONTENT_TYPE_SONIFICATION,
+       usage: audio.StreamUsage.STREAM_USAGE_MEDIA,
+       rendererFlags: 0
+   };
+   // createTonePlayer returns a promise; await it to obtain the TonePlayer instance.
+   let tonePlayerPromise = await audio.createTonePlayer(audioRendererInfo);
+ ```
+
+2. Load the DTMF tone configuration of the specified type.
+
+ ```ts
+   await tonePlayerPromise.load(audio.ToneType.TONE_TYPE_DIAL_0);
+ ```
+
+3. Start DTMF tone playing.
+
+ ```ts
+   await tonePlayerPromise.start();
+ ```
+
+4. Stop the tone that is being played.
+
+ ```ts
+   await tonePlayerPromise.stop();
+ ```
+
+5. Release the resources associated with the **TonePlayer** instance.
+
+ ```ts
+   await tonePlayerPromise.release();
+ ```
+
+If the APIs are not called in the preceding sequence, the error code **6800301 NAPI_ERR_SYSTEM** is returned.
+
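+A minimal sketch of detecting an out-of-order call (assuming **tonePlayerPromise** was created as in step 1; treating the caught error code as 6800301 is an expectation based on the note above):
+
+```ts
+try {
+    // Calling start() before load() breaks the required sequence.
+    await tonePlayerPromise.start();
+} catch (err) {
+    console.error(`start failed, code: ${err.code}`); // Expected: 6800301 NAPI_ERR_SYSTEM.
+}
+```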
+
+## Sample Code
+
+Refer to the following code to play the DTMF tone when a dial key on the keyboard is pressed.
+
+To prevent the UI thread from being blocked, most **TonePlayer** calls are asynchronous. Each API has both callback and promise versions; the following example uses the promise version. For more information, see [TonePlayer](../reference/apis/js-apis-audio.md#toneplayer9).
+
+
+```ts
+import audio from '@ohos.multimedia.audio';
+
+export class TonePlayerDemo {
+  private timerPro : number; // Timer for scheduling the promise-based test.
+ // Promise mode.
+ async testTonePlayerPromise(type) {
+ console.info('testTonePlayerPromise start');
+ if (this.timerPro) clearTimeout(this.timerPro);
+ let tonePlayerPromise;
+ let audioRendererInfo = {
+ content : audio.ContentType.CONTENT_TYPE_SONIFICATION,
+ usage : audio.StreamUsage.STREAM_USAGE_MEDIA,
+ rendererFlags : 0
+ };
+ this.timerPro = setTimeout(async () => {
+ try {
+ console.info('testTonePlayerPromise: createTonePlayer');
+ // Create a DTMF player.
+ tonePlayerPromise = await audio.createTonePlayer(audioRendererInfo);
+ console.info('testTonePlayerPromise: createTonePlayer-success');
+ console.info(`testTonePlayerPromise: load type: ${type}`);
+ // Load the tone configuration of the specified type.
+ await tonePlayerPromise.load(type);
+ console.info('testTonePlayerPromise: load-success');
+ console.info(`testTonePlayerPromise: start type: ${type}`);
+ // Start DTMF tone playing.
+ await tonePlayerPromise.start();
+ console.info('testTonePlayerPromise: start-success');
+ console.info(`testTonePlayerPromise: stop type: ${type}`);
+ setTimeout(async()=>{
+ // Stop the tone that is being played.
+ await tonePlayerPromise.stop();
+ console.info('testTonePlayerPromise: stop-success');
+ console.info(`testTonePlayerPromise: release type: ${type}`);
+ // Release the resources associated with the TonePlayer instance.
+ await tonePlayerPromise.release();
+ console.info('testTonePlayerPromise: release-success');
+ }, 30)
+ } catch(err) {
+ console.error(`testTonePlayerPromise err : ${err}`);
+ }
+ }, 200)
+ };
+ async testTonePlayer() {
+ this.testTonePlayerPromise(audio.ToneType.TONE_TYPE_DIAL_0);
+ }
+}
+```
diff --git a/en/application-dev/media/video-playback.md b/en/application-dev/media/video-playback.md
index d4c895b452aa31b28690bd96bd9ef0fac64c4eb4..fff4aa830ddc45e7d20e0fd06655adfdc5243fe5 100644
--- a/en/application-dev/media/video-playback.md
+++ b/en/application-dev/media/video-playback.md
@@ -1,419 +1,178 @@
-# Video Playback Development
+# Video Playback
-## Introduction
-
-You can use video playback APIs to convert audio data into audible analog signals and play the signals using output devices. You can also manage playback tasks. For example, you can start, suspend, stop playback, release resources, set the volume, seek to a playback position, set the playback speed, and obtain track information. This document describes development for the following video playback scenarios: full-process, normal playback, video switching, and loop playback.
-
-## Working Principles
-
-The following figures show the video playback state transition and the interaction with external modules for video playback.
-
-**Figure 1** Video playback state transition
-
-
-
-**Figure 2** Interaction with external modules for video playback
-
-
-
-**NOTE**: When a third-party application calls a JS interface provided by the JS interface layer, the framework layer invokes the audio component through the media service of the native framework to output the audio data decoded by the software to the audio HDI. The graphics subsystem outputs the image data decoded by the codec HDI at the hardware interface layer to the display HDI. In this way, video playback is implemented.
-
-*Note: Video playback requires hardware capabilities such as display, audio, and codec.*
-
-1. A third-party application obtains a surface ID from the XComponent.
-2. The third-party application transfers the surface ID to the VideoPlayer JS.
-3. The media service flushes the frame data to the surface buffer.
-
-## Compatibility
-
-Use the mainstream playback formats and resolutions, rather than custom ones to avoid playback failures, frame freezing, and artifacts. The system is not affected by incompatibility issues. If such an issue occurs, you can exit stream playback mode.
-
-The table below lists the mainstream playback formats and resolutions.
-
-| Video Container Format| Description | Resolution |
-| :----------: | :-----------------------------------------------: | :--------------------------------: |
-| mp4 | Video format: H.264/MPEG-2/MPEG-4/H.263; audio format: AAC/MP3| Mainstream resolutions, such as 1080p, 720p, 480p, and 270p|
-| mkv | Video format: H.264/MPEG-2/MPEG-4/H.263; audio format: AAC/MP3| Mainstream resolutions, such as 1080p, 720p, 480p, and 270p|
-| ts | Video format: H.264/MPEG-2/MPEG-4; audio format: AAC/MP3 | Mainstream resolutions, such as 1080p, 720p, 480p, and 270p|
-| webm | Video format: VP8; audio format: VORBIS | Mainstream resolutions, such as 1080p, 720p, 480p, and 270p|
-
-## How to Develop
-
-For details about the APIs, see [VideoPlayer in the Media API](../reference/apis/js-apis-media.md#videoplayer8).
-
-### Full-Process Scenario
-
-The full video playback process includes creating an instance, setting the URL, setting the surface ID, preparing for video playback, playing video, pausing playback, obtaining track information, seeking to a playback position, setting the volume, setting the playback speed, stopping playback, resetting the playback configuration, and releasing resources.
-
-For details about the **url** types supported by **VideoPlayer**, see the [url attribute](../reference/apis/js-apis-media.md#videoplayer_attributes).
-
-For details about how to create an XComponent, see [XComponent](../reference/arkui-ts/ts-basic-components-xcomponent.md).
-
-```js
-import media from '@ohos.multimedia.media'
-import fs from '@ohos.file.fs'
-export class VideoPlayerDemo {
- // Report an error in the case of a function invocation failure.
- failureCallback(error) {
- console.info(`error happened,error Name is ${error.name}`);
- console.info(`error happened,error Code is ${error.code}`);
- console.info(`error happened,error Message is ${error.message}`);
- }
-
- // Report an error in the case of a function invocation exception.
- catchCallback(error) {
- console.info(`catch error happened,error Name is ${error.name}`);
- console.info(`catch error happened,error Code is ${error.code}`);
- console.info(`catch error happened,error Message is ${error.message}`);
- }
-
- // Used to print the video track information.
- printfDescription(obj) {
- for (let item in obj) {
- let property = obj[item];
- console.info('key is ' + item);
- console.info('value is ' + property);
- }
- }
-
- async videoPlayerDemo() {
- let videoPlayer = undefined;
- let surfaceID = 'test' // The surfaceID parameter is used for screen display. Its value is obtained through the XComponent API. For details about the document link, see the method of creating the XComponent.
- let fdPath = 'fd://'
- // The stream in the path can be pushed to the device by running the "hdc file send D:\xxx\H264_AAC.mp4 /data/app/el1/bundle/public/ohos.acts.multimedia.video.videoplayer/ohos.acts.multimedia.video.videoplayer/assets/entry/resources/rawfile" command.
- let path = '/data/app/el1/bundle/public/ohos.acts.multimedia.video.videoplayer/ohos.acts.multimedia.video.videoplayer/assets/entry/resources/rawfile/H264_AAC.mp4';
- let file = await fs.open(path);
- fdPath = fdPath + '' + file.fd;
- // Call createVideoPlayer to create a VideoPlayer instance.
- await media.createVideoPlayer().then((video) => {
- if (typeof (video) != 'undefined') {
- console.info('createVideoPlayer success!');
- videoPlayer = video;
- } else {
- console.info('createVideoPlayer fail!');
+OpenHarmony provides two solutions for video playback development:
+
+- [AVPlayer](using-avplayer-for-playback.md) class: provides ArkTS and JS APIs to implement audio and video playback. It also supports parsing streaming media and local assets, decapsulating media assets, decoding video, and rendering video. It is applicable to end-to-end playback of media assets and can be used to play video files in MP4 and MKV formats.
+
+-